merge default into stable for 3.0 code freeze

author:    Matt Mackall <mpm@selenic.com>
date:      Thu, 17 Apr 2014 19:36:17 -0400
branch:    stable
tag:       3.0-rc
changeset: 21160:564f55b25122
parent:    21028:a0f437e2f5a9 (current diff)
parent:    21159:024f38f6d5f6 (diff)
child:     21161:ef59019f4771

removed files:
contrib/tmplrewrite.py
hgext/inotify/__init__.py
hgext/inotify/client.py
hgext/inotify/common.py
hgext/inotify/linux/__init__.py
hgext/inotify/linux/_inotify.c
hgext/inotify/linux/watcher.py
hgext/inotify/linuxserver.py
hgext/inotify/server.py
hgext/interhg.py
tests/blacklists/inotify-failures
tests/test-inotify-debuginotify.t
tests/test-inotify-dirty-dirstate.t
tests/test-inotify-issue1371.t
tests/test-inotify-issue1542.t
tests/test-inotify-issue1556.t
tests/test-inotify-lookup.t
tests/test-inotify.t
--- a/Makefile	Tue Apr 15 03:21:59 2014 +0900
+++ b/Makefile	Thu Apr 17 19:36:17 2014 -0400
@@ -71,7 +71,7 @@
 install-home: install-home-bin install-home-doc
 
 install-home-bin: build
-	$(PYTHON) setup.py $(PURE) install --home="$(HOME)" --force
+	$(PYTHON) setup.py $(PURE) install --home="$(HOME)" --prefix="" --force
 
 install-home-doc: doc
 	cd doc && $(MAKE) $(MFLAGS) PREFIX="$(HOME)" install
@@ -102,7 +102,7 @@
 
 update-pot: i18n/hg.pot
 
-i18n/hg.pot: $(PYFILES) $(DOCFILES)
+i18n/hg.pot: $(PYFILES) $(DOCFILES) i18n/posplit i18n/hggettext
 	$(PYTHON) i18n/hggettext mercurial/commands.py \
 	  hgext/*.py hgext/*/__init__.py \
 	  mercurial/fileset.py mercurial/revset.py \
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/Makefile.python	Thu Apr 17 19:36:17 2014 -0400
@@ -0,0 +1,79 @@
+PYTHONVER=2.7.6
+PYTHONNAME=python-
+PREFIX=$(HOME)/bin/prefix-$(PYTHONNAME)$(PYTHONVER)
+SYMLINKDIR=$(HOME)/bin
+
+help:
+	@echo
+	@echo 'Make a custom installation of a Python version'
+	@echo
+	@echo 'Common make parameters:'
+	@echo '  PYTHONVER=...    [$(PYTHONVER)]'
+	@echo '  PREFIX=...       [$(PREFIX)]'
+	@echo '  SYMLINKDIR=...   [$(SYMLINKDIR) creating $(PYTHONNAME)$(PYTHONVER)]'
+	@echo
+	@echo 'Common make targets:'
+	@echo '  python    - install Python $$PYTHONVER in $$PREFIX'
+	@echo '  symlink   - create a $$SYMLINKDIR/$(PYTHONNAME)$$PYTHONVER symlink'
+	@echo
+	@echo 'Example: create a temporary Python installation:'
+	@echo '  $$ make -f Makefile.python python PYTHONVER=2.4 PREFIX=/tmp/p24'
+	@echo '  $$ /tmp/p24/bin/python -V'
+	@echo '  Python 2.4'
+	@echo
+	@echo 'Some external libraries are required for building Python: zlib bzip2 openssl.'
+	@echo 'Make sure their development packages are installed systemwide.'
+# fedora: yum install zlib-devel bzip2-devel openssl-devel
+# debian: apt-get install zlib1g-dev libbz2-dev libssl-dev
+	@echo
+	@echo 'To build a nice collection of interesting Python versions:'
+	@echo '  $$ for v in 2.{4{,.2,.3},5{,.6},6{,.1,.2,.9},7{,.6}}; do'
+	@echo '    make -f Makefile.python symlink PYTHONVER=$$v || break; done'
+	@echo 'To run a Mercurial test on all these Python versions:'
+	@echo '  $$ for py in `cd ~/bin && ls $(PYTHONNAME)2.*`; do'
+	@echo '    echo $$py; $$py run-tests.py test-http.t; echo; done'
+	@echo
+
+export LANGUAGE=C
+export LC_ALL=C
+
+python: $(PREFIX)/bin/python docutils
+	printf 'import sys, zlib, bz2, docutils\nif sys.version_info >= (2,6):\n import ssl' | $(PREFIX)/bin/python
+
+PYTHON_SRCDIR=Python-$(PYTHONVER)
+PYTHON_SRCFILE=$(PYTHON_SRCDIR).tgz
+
+$(PREFIX)/bin/python:
+	[ -f $(PYTHON_SRCFILE) ] || wget http://www.python.org/ftp/python/$(PYTHONVER)/$(PYTHON_SRCFILE) || [ -f $(PYTHON_SRCFILE) ]
+	rm -rf $(PYTHON_SRCDIR)
+	tar xf $(PYTHON_SRCFILE)
+	# Ubuntu disables SSLv2 the hard way, disable it on old Pythons too
+	-sed -i 's,self.*SSLv2_method(),0;//\0,g' $(PYTHON_SRCDIR)/Modules/_ssl.c
+	# Find multiarch system libraries on Ubuntu with Python 2.4.x
+	# http://lipyrary.blogspot.dk/2011/05/how-to-compile-python-on-ubuntu-1104.html
+	-sed -i "s|lib_dirs = .* \[|\0'/usr/lib/`dpkg-architecture -qDEB_HOST_MULTIARCH`',|g" $(PYTHON_SRCDIR)/setup.py
+	# Find multiarch system libraries on Ubuntu and disable fortify error when setting argv
+	LDFLAGS="-L/usr/lib/`dpkg-architecture -qDEB_HOST_MULTIARCH`"; \
+	BASECFLAGS=-U_FORTIFY_SOURCE; \
+	export LDFLAGS BASECFLAGS; \
+	cd $(PYTHON_SRCDIR) && ./configure --prefix=$(PREFIX) && make all SVNVERSION=pwd && make install
+	printf 'import sys, zlib, bz2\nif sys.version_info >= (2,6):\n import ssl' | $(PREFIX)/bin/python
+	rm -rf $(PYTHON_SRCDIR)
+
+DOCUTILSVER=0.11
+DOCUTILS_SRCDIR=docutils-$(DOCUTILSVER)
+DOCUTILS_SRCFILE=$(DOCUTILS_SRCDIR).tar.gz
+
+docutils: $(PREFIX)/bin/python
+	@$(PREFIX)/bin/python -c 'import docutils' || ( set -ex; \
+	[ -f $(DOCUTILS_SRCFILE) ] || wget http://downloads.sourceforge.net/project/docutils/docutils/$(DOCUTILSVER)/$(DOCUTILS_SRCFILE) || [ -f $(DOCUTILS_SRCFILE) ]; \
+	rm -rf $(DOCUTILS_SRCDIR); \
+	tar xf $(DOCUTILS_SRCFILE); \
+	cd $(DOCUTILS_SRCDIR) && $(PREFIX)/bin/python setup.py install --prefix=$(PREFIX); \
+	$(PREFIX)/bin/python -c 'import docutils'; \
+	rm -rf $(DOCUTILS_SRCDIR); )
+
+symlink: python $(SYMLINKDIR)
+	ln -sf $(PREFIX)/bin/python $(SYMLINKDIR)/$(PYTHONNAME)$(PYTHONVER)
+
+.PHONY: help python docutils symlink
--- a/contrib/check-code.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/contrib/check-code.py	Thu Apr 17 19:36:17 2014 -0400
@@ -121,6 +121,7 @@
     (r'^( *)\t', "don't use tabs to indent"),
     (r'sed (-e )?\'(\d+|/[^/]*/)i(?!\\\n)',
      "put a backslash-escaped newline after sed 'i' command"),
+    (r'^diff *-\w*u.*$\n(^  \$ |^$)', "prefix diff -u with cmp"),
   ],
   # warnings
   [
@@ -150,6 +151,9 @@
      "explicit exit code checks unnecessary"),
     (uprefix + r'set -e', "don't use set -e"),
     (uprefix + r'(\s|fi\b|done\b)', "use > for continued lines"),
+    (uprefix + r'.*:\.\S*/', "x:.y in a path does not work on msys, rewrite "
+     "as x://.y, or see `hg log -k msys` for alternatives", r'-\S+:\.|' #-Rxxx
+     'hg pull -q file:../test'), # in test-pull.t which is skipped on windows
     (r'^  saved backup bundle to \$TESTTMP.*\.hg$', winglobmsg),
     (r'^  changeset .* references (corrupted|missing) \$TESTTMP/.*[^)]$',
      winglobmsg),
@@ -162,6 +166,8 @@
     (r'^  moving \S+/.*[^)]$', winglobmsg),
     (r'^  no changes made to subrepo since.*/.*[^)]$', winglobmsg),
     (r'^  .*: largefile \S+ not available from file:.*/.*[^)]$', winglobmsg),
+    (r'^  .*file://\$TESTTMP',
+     'write "file:/*/$TESTTMP" + (glob) to match on windows too'),
   ],
   # warnings
   [
@@ -185,6 +191,7 @@
 
 pypats = [
   [
+    (r'\([^)]*\*\w[^()]+\w+=', "can't pass varargs with keyword in Py2.5"),
     (r'^\s*def\s*\w+\s*\(.*,\s*\(',
      "tuple parameter unpacking not available in Python 3+"),
     (r'lambda\s*\(.*,.*\)',
@@ -194,12 +201,14 @@
      'use "import foo.bar" on its own line instead.'),
     (r'(?<!def)\s+(cmp)\(', "cmp is not available in Python 3+"),
     (r'\breduce\s*\(.*', "reduce is not available in Python 3+"),
+    (r'dict\(.*=', 'dict() is different in Py2 and 3 and is slower than {}',
+     'dict-from-generator'),
     (r'\.has_key\b', "dict.has_key is not available in Python 3+"),
     (r'\s<>\s', '<> operator is not available in Python 3+, use !='),
     (r'^\s*\t', "don't use tabs"),
     (r'\S;\s*\n', "semicolon"),
-    (r'[^_]_\((?:"[^"]+"[ \t\n+]*)+%', "don't use % inside _()"),
-    (r"[^_]_\((?:'[^']+'[ \t\n+]*)+%", "don't use % inside _()"),
+    (r'[^_]_\([ \t\n]*(?:"[^"]+"[ \t\n+]*)+%', "don't use % inside _()"),
+    (r"[^_]_\([ \t\n]*(?:'[^']+'[ \t\n+]*)+%", "don't use % inside _()"),
     (r'(\w|\)),\w', "missing whitespace after ,"),
     (r'(\w|\))[+/*\-<>]\w', "missing whitespace in expression"),
     (r'^\s+(\w|\.)+=\w[^,()\n]*$', "missing whitespace in assignment"),
@@ -306,6 +315,7 @@
 txtpats = [
   [
     ('\s$', 'trailing whitespace'),
+    ('.. note::[ \n][^\n]', 'add two newlines after note::')
   ],
   []
 ]
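
The new pypats rule above flags keyword-argument dict() construction.
Per the rule's own message, dict(...) behaves differently between
Python 2 and 3 and is slower than a literal, since it is a name lookup
plus a function call rather than a single bytecode build operation. A
minimal illustration of what the checker rejects and what it wants
instead (option names hypothetical)::

    # flagged by the new check: keyword-argument dict() construction
    opts = dict(git=True, unified=5)

    # preferred: a dict literal
    opts = {'git': True, 'unified': 5}

The synthrepo.py, extdiff.py, and bugzilla.py hunks further down apply
exactly this rewrite to existing code.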
--- a/contrib/editmerge	Tue Apr 15 03:21:59 2014 +0900
+++ b/contrib/editmerge	Thu Apr 17 19:36:17 2014 -0400
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 # A simple script for opening merge conflicts in the editor.
 # Use the following Mercurial settings to enable it.
 #
--- a/contrib/hgfixes/fix_bytesmod.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/contrib/hgfixes/fix_bytesmod.py	Thu Apr 17 19:36:17 2014 -0400
@@ -33,10 +33,11 @@
               '''
 
     def transform(self, node, results):
-        if self.filename in blacklist:
-            return
-        elif self.filename == 'mercurial/util.py':
-            touch_import('.', 'py3kcompat', node=node)
+        for bfn in blacklist:
+            if self.filename.endswith(bfn):
+                return
+        if not self.filename.endswith('mercurial/py3kcompat.py'):
+            touch_import('mercurial', 'py3kcompat', node=node)
 
         formatstr = results['formatstr'].clone()
         data = results['data'].clone()
@@ -60,4 +61,3 @@
 
         call = Call(Name('bytesformatter', prefix=' '), args)
         return call
-
--- a/contrib/hgk	Tue Apr 15 03:21:59 2014 +0900
+++ b/contrib/hgk	Thu Apr 17 19:36:17 2014 -0400
@@ -208,7 +208,7 @@
 	exit 1
     }
     set leftover {}
-    fconfigure $commfd -blocking 0 -translation lf
+    fconfigure $commfd -blocking 0 -translation lf -eofchar {}
     fileevent $commfd readable [list getcommitlines $commfd]
     $canv delete all
     $canv create text 3 3 -anchor nw -text "Reading commits..." \
@@ -795,8 +795,8 @@
 # set the focus back to the toplevel for any click outside
 # the entry widgets
 proc click {w} {
-    global entries
-    foreach e $entries {
+    global ctext entries
+    foreach e [concat $entries $ctext] {
 	if {$w == $e} return
     }
     focus .
@@ -2546,6 +2546,7 @@
 
 proc selnextline {dir} {
     global selectedline
+    focus .
     if {![info exists selectedline]} return
     set l [expr $selectedline + $dir]
     unmarkmatches
@@ -2583,6 +2584,7 @@
 
 proc goback {} {
     global history historyindex
+    focus .
 
     if {$historyindex > 1} {
 	incr historyindex -1
@@ -2597,6 +2599,7 @@
 
 proc goforw {} {
     global history historyindex
+    focus .
 
     if {$historyindex < [llength $history]} {
 	set cmd [lindex $history $historyindex]
@@ -3890,7 +3893,7 @@
 }
 
 proc writecommit {} {
-    global rowmenuid wrcomtop commitinfo wrcomcmd
+    global rowmenuid wrcomtop commitinfo
 
     set top .writecommit
     set wrcomtop $top
@@ -3905,12 +3908,9 @@
     $top.head insert 0 [lindex $commitinfo($rowmenuid) 0]
     $top.head conf -state readonly
     grid x $top.head -sticky w
-    ttk::label $top.clab -text "Command:"
-    ttk::entry $top.cmd -width 60 -textvariable wrcomcmd
-    grid $top.clab $top.cmd -sticky w -pady 10
     ttk::label $top.flab -text "Output file:"
     ttk::entry $top.fname -width 60
-    $top.fname insert 0 [file normalize "commit-[string range $rowmenuid 0 6]"]
+    $top.fname insert 0 [file normalize "commit-[string range $rowmenuid 0 6].diff"]
     grid $top.flab $top.fname -sticky w
     ttk::frame $top.buts
     ttk::button $top.buts.gen -text "Write" -command wrcomgo
@@ -3928,9 +3928,8 @@
     global wrcomtop
 
     set id [$wrcomtop.sha1 get]
-    set cmd "echo $id | [$wrcomtop.cmd get]"
     set fname [$wrcomtop.fname get]
-    if {[catch {exec sh -c $cmd > $fname &} err]} {
+    if {[catch {exec $::env(HG) --config ui.report_untrusted=false export --git -o [string map {% %%} $fname] $id} err]} {
 	error_popup "Error writing commit: $err"
     }
     catch {destroy $wrcomtop}
@@ -4056,7 +4055,6 @@
 set datemode 0
 set boldnames 0
 set diffopts "-U 5 -p"
-set wrcomcmd "\"\$HG\" --config ui.report_untrusted=false debug-diff-tree --stdin -p --pretty"
 
 set mainfont {Helvetica 9}
 set curidfont {}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/hgperf	Thu Apr 17 19:36:17 2014 -0400
@@ -0,0 +1,100 @@
+#!/usr/bin/env python
+#
+# hgperf - measure performance of Mercurial commands
+#
+# Copyright 2014 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+'''measure performance of Mercurial commands
+
+Using ``hgperf`` instead of ``hg`` measures performance of the target
+Mercurial command. For example, the execution below measures
+performance of :hg:`heads --topo`::
+
+    $ hgperf heads --topo
+
+All command output via ``ui`` is suppressed, and only the measurement
+result is displayed; see also the "perf" extension in "contrib".
+
+Costs of processing done before dispatching to the command function,
+such as the steps below, are not measured::
+
+    - parsing the command line (e.g. option validity checks)
+    - reading configuration files
+
+But ``pre-`` and ``post-`` hook invocations for the target command are
+measured, even though they run before or after dispatching to the
+command function, because they may be required to repeat execution of
+the target command correctly.
+'''
+
+import os
+import sys
+
+libdir = '@LIBDIR@'
+
+if libdir != '@' 'LIBDIR' '@':
+    if not os.path.isabs(libdir):
+        libdir = os.path.join(os.path.dirname(os.path.realpath(__file__)),
+                              libdir)
+        libdir = os.path.abspath(libdir)
+    sys.path.insert(0, libdir)
+
+# enable importing on demand to reduce startup time
+try:
+    from mercurial import demandimport; demandimport.enable()
+except ImportError:
+    import sys
+    sys.stderr.write("abort: couldn't find mercurial libraries in [%s]\n" %
+                     ' '.join(sys.path))
+    sys.stderr.write("(check your install and PYTHONPATH)\n")
+    sys.exit(-1)
+
+import mercurial.util
+import mercurial.dispatch
+
+import time
+
+def timer(func, title=None):
+    results = []
+    begin = time.time()
+    count = 0
+    while True:
+        ostart = os.times()
+        cstart = time.time()
+        r = func()
+        cstop = time.time()
+        ostop = os.times()
+        count += 1
+        a, b = ostart, ostop
+        results.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
+        if cstop - begin > 3 and count >= 100:
+            break
+        if cstop - begin > 10 and count >= 3:
+            break
+    if title:
+        sys.stderr.write("! %s\n" % title)
+    if r:
+        sys.stderr.write("! result: %s\n" % r)
+    m = min(results)
+    sys.stderr.write("! wall %f comb %f user %f sys %f (best of %d)\n"
+                     % (m[0], m[1] + m[2], m[1], m[2], count))
+
+orgruncommand = mercurial.dispatch.runcommand
+
+def runcommand(lui, repo, cmd, fullargs, ui, options, d, cmdpats, cmdoptions):
+    ui.pushbuffer()
+    lui.pushbuffer()
+    timer(lambda : orgruncommand(lui, repo, cmd, fullargs, ui,
+                                 options, d, cmdpats, cmdoptions))
+    ui.popbuffer()
+    lui.popbuffer()
+
+mercurial.dispatch.runcommand = runcommand
+
+for fp in (sys.stdin, sys.stdout, sys.stderr):
+    mercurial.util.setbinary(fp)
+
+mercurial.dispatch.run()
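
The timer above repeats the wrapped command and reports only the best
run: the loop stops once more than 3 seconds have elapsed and at least
100 runs have completed, or once more than 10 seconds have elapsed and
at least 3 runs have completed, and the minimum of the samples is taken
as the least noisy estimate. A toy workload run through the same timer,
as if pasted into the script above (timing output illustrative)::

    def work():
        return sum(i * i for i in range(10000))

    timer(work, title='sum of squares')
    # ! sum of squares
    # ! result: 333283335000
    # ! wall 0.000812 comb 0.000000 user 0.000000 sys 0.000000 (best of N)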
--- a/contrib/import-checker.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/contrib/import-checker.py	Thu Apr 17 19:36:17 2014 -0400
@@ -11,12 +11,15 @@
 def dotted_name_of_path(path):
     """Given a relative path to a source file, return its dotted module name.
 
-
     >>> dotted_name_of_path('mercurial/error.py')
     'mercurial.error'
+    >>> dotted_name_of_path('zlibmodule.so')
+    'zlib'
     """
     parts = path.split('/')
-    parts[-1] = parts[-1][:-3] # remove .py
+    parts[-1] = parts[-1].split('.', 1)[0] # remove .py and .so and .ARCH.so
+    if parts[-1].endswith('module'):
+        parts[-1] = parts[-1][:-6]
     return '.'.join(parts)
 
 
@@ -136,7 +139,7 @@
     http://bugs.python.org/issue19510.
 
     >>> list(verify_stdlib_on_own_line('import sys, foo'))
-    ['mixed stdlib and relative imports:\\n   foo, sys']
+    ['mixed imports\\n   stdlib:    sys\\n   relative:  foo']
     >>> list(verify_stdlib_on_own_line('import sys, os'))
     []
     >>> list(verify_stdlib_on_own_line('import foo, bar'))
@@ -144,13 +147,13 @@
     """
     for node in ast.walk(ast.parse(source)):
         if isinstance(node, ast.Import):
-            from_stdlib = {}
+            from_stdlib = {False: [], True: []}
             for n in node.names:
-                from_stdlib[n.name] = n.name in stdlib_modules
-            num_std = len([x for x in from_stdlib.values() if x])
-            if num_std not in (len(from_stdlib.values()), 0):
-                yield ('mixed stdlib and relative imports:\n   %s' %
-                       ', '.join(sorted(from_stdlib.iterkeys())))
+                from_stdlib[n.name in stdlib_modules].append(n.name)
+            if from_stdlib[True] and from_stdlib[False]:
+                yield ('mixed imports\n   stdlib:    %s\n   relative:  %s' %
+                       (', '.join(sorted(from_stdlib[True])),
+                        ', '.join(sorted(from_stdlib[False]))))
 
 class CircularImport(Exception):
     pass
--- a/contrib/mergetools.hgrc	Tue Apr 15 03:21:59 2014 +0900
+++ b/contrib/mergetools.hgrc	Thu Apr 17 19:36:17 2014 -0400
@@ -85,7 +85,6 @@
 
 ; Linux version of Beyond Compare
 bcompare.args=$local $other $base -mergeoutput=$output -ro -lefttitle=parent1 -centertitle=base -righttitle=parent2 -outputtitle=merged -automerge -reviewconflicts -solo
-bcompare.premerge=False
 bcompare.gui=True
 bcompare.priority=-1
 bcompare.diffargs=-lro -lefttitle='$plabel1' -righttitle='$clabel' -solo -expandall $parent $child
@@ -103,7 +102,6 @@
 araxis.regappend=\ConsoleCompare.exe
 araxis.priority=-2
 araxis.args=/3 /a2 /wait /merge /title1:"Other" /title2:"Base" /title3:"Local :"$local $other $base $local $output
-araxis.premerge=False
 araxis.checkconflict=True
 araxis.binary=True
 araxis.gui=True
--- a/contrib/perf.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/contrib/perf.py	Thu Apr 17 19:36:17 2014 -0400
@@ -335,7 +335,7 @@
     def d():
         if clear:
             repo.invalidatevolatilesets()
-        repo.revs(expr)
+        for r in repo.revs(expr): pass
     timer(d)
 
 @command('perfvolatilesets')
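
Context for this one-line change: repo.revs() now returns a lazily
evaluated set, so merely calling it no longer performs the work that
perfrevset is supposed to measure; the benchmark has to drain the
iterator. The pitfall in miniature, with a plain generator standing in
for the revset machinery::

    def revs(expr):
        # lazy: nothing runs until the generator is consumed
        for r in xrange(1000000):  # pretend each step is expensive
            yield r

    revs('all()')            # builds the generator only; measures nothing
    for r in revs('all()'):  # forces full evaluation, as perfrevset now does
        pass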
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/revsetbenchmarks.py	Thu Apr 17 19:36:17 2014 -0400
@@ -0,0 +1,125 @@
+#!/usr/bin/env python
+
+# Measure the performance of a list of revsets against multiple revisions
+# given as a parameter. Check out each revision in turn and run perfrevset
+# with every revset in the list to benchmark its performance.
+#
+# - The first argument is a revset of Mercurial's own repo to run against.
+# - The second argument is the file from which the list of revsets is read.
+#   If the second argument is omitted, the list is read from standard input.
+#
+# You should run this from the root of your Mercurial repository.
+#
+# This script also does one run against the currently installed version of
+# Mercurial to compare performance.
+
+import sys
+from subprocess import check_call, Popen, CalledProcessError, STDOUT, PIPE
+
+def check_output(*args, **kwargs):
+    kwargs.setdefault('stderr', PIPE)
+    kwargs.setdefault('stdout', PIPE)
+    proc = Popen(*args, **kwargs)
+    output, error = proc.communicate()
+    if proc.returncode != 0:
+        raise CalledProcessError(proc.returncode, ' '.join(args))
+    return output
+
+def update(rev):
+    """update the repo to a revision"""
+    try:
+        check_call(['hg', 'update', '--quiet', '--check', str(rev)])
+    except CalledProcessError, exc:
+        print >> sys.stderr, 'update to revision %s failed, aborting' % rev
+        sys.exit(exc.returncode)
+
+def perf(revset):
+    """run benchmark for this very revset"""
+    try:
+        output = check_output(['./hg',
+                               '--config',
+                               'extensions.perf=contrib/perf.py',
+                               'perfrevset',
+                               revset],
+                               stderr=STDOUT)
+        output = output.lstrip('!') # remove useless ! in this context
+        return output.strip()
+    except CalledProcessError, exc:
+        print >> sys.stderr, 'abort: cannot run revset benchmark'
+        sys.exit(exc.returncode)
+
+def printrevision(rev):
+    """print data about a revision"""
+    sys.stdout.write("Revision: ")
+    sys.stdout.flush()
+    check_call(['hg', 'log', '--rev', str(rev), '--template',
+               '{desc|firstline}\n'])
+
+def getrevs(spec):
+    """get the list of rev matched by a revset"""
+    try:
+        out = check_output(['hg', 'log', '--template={rev}\n', '--rev', spec])
+    except CalledProcessError, exc:
+        print >> sys.stderr, "abort, can't get revision from %s" % spec
+        sys.exit(exc.returncode)
+    return [r for r in out.split() if r]
+
+
+
+target_rev = sys.argv[1]
+
+revsetsfile = sys.stdin
+if len(sys.argv) > 2:
+    revsetsfile = open(sys.argv[2])
+
+revsets = [l.strip() for l in revsetsfile]
+
+print "Revsets to benchmark"
+print "----------------------------"
+
+for idx, rset in enumerate(revsets):
+    print "%i) %s" % (idx, rset)
+
+print "----------------------------"
+print
+
+
+revs = getrevs(target_rev)
+
+results = []
+for r in revs:
+    print "----------------------------"
+    printrevision(r)
+    print "----------------------------"
+    update(r)
+    res = []
+    results.append(res)
+    for idx, rset in enumerate(revsets):
+        data = perf(rset)
+        res.append(data)
+        print "%i)" % idx, data
+        sys.stdout.flush()
+    print "----------------------------"
+
+
+print """
+
+Result by revset
+================
+"""
+
+print 'Revision:', revs
+for idx, rev in enumerate(revs):
+    sys.stdout.write('%i) ' % idx)
+    sys.stdout.flush()
+    printrevision(rev)
+
+print
+print
+
+for ridx, rset in enumerate(revsets):
+
+    print "revset #%i: %s" % (ridx, rset)
+    for idx, data in enumerate(results):
+        print '%i) %s' % (idx, data[ridx])
+    print
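
With the revset list added below, a typical invocation (arguments
illustrative) would be run from the root of a Mercurial checkout::

    $ ./contrib/revsetbenchmarks.py '(tip~20)::tip' contrib/revsetbenchmarks.txt

Each listed revset is then benchmarked at every revision matched by the
first argument.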
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/revsetbenchmarks.txt	Thu Apr 17 19:36:17 2014 -0400
@@ -0,0 +1,16 @@
+all()
+draft()
+::tip
+draft() and ::tip
+0::tip
+roots(0::tip)
+author(lmoscovicz)
+author(mpm)
+author(lmoscovicz) or author(mpm)
+tip:0
+max(tip:0)
+min(0:tip)
+0::
+min(0::)
+roots((tip~100::) - (tip~100::tip))
+::p1(p1(tip))::
--- a/contrib/synthrepo.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/contrib/synthrepo.py	Thu Apr 17 19:36:17 2014 -0400
@@ -152,7 +152,7 @@
         if lastctx.rev() != nullrev:
             interarrival[roundto(ctx.date()[0] - lastctx.date()[0], 300)] += 1
         diff = sum((d.splitlines()
-                    for d in ctx.diff(pctx, opts=dict(git=True))), [])
+                    for d in ctx.diff(pctx, opts={'git': True})), [])
         fileadds, diradds, fileremoves, filechanges = 0, 0, 0, 0
         for filename, mar, lineadd, lineremove, binary in parsegitdiff(diff):
             if binary:
@@ -189,21 +189,21 @@
     def pronk(d):
         return sorted(d.iteritems(), key=lambda x: x[1], reverse=True)
 
-    json.dump(dict(revs=len(revs),
-                   lineschanged=pronk(lineschanged),
-                   children=pronk(invchildren),
-                   fileschanged=pronk(fileschanged),
-                   filesadded=pronk(filesadded),
-                   linesinfilesadded=pronk(linesinfilesadded),
-                   dirsadded=pronk(dirsadded),
-                   filesremoved=pronk(filesremoved),
-                   linelengths=pronk(linelengths),
-                   parents=pronk(parents),
-                   p1distance=pronk(p1distance),
-                   p2distance=pronk(p2distance),
-                   interarrival=pronk(interarrival),
-                   tzoffset=pronk(tzoffset),
-                   ),
+    json.dump({'revs': len(revs),
+               'lineschanged': pronk(lineschanged),
+               'children': pronk(invchildren),
+               'fileschanged': pronk(fileschanged),
+               'filesadded': pronk(filesadded),
+               'linesinfilesadded': pronk(linesinfilesadded),
+               'dirsadded': pronk(dirsadded),
+               'filesremoved': pronk(filesremoved),
+               'linelengths': pronk(linelengths),
+               'parents': pronk(parents),
+               'p1distance': pronk(p1distance),
+               'p2distance': pronk(p2distance),
+               'interarrival': pronk(interarrival),
+               'tzoffset': pronk(tzoffset),
+               },
               fp)
     fp.close()
 
--- a/contrib/tmplrewrite.py	Tue Apr 15 03:21:59 2014 +0900
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,23 +0,0 @@
-#!/usr/bin/python
-import sys, os, re
-
-IGNORE = ['.css', '.py']
-oldre = re.compile('#([\w\|%]+)#')
-
-def rewrite(fn):
-    f = open(fn)
-    new = open(fn + '.new', 'wb')
-    for ln in f:
-        new.write(oldre.sub('{\\1}', ln))
-    new.close()
-    f.close()
-    os.rename(new.name, f.name)
-
-if __name__ == '__main__':
-    if len(sys.argv) < 2:
-        print 'usage: python tmplrewrite.py [file [file [file]]]'
-    for fn in sys.argv[1:]:
-        if os.path.splitext(fn) in IGNORE:
-            continue
-        print 'rewriting %s...' % fn
-        rewrite(fn)
--- a/doc/gendoc.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/doc/gendoc.py	Thu Apr 17 19:36:17 2014 -0400
@@ -50,6 +50,9 @@
             allopts[-1] += " <%s[+]>" % optlabel
         elif (default is not None) and not isinstance(default, bool):
             allopts[-1] += " <%s>" % optlabel
+        if '\n' in desc:
+            # only remove line breaks and indentation
+            desc = ' '.join(l.lstrip() for l in desc.split('\n'))
         desc += default and _(" (default: %s)") % default or ""
         yield (", ".join(allopts), desc)
 
@@ -153,6 +156,8 @@
             continue
         d = get_cmd(h[f], cmdtable)
         ui.write(sectionfunc(d['cmd']))
+        # short description
+        ui.write(d['desc'][0])
         # synopsis
         ui.write("::\n\n")
         synopsislines = d['synopsis'].splitlines()
--- a/hgext/bugzilla.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/hgext/bugzilla.py	Thu Apr 17 19:36:17 2014 -0400
@@ -620,7 +620,7 @@
         ver = self.bzproxy.Bugzilla.version()['version'].split('.')
         self.bzvermajor = int(ver[0])
         self.bzverminor = int(ver[1])
-        self.bzproxy.User.login(dict(login=user, password=passwd))
+        self.bzproxy.User.login({'login': user, 'password': passwd})
 
     def transport(self, uri):
         if urlparse.urlparse(uri, "http")[0] == "https":
@@ -630,13 +630,15 @@
 
     def get_bug_comments(self, id):
         """Return a string with all comment text for a bug."""
-        c = self.bzproxy.Bug.comments(dict(ids=[id], include_fields=['text']))
+        c = self.bzproxy.Bug.comments({'ids': [id],
+                                       'include_fields': ['text']})
         return ''.join([t['text'] for t in c['bugs'][str(id)]['comments']])
 
     def filter_real_bug_ids(self, bugs):
-        probe = self.bzproxy.Bug.get(dict(ids=sorted(bugs.keys()),
-                                          include_fields=[],
-                                          permissive=True))
+        probe = self.bzproxy.Bug.get({'ids': sorted(bugs.keys()),
+                                      'include_fields': [],
+                                      'permissive': True,
+                                      })
         for badbug in probe['faults']:
             id = badbug['id']
             self.ui.status(_('bug %d does not exist\n') % id)
@@ -717,10 +719,10 @@
         than the subject line, and leave a blank line after it.
         '''
         user = self.map_committer(committer)
-        matches = self.bzproxy.User.get(dict(match=[user]))
+        matches = self.bzproxy.User.get({'match': [user]})
         if not matches['users']:
             user = self.ui.config('bugzilla', 'user', 'bugs')
-            matches = self.bzproxy.User.get(dict(match=[user]))
+            matches = self.bzproxy.User.get({'match': [user]})
             if not matches['users']:
                 raise util.Abort(_("default bugzilla user %s email not found") %
                                  user)
@@ -879,14 +881,13 @@
 
         mapfile = self.ui.config('bugzilla', 'style')
         tmpl = self.ui.config('bugzilla', 'template')
-        t = cmdutil.changeset_templater(self.ui, self.repo,
-                                        False, None, mapfile, False)
         if not mapfile and not tmpl:
             tmpl = _('changeset {node|short} in repo {root} refers '
                      'to bug {bug}.\ndetails:\n\t{desc|tabindent}')
         if tmpl:
             tmpl = templater.parsestring(tmpl, quoted=False)
-            t.use_template(tmpl)
+        t = cmdutil.changeset_templater(self.ui, self.repo,
+                                        False, None, tmpl, mapfile, False)
         self.ui.pushbuffer()
         t.show(ctx, changes=ctx.changeset(),
                bug=str(bugid),
--- a/hgext/churn.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/hgext/churn.py	Thu Apr 17 19:36:17 2014 -0400
@@ -18,10 +18,10 @@
 def maketemplater(ui, repo, tmpl):
     tmpl = templater.parsestring(tmpl, quoted=False)
     try:
-        t = cmdutil.changeset_templater(ui, repo, False, None, None, False)
+        t = cmdutil.changeset_templater(ui, repo, False, None, tmpl,
+                                        None, False)
     except SyntaxError, inst:
         raise util.Abort(inst.args[0])
-    t.use_template(tmpl)
     return t
 
 def changedlines(ui, repo, ctx1, ctx2, fns):
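
bugzilla.py above, churn.py here, and hgcia.py below are all adapted to
the same cmdutil API change: changeset_templater() now takes the
template text as a constructor argument just before the map file, and
the separate use_template() call is gone. The shape of the call, as it
appears in these hunks::

    # before
    t = cmdutil.changeset_templater(ui, repo, False, None, mapfile, False)
    t.use_template(tmpl)

    # after
    t = cmdutil.changeset_templater(ui, repo, False, None, tmpl, mapfile,
                                    False)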
--- a/hgext/color.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/hgext/color.py	Thu Apr 17 19:36:17 2014 -0400
@@ -311,6 +311,15 @@
     for name, ext in extensions.extensions():
         _styles.update(getattr(ext, 'colortable', {}))
 
+def valideffect(effect):
+    'Determine if the effect is valid or not.'
+    good = False
+    if not _terminfo_params and effect in _effects:
+        good = True
+    elif effect in _terminfo_params or effect[:-11] in _terminfo_params:
+        good = True
+    return good
+
 def configstyles(ui):
     for status, cfgeffects in ui.configitems('color'):
         if '.' not in status or status.startswith('color.'):
@@ -319,9 +328,7 @@
         if cfgeffects:
             good = []
             for e in cfgeffects:
-                if not _terminfo_params and e in _effects:
-                    good.append(e)
-                elif e in _terminfo_params or e[:-11] in _terminfo_params:
+                if valideffect(e):
                     good.append(e)
                 else:
                     ui.warn(_("ignoring unknown color/effect %r "
@@ -375,6 +382,8 @@
             s = _styles.get(l, '')
             if s:
                 effects.append(s)
+            elif valideffect(l):
+                effects.append(l)
         effects = ' '.join(effects)
         if effects:
             return '\n'.join([render_effects(s, effects)
@@ -386,6 +395,10 @@
         # i18n: "label" is a keyword
         raise error.ParseError(_("label expects two arguments"))
 
+    # add known effects to the mapping so symbols like 'red', 'bold',
+    # etc. don't need to be quoted
+    mapping.update(dict([(k, k) for k in _effects]))
+
     thing = templater._evalifliteral(args[1], context, mapping)
 
     # apparently, repo could be a string that is the favicon?
@@ -424,6 +437,16 @@
          _("when to colorize (boolean, always, auto, or never)"),
          _('TYPE')))
 
+def debugcolor(ui, repo, **opts):
+    global _styles
+    _styles = {}
+    for effect in _effects.keys():
+        _styles[effect] = effect
+    ui.write(('color mode: %s\n') % ui._colormode)
+    ui.write(_('available colors:\n'))
+    for label, colors in _styles.items():
+        ui.write(('%s\n') % colors, label=label)
+
 if os.name != 'nt':
     w32effects = None
 else:
@@ -553,3 +576,8 @@
         finally:
             # Explicitly reset original attributes
             _kernel32.SetConsoleTextAttribute(stdout, origattr)
+
+cmdtable = {
+    'debugcolor':
+        (debugcolor, [], ('hg debugcolor'))
+}
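
Two user-visible additions in this color.py hunk set: the label()
template function now resolves bare effect names, so symbols like 'red'
or 'bold' no longer need quoting inside templates, and a new debugcolor
command prints the active color mode followed by every known effect,
each line rendered in the style it names (the _styles table is remapped
so that each effect labels itself).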
--- a/hgext/convert/common.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/hgext/convert/common.py	Thu Apr 17 19:36:17 2014 -0400
@@ -63,13 +63,13 @@
 
         self.encoding = 'utf-8'
 
-    def checkhexformat(self, revstr):
+    def checkhexformat(self, revstr, mapname='splicemap'):
         """ fails if revstr is not a 40 byte hex. mercurial and git both uses
             such format for their revision numbering
         """
         if not re.match(r'[0-9a-fA-F]{40,40}$', revstr):
-            raise util.Abort(_('splicemap entry %s is not a valid revision'
-                               ' identifier') % revstr)
+            raise util.Abort(_('%s entry %s is not a valid revision'
+                               ' identifier') % (mapname, revstr))
 
     def before(self):
         pass
@@ -172,7 +172,7 @@
         """
         return {}
 
-    def checkrevformat(self, revstr):
+    def checkrevformat(self, revstr, mapname='splicemap'):
         """revstr is a string that describes a revision in the given
            source control system.  Return true if revstr has correct
            format.
@@ -192,10 +192,6 @@
         self.path = path
         self.created = []
 
-    def getheads(self):
-        """Return a list of this repository's heads"""
-        raise NotImplementedError
-
     def revmapfile(self):
         """Path to a file that will contain lines
         source_rev_id sink_rev_id
--- a/hgext/convert/git.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/hgext/convert/git.py	Thu Apr 17 19:36:17 2014 -0400
@@ -297,7 +297,7 @@
 
         return bookmarks
 
-    def checkrevformat(self, revstr):
+    def checkrevformat(self, revstr, mapname='splicemap'):
         """ git revision string is a 40 byte hex """
-        self.checkhexformat(revstr)
+        self.checkhexformat(revstr, mapname)
 
--- a/hgext/convert/hg.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/hgext/convert/hg.py	Thu Apr 17 19:36:17 2014 -0400
@@ -25,6 +25,9 @@
 
 from common import NoRepo, commit, converter_source, converter_sink
 
+import re
+sha1re = re.compile(r'\b[0-9a-f]{6,40}\b')
+
 class mercurial_sink(converter_sink):
     def __init__(self, ui, path):
         converter_sink.__init__(self, ui, path)
@@ -75,10 +78,6 @@
     def authorfile(self):
         return self.repo.join("authormap")
 
-    def getheads(self):
-        h = self.repo.changelog.heads()
-        return [hex(x) for x in h]
-
     def setbranch(self, branch, pbranches):
         if not self.clonebranches:
             return
@@ -157,6 +156,14 @@
         p2 = parents.pop(0)
 
         text = commit.desc
+
+        sha1s = re.findall(sha1re, text)
+        for sha1 in sha1s:
+            oldrev = source.lookuprev(sha1)
+            newrev = revmap.get(oldrev)
+            if newrev is not None:
+                text = text.replace(sha1, newrev[:len(sha1)])
+
         extra = commit.extra.copy()
         if self.branchnames and commit.branch:
             extra['branch'] = commit.branch
@@ -190,14 +197,36 @@
             parentctx = None
             tagparent = nullid
 
-        try:
-            oldlines = sorted(parentctx['.hgtags'].data().splitlines(True))
-        except Exception:
-            oldlines = []
+        oldlines = set()
+        for branch, heads in self.repo.branchmap().iteritems():
+            for h in heads:
+                if '.hgtags' in self.repo[h]:
+                    oldlines.update(
+                        set(self.repo[h]['.hgtags'].data().splitlines(True)))
+        oldlines = sorted(list(oldlines))
 
         newlines = sorted([("%s %s\n" % (tags[tag], tag)) for tag in tags])
         if newlines == oldlines:
             return None, None
+
+        # if the old and new tags match, then there is nothing to update
+        oldtags = set()
+        newtags = set()
+        for line in oldlines:
+            s = line.strip().split(' ', 1)
+            if len(s) != 2:
+                continue
+            oldtags.add(s[1])
+        for line in newlines:
+            s = line.strip().split(' ', 1)
+            if len(s) != 2:
+                continue
+            if s[1] not in oldtags:
+                newtags.add(s[1].strip())
+
+        if not newtags:
+            return None, None
+
         data = "".join(newlines)
         def getfilectx(repo, memctx, f):
             return context.memfilectx(f, data, False, False, None)
@@ -412,6 +441,6 @@
     def getbookmarks(self):
         return bookmarks.listbookmarks(self.repo)
 
-    def checkrevformat(self, revstr):
+    def checkrevformat(self, revstr, mapname='splicemap'):
         """ Mercurial, revision string is a 40 byte hex """
-        self.checkhexformat(revstr)
+        self.checkhexformat(revstr, mapname)
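
The sha1re hunk teaches the Mercurial sink to rewrite revision hashes
embedded in commit messages, so that a description such as "backout of
abc123de" still refers to the converted changeset. The substitution as
a standalone sketch, with lookuprev and revmap standing in for the
converter's source lookup and revision map::

    import re

    sha1re = re.compile(r'\b[0-9a-f]{6,40}\b')

    def rewritehashes(text, lookuprev, revmap):
        # replace each source-repo hash with its converted equivalent,
        # truncated to the length the author originally wrote
        for sha1 in sha1re.findall(text):
            oldrev = lookuprev(sha1)      # full source hash, or None
            newrev = revmap.get(oldrev)   # converted full hash, or None
            if newrev is not None:
                text = text.replace(sha1, newrev[:len(sha1)])
        return text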
--- a/hgext/convert/subversion.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/hgext/convert/subversion.py	Thu Apr 17 19:36:17 2014 -0400
@@ -41,13 +41,30 @@
     pass
 
 def revsplit(rev):
-    """Parse a revision string and return (uuid, path, revnum)."""
-    url, revnum = rev.rsplit('@', 1)
-    parts = url.split('/', 1)
+    """Parse a revision string and return (uuid, path, revnum).
+    >>> revsplit('svn:a2147622-4a9f-4db4-a8d3-13562ff547b2'
+    ...          '/proj%20B/mytrunk/mytrunk@1')
+    ('a2147622-4a9f-4db4-a8d3-13562ff547b2', '/proj%20B/mytrunk/mytrunk', 1)
+    >>> revsplit('svn:8af66a51-67f5-4354-b62c-98d67cc7be1d@1')
+    ('', '', 1)
+    >>> revsplit('@7')
+    ('', '', 7)
+    >>> revsplit('7')
+    ('', '', 0)
+    >>> revsplit('bad')
+    ('', '', 0)
+    """
+    parts = rev.rsplit('@', 1)
+    revnum = 0
+    if len(parts) > 1:
+        revnum = int(parts[1])
+    parts = parts[0].split('/', 1)
+    uuid = ''
     mod = ''
-    if len(parts) > 1:
+    if len(parts) > 1 and parts[0].startswith('svn:'):
+        uuid = parts[0][4:]
         mod = '/' + parts[1]
-    return parts[0][4:], mod, int(revnum)
+    return uuid, mod, revnum
 
 def quote(s):
     # As of svn 1.7, many svn calls expect "canonical" paths. In
@@ -157,6 +174,30 @@
             self._stdout.close()
             self._stdout = None
 
+class directlogstream(list):
+    """Direct revision log iterator.
+    This can be used for debugging and development but it will probably leak
+    memory and is not suitable for real conversions."""
+    def __init__(self, url, paths, start, end, limit=0,
+                  discover_changed_paths=True, strict_node_history=False):
+
+        def receiver(orig_paths, revnum, author, date, message, pool):
+            paths = {}
+            if orig_paths is not None:
+                for k, v in orig_paths.iteritems():
+                    paths[k] = changedpath(v)
+            self.append((paths, revnum, author, date, message))
+
+        # Use an ra of our own so that our parent can consume
+        # our results without confusing the server.
+        t = transport.SvnRaTransport(url=url)
+        svn.ra.get_log(t.ra, paths, start, end, limit,
+                       discover_changed_paths,
+                       strict_node_history,
+                       receiver)
+
+    def close(self):
+        pass
 
 # Check to see if the given path is a local Subversion repo. Verify this by
 # looking for several svn-specific files and directories in the given
@@ -454,13 +495,13 @@
         del self.commits[rev]
         return commit
 
-    def checkrevformat(self, revstr):
+    def checkrevformat(self, revstr, mapname='splicemap'):
         """ fails if revision format does not match the correct format"""
         if not re.match(r'svn:[0-9a-f]{8,8}-[0-9a-f]{4,4}-'
                               '[0-9a-f]{4,4}-[0-9a-f]{4,4}-[0-9a-f]'
                               '{12,12}(.*)\@[0-9]+$',revstr):
-            raise util.Abort(_('splicemap entry %s is not a valid revision'
-                               ' identifier') % revstr)
+            raise util.Abort(_('%s entry %s is not a valid revision'
+                               ' identifier') % (mapname, revstr))
 
     def gettags(self):
         tags = {}
@@ -975,6 +1016,9 @@
             relpaths.append(p.strip('/'))
         args = [self.baseurl, relpaths, start, end, limit,
                 discover_changed_paths, strict_node_history]
+        # undocumented feature: debugsvnlog can be disabled
+        if not self.ui.configbool('convert', 'svn.debugsvnlog', True):
+            return directlogstream(*args)
         arg = encodeargs(args)
         hgexe = util.hgexecutable()
         cmd = '%s debugsvnlog' % util.shellquote(hgexe)
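
The directlogstream class added above backs an escape hatch: by default
the converter reads the Subversion log through an 'hg debugsvnlog'
child process, but setting the undocumented convert.svn.debugsvnlog
config option to false calls svn.ra.get_log in-process instead, which
is useful for debugging even though, per its docstring, it will
probably leak memory.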
--- a/hgext/eol.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/hgext/eol.py	Thu Apr 17 19:36:17 2014 -0400
@@ -151,7 +151,7 @@
         self.cfg = config.config()
         # Our files should not be touched. The pattern must be
         # inserted first to override a '** = native' pattern.
-        self.cfg.set('patterns', '.hg*', 'BIN')
+        self.cfg.set('patterns', '.hg*', 'BIN', 'eol')
         # We can then parse the user's patterns.
         self.cfg.parse('.hgeol', data)
 
@@ -176,14 +176,14 @@
         for pattern, style in self.cfg.items('patterns'):
             key = style.upper()
             try:
-                ui.setconfig('decode', pattern, self._decode[key])
-                ui.setconfig('encode', pattern, self._encode[key])
+                ui.setconfig('decode', pattern, self._decode[key], 'eol')
+                ui.setconfig('encode', pattern, self._encode[key], 'eol')
             except KeyError:
                 ui.warn(_("ignoring unknown EOL style '%s' from %s\n")
                         % (style, self.cfg.source('patterns', pattern)))
         # eol.only-consistent can be specified in ~/.hgrc or .hgeol
         for k, v in self.cfg.items('eol'):
-            ui.setconfig('eol', k, v)
+            ui.setconfig('eol', k, v, 'eol')
 
     def checkrev(self, repo, ctx, files):
         failed = []
@@ -261,7 +261,7 @@
     return False
 
 def uisetup(ui):
-    ui.setconfig('hooks', 'preupdate.eol', preupdate)
+    ui.setconfig('hooks', 'preupdate.eol', preupdate, 'eol')
 
 def extsetup(ui):
     try:
@@ -280,7 +280,7 @@
     for name, fn in filters.iteritems():
         repo.adddatafilter(name, fn)
 
-    ui.setconfig('patch', 'eol', 'auto')
+    ui.setconfig('patch', 'eol', 'auto', 'eol')
 
     class eolrepo(repo.__class__):
 
--- a/hgext/extdiff.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/hgext/extdiff.py	Thu Apr 17 19:36:17 2014 -0400
@@ -207,10 +207,10 @@
         # Function to quote file/dir names in the argument string.
         # When not operating in 3-way mode, an empty string is
         # returned for parent2
-        replace = dict(parent=dir1a, parent1=dir1a, parent2=dir1b,
-                       plabel1=label1a, plabel2=label1b,
-                       clabel=label2, child=dir2,
-                       root=repo.root)
+        replace = {'parent': dir1a, 'parent1': dir1a, 'parent2': dir1b,
+                   'plabel1': label1a, 'plabel2': label1b,
+                   'clabel': label2, 'child': dir2,
+                   'root': repo.root}
         def quote(match):
             key = match.group()[1:]
             if not do3way and key == 'parent2':
@@ -316,7 +316,7 @@
     that revision is compared to the working directory, and, when no
     revisions are specified, the working directory files are compared
     to its parent.\
-''') % dict(path=util.uirepr(path))
+''') % {'path': util.uirepr(path)}
 
             # We must translate the docstring right away since it is
             # used as a format string. The string will unfortunately
--- a/hgext/hgcia.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/hgext/hgcia.py	Thu Apr 17 19:36:17 2014 -0400
@@ -202,8 +202,7 @@
             template = self.diffstat and self.dstemplate or self.deftemplate
         template = templater.parsestring(template, quoted=False)
         t = cmdutil.changeset_templater(self.ui, self.repo, False, None,
-                                        style, False)
-        t.use_template(template)
+                                        template, style, False)
         self.templater = t
 
     def strip(self, path):
--- a/hgext/histedit.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/hgext/histedit.py	Thu Apr 17 19:36:17 2014 -0400
@@ -30,10 +30,12 @@
 
  # Edit history between c561b4e977df and 7c2fd3b9020c
  #
+ # Commits are listed from least to most recent
+ #
  # Commands:
  #  p, pick = use commit
  #  e, edit = use commit, but stop for amending
- #  f, fold = use commit, but fold into previous commit (combines N and N-1)
+ #  f, fold = use commit, but combine it with the one above
  #  d, drop = remove commit from history
  #  m, mess = edit message without changing commit content
  #
@@ -49,10 +51,12 @@
 
  # Edit history between c561b4e977df and 7c2fd3b9020c
  #
+ # Commits are listed from least to most recent
+ #
  # Commands:
  #  p, pick = use commit
  #  e, edit = use commit, but stop for amending
- #  f, fold = use commit, but fold into previous commit (combines N and N-1)
+ #  f, fold = use commit, but combine it with the one above
  #  d, drop = remove commit from history
  #  m, mess = edit message without changing commit content
  #
@@ -152,10 +156,8 @@
 from mercurial import copies
 from mercurial import context
 from mercurial import hg
-from mercurial import lock as lockmod
 from mercurial import node
 from mercurial import repair
-from mercurial import scmutil
 from mercurial import util
 from mercurial import obsolete
 from mercurial import merge as mergemod
@@ -170,10 +172,12 @@
 # i18n: command names and abbreviations must remain untranslated
 editcomment = _("""# Edit history between %s and %s
 #
+# Commits are listed from least to most recent
+#
 # Commands:
 #  p, pick = use commit
 #  e, edit = use commit, but stop for amending
-#  f, fold = use commit, but fold into previous commit (combines N and N-1)
+#  f, fold = use commit, but combine it with the one above
 #  d, drop = remove commit from history
 #  m, mess = edit message without changing commit content
 #
@@ -193,7 +197,8 @@
     def commitfunc(**kwargs):
         phasebackup = repo.ui.backupconfig('phases', 'new-commit')
         try:
-            repo.ui.setconfig('phases', 'new-commit', phasemin)
+            repo.ui.setconfig('phases', 'new-commit', phasemin,
+                              'histedit')
             extra = kwargs.get('extra', {}).copy()
             extra['histedit_source'] = src.hex()
             kwargs['extra'] = extra
@@ -215,11 +220,12 @@
     else:
         try:
             # ui.forcemerge is an internal variable, do not document
-            repo.ui.setconfig('ui', 'forcemerge', opts.get('tool', ''))
+            repo.ui.setconfig('ui', 'forcemerge', opts.get('tool', ''),
+                              'histedit')
             stats = mergemod.update(repo, ctx.node(), True, True, False,
                                     ctx.p1().node())
         finally:
-            repo.ui.setconfig('ui', 'forcemerge', '')
+            repo.ui.setconfig('ui', 'forcemerge', '', 'histedit')
         repo.setparents(wcpar, node.nullid)
         repo.dirstate.write()
         # fix up dirstate for copies and renames
@@ -370,7 +376,7 @@
     phasebackup = repo.ui.backupconfig('phases', 'new-commit')
     try:
         phasemin = max(ctx.phase(), oldctx.phase())
-        repo.ui.setconfig('phases', 'new-commit', phasemin)
+        repo.ui.setconfig('phases', 'new-commit', phasemin, 'histedit')
         n = collapse(repo, ctx, repo[newnode], commitopts)
     finally:
         repo.ui.restoreconfig(phasebackup)
@@ -562,8 +568,11 @@
                 remote = None
             root = findoutgoing(ui, repo, remote, force, opts)
         else:
-            root = revs[0]
-            root = scmutil.revsingle(repo, root).node()
+            rootrevs = list(repo.set('roots(%lr)', revs))
+            if len(rootrevs) != 1:
+                raise util.Abort(_('The specified revisions must have ' +
+                    'exactly one common root'))
+            root = rootrevs[0].node()
 
         keep = opts.get('keep', False)
         revs = between(repo, root, topmost, keep)
@@ -643,23 +652,28 @@
     if os.path.exists(repo.sjoin('undo')):
         os.unlink(repo.sjoin('undo'))
 
-
-def bootstrapcontinue(ui, repo, parentctx, rules, opts):
-    action, currentnode = rules.pop(0)
-    ctx = repo[currentnode]
+def gatherchildren(repo, ctx):
     # is there any new commit between the expected parent and "."
     #
     # note: does not take non-linear new changes into account (but the
     #       previous implementation didn't use them anyway (issue3655))
-    newchildren = [c.node() for c in repo.set('(%d::.)', parentctx)]
-    if parentctx.node() != node.nullid:
+    newchildren = [c.node() for c in repo.set('(%d::.)', ctx)]
+    if ctx.node() != node.nullid:
         if not newchildren:
-            # `parentctxnode` should match but no result. This means that
-            # currentnode is not a descendant from parentctxnode.
+            # `ctx` should match but no result. This means that
+            # currentnode is not a descendant from ctx.
             msg = _('%s is not an ancestor of working directory')
             hint = _('use "histedit --abort" to clear broken state')
-            raise util.Abort(msg % parentctx, hint=hint)
-        newchildren.pop(0)  # remove parentctxnode
+            raise util.Abort(msg % ctx, hint=hint)
+        newchildren.pop(0)  # remove ctx
+    return newchildren
+
+def bootstrapcontinue(ui, repo, parentctx, rules, opts):
+    action, currentnode = rules.pop(0)
+    ctx = repo[currentnode]
+
+    newchildren = gatherchildren(repo, parentctx)
+
     # Commit dirty working directory if necessary
     new = None
     m, a, r, d = repo.status()[:4]
@@ -897,7 +911,7 @@
             # This would reduce bundle overhead
             repair.strip(ui, repo, c)
     finally:
-        lockmod.release(lock)
+        release(lock)
 
 def summaryhook(ui, repo):
     if not os.path.exists(repo.join('histedit-state')):
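
Beyond the reworded editor comments, the notable behavioral change in
histedit.py is revision selection: instead of taking revs[0] as the
base, histedit now computes roots() over the specified revisions and
aborts unless exactly one common root exists, rejecting disjoint ranges
the old code would have accepted. The bootstrapcontinue() refactoring
splits the "any new commits between the expected parent and ." check
into a reusable gatherchildren() helper.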
--- a/hgext/inotify/__init__.py	Tue Apr 15 03:21:59 2014 +0900
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,93 +0,0 @@
-# __init__.py - inotify-based status acceleration for Linux
-#
-# Copyright 2006, 2007, 2008 Bryan O'Sullivan <bos@serpentine.com>
-# Copyright 2007, 2008 Brendan Cully <brendan@kublai.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-'''accelerate status report using Linux's inotify service'''
-
-# todo: socket permissions
-
-from mercurial.i18n import _
-from mercurial import util
-import server
-from client import client, QueryFailed
-
-testedwith = 'internal'
-
-def serve(ui, repo, **opts):
-    '''start an inotify server for this repository'''
-    server.start(ui, repo.dirstate, repo.root, opts)
-
-def debuginotify(ui, repo, **opts):
-    '''debugging information for inotify extension
-
-    Prints the list of directories being watched by the inotify server.
-    '''
-    cli = client(ui, repo)
-    response = cli.debugquery()
-
-    ui.write(_('directories being watched:\n'))
-    for path in response:
-        ui.write(('  %s/\n') % path)
-
-def reposetup(ui, repo):
-    if not util.safehasattr(repo, 'dirstate'):
-        return
-
-    class inotifydirstate(repo.dirstate.__class__):
-
-        # We'll set this to false after an unsuccessful attempt so that
-        # next calls of status() within the same instance don't try again
-        # to start an inotify server if it won't start.
-        _inotifyon = True
-
-        def status(self, match, subrepos, ignored, clean, unknown):
-            files = match.files()
-            if '.' in files:
-                files = []
-            if (self._inotifyon and not ignored and not subrepos and
-                not self._dirty):
-                cli = client(ui, repo)
-                try:
-                    result = cli.statusquery(files, match, False,
-                                            clean, unknown)
-                except QueryFailed, instr:
-                    ui.debug(str(instr))
-                    # don't retry within the same hg instance
-                    inotifydirstate._inotifyon = False
-                    pass
-                else:
-                    if ui.config('inotify', 'debug'):
-                        r2 = super(inotifydirstate, self).status(
-                            match, [], False, clean, unknown)
-                        for c, a, b in zip('LMARDUIC', result, r2):
-                            for f in a:
-                                if f not in b:
-                                    ui.warn('*** inotify: %s +%s\n' % (c, f))
-                            for f in b:
-                                if f not in a:
-                                    ui.warn('*** inotify: %s -%s\n' % (c, f))
-                        result = r2
-                    return result
-            return super(inotifydirstate, self).status(
-                match, subrepos, ignored, clean, unknown)
-
-    repo.dirstate.__class__ = inotifydirstate
-
-cmdtable = {
-    'debuginotify':
-        (debuginotify, [], ('hg debuginotify')),
-    '^inserve':
-        (serve,
-         [('d', 'daemon', None, _('run server in background')),
-          ('', 'daemon-pipefds', '',
-           _('used internally by daemon mode'), _('NUM')),
-          ('t', 'idle-timeout', '',
-           _('minutes to sit idle before exiting'), _('NUM')),
-          ('', 'pid-file', '',
-           _('name of file to write process ID to'), _('FILE'))],
-         _('hg inserve [OPTION]...')),
-    }
--- a/hgext/inotify/client.py	Tue Apr 15 03:21:59 2014 +0900
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,173 +0,0 @@
-# client.py - inotify status client
-#
-# Copyright 2006, 2007, 2008 Bryan O'Sullivan <bos@serpentine.com>
-# Copyright 2007, 2008 Brendan Cully <brendan@kublai.com>
-# Copyright 2009 Nicolas Dumazet <nicdumz@gmail.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-from mercurial.i18n import _
-import common, server
-import errno, os, socket, struct
-
-class QueryFailed(Exception):
-    pass
-
-def start_server(function):
-    """
-    Decorator.
-    Tries to call function, if it fails, try to (re)start inotify server.
-    Raise QueryFailed if something went wrong
-    """
-    def decorated_function(self, *args):
-        try:
-            return function(self, *args)
-        except (OSError, socket.error), err:
-            autostart = self.ui.configbool('inotify', 'autostart', True)
-
-            if err.args[0] == errno.ECONNREFUSED:
-                self.ui.warn(_('inotify-client: found dead inotify server '
-                               'socket; removing it\n'))
-                os.unlink(os.path.join(self.root, '.hg', 'inotify.sock'))
-            if err.args[0] in (errno.ECONNREFUSED, errno.ENOENT) and autostart:
-                try:
-                    try:
-                        server.start(self.ui, self.dirstate, self.root,
-                                     dict(daemon=True, daemon_pipefds=''))
-                    except server.AlreadyStartedException, inst:
-                        # another process may have started its own
-                        # inotify server while this one was starting.
-                        self.ui.debug(str(inst))
-                except Exception, inst:
-                    self.ui.warn(_('inotify-client: could not start inotify '
-                                   'server: %s\n') % inst)
-                else:
-                    try:
-                        return function(self, *args)
-                    except socket.error, err:
-                        self.ui.warn(_('inotify-client: could not talk to new '
-                                       'inotify server: %s\n') % err.args[-1])
-            elif err.args[0] in (errno.ECONNREFUSED, errno.ENOENT):
-                # silently ignore normal errors if autostart is False
-                self.ui.debug('(inotify server not running)\n')
-            else:
-                self.ui.warn(_('inotify-client: failed to contact inotify '
-                               'server: %s\n') % err.args[-1])
-
-        self.ui.traceback()
-        raise QueryFailed('inotify query failed')
-
-    return decorated_function
-
-
-class client(object):
-    def __init__(self, ui, repo):
-        self.ui = ui
-        self.dirstate = repo.dirstate
-        self.root = repo.root
-        self.sock = socket.socket(socket.AF_UNIX)
-
-    def _connect(self):
-        sockpath = os.path.join(self.root, '.hg', 'inotify.sock')
-        try:
-            self.sock.connect(sockpath)
-        except socket.error, err:
-            if err.args[0] == "AF_UNIX path too long":
-                sockpath = os.readlink(sockpath)
-                self.sock.connect(sockpath)
-            else:
-                raise
-
-    def _send(self, type, data):
-        """Sends protocol version number, and the data"""
-        self.sock.sendall(chr(common.version) + type + data)
-
-        self.sock.shutdown(socket.SHUT_WR)
-
-    def _receive(self, type):
-        """
-        Read data, check version number, extract headers,
-        and returns a tuple (data descriptor, header)
-        Raises QueryFailed on error
-        """
-        cs = common.recvcs(self.sock)
-        try:
-            version = ord(cs.read(1))
-        except TypeError:
-            # empty answer, assume the server crashed
-            self.ui.warn(_('inotify-client: received empty answer from inotify '
-                           'server\n'))
-            raise QueryFailed('server crashed')
-
-        if version != common.version:
-            self.ui.warn(_('(inotify: received response from incompatible '
-                      'server version %d)\n') % version)
-            raise QueryFailed('incompatible server version')
-
-        readtype = cs.read(4)
-        if readtype != type:
-            self.ui.warn(_('(inotify: received \'%s\' response when expecting'
-                       ' \'%s\')\n') % (readtype, type))
-            raise QueryFailed('wrong response type')
-
-        hdrfmt = common.resphdrfmts[type]
-        hdrsize = common.resphdrsizes[type]
-        try:
-            resphdr = struct.unpack(hdrfmt, cs.read(hdrsize))
-        except struct.error:
-            raise QueryFailed('unable to retrieve query response headers')
-
-        return cs, resphdr
-
-    def query(self, type, req):
-        self._connect()
-
-        self._send(type, req)
-
-        return self._receive(type)
-
-    @start_server
-    def statusquery(self, names, match, ignored, clean, unknown=True):
-
-        def genquery():
-            for n in names:
-                yield n
-            states = 'almrx!'
-            if ignored:
-                raise ValueError('inotify server does not support ignored files')
-            if clean:
-                states += 'c'
-            if unknown:
-                states += '?'
-            yield states
-
-        req = '\0'.join(genquery())
-
-        cs, resphdr = self.query('STAT', req)
-
-        def readnames(nbytes):
-            if nbytes:
-                names = cs.read(nbytes)
-                if names:
-                    return filter(match, names.split('\0'))
-            return []
-        results = tuple(map(readnames, resphdr[:-1]))
-
-        if names:
-            nbytes = resphdr[-1]
-            vdirs = cs.read(nbytes)
-            if vdirs:
-                for vdir in vdirs.split('\0'):
-                    if match.explicitdir:
-                        match.explicitdir(vdir)
-
-        return results
-
-    @start_server
-    def debugquery(self):
-        cs, resphdr = self.query('DBUG', '')
-
-        nbytes = resphdr[0]
-        names = cs.read(nbytes)
-        return names.split('\0')
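
A minimal sketch of how this client was driven, for reference; it assumes
a checkout where the extension is still present, and that matchmod.always
is the catch-all matcher of this era (both are assumptions, not part of
the change itself):

    from mercurial import hg, match as matchmod, ui as uimod
    from hgext.inotify import client as inoclient

    u = uimod.ui()
    repo = hg.repository(u, '.')
    c = inoclient.client(u, repo)
    try:
        # an empty name list means "whole repository"; one list comes
        # back per status type, in the order the server packs them
        results = c.statusquery([], matchmod.always(repo.root, ''),
                                ignored=False, clean=False, unknown=True)
    except inoclient.QueryFailed, err:
        print err
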
--- a/hgext/inotify/common.py	Tue Apr 15 03:21:59 2014 +0900
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,53 +0,0 @@
-# common.py - inotify common protocol code
-#
-# Copyright 2006, 2007, 2008 Bryan O'Sullivan <bos@serpentine.com>
-# Copyright 2007, 2008 Brendan Cully <brendan@kublai.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-import cStringIO, socket, struct
-
-"""
-  Protocol between inotify clients and server:
-
-  Client sending query:
-  1) send protocol version number
-  2) send query type (string, 4 letters long)
-  3) send query parameters:
-     - For STAT, N+1 \0-separated strings:
-        1) N different names that need checking
-        2) 1 string containing all the status types to match
-     - No parameter needed for DBUG
-
-  Server sending query answer:
-  1) send protocol version number
-  2) send query type
-  3) send struct.pack'ed headers describing the length of the content:
-      e.g. for STAT, receive 9 integers describing the length of the
-      9 \0-separated string lists to be read:
-       * one file list for each lmar!?ic status type
-       * one list containing the directories visited during lookup
-
-"""
-
-version = 3
-
-resphdrfmts = {
-    'STAT': '>lllllllll', # status requests
-    'DBUG': '>l'          # debugging queries
-}
-resphdrsizes = dict((k, struct.calcsize(v))
-                    for k, v in resphdrfmts.iteritems())
-
-def recvcs(sock):
-    cs = cStringIO.StringIO()
-    s = True
-    try:
-        while s:
-            s = sock.recv(65536)
-            cs.write(s)
-    finally:
-        sock.shutdown(socket.SHUT_RD)
-    cs.seek(0)
-    return cs
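
A self-contained sketch of the framing described above (the version byte
is 3 as defined in this file; the lengths are made-up example values):

    import struct

    STATFMT = '>lllllllll'                  # resphdrfmts['STAT']
    lengths = (3, 0, 5, 0, 0, 0, 0, 2, 9)   # one length per answer list
    reply = chr(3) + 'STAT' + struct.pack(STATFMT, *lengths)
    # one version byte and four type bytes precede the packed header
    hdr = struct.unpack(STATFMT, reply[5:5 + struct.calcsize(STATFMT)])
    assert hdr == lengths
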
--- a/hgext/inotify/linux/__init__.py	Tue Apr 15 03:21:59 2014 +0900
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,44 +0,0 @@
-# __init__.py - low-level interfaces to the Linux inotify subsystem
-
-# Copyright 2006 Bryan O'Sullivan <bos@serpentine.com>
-
-# This library is free software; you can redistribute it and/or modify
-# it under the terms of version 2.1 of the GNU Lesser General Public
-# License, or any later version.
-
-'''Low-level interface to the Linux inotify subsystem.
-
-The inotify subsystem provides an efficient mechanism for file status
-monitoring and change notification.
-
-This package provides the low-level inotify system call interface and
-associated constants and helper functions.
-
-For a higher-level interface that remains highly efficient, use the
-inotify.watcher package.'''
-
-__author__ = "Bryan O'Sullivan <bos@serpentine.com>"
-
-from _inotify import *
-
-procfs_path = '/proc/sys/fs/inotify'
-
-def _read_procfs_value(name):
-    def read_value():
-        try:
-            fp = open(procfs_path + '/' + name)
-            r = int(fp.read())
-            fp.close()
-            return r
-        except (IOError, OSError):
-            return None
-
-    read_value.__doc__ = '''Return the value of the %s setting from /proc.
-
-    If inotify is not enabled on this system, return None.''' % name
-
-    return read_value
-
-max_queued_events = _read_procfs_value('max_queued_events')
-max_user_instances = _read_procfs_value('max_user_instances')
-max_user_watches = _read_procfs_value('max_user_watches')
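
The generated accessors read like ordinary functions; a sketch (Linux
only, and only meaningful where inotify support is compiled in):

    import hgext.inotify.linux as inotify

    watches = inotify.max_user_watches()
    if watches is None:
        print 'inotify does not appear to be enabled on this system'
    else:
        print 'per-user inotify watch limit:', watches
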
--- a/hgext/inotify/linux/_inotify.c	Tue Apr 15 03:21:59 2014 +0900
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,649 +0,0 @@
-/*
- * _inotify.c - Python extension interfacing to the Linux inotify subsystem
- *
- * Copyright 2006 Bryan O'Sullivan <bos@serpentine.com>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of version 2.1 of the GNU Lesser General
- * Public License or any later version.
- */
-
-#include <Python.h>
-#include <alloca.h>
-#include <sys/inotify.h>
-#include <stdint.h>
-#include <sys/ioctl.h>
-#include <unistd.h>
-
-#include <util.h>
-
-/* Variables used in the event string representation */
-static PyObject *join;
-static PyObject *er_wm;
-static PyObject *er_wmc;
-static PyObject *er_wmn;
-static PyObject *er_wmcn;
-
-static PyObject *init(PyObject *self, PyObject *args)
-{
-	PyObject *ret = NULL;
-	int fd = -1;
-
-	if (!PyArg_ParseTuple(args, ":init"))
-		goto bail;
-
-	Py_BEGIN_ALLOW_THREADS;
-	fd = inotify_init();
-	Py_END_ALLOW_THREADS;
-
-	if (fd == -1) {
-		PyErr_SetFromErrno(PyExc_OSError);
-		goto bail;
-	}
-
-	ret = PyInt_FromLong(fd);
-	if (ret == NULL)
-		goto bail;
-
-	goto done;
-
-bail:
-	if (fd != -1)
-		close(fd);
-
-	Py_CLEAR(ret);
-
-done:
-	return ret;
-}
-
-PyDoc_STRVAR(
-	init_doc,
-	"init() -> fd\n"
-	"\n"
-	"Initialize an inotify instance.\n"
-	"Return a file descriptor associated with a new inotify event queue.");
-
-static PyObject *add_watch(PyObject *self, PyObject *args)
-{
-	PyObject *ret = NULL;
-	uint32_t mask;
-	int wd = -1;
-	char *path;
-	int fd;
-
-	if (!PyArg_ParseTuple(args, "isI:add_watch", &fd, &path, &mask))
-		goto bail;
-
-	Py_BEGIN_ALLOW_THREADS;
-	wd = inotify_add_watch(fd, path, mask);
-	Py_END_ALLOW_THREADS;
-
-	if (wd == -1) {
-		PyErr_SetFromErrnoWithFilename(PyExc_OSError, path);
-		goto bail;
-	}
-
-	ret = PyInt_FromLong(wd);
-	if (ret == NULL)
-		goto bail;
-
-	goto done;
-
-bail:
-	if (wd != -1)
-		inotify_rm_watch(fd, wd);
-
-	Py_CLEAR(ret);
-
-done:
-	return ret;
-}
-
-PyDoc_STRVAR(
-	add_watch_doc,
-	"add_watch(fd, path, mask) -> wd\n"
-	"\n"
-	"Add a watch to an inotify instance, or modify an existing watch.\n"
-	"\n"
-	"        fd: file descriptor returned by init()\n"
-	"        path: path to watch\n"
-	"        mask: mask of events to watch for\n"
-	"\n"
-	"Return a unique numeric watch descriptor for the inotify instance\n"
-	"mapped by the file descriptor.");
-
-static PyObject *remove_watch(PyObject *self, PyObject *args)
-{
-	uint32_t wd;
-	int fd;
-	int r;
-
-	if (!PyArg_ParseTuple(args, "iI:remove_watch", &fd, &wd))
-		return NULL;
-
-	Py_BEGIN_ALLOW_THREADS;
-	r = inotify_rm_watch(fd, wd);
-	Py_END_ALLOW_THREADS;
-
-	if (r == -1) {
-		PyErr_SetFromErrno(PyExc_OSError);
-		return NULL;
-	}
-
-	Py_INCREF(Py_None);
-	return Py_None;
-}
-
-PyDoc_STRVAR(
-	remove_watch_doc,
-	"remove_watch(fd, wd)\n"
-	"\n"
-	"        fd: file descriptor returned by init()\n"
-	"        wd: watch descriptor returned by add_watch()\n"
-	"\n"
-	"Remove a watch associated with the watch descriptor wd from the\n"
-	"inotify instance associated with the file descriptor fd.\n"
-	"\n"
-	"Removing a watch causes an IN_IGNORED event to be generated for this\n"
-	"watch descriptor.");
-
-#define bit_name(x) {x, #x}
-
-static struct {
-	int bit;
-	const char *name;
-	PyObject *pyname;
-} bit_names[] = {
-	bit_name(IN_ACCESS),
-	bit_name(IN_MODIFY),
-	bit_name(IN_ATTRIB),
-	bit_name(IN_CLOSE_WRITE),
-	bit_name(IN_CLOSE_NOWRITE),
-	bit_name(IN_OPEN),
-	bit_name(IN_MOVED_FROM),
-	bit_name(IN_MOVED_TO),
-	bit_name(IN_CREATE),
-	bit_name(IN_DELETE),
-	bit_name(IN_DELETE_SELF),
-	bit_name(IN_MOVE_SELF),
-	bit_name(IN_UNMOUNT),
-	bit_name(IN_Q_OVERFLOW),
-	bit_name(IN_IGNORED),
-	bit_name(IN_ONLYDIR),
-	bit_name(IN_DONT_FOLLOW),
-	bit_name(IN_MASK_ADD),
-	bit_name(IN_ISDIR),
-	bit_name(IN_ONESHOT),
-	{0}
-};
-
-static PyObject *decode_mask(int mask)
-{
-	PyObject *ret = PyList_New(0);
-	int i;
-
-	if (ret == NULL)
-		goto bail;
-
-	for (i = 0; bit_names[i].bit; i++) {
-		if (mask & bit_names[i].bit) {
-			if (bit_names[i].pyname == NULL) {
-				bit_names[i].pyname = PyString_FromString(bit_names[i].name);
-				if (bit_names[i].pyname == NULL)
-					goto bail;
-			}
-			Py_INCREF(bit_names[i].pyname);
-			if (PyList_Append(ret, bit_names[i].pyname) == -1)
-				goto bail;
-		}
-	}
-
-	goto done;
-
-bail:
-	Py_CLEAR(ret);
-
-done:
-	return ret;
-}
-
-static PyObject *pydecode_mask(PyObject *self, PyObject *args)
-{
-	int mask;
-
-	if (!PyArg_ParseTuple(args, "i:decode_mask", &mask))
-		return NULL;
-
-	return decode_mask(mask);
-}
-
-PyDoc_STRVAR(
-	decode_mask_doc,
-	"decode_mask(mask) -> list_of_strings\n"
-	"\n"
-	"Decode an inotify mask value into a list of strings that give the\n"
-	"name of each bit set in the mask.");
-
-static char doc[] = "Low-level inotify interface wrappers.";
-
-static void define_const(PyObject *dict, const char *name, uint32_t val)
-{
-	PyObject *pyval = PyInt_FromLong(val);
-	PyObject *pyname = PyString_FromString(name);
-
-	if (!pyname || !pyval)
-		goto bail;
-
-	PyDict_SetItem(dict, pyname, pyval);
-
-bail:
-	Py_XDECREF(pyname);
-	Py_XDECREF(pyval);
-}
-
-static void define_consts(PyObject *dict)
-{
-	define_const(dict, "IN_ACCESS", IN_ACCESS);
-	define_const(dict, "IN_MODIFY", IN_MODIFY);
-	define_const(dict, "IN_ATTRIB", IN_ATTRIB);
-	define_const(dict, "IN_CLOSE_WRITE", IN_CLOSE_WRITE);
-	define_const(dict, "IN_CLOSE_NOWRITE", IN_CLOSE_NOWRITE);
-	define_const(dict, "IN_OPEN", IN_OPEN);
-	define_const(dict, "IN_MOVED_FROM", IN_MOVED_FROM);
-	define_const(dict, "IN_MOVED_TO", IN_MOVED_TO);
-
-	define_const(dict, "IN_CLOSE", IN_CLOSE);
-	define_const(dict, "IN_MOVE", IN_MOVE);
-
-	define_const(dict, "IN_CREATE", IN_CREATE);
-	define_const(dict, "IN_DELETE", IN_DELETE);
-	define_const(dict, "IN_DELETE_SELF", IN_DELETE_SELF);
-	define_const(dict, "IN_MOVE_SELF", IN_MOVE_SELF);
-	define_const(dict, "IN_UNMOUNT", IN_UNMOUNT);
-	define_const(dict, "IN_Q_OVERFLOW", IN_Q_OVERFLOW);
-	define_const(dict, "IN_IGNORED", IN_IGNORED);
-
-	define_const(dict, "IN_ONLYDIR", IN_ONLYDIR);
-	define_const(dict, "IN_DONT_FOLLOW", IN_DONT_FOLLOW);
-	define_const(dict, "IN_MASK_ADD", IN_MASK_ADD);
-	define_const(dict, "IN_ISDIR", IN_ISDIR);
-	define_const(dict, "IN_ONESHOT", IN_ONESHOT);
-	define_const(dict, "IN_ALL_EVENTS", IN_ALL_EVENTS);
-}
-
-struct event {
-	PyObject_HEAD
-	PyObject *wd;
-	PyObject *mask;
-	PyObject *cookie;
-	PyObject *name;
-};
-
-static PyObject *event_wd(PyObject *self, void *x)
-{
-	struct event *evt = (struct event *)self;
-	Py_INCREF(evt->wd);
-	return evt->wd;
-}
-
-static PyObject *event_mask(PyObject *self, void *x)
-{
-	struct event *evt = (struct event *)self;
-	Py_INCREF(evt->mask);
-	return evt->mask;
-}
-
-static PyObject *event_cookie(PyObject *self, void *x)
-{
-	struct event *evt = (struct event *)self;
-	Py_INCREF(evt->cookie);
-	return evt->cookie;
-}
-
-static PyObject *event_name(PyObject *self, void *x)
-{
-	struct event *evt = (struct event *)self;
-	Py_INCREF(evt->name);
-	return evt->name;
-}
-
-static struct PyGetSetDef event_getsets[] = {
-	{"wd", event_wd, NULL,
-	 "watch descriptor"},
-	{"mask", event_mask, NULL,
-	 "event mask"},
-	{"cookie", event_cookie, NULL,
-	 "rename cookie, if rename-related event"},
-	{"name", event_name, NULL,
-	 "file name"},
-	{NULL}
-};
-
-PyDoc_STRVAR(
-	event_doc,
-	"event: Structure describing an inotify event.");
-
-static PyObject *event_new(PyTypeObject *t, PyObject *a, PyObject *k)
-{
-	return (*t->tp_alloc)(t, 0);
-}
-
-static void event_dealloc(struct event *evt)
-{
-	Py_XDECREF(evt->wd);
-	Py_XDECREF(evt->mask);
-	Py_XDECREF(evt->cookie);
-	Py_XDECREF(evt->name);
-
-	Py_TYPE(evt)->tp_free(evt);
-}
-
-static PyObject *event_repr(struct event *evt)
-{
-	int cookie = evt->cookie == Py_None ? -1 : PyInt_AsLong(evt->cookie);
-	PyObject *ret = NULL, *pymasks = NULL, *pymask = NULL;
-	PyObject *tuple = NULL, *formatstr = NULL;
-
-	pymasks = decode_mask(PyInt_AsLong(evt->mask));
-	if (pymasks == NULL)
-		goto bail;
-
-	pymask = _PyString_Join(join, pymasks);
-	if (pymask == NULL)
-		goto bail;
-
-	if (evt->name != Py_None) {
-		if (cookie == -1) {
-			formatstr = er_wmn;
-			tuple = PyTuple_Pack(3, evt->wd, pymask, evt->name);
-		}
-		else {
-			formatstr = er_wmcn;
-			tuple = PyTuple_Pack(4, evt->wd, pymask,
-					     evt->cookie, evt->name);
-		}
-	} else {
-		if (cookie == -1) {
-			formatstr = er_wm;
-			tuple = PyTuple_Pack(2, evt->wd, pymask);
-		}
-		else {
-			formatstr = er_wmc;
-			tuple = PyTuple_Pack(3, evt->wd, pymask, evt->cookie);
-		}
-	}
-
-	if (tuple == NULL)
-		goto bail;
-
-	ret = PyNumber_Remainder(formatstr, tuple);
-
-	if (ret == NULL)
-		goto bail;
-
-	goto done;
-bail:
-	Py_CLEAR(ret);
-
-done:
-	Py_XDECREF(pymask);
-	Py_XDECREF(pymasks);
-	Py_XDECREF(tuple);
-
-	return ret;
-}
-
-static PyTypeObject event_type = {
-	PyVarObject_HEAD_INIT(NULL, 0)
-	"_inotify.event",             /*tp_name*/
-	sizeof(struct event), /*tp_basicsize*/
-	0,                         /*tp_itemsize*/
-	(destructor)event_dealloc, /*tp_dealloc*/
-	0,                         /*tp_print*/
-	0,                         /*tp_getattr*/
-	0,                         /*tp_setattr*/
-	0,                         /*tp_compare*/
-	(reprfunc)event_repr,      /*tp_repr*/
-	0,                         /*tp_as_number*/
-	0,                         /*tp_as_sequence*/
-	0,                         /*tp_as_mapping*/
-	0,                         /*tp_hash */
-	0,                         /*tp_call*/
-	0,                         /*tp_str*/
-	0,                         /*tp_getattro*/
-	0,                         /*tp_setattro*/
-	0,                         /*tp_as_buffer*/
-	Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /*tp_flags*/
-	event_doc,           /* tp_doc */
-	0,                         /* tp_traverse */
-	0,                         /* tp_clear */
-	0,                         /* tp_richcompare */
-	0,                         /* tp_weaklistoffset */
-	0,                         /* tp_iter */
-	0,                         /* tp_iternext */
-	0,                         /* tp_methods */
-	0,                         /* tp_members */
-	event_getsets,      /* tp_getset */
-	0,                         /* tp_base */
-	0,                         /* tp_dict */
-	0,                         /* tp_descr_get */
-	0,                         /* tp_descr_set */
-	0,                         /* tp_dictoffset */
-	0,                         /* tp_init */
-	0,                         /* tp_alloc */
-	event_new,          /* tp_new */
-};
-
-PyObject *read_events(PyObject *self, PyObject *args)
-{
-	PyObject *ctor_args = NULL;
-	PyObject *pybufsize = NULL;
-	PyObject *ret = NULL;
-	int bufsize = 65536;
-	char *buf = NULL;
-	int nread, pos;
-	int fd;
-
-	if (!PyArg_ParseTuple(args, "i|O:read", &fd, &pybufsize))
-		goto bail;
-
-	if (pybufsize && pybufsize != Py_None)
-		bufsize = PyInt_AsLong(pybufsize);
-
-	ret = PyList_New(0);
-	if (ret == NULL)
-		goto bail;
-
-	if (bufsize <= 0) {
-		int r;
-
-		Py_BEGIN_ALLOW_THREADS;
-		r = ioctl(fd, FIONREAD, &bufsize);
-		Py_END_ALLOW_THREADS;
-
-		if (r == -1) {
-			PyErr_SetFromErrno(PyExc_OSError);
-			goto bail;
-		}
-		if (bufsize == 0)
-			goto done;
-	}
-	else {
-		static long name_max;
-		static long name_fd = -1;
-		long min;
-
-		if (name_fd != fd) {
-			name_fd = fd;
-			Py_BEGIN_ALLOW_THREADS;
-			name_max = fpathconf(fd, _PC_NAME_MAX);
-			Py_END_ALLOW_THREADS;
-		}
-
-		min = sizeof(struct inotify_event) + name_max + 1;
-
-		if (bufsize < min) {
-			PyErr_Format(PyExc_ValueError,
-				     "bufsize must be at least %d", (int)min);
-			goto bail;
-		}
-	}
-
-	buf = alloca(bufsize);
-
-	Py_BEGIN_ALLOW_THREADS;
-	nread = read(fd, buf, bufsize);
-	Py_END_ALLOW_THREADS;
-
-	if (nread == -1) {
-		PyErr_SetFromErrno(PyExc_OSError);
-		goto bail;
-	}
-
-	ctor_args = PyTuple_New(0);
-
-	if (ctor_args == NULL)
-		goto bail;
-
-	pos = 0;
-
-	while (pos < nread) {
-		struct inotify_event *in = (struct inotify_event *)(buf + pos);
-		struct event *evt;
-		PyObject *obj;
-
-		obj = PyObject_CallObject((PyObject *)&event_type, ctor_args);
-
-		if (obj == NULL)
-			goto bail;
-
-		evt = (struct event *)obj;
-
-		evt->wd = PyInt_FromLong(in->wd);
-		evt->mask = PyInt_FromLong(in->mask);
-		if (in->mask & IN_MOVE)
-			evt->cookie = PyInt_FromLong(in->cookie);
-		else {
-			Py_INCREF(Py_None);
-			evt->cookie = Py_None;
-		}
-		if (in->len)
-			evt->name = PyString_FromString(in->name);
-		else {
-			Py_INCREF(Py_None);
-			evt->name = Py_None;
-		}
-
-		if (!evt->wd || !evt->mask || !evt->cookie || !evt->name)
-			goto mybail;
-
-		if (PyList_Append(ret, obj) == -1)
-			goto mybail;
-
-		pos += sizeof(struct inotify_event) + in->len;
-		continue;
-
-	mybail:
-		Py_CLEAR(evt->wd);
-		Py_CLEAR(evt->mask);
-		Py_CLEAR(evt->cookie);
-		Py_CLEAR(evt->name);
-		Py_DECREF(obj);
-
-		goto bail;
-	}
-
-	goto done;
-
-bail:
-	Py_CLEAR(ret);
-
-done:
-	Py_XDECREF(ctor_args);
-
-	return ret;
-}
-
-static int init_globals(void)
-{
-	join = PyString_FromString("|");
-	er_wm = PyString_FromString("event(wd=%d, mask=%s)");
-	er_wmn = PyString_FromString("event(wd=%d, mask=%s, name=%s)");
-	er_wmc = PyString_FromString("event(wd=%d, mask=%s, cookie=0x%x)");
-	er_wmcn = PyString_FromString("event(wd=%d, mask=%s, cookie=0x%x, name=%s)");
-
-	return join && er_wm && er_wmn && er_wmc && er_wmcn;
-}
-
-PyDoc_STRVAR(
-	read_doc,
-	"read(fd, bufsize[=65536]) -> list_of_events\n"
-	"\n"
-	"Read inotify events from a file descriptor.\n"
-	"\n"
-	"        fd: file descriptor returned by init()\n"
-	"        bufsize: size of buffer to read into, in bytes\n"
-	"\n"
-	"Return a list of event objects.\n"
-	"\n"
-	"If bufsize is > 0, block until events are available to be read.\n"
-	"Otherwise, immediately return all events that can be read without\n"
-	"blocking.");
-
-static PyMethodDef methods[] = {
-	{"init", init, METH_VARARGS, init_doc},
-	{"add_watch", add_watch, METH_VARARGS, add_watch_doc},
-	{"remove_watch", remove_watch, METH_VARARGS, remove_watch_doc},
-	{"read", read_events, METH_VARARGS, read_doc},
-	{"decode_mask", pydecode_mask, METH_VARARGS, decode_mask_doc},
-	{NULL},
-};
-
-#ifdef IS_PY3K
-static struct PyModuleDef _inotify_module = {
-	PyModuleDef_HEAD_INIT,
-	"_inotify",
-	doc,
-	-1,
-	methods
-};
-
-PyMODINIT_FUNC PyInit__inotify(void)
-{
-	PyObject *mod, *dict;
-
-	mod = PyModule_Create(&_inotify_module);
-
-	if (mod == NULL)
-		return NULL;
-
-	if (!init_globals())
-		return NULL;
-
-	dict = PyModule_GetDict(mod);
-
-	if (dict)
-		define_consts(dict);
-
-	return mod;
-}
-#else
-void init_inotify(void)
-{
-	PyObject *mod, *dict;
-
-	if (PyType_Ready(&event_type) == -1)
-		return;
-
-	if (!init_globals())
-		return;
-
-	mod = Py_InitModule3("_inotify", methods, doc);
-
-	dict = PyModule_GetDict(mod);
-
-	if (dict)
-		define_consts(dict);
-}
-#endif
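
Taken together, the entry points above supported a loop like this sketch
(the path and mask are arbitrary examples; the module must be compiled
and importable):

    import _inotify

    fd = _inotify.init()
    wd = _inotify.add_watch(fd, '/tmp',
                            _inotify.IN_CREATE | _inotify.IN_DELETE)
    try:
        # with the default bufsize, read() blocks until events arrive
        for evt in _inotify.read(fd):
            print evt.wd, _inotify.decode_mask(evt.mask), evt.name
    finally:
        _inotify.remove_watch(fd, wd)
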
--- a/hgext/inotify/linux/watcher.py	Tue Apr 15 03:21:59 2014 +0900
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,335 +0,0 @@
-# watcher.py - high-level interfaces to the Linux inotify subsystem
-
-# Copyright 2006 Bryan O'Sullivan <bos@serpentine.com>
-
-# This library is free software; you can redistribute it and/or modify
-# it under the terms of version 2.1 of the GNU Lesser General Public
-# License, or any later version.
-
-'''High-level interfaces to the Linux inotify subsystem.
-
-The inotify subsystem provides an efficient mechanism for file status
-monitoring and change notification.
-
-The watcher class hides the low-level details of the inotify
-interface, and provides a Pythonic wrapper around it.  It generates
-events that provide somewhat more information than raw inotify makes
-available.
-
-The autowatcher class is more useful, as it automatically watches
-newly-created directories on your behalf.'''
-
-__author__ = "Bryan O'Sullivan <bos@serpentine.com>"
-
-import _inotify as inotify
-import array
-import errno
-import fcntl
-import os
-import termios
-
-
-class event(object):
-    '''Derived inotify event class.
-
-    The following fields are available:
-
-        mask: event mask, indicating what kind of event this is
-
-        cookie: rename cookie, if a rename-related event
-
-        path: path of the directory in which the event occurred
-
-        name: name of the directory entry to which the event occurred
-        (may be None if the event happened to a watched directory)
-
-        fullpath: complete path at which the event occurred
-
-        wd: watch descriptor that triggered this event'''
-
-    __slots__ = (
-        'cookie',
-        'fullpath',
-        'mask',
-        'name',
-        'path',
-        'raw',
-        'wd',
-        )
-
-    def __init__(self, raw, path):
-        self.path = path
-        self.raw = raw
-        if raw.name:
-            self.fullpath = path + '/' + raw.name
-        else:
-            self.fullpath = path
-
-        self.wd = raw.wd
-        self.mask = raw.mask
-        self.cookie = raw.cookie
-        self.name = raw.name
-
-    def __repr__(self):
-        r = repr(self.raw)
-        return 'event(path=' + repr(self.path) + ', ' + r[r.find('(') + 1:]
-
-
-_event_props = {
-    'access': 'File was accessed',
-    'modify': 'File was modified',
-    'attrib': 'Attribute of a directory entry was changed',
-    'close_write': 'File was closed after being written to',
-    'close_nowrite': 'File was closed without being written to',
-    'open': 'File was opened',
-    'moved_from': 'Directory entry was renamed from this name',
-    'moved_to': 'Directory entry was renamed to this name',
-    'create': 'Directory entry was created',
-    'delete': 'Directory entry was deleted',
-    'delete_self': 'The watched directory entry was deleted',
-    'move_self': 'The watched directory entry was renamed',
-    'unmount': 'Directory was unmounted, and can no longer be watched',
-    'q_overflow': 'Kernel dropped events due to queue overflow',
-    'ignored': 'Directory entry is no longer being watched',
-    'isdir': 'Event occurred on a directory',
-    }
-
-for k, v in _event_props.iteritems():
-    mask = getattr(inotify, 'IN_' + k.upper())
-    def getter(self, mask=mask):  # bind mask now, not at lookup time
-        return self.mask & mask
-    getter.__name__ = k
-    getter.__doc__ = v
-    setattr(event, k, property(getter, doc=v))
-
-del _event_props
-
-
-class watcher(object):
-    '''Provide a Pythonic interface to the low-level inotify API.
-
-    Also adds derived information to each event that is not available
-    through the normal inotify API, such as directory name.'''
-
-    __slots__ = (
-        'fd',
-        '_paths',
-        '_wds',
-        )
-
-    def __init__(self):
-        '''Create a new inotify instance.'''
-
-        self.fd = inotify.init()
-        self._paths = {}
-        self._wds = {}
-
-    def fileno(self):
-        '''Return the file descriptor this watcher uses.
-
-        Useful for passing to select and poll.'''
-
-        return self.fd
-
-    def add(self, path, mask):
-        '''Add or modify a watch.
-
-        Return the watch descriptor added or modified.'''
-
-        path = os.path.normpath(path)
-        wd = inotify.add_watch(self.fd, path, mask)
-        self._paths[path] = wd, mask
-        self._wds[wd] = path, mask
-        return wd
-
-    def remove(self, wd):
-        '''Remove the given watch.'''
-
-        inotify.remove_watch(self.fd, wd)
-        self._remove(wd)
-
-    def _remove(self, wd):
-        path_mask = self._wds.pop(wd, None)
-        if path_mask is not None:
-            self._paths.pop(path_mask[0])
-
-    def path(self, path):
-        '''Return a (watch descriptor, event mask) pair for the given path.
-
-        If the path is not being watched, return None.'''
-
-        return self._paths.get(path)
-
-    def wd(self, wd):
-        '''Return a (path, event mask) pair for the given watch descriptor.
-
-        If the watch descriptor is not valid or not associated with
-        this watcher, return None.'''
-
-        return self._wds.get(wd)
-
-    def read(self, bufsize=None):
-        '''Read a list of queued inotify events.
-
-        If bufsize is zero, only return those events that can be read
-        immediately without blocking.  Otherwise, block until events are
-        available.'''
-
-        events = []
-        for evt in inotify.read(self.fd, bufsize):
-            events.append(event(evt, self._wds[evt.wd][0]))
-            if evt.mask & inotify.IN_IGNORED:
-                self._remove(evt.wd)
-            elif evt.mask & inotify.IN_UNMOUNT:
-                self.close()
-        return events
-
-    def close(self):
-        '''Shut down this watcher.
-
-        All subsequent method calls are likely to raise exceptions.'''
-
-        os.close(self.fd)
-        self.fd = None
-        self._paths = None
-        self._wds = None
-
-    def __len__(self):
-        '''Return the number of active watches.'''
-
-        return len(self._paths)
-
-    def __iter__(self):
-        '''Yield a (path, watch descriptor, event mask) tuple for each
-        entry being watched.'''
-
-        for path, (wd, mask) in self._paths.iteritems():
-            yield path, wd, mask
-
-    def __del__(self):
-        if self.fd is not None:
-            os.close(self.fd)
-
-    ignored_errors = [errno.ENOENT, errno.EPERM, errno.ENOTDIR]
-
-    def add_iter(self, path, mask, onerror=None):
-        '''Add or modify watches over path and its subdirectories.
-
-        Yield each added or modified watch descriptor.
-
-        To ensure that this method runs to completion, you must
-        iterate over all of its results, even if you do not care what
-        they are.  For example:
-
-            for wd in w.add_iter(path, mask):
-                pass
-
-        By default, errors are ignored.  If optional arg "onerror" is
-        specified, it should be a function; it will be called with one
-        argument, an OSError instance.  It can report the error to
-        continue with the walk, or raise the exception to abort the
-        walk.'''
-
-        # Add the IN_ONLYDIR flag to the event mask, to avoid a possible
-        # race when adding a subdirectory.  In the time between the
-        # event being queued by the kernel and us processing it, the
-        # directory may have been deleted, or replaced with a different
-        # kind of entry with the same name.
-
-        submask = mask | inotify.IN_ONLYDIR
-
-        try:
-            yield self.add(path, mask)
-        except OSError, err:
-            if onerror and err.errno not in self.ignored_errors:
-                onerror(err)
-        for root, dirs, names in os.walk(path, topdown=False, onerror=onerror):
-            for d in dirs:
-                try:
-                    yield self.add(root + '/' + d, submask)
-                except OSError, err:
-                    if onerror and err.errno not in self.ignored_errors:
-                        onerror(err)
-
-    def add_all(self, path, mask, onerror=None):
-        '''Add or modify watches over path and its subdirectories.
-
-        Return a list of added or modified watch descriptors.
-
-        By default, errors are ignored.  If optional arg "onerror" is
-        specified, it should be a function; it will be called with one
-        argument, an OSError instance.  It can report the error to
-        continue with the walk, or raise the exception to abort the
-        walk.'''
-
-        return [w for w in self.add_iter(path, mask, onerror)]
-
-
-class autowatcher(watcher):
-    '''watcher class that automatically watches newly created directories.'''
-
-    __slots__ = (
-        'addfilter',
-        )
-
-    def __init__(self, addfilter=None):
-        '''Create a new inotify instance.
-
-        This instance will automatically watch newly created
-        directories.
-
-        If the optional addfilter parameter is not None, it must be a
-        callable that takes one parameter.  It will be called each time
-        a directory is about to be automatically watched.  If it returns
-        True, the directory will be watched if it still exists,
-        otherwise, it will be skipped.'''
-
-        super(autowatcher, self).__init__()
-        self.addfilter = addfilter
-
-    _dir_create_mask = inotify.IN_ISDIR | inotify.IN_CREATE
-
-    def read(self, bufsize=None):
-        events = super(autowatcher, self).read(bufsize)
-        for evt in events:
-            if evt.mask & self._dir_create_mask == self._dir_create_mask:
-                if self.addfilter is None or self.addfilter(evt):
-                    parentmask = self._wds[evt.wd][1]
-                    # See note about race avoidance via IN_ONLYDIR above.
-                    mask = parentmask | inotify.IN_ONLYDIR
-                    try:
-                        self.add_all(evt.fullpath, mask)
-                    except OSError, err:
-                        if err.errno not in self.ignored_errors:
-                            raise
-        return events
-
-
-class threshold(object):
-    '''Class that indicates whether a file descriptor has reached a
-    threshold of readable bytes available.
-
-    This class is not thread-safe.'''
-
-    __slots__ = (
-        'fd',
-        'threshold',
-        '_iocbuf',
-        )
-
-    def __init__(self, fd, threshold=1024):
-        self.fd = fd
-        self.threshold = threshold
-        self._iocbuf = array.array('i', [0])
-
-    def readable(self):
-        '''Return the number of bytes readable on this file descriptor.'''
-
-        fcntl.ioctl(self.fd, termios.FIONREAD, self._iocbuf, True)
-        return self._iocbuf[0]
-
-    def __call__(self):
-        '''Indicate whether the number of readable bytes has met or
-        exceeded the threshold.'''
-
-        return self.readable() >= self.threshold
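
A sketch of the high-level wrapper in use, assuming it is run from a
directory where these modules are importable (the watched path is an
arbitrary example):

    import _inotify as inotify
    import watcher

    w = watcher.autowatcher()
    # watch a whole tree; newly created subdirectories are added for us
    for wd in w.add_iter('/tmp/work', inotify.IN_ALL_EVENTS):
        pass
    while True:
        for evt in w.read():
            print evt.fullpath
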
--- a/hgext/inotify/linuxserver.py	Tue Apr 15 03:21:59 2014 +0900
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,437 +0,0 @@
-# linuxserver.py - inotify status server for linux
-#
-# Copyright 2006, 2007, 2008 Bryan O'Sullivan <bos@serpentine.com>
-# Copyright 2007, 2008 Brendan Cully <brendan@kublai.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-from mercurial.i18n import _
-from mercurial import osutil, util, error
-import server
-import errno, os, select, stat, sys, time
-
-try:
-    import linux as inotify
-    from linux import watcher
-except ImportError:
-    raise
-
-def walkrepodirs(dirstate, absroot):
-    '''Iterate over all subdirectories of this repo.
-    Exclude the .hg directory, any nested repos, and ignored dirs.'''
-    def walkit(dirname, top):
-        fullpath = server.join(absroot, dirname)
-        try:
-            for name, kind in osutil.listdir(fullpath):
-                if kind == stat.S_IFDIR:
-                    if name == '.hg':
-                        if not top:
-                            return
-                    else:
-                        d = server.join(dirname, name)
-                        if dirstate._ignore(d):
-                            continue
-                        for subdir in walkit(d, False):
-                            yield subdir
-        except OSError, err:
-            if err.errno not in server.walk_ignored_errors:
-                raise
-        yield fullpath
-
-    return walkit('', True)
-
-def _explain_watch_limit(ui, dirstate, rootabs):
-    path = '/proc/sys/fs/inotify/max_user_watches'
-    try:
-        limit = int(util.readfile(path))
-    except IOError, err:
-        if err.errno != errno.ENOENT:
-            raise
-        raise util.Abort(_('this system does not seem to '
-                           'support inotify'))
-    ui.warn(_('*** the current per-user limit on the number '
-              'of inotify watches is %s\n') % limit)
-    ui.warn(_('*** this limit is too low to watch every '
-              'directory in this repository\n'))
-    ui.warn(_('*** counting directories: '))
-    ndirs = len(list(walkrepodirs(dirstate, rootabs)))
-    ui.warn(_('found %d\n') % ndirs)
-    newlimit = min(limit, 1024)
-    while newlimit < ((limit + ndirs) * 1.1):
-        newlimit *= 2
-    ui.warn(_('*** to raise the limit from %d to %d (run as root):\n') %
-            (limit, newlimit))
-    ui.warn(_('***  echo %d > %s\n') % (newlimit, path))
-    raise util.Abort(_('cannot watch %s until inotify watch limit is raised')
-                     % rootabs)
-
-class pollable(object):
-    """
-    Interface to support polling.
-    The file descriptor returned by fileno() is registered to a polling
-    object.
-    Usage:
-        Every tick, check if an event has happened since the last tick:
-        * If yes, call handle_pollevents
-        * If no, call handle_timeout
-    """
-    poll_events = select.POLLIN
-    instances = {}
-    poll = select.poll()
-
-    def fileno(self):
-        raise NotImplementedError
-
-    def handle_pollevents(self, events):
-        raise NotImplementedError
-
-    def handle_timeout(self):
-        raise NotImplementedError
-
-    def shutdown(self):
-        raise NotImplementedError
-
-    def register(self, timeout):
-        fd = self.fileno()
-
-        pollable.poll.register(fd, pollable.poll_events)
-        pollable.instances[fd] = self
-
-        self.registered = True
-        self.timeout = timeout
-
-    def unregister(self):
-        pollable.poll.unregister(self)
-        self.registered = False
-
-    @classmethod
-    def run(cls):
-        while True:
-            timeout = None
-            timeobj = None
-            for obj in cls.instances.itervalues():
-                if obj.timeout is not None and (timeout is None
-                                                or obj.timeout < timeout):
-                    timeout, timeobj = obj.timeout, obj
-            try:
-                events = cls.poll.poll(timeout)
-            except select.error, err:
-                if err.args[0] == errno.EINTR:
-                    continue
-                raise
-            if events:
-                by_fd = {}
-                for fd, event in events:
-                    by_fd.setdefault(fd, []).append(event)
-
-                for fd, events in by_fd.iteritems():
-                    cls.instances[fd].handle_pollevents(events)
-
-            elif timeobj:
-                timeobj.handle_timeout()
-
-def eventaction(code):
-    """
-    Decorator to help handle events in repowatcher
-    """
-    def decorator(f):
-        def wrapper(self, wpath):
-            if code == 'm' and wpath in self.lastevent and \
-                self.lastevent[wpath] in 'cm':
-                return
-            self.lastevent[wpath] = code
-            self.timeout = 250
-
-            f(self, wpath)
-
-        wrapper.func_name = f.func_name
-        return wrapper
-    return decorator
-
-class repowatcher(server.repowatcher, pollable):
-    """
-    Watches inotify events
-    """
-    mask = (
-        inotify.IN_ATTRIB |
-        inotify.IN_CREATE |
-        inotify.IN_DELETE |
-        inotify.IN_DELETE_SELF |
-        inotify.IN_MODIFY |
-        inotify.IN_MOVED_FROM |
-        inotify.IN_MOVED_TO |
-        inotify.IN_MOVE_SELF |
-        inotify.IN_ONLYDIR |
-        inotify.IN_UNMOUNT |
-        0)
-
-    def __init__(self, ui, dirstate, root):
-        server.repowatcher.__init__(self, ui, dirstate, root)
-
-        self.lastevent = {}
-        self.dirty = False
-        try:
-            self.watcher = watcher.watcher()
-        except OSError, err:
-            raise util.Abort(_('inotify service not available: %s') %
-                             err.strerror)
-        self.threshold = watcher.threshold(self.watcher)
-        self.fileno = self.watcher.fileno
-        self.register(timeout=None)
-
-        self.handle_timeout()
-        self.scan()
-
-    def event_time(self):
-        last = self.last_event
-        now = time.time()
-        self.last_event = now
-
-        if last is None:
-            return 'start'
-        delta = now - last
-        if delta < 5:
-            return '+%.3f' % delta
-        if delta < 50:
-            return '+%.2f' % delta
-        return '+%.1f' % delta
-
-    def add_watch(self, path, mask):
-        if not path:
-            return
-        if self.watcher.path(path) is None:
-            if self.ui.debugflag:
-                self.ui.note(_('watching %r\n') % path[self.prefixlen:])
-            try:
-                self.watcher.add(path, mask)
-            except OSError, err:
-                if err.errno in (errno.ENOENT, errno.ENOTDIR):
-                    return
-                if err.errno != errno.ENOSPC:
-                    raise
-                _explain_watch_limit(self.ui, self.dirstate, self.wprefix)
-
-    def setup(self):
-        self.ui.note(_('watching directories under %r\n') % self.wprefix)
-        self.add_watch(self.wprefix + '.hg', inotify.IN_DELETE)
-
-    def scan(self, topdir=''):
-        ds = self.dirstate._map.copy()
-        self.add_watch(server.join(self.wprefix, topdir), self.mask)
-        for root, dirs, files in server.walk(self.dirstate, self.wprefix,
-                                             topdir):
-            for d in dirs:
-                self.add_watch(server.join(root, d), self.mask)
-            wroot = root[self.prefixlen:]
-            for fn in files:
-                wfn = server.join(wroot, fn)
-                self.updatefile(wfn, self.getstat(wfn))
-                ds.pop(wfn, None)
-        wtopdir = topdir
-        if wtopdir and wtopdir[-1] != '/':
-            wtopdir += '/'
-        for wfn, state in ds.iteritems():
-            if not wfn.startswith(wtopdir):
-                continue
-            try:
-                st = self.stat(wfn)
-            except OSError:
-                status = state[0]
-                self.deletefile(wfn, status)
-            else:
-                self.updatefile(wfn, st)
-        self.check_deleted('!')
-        self.check_deleted('r')
-
-    @eventaction('c')
-    def created(self, wpath):
-        if wpath == '.hgignore':
-            self.update_hgignore()
-        try:
-            st = self.stat(wpath)
-            if stat.S_ISREG(st[0]) or stat.S_ISLNK(st[0]):
-                self.updatefile(wpath, st)
-        except OSError:
-            pass
-
-    @eventaction('m')
-    def modified(self, wpath):
-        if wpath == '.hgignore':
-            self.update_hgignore()
-        try:
-            st = self.stat(wpath)
-            if stat.S_ISREG(st[0]):
-                if self.dirstate[wpath] in 'lmn':
-                    self.updatefile(wpath, st)
-        except OSError:
-            pass
-
-    @eventaction('d')
-    def deleted(self, wpath):
-        if wpath == '.hgignore':
-            self.update_hgignore()
-        elif wpath.startswith('.hg/'):
-            return
-
-        self.deletefile(wpath, self.dirstate[wpath])
-
-    def process_create(self, wpath, evt):
-        if self.ui.debugflag:
-            self.ui.note(_('%s event: created %s\n') %
-                         (self.event_time(), wpath))
-
-        if evt.mask & inotify.IN_ISDIR:
-            self.scan(wpath)
-        else:
-            self.created(wpath)
-
-    def process_delete(self, wpath, evt):
-        if self.ui.debugflag:
-            self.ui.note(_('%s event: deleted %s\n') %
-                         (self.event_time(), wpath))
-
-        if evt.mask & inotify.IN_ISDIR:
-            tree = self.tree.dir(wpath)
-            todelete = [wfn for wfn, ignore in tree.walk('?')]
-            for fn in todelete:
-                self.deletefile(fn, '?')
-            self.scan(wpath)
-        else:
-            self.deleted(wpath)
-
-    def process_modify(self, wpath, evt):
-        if self.ui.debugflag:
-            self.ui.note(_('%s event: modified %s\n') %
-                         (self.event_time(), wpath))
-
-        if not (evt.mask & inotify.IN_ISDIR):
-            self.modified(wpath)
-
-    def process_unmount(self, evt):
-        self.ui.warn(_('filesystem containing %s was unmounted\n') %
-                     evt.fullpath)
-        sys.exit(0)
-
-    def handle_pollevents(self, events):
-        if self.ui.debugflag:
-            self.ui.note(_('%s readable: %d bytes\n') %
-                         (self.event_time(), self.threshold.readable()))
-        if not self.threshold():
-            if self.registered:
-                if self.ui.debugflag:
-                    self.ui.note(_('%s below threshold - unhooking\n') %
-                                 (self.event_time()))
-                self.unregister()
-                self.timeout = 250
-        else:
-            self.read_events()
-
-    def read_events(self, bufsize=None):
-        events = self.watcher.read(bufsize)
-        if self.ui.debugflag:
-            self.ui.note(_('%s reading %d events\n') %
-                         (self.event_time(), len(events)))
-        for evt in events:
-            if evt.fullpath == self.wprefix[:-1]:
-                # events on the root of the repository
-                # itself, e.g. permission changes or repository move
-                continue
-            assert evt.fullpath.startswith(self.wprefix)
-            wpath = evt.fullpath[self.prefixlen:]
-
-            # paths have been normalized, wpath never ends with a '/'
-
-            if wpath.startswith('.hg/') and evt.mask & inotify.IN_ISDIR:
-                # ignore subdirectories of .hg/ (merge, patches...)
-                continue
-            if wpath == ".hg/wlock":
-                if evt.mask & inotify.IN_DELETE:
-                    self.dirstate.invalidate()
-                    self.dirty = False
-                    self.scan()
-                elif evt.mask & inotify.IN_CREATE:
-                    self.dirty = True
-            else:
-                if self.dirty:
-                    continue
-
-                if evt.mask & inotify.IN_UNMOUNT:
-                    self.process_unmount(evt)
-                elif evt.mask & (inotify.IN_MODIFY | inotify.IN_ATTRIB):
-                    self.process_modify(wpath, evt)
-                elif evt.mask & (inotify.IN_DELETE | inotify.IN_DELETE_SELF |
-                                 inotify.IN_MOVED_FROM):
-                    self.process_delete(wpath, evt)
-                elif evt.mask & (inotify.IN_CREATE | inotify.IN_MOVED_TO):
-                    self.process_create(wpath, evt)
-
-        self.lastevent.clear()
-
-    def handle_timeout(self):
-        if not self.registered:
-            if self.ui.debugflag:
-                self.ui.note(_('%s hooking back up with %d bytes readable\n') %
-                             (self.event_time(), self.threshold.readable()))
-            self.read_events(0)
-            self.register(timeout=None)
-
-        self.timeout = None
-
-    def shutdown(self):
-        self.watcher.close()
-
-    def debug(self):
-        """
-        Returns a sorted list of the relative paths currently watched,
-        for debugging purposes.
-        """
-        return sorted(entry[0][self.prefixlen:] for entry in self.watcher)
-
-class socketlistener(server.socketlistener, pollable):
-    """
-    Listens for client queries on unix socket inotify.sock
-    """
-    def __init__(self, ui, root, repowatcher, timeout):
-        server.socketlistener.__init__(self, ui, root, repowatcher, timeout)
-        self.register(timeout=timeout)
-
-    def handle_timeout(self):
-        raise server.TimeoutException
-
-    def handle_pollevents(self, events):
-        for e in events:
-            self.accept_connection()
-
-    def shutdown(self):
-        self.sock.close()
-        self.sock.cleanup()
-
-    def answer_stat_query(self, cs):
-        if self.repowatcher.timeout:
-            # We got a query while a rescan is pending.  Make sure we
-            # rescan before responding, or we could give back a wrong
-            # answer.
-            self.repowatcher.handle_timeout()
-        return server.socketlistener.answer_stat_query(self, cs)
-
-class master(object):
-    def __init__(self, ui, dirstate, root, timeout=None):
-        self.ui = ui
-        self.repowatcher = repowatcher(ui, dirstate, root)
-        self.socketlistener = socketlistener(ui, root, self.repowatcher,
-                                             timeout)
-
-    def shutdown(self):
-        for obj in pollable.instances.itervalues():
-            try:
-                obj.shutdown()
-            except error.SignalInterrupt:
-                pass
-
-    def run(self):
-        self.repowatcher.setup()
-        self.ui.note(_('finished setup\n'))
-        if os.getenv('TIME_STARTUP'):
-            sys.exit(0)
-        pollable.run()
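
Putting the classes above together, server startup reduced to a sketch
like the following (ui and repo as in the client sketch earlier; the
timeout is an arbitrary example, in the milliseconds poll() expects):

    from mercurial import hg, ui as uimod
    from hgext.inotify import linuxserver

    u = uimod.ui()
    repo = hg.repository(u, '.')
    m = linuxserver.master(u, repo.dirstate, repo.root, timeout=60000)
    try:
        m.run()      # blocks in pollable.run(), dispatching events
    except linuxserver.server.TimeoutException:
        pass         # sat idle past the timeout; shut down below
    finally:
        m.shutdown()
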
--- a/hgext/inotify/server.py	Tue Apr 15 03:21:59 2014 +0900
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,465 +0,0 @@
-# server.py - common entry point for inotify status server
-#
-# Copyright 2009 Nicolas Dumazet <nicdumz@gmail.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-from mercurial.i18n import _
-from mercurial import cmdutil, posix, osutil, util
-import common
-
-import errno
-import os
-import socket
-import stat
-import struct
-import sys
-
-class AlreadyStartedException(Exception):
-    pass
-class TimeoutException(Exception):
-    pass
-
-def join(a, b):
-    if a:
-        if a[-1] == '/':
-            return a + b
-        return a + '/' + b
-    return b
-
-def split(path):
-    c = path.rfind('/')
-    if c == -1:
-        return '', path
-    return path[:c], path[c + 1:]
-
-walk_ignored_errors = (errno.ENOENT, errno.ENAMETOOLONG)
-
-def walk(dirstate, absroot, root):
-    '''Like os.walk, but only yields regular files.'''
-
-    # This function is critical to performance during startup.
-
-    def walkit(root, reporoot):
-        files, dirs = [], []
-
-        try:
-            fullpath = join(absroot, root)
-            for name, kind in osutil.listdir(fullpath):
-                if kind == stat.S_IFDIR:
-                    if name == '.hg':
-                        if not reporoot:
-                            return
-                    else:
-                        dirs.append(name)
-                        path = join(root, name)
-                        if dirstate._ignore(path):
-                            continue
-                        for result in walkit(path, False):
-                            yield result
-                elif kind in (stat.S_IFREG, stat.S_IFLNK):
-                    files.append(name)
-            yield fullpath, dirs, files
-
-        except OSError, err:
-            if err.errno == errno.ENOTDIR:
-                # fullpath was a directory, but has since been replaced
-                # by a file.
-                yield fullpath, dirs, files
-            elif err.errno not in walk_ignored_errors:
-                raise
-
-    return walkit(root, root == '')
-
-class directory(object):
-    """
-    Represents a directory
-
-    * path is the relative path from repo root to this directory
-    * files is a dict listing the files in this directory
-        - keys are file names
-        - values are file status
-    * dirs is a dict listing the subdirectories
-        - keys are subdirectory names
-        - values are directory objects
-    """
-    def __init__(self, relpath=''):
-        self.path = relpath
-        self.files = {}
-        self.dirs = {}
-
-    def dir(self, relpath):
-        """
-        Returns the directory contained at the relative path relpath.
-        Creates the intermediate directories if necessary.
-        """
-        if not relpath:
-            return self
-        l = relpath.split('/')
-        ret = self
-        while l:
-            name = l.pop(0)
-            try:
-                ret = ret.dirs[name]
-            except KeyError:
-                d = directory(join(ret.path, name))
-                ret.dirs[name] = d
-                ret = d
-        return ret
-
-    def walk(self, states, visited=None):
-        """
-        yield (filename, status) pairs for items in the trees
-        that have status in states.
-        filenames are relative to the repo root
-        """
-        for file, st in self.files.iteritems():
-            if st in states:
-                yield join(self.path, file), st
-        for dir in self.dirs.itervalues():
-            if visited is not None:
-                visited.add(dir.path)
-            for e in dir.walk(states, visited):
-                yield e
-
-    def lookup(self, states, path, visited):
-        """
-        yield root-relative filenames that match path, and whose
-        status are in states:
-        * if path is a file, yield path
-        * if path is a directory, yield directory files
-        * if path is not tracked, yield nothing
-        """
-        if path[-1] == '/':
-            path = path[:-1]
-
-        paths = path.split('/')
-
-        # we need to check separately for last node
-        last = paths.pop()
-
-        tree = self
-        try:
-            for dir in paths:
-                tree = tree.dirs[dir]
-        except KeyError:
-            # path is not tracked
-            visited.add(tree.path)
-            return
-
-        try:
-            # if path is a directory, walk it
-            target = tree.dirs[last]
-            visited.add(target.path)
-            for file, st in target.walk(states, visited):
-                yield file
-        except KeyError:
-            try:
-                if tree.files[last] in states:
-                    # path is a file
-                    visited.add(tree.path)
-                    yield path
-            except KeyError:
-                # path is not tracked
-                pass
-
-class repowatcher(object):
-    """
-    Watches inotify events
-    """
-    statuskeys = 'almr!?'
-
-    def __init__(self, ui, dirstate, root):
-        self.ui = ui
-        self.dirstate = dirstate
-
-        self.wprefix = join(root, '')
-        self.prefixlen = len(self.wprefix)
-
-        self.tree = directory()
-        self.statcache = {}
-        self.statustrees = dict([(s, directory()) for s in self.statuskeys])
-
-        self.ds_info = self.dirstate_info()
-
-        self.last_event = None
-
-
-    def handle_timeout(self):
-        pass
-
-    def dirstate_info(self):
-        try:
-            st = os.lstat(self.wprefix + '.hg/dirstate')
-            return st.st_mtime, st.st_ino
-        except OSError, err:
-            if err.errno != errno.ENOENT:
-                raise
-            return 0, 0
-
-    def filestatus(self, fn, st):
-        try:
-            type_, mode, size, time = self.dirstate._map[fn][:4]
-        except KeyError:
-            type_ = '?'
-        if type_ == 'n':
-            st_mode, st_size, st_mtime = st
-            if size == -1:
-                return 'l'
-            if size and (size != st_size or (mode ^ st_mode) & 0100):
-                return 'm'
-            if time != int(st_mtime):
-                return 'l'
-            return 'n'
-        if type_ == '?' and self.dirstate._dirignore(fn):
-            # we must check not only if the file is ignored, but if any part
-            # of its path match an ignore pattern
-            return 'i'
-        return type_
-
-    def updatefile(self, wfn, osstat):
-        '''
-        update the file entry of an existing file.
-
-        osstat: (mode, size, time) tuple, as returned by os.lstat(wfn)
-        '''
-
-        self._updatestatus(wfn, self.filestatus(wfn, osstat))
-
-    def deletefile(self, wfn, oldstatus):
-        '''
-        update the entry of a file which has been deleted.
-
-        oldstatus: char in statuskeys, status of the file before deletion
-        '''
-        if oldstatus == 'r':
-            newstatus = 'r'
-        elif oldstatus in 'almn':
-            newstatus = '!'
-        else:
-            newstatus = None
-
-        self.statcache.pop(wfn, None)
-        self._updatestatus(wfn, newstatus)
-
-    def _updatestatus(self, wfn, newstatus):
-        '''
-        Update the stored status of a file.
-
-        newstatus: - char in (statuskeys + 'ni'), new status to apply.
-                   - or None, to stop tracking wfn
-        '''
-        root, fn = split(wfn)
-        d = self.tree.dir(root)
-
-        oldstatus = d.files.get(fn)
-        # oldstatus can be either:
-        # - None : fn is new
-        # - a char in statuskeys: fn is a (tracked) file
-
-        if self.ui.debugflag and oldstatus != newstatus:
-            self.ui.note(_('status: %r %s -> %s\n') %
-                             (wfn, oldstatus, newstatus))
-
-        if oldstatus and oldstatus in self.statuskeys \
-            and oldstatus != newstatus:
-            del self.statustrees[oldstatus].dir(root).files[fn]
-
-        if newstatus in (None, 'i'):
-            d.files.pop(fn, None)
-        elif oldstatus != newstatus:
-            d.files[fn] = newstatus
-            if newstatus != 'n':
-                self.statustrees[newstatus].dir(root).files[fn] = newstatus
-
-    def check_deleted(self, key):
-        # Files that had been deleted but were present in the dirstate
-        # may have vanished from the dirstate; we must clean them up.
-        nuke = []
-        for wfn, ignore in self.statustrees[key].walk(key):
-            if wfn not in self.dirstate:
-                nuke.append(wfn)
-        for wfn in nuke:
-            root, fn = split(wfn)
-            del self.statustrees[key].dir(root).files[fn]
-            del self.tree.dir(root).files[fn]
-
-    def update_hgignore(self):
-        # An update of the ignore file can potentially change the
-        # states of all unknown and ignored files.
-
-        # XXX If the user has other ignore files outside the repo, or
-        # changes their list of ignore files at run time, we'll
-        # potentially never see changes to them.  We could get the
-        # client to report to us what ignore data they're using.
-        # But it's easier to do nothing than to open that can of
-        # worms.
-
-        if '_ignore' in self.dirstate.__dict__:
-            delattr(self.dirstate, '_ignore')
-            self.ui.note(_('rescanning due to .hgignore change\n'))
-            self.handle_timeout()
-            self.scan()
-
-    def getstat(self, wpath):
-        try:
-            return self.statcache[wpath]
-        except KeyError:
-            try:
-                return self.stat(wpath)
-            except OSError, err:
-                if err.errno != errno.ENOENT:
-                    raise
-
-    def stat(self, wpath):
-        try:
-            st = os.lstat(join(self.wprefix, wpath))
-            ret = st.st_mode, st.st_size, st.st_mtime
-            self.statcache[wpath] = ret
-            return ret
-        except OSError:
-            self.statcache.pop(wpath, None)
-            raise
-
-class socketlistener(object):
-    """
-    Listens for client queries on unix socket inotify.sock
-    """
-    def __init__(self, ui, root, repowatcher, timeout):
-        self.ui = ui
-        self.repowatcher = repowatcher
-        try:
-            self.sock = posix.unixdomainserver(
-                lambda p: os.path.join(root, '.hg', p),
-                'inotify')
-        except (OSError, socket.error), err:
-            if err.args[0] == errno.EADDRINUSE:
-                raise AlreadyStartedException(_('cannot start: '
-                                                'socket is already bound'))
-            raise
-        self.fileno = self.sock.fileno
-
-    def answer_stat_query(self, cs):
-        names = cs.read().split('\0')
-
-        states = names.pop()
-
-        self.ui.note(_('answering query for %r\n') % states)
-
-        visited = set()
-        if not names:
-            def genresult(states, tree):
-                for fn, state in tree.walk(states):
-                    yield fn
-        else:
-            def genresult(states, tree):
-                for fn in names:
-                    for f in tree.lookup(states, fn, visited):
-                        yield f
-
-        return ['\0'.join(r) for r in [
-            genresult('l', self.repowatcher.statustrees['l']),
-            genresult('m', self.repowatcher.statustrees['m']),
-            genresult('a', self.repowatcher.statustrees['a']),
-            genresult('r', self.repowatcher.statustrees['r']),
-            genresult('!', self.repowatcher.statustrees['!']),
-            '?' in states
-                and genresult('?', self.repowatcher.statustrees['?'])
-                or [],
-            [],
-            'c' in states and genresult('n', self.repowatcher.tree) or [],
-            visited
-            ]]
-
-    def answer_dbug_query(self):
-        return ['\0'.join(self.repowatcher.debug())]
-
-    def accept_connection(self):
-        sock, addr = self.sock.accept()
-
-        cs = common.recvcs(sock)
-        version = ord(cs.read(1))
-
-        if version != common.version:
-            self.ui.warn(_('received query from incompatible client '
-                           'version %d\n') % version)
-            try:
-                # try to send back our version to the client
-                # this way, the client too is informed of the mismatch
-                sock.sendall(chr(common.version))
-            except socket.error:
-                pass
-            return
-
-        type = cs.read(4)
-
-        if type == 'STAT':
-            results = self.answer_stat_query(cs)
-        elif type == 'DBUG':
-            results = self.answer_dbug_query()
-        else:
-            self.ui.warn(_('unrecognized query type: %s\n') % type)
-            return
-
-        try:
-            try:
-                v = chr(common.version)
-
-                sock.sendall(v + type + struct.pack(common.resphdrfmts[type],
-                                            *map(len, results)))
-                sock.sendall(''.join(results))
-            finally:
-                sock.shutdown(socket.SHUT_WR)
-        except socket.error, err:
-            if err.args[0] != errno.EPIPE:
-                raise
-
-if sys.platform.startswith('linux'):
-    import linuxserver as _server
-else:
-    raise ImportError
-
-master = _server.master
-
-def start(ui, dirstate, root, opts):
-    timeout = opts.get('idle_timeout')
-    if timeout:
-        timeout = float(timeout) * 60000
-    else:
-        timeout = None
-
-    class service(object):
-        def init(self):
-            try:
-                self.master = master(ui, dirstate, root, timeout)
-            except AlreadyStartedException, inst:
-                raise util.Abort("inotify-server: %s" % inst)
-
-        def run(self):
-            try:
-                try:
-                    self.master.run()
-                except TimeoutException:
-                    pass
-            finally:
-                self.master.shutdown()
-
-    if 'inserve' not in sys.argv:
-        runargs = util.hgcmd() + ['inserve', '-R', root]
-    else:
-        runargs = util.hgcmd() + sys.argv[1:]
-
-    pidfile = ui.config('inotify', 'pidfile')
-    opts.setdefault('pid_file', '')
-    if opts['daemon'] and pidfile is not None and not opts['pid_file']:
-        opts['pid_file'] = pidfile
-
-    service = service()
-    logfile = ui.config('inotify', 'log')
-
-    appendpid = ui.configbool('inotify', 'appendpid', False)
-
-    ui.debug('starting inotify server: %s\n' % ' '.join(runargs))
-    cmdutil.service(opts, initfn=service.init, runfn=service.run,
-                    logfile=logfile, runargs=runargs, appendpid=appendpid)
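
The removed socketlistener framed every response the same way: one protocol
version byte, the echoed four-byte query type, then a struct-packed header of
result lengths followed by the concatenated results. A minimal client-side
sketch of that framing, assuming a length-header format shaped like the
removed common.resphdrfmts table (short reads ignored for brevity):

    import struct

    def readresponse(sock, hdrfmt):
        # one version byte and the echoed four-byte query type...
        version = ord(sock.recv(1))
        qtype = sock.recv(4)
        # ...then the packed length of each result section, in order
        lengths = struct.unpack(hdrfmt, sock.recv(struct.calcsize(hdrfmt)))
        return version, qtype, [sock.recv(n) for n in lengths]
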
--- a/hgext/interhg.py	Tue Apr 15 03:21:59 2014 +0900
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,83 +0,0 @@
-# interhg.py - interhg
-#
-# Copyright 2007 OHASHI Hideya <ohachige@gmail.com>
-#
-# Contributor(s):
-#   Edward Lee <edward.lee@engineering.uiuc.edu>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-'''expand expressions into changelog and summaries
-
-This extension allows the use of a special syntax in summaries, which
-will be automatically expanded into links or any other arbitrary
-expression, much like InterWiki does.
-
-A few example patterns (link to bug tracking, etc.) that may be used
-in your hgrc::
-
-  [interhg]
-  issues = s!issue(\\d+)!<a href="http://bts/issue\\1">issue\\1</a>!
-  bugzilla = s!((?:bug|b=|(?=#?\\d{4,}))(?:\\s*#?)(\\d+))!<a..=\\2">\\1</a>!i
-  boldify = s!(^|\\s)#(\\d+)\\b! <b>#\\2</b>!
-'''
-
-import re
-from mercurial.hgweb import hgweb_mod
-from mercurial import templatefilters, extensions
-from mercurial.i18n import _
-
-testedwith = 'internal'
-
-interhg_table = []
-
-def uisetup(ui):
-    orig_escape = templatefilters.filters["escape"]
-
-    def interhg_escape(x):
-        escstr = orig_escape(x)
-        for regexp, format in interhg_table:
-            escstr = regexp.sub(format, escstr)
-        return escstr
-
-    templatefilters.filters["escape"] = interhg_escape
-
-def interhg_refresh(orig, self, *args, **kwargs):
-    interhg_table[:] = []
-    for key, pattern in self.repo.ui.configitems('interhg'):
-        # grab the delimiter from the character after the "s"
-        unesc = pattern[1]
-        delim = re.escape(unesc)
-
-        # identify portions of the pattern, taking care to avoid escaped
-        # delimiters. the replace format and flags are optional, but delimiters
-        # are required.
-        match = re.match(r'^s%s(.+)(?:(?<=\\\\)|(?<!\\))%s(.*)%s([ilmsux])*$'
-                         % (delim, delim, delim), pattern)
-        if not match:
-            self.repo.ui.warn(_("interhg: invalid pattern for %s: %s\n")
-                              % (key, pattern))
-            continue
-
-        # we need to unescape the delimiter for regexp and format
-        delim_re = re.compile(r'(?<!\\)\\%s' % delim)
-        regexp = delim_re.sub(unesc, match.group(1))
-        format = delim_re.sub(unesc, match.group(2))
-
-        # the pattern allows for 6 regexp flags, so set them if necessary
-        flagin = match.group(3)
-        flags = 0
-        if flagin:
-            for flag in flagin.upper():
-                flags |= re.__dict__[flag]
-
-        try:
-            regexp = re.compile(regexp, flags)
-            interhg_table.append((regexp, format))
-        except re.error:
-            self.repo.ui.warn(_("interhg: invalid regexp for %s: %s\n")
-                              % (key, regexp))
-    return orig(self, *args, **kwargs)
-
-extensions.wrapfunction(hgweb_mod.hgweb, 'refresh', interhg_refresh)
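
The rules this extension read from [interhg] are classic
s!pattern!replacement!flags substitutions, applied to the already HTML-escaped
template output. A worked example of the 'issues' rule from the docstring
above, after unescaping:

    import re

    regexp = re.compile(r'issue(\d+)')
    format = r'<a href="http://bts/issue\1">issue\1</a>'
    print regexp.sub(format, 'fixed in issue42')
    # -> fixed in <a href="http://bts/issue42">issue42</a>
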
--- a/hgext/keyword.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/hgext/keyword.py	Thu Apr 17 19:36:17 2014 -0400
@@ -218,9 +218,8 @@
         '''Replaces keywords in data with expanded template.'''
         def kwsub(mobj):
             kw = mobj.group(1)
-            ct = cmdutil.changeset_templater(self.ui, self.repo,
-                                             False, None, '', False)
-            ct.use_template(self.templates[kw])
+            ct = cmdutil.changeset_templater(self.ui, self.repo, False, None,
+                                             self.templates[kw], '', False)
             self.ui.pushbuffer()
             ct.show(ctx, root=self.repo.root, file=path)
             ekw = templatefilters.firstline(self.ui.popbuffer())
@@ -386,10 +385,10 @@
     tmpdir = tempfile.mkdtemp('', 'kwdemo.')
     ui.note(_('creating temporary repository at %s\n') % tmpdir)
     repo = localrepo.localrepository(repo.baseui, tmpdir, True)
-    ui.setconfig('keyword', fn, '')
+    ui.setconfig('keyword', fn, '', 'keyword')
     svn = ui.configbool('keywordset', 'svn')
     # explicitly set keywordset for demo output
-    ui.setconfig('keywordset', 'svn', svn)
+    ui.setconfig('keywordset', 'svn', svn, 'keyword')
 
     uikwmaps = ui.configitems('keywordmaps')
     if args or opts.get('rcfile'):
@@ -420,7 +419,7 @@
         if uikwmaps:
             ui.status(_('\tdisabling current template maps\n'))
             for k, v in kwmaps.iteritems():
-                ui.setconfig('keywordmaps', k, v)
+                ui.setconfig('keywordmaps', k, v, 'keyword')
     else:
         ui.status(_('\n\tconfiguration using current keyword template maps\n'))
         if uikwmaps:
@@ -446,7 +445,7 @@
         wlock.release()
     for name, cmd in ui.configitems('hooks'):
         if name.split('.', 1)[0].find('commit') > -1:
-            repo.ui.setconfig('hooks', name, '')
+            repo.ui.setconfig('hooks', name, '', 'keyword')
     msg = _('hg keyword configuration and expansion example')
     ui.note(("hg ci -m '%s'\n" % msg))
     repo.commit(text=msg)
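
The setconfig changes in this file (and in the largefiles, mq, pager,
patchbomb and rebase hunks below) all follow one pattern: ui.setconfig gained
a fourth positional argument naming the component that set the value, so an
overridden setting can be traced back to its origin. A sketch of the new call,
assuming only a bare mercurial.ui instance:

    from mercurial import ui as uimod

    u = uimod.ui()
    # pre-3.0 signature:  u.setconfig('keywordset', 'svn', True)
    # 3.0 adds a source label identifying who set the value:
    u.setconfig('keywordset', 'svn', True, 'keyword')
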
--- a/hgext/largefiles/lfcommands.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/hgext/largefiles/lfcommands.py	Thu Apr 17 19:36:17 2014 -0400
@@ -375,13 +375,6 @@
     store = basestore._openstore(repo)
     return store.verify(revs, contents=contents)
 
-def debugdirstate(ui, repo):
-    '''Show basic information for the largefiles dirstate'''
-    lfdirstate = lfutil.openlfdirstate(ui, repo)
-    for file_, ent in sorted(lfdirstate._map.iteritems()):
-        mode = '%3o' % (ent[1] & 0777 & ~util.umask)
-        ui.write("%c %s %10d %s\n" % (ent[0], mode, ent[2], file_))
-
 def cachelfiles(ui, repo, node, filelist=None):
     '''cachelfiles ensures that all largefiles needed by the specified revision
     are present in the repository's largefile cache.
@@ -447,6 +440,7 @@
                 if (os.path.exists(absstandin + '.orig') and
                     os.path.exists(abslfile)):
                     shutil.copyfile(abslfile, abslfile + '.orig')
+                    util.unlinkpath(absstandin + '.orig')
                 expecthash = lfutil.readstandin(repo, lfile)
                 if (expecthash != '' and
                     (not os.path.exists(abslfile) or
--- a/hgext/largefiles/lfutil.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/hgext/largefiles/lfutil.py	Thu Apr 17 19:36:17 2014 -0400
@@ -15,6 +15,7 @@
 
 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
 from mercurial.i18n import _
+from mercurial import node
 
 shortname = '.hglf'
 shortnameslash = shortname + '/'
@@ -105,7 +106,7 @@
         return super(largefilesdirstate, self).forget(unixpath(f))
     def normallookup(self, f):
         return super(largefilesdirstate, self).normallookup(unixpath(f))
-    def _ignore(self):
+    def _ignore(self, f):
         return False
 
 def openlfdirstate(ui, repo, create=True):
@@ -365,3 +366,25 @@
         if f[0] not in filelist:
             filelist.append(f[0])
     return filelist
+
+def getlfilestoupload(repo, missing, addfunc):
+    for n in missing:
+        parents = [p for p in repo.changelog.parents(n) if p != node.nullid]
+        ctx = repo[n]
+        files = set(ctx.files())
+        if len(parents) == 2:
+            mc = ctx.manifest()
+            mp1 = ctx.parents()[0].manifest()
+            mp2 = ctx.parents()[1].manifest()
+            for f in mp1:
+                if f not in mc:
+                    files.add(f)
+            for f in mp2:
+                if f not in mc:
+                    files.add(f)
+            for f in mc:
+                if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
+                    files.add(f)
+        for fn in files:
+            if isstandin(fn) and fn in ctx:
+                addfunc(fn, ctx[fn].data().strip())
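
getlfilestoupload centralizes a computation that was previously duplicated in
overrides.py and reposetup.py (both copies are deleted below). For merge
changesets ctx.files() alone is not enough, which is why the manifest is also
compared against both parents. The new callers all use the same callback
pattern; the callback receives the standin name and the largefile hash, and
each caller keeps whichever side it needs (assuming a repo and an outgoing
object as in those callers):

    toupload = set()
    lfutil.getlfilestoupload(repo, outgoing.missing,
                             lambda fn, lfhash: toupload.add(lfhash))
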
--- a/hgext/largefiles/overrides.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/hgext/largefiles/overrides.py	Thu Apr 17 19:36:17 2014 -0400
@@ -12,7 +12,7 @@
 import copy
 
 from mercurial import hg, commands, util, cmdutil, scmutil, match as match_, \
-    node, archival, error, merge, discovery, pathutil
+        archival, merge, pathutil, revset
 from mercurial.i18n import _
 from mercurial.node import hex
 from hgext import rebase
@@ -24,9 +24,7 @@
 # -- Utility functions: commonly/repeatedly needed functionality ---------------
 
 def installnormalfilesmatchfn(manifest):
-    '''overrides scmutil.match so that the matcher it returns will ignore all
-    largefiles'''
-    oldmatch = None # for the closure
+    '''installmatchfn with a matchfn that ignores all largefiles'''
     def overridematch(ctx, pats=[], opts={}, globbed=False,
             default='relpath'):
         match = oldmatch(ctx, pats, opts, globbed, default)
@@ -42,18 +40,36 @@
     oldmatch = installmatchfn(overridematch)
 
 def installmatchfn(f):
+    '''Monkey-patch the scmutil module with a custom match function.
+    Warning: this patches the _module_ at runtime! Not thread safe!'''
     oldmatch = scmutil.match
     setattr(f, 'oldmatch', oldmatch)
     scmutil.match = f
     return oldmatch
 
 def restorematchfn():
-    '''restores scmutil.match to what it was before installnormalfilesmatchfn
+    '''restores scmutil.match to what it was before installmatchfn
     was called.  no-op if scmutil.match is its original function.
 
-    Note that n calls to installnormalfilesmatchfn will require n calls to
+    Note that n calls to installmatchfn will require n calls to
     restore matchfn to reverse'''
-    scmutil.match = getattr(scmutil.match, 'oldmatch', scmutil.match)
+    scmutil.match = getattr(scmutil.match, 'oldmatch')
+
+def installmatchandpatsfn(f):
+    oldmatchandpats = scmutil.matchandpats
+    setattr(f, 'oldmatchandpats', oldmatchandpats)
+    scmutil.matchandpats = f
+    return oldmatchandpats
+
+def restorematchandpatsfn():
+    '''restores scmutil.matchandpats to what it was before
+    installmatchandpatsfn was called.  no-op if scmutil.matchandpats
+    is its original function.
+
+    Note that n calls to installmatchandpatsfn will require n calls
+    to restorematchandpatsfn to reverse'''
+    scmutil.matchandpats = getattr(scmutil.matchandpats, 'oldmatchandpats',
+            scmutil.matchandpats)
 
 def addlargefiles(ui, repo, *pats, **opts):
     large = opts.pop('large', None)
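
The install/restore pair above works by stashing the previous function as an
attribute on its replacement, so n installs need n restores applied in reverse
order. A self-contained toy model of the pattern, with a fake module standing
in for scmutil:

    import types

    mod = types.ModuleType('fakescmutil')   # stand-in for scmutil
    mod.match = lambda: 'original'

    def installmatchfn(f):
        # stash the current function on the wrapper, then swap it in
        f.oldmatch = mod.match
        mod.match = f
        return f.oldmatch

    def restorematchfn():
        # pop back to whatever the last install stashed
        mod.match = getattr(mod.match, 'oldmatch')

    def custom():
        return 'patched:' + custom.oldmatch()

    installmatchfn(custom)
    assert mod.match() == 'patched:original'
    restorematchfn()
    assert mod.match() == 'original'
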
@@ -241,19 +257,30 @@
         repo._repo.lfstatus = False
 
 def overridelog(orig, ui, repo, *pats, **opts):
-    def overridematch(ctx, pats=[], opts={}, globbed=False,
+    def overridematchandpats(ctx, pats=[], opts={}, globbed=False,
             default='relpath'):
         """Matcher that merges root directory with .hglf, suitable for log.
         It is still possible to match .hglf directly.
         For any listed files run log on the standin too.
         matchfn tries both the given filename and with .hglf stripped.
         """
-        match = oldmatch(ctx, pats, opts, globbed, default)
-        m = copy.copy(match)
+        matchandpats = oldmatchandpats(ctx, pats, opts, globbed, default)
+        m, p = copy.copy(matchandpats)
+
+        pats = set(p)
+        # TODO: handling of patterns in both cases below
+        if m._cwd:
+            back = (m._cwd.count('/') + 1) * '../'
+            pats.update(back + lfutil.standin(m._cwd + '/' + f) for f in p)
+        else:
+            pats.update(lfutil.standin(f) for f in p)
+
         for i in range(0, len(m._files)):
             standin = lfutil.standin(m._files[i])
             if standin in repo[ctx.node()]:
                 m._files[i] = standin
+            pats.add(standin)
+
         m._fmap = set(m._files)
         m._always = False
         origmatchfn = m.matchfn
@@ -264,14 +291,16 @@
             r = origmatchfn(f)
             return r
         m.matchfn = lfmatchfn
-        return m
-    oldmatch = installmatchfn(overridematch)
+
+        return m, pats
+
+    oldmatchandpats = installmatchandpatsfn(overridematchandpats)
     try:
         repo.lfstatus = True
         return orig(ui, repo, *pats, **opts)
     finally:
         repo.lfstatus = False
-        restorematchfn()
+        restorematchandpatsfn()
 
 def overrideverify(orig, ui, repo, *pats, **opts):
     large = opts.pop('large', False)
@@ -286,7 +315,9 @@
 def overridedebugstate(orig, ui, repo, *pats, **opts):
     large = opts.pop('large', False)
     if large:
-        lfcommands.debugdirstate(ui, repo)
+        class fakerepo(object):
+            dirstate = lfutil.openlfdirstate(ui, repo)
+        orig(ui, fakerepo, *pats, **opts)
     else:
         orig(ui, repo, *pats, **opts)
 
@@ -295,15 +326,15 @@
 # will get the new files. Filemerge is also overridden so that the merge
 # will merge standins correctly.
 def overrideupdate(orig, ui, repo, *pats, **opts):
-    lfdirstate = lfutil.openlfdirstate(ui, repo)
-    s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
-        False, False)
-    (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
-
     # Need to lock between the standins getting updated and their
     # largefiles getting updated
     wlock = repo.wlock()
     try:
+        lfdirstate = lfutil.openlfdirstate(ui, repo)
+        s = lfdirstate.status(match_.always(repo.root, repo.getcwd()),
+            [], False, False, False)
+        (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
+
         if opts['check']:
             mod = len(modified) > 0
             for lfile in unsure:
@@ -320,9 +351,9 @@
         if not opts['clean']:
             for lfile in unsure + modified + added:
                 lfutil.updatestandin(repo, lfutil.standin(lfile))
+        return orig(ui, repo, *pats, **opts)
     finally:
         wlock.release()
-    return orig(ui, repo, *pats, **opts)
 
 # Before starting the manifest merge, merge.updates will call
 # _checkunknown to check if there are any files in the merged-in
@@ -365,11 +396,11 @@
 # Finally, the merge.applyupdates function will then take care of
 # writing the files into the working copy and lfcommands.updatelfiles
 # will update the largefiles.
-def overridemanifestmerge(origfn, repo, p1, p2, pa, branchmerge, force,
-                          partial, acceptremote=False):
+def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
+                             partial, acceptremote, followcopies):
     overwrite = force and not branchmerge
-    actions = origfn(repo, p1, p2, pa, branchmerge, force, partial,
-                     acceptremote)
+    actions = origfn(repo, p1, p2, pas, branchmerge, force, partial,
+                     acceptremote, followcopies)
 
     if overwrite:
         return actions
@@ -420,16 +451,18 @@
     if not lfutil.isstandin(orig):
         return origfn(repo, mynode, orig, fcd, fco, fca)
 
-    if not fco.cmp(fcd): # files identical?
-        return None
-
-    if repo.ui.promptchoice(
-        _('largefile %s has a merge conflict\nancestor was %s\n'
-          'keep (l)ocal %s or\ntake (o)ther %s?'
-          '$$ &Local $$ &Other') %
-          (lfutil.splitstandin(orig),
-           fca.data().strip(), fcd.data().strip(), fco.data().strip()),
-        0) == 1:
+    ahash = fca.data().strip().lower()
+    dhash = fcd.data().strip().lower()
+    ohash = fco.data().strip().lower()
+    if (ohash != ahash and
+        ohash != dhash and
+        (dhash == ahash or
+         repo.ui.promptchoice(
+             _('largefile %s has a merge conflict\nancestor was %s\n'
+               'keep (l)ocal %s or\ntake (o)ther %s?'
+               '$$ &Local $$ &Other') %
+               (lfutil.splitstandin(orig), ahash, dhash, ohash),
+             0) == 1)):
         repo.wwrite(fcd.path(), fco.data(), fco.flags())
     return 0
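
The rewritten check prompts only for genuine three-way conflicts: when the
other side's hash matches the ancestor or the local side it brings nothing
new, and when the local side still matches the ancestor the other side's
change wins silently. A hypothetical helper restating the predicate:

    def takeother(ahash, dhash, ohash, prompt):
        # True when the other side's largefile should replace the local one
        if ohash == ahash or ohash == dhash:
            return False         # other is not a new version: keep local
        if dhash == ahash:
            return True          # only other changed: take it, no prompt
        return prompt() == 1     # real conflict: ask the user

    assert takeother('a', 'a', 'b', None)         # only other changed
    assert not takeother('a', 'b', 'a', None)     # other matches ancestor
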
 
@@ -460,9 +493,9 @@
     # match largefiles and run it again.
     nonormalfiles = False
     nolfiles = False
+    installnormalfilesmatchfn(repo[None].manifest())
     try:
         try:
-            installnormalfilesmatchfn(repo[None].manifest())
             result = orig(ui, repo, pats, opts, rename)
         except util.Abort, e:
             if str(e) != _('no files to copy'):
@@ -487,7 +520,6 @@
             wlock = repo.wlock()
 
             manifest = repo[None].manifest()
-            oldmatch = None # for the closure
             def overridematch(ctx, pats=[], opts={}, globbed=False,
                     default='relpath'):
                 newpats = []
@@ -576,8 +608,7 @@
 # Standins are only updated (to match the hash of largefiles) before
 # commits. Update the standins then run the original revert, changing
 # the matcher to hit standins instead of largefiles. Based on the
-# resulting standins update the largefiles. Then return the standins
-# to their proper state
+# resulting standins update the largefiles.
 def overriderevert(orig, ui, repo, *pats, **opts):
     # Because we put the standins in a bad state (by updating them)
     # and then return them to a correct state we need to lock to
@@ -594,70 +625,40 @@
             if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
                 os.unlink(repo.wjoin(lfutil.standin(lfile)))
 
+        oldstandins = lfutil.getstandinsstate(repo)
+
+        def overridematch(ctx, pats=[], opts={}, globbed=False,
+                default='relpath'):
+            match = oldmatch(ctx, pats, opts, globbed, default)
+            m = copy.copy(match)
+            def tostandin(f):
+                if lfutil.standin(f) in ctx:
+                    return lfutil.standin(f)
+                elif lfutil.standin(f) in repo[None]:
+                    return None
+                return f
+            m._files = [tostandin(f) for f in m._files]
+            m._files = [f for f in m._files if f is not None]
+            m._fmap = set(m._files)
+            m._always = False
+            origmatchfn = m.matchfn
+            def matchfn(f):
+                if lfutil.isstandin(f):
+                    return (origmatchfn(lfutil.splitstandin(f)) and
+                            (f in repo[None] or f in ctx))
+                return origmatchfn(f)
+            m.matchfn = matchfn
+            return m
+        oldmatch = installmatchfn(overridematch)
         try:
-            ctx = scmutil.revsingle(repo, opts.get('rev'))
-            oldmatch = None # for the closure
-            def overridematch(ctx, pats=[], opts={}, globbed=False,
-                    default='relpath'):
-                match = oldmatch(ctx, pats, opts, globbed, default)
-                m = copy.copy(match)
-                def tostandin(f):
-                    if lfutil.standin(f) in ctx:
-                        return lfutil.standin(f)
-                    elif lfutil.standin(f) in repo[None]:
-                        return None
-                    return f
-                m._files = [tostandin(f) for f in m._files]
-                m._files = [f for f in m._files if f is not None]
-                m._fmap = set(m._files)
-                m._always = False
-                origmatchfn = m.matchfn
-                def matchfn(f):
-                    if lfutil.isstandin(f):
-                        # We need to keep track of what largefiles are being
-                        # matched so we know which ones to update later --
-                        # otherwise we accidentally revert changes to other
-                        # largefiles. This is repo-specific, so duckpunch the
-                        # repo object to keep the list of largefiles for us
-                        # later.
-                        if origmatchfn(lfutil.splitstandin(f)) and \
-                                (f in repo[None] or f in ctx):
-                            lfileslist = getattr(repo, '_lfilestoupdate', [])
-                            lfileslist.append(lfutil.splitstandin(f))
-                            repo._lfilestoupdate = lfileslist
-                            return True
-                        else:
-                            return False
-                    return origmatchfn(f)
-                m.matchfn = matchfn
-                return m
-            oldmatch = installmatchfn(overridematch)
-            scmutil.match
-            matches = overridematch(repo[None], pats, opts)
             orig(ui, repo, *pats, **opts)
         finally:
             restorematchfn()
-        lfileslist = getattr(repo, '_lfilestoupdate', [])
-        lfcommands.updatelfiles(ui, repo, filelist=lfileslist,
-                                printmessage=False)
 
-        # empty out the largefiles list so we start fresh next time
-        repo._lfilestoupdate = []
-        for lfile in modified:
-            if lfile in lfileslist:
-                if os.path.exists(repo.wjoin(lfutil.standin(lfile))) and lfile\
-                        in repo['.']:
-                    lfutil.writestandin(repo, lfutil.standin(lfile),
-                        repo['.'][lfile].data().strip(),
-                        'x' in repo['.'][lfile].flags())
-        lfdirstate = lfutil.openlfdirstate(ui, repo)
-        for lfile in added:
-            standin = lfutil.standin(lfile)
-            if standin not in ctx and (standin in matches or opts.get('all')):
-                if lfile in lfdirstate:
-                    lfdirstate.drop(lfile)
-                util.unlinkpath(repo.wjoin(standin))
-        lfdirstate.write()
+        newstandins = lfutil.getstandinsstate(repo)
+        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
+        lfcommands.updatelfiles(ui, repo, filelist, printmessage=False)
+
     finally:
         wlock.release()
 
@@ -752,7 +753,7 @@
         firstpulled = repo.firstpulled
     except AttributeError:
         raise util.Abort(_("pulled() only available in --lfrev"))
-    return [r for r in subset if r >= firstpulled]
+    return revset.baseset([r for r in subset if r >= firstpulled])
 
 def overrideclone(orig, ui, source, dest=None, **opts):
     d = dest
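
The baseset wrapper here (and in revsetmq further down) tracks the 3.0 revset
refactor: predicates must now return a smartset rather than a bare list. A
minimal sketch, assuming revset.baseset keeps its list-like behaviour:

    from mercurial import revset

    firstpulled = 11
    subset = revset.baseset([10, 11, 12])
    filtered = revset.baseset([r for r in subset if r >= firstpulled])
    assert list(filtered) == [11, 12]
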
@@ -760,8 +761,8 @@
         d = hg.defaultdest(source)
     if opts.get('all_largefiles') and not hg.islocal(d):
             raise util.Abort(_(
-            '--all-largefiles is incompatible with non-local destination %s' %
-            d))
+            '--all-largefiles is incompatible with non-local destination %s') %
+            d)
 
     return orig(ui, source, dest, **opts)
 
@@ -981,62 +982,42 @@
 
     return result
 
-def getoutgoinglfiles(ui, repo, dest=None, **opts):
-    dest = ui.expandpath(dest or 'default-push', dest or 'default')
-    dest, branches = hg.parseurl(dest, opts.get('branch'))
-    revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
-    if revs:
-        revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]
-
-    try:
-        remote = hg.peer(repo, opts, dest)
-    except error.RepoError:
-        return None
-    outgoing = discovery.findcommonoutgoing(repo, remote.peer(), force=False)
-    if not outgoing.missing:
-        return outgoing.missing
-    o = repo.changelog.nodesbetween(outgoing.missing, revs)[0]
-    if opts.get('newest_first'):
-        o.reverse()
-
-    toupload = set()
-    for n in o:
-        parents = [p for p in repo.changelog.parents(n) if p != node.nullid]
-        ctx = repo[n]
-        files = set(ctx.files())
-        if len(parents) == 2:
-            mc = ctx.manifest()
-            mp1 = ctx.parents()[0].manifest()
-            mp2 = ctx.parents()[1].manifest()
-            for f in mp1:
-                if f not in mc:
-                        files.add(f)
-            for f in mp2:
-                if f not in mc:
-                    files.add(f)
-            for f in mc:
-                if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
-                    files.add(f)
-        toupload = toupload.union(
-            set([f for f in files if lfutil.isstandin(f) and f in ctx]))
-    return sorted(toupload)
-
-def overrideoutgoing(orig, ui, repo, dest=None, **opts):
-    result = orig(ui, repo, dest, **opts)
-
+def outgoinghook(ui, repo, other, opts, missing):
     if opts.pop('large', None):
-        toupload = getoutgoinglfiles(ui, repo, dest, **opts)
-        if toupload is None:
-            ui.status(_('largefiles: No remote repo\n'))
-        elif not toupload:
+        toupload = set()
+        lfutil.getlfilestoupload(repo, missing,
+                                 lambda fn, lfhash: toupload.add(fn))
+        if not toupload:
             ui.status(_('largefiles: no files to upload\n'))
         else:
             ui.status(_('largefiles to upload:\n'))
-            for file in toupload:
+            for file in sorted(toupload):
                 ui.status(lfutil.splitstandin(file) + '\n')
             ui.status('\n')
 
-    return result
+def summaryremotehook(ui, repo, opts, changes):
+    largeopt = opts.get('large', False)
+    if changes is None:
+        if largeopt:
+            return (False, True) # only outgoing check is needed
+        else:
+            return (False, False)
+    elif largeopt:
+        url, branch, peer, outgoing = changes[1]
+        if peer is None:
+            # i18n: column positioning for "hg summary"
+            ui.status(_('largefiles: (no remote repo)\n'))
+            return
+
+        toupload = set()
+        lfutil.getlfilestoupload(repo, outgoing.missing,
+                                 lambda fn, lfhash: toupload.add(fn))
+        if not toupload:
+            # i18n: column positioning for "hg summary"
+            ui.status(_('largefiles: (no files to upload)\n'))
+        else:
+            # i18n: column positioning for "hg summary"
+            ui.status(_('largefiles: %d to upload\n') % len(toupload))
 
 def overridesummary(orig, ui, repo, *pats, **opts):
     try:
@@ -1045,18 +1026,6 @@
     finally:
         repo.lfstatus = False
 
-    if opts.pop('large', None):
-        toupload = getoutgoinglfiles(ui, repo, None, **opts)
-        if toupload is None:
-            # i18n: column positioning for "hg summary"
-            ui.status(_('largefiles: (no remote repo)\n'))
-        elif not toupload:
-            # i18n: column positioning for "hg summary"
-            ui.status(_('largefiles: (no files to upload)\n'))
-        else:
-            # i18n: column positioning for "hg summary"
-            ui.status(_('largefiles: %d to upload\n') % len(toupload))
-
 def scmutiladdremove(orig, repo, pats=[], opts={}, dry_run=None,
                      similarity=None):
     if not lfutil.islfilesrepo(repo):
@@ -1146,22 +1115,24 @@
     m = scmutil.match(ctx, (file1,) + pats, opts)
     origmatchfn = m.matchfn
     def lfmatchfn(f):
+        if origmatchfn(f):
+            return True
         lf = lfutil.splitstandin(f)
         if lf is None:
-            return origmatchfn(f)
+            return False
         notbad.add(lf)
         return origmatchfn(lf)
     m.matchfn = lfmatchfn
     origbadfn = m.bad
     def lfbadfn(f, msg):
         if not f in notbad:
-            return origbadfn(f, msg)
+            origbadfn(f, msg)
     m.bad = lfbadfn
     for f in ctx.walk(m):
         fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
                                  pathname=f)
         lf = lfutil.splitstandin(f)
-        if lf is None:
+        if lf is None or origmatchfn(f):
             # duplicating unreachable code from commands.cat
             data = ctx[f].data()
             if opts.get('decode'):
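
The cat override leans on lfutil.splitstandin to map a standin back to its
largefile. A minimal restatement of its contract, using the shortname constant
defined in lfutil.py above:

    def splitstandin(filename):
        # '.hglf/foo' -> 'foo'; anything else is not a standin -> None
        if filename.startswith('.hglf/'):
            return filename[len('.hglf/'):]
        return None

    assert splitstandin('.hglf/big.bin') == 'big.bin'
    assert splitstandin('normal.txt') is None
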
--- a/hgext/largefiles/proto.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/hgext/largefiles/proto.py	Thu Apr 17 19:36:17 2014 -0400
@@ -8,7 +8,6 @@
 import re
 
 from mercurial import error, httppeer, util, wireproto
-from mercurial.wireproto import batchable, future
 from mercurial.i18n import _
 
 import lfutil
@@ -135,9 +134,9 @@
                     self._abort(error.ResponseError(_("unexpected response:"),
                                                     chunk))
 
-        @batchable
+        @wireproto.batchable
         def statlfile(self, sha):
-            f = future()
+            f = wireproto.future()
             result = {'sha': sha}
             yield result, f
             try:
--- a/hgext/largefiles/remotestore.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/hgext/largefiles/remotestore.py	Thu Apr 17 19:36:17 2014 -0400
@@ -8,9 +8,8 @@
 
 import urllib2
 
-from mercurial import util
+from mercurial import util, wireproto
 from mercurial.i18n import _
-from mercurial.wireproto import remotebatch
 
 import lfutil
 import basestore
@@ -30,7 +29,8 @@
             % (source, util.hidepassword(self.url)))
 
     def exists(self, hashes):
-        return dict((h, s == 0) for (h, s) in self._stat(hashes).iteritems())
+        return dict((h, s == 0) for (h, s) in # dict-from-generator
+                    self._stat(hashes).iteritems())
 
     def sendfile(self, filename, hash):
         self.ui.debug('remotestore: sendfile(%s, %s)\n' % (filename, hash))
@@ -96,5 +96,4 @@
 
     def batch(self):
         '''Support for remote batching.'''
-        return remotebatch(self)
-
+        return wireproto.remotebatch(self)
--- a/hgext/largefiles/reposetup.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/hgext/largefiles/reposetup.py	Thu Apr 17 19:36:17 2014 -0400
@@ -10,8 +10,7 @@
 import copy
 import os
 
-from mercurial import error, manifest, match as match_, util, discovery
-from mercurial import node as node_
+from mercurial import error, manifest, match as match_, util
 from mercurial.i18n import _
 from mercurial import localrepo
 
@@ -413,37 +412,6 @@
                             " supported in the destination:"
                             " %s") % (', '.join(sorted(missing)))
                     raise util.Abort(msg)
-
-            outgoing = discovery.findcommonoutgoing(repo, remote.peer(),
-                                                    force=force)
-            if outgoing.missing:
-                toupload = set()
-                o = self.changelog.nodesbetween(outgoing.missing, revs)[0]
-                for n in o:
-                    parents = [p for p in self.changelog.parents(n)
-                               if p != node_.nullid]
-                    ctx = self[n]
-                    files = set(ctx.files())
-                    if len(parents) == 2:
-                        mc = ctx.manifest()
-                        mp1 = ctx.parents()[0].manifest()
-                        mp2 = ctx.parents()[1].manifest()
-                        for f in mp1:
-                            if f not in mc:
-                                files.add(f)
-                        for f in mp2:
-                            if f not in mc:
-                                files.add(f)
-                        for f in mc:
-                            if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f,
-                                    None):
-                                files.add(f)
-
-                    toupload = toupload.union(
-                        set([ctx[f].data().strip()
-                             for f in files
-                             if lfutil.isstandin(f) and f in ctx]))
-                lfcommands.uploadlfiles(ui, self, remote, toupload)
             return super(lfilesrepo, self).push(remote, force=force, revs=revs,
                 newbranch=newbranch)
 
@@ -503,11 +471,20 @@
 
     repo.__class__ = lfilesrepo
 
+    def prepushoutgoinghook(local, remote, outgoing):
+        if outgoing.missing:
+            toupload = set()
+            addfunc = lambda fn, lfhash: toupload.add(lfhash)
+            lfutil.getlfilestoupload(local, outgoing.missing, addfunc)
+            lfcommands.uploadlfiles(ui, local, remote, toupload)
+    repo.prepushoutgoinghooks.add("largefiles", prepushoutgoinghook)
+
     def checkrequireslfiles(ui, repo, **kwargs):
         if 'largefiles' not in repo.requirements and util.any(
                 lfutil.shortname+'/' in f[0] for f in repo.store.datafiles()):
             repo.requirements.add('largefiles')
             repo._writerequirements()
 
-    ui.setconfig('hooks', 'changegroup.lfiles', checkrequireslfiles)
-    ui.setconfig('hooks', 'commit.lfiles', checkrequireslfiles)
+    ui.setconfig('hooks', 'changegroup.lfiles', checkrequireslfiles,
+                 'largefiles')
+    ui.setconfig('hooks', 'commit.lfiles', checkrequireslfiles, 'largefiles')
--- a/hgext/largefiles/uisetup.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/hgext/largefiles/uisetup.py	Thu Apr 17 19:36:17 2014 -0400
@@ -9,10 +9,9 @@
 '''setup for largefiles extension: uisetup'''
 
 from mercurial import archival, cmdutil, commands, extensions, filemerge, hg, \
-    httppeer, merge, scmutil, sshpeer, wireproto, revset
+    httppeer, merge, scmutil, sshpeer, wireproto, revset, subrepo
 from mercurial.i18n import _
 from mercurial.hgweb import hgweb_mod, webcommands
-from mercurial.subrepo import hgsubrepo
 
 import overrides
 import proto
@@ -42,7 +41,7 @@
     # Subrepos call status function
     entry = extensions.wrapcommand(commands.table, 'status',
                                    overrides.overridestatus)
-    entry = extensions.wrapfunction(hgsubrepo, 'status',
+    entry = extensions.wrapfunction(subrepo.hgsubrepo, 'status',
                                     overrides.overridestatusfn)
 
     entry = extensions.wrapcommand(commands.table, 'log',
@@ -65,14 +64,16 @@
     debugstateopt = [('', 'large', None, _('display largefiles dirstate'))]
     entry[1].extend(debugstateopt)
 
-    entry = extensions.wrapcommand(commands.table, 'outgoing',
-        overrides.overrideoutgoing)
+    outgoing = lambda orgfunc, *arg, **kwargs: orgfunc(*arg, **kwargs)
+    entry = extensions.wrapcommand(commands.table, 'outgoing', outgoing)
     outgoingopt = [('', 'large', None, _('display outgoing largefiles'))]
     entry[1].extend(outgoingopt)
+    cmdutil.outgoinghooks.add('largefiles', overrides.outgoinghook)
     entry = extensions.wrapcommand(commands.table, 'summary',
                                    overrides.overridesummary)
     summaryopt = [('', 'large', None, _('display outgoing largefiles'))]
     entry[1].extend(summaryopt)
+    cmdutil.summaryremotehooks.add('largefiles', overrides.summaryremotehook)
 
     entry = extensions.wrapcommand(commands.table, 'update',
                                    overrides.overrideupdate)
@@ -96,15 +97,15 @@
                                    overrides.overridecat)
     entry = extensions.wrapfunction(merge, '_checkunknownfile',
                                     overrides.overridecheckunknownfile)
-    entry = extensions.wrapfunction(merge, 'manifestmerge',
-                                    overrides.overridemanifestmerge)
+    entry = extensions.wrapfunction(merge, 'calculateupdates',
+                                    overrides.overridecalculateupdates)
     entry = extensions.wrapfunction(filemerge, 'filemerge',
                                     overrides.overridefilemerge)
     entry = extensions.wrapfunction(cmdutil, 'copy',
                                     overrides.overridecopy)
 
     # Summary calls dirty on the subrepos
-    entry = extensions.wrapfunction(hgsubrepo, 'dirty',
+    entry = extensions.wrapfunction(subrepo.hgsubrepo, 'dirty',
                                     overrides.overridedirty)
 
     # Backout calls revert so we need to override both the command and the
@@ -118,7 +119,8 @@
     extensions.wrapfunction(hg, 'merge', overrides.hgmerge)
 
     extensions.wrapfunction(archival, 'archive', overrides.overridearchive)
-    extensions.wrapfunction(hgsubrepo, 'archive', overrides.hgsubrepoarchive)
+    extensions.wrapfunction(subrepo.hgsubrepo, 'archive',
+                            overrides.hgsubrepoarchive)
     extensions.wrapfunction(cmdutil, 'bailifchanged',
                             overrides.overridebailifchanged)
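
Both hook points registered here (cmdutil.outgoinghooks and
cmdutil.summaryremotehooks) replace whole-command wrapping with targeted
callbacks. A toy hook point; only .add() appears above, so treating the
container as callable with the hook arguments is an assumption about
util.hooks:

    from mercurial import util

    calls = []
    point = util.hooks()
    point.add('largefiles', lambda msg: calls.append('largefiles:' + msg))
    point('ping')        # assumed: invoking the point runs every callback
    assert calls == ['largefiles:ping']
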
 
--- a/hgext/mq.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/hgext/mq.py	Thu Apr 17 19:36:17 2014 -0400
@@ -304,7 +304,7 @@
         backup = repo.ui.backupconfig('phases', 'new-commit')
     try:
         if phase is not None:
-            repo.ui.setconfig('phases', 'new-commit', phase)
+            repo.ui.setconfig('phases', 'new-commit', phase, 'mq')
         return repo.commit(*args, **kwargs)
     finally:
         if phase is not None:
@@ -826,10 +826,10 @@
                 repo.setparents(p1, merge)
 
             if all_files and '.hgsubstate' in all_files:
-                wctx = repo['.']
-                mctx = actx = repo[None]
+                wctx = repo[None]
+                pctx = repo['.']
                 overwrite = False
-                mergedsubstate = subrepo.submerge(repo, wctx, mctx, actx,
+                mergedsubstate = subrepo.submerge(repo, pctx, wctx, wctx,
                     overwrite)
                 files += mergedsubstate.keys()
 
@@ -1035,11 +1035,8 @@
             self.checkpatchname(patchfn)
         inclsubs = checksubstate(repo)
         if inclsubs:
-            inclsubs.append('.hgsubstate')
             substatestate = repo.dirstate['.hgsubstate']
         if opts.get('include') or opts.get('exclude') or pats:
-            if inclsubs:
-                pats = list(pats or []) + inclsubs
             match = scmutil.match(repo[None], pats, opts)
             # detect missing files in pats
             def badfn(f, msg):
@@ -1047,14 +1044,14 @@
                     raise util.Abort('%s: %s' % (f, msg))
             match.bad = badfn
             changes = repo.status(match=match)
-            m, a, r, d = changes[:4]
         else:
             changes = self.checklocalchanges(repo, force=True)
-            m, a, r, d = changes
-        match = scmutil.matchfiles(repo, m + a + r + inclsubs)
+        commitfiles = list(inclsubs)
+        for files in changes[:3]:
+            commitfiles.extend(files)
+        match = scmutil.matchfiles(repo, commitfiles)
         if len(repo[None].parents()) > 1:
             raise util.Abort(_('cannot manage merge changesets'))
-        commitfiles = m + a + r
         self.checktoppatch(repo)
         insert = self.fullseriesend()
         wlock = repo.wlock()
@@ -1494,7 +1491,6 @@
 
             inclsubs = checksubstate(repo, hex(patchparent))
             if inclsubs:
-                inclsubs.append('.hgsubstate')
                 substatestate = repo.dirstate['.hgsubstate']
 
             ph = patchheader(self.join(patchfn), self.plainmode)
@@ -1987,9 +1983,11 @@
                     raise util.Abort(_('-e is incompatible with import from -'))
                 filename = normname(filename)
                 self.checkreservedname(filename)
-                originpath = self.join(filename)
-                if not os.path.isfile(originpath):
-                    raise util.Abort(_("patch %s does not exist") % filename)
+                if util.url(filename).islocal():
+                    originpath = self.join(filename)
+                    if not os.path.isfile(originpath):
+                        raise util.Abort(
+                            _("patch %s does not exist") % filename)
 
                 if patchname:
                     self.checkpatchname(patchname, force)
@@ -3269,6 +3267,12 @@
         def mq(self):
             return queue(self.ui, self.baseui, self.path)
 
+        def invalidateall(self):
+            super(mqrepo, self).invalidateall()
+            if localrepo.hasunfilteredcache(self, 'mq'):
+                # recreate mq in case queue path was changed
+                delattr(self.unfiltered(), 'mq')
+
         def abortifwdirpatched(self, errmsg, force=False):
             if self.mq.applied and self.mq.checkapplied and not force:
                 parents = self.dirstate.parents()
@@ -3285,14 +3289,14 @@
             return super(mqrepo, self).commit(text, user, date, match, force,
                                               editor, extra)
 
-        def checkpush(self, force, revs):
-            if self.mq.applied and self.mq.checkapplied and not force:
+        def checkpush(self, pushop):
+            if self.mq.applied and self.mq.checkapplied and not pushop.force:
                 outapplied = [e.node for e in self.mq.applied]
-                if revs:
+                if pushop.revs:
                     # Assume applied patches have no non-patch descendants and
                     # are not on remote already. Filtering any changeset not
                     # pushed.
-                    heads = set(revs)
+                    heads = set(pushop.revs)
                     for node in reversed(outapplied):
                         if node in heads:
                             break
@@ -3303,7 +3307,7 @@
                     if self[node].phase() < phases.secret:
                         raise util.Abort(_('source has mq patches applied'))
                 # no non-secret patches pushed
-            super(mqrepo, self).checkpush(force, revs)
+            super(mqrepo, self).checkpush(pushop)
 
         def _findtags(self):
             '''augment tags from base class with patch tags'''
@@ -3409,7 +3413,7 @@
     """
     revset.getargs(x, 0, 0, _("mq takes no arguments"))
     applied = set([repo[r.node].rev() for r in repo.mq.applied])
-    return [r for r in subset if r in applied]
+    return revset.baseset([r for r in subset if r in applied])
 
 # tell hggettext to extract docstrings from these functions:
 i18nfunctions = [revsetmq]
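
checkpush now receives a single pushop object in place of the old
(force, revs) pair; the real class is mercurial.exchange.pushoperation and
carries much more state. A stand-in with just the two fields mq consults:

    class fakepushop(object):
        # minimal stand-in for exchange.pushoperation
        def __init__(self, force=False, revs=None):
            self.force = force
            self.revs = revs or []

    op = fakepushop(revs=['tip'])
    assert not op.force and op.revs == ['tip']
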
--- a/hgext/notify.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/hgext/notify.py	Thu Apr 17 19:36:17 2014 -0400
@@ -188,13 +188,12 @@
         mapfile = self.ui.config('notify', 'style')
         template = (self.ui.config('notify', hooktype) or
                     self.ui.config('notify', 'template'))
-        self.t = cmdutil.changeset_templater(self.ui, self.repo,
-                                             False, None, mapfile, False)
         if not mapfile and not template:
             template = deftemplates.get(hooktype) or single_template
         if template:
             template = templater.parsestring(template, quoted=False)
-            self.t.use_template(template)
+        self.t = cmdutil.changeset_templater(self.ui, self.repo, False, None,
+                                             template, mapfile, False)
 
     def strip(self, path):
         '''strip leading slashes from local path, turn into web-safe path.'''
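
This is the same changeset_templater migration as in keyword.py above: the
template string becomes a constructor argument (before mapfile) instead of a
separate use_template call. A sketch of the new construction, assuming the
current directory is a repository:

    from mercurial import ui as uimod, hg, cmdutil

    u = uimod.ui()
    repo = hg.repository(u, '.')    # assumes cwd is a repository
    # 3.0 signature: (ui, repo, patch, diffopts, tmpl, mapfile, buffered)
    t = cmdutil.changeset_templater(u, repo, False, None, '{rev}\n', '', False)
    u.pushbuffer()
    t.show(repo['tip'])
    print u.popbuffer()
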
--- a/hgext/pager.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/hgext/pager.py	Thu Apr 17 19:36:17 2014 -0400
@@ -129,8 +129,8 @@
                 if (always or auto and
                     (cmd in attend or
                      (cmd not in ignore and not attend))):
-                    ui.setconfig('ui', 'formatted', ui.formatted())
-                    ui.setconfig('ui', 'interactive', False)
+                    ui.setconfig('ui', 'formatted', ui.formatted(), 'pager')
+                    ui.setconfig('ui', 'interactive', False, 'pager')
                     if util.safehasattr(signal, "SIGPIPE"):
                         signal.signal(signal.SIGPIPE, signal.SIG_DFL)
                     _runpager(ui, p)
--- a/hgext/patchbomb.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/hgext/patchbomb.py	Thu Apr 17 19:36:17 2014 -0400
@@ -291,7 +291,11 @@
         return [str(r) for r in revs]
 
     def getpatches(revs):
+        prev = repo['.'].rev()
         for r in scmutil.revrange(repo, revs):
+            if r == prev and (repo[None].files() or repo[None].deleted()):
+                ui.warn(_('warning: working directory has '
+                          'uncommitted changes\n'))
             output = cStringIO.StringIO()
             cmdutil.export(repo, [r], fp=output,
                          opts=patch.diffopts(ui, opts))
@@ -546,11 +550,11 @@
             if not sendmail:
                 verifycert = ui.config('smtp', 'verifycert')
                 if opts.get('insecure'):
-                    ui.setconfig('smtp', 'verifycert', 'loose')
+                    ui.setconfig('smtp', 'verifycert', 'loose', 'patchbomb')
                 try:
                     sendmail = mail.connect(ui, mbox=mbox)
                 finally:
-                    ui.setconfig('smtp', 'verifycert', verifycert)
+                    ui.setconfig('smtp', 'verifycert', verifycert, 'patchbomb')
             ui.status(_('sending '), subj, ' ...\n')
             ui.progress(_('sending'), i, item=subj, total=len(msgs))
             if not mbox:
--- a/hgext/rebase.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/hgext/rebase.py	Thu Apr 17 19:36:17 2014 -0400
@@ -289,6 +289,9 @@
                                                            inclusive=True)
                 external = externalparent(repo, state, targetancestors)
 
+            if dest.closesbranch() and not keepbranchesf:
+                ui.status(_('reopening closed branch head %s\n') % dest)
+
         if keepbranchesf:
             # insert _savebranch at the start of extrafns so if
             # there's a user-provided extrafn it can clobber branch if
@@ -330,14 +333,15 @@
                     repo.ui.debug('resuming interrupted rebase\n')
                 else:
                     try:
-                        ui.setconfig('ui', 'forcemerge', opts.get('tool', ''))
+                        ui.setconfig('ui', 'forcemerge', opts.get('tool', ''),
+                                     'rebase')
                         stats = rebasenode(repo, rev, p1, state, collapsef)
                         if stats and stats[3] > 0:
                             raise error.InterventionRequired(
                                 _('unresolved conflicts (see hg '
                                   'resolve, then hg rebase --continue)'))
                     finally:
-                        ui.setconfig('ui', 'forcemerge', '')
+                        ui.setconfig('ui', 'forcemerge', '', 'rebase')
                 cmdutil.duplicatecopies(repo, rev, target)
                 if not collapsef:
                     newrev = concludenode(repo, rev, p1, p2, extrafn=extrafn,
@@ -516,6 +520,12 @@
             if state.get(p.rev()) == repo[p1].rev():
                 base = p.node()
                 break
+        else: # fallback when base not found
+            base = None
+
+            # Raise because this function is called incorrectly (see
+            # issue 4106)
+            raise AssertionError('no base found to rebase on '
+                                 '(rebasenode called wrong)')
     if base is not None:
         repo.ui.debug("   detach base %d:%s\n" % (repo[base].rev(), repo[base]))
     # When collapsing in-place, the parent is the common ancestor, we
@@ -703,7 +713,8 @@
                 if new != nullrev and new in seen:
                     skipped.add(old)
                 seen.add(new)
-        repo.ui.debug('computed skipped revs: %s\n' % skipped)
+        repo.ui.debug('computed skipped revs: %s\n' %
+                      (' '.join(str(r) for r in sorted(skipped)) or None))
         repo.ui.debug('rebase status resumed\n')
         return (originalwd, target, state, skipped,
                 collapse, keep, keepbranches, external, activebookmark)
@@ -790,7 +801,7 @@
                 repo.ui.debug('source is a child of destination\n')
                 return None
 
-        repo.ui.debug('rebase onto %d starting from %s\n' % (dest, roots))
+        repo.ui.debug('rebase onto %d starting from %s\n' % (dest, root))
         state.update(dict.fromkeys(rebaseset, nullrev))
         # Rebase tries to turn <dest> into a parent of <root> while
         # preserving the number of parents of rebased changesets:
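
The debug-output tweak above exists because printing a set directly yields
unstable, repr-style text; sorting and joining makes the message
deterministic, with None when nothing was skipped:

    skipped = set([5, 2, 3])
    print ' '.join(str(r) for r in sorted(skipped)) or None
    # -> 2 3 5   (prints None when the set is empty)
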
--- a/hgext/shelve.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/hgext/shelve.py	Thu Apr 17 19:36:17 2014 -0400
@@ -22,10 +22,10 @@
 """
 
 from mercurial.i18n import _
-from mercurial.node import nullid, bin, hex
-from mercurial import changegroup, cmdutil, scmutil, phases
+from mercurial.node import nullid, nullrev, bin, hex
+from mercurial import changegroup, cmdutil, scmutil, phases, commands
 from mercurial import error, hg, mdiff, merge, patch, repair, util
-from mercurial import templatefilters
+from mercurial import templatefilters, changegroup, exchange
 from mercurial import lock as lockmod
 from hgext import rebase
 import errno
@@ -68,6 +68,18 @@
                 raise
             raise util.Abort(_("shelved change '%s' not found") % self.name)
 
+    def applybundle(self):
+        fp = self.opener()
+        try:
+            gen = exchange.readbundle(self.repo.ui, fp, self.fname, self.vfs)
+            changegroup.addchangegroup(self.repo, gen, 'unshelve',
+                                       'bundle:' + self.vfs.join(self.fname))
+        finally:
+            fp.close()
+
+    def writebundle(self, cg):
+        changegroup.writebundle(cg, self.fname, 'HG10UN', self.vfs)
+
 class shelvedstate(object):
     """Handle persistence during unshelving operations.
 
@@ -122,22 +134,21 @@
     """subcommand that creates a new shelve"""
 
     def publicancestors(ctx):
-        """Compute the heads of the public ancestors of a commit.
+        """Compute the public ancestors of a commit.
 
-        Much faster than the revset heads(ancestors(ctx) - draft())"""
-        seen = set()
+        Much faster than the revset ancestors(ctx) & draft()"""
+        seen = set([nullrev])
         visit = util.deque()
         visit.append(ctx)
         while visit:
             ctx = visit.popleft()
+            yield ctx.node()
             for parent in ctx.parents():
                 rev = parent.rev()
                 if rev not in seen:
                     seen.add(rev)
                     if parent.mutable():
                         visit.append(parent)
-                    else:
-                        yield parent.node()
 
     wctx = repo[None]
     parents = wctx.parents()
@@ -173,9 +184,9 @@
                 repo.mq.checkapplied = saved
 
     if parent.node() != nullid:
-        desc = parent.description().split('\n', 1)[0]
+        desc = "changes to '%s'" % parent.description().split('\n', 1)[0]
     else:
-        desc = '(empty repository)'
+        desc = '(changes in empty repository)'
 
     if not opts['message']:
         opts['message'] = desc
@@ -228,9 +239,8 @@
         fp.write('\0'.join(shelvedfiles))
 
         bases = list(publicancestors(repo[node]))
-        cg = repo.changegroupsubset(bases, [node], 'shelve')
-        changegroup.writebundle(cg, shelvedfile(repo, name, 'hg').filename(),
-                                'HG10UN')
+        cg = changegroup.changegroupsubset(repo, bases, [node], 'shelve')
+        shelvedfile(repo, name, 'hg').writebundle(cg)
         cmdutil.export(repo, [node],
                        fp=shelvedfile(repo, name, 'patch').opener('wb'),
                        opts=mdiff.diffopts(git=True))
@@ -459,7 +469,9 @@
           ('c', 'continue', None,
            _('continue an incomplete unshelve operation')),
           ('', 'keep', None,
-           _('keep shelve after unshelving'))],
+           _('keep shelve after unshelving')),
+          ('', 'date', '',
+           _('set date for temporary commits (DEPRECATED)'), _('DATE'))],
          _('hg unshelve [SHELVED]'))
 def unshelve(ui, repo, *shelved, **opts):
     """restore a shelved change to the working directory
@@ -518,6 +530,7 @@
     if not shelvedfile(repo, basename, 'files').exists():
         raise util.Abort(_("shelved change '%s' not found") % basename)
 
+    oldquiet = ui.quiet
     wlock = lock = tr = None
     try:
         lock = repo.lock()
@@ -526,17 +539,19 @@
         tr = repo.transaction('unshelve', report=lambda x: None)
         oldtiprev = len(repo)
 
-        wctx = repo['.']
-        tmpwctx = wctx
+        pctx = repo['.']
+        tmpwctx = pctx
         # The goal is to have a commit structure like so:
-        # ...-> wctx -> tmpwctx -> shelvectx
+        # ...-> pctx -> tmpwctx -> shelvectx
         # where tmpwctx is an optional commit with the user's pending changes
         # and shelvectx is the unshelved changes. Then we merge it all down
-        # to the original wctx.
+        # to the original pctx.
 
         # Store pending changes in a commit
         m, a, r, d = repo.status()[:4]
         if m or a or r or d:
+            ui.status(_("temporarily committing pending changes "
+                        "(restore with 'hg unshelve --abort')\n"))
             def commitfunc(ui, repo, message, match, opts):
                 hasmq = util.safehasattr(repo, 'mq')
                 if hasmq:
@@ -551,28 +566,24 @@
 
             tempopts = {}
             tempopts['message'] = "pending changes temporary commit"
-            oldquiet = ui.quiet
-            try:
-                ui.quiet = True
-                node = cmdutil.commit(ui, repo, commitfunc, [], tempopts)
-            finally:
-                ui.quiet = oldquiet
+            tempopts['date'] = opts.get('date')
+            ui.quiet = True
+            node = cmdutil.commit(ui, repo, commitfunc, [], tempopts)
             tmpwctx = repo[node]
 
-        try:
-            fp = shelvedfile(repo, basename, 'hg').opener()
-            gen = changegroup.readbundle(fp, fp.name)
-            repo.addchangegroup(gen, 'unshelve', 'bundle:' + fp.name)
-            nodes = [ctx.node() for ctx in repo.set('%d:', oldtiprev)]
-            phases.retractboundary(repo, phases.secret, nodes)
-        finally:
-            fp.close()
+        ui.quiet = True
+        shelvedfile(repo, basename, 'hg').applybundle()
+        nodes = [ctx.node() for ctx in repo.set('%d:', oldtiprev)]
+        phases.retractboundary(repo, phases.secret, nodes)
+
+        ui.quiet = oldquiet
 
         shelvectx = repo['tip']
 
         # If the shelve is not immediately on top of the commit
         # we'll be merging with, rebase it to be on top.
         if tmpwctx.node() != shelvectx.parents()[0].node():
+            ui.status(_('rebasing shelved changes\n'))
             try:
                 rebase.rebase(ui, repo, **{
                     'rev' : [shelvectx.rev()],
@@ -584,7 +595,7 @@
 
                 stripnodes = [repo.changelog.node(rev)
                               for rev in xrange(oldtiprev, len(repo))]
-                shelvedstate.save(repo, basename, wctx, tmpwctx, stripnodes)
+                shelvedstate.save(repo, basename, pctx, tmpwctx, stripnodes)
 
                 util.rename(repo.join('rebasestate'),
                             repo.join('unshelverebasestate'))
@@ -599,7 +610,7 @@
                 # rebase was a no-op, so it produced no child commit
                 shelvectx = tmpwctx
 
-        mergefiles(ui, repo, wctx, shelvectx)
+        mergefiles(ui, repo, pctx, shelvectx)
         shelvedstate.clear(repo)
 
         # The transaction aborting will strip all the commits for us,
@@ -610,6 +621,7 @@
 
         unshelvecleanup(ui, repo, basename, opts)
     finally:
+        ui.quiet = oldquiet
         if tr:
             tr.release()
         lockmod.release(lock, wlock)
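
After this change the quiet handling is a single save/restore around the
whole operation (sketch, not part of the patch; do_noisy_work is a
placeholder):

    oldquiet = ui.quiet
    try:
        ui.quiet = True
        do_noisy_work()          # output suppressed here
    finally:
        ui.quiet = oldquiet      # restored exactly once, on every exit path
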
@@ -632,8 +644,8 @@
           ('p', 'patch', None,
            _('show patch')),
           ('', 'stat', None,
-           _('output diffstat-style summary of changes'))],
-         _('hg shelve'))
+           _('output diffstat-style summary of changes'))] + commands.walkopts,
+         _('hg shelve [OPTION]... [FILE]...'))
 def shelvecmd(ui, repo, *pats, **opts):
     '''save and set aside changes from the working directory
 
--- a/hgext/transplant.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/hgext/transplant.py	Thu Apr 17 19:36:17 2014 -0400
@@ -568,8 +568,9 @@
         if not heads:
             heads = repo.heads()
         ancestors = []
+        ctx = repo[dest]
         for head in heads:
-            ancestors.append(repo.changelog.ancestor(dest, head))
+            ancestors.append(ctx.ancestor(repo[head]).node())
         for node in repo.changelog.nodesbetween(ancestors, heads)[0]:
             if match(node):
                 yield node
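
For illustration (not part of the patch): the context-level API returns a
changectx, so .node() recovers what the node-level call used to return; dest
and head are hypothetical node ids:

    assert (repo[dest].ancestor(repo[head]).node() ==
            repo.changelog.ancestor(dest, head))
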
@@ -670,7 +671,8 @@
         s = revset.getset(repo, subset, x)
     else:
         s = subset
-    return [r for r in s if repo[r].extra().get('transplant_source')]
+    return revset.baseset([r for r in s if
+        repo[r].extra().get('transplant_source')])
 
 def kwtransplanted(repo, ctx, **args):
     """:transplanted: String. The node identifier of the transplanted
--- a/hgext/win32text.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/hgext/win32text.py	Thu Apr 17 19:36:17 2014 -0400
@@ -5,7 +5,7 @@
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
-'''perform automatic newline conversion
+'''perform automatic newline conversion (DEPRECATED)
 
   Deprecation: The win32text extension requires each user to configure
   the extension again and again for each clone since the configuration
--- a/i18n/check-translation.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/i18n/check-translation.py	Thu Apr 17 19:36:17 2014 -0400
@@ -66,6 +66,46 @@
 def warningchecker(msgidpat=None):
     return checker('warning', msgidpat)
 
+@warningchecker()
+def taildoublecolons(pe):
+    """Check equality of tail '::'-ness between msgid and msgstr
+
+    >>> pe = polib.POEntry(
+    ...     msgid ='ends with ::',
+    ...     msgstr='ends with ::')
+    >>> for e in taildoublecolons(pe): print e
+    >>> pe = polib.POEntry(
+    ...     msgid ='ends with ::',
+    ...     msgstr='ends without double-colons')
+    >>> for e in taildoublecolons(pe): print e
+    tail '::'-ness differs between msgid and msgstr
+    >>> pe = polib.POEntry(
+    ...     msgid ='ends without double-colons',
+    ...     msgstr='ends with ::')
+    >>> for e in taildoublecolons(pe): print e
+    tail '::'-ness differs between msgid and msgstr
+    """
+    if pe.msgid.endswith('::') != pe.msgstr.endswith('::'):
+        yield "tail '::'-ness differs between msgid and msgstr"
+
+@warningchecker()
+def indentation(pe):
+    """Check equality of initial indentation between msgid and msgstr
+
+    This may report unexpected warnings, because it is not aware of
+    the syntax of rst documents or of the context of the msgstr.
+
+    >>> pe = polib.POEntry(
+    ...     msgid ='    indented text',
+    ...     msgstr='  narrowed indentation')
+    >>> for e in indentation(pe): print e
    initial indentation width differs between msgid and msgstr
+    """
+    idindent = len(pe.msgid) - len(pe.msgid.lstrip())
+    strindent = len(pe.msgstr) - len(pe.msgstr.lstrip())
+    if idindent != strindent:
+        yield "initial indentation width differs betweeen msgid and msgstr"
+
 ####################
 
 def check(pofile, fatal=True, warning=False):
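
The new check only compares leading-whitespace widths, as this standalone
sketch (not part of the patch) shows:

    msgid = '    indented text'
    msgstr = '  narrowed indentation'
    assert len(msgid) - len(msgid.lstrip()) == 4
    assert len(msgstr) - len(msgstr.lstrip()) == 2   # differs: warn
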
--- a/i18n/de.po	Tue Apr 15 03:21:59 2014 +0900
+++ b/i18n/de.po	Thu Apr 17 19:36:17 2014 -0400
@@ -20,7 +20,7 @@
 msgstr ""
 "Project-Id-Version: Mercurial\n"
 "Report-Msgid-Bugs-To: <mercurial-devel@selenic.com>\n"
-"POT-Creation-Date: 2014-01-25 17:51+0100\n"
+"POT-Creation-Date: 2014-01-29 16:47+0100\n"
 "PO-Revision-Date: 2013-09-30 20:52+0100\n"
 "Last-Translator: Simon Heimberg <simohe@besonet.ch>\n"
 "Language-Team: \n"
@@ -2928,6 +2928,7 @@
 "  [repository]\n"
 "  native = LF"
 
+#. do not translate: .. note::
 msgid ".. note::"
 msgstr ""
 
@@ -5029,6 +5030,7 @@
 "    Siehe Hilfe zu 'paths' zu Pfad-Kurznamen und 'urls' für erlaubte\n"
 "    Formate für die Quellangabe."
 
+#. do not translate: .. container::
 msgid "    .. container:: verbose"
 msgstr ""
 
@@ -6548,6 +6550,7 @@
 "    Ohne Argumente werden die aktuell aktiven Wächter ausgegeben.\n"
 "    Mit einem Argument wird der aktuelle Wächter gesetzt."
 
+#. do not translate: .. note::
 msgid "    .. note::"
 msgstr ""
 
@@ -15694,6 +15697,7 @@
 "    order until one or more configuration files are detected."
 msgstr ""
 
+#. do not translate: .. note::
 msgid ""
 ".. note:: The registry key ``HKEY_LOCAL_MACHINE\\SOFTWARE\\Wow6432Node"
 "\\Mercurial``\n"
@@ -15873,6 +15877,7 @@
 msgid "    stable5 = latest -b stable"
 msgstr ""
 
+#. do not translate: .. note::
 msgid ""
 ".. note:: It is possible to create aliases with the same names as\n"
 "   existing commands, which will then override the original\n"
@@ -15918,6 +15923,7 @@
 "echo foo`` call above, ``$HG_ARGS`` would expand to ``echo foo``."
 msgstr ""
 
+#. do not translate: .. note::
 msgid ""
 ".. note:: Some global configuration options such as ``-R`` are\n"
 "   processed before shell aliases and will thus not be passed to\n"
@@ -16101,6 +16107,7 @@
 "the command."
 msgstr ""
 
+#. do not translate: .. note::
 msgid ""
 ".. note:: The tempfile mechanism is recommended for Windows systems,\n"
 "   where the standard shell I/O redirection operators often have\n"
@@ -16572,6 +16579,7 @@
 "  update failed (e.g. because conflicts not resolved), ``$HG_ERROR=1``."
 msgstr ""
 
+#. do not translate: .. note::
 msgid ""
 ".. note:: It is generally better to use standard hooks rather than the\n"
 "   generic pre- and post- command hooks as they are guaranteed to be\n"
@@ -16580,6 +16588,7 @@
 "   generate a commit (e.g. tag) and not just the commit command."
 msgstr ""
 
+#. do not translate: .. note::
 msgid ""
 ".. note:: Environment variables with empty values may not be passed to\n"
 "   hooks on platforms such as Windows. As an example, ``$HG_PARENT2``\n"
@@ -18967,6 +18976,7 @@
 ":Manual group:   Mercurial Manual"
 msgstr ""
 
+#. do not translate: .. contents::
 msgid ""
 ".. contents::\n"
 "   :backlinks: top\n"
@@ -19017,6 +19027,7 @@
 "    repository."
 msgstr ""
 
+#. do not translate: .. include::
 msgid ".. include:: hg.1.gendoc.txt"
 msgstr ""
 
@@ -19121,6 +19132,7 @@
 "Public License version 2 or any later version."
 msgstr ""
 
+#. do not translate: .. include::
 msgid ".. include:: common.txt\n"
 msgstr ""
 
@@ -19143,6 +19155,7 @@
 ":Manual group:   Mercurial Manual"
 msgstr ""
 
+#. do not translate: .. include::
 msgid ".. include:: hgignore.5.gendoc.txt"
 msgstr ""
 
@@ -19170,6 +19183,7 @@
 "Public License version 2 or any later version."
 msgstr ""
 
+#. do not translate: .. include::
 msgid ".. include:: common.txt"
 msgstr ""
 
@@ -19281,6 +19295,7 @@
 "regexp pattern, start it with ``^``."
 msgstr ""
 
+#. do not translate: .. note::
 msgid ""
 ".. note::\n"
 "  Patterns specified in other than ``.hgignore`` are always rooted.\n"
@@ -19333,6 +19348,7 @@
 ":Manual group:   Mercurial Manual"
 msgstr ""
 
+#. do not translate: .. contents::
 msgid ""
 ".. contents::\n"
 "   :backlinks: top\n"
@@ -19348,6 +19364,7 @@
 "Beschreibung\n"
 "============"
 
+#. do not translate: .. include::
 msgid ".. include:: hgrc.5.gendoc.txt"
 msgstr ""
 
@@ -19564,6 +19581,7 @@
 msgid "8. The merge of the file fails and must be resolved before commit."
 msgstr ""
 
+#. do not translate: .. note::
 msgid ""
 ".. note::\n"
 "   After selecting a merge program, Mercurial will by default attempt\n"
@@ -19633,6 +19651,7 @@
 msgid "Alternate pattern notations must be specified explicitly."
 msgstr "Andere Schreibweisen von Mustern müssen explizit angegeben werden."
 
+#. do not translate: .. note::
 msgid ""
 ".. note::\n"
 "  Patterns specified in ``.hgignore`` are not rooted.\n"
@@ -19804,6 +19823,7 @@
 msgid " - secret changesets are neither pushed, pulled, or cloned"
 msgstr ""
 
+#. do not translate: .. note::
 msgid ""
 ".. note::\n"
 "  Pulling a draft changeset from a publishing server does not mark it\n"
@@ -19823,12 +19843,14 @@
 "  [phases]\n"
 "  publish = False"
 
+#. do not translate: .. note::
 msgid ""
 ".. note::\n"
 "  Servers running older versions of Mercurial are treated as\n"
 "  publishing."
 msgstr ""
 
+#. do not translate: .. note::
 msgid ""
 ".. note::\n"
 "   Changesets in secret phase are not exchanged with the server. This\n"
@@ -20216,6 +20238,7 @@
 "   repositories states when committing in the parent repository."
 msgstr ""
 
+#. do not translate: .. note::
 msgid ""
 "   .. note::\n"
 "      The ``.hgsubstate`` file should not be edited manually."
--- a/i18n/posplit	Tue Apr 15 03:21:59 2014 +0900
+++ b/i18n/posplit	Thu Apr 17 19:36:17 2014 -0400
@@ -5,6 +5,7 @@
 # license: MIT/X11/Expat
 #
 
+import re
 import sys
 import polib
 
@@ -30,6 +31,7 @@
     cache = {}
     entries = po[:]
     po[:] = []
+    findd = re.compile(r' *\.\. (\w+)::') # for finding directives
     for entry in entries:
         msgids = entry.msgid.split(u'\n\n')
         if entry.msgstr:
@@ -49,8 +51,27 @@
 
         delta = 0
         for msgid, msgstr in zip(msgids, msgstrs):
-            if msgid:
+            if msgid and msgid != '::':
                 newentry = mkentry(entry, delta, msgid, msgstr)
+                mdirective = findd.match(msgid)
+                if mdirective:
+                    if not msgid[mdirective.end():].rstrip():
+                        # only directive, nothing to translate here
+                        continue
+                    directive = mdirective.group(1)
+                    if directive in ('container', 'include'):
+                        if msgid.rstrip('\n').count('\n') == 0:
+                            # only rst syntax, nothing to translate
+                            continue
+                        else:
+                            # more lines follow the directive, unexpected
+                            print 'Warning: text follows line with directive' \
+                                  ' %s' % directive
+                    comment = 'do not translate: .. %s::' % directive
+                    if not newentry.comment:
+                        newentry.comment = comment
+                    elif comment not in newentry.comment:
+                        newentry.comment += '\n' + comment
                 addentry(po, newentry, cache)
             delta += 2 + msgid.count('\n')
     po.save()
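
A quick check (not part of the patch) of what the directive regex captures:

    import re
    findd = re.compile(r' *\.\. (\w+)::')   # same pattern as above
    assert findd.match('.. note:: text').group(1) == 'note'
    assert findd.match('   .. container:: verbose').group(1) == 'container'
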
--- a/mercurial/ancestor.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/ancestor.py	Thu Apr 17 19:36:17 2014 -0400
@@ -9,6 +9,62 @@
 import util
 from node import nullrev
 
+def commonancestorsheads(pfunc, *nodes):
+    """Returns a set with the heads of all common ancestors of all nodes,
+    heads(::nodes[0] and ::nodes[1] and ...).
+
+    pfunc must return a list of parent vertices for a given vertex.
+    """
+    if not isinstance(nodes, set):
+        nodes = set(nodes)
+    if nullrev in nodes:
+        return set()
+    if len(nodes) <= 1:
+        return nodes
+
+    allseen = (1 << len(nodes)) - 1
+    seen = [0] * (max(nodes) + 1)
+    for i, n in enumerate(nodes):
+        seen[n] = 1 << i
+    poison = 1 << (i + 1)
+
+    gca = set()
+    interesting = len(nodes)
+    nv = len(seen) - 1
+    while nv >= 0 and interesting:
+        v = nv
+        nv -= 1
+        if not seen[v]:
+            continue
+        sv = seen[v]
+        if sv < poison:
+            interesting -= 1
+            if sv == allseen:
+                gca.add(v)
+                sv |= poison
+                if v in nodes:
+                    # history is linear
+                    return set([v])
+        if sv < poison:
+            for p in pfunc(v):
+                sp = seen[p]
+                if p == nullrev:
+                    continue
+                if sp == 0:
+                    seen[p] = sv
+                    interesting += 1
+                elif sp != sv:
+                    seen[p] |= sv
+        else:
+            for p in pfunc(v):
+                if p == nullrev:
+                    continue
+                sp = seen[p]
+                if sp and sp < poison:
+                    interesting -= 1
+                seen[p] = sv
+    return gca
+
 def ancestors(pfunc, *orignodes):
     """
     Returns the common ancestors of a and b that are furthest from a
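
A toy check (not part of the patch), with revisions as ints and parents
supplied by a dict: the common ancestors of 3 and 4 below are {0, 1}, whose
single head is 1:

    parents = {0: [], 1: [0], 2: [0], 3: [1, 2], 4: [1]}
    pfunc = lambda v: parents[v]
    assert commonancestorsheads(pfunc, 3, 4) == set([1])
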
@@ -16,59 +72,6 @@
 
     pfunc must return a list of parent vertices for a given vertex.
     """
-    if not isinstance(orignodes, set):
-        orignodes = set(orignodes)
-    if nullrev in orignodes:
-        return set()
-    if len(orignodes) <= 1:
-        return orignodes
-
-    def candidates(nodes):
-        allseen = (1 << len(nodes)) - 1
-        seen = [0] * (max(nodes) + 1)
-        for i, n in enumerate(nodes):
-            seen[n] = 1 << i
-        poison = 1 << (i + 1)
-
-        gca = set()
-        interesting = left = len(nodes)
-        nv = len(seen) - 1
-        while nv >= 0 and interesting:
-            v = nv
-            nv -= 1
-            if not seen[v]:
-                continue
-            sv = seen[v]
-            if sv < poison:
-                interesting -= 1
-                if sv == allseen:
-                    gca.add(v)
-                    sv |= poison
-                    if v in nodes:
-                        left -= 1
-                        if left <= 1:
-                            # history is linear
-                            return set([v])
-            if sv < poison:
-                for p in pfunc(v):
-                    sp = seen[p]
-                    if p == nullrev:
-                        continue
-                    if sp == 0:
-                        seen[p] = sv
-                        interesting += 1
-                    elif sp != sv:
-                        seen[p] |= sv
-            else:
-                for p in pfunc(v):
-                    if p == nullrev:
-                        continue
-                    sp = seen[p]
-                    if sp and sp < poison:
-                        interesting -= 1
-                    seen[p] = sv
-        return gca
-
     def deepest(nodes):
         interesting = {}
         count = max(nodes) + 1
@@ -125,95 +128,12 @@
             k |= i
         return set(n for (i, n) in mapping if k & i)
 
-    gca = candidates(orignodes)
+    gca = commonancestorsheads(pfunc, *orignodes)
 
     if len(gca) <= 1:
         return gca
     return deepest(gca)
 
-def genericancestor(a, b, pfunc):
-    """
-    Returns the common ancestor of a and b that is furthest from a
-    root (as measured by longest path) or None if no ancestor is
-    found. If there are multiple common ancestors at the same
-    distance, the first one found is returned.
-
-    pfunc must return a list of parent vertices for a given vertex
-    """
-
-    if a == b:
-        return a
-
-    a, b = sorted([a, b])
-
-    # find depth from root of all ancestors
-    # depth is stored as a negative for heapq
-    parentcache = {}
-    visit = [a, b]
-    depth = {}
-    while visit:
-        vertex = visit[-1]
-        pl = [p for p in pfunc(vertex) if p != nullrev]
-        parentcache[vertex] = pl
-        if not pl:
-            depth[vertex] = 0
-            visit.pop()
-        else:
-            for p in pl:
-                if p == a or p == b: # did we find a or b as a parent?
-                    return p # we're done
-                if p not in depth:
-                    visit.append(p)
-            if visit[-1] == vertex:
-                # -(maximum distance of parents + 1)
-                depth[vertex] = min([depth[p] for p in pl]) - 1
-                visit.pop()
-
-    # traverse ancestors in order of decreasing distance from root
-    def ancestors(vertex):
-        h = [(depth[vertex], vertex)]
-        seen = set()
-        while h:
-            d, n = heapq.heappop(h)
-            if n not in seen:
-                seen.add(n)
-                yield (d, n)
-                for p in parentcache[n]:
-                    heapq.heappush(h, (depth[p], p))
-
-    def generations(vertex):
-        sg, s = None, set()
-        for g, v in ancestors(vertex):
-            if g != sg:
-                if sg:
-                    yield sg, s
-                sg, s = g, set((v,))
-            else:
-                s.add(v)
-        yield sg, s
-
-    x = generations(a)
-    y = generations(b)
-    gx = x.next()
-    gy = y.next()
-
-    # increment each ancestor list until it is closer to root than
-    # the other, or they match
-    try:
-        while True:
-            if gx[0] == gy[0]:
-                for v in gx[1]:
-                    if v in gy[1]:
-                        return v
-                gy = y.next()
-                gx = x.next()
-            elif gx[0] > gy[0]:
-                gy = y.next()
-            else:
-                gx = x.next()
-    except StopIteration:
-        return None
-
 def missingancestors(revs, bases, pfunc):
     """Return all the ancestors of revs that are not ancestors of bases.
 
--- a/mercurial/bookmarks.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/bookmarks.py	Thu Apr 17 19:36:17 2014 -0400
@@ -363,22 +363,6 @@
             writer(msg)
         localmarks.write()
 
-def updateremote(ui, repo, remote, revs):
-    ui.debug("checking for updated bookmarks\n")
-    revnums = map(repo.changelog.rev, revs or [])
-    ancestors = [a for a in repo.changelog.ancestors(revnums, inclusive=True)]
-    (addsrc, adddst, advsrc, advdst, diverge, differ, invalid
-     ) = compare(repo, repo._bookmarks, remote.listkeys('bookmarks'),
-                 srchex=hex)
-
-    for b, scid, dcid in advsrc:
-        if ancestors and repo[scid].rev() not in ancestors:
-            continue
-        if remote.pushkey('bookmarks', b, dcid, scid):
-            ui.status(_("updating bookmark %s\n") % b)
-        else:
-            ui.warn(_('updating bookmark %s failed!\n') % b)
-
 def pushtoremote(ui, repo, remote, targets):
     (addsrc, adddst, advsrc, advdst, diverge, differ, invalid
      ) = compare(repo, repo._bookmarks, remote.listkeys('bookmarks'),
--- a/mercurial/branchmap.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/branchmap.py	Thu Apr 17 19:36:17 2014 -0400
@@ -8,6 +8,7 @@
 from node import bin, hex, nullid, nullrev
 import encoding
 import util
+import time
 
 def _filename(repo):
     """name of a branchcache file for a given repo or repoview"""
@@ -206,8 +207,10 @@
             if self.filteredhash is not None:
                 cachekey.append(hex(self.filteredhash))
             f.write(" ".join(cachekey) + '\n')
+            nodecount = 0
             for label, nodes in sorted(self.iteritems()):
                 for node in nodes:
+                    nodecount += 1
                     if node in self._closednodes:
                         state = 'c'
                     else:
@@ -215,6 +218,9 @@
                     f.write("%s %s %s\n" % (hex(node), state,
                                             encoding.fromlocal(label)))
             f.close()
+            repo.ui.log('branchcache',
+                        'wrote %s branch cache with %d labels and %d nodes\n',
+                        repo.filtername, len(self), nodecount)
         except (IOError, OSError, util.Abort):
             # Abort may be raise by read only opener
             pass
@@ -224,6 +230,7 @@
         missing heads, and a generator of nodes that are strictly a superset of
         heads missing, this function updates self to be correct.
         """
+        starttime = time.time()
         cl = repo.changelog
         # collect new branch entries
         newbranches = {}
@@ -272,3 +279,7 @@
                     self.tipnode = cl.node(tiprev)
                     self.tiprev = tiprev
         self.filteredhash = self._hashfiltered(repo)
+
+        duration = time.time() - starttime
+        repo.ui.log('branchcache', 'updated %s branch cache in %.4f seconds\n',
+                    repo.filtername, duration)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/bundle2.py	Thu Apr 17 19:36:17 2014 -0400
@@ -0,0 +1,739 @@
+# bundle2.py - generic container format to transmit arbitrary data.
+#
+# Copyright 2013 Facebook, Inc.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+"""Handling of the new bundle2 format
+
+The goal of bundle2 is to act as an atomically packet to transmit a set of
+payloads in an application agnostic way. It consist in a sequence of "parts"
+that will be handed to and processed by the application layer.
+
+
+General format architecture
+===========================
+
+The format is structured as follows:
+
+ - magic string
+ - stream level parameters
+ - payload parts (any number)
+ - end of stream marker.
+
+The binary format
+============================
+
+All numbers are unsigned and big-endian.
+
+stream level parameters
+------------------------
+
+The binary format is as follows:
+
+:params size: (16 bits integer)
+
+  The total number of bytes used by the parameters.
+
+:params value: arbitrary number of bytes
+
+  A blob of `params size` containing the serialized version of all stream level
+  parameters.
+
+  The blob contains a space separated list of parameters. Parameters with a
+  value are stored in the form `<name>=<value>`. Both name and value are
+  urlquoted.
+
+  Empty names are forbidden.
+
+  Names MUST start with a letter. If the first letter is lower case, the
+  parameter is advisory and can be safely ignored. However, when the first
+  letter is capital, the parameter is mandatory and the bundling process MUST
+  stop if it is not able to process it.
+
+  Stream parameters use a simple textual format for two main reasons:
+
+  - Stream level parameters should remain simple and we want to discourage any
+    crazy usage.
+  - Textual data allow easy human inspection of a bundle2 header in case of
+    troubles.
+
+  Any application level options MUST go into a bundle2 part instead.
+
+Payload part
+------------------------
+
+The binary format is as follows:
+
+:header size: (16 bits integer)
+
+  The total number of Bytes used by the part headers. When the header is empty
+  (size = 0) this is interpreted as the end of stream marker.
+
+:header:
+
+    The header defines how to interpret the part. It contains two pieces of
+    data: the part type, and the part parameters.
+
+    The part type is used to route the part to an application level handler
+    that can interpret its payload.
+
+    Part parameters are passed to the application level handler.  They are
+    meant to convey information that will help the application level object to
+    interpret the part payload.
+
+    The binary format of the header is as follows:
+
+    :typesize: (one byte)
+
+    :parttype: alphanumerical part name
+
+    :partid: A 32bits integer (unique in the bundle) that can be used to refer
+             to this part.
+
+    :parameters:
+
+        A part's parameters may have arbitrary content; the binary structure is::
+
+            <mandatory-count><advisory-count><param-sizes><param-data>
+
+        :mandatory-count: 1 byte, number of mandatory parameters
+
+        :advisory-count:  1 byte, number of advisory parameters
+
+        :param-sizes:
+
+            N pairs of bytes, where N is the total number of parameters. Each
+            pair contains (<size-of-key>, <size-of-value>) for one parameter.
+
+        :param-data:
+
+            A blob of bytes from which each parameter key and value can be
+            retrieved using the list of size pairs stored in the previous
+            field.
+
+            Mandatory parameters come first, then the advisory ones.
+
+:payload:
+
+    payload is a series of `<chunksize><chunkdata>`.
+
+    `chunksize` is a 32 bits integer, `chunkdata` are plain bytes (as many as
+    `chunksize` says). The payload part is concluded by a zero size chunk.
+
+    The current implementation always produces either zero or one chunk.
+    This is an implementation limitation that will ultimately be lifted.
+
+Bundle processing
+============================
+
+Each part is processed in order using a "part handler". Handler are registered
+for a certain part type.
+
+The matching of a part to its handler is case insensitive. The case of the
+part type is used to know if a part is mandatory or advisory. If the Part type
+contains any uppercase char it is considered mandatory. When no handler is
+known for a Mandatory part, the process is aborted and an exception is raised.
+If the part is advisory and no handler is known, the part is ignored. When the
+process is aborted, the full bundle is still read from the stream to keep the
+channel usable. But none of the part read from an abort are processed. In the
+future, dropping the stream may become an option for channel we do not care to
+preserve.
+"""
+
+import util
+import struct
+import urllib
+import string
+
+import changegroup
+from i18n import _
+
+_pack = struct.pack
+_unpack = struct.unpack
+
+_magicstring = 'HG2X'
+
+_fstreamparamsize = '>H'
+_fpartheadersize = '>H'
+_fparttypesize = '>B'
+_fpartid = '>I'
+_fpayloadsize = '>I'
+_fpartparamcount = '>BB'
+
+preferedchunksize = 4096
+
+def _makefpartparamsizes(nbparams):
+    """return a struct format to read part parameter sizes
+
+    The number of parameters is variable so we need to build that format
+    dynamically.
+    """
+    return '>'+('BB'*nbparams)
+
+parthandlermapping = {}
+
+def parthandler(parttype):
+    """decorator that register a function as a bundle2 part handler
+
+    eg::
+
+        @parthandler('myparttype')
+        def myparttypehandler(...):
+            '''process a part of type "my part".'''
+            ...
+    """
+    def _decorator(func):
+        lparttype = parttype.lower() # enforce lower case matching.
+        assert lparttype not in parthandlermapping
+        parthandlermapping[lparttype] = func
+        return func
+    return _decorator
+
+class unbundlerecords(object):
+    """keep record of what happens during and unbundle
+
+    New records are added using `records.add('cat', obj)`, where 'cat' is a
+    category of record and obj is an arbitrary object.
+
+    `records['cat']` will return all entries of this category 'cat'.
+
+    Iterating on the object itself will yield `('category', obj)` tuples
+    for all entries.
+
+    All iterations happen in chronological order.
+    """
+
+    def __init__(self):
+        self._categories = {}
+        self._sequences = []
+        self._replies = {}
+
+    def add(self, category, entry, inreplyto=None):
+        """add a new record of a given category.
+
+        The entry can then be retrieved in the list returned by
+        self['category']."""
+        self._categories.setdefault(category, []).append(entry)
+        self._sequences.append((category, entry))
+        if inreplyto is not None:
+            self.getreplies(inreplyto).add(category, entry)
+
+    def getreplies(self, partid):
+        """get the subrecords that replies to a specific part"""
+        return self._replies.setdefault(partid, unbundlerecords())
+
+    def __getitem__(self, cat):
+        return tuple(self._categories.get(cat, ()))
+
+    def __iter__(self):
+        return iter(self._sequences)
+
+    def __len__(self):
+        return len(self._sequences)
+
+    def __nonzero__(self):
+        return bool(self._sequences)
+
+class bundleoperation(object):
+    """an object that represents a single bundling process
+
+    Its purpose is to carry unbundle-related objects and states.
+
+    A new object should be created at the beginning of each bundle processing.
+    The object is to be returned by the processing function.
+
+    The object has very little content now; it will ultimately contain:
+    * an access to the repo the bundle is applied to,
+    * a ui object,
+    * a way to retrieve a transaction to add changes to the repo,
+    * a way to record the result of processing each part,
+    * a way to construct a bundle response when applicable.
+    """
+
+    def __init__(self, repo, transactiongetter):
+        self.repo = repo
+        self.ui = repo.ui
+        self.records = unbundlerecords()
+        self.gettransaction = transactiongetter
+        self.reply = None
+
+class TransactionUnavailable(RuntimeError):
+    pass
+
+def _notransaction():
+    """default method to get a transaction while processing a bundle
+
+    Raise an exception to highlight the fact that no transaction was expected
+    to be created"""
+    raise TransactionUnavailable()
+
+def processbundle(repo, unbundler, transactiongetter=_notransaction):
+    """This function process a bundle, apply effect to/from a repo
+
+    It iterates over each part then searches for and uses the proper handling
+    code to process the part. Parts are processed in order.
+
+    This is very early version of this function that will be strongly reworked
+    before final usage.
+
+    Unknown Mandatory part will abort the process.
+    """
+    op = bundleoperation(repo, transactiongetter)
+    # todo:
+    # - replace this with an init function soon.
+    # - exception catching
+    unbundler.params
+    iterparts = unbundler.iterparts()
+    part = None
+    try:
+        for part in iterparts:
+            parttype = part.type
+            # part keys are matched lower case
+            key = parttype.lower()
+            try:
+                handler = parthandlermapping[key]
+                op.ui.debug('found a handler for part %r\n' % parttype)
+            except KeyError:
+                if key != parttype: # mandatory parts
+                    # todo:
+                    # - use a more precise exception
+                    raise
+                op.ui.debug('ignoring unknown advisory part %r\n' % key)
+                # consuming the part
+                part.read()
+                continue
+
+            # handler is called outside the above try block so that we don't
+            # risk catching KeyErrors from anything other than the
+            # parthandlermapping lookup (any KeyError raised by handler()
+            # itself represents a defect of a different variety).
+            output = None
+            if op.reply is not None:
+                op.ui.pushbuffer(error=True)
+                output = ''
+            try:
+                handler(op, part)
+            finally:
+                if output is not None:
+                    output = op.ui.popbuffer()
+            if output:
+                outpart = bundlepart('b2x:output',
+                                     advisoryparams=[('in-reply-to',
+                                                      str(part.id))],
+                                     data=output)
+                op.reply.addpart(outpart)
+            part.read()
+    except Exception:
+        if part is not None:
+            # consume the bundle content
+            part.read()
+        for part in iterparts:
+            # consume the bundle content
+            part.read()
+        raise
+    return op
+
+def decodecaps(blob):
+    """decode a bundle2 caps bytes blob into a dictionnary
+
+    The blob is a list of capabilities (one per line)
+    Capabilities may have values using a line of the form::
+
+        capability=value1,value2,value3
+
+    The values are always a list."""
+    caps = {}
+    for line in blob.splitlines():
+        if not line:
+            continue
+        if '=' not in line:
+            key, vals = line, ()
+        else:
+            key, vals = line.split('=', 1)
+            vals = vals.split(',')
+        key = urllib.unquote(key)
+        vals = [urllib.unquote(v) for v in vals]
+        caps[key] = vals
+    return caps
+
+def encodecaps(caps):
+    """encode a bundle2 caps dictionary into a bytes blob"""
+    chunks = []
+    for ca in sorted(caps):
+        vals = caps[ca]
+        ca = urllib.quote(ca)
+        vals = [urllib.quote(v) for v in vals]
+        if vals:
+            ca = "%s=%s" % (ca, ','.join(vals))
+        chunks.append(ca)
+    return '\n'.join(chunks)
+
+class bundle20(object):
+    """represent an outgoing bundle2 container
+
+    Use the `addparam` method to add a stream level parameter, and `addpart` to
+    populate it. Then call `getchunks` to retrieve all the binary chunks of
+    data that compose the bundle2 container."""
+
+    def __init__(self, ui, capabilities=()):
+        self.ui = ui
+        self._params = []
+        self._parts = []
+        self.capabilities = dict(capabilities)
+
+    def addparam(self, name, value=None):
+        """add a stream level parameter"""
+        if not name:
+            raise ValueError('empty parameter name')
+        if name[0] not in string.letters:
+            raise ValueError('non letter first character: %r' % name)
+        self._params.append((name, value))
+
+    def addpart(self, part):
+        """add a new part to the bundle2 container
+
+        Parts contain the actual application payload."""
+        assert part.id is None
+        part.id = len(self._parts) # very cheap counter
+        self._parts.append(part)
+
+    def getchunks(self):
+        self.ui.debug('start emission of %s stream\n' % _magicstring)
+        yield _magicstring
+        param = self._paramchunk()
+        self.ui.debug('bundle parameter: %s\n' % param)
+        yield _pack(_fstreamparamsize, len(param))
+        if param:
+            yield param
+
+        self.ui.debug('start of parts\n')
+        for part in self._parts:
+            self.ui.debug('bundle part: "%s"\n' % part.type)
+            for chunk in part.getchunks():
+                yield chunk
+        self.ui.debug('end of bundle\n')
+        yield '\0\0'
+
+    def _paramchunk(self):
+        """return a encoded version of all stream parameters"""
+        blocks = []
+        for par, value in self._params:
+            par = urllib.quote(par)
+            if value is not None:
+                value = urllib.quote(value)
+                par = '%s=%s' % (par, value)
+            blocks.append(par)
+        return ' '.join(blocks)
+
+class unpackermixin(object):
+    """A mixin to extract bytes and struct data from a stream"""
+
+    def __init__(self, fp):
+        self._fp = fp
+
+    def _unpack(self, format):
+        """unpack this struct format from the stream"""
+        data = self._readexact(struct.calcsize(format))
+        return _unpack(format, data)
+
+    def _readexact(self, size):
+        """read exactly <size> bytes from the stream"""
+        return changegroup.readexactly(self._fp, size)
+
+
+class unbundle20(unpackermixin):
+    """interpret a bundle2 stream
+
+    This class is fed with a binary stream and yields parts through its
+    `iterparts` methods."""
+
+    def __init__(self, ui, fp, header=None):
+        """If header is specified, we do not read it out of the stream."""
+        self.ui = ui
+        super(unbundle20, self).__init__(fp)
+        if header is None:
+            header = self._readexact(4)
+            magic, version = header[0:2], header[2:4]
+            if magic != 'HG':
+                raise util.Abort(_('not a Mercurial bundle'))
+            if version != '2X':
+                raise util.Abort(_('unknown bundle version %s') % version)
+        self.ui.debug('start processing of %s stream\n' % header)
+
+    @util.propertycache
+    def params(self):
+        """dictionary of stream level parameters"""
+        self.ui.debug('reading bundle2 stream parameters\n')
+        params = {}
+        paramssize = self._unpack(_fstreamparamsize)[0]
+        if paramssize:
+            for p in self._readexact(paramssize).split(' '):
+                p = p.split('=', 1)
+                p = [urllib.unquote(i) for i in p]
+                if len(p) < 2:
+                    p.append(None)
+                self._processparam(*p)
+                params[p[0]] = p[1]
+        return params
+
+    def _processparam(self, name, value):
+        """process a parameter, applying its effect if needed
+
+        Parameters starting with a lower case letter are advisory and will be
+        ignored when unknown.  Those starting with an upper case letter are
+        mandatory; this function will raise a KeyError when one is unknown.
+
+        Note: no options are currently supported. Any input will either be
+              ignored or fail.
+        """
+        if not name:
+            raise ValueError('empty parameter name')
+        if name[0] not in string.letters:
+            raise ValueError('non letter first character: %r' % name)
+        # Some logic will be later added here to try to process the option for
+        # a dict of known parameters.
+        if name[0].islower():
+            self.ui.debug("ignoring unknown parameter %r\n" % name)
+        else:
+            raise KeyError(name)
+
+
+    def iterparts(self):
+        """yield all parts contained in the stream"""
+        # make sure params have been loaded
+        self.params
+        self.ui.debug('start extraction of bundle2 parts\n')
+        headerblock = self._readpartheader()
+        while headerblock is not None:
+            part = unbundlepart(self.ui, headerblock, self._fp)
+            yield part
+            headerblock = self._readpartheader()
+        self.ui.debug('end of bundle2 stream\n')
+
+    def _readpartheader(self):
+        """reads a part header size and return the bytes blob
+
+        returns None if empty"""
+        headersize = self._unpack(_fpartheadersize)[0]
+        self.ui.debug('part header size: %i\n' % headersize)
+        if headersize:
+            return self._readexact(headersize)
+        return None
+
+
+class bundlepart(object):
+    """A bundle2 part contains application level payload
+
+    The part `type` is used to route the part to the application level
+    handler.
+    """
+
+    def __init__(self, parttype, mandatoryparams=(), advisoryparams=(),
+                 data=''):
+        self.id = None
+        self.type = parttype
+        self.data = data
+        self.mandatoryparams = mandatoryparams
+        self.advisoryparams = advisoryparams
+
+    def getchunks(self):
+        #### header
+        ## parttype
+        header = [_pack(_fparttypesize, len(self.type)),
+                  self.type, _pack(_fpartid, self.id),
+                 ]
+        ## parameters
+        # count
+        manpar = self.mandatoryparams
+        advpar = self.advisoryparams
+        header.append(_pack(_fpartparamcount, len(manpar), len(advpar)))
+        # size
+        parsizes = []
+        for key, value in manpar:
+            parsizes.append(len(key))
+            parsizes.append(len(value))
+        for key, value in advpar:
+            parsizes.append(len(key))
+            parsizes.append(len(value))
+        paramsizes = _pack(_makefpartparamsizes(len(parsizes) / 2), *parsizes)
+        header.append(paramsizes)
+        # key, value
+        for key, value in manpar:
+            header.append(key)
+            header.append(value)
+        for key, value in advpar:
+            header.append(key)
+            header.append(value)
+        ## finalize header
+        headerchunk = ''.join(header)
+        yield _pack(_fpartheadersize, len(headerchunk))
+        yield headerchunk
+        ## payload
+        for chunk in self._payloadchunks():
+            yield _pack(_fpayloadsize, len(chunk))
+            yield chunk
+        # end of payload
+        yield _pack(_fpayloadsize, 0)
+
+    def _payloadchunks(self):
+        """yield chunks of a the part payload
+
+        Exists to handle the different methods to provide data to a part."""
+        # we only support fixed size data now.
+        # This will be improved in the future.
+        if util.safehasattr(self.data, 'next'):
+            buff = util.chunkbuffer(self.data)
+            chunk = buff.read(preferedchunksize)
+            while chunk:
+                yield chunk
+                chunk = buff.read(preferedchunksize)
+        elif len(self.data):
+            yield self.data
+
+class unbundlepart(unpackermixin):
+    """a bundle part read from a bundle"""
+
+    def __init__(self, ui, header, fp):
+        super(unbundlepart, self).__init__(fp)
+        self.ui = ui
+        # unbundle state attr
+        self._headerdata = header
+        self._headeroffset = 0
+        self._initialized = False
+        self.consumed = False
+        # part data
+        self.id = None
+        self.type = None
+        self.mandatoryparams = None
+        self.advisoryparams = None
+        self._payloadstream = None
+        self._readheader()
+
+    def _fromheader(self, size):
+        """return the next <size> byte from the header"""
+        offset = self._headeroffset
+        data = self._headerdata[offset:(offset + size)]
+        self._headeroffset = offset + size
+        return data
+
+    def _unpackheader(self, format):
+        """read given format from header
+
+        This automatically computes the size of the format to read."""
+        data = self._fromheader(struct.calcsize(format))
+        return _unpack(format, data)
+
+    def _readheader(self):
+        """read the header and setup the object"""
+        typesize = self._unpackheader(_fparttypesize)[0]
+        self.type = self._fromheader(typesize)
+        self.ui.debug('part type: "%s"\n' % self.type)
+        self.id = self._unpackheader(_fpartid)[0]
+        self.ui.debug('part id: "%s"\n' % self.id)
+        ## reading parameters
+        # param count
+        mancount, advcount = self._unpackheader(_fpartparamcount)
+        self.ui.debug('part parameters: %i\n' % (mancount + advcount))
+        # param size
+        fparamsizes = _makefpartparamsizes(mancount + advcount)
+        paramsizes = self._unpackheader(fparamsizes)
+        # make it a list of couple again
+        paramsizes = zip(paramsizes[::2], paramsizes[1::2])
+        # split mandatory from advisory
+        mansizes = paramsizes[:mancount]
+        advsizes = paramsizes[mancount:]
+        # retrieve param values
+        manparams = []
+        for key, value in mansizes:
+            manparams.append((self._fromheader(key), self._fromheader(value)))
+        advparams = []
+        for key, value in advsizes:
+            advparams.append((self._fromheader(key), self._fromheader(value)))
+        self.mandatoryparams = manparams
+        self.advisoryparams  = advparams
+        ## part payload
+        def payloadchunks():
+            payloadsize = self._unpack(_fpayloadsize)[0]
+            self.ui.debug('payload chunk size: %i\n' % payloadsize)
+            while payloadsize:
+                yield self._readexact(payloadsize)
+                payloadsize = self._unpack(_fpayloadsize)[0]
+                self.ui.debug('payload chunk size: %i\n' % payloadsize)
+        self._payloadstream = util.chunkbuffer(payloadchunks())
+        # we read the data, tell it
+        self._initialized = True
+
+    def read(self, size=None):
+        """read payload data"""
+        if not self._initialized:
+            self._readheader()
+        if size is None:
+            data = self._payloadstream.read()
+        else:
+            data = self._payloadstream.read(size)
+        if size is None or len(data) < size:
+            self.consumed = True
+        return data
+
+
+@parthandler('b2x:changegroup')
+def handlechangegroup(op, inpart):
+    """apply a changegroup part on the repo
+
+    This is a very early implementation that will be massively reworked
+    before being inflicted on any end-user.
+    """
+    # Make sure we trigger a transaction creation
+    #
+    # The addchangegroup function will get a transaction object by itself, but
+    # we need to make sure we trigger the creation of a transaction object used
+    # for the whole processing scope.
+    op.gettransaction()
+    cg = changegroup.unbundle10(inpart, 'UN')
+    ret = changegroup.addchangegroup(op.repo, cg, 'bundle2', 'bundle2')
+    op.records.add('changegroup', {'return': ret})
+    if op.reply is not None:
+        # This is definitely not the final form of this
+        # return. But one needs to start somewhere.
+        part = bundlepart('b2x:reply:changegroup', (),
+                           [('in-reply-to', str(inpart.id)),
+                            ('return', '%i' % ret)])
+        op.reply.addpart(part)
+    assert not inpart.read()
+
+@parthandler('b2x:reply:changegroup')
+def handlereplychangegroup(op, inpart):
+    p = dict(inpart.advisoryparams)
+    ret = int(p['return'])
+    op.records.add('changegroup', {'return': ret}, int(p['in-reply-to']))
+
+@parthandler('b2x:check:heads')
+def handlecheckheads(op, inpart):
+    """check that head of the repo did not change
+
+    This is used to detect a push race when using unbundle.
+    This replaces the "heads" argument of unbundle."""
+    h = inpart.read(20)
+    heads = []
+    while len(h) == 20:
+        heads.append(h)
+        h = inpart.read(20)
+    assert not h
+    if heads != op.repo.heads():
+        # delayed import to avoid a cycle (exchange imports bundle2)
+        import exchange
+        raise exchange.PushRaced()
+
+@parthandler('b2x:output')
+def handleoutput(op, inpart):
+    """forward output captured on the server to the client"""
+    for line in inpart.read().splitlines():
+        op.ui.write(('remote: %s\n' % line))
+
+@parthandler('b2x:replycaps')
+def handlereplycaps(op, inpart):
+    """Notify that a reply bundle should be created
+
+    The payload contains the capabilities information for the reply"""
+    caps = decodecaps(inpart.read())
+    if op.reply is None:
+        op.reply = bundle20(op.ui, caps)
+
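
A minimal in-memory round trip (not part of the patch) through the two main
classes; dummyui is a stub standing in for a real ui object, providing only
the debug() method bundle2 uses:

    import StringIO
    from mercurial import bundle2

    class dummyui(object):
        def debug(self, msg):
            pass

    ui = dummyui()
    bundler = bundle2.bundle20(ui)
    bundler.addparam('caps')                 # advisory stream parameter
    bundler.addpart(bundle2.bundlepart('b2x:output', data='hello'))
    stream = ''.join(bundler.getchunks())

    unbundler = bundle2.unbundle20(ui, StringIO.StringIO(stream))
    for part in unbundler.iterparts():
        print part.type, part.read()         # -> b2x:output hello
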
--- a/mercurial/bundlerepo.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/bundlerepo.py	Thu Apr 17 19:36:17 2014 -0400
@@ -14,7 +14,7 @@
 from node import nullid
 from i18n import _
 import os, tempfile, shutil
-import changegroup, util, mdiff, discovery, cmdutil, scmutil
+import changegroup, util, mdiff, discovery, cmdutil, scmutil, exchange
 import localrepo, changelog, manifest, filelog, revlog, error
 
 class bundlerevlog(revlog.revlog):
@@ -193,7 +193,7 @@
             self._tempparent = tempfile.mkdtemp()
             localrepo.instance(ui, self._tempparent, 1)
             localrepo.localrepository.__init__(self, ui, self._tempparent)
-        self.ui.setconfig('phases', 'publish', False)
+        self.ui.setconfig('phases', 'publish', False, 'bundlerepo')
 
         if path:
             self._url = 'bundle:' + util.expandpath(path) + '+' + bundlename
@@ -202,10 +202,10 @@
 
         self.tempfile = None
         f = util.posixfile(bundlename, "rb")
-        self.bundle = changegroup.readbundle(f, bundlename)
+        self.bundle = exchange.readbundle(ui, f, bundlename)
         if self.bundle.compressed():
-            fdtemp, temp = tempfile.mkstemp(prefix="hg-bundle-",
-                                            suffix=".hg10un", dir=self.path)
+            fdtemp, temp = self.vfs.mkstemp(prefix="hg-bundle-",
+                                            suffix=".hg10un")
             self.tempfile = temp
             fptemp = os.fdopen(fdtemp, 'wb')
 
@@ -219,8 +219,8 @@
             finally:
                 fptemp.close()
 
-            f = util.posixfile(self.tempfile, "rb")
-            self.bundle = changegroup.readbundle(f, bundlename)
+            f = self.vfs.open(self.tempfile, mode="rb")
+            self.bundle = exchange.readbundle(ui, f, bundlename, self.vfs)
 
         # dict with the mapping 'filename' -> position in the bundle
         self.bundlefilespos = {}
@@ -280,7 +280,7 @@
         """Close assigned bundle file immediately."""
         self.bundle.close()
         if self.tempfile is not None:
-            os.unlink(self.tempfile)
+            self.vfs.unlink(self.tempfile)
         if self._tempparent:
             shutil.rmtree(self._tempparent, True)
 
--- a/mercurial/changegroup.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/changegroup.py	Thu Apr 17 19:36:17 2014 -0400
@@ -5,10 +5,12 @@
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
+import weakref
 from i18n import _
-from node import nullrev, hex
+from node import nullrev, nullid, hex, short
 import mdiff, util, dagutil
 import struct, os, bz2, zlib, tempfile
+import discovery, error, phases, branchmap
 
 _BUNDLE10_DELTA_HEADER = "20s20s20s20s"
 
@@ -57,7 +59,7 @@
 # hgweb uses this list to communicate its preferred type
 bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN']
 
-def writebundle(cg, filename, bundletype):
+def writebundle(cg, filename, bundletype, vfs=None):
     """Write a bundle file and return its filename.
 
     Existing files will not be overwritten.
@@ -70,7 +72,10 @@
     cleanup = None
     try:
         if filename:
-            fh = open(filename, "wb")
+            if vfs:
+                fh = vfs.open(filename, "wb")
+            else:
+                fh = open(filename, "wb")
         else:
             fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
             fh = os.fdopen(fd, "wb")
@@ -86,23 +91,8 @@
         # an empty chunkgroup is the end of the changegroup
         # a changegroup has at least 2 chunkgroups (changelog and manifest).
         # after that, an empty chunkgroup is the end of the changegroup
-        empty = False
-        count = 0
-        while not empty or count <= 2:
-            empty = True
-            count += 1
-            while True:
-                chunk = getchunk(cg)
-                if not chunk:
-                    break
-                empty = False
-                fh.write(z.compress(chunkheader(len(chunk))))
-                pos = 0
-                while pos < len(chunk):
-                    next = pos + 2**20
-                    fh.write(z.compress(chunk[pos:next]))
-                    pos = next
-            fh.write(z.compress(closechunk()))
+        for chunk in cg.getchunks():
+            fh.write(z.compress(chunk))
         fh.write(z.flush())
         cleanup = None
         return filename
@@ -110,7 +100,10 @@
         if fh is not None:
             fh.close()
         if cleanup is not None:
-            os.unlink(cleanup)
+            if filename and vfs:
+                vfs.unlink(cleanup)
+            else:
+                os.unlink(cleanup)
 
 def decompressor(fh, alg):
     if alg == 'UN':
@@ -173,7 +166,7 @@
         if not l:
             return {}
         fname = readexactly(self._stream, l)
-        return dict(filename=fname)
+        return {'filename': fname}
 
     def _deltaheader(self, headertuple, prevnode):
         node, p1, p2, cs = headertuple
@@ -191,8 +184,36 @@
         header = struct.unpack(self.deltaheader, headerdata)
         delta = readexactly(self._stream, l - self.deltaheadersize)
         node, p1, p2, deltabase, cs = self._deltaheader(header, prevnode)
-        return dict(node=node, p1=p1, p2=p2, cs=cs,
-                    deltabase=deltabase, delta=delta)
+        return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
+                'deltabase': deltabase, 'delta': delta}
+
+    def getchunks(self):
+        """returns all the chunks contains in the bundle
+
+        Used when you need to forward the binary stream to a file or another
+        network API. To do so, it parse the changegroup data, otherwise it will
+        block in case of sshrepo because it don't know the end of the stream.
+        """
+        # an empty chunkgroup is the end of the changegroup
+        # a changegroup has at least 2 chunkgroups (changelog and manifest).
+        # after that, an empty chunkgroup is the end of the changegroup
+        empty = False
+        count = 0
+        while not empty or count <= 2:
+            empty = True
+            count += 1
+            while True:
+                chunk = getchunk(self)
+                if not chunk:
+                    break
+                empty = False
+                yield chunkheader(len(chunk))
+                pos = 0
+                while pos < len(chunk):
+                    next = pos + 2**20
+                    yield chunk[pos:next]
+                    pos = next
+            yield closechunk()
 
 class headerlessfixup(object):
     def __init__(self, fh, h):
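
A sketch (not part of the patch) of the forwarding use case; the destination
path is hypothetical and cg is an unbundle10 instance:

    fh = open('/tmp/out', 'wb')
    try:
        for chunk in cg.getchunks():   # streams; never holds the whole bundle
            fh.write(chunk)
    finally:
        fh.close()
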
@@ -206,23 +227,6 @@
             return d
         return readexactly(self._fh, n)
 
-def readbundle(fh, fname):
-    header = readexactly(fh, 6)
-
-    if not fname:
-        fname = "stream"
-        if not header.startswith('HG') and header.startswith('\0'):
-            fh = headerlessfixup(fh, header)
-            header = "HG10UN"
-
-    magic, version, alg = header[0:2], header[2:4], header[4:6]
-
-    if magic != 'HG':
-        raise util.Abort(_('%s: not a Mercurial bundle') % fname)
-    if version != '10':
-        raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
-    return unbundle10(fh, alg)
-
 class bundle10(object):
     deltaheader = _BUNDLE10_DELTA_HEADER
     def __init__(self, repo, bundlecaps=None):
@@ -428,3 +432,310 @@
     def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
         # do nothing with basenode, it is implicitly the previous one in HG10
         return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
+
+def _changegroupinfo(repo, nodes, source):
+    if repo.ui.verbose or source == 'bundle':
+        repo.ui.status(_("%d changesets found\n") % len(nodes))
+    if repo.ui.debugflag:
+        repo.ui.debug("list of changesets:\n")
+        for node in nodes:
+            repo.ui.debug("%s\n" % hex(node))
+
+def getsubset(repo, outgoing, bundler, source, fastpath=False):
+    repo = repo.unfiltered()
+    commonrevs = outgoing.common
+    csets = outgoing.missing
+    heads = outgoing.missingheads
+    # We go through the fast path if we get told to, or if all (unfiltered)
+    # heads have been requested (since we then know that all linkrevs will
+    # be pulled by the client).
+    heads.sort()
+    fastpathlinkrev = fastpath or (
+            repo.filtername is None and heads == sorted(repo.heads()))
+
+    repo.hook('preoutgoing', throw=True, source=source)
+    _changegroupinfo(repo, csets, source)
+    gengroup = bundler.generate(commonrevs, csets, fastpathlinkrev, source)
+    return unbundle10(util.chunkbuffer(gengroup), 'UN')
+
+def changegroupsubset(repo, roots, heads, source):
+    """Compute a changegroup consisting of all the nodes that are
+    descendants of any of the roots and ancestors of any of the heads.
+    Return a chunkbuffer object whose read() method will return
+    successive changegroup chunks.
+
+    It is fairly complex, as determining which filenodes and which
+    manifest nodes need to be included for the changeset to be complete
+    is non-trivial.
+
+    Another wrinkle is doing the reverse, figuring out which changeset in
+    the changegroup a particular filenode or manifestnode belongs to.
+    """
+    cl = repo.changelog
+    if not roots:
+        roots = [nullid]
+    # TODO: remove call to nodesbetween.
+    csets, roots, heads = cl.nodesbetween(roots, heads)
+    discbases = []
+    for n in roots:
+        discbases.extend([p for p in cl.parents(n) if p != nullid])
+    outgoing = discovery.outgoing(cl, discbases, heads)
+    bundler = bundle10(repo)
+    return getsubset(repo, outgoing, bundler, source)
+
+def getlocalbundle(repo, source, outgoing, bundlecaps=None):
+    """Like getbundle, but taking a discovery.outgoing as an argument.
+
+    This is only implemented for local repos and reuses potentially
+    precomputed sets in outgoing."""
+    if not outgoing.missing:
+        return None
+    bundler = bundle10(repo, bundlecaps)
+    return getsubset(repo, outgoing, bundler, source)
+
+def getbundle(repo, source, heads=None, common=None, bundlecaps=None):
+    """Like changegroupsubset, but returns the set difference between the
+    ancestors of heads and the ancestors common.
+
+    If heads is None, use the local heads. If common is None, use [nullid].
+
+    The nodes in common might not all be known locally due to the way the
+    current discovery protocol works.
+    """
+    cl = repo.changelog
+    if common:
+        hasnode = cl.hasnode
+        common = [n for n in common if hasnode(n)]
+    else:
+        common = [nullid]
+    if not heads:
+        heads = cl.heads()
+    outgoing = discovery.outgoing(cl, common, heads)
+    return getlocalbundle(repo, source, outgoing, bundlecaps=bundlecaps)
+
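
These helpers move bundle generation from localrepo methods to
module-level functions. A minimal sketch of the new call pattern, not
part of the patch (repo and common are assumed to exist):

    from mercurial import changegroup

    # everything reachable from the heads but not from 'common';
    # getbundle returns None when there is nothing to send
    cg = changegroup.getbundle(repo, 'bundle', heads=repo.heads(),
                               common=common)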
+def changegroup(repo, basenodes, source):
+    # to avoid a race we use changegroupsubset() (issue1320)
+    return changegroupsubset(repo, basenodes, repo.heads(), source)
+
+def addchangegroupfiles(repo, source, revmap, trp, pr, needfiles):
+    revisions = 0
+    files = 0
+    while True:
+        chunkdata = source.filelogheader()
+        if not chunkdata:
+            break
+        f = chunkdata["filename"]
+        repo.ui.debug("adding %s revisions\n" % f)
+        pr()
+        fl = repo.file(f)
+        o = len(fl)
+        if not fl.addgroup(source, revmap, trp):
+            raise util.Abort(_("received file revlog group is empty"))
+        revisions += len(fl) - o
+        files += 1
+        if f in needfiles:
+            needs = needfiles[f]
+            for new in xrange(o, len(fl)):
+                n = fl.node(new)
+                if n in needs:
+                    needs.remove(n)
+                else:
+                    raise util.Abort(
+                        _("received spurious file revlog entry"))
+            if not needs:
+                del needfiles[f]
+    repo.ui.progress(_('files'), None)
+
+    for f, needs in needfiles.iteritems():
+        fl = repo.file(f)
+        for n in needs:
+            try:
+                fl.rev(n)
+            except error.LookupError:
+                raise util.Abort(
+                    _('missing file data for %s:%s - run hg verify') %
+                    (f, hex(n)))
+
+    return revisions, files
+
+def addchangegroup(repo, source, srctype, url, emptyok=False):
+    """Add the changegroup returned by source.read() to this repo.
+    srctype is a string like 'push', 'pull', or 'unbundle'.  url is
+    the URL of the repo where this changegroup is coming from.
+
+    Return an integer summarizing the change to this repo:
+    - nothing changed or no source: 0
+    - more heads than before: 1+added heads (2..n)
+    - fewer heads than before: -1-removed heads (-2..-n)
+    - number of heads stays the same: 1
+    """
+    repo = repo.unfiltered()
+    def csmap(x):
+        repo.ui.debug("add changeset %s\n" % short(x))
+        return len(cl)
+
+    def revmap(x):
+        return cl.rev(x)
+
+    if not source:
+        return 0
+
+    repo.hook('prechangegroup', throw=True, source=srctype, url=url)
+
+    changesets = files = revisions = 0
+    efiles = set()
+
+    # write changelog data to temp files so concurrent readers will not
+    # see an inconsistent view
+    cl = repo.changelog
+    cl.delayupdate()
+    oldheads = cl.heads()
+
+    tr = repo.transaction("\n".join([srctype, util.hidepassword(url)]))
+    try:
+        trp = weakref.proxy(tr)
+        # pull off the changeset group
+        repo.ui.status(_("adding changesets\n"))
+        clstart = len(cl)
+        class prog(object):
+            step = _('changesets')
+            count = 1
+            ui = repo.ui
+            total = None
+            def __call__(repo):
+                repo.ui.progress(repo.step, repo.count, unit=_('chunks'),
+                                 total=repo.total)
+                repo.count += 1
+        pr = prog()
+        source.callback = pr
+
+        source.changelogheader()
+        srccontent = cl.addgroup(source, csmap, trp)
+        if not (srccontent or emptyok):
+            raise util.Abort(_("received changelog group is empty"))
+        clend = len(cl)
+        changesets = clend - clstart
+        for c in xrange(clstart, clend):
+            efiles.update(repo[c].files())
+        efiles = len(efiles)
+        repo.ui.progress(_('changesets'), None)
+
+        # pull off the manifest group
+        repo.ui.status(_("adding manifests\n"))
+        pr.step = _('manifests')
+        pr.count = 1
+        pr.total = changesets # manifests <= changesets
+        # no need to check for empty manifest group here:
+        # if the result of the merge of 1 and 2 is the same in 3 and 4,
+        # no new manifest will be created and the manifest group will
+        # be empty during the pull
+        source.manifestheader()
+        repo.manifest.addgroup(source, revmap, trp)
+        repo.ui.progress(_('manifests'), None)
+
+        needfiles = {}
+        if repo.ui.configbool('server', 'validate', default=False):
+            # validate incoming csets have their manifests
+            for cset in xrange(clstart, clend):
+                mfest = repo.changelog.read(repo.changelog.node(cset))[0]
+                mfest = repo.manifest.readdelta(mfest)
+                # store file nodes we must see
+                for f, n in mfest.iteritems():
+                    needfiles.setdefault(f, set()).add(n)
+
+        # process the files
+        repo.ui.status(_("adding file changes\n"))
+        pr.step = _('files')
+        pr.count = 1
+        pr.total = efiles
+        source.callback = None
+
+        newrevs, newfiles = addchangegroupfiles(repo, source, revmap, trp, pr,
+                                                needfiles)
+        revisions += newrevs
+        files += newfiles
+
+        dh = 0
+        if oldheads:
+            heads = cl.heads()
+            dh = len(heads) - len(oldheads)
+            for h in heads:
+                if h not in oldheads and repo[h].closesbranch():
+                    dh -= 1
+        htext = ""
+        if dh:
+            htext = _(" (%+d heads)") % dh
+
+        repo.ui.status(_("added %d changesets"
+                         " with %d changes to %d files%s\n")
+                         % (changesets, revisions, files, htext))
+        repo.invalidatevolatilesets()
+
+        if changesets > 0:
+            p = lambda: cl.writepending() and repo.root or ""
+            if 'node' not in tr.hookargs:
+                tr.hookargs['node'] = hex(cl.node(clstart))
+            repo.hook('pretxnchangegroup', throw=True, source=srctype,
+                      url=url, pending=p, **tr.hookargs)
+
+        added = [cl.node(r) for r in xrange(clstart, clend)]
+        publishing = repo.ui.configbool('phases', 'publish', True)
+        if srctype in ('push', 'serve'):
+            # Old servers cannot push the boundary themselves.
+            # New servers won't push the boundary if the changeset already
+            # exists locally as secret
+            #
+            # We should not use 'added' here but the list of all changes in
+            # the bundle
+            if publishing:
+                phases.advanceboundary(repo, phases.public, srccontent)
+            else:
+                phases.advanceboundary(repo, phases.draft, srccontent)
+                phases.retractboundary(repo, phases.draft, added)
+        elif srctype != 'strip':
+            # publishing only alters behavior during push
+            #
+            # strip should not touch boundary at all
+            phases.retractboundary(repo, phases.draft, added)
+
+        # make changelog see real files again
+        cl.finalize(trp)
+
+        tr.close()
+
+        if changesets > 0:
+            if srctype != 'strip':
+                # During strip, branchcache is invalid but the coming call
+                # to `destroyed` will repair it.
+                # In other cases we can safely update the cache on disk.
+                branchmap.updatecache(repo.filtered('served'))
+            def runhooks():
+                # These hooks run when the lock releases, not when the
+                # transaction closes. So it's possible for the changelog
+                # to have changed since we last saw it.
+                if clstart >= len(repo):
+                    return
+
+                # forcefully update the on-disk branch cache
+                repo.ui.debug("updating the branch cache\n")
+                repo.hook("changegroup", source=srctype, url=url,
+                          **tr.hookargs)
+
+                for n in added:
+                    repo.hook("incoming", node=hex(n), source=srctype,
+                              url=url)
+
+                newheads = [h for h in repo.heads() if h not in oldheads]
+                repo.ui.log("incoming",
+                            "%s incoming changes - new heads: %s\n",
+                            len(added),
+                            ', '.join([hex(c[:6]) for c in newheads]))
+            repo._afterlock(runhooks)
+
+    finally:
+        tr.release()
+    # never return 0 here:
+    if dh < 0:
+        return dh - 1
+    else:
+        return dh + 1
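
The signed return value encodes the head delta described in the
docstring above. A sketch of how a caller might decode it (illustrative
only; ui, repo, source and url are assumed):

    ret = changegroup.addchangegroup(repo, source, 'pull', url)
    if ret == 0:
        ui.status('no changes\n')
    elif ret == 1:
        ui.status('heads unchanged\n')
    elif ret > 1:
        ui.status('%d new heads\n' % (ret - 1))
    else:
        ui.status('%d heads removed\n' % (-1 - ret))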
--- a/mercurial/cmdutil.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/cmdutil.py	Thu Apr 17 19:36:17 2014 -0400
@@ -10,7 +10,7 @@
 import os, sys, errno, re, tempfile
 import util, scmutil, templater, patch, error, templatekw, revlog, copies
 import match as matchmod
-import subrepo, context, repair, graphmod, revset, phases, obsolete, pathutil
+import context, repair, graphmod, revset, phases, obsolete, pathutil
 import changelog
 import bookmarks
 import lock as lockmod
@@ -223,7 +223,7 @@
     r = None
     if repo:
         if cl:
-            r = repo.changelog
+            r = repo.unfiltered().changelog
         elif mf:
             r = repo.manifest
         elif file_:
@@ -542,6 +542,131 @@
     if runfn:
         return runfn()
 
+def tryimportone(ui, repo, hunk, parents, opts, msgs, updatefunc):
+    """Utility function used by commands.import to import a single patch
+
+    This function is explicitly defined here to help the evolve extension
+    wrap this part of the import logic.
+
+    The API is currently a bit ugly because it is a simple code translation
+    from the import command. Feel free to make it better.
+
+    :hunk: a patch (as a binary string)
+    :parents: nodes that will be parents of the created commit
+    :opts: the full dict of options passed to the import command
+    :msgs: list to save the commit message to
+           (used in case we need to save it when failing)
+    :updatefunc: a function that updates a repo to a given node
+                 updatefunc(<repo>, <node>)
+    """
+    tmpname, message, user, date, branch, nodeid, p1, p2 = \
+        patch.extract(ui, hunk)
+
+    editor = commiteditor
+    if opts.get('edit'):
+        editor = commitforceeditor
+    update = not opts.get('bypass')
+    strip = opts["strip"]
+    sim = float(opts.get('similarity') or 0)
+    if not tmpname:
+        return (None, None)
+    msg = _('applied to working directory')
+
+    try:
+        cmdline_message = logmessage(ui, opts)
+        if cmdline_message:
+            # pickup the cmdline msg
+            message = cmdline_message
+        elif message:
+            # pickup the patch msg
+            message = message.strip()
+        else:
+            # launch the editor
+            message = None
+        ui.debug('message:\n%s\n' % message)
+
+        if len(parents) == 1:
+            parents.append(repo[nullid])
+        if opts.get('exact'):
+            if not nodeid or not p1:
+                raise util.Abort(_('not a Mercurial patch'))
+            p1 = repo[p1]
+            p2 = repo[p2 or nullid]
+        elif p2:
+            try:
+                p1 = repo[p1]
+                p2 = repo[p2]
+                # Without any options, consider p2 only if the
+                # patch is being applied on top of the recorded
+                # first parent.
+                if p1 != parents[0]:
+                    p1 = parents[0]
+                    p2 = repo[nullid]
+            except error.RepoError:
+                p1, p2 = parents
+        else:
+            p1, p2 = parents
+
+        n = None
+        if update:
+            if p1 != parents[0]:
+                updatefunc(repo, p1.node())
+            if p2 != parents[1]:
+                repo.setparents(p1.node(), p2.node())
+
+            if opts.get('exact') or opts.get('import_branch'):
+                repo.dirstate.setbranch(branch or 'default')
+
+            files = set()
+            patch.patch(ui, repo, tmpname, strip=strip, files=files,
+                        eolmode=None, similarity=sim / 100.0)
+            files = list(files)
+            if opts.get('no_commit'):
+                if message:
+                    msgs.append(message)
+            else:
+                if opts.get('exact') or p2:
+                    # If you got here, you either use --force and know what
+                    # you are doing or used --exact or a merge patch while
+                    # being updated to its first parent.
+                    m = None
+                else:
+                    m = scmutil.matchfiles(repo, files or [])
+                n = repo.commit(message, opts.get('user') or user,
+                                opts.get('date') or date, match=m,
+                                editor=editor)
+        else:
+            if opts.get('exact') or opts.get('import_branch'):
+                branch = branch or 'default'
+            else:
+                branch = p1.branch()
+            store = patch.filestore()
+            try:
+                files = set()
+                try:
+                    patch.patchrepo(ui, repo, p1, store, tmpname, strip,
+                                    files, eolmode=None)
+                except patch.PatchError, e:
+                    raise util.Abort(str(e))
+                memctx = context.makememctx(repo, (p1.node(), p2.node()),
+                                            message,
+                                            opts.get('user') or user,
+                                            opts.get('date') or date,
+                                            branch, files, store,
+                                            editor=commiteditor)
+                repo.savecommitmessage(memctx.description())
+                n = memctx.commit()
+            finally:
+                store.close()
+        if opts.get('exact') and hex(n) != nodeid:
+            raise util.Abort(_('patch is damaged or loses information'))
+        if n:
+            # i18n: refers to a short changeset id
+            msg = _('created %s') % short(n)
+        return (msg, n)
+    finally:
+        os.unlink(tmpname)
+
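
With the logic exposed on cmdutil, an extension such as evolve can wrap
it. A hypothetical sketch using the stock extensions.wrapfunction
helper:

    from mercurial import cmdutil, extensions

    def _tryimportone(orig, ui, repo, hunk, parents, opts, msgs,
                      updatefunc):
        # an extension could tweak opts or record state here
        return orig(ui, repo, hunk, parents, opts, msgs, updatefunc)

    def extsetup(ui):
        extensions.wrapfunction(cmdutil, 'tryimportone', _tryimportone)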
 def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False,
            opts=None):
     '''export changesets as hg patches.'''
@@ -629,7 +754,7 @@
     if listsubrepos:
         ctx1 = repo[node1]
         ctx2 = repo[node2]
-        for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
+        for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
             tempnode2 = node2
             try:
                 if node2 is not None:
@@ -823,7 +948,7 @@
 class changeset_templater(changeset_printer):
     '''format changeset information.'''
 
-    def __init__(self, ui, repo, patch, diffopts, mapfile, buffered):
+    def __init__(self, ui, repo, patch, diffopts, tmpl, mapfile, buffered):
         changeset_printer.__init__(self, ui, repo, patch, diffopts, buffered)
         formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12])
         defaulttempl = {
@@ -836,11 +961,10 @@
         defaulttempl['filecopy'] = defaulttempl['file_copy']
         self.t = templater.templater(mapfile, {'formatnode': formatnode},
                                      cache=defaulttempl)
-        self.cache = {}
+        if tmpl:
+            self.t.cache['changeset'] = tmpl
 
-    def use_template(self, t):
-        '''set template string to use'''
-        self.t.cache['changeset'] = t
+        self.cache = {}
 
     def _meaningful_parentrevs(self, ctx):
         """Return list of meaningful (or all if debug) parentrevs for rev.
@@ -922,6 +1046,66 @@
         except SyntaxError, inst:
             raise util.Abort('%s: %s' % (self.t.mapfile, inst.args[0]))
 
+def gettemplate(ui, tmpl, style):
+    """
+    Find the template matching the given template spec or style.
+    """
+
+    # ui settings
+    if not tmpl and not style:
+        tmpl = ui.config('ui', 'logtemplate')
+        if tmpl:
+            try:
+                tmpl = templater.parsestring(tmpl)
+            except SyntaxError:
+                tmpl = templater.parsestring(tmpl, quoted=False)
+            return tmpl, None
+        else:
+            style = util.expandpath(ui.config('ui', 'style', ''))
+
+    if style:
+        mapfile = style
+        if not os.path.split(mapfile)[0]:
+            mapname = (templater.templatepath('map-cmdline.' + mapfile)
+                       or templater.templatepath(mapfile))
+            if mapname:
+                mapfile = mapname
+        return None, mapfile
+
+    if not tmpl:
+        return None, None
+
+    # looks like a literal template?
+    if '{' in tmpl:
+        return tmpl, None
+
+    # perhaps a stock style?
+    if not os.path.split(tmpl)[0]:
+        mapname = (templater.templatepath('map-cmdline.' + tmpl)
+                   or templater.templatepath(tmpl))
+        if mapname and os.path.isfile(mapname):
+            return None, mapname
+
+    # perhaps it's a reference to [templates]
+    t = ui.config('templates', tmpl)
+    if t:
+        try:
+            tmpl = templater.parsestring(t)
+        except SyntaxError:
+            tmpl = templater.parsestring(t, quoted=False)
+        return tmpl, None
+
+    # perhaps it's a path to a map or a template
+    if ('/' in tmpl or '\\' in tmpl) and os.path.isfile(tmpl):
+        # is it a mapfile for a style?
+        if os.path.basename(tmpl).startswith("map-"):
+            return None, os.path.realpath(tmpl)
+        tmpl = open(tmpl).read()
+        return tmpl, None
+
+    # constant string?
+    return tmpl, None
+
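
gettemplate returns a (tmpl, mapfile) pair of which at most one is set.
A sketch of the contract under assumed inputs:

    # a literal template is returned as-is
    tmpl, mapfile = gettemplate(ui, '{rev}:{node|short}\n', None)
    assert mapfile is None

    # a stock style name resolves to its map-cmdline.* file instead
    tmpl, mapfile = gettemplate(ui, None, 'compact')
    assert tmpl is None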
 def show_changeset(ui, repo, opts, buffered=False):
     """show one changeset using template or regular display.
 
@@ -938,42 +1122,30 @@
     if opts.get('patch') or opts.get('stat'):
         patch = scmutil.matchall(repo)
 
-    tmpl = opts.get('template')
-    style = None
-    if not tmpl:
-        style = opts.get('style')
+    tmpl, mapfile = gettemplate(ui, opts.get('template'), opts.get('style'))
 
-    # ui settings
-    if not (tmpl or style):
-        tmpl = ui.config('ui', 'logtemplate')
-        if tmpl:
-            try:
-                tmpl = templater.parsestring(tmpl)
-            except SyntaxError:
-                tmpl = templater.parsestring(tmpl, quoted=False)
-        else:
-            style = util.expandpath(ui.config('ui', 'style', ''))
-
-    if not (tmpl or style):
+    if not tmpl and not mapfile:
         return changeset_printer(ui, repo, patch, opts, buffered)
 
-    mapfile = None
-    if style and not tmpl:
-        mapfile = style
-        if not os.path.split(mapfile)[0]:
-            mapname = (templater.templatepath('map-cmdline.' + mapfile)
-                       or templater.templatepath(mapfile))
-            if mapname:
-                mapfile = mapname
-
     try:
-        t = changeset_templater(ui, repo, patch, opts, mapfile, buffered)
+        t = changeset_templater(ui, repo, patch, opts, tmpl, mapfile, buffered)
     except SyntaxError, inst:
         raise util.Abort(inst.args[0])
-    if tmpl:
-        t.use_template(tmpl)
     return t
 
+def showmarker(ui, marker):
+    """utility function to display obsolescence marker in a readable way
+
+    To be used by debug function."""
+    ui.write(hex(marker.precnode()))
+    for repl in marker.succnodes():
+        ui.write(' ')
+        ui.write(hex(repl))
+    ui.write(' %X ' % marker._data[2])
+    ui.write('{%s}' % (', '.join('%r: %r' % t for t in
+                                 sorted(marker.metadata().items()))))
+    ui.write('\n')
+
 def finddate(ui, repo, date):
     """Find the tipmost changeset that matches the given date spec"""
 
@@ -995,19 +1167,11 @@
 
     raise util.Abort(_("revision matching date not found"))
 
-def increasingwindows(start, end, windowsize=8, sizelimit=512):
-    if start < end:
-        while start < end:
-            yield start, min(windowsize, end - start)
-            start += windowsize
-            if windowsize < sizelimit:
-                windowsize *= 2
-    else:
-        while start > end:
-            yield start, min(windowsize, start - end - 1)
-            start -= windowsize
-            if windowsize < sizelimit:
-                windowsize *= 2
+def increasingwindows(windowsize=8, sizelimit=512):
+    while True:
+        yield windowsize
+        if windowsize < sizelimit:
+            windowsize *= 2
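
The rewritten generator only yields window sizes; callers drain their
own revision iterator, which also works for lazy revsets. A quick
sketch of its behavior:

    sizes = increasingwindows()
    assert [sizes.next() for _ in range(4)] == [8, 16, 32, 64]
    # sizes keep doubling until the 512 cap is reached, then stay there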
 
 class FileWalkError(Exception):
     pass
@@ -1132,7 +1296,7 @@
     elif follow:
         revs = repo.revs('reverse(:.)')
     else:
-        revs = list(repo)
+        revs = revset.spanset(repo)
         revs.reverse()
     if not revs:
         return []
@@ -1148,7 +1312,7 @@
 
     if not slowpath and not match.files():
         # No files, no patterns.  Display all revs.
-        wanted = set(revs)
+        wanted = revs
 
     if not slowpath and match.files():
         # We only have to read through the filelog to find wanted revisions
@@ -1250,14 +1414,7 @@
         stop = min(revs[0], revs[-1])
         for x in xrange(rev, stop - 1, -1):
             if ff.match(x):
-                wanted.discard(x)
-
-    # Choose a small initial window if we will probably only visit a
-    # few commits.
-    limit = loglimit(opts)
-    windowsize = 8
-    if limit:
-        windowsize = min(limit, windowsize)
+                wanted = wanted - [x]
 
     # Now that wanted is correctly initialized, we can iterate over the
     # revision range, yielding only revisions in wanted.
@@ -1270,8 +1427,18 @@
             def want(rev):
                 return rev in wanted
 
-        for i, window in increasingwindows(0, len(revs), windowsize):
-            nrevs = [rev for rev in revs[i:i + window] if want(rev)]
+        it = iter(revs)
+        stopiteration = False
+        for windowsize in increasingwindows():
+            nrevs = []
+            for i in xrange(windowsize):
+                try:
+                    rev = it.next()
+                    if want(rev):
+                        nrevs.append(rev)
+                except StopIteration:
+                    stopiteration = True
+                    break
             for rev in sorted(nrevs):
                 fns = fncache.get(rev)
                 ctx = change(rev)
@@ -1284,9 +1451,13 @@
                 prepare(ctx, fns)
             for rev in nrevs:
                 yield change(rev)
+
+            if stopiteration:
+                break
+
     return iterate()
 
-def _makegraphfilematcher(repo, pats, followfirst):
+def _makelogfilematcher(repo, pats, followfirst):
     # When displaying a revision with --patch --follow FILE, we have
     # to know which file of the revision must be diffed. With
     # --follow, we want the names of the ancestors of FILE in the
@@ -1314,7 +1485,7 @@
 
     return filematcher
 
-def _makegraphlogrevset(repo, pats, opts, revs):
+def _makelogrevset(repo, pats, opts, revs):
     """Return (expr, filematcher) where expr is a revset string built
     from log options and file patterns or None. If --stat or --patch
     are not passed filematcher is None. Otherwise it is a callable
@@ -1344,8 +1515,12 @@
     follow = opts.get('follow') or opts.get('follow_first')
     followfirst = opts.get('follow_first') and 1 or 0
     # --follow with FILE behaviour depends on revs...
-    startrev = revs[0]
-    followdescendants = (len(revs) > 1 and revs[0] < revs[1]) and 1 or 0
+    it = iter(revs)
+    startrev = it.next()
+    try:
+        followdescendants = startrev < it.next()
+    except StopIteration:
+        followdescendants = False
 
     # branch and only_branch are really aliases and must be handled at
     # the same time
@@ -1421,7 +1596,7 @@
     filematcher = None
     if opts.get('patch') or opts.get('stat'):
         if follow:
-            filematcher = _makegraphfilematcher(repo, pats, followfirst)
+            filematcher = _makelogfilematcher(repo, pats, followfirst)
         else:
             filematcher = lambda rev: match
 
@@ -1464,18 +1639,18 @@
     possiblyunsorted = False # whether revs might need sorting
     if opts.get('rev'):
         revs = scmutil.revrange(repo, opts['rev'])
-        # Don't sort here because _makegraphlogrevset might depend on the
+        # Don't sort here because _makelogrevset might depend on the
         # order of revs
         possiblyunsorted = True
     else:
         if follow and len(repo) > 0:
             revs = repo.revs('reverse(:.)')
         else:
-            revs = list(repo.changelog)
+            revs = revset.spanset(repo)
             revs.reverse()
     if not revs:
-        return [], None, None
-    expr, filematcher = _makegraphlogrevset(repo, pats, opts, revs)
+        return revset.baseset(), None, None
+    expr, filematcher = _makelogrevset(repo, pats, opts, revs)
     if possiblyunsorted:
         revs.sort(reverse=True)
     if expr:
@@ -1489,7 +1664,60 @@
         revs = matcher(repo, revs)
         revs.sort(reverse=True)
     if limit is not None:
-        revs = revs[:limit]
+        limitedrevs = revset.baseset()
+        for idx, rev in enumerate(revs):
+            if idx >= limit:
+                break
+            limitedrevs.append(rev)
+        revs = limitedrevs
+
+    return revs, expr, filematcher
+
+def getlogrevs(repo, pats, opts):
+    """Return (revs, expr, filematcher) where revs is an iterable of
+    revision numbers, expr is a revset string built from log options
+    and file patterns or None, and used to filter 'revs'. If --stat or
+    --patch are not passed filematcher is None. Otherwise it is a
+    callable taking a revision number and returning a match object
+    filtering the files to be detailed when displaying the revision.
+    """
+    limit = loglimit(opts)
+    # Default --rev value depends on --follow but --follow behaviour
+    # depends on revisions resolved from --rev...
+    follow = opts.get('follow') or opts.get('follow_first')
+    if opts.get('rev'):
+        revs = scmutil.revrange(repo, opts['rev'])
+    elif follow:
+        revs = revset.baseset(repo.revs('reverse(:.)'))
+    else:
+        revs = revset.spanset(repo)
+        revs.reverse()
+    if not revs:
+        return revset.baseset([]), None, None
+    expr, filematcher = _makelogrevset(repo, pats, opts, revs)
+    if expr:
+        # Revset matchers often operate faster on revisions in changelog
+        # order, because most filters deal with the changelog.
+        if not opts.get('rev'):
+            revs.reverse()
+        matcher = revset.match(repo.ui, expr)
+        # Revset matches can reorder revisions. "A or B" typically returns
+        # the revision matching A then the revision matching B. Sort again
+        # to fix that.
+        revs = matcher(repo, revs)
+        if not opts.get('rev'):
+            revs.sort(reverse=True)
+    if limit is not None:
+        count = 0
+        limitedrevs = revset.baseset([])
+        it = iter(revs)
+        while count < limit:
+            try:
+                limitedrevs.append(it.next())
+            except StopIteration:
+                break
+            count += 1
+        revs = limitedrevs
 
     return revs, expr, filematcher
 
@@ -1531,7 +1759,7 @@
     if opts.get('copies'):
         endrev = None
         if opts.get('rev'):
-            endrev = max(scmutil.revrange(repo, opts.get('rev'))) + 1
+            endrev = scmutil.revrange(repo, opts.get('rev')).max() + 1
         getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)
     displayer = show_changeset(ui, repo, opts, buffered=True)
     showparents = [ctx.node() for ctx in repo[None].parents()]
@@ -1632,6 +1860,59 @@
     forgot.extend(forget)
     return bad, forgot
 
+def cat(ui, repo, ctx, matcher, prefix, **opts):
+    err = 1
+
+    def write(path):
+        fp = makefileobj(repo, opts.get('output'), ctx.node(),
+                         pathname=os.path.join(prefix, path))
+        data = ctx[path].data()
+        if opts.get('decode'):
+            data = repo.wwritedata(path, data)
+        fp.write(data)
+        fp.close()
+
+    # Automation often uses hg cat on single files, so special case it
+    # for performance to avoid the cost of parsing the manifest.
+    if len(matcher.files()) == 1 and not matcher.anypats():
+        file = matcher.files()[0]
+        mf = repo.manifest
+        mfnode = ctx._changeset[0]
+        if mf.find(mfnode, file)[0]:
+            write(file)
+            return 0
+
+    # Don't warn about "missing" files that are really in subrepos
+    bad = matcher.bad
+
+    def badfn(path, msg):
+        for subpath in ctx.substate:
+            if path.startswith(subpath):
+                return
+        bad(path, msg)
+
+    matcher.bad = badfn
+
+    for abs in ctx.walk(matcher):
+        write(abs)
+        err = 0
+
+    matcher.bad = bad
+
+    for subpath in sorted(ctx.substate):
+        sub = ctx.sub(subpath)
+        try:
+            submatch = matchmod.narrowmatcher(subpath, matcher)
+
+            if not sub.cat(ui, submatch, os.path.join(prefix, sub._path),
+                           **opts):
+                err = 0
+        except error.RepoLookupError:
+            ui.status(_("skipping missing subrepository: %s\n")
+                           % os.path.join(prefix, subpath))
+
+    return err
+
 def duplicatecopies(repo, rev, fromrev):
     '''reproduce copies from fromrev to rev in the dirstate'''
     for dst, src in copies.pathcopies(repo[fromrev], repo[rev]).iteritems():
@@ -1768,6 +2049,8 @@
             if not message:
                 editmsg = True
                 message = old.description()
+            elif opts.get('edit'):
+                editmsg = True
 
             pureextra = extra.copy()
             extra['amend_source'] = old.hex()
@@ -1802,10 +2085,10 @@
                     commitphase = 'secret'
                 else:
                     commitphase = old.phase()
-                repo.ui.setconfig('phases', 'new-commit', commitphase)
+                repo.ui.setconfig('phases', 'new-commit', commitphase, 'amend')
                 newid = repo.commitctx(new)
             finally:
-                repo.ui.setconfig('phases', 'new-commit', ph)
+                repo.ui.setconfig('phases', 'new-commit', ph, 'amend')
             if newid != old.node():
                 # Reroute the working copy parent to the new changeset
                 repo.setparents(newid, nullid)
@@ -1875,7 +2158,7 @@
     # run editor in the repository root
     olddir = os.getcwd()
     os.chdir(repo.root)
-    text = repo.ui.edit("\n".join(edittext), ctx.user())
+    text = repo.ui.edit("\n".join(edittext), ctx.user(), ctx.extra())
     text = re.sub("(?m)^HG:.*(\n|$)", "", text)
     os.chdir(olddir)
 
@@ -2062,54 +2345,8 @@
                         handle(revert, False)
                 else:
                     handle(remove, False)
-
         if not opts.get('dry_run'):
-            def checkout(f):
-                fc = ctx[f]
-                repo.wwrite(f, fc.data(), fc.flags())
-
-            audit_path = pathutil.pathauditor(repo.root)
-            for f in remove[0]:
-                if repo.dirstate[f] == 'a':
-                    repo.dirstate.drop(f)
-                    continue
-                audit_path(f)
-                try:
-                    util.unlinkpath(repo.wjoin(f))
-                except OSError:
-                    pass
-                repo.dirstate.remove(f)
-
-            normal = None
-            if node == parent:
-                # We're reverting to our parent. If possible, we'd like status
-                # to report the file as clean. We have to use normallookup for
-                # merges to avoid losing information about merged/dirty files.
-                if p2 != nullid:
-                    normal = repo.dirstate.normallookup
-                else:
-                    normal = repo.dirstate.normal
-            for f in revert[0]:
-                checkout(f)
-                if normal:
-                    normal(f)
-
-            for f in add[0]:
-                checkout(f)
-                repo.dirstate.add(f)
-
-            normal = repo.dirstate.normallookup
-            if node == parent and p2 == nullid:
-                normal = repo.dirstate.normal
-            for f in undelete[0]:
-                checkout(f)
-                normal(f)
-
-            copied = copies.pathcopies(repo[parent], ctx)
-
-            for f in add[0] + undelete[0] + revert[0]:
-                if f in copied:
-                    repo.dirstate.copy(copied[f], f)
+            _performrevert(repo, parents, ctx, revert, add, remove, undelete)
 
             if targetsubs:
                 # Revert the subrepos on the revert list
@@ -2118,6 +2355,63 @@
     finally:
         wlock.release()
 
+def _performrevert(repo, parents, ctx, revert, add, remove, undelete):
+    """function that actually perform all the action computed for revert
+
+    This is an independent function to let extension to plug in and react to
+    the imminent revert.
+
+    Make sure you have the working directory locked when calling this function.
+    """
+    parent, p2 = parents
+    node = ctx.node()
+    def checkout(f):
+        fc = ctx[f]
+        repo.wwrite(f, fc.data(), fc.flags())
+
+    audit_path = pathutil.pathauditor(repo.root)
+    for f in remove[0]:
+        if repo.dirstate[f] == 'a':
+            repo.dirstate.drop(f)
+            continue
+        audit_path(f)
+        try:
+            util.unlinkpath(repo.wjoin(f))
+        except OSError:
+            pass
+        repo.dirstate.remove(f)
+
+    normal = None
+    if node == parent:
+        # We're reverting to our parent. If possible, we'd like status
+        # to report the file as clean. We have to use normallookup for
+        # merges to avoid losing information about merged/dirty files.
+        if p2 != nullid:
+            normal = repo.dirstate.normallookup
+        else:
+            normal = repo.dirstate.normal
+    for f in revert[0]:
+        checkout(f)
+        if normal:
+            normal(f)
+
+    for f in add[0]:
+        checkout(f)
+        repo.dirstate.add(f)
+
+    normal = repo.dirstate.normallookup
+    if node == parent and p2 == nullid:
+        normal = repo.dirstate.normal
+    for f in undelete[0]:
+        checkout(f)
+        normal(f)
+
+    copied = copies.pathcopies(repo[parent], ctx)
+
+    for f in add[0] + undelete[0] + revert[0]:
+        if f in copied:
+            repo.dirstate.copy(copied[f], f)
+
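
Because the revert actions now go through a named function, an
extension can intercept them. A hypothetical sketch:

    from mercurial import cmdutil, extensions

    def prerevert(orig, repo, parents, ctx, revert, add, remove,
                  undelete):
        # react to the imminent revert, then defer to the original
        repo.ui.debug('reverting %d files\n' % len(revert[0]))
        return orig(repo, parents, ctx, revert, add, remove, undelete)

    def extsetup(ui):
        extensions.wrapfunction(cmdutil, '_performrevert', prerevert)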
 def command(table):
     '''returns a function object bound to table which can be used as
     a decorator for populating table as a command table'''
@@ -2133,9 +2427,24 @@
 
     return cmd
 
+# a list of (ui, repo, otherpeer, opts, missing) functions called by
+# commands.outgoing. "missing" is the "missing" attribute of the result
+# of "findcommonoutgoing()"
+outgoinghooks = util.hooks()
+
 # a list of (ui, repo) functions called by commands.summary
 summaryhooks = util.hooks()
 
+# a list of (ui, repo, opts, changes) functions called by commands.summary.
+#
+# functions should return a tuple of booleans as below, if 'changes' is None:
+#  (whether-incomings-are-needed, whether-outgoings-are-needed)
+#
+# otherwise, 'changes' is a tuple of tuples below:
+#  - (sourceurl, sourcebranch, sourcepeer, incoming)
+#  - (desturl,   destbranch,   destpeer,   outgoing)
+summaryremotehooks = util.hooks()
+
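
Extensions register with these lists through util.hooks.add. A
hypothetical example for the outgoing hook:

    from mercurial import cmdutil

    def showoutgoing(ui, repo, other, opts, missing):
        ui.note('%d outgoing changesets\n' % len(missing))

    def extsetup(ui):
        cmdutil.outgoinghooks.add('myext', showoutgoing)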
 # A list of state files kept by multistep operations like graft.
 # Since graft cannot be aborted, it is considered 'clearable' by update.
 # note: bisect is intentionally excluded
--- a/mercurial/commands.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/commands.py	Thu Apr 17 19:36:17 2014 -0400
@@ -9,6 +9,7 @@
 from lock import release
 from i18n import _
 import os, re, difflib, time, tempfile, errno
+import sys
 import hg, scmutil, util, revlog, copies, error, bookmarks
 import patch, help, encoding, templatekw, discovery
 import archival, changegroup, cmdutil, hbisect
@@ -19,7 +20,7 @@
 import dagparser, context, simplemerge, graphmod
 import random
 import setdiscovery, treediscovery, dagutil, pvec, localrepo
-import phases, obsolete
+import phases, obsolete, exchange
 
 table = {}
 
@@ -89,8 +90,8 @@
 
 templateopts = [
     ('', 'style', '',
-     _('display using template map file'), _('STYLE')),
-    ('', 'template', '',
+     _('display using template map file (DEPRECATED)'), _('STYLE')),
+    ('T', 'template', '',
      _('display with template'), _('TEMPLATE')),
 ]
 
@@ -437,9 +438,8 @@
     node = scmutil.revsingle(repo, rev).node()
 
     op1, op2 = repo.dirstate.parents()
-    a = repo.changelog.ancestor(op1, node)
-    if a != node:
-        raise util.Abort(_('cannot backout change on a different branch'))
+    if node not in repo.changelog.commonancestorsheads(op1, node):
+        raise util.Abort(_('cannot backout change that is not an ancestor'))
 
     p1, p2 = repo.changelog.parents(node)
     if p1 == nullid:
@@ -465,7 +465,8 @@
         rctx = scmutil.revsingle(repo, hex(parent))
         if not opts.get('merge') and op1 != node:
             try:
-                ui.setconfig('ui', 'forcemerge', opts.get('tool', ''))
+                ui.setconfig('ui', 'forcemerge', opts.get('tool', ''),
+                             'backout')
                 stats = mergemod.update(repo, parent, True, True, False,
                                         node, False)
                 repo.setparents(op1, op2)
@@ -479,7 +480,7 @@
                     ui.status(msg % short(node))
                 return stats[3] > 0
             finally:
-                ui.setconfig('ui', 'forcemerge', '')
+                ui.setconfig('ui', 'forcemerge', '', '')
         else:
             hg.clean(repo, node, show_stats=False)
             repo.dirstate.setbranch(branch)
@@ -510,10 +511,11 @@
             ui.status(_('merging with changeset %s\n')
                       % nice(repo.changelog.tip()))
             try:
-                ui.setconfig('ui', 'forcemerge', opts.get('tool', ''))
+                ui.setconfig('ui', 'forcemerge', opts.get('tool', ''),
+                             'backout')
                 return hg.merge(repo, hex(repo.changelog.tip()))
             finally:
-                ui.setconfig('ui', 'forcemerge', '')
+                ui.setconfig('ui', 'forcemerge', '', '')
     finally:
         wlock.release()
     return 0
@@ -1126,8 +1128,8 @@
                                "a destination"))
         common = [repo.lookup(rev) for rev in base]
         heads = revs and map(repo.lookup, revs) or revs
-        cg = repo.getbundle('bundle', heads=heads, common=common,
-                            bundlecaps=bundlecaps)
+        cg = changegroup.getbundle(repo, 'bundle', heads=heads, common=common,
+                                   bundlecaps=bundlecaps)
         outgoing = None
     else:
         dest = ui.expandpath(dest or 'default-push', dest or 'default')
@@ -1139,7 +1141,7 @@
                                                 onlyheads=heads,
                                                 force=opts.get('force'),
                                                 portable=True)
-        cg = repo.getlocalbundle('bundle', outgoing, bundlecaps)
+        cg = changegroup.getlocalbundle(repo, 'bundle', outgoing, bundlecaps)
     if not cg:
         scmutil.nochangesfound(ui, repo, outgoing and outgoing.excluded)
         return 1
@@ -1160,42 +1162,24 @@
     no revision is given, the parent of the working directory is used.
 
     Output may be to a file, in which case the name of the file is
-    given using a format string. The formatting rules are the same as
-    for the export command, with the following additions:
-
+    given using a format string. The formatting rules are as follows:
+
+    :``%%``: literal "%" character
     :``%s``: basename of file being printed
     :``%d``: dirname of file being printed, or '.' if in repository root
     :``%p``: root-relative path name of file being printed
+    :``%H``: changeset hash (40 hexadecimal digits)
+    :``%R``: changeset revision number
+    :``%h``: short-form changeset hash (12 hexadecimal digits)
+    :``%r``: zero-padded changeset revision number
+    :``%b``: basename of the exporting repository
 
     Returns 0 on success.
     """
     ctx = scmutil.revsingle(repo, opts.get('rev'))
-    err = 1
     m = scmutil.match(ctx, (file1,) + pats, opts)
 
-    def write(path):
-        fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
-                                 pathname=path)
-        data = ctx[path].data()
-        if opts.get('decode'):
-            data = repo.wwritedata(path, data)
-        fp.write(data)
-        fp.close()
-
-    # Automation often uses hg cat on single files, so special case it
-    # for performance to avoid the cost of parsing the manifest.
-    if len(m.files()) == 1 and not m.anypats():
-        file = m.files()[0]
-        mf = repo.manifest
-        mfnode = ctx._changeset[0]
-        if mf.find(mfnode, file)[0]:
-            write(file)
-            return 0
-
-    for abs in ctx.walk(m):
-        write(abs)
-        err = 0
-    return err
+    return cmdutil.cat(ui, repo, ctx, m, '', **opts)
 
 @command('^clone',
     [('U', 'noupdate', None,
@@ -1322,6 +1306,8 @@
      _('mark a branch as closed, hiding it from the branch list')),
     ('', 'amend', None, _('amend the parent of the working dir')),
     ('s', 'secret', None, _('use the secret phase for committing')),
+    ('e', 'edit', None,
+     _('further edit commit message already specified')),
     ] + walkopts + commitopts + commitopts2 + subrepoopts,
     _('[OPTION]... [FILE]...'))
 def commit(ui, repo, *pats, **opts):
@@ -1360,11 +1346,13 @@
 
     Returns 0 on success, 1 if nothing changed.
     """
+    forceeditor = opts.get('edit')
+
     if opts.get('subrepos'):
         if opts.get('amend'):
             raise util.Abort(_('cannot amend with --subrepos'))
         # Let --subrepos on the command line override config setting.
-        ui.setconfig('ui', 'commitsubrepos', True)
+        ui.setconfig('ui', 'commitsubrepos', True, 'commit')
 
     # Save this for restoring it later
     oldcommitphase = ui.config('phases', 'new-commit')
@@ -1397,23 +1385,12 @@
         if (not obsolete._enabled) and old.children():
             raise util.Abort(_('cannot amend changeset with children'))
 
-        e = cmdutil.commiteditor
-        if opts.get('force_editor'):
-            e = cmdutil.commitforceeditor
-
         # commitfunc is used only for temporary amend commit by cmdutil.amend
         def commitfunc(ui, repo, message, match, opts):
-            editor = e
-            # message contains text from -m or -l, if it's empty,
-            # open the editor with the old message
-            if not message:
-                message = old.description()
-                editor = cmdutil.commitforceeditor
             return repo.commit(message,
                                opts.get('user') or old.user(),
                                opts.get('date') or old.date(),
                                match,
-                               editor=editor,
                                extra=extra)
 
         current = repo._bookmarkcurrent
@@ -1433,21 +1410,23 @@
             newmarks.write()
     else:
         e = cmdutil.commiteditor
-        if opts.get('force_editor'):
+        if forceeditor:
             e = cmdutil.commitforceeditor
 
         def commitfunc(ui, repo, message, match, opts):
             try:
                 if opts.get('secret'):
-                    ui.setconfig('phases', 'new-commit', 'secret')
+                    ui.setconfig('phases', 'new-commit', 'secret', 'commit')
                     # Propagate to subrepos
-                    repo.baseui.setconfig('phases', 'new-commit', 'secret')
+                    repo.baseui.setconfig('phases', 'new-commit', 'secret',
+                                          'commit')
 
                 return repo.commit(message, opts.get('user'), opts.get('date'),
                                    match, editor=e, extra=extra)
             finally:
-                ui.setconfig('phases', 'new-commit', oldcommitphase)
-                repo.baseui.setconfig('phases', 'new-commit', oldcommitphase)
+                ui.setconfig('phases', 'new-commit', oldcommitphase, 'commit')
+                repo.baseui.setconfig('phases', 'new-commit', oldcommitphase,
+                                      'commit')
 
 
         node = cmdutil.commit(ui, repo, commitfunc, pats, opts)
@@ -1463,6 +1442,103 @@
 
     cmdutil.commitstatus(repo, node, branch, bheads, opts)
 
+@command('config|showconfig|debugconfig',
+    [('u', 'untrusted', None, _('show untrusted configuration options')),
+     ('e', 'edit', None, _('edit user config')),
+     ('l', 'local', None, _('edit repository config')),
+     ('g', 'global', None, _('edit global config'))],
+      _('[-u] [NAME]...'))
+def config(ui, repo, *values, **opts):
+    """show combined config settings from all hgrc files
+
+    With no arguments, print names and values of all config items.
+
+    With one argument of the form section.name, print just the value
+    of that config item.
+
+    With multiple arguments, print names and values of all config
+    items with matching section names.
+
+    With --edit, start an editor on the user-level config file. With
+    --global, edit the system-wide config file. With --local, edit the
+    repository-level config file.
+
+    With --debug, the source (filename and line number) is printed
+    for each config item.
+
+    See :hg:`help config` for more information about config files.
+
+    Returns 0 on success.
+
+    """
+
+    if opts.get('edit') or opts.get('local') or opts.get('global'):
+        if opts.get('local') and opts.get('global'):
+            raise util.Abort(_("can't use --local and --global together"))
+
+        if opts.get('local'):
+            if not repo:
+                raise util.Abort(_("can't use --local outside a repository"))
+            paths = [repo.join('hgrc')]
+        elif opts.get('global'):
+            paths = scmutil.systemrcpath()
+        else:
+            paths = scmutil.userrcpath()
+
+        for f in paths:
+            if os.path.exists(f):
+                break
+        else:
+            f = paths[0]
+            fp = open(f, "w")
+            fp.write(
+                '# example config (see "hg help config" for more info)\n'
+                '\n'
+                '[ui]\n'
+                '# name and email, e.g.\n'
+                '# username = Jane Doe <jdoe@example.com>\n'
+                'username =\n'
+                '\n'
+                '[extensions]\n'
+                '# uncomment these lines to enable some popular extensions\n'
+                '# (see "hg help extensions" for more info)\n'
+                '# pager =\n'
+                '# progress =\n'
+                '# color =\n')
+            fp.close()
+
+        editor = ui.geteditor()
+        util.system("%s \"%s\"" % (editor, f),
+                    onerr=util.Abort, errprefix=_("edit failed"),
+                    out=ui.fout)
+        return
+
+    for f in scmutil.rcpath():
+        ui.debug('read config from: %s\n' % f)
+    untrusted = bool(opts.get('untrusted'))
+    if values:
+        sections = [v for v in values if '.' not in v]
+        items = [v for v in values if '.' in v]
+        if len(items) > 1 or items and sections:
+            raise util.Abort(_('only one config item permitted'))
+    for section, name, value in ui.walkconfig(untrusted=untrusted):
+        value = str(value).replace('\n', '\\n')
+        sectname = section + '.' + name
+        if values:
+            for v in values:
+                if v == section:
+                    ui.debug('%s: ' %
+                             ui.configsource(section, name, untrusted))
+                    ui.write('%s=%s\n' % (sectname, value))
+                elif v == sectname:
+                    ui.debug('%s: ' %
+                             ui.configsource(section, name, untrusted))
+                    ui.write(value, '\n')
+        else:
+            ui.debug('%s: ' %
+                     ui.configsource(section, name, untrusted))
+            ui.write('%s=%s\n' % (sectname, value))
+
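
For example, "hg config ui.username" prints just that value,
"hg config ui" lists every item in the [ui] section, and
"hg config --edit" opens the user-level hgrc, first seeding it with the
commented skeleton above when no config file exists yet.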
 @command('copy|cp',
     [('A', 'after', None, _('record a copy that has already occurred')),
     ('f', 'force', None, _('forcibly copy over an existing managed file')),
@@ -1665,7 +1741,7 @@
     """lists the contents of a bundle"""
     f = hg.openpath(ui, bundlepath)
     try:
-        gen = changegroup.readbundle(f, bundlepath)
+        gen = exchange.readbundle(ui, f, bundlepath)
         if all:
             ui.write(("format: id, p1, p2, cset, delta base, len(delta)\n"))
 
@@ -1945,7 +2021,7 @@
         tree = fileset.parse(expr)[0]
         ui.note(tree, "\n")
 
-    for f in fileset.getfileset(ctx, expr):
+    for f in ctx.getfileset(expr):
         ui.write("%s\n" % f)
 
 @command('debugfsinfo', [], _('[PATH]'))
@@ -2089,7 +2165,10 @@
         ui.write(_(" (check that your locale is properly set)\n"))
         problems += 1
 
-    # Python lib
+    # Python
+    ui.status(_("checking Python executable (%s)\n") % sys.executable)
+    ui.status(_("checking Python version (%s)\n")
+              % ("%s.%s.%s" % sys.version_info[:3]))
     ui.status(_("checking Python lib (%s)...\n")
               % os.path.dirname(os.__file__))
 
@@ -2109,10 +2188,21 @@
     import templater
     p = templater.templatepath()
     ui.status(_("checking templates (%s)...\n") % ' '.join(p))
-    try:
-        templater.templater(templater.templatepath("map-cmdline.default"))
-    except Exception, inst:
-        ui.write(" %s\n" % inst)
+    if p:
+        m = templater.templatepath("map-cmdline.default")
+        if m:
+            # template found, check if it is working
+            try:
+                templater.templater(m)
+            except Exception, inst:
+                ui.write(" %s\n" % inst)
+                p = None
+        else:
+            ui.write(_(" template 'default' not found\n"))
+            p = None
+    else:
+        ui.write(_(" no template directories found\n"))
+    if not p:
         ui.write(_(" (templates seem to have been installed incorrectly)\n"))
         problems += 1
 
@@ -2218,14 +2308,7 @@
             l.release()
     else:
         for m in obsolete.allmarkers(repo):
-            ui.write(hex(m.precnode()))
-            for repl in m.succnodes():
-                ui.write(' ')
-                ui.write(hex(repl))
-            ui.write(' %X ' % m._data[2])
-            ui.write('{%s}' % (', '.join('%r: %r' % t for t in
-                                         sorted(m.metadata().items()))))
-            ui.write('\n')
+            cmdutil.showmarker(ui, m)
 
 @command('debugpathcomplete',
          [('f', 'full', None, _('complete an entire path')),
@@ -2384,7 +2467,7 @@
 
     if opts.get("dump"):
         numrevs = len(r)
-        ui.write("# rev p1rev p2rev start end deltastart base p1 p2"
+        ui.write("# rev p1rev p2rev start   end deltastart base   p1   p2"
                  " rawsize totalsize compression heads\n")
         ts = 0
         heads = set()
@@ -2398,7 +2481,7 @@
             ts = ts + rs
             heads -= set(r.parentrevs(rev))
             heads.add(rev)
-            ui.write("%d %d %d %d %d %d %d %d %d %d %d %d %d\n" %
+            ui.write("%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d %11d %5d\n" %
                      (rev, p1, p2, r.start(rev), r.end(rev),
                       r.start(dbase), r.start(cbase),
                       r.start(p1), r.start(p2),
@@ -2546,8 +2629,10 @@
             ui.write(('deltas against other : ') + fmt % pcfmt(numother,
                                                              numdeltas))
 
-@command('debugrevspec', [], ('REVSPEC'))
-def debugrevspec(ui, repo, expr):
+@command('debugrevspec',
+    [('', 'optimize', None, _('print parsed tree after optimizing'))],
+    ('REVSPEC'))
+def debugrevspec(ui, repo, expr, **opts):
     """parse and apply a revision specification
 
     Use --verbose to print the parsed tree before and after aliases
@@ -2559,8 +2644,11 @@
         newtree = revset.findaliases(ui, tree)
         if newtree != tree:
             ui.note(revset.prettyformat(newtree), "\n")
+        if opts["optimize"]:
+            weight, optimizedtree = revset.optimize(newtree, True)
+            ui.note("* optimized:\n", revset.prettyformat(optimizedtree), "\n")
     func = revset.match(ui, expr)
-    for c in func(repo, range(len(repo))):
+    for c in func(repo, revset.spanset(repo)):
         ui.write("%s\n" % c)
 
 @command('debugsetparents', [], _('REV1 [REV2]'))
@@ -3090,11 +3178,12 @@
                 # perform the graft merge with p1(rev) as 'ancestor'
                 try:
                     # ui.forcemerge is an internal variable, do not document
-                    repo.ui.setconfig('ui', 'forcemerge', opts.get('tool', ''))
+                    repo.ui.setconfig('ui', 'forcemerge', opts.get('tool', ''),
+                                      'graft')
                     stats = mergemod.update(repo, ctx.node(), True, True, False,
                                             ctx.p1().node())
                 finally:
-                    repo.ui.setconfig('ui', 'forcemerge', '')
+                    repo.ui.setconfig('ui', 'forcemerge', '', 'graft')
                 # report any conflicts
                 if stats and stats[3] > 0:
                     # write out state for --continue
@@ -3204,6 +3293,20 @@
         def __eq__(self, other):
             return self.line == other.line
 
+        def __iter__(self):
+            yield (self.line[:self.colstart], '')
+            yield (self.line[self.colstart:self.colend], 'grep.match')
+            rest = self.line[self.colend:]
+            while rest != '':
+                match = regexp.search(rest)
+                if not match:
+                    yield (rest, '')
+                    break
+                mstart, mend = match.span()
+                yield (rest[:mstart], '')
+                yield (rest[mstart:mend], 'grep.match')
+                rest = rest[mend:]
+
     matches = {}
     copies = {}
     def grepbody(fn, rev, body):
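
The new linestate.__iter__ yields (text, label) pairs so that every
occurrence on a matched line gets the grep.match label, not just the first.
The same splitting logic as a standalone sketch (regexp, colstart and colend
stand in for the surrounding grep state):

    import re

    def segments(line, regexp, colstart, colend):
        # label the first match, then keep searching the remainder
        yield line[:colstart], ''
        yield line[colstart:colend], 'grep.match'
        rest = line[colend:]
        while rest:
            m = regexp.search(rest)
            if not m:
                yield rest, ''
                break
            mstart, mend = m.span()
            yield rest[:mstart], ''
            yield rest[mstart:mend], 'grep.match'
            rest = rest[mend:]

    # list(segments('foo bar foo', re.compile('foo'), 0, 3)) gives
    # [('', ''), ('foo', 'grep.match'), (' bar ', ''), ('foo', 'grep.match')]
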
@@ -3232,7 +3335,7 @@
         rev = ctx.rev()
         datefunc = ui.quiet and util.shortdate or util.datestr
         found = False
-        filerevmatches = {}
+        @util.cachefunc
         def binary():
             flog = getfile(fn)
             return util.binary(flog.read(ctx.filenode(fn)))
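
Decorating binary() with util.cachefunc means the filelog read happens at
most once per file and revision, however many lines match, instead of once
per output line. For a zero-argument function, such a memoizer reduces to
something like this minimal sketch (util.cachefunc also handles functions
that take arguments):

    def cachefunc(func):
        # remember the single result of a no-argument function
        cache = []
        def f():
            if not cache:
                cache.append(func())
            return cache[0]
        return f
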
@@ -3243,7 +3346,6 @@
             iter = [('', l) for l in states]
         for change, l in iter:
             cols = [(fn, 'grep.filename'), (str(rev), 'grep.rev')]
-            before, match, after = None, None, None
 
             if opts.get('line_number'):
                 cols.append((str(l.linenum), 'grep.linenumber'))
@@ -3253,29 +3355,21 @@
                 cols.append((ui.shortuser(ctx.user()), 'grep.user'))
             if opts.get('date'):
                 cols.append((datefunc(ctx.date()), 'grep.date'))
-            if opts.get('files_with_matches'):
-                c = (fn, rev)
-                if c in filerevmatches:
-                    continue
-                filerevmatches[c] = 1
-            else:
-                before = l.line[:l.colstart]
-                match = l.line[l.colstart:l.colend]
-                after = l.line[l.colend:]
             for col, label in cols[:-1]:
                 ui.write(col, label=label)
                 ui.write(sep, label='grep.sep')
             ui.write(cols[-1][0], label=cols[-1][1])
-            if before is not None:
+            if not opts.get('files_with_matches'):
                 ui.write(sep, label='grep.sep')
                 if not opts.get('text') and binary():
                     ui.write(" Binary file matches")
                 else:
-                    ui.write(before)
-                    ui.write(match, label='grep.match')
-                    ui.write(after)
+                    for s, label in l:
+                        ui.write(s, label=label)
             ui.write(eol)
             found = True
+            if opts.get('files_with_matches'):
+                break
         return found
 
     skip = {}
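
Behaviourally, --files-with-matches now prints the name columns for the
first matching line and then breaks out of the loop, replacing the old
filerevmatches bookkeeping, so an invocation like

    hg grep --files-with-matches somepattern

still lists each matching file and revision exactly once.
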
@@ -3671,10 +3765,6 @@
     if date:
         opts['date'] = util.parsedate(date)
 
-    editor = cmdutil.commiteditor
-    if opts.get('edit'):
-        editor = cmdutil.commitforceeditor
-
     update = not opts.get('bypass')
     if not update and opts.get('no_commit'):
         raise util.Abort(_('cannot use --no-commit with --bypass'))
@@ -3693,112 +3783,9 @@
         cmdutil.bailifchanged(repo)
 
     base = opts["base"]
-    strip = opts["strip"]
     wlock = lock = tr = None
     msgs = []
 
-    def tryone(ui, hunk, parents):
-        tmpname, message, user, date, branch, nodeid, p1, p2 = \
-            patch.extract(ui, hunk)
-
-        if not tmpname:
-            return (None, None)
-        msg = _('applied to working directory')
-
-        try:
-            cmdline_message = cmdutil.logmessage(ui, opts)
-            if cmdline_message:
-                # pickup the cmdline msg
-                message = cmdline_message
-            elif message:
-                # pickup the patch msg
-                message = message.strip()
-            else:
-                # launch the editor
-                message = None
-            ui.debug('message:\n%s\n' % message)
-
-            if len(parents) == 1:
-                parents.append(repo[nullid])
-            if opts.get('exact'):
-                if not nodeid or not p1:
-                    raise util.Abort(_('not a Mercurial patch'))
-                p1 = repo[p1]
-                p2 = repo[p2 or nullid]
-            elif p2:
-                try:
-                    p1 = repo[p1]
-                    p2 = repo[p2]
-                    # Without any options, consider p2 only if the
-                    # patch is being applied on top of the recorded
-                    # first parent.
-                    if p1 != parents[0]:
-                        p1 = parents[0]
-                        p2 = repo[nullid]
-                except error.RepoError:
-                    p1, p2 = parents
-            else:
-                p1, p2 = parents
-
-            n = None
-            if update:
-                if p1 != parents[0]:
-                    hg.clean(repo, p1.node())
-                if p2 != parents[1]:
-                    repo.setparents(p1.node(), p2.node())
-
-                if opts.get('exact') or opts.get('import_branch'):
-                    repo.dirstate.setbranch(branch or 'default')
-
-                files = set()
-                patch.patch(ui, repo, tmpname, strip=strip, files=files,
-                            eolmode=None, similarity=sim / 100.0)
-                files = list(files)
-                if opts.get('no_commit'):
-                    if message:
-                        msgs.append(message)
-                else:
-                    if opts.get('exact') or p2:
-                        # If you got here, you either use --force and know what
-                        # you are doing or used --exact or a merge patch while
-                        # being updated to its first parent.
-                        m = None
-                    else:
-                        m = scmutil.matchfiles(repo, files or [])
-                    n = repo.commit(message, opts.get('user') or user,
-                                    opts.get('date') or date, match=m,
-                                    editor=editor)
-            else:
-                if opts.get('exact') or opts.get('import_branch'):
-                    branch = branch or 'default'
-                else:
-                    branch = p1.branch()
-                store = patch.filestore()
-                try:
-                    files = set()
-                    try:
-                        patch.patchrepo(ui, repo, p1, store, tmpname, strip,
-                                        files, eolmode=None)
-                    except patch.PatchError, e:
-                        raise util.Abort(str(e))
-                    memctx = context.makememctx(repo, (p1.node(), p2.node()),
-                                                message,
-                                                opts.get('user') or user,
-                                                opts.get('date') or date,
-                                                branch, files, store,
-                                                editor=cmdutil.commiteditor)
-                    repo.savecommitmessage(memctx.description())
-                    n = memctx.commit()
-                finally:
-                    store.close()
-            if opts.get('exact') and hex(n) != nodeid:
-                raise util.Abort(_('patch is damaged or loses information'))
-            if n:
-                # i18n: refers to a short changeset id
-                msg = _('created %s') % short(n)
-            return (msg, n)
-        finally:
-            os.unlink(tmpname)
 
     try:
         try:
@@ -3819,7 +3806,8 @@
 
                 haspatch = False
                 for hunk in patch.split(patchfile):
-                    (msg, node) = tryone(ui, hunk, parents)
+                    (msg, node) = cmdutil.tryimportone(ui, repo, hunk, parents,
+                                                       opts, msgs, hg.clean)
                     if msg:
                         haspatch = True
                         ui.note(msg + '\n')
@@ -3870,6 +3858,23 @@
 
     See pull for valid source format details.
 
+    .. container:: verbose
+
+      Examples:
+
+      - show incoming changes with patches and full description::
+
+          hg incoming -vp
+
+      - show incoming changes excluding merges, store a bundle::
+
+          hg in -vpM --bundle incoming.hg
+          hg pull incoming.hg
+
+      - briefly list changes inside a bundle::
+
+          hg in changes.hg -T "{desc|firstline}\\n"
+
     Returns 0 if there are incoming changes, 1 otherwise.
     """
     if opts.get('graph'):
@@ -4004,6 +4009,12 @@
     each commit. When the -v/--verbose switch is used, the list of
     changed files and full commit message are shown.
 
+    With --graph the revisions are shown as an ASCII art DAG with the most
+    recent changeset at the top.
+    'o' is a changeset, '@' is a working directory parent, 'x' is obsolete,
+    and '+' represents a fork where the changeset from the lines below is a
+    parent of the 'o' merge on the same line.
+
     .. note::
 
        log -p/--patch may generate unexpected diff output for merge
@@ -4071,55 +4082,22 @@
     if opts.get('graph'):
         return cmdutil.graphlog(ui, repo, *pats, **opts)
 
-    matchfn = scmutil.match(repo[None], pats, opts)
+    revs, expr, filematcher = cmdutil.getlogrevs(repo, pats, opts)
     limit = cmdutil.loglimit(opts)
     count = 0
 
-    getrenamed, endrev = None, None
+    getrenamed = None
     if opts.get('copies'):
+        endrev = None
         if opts.get('rev'):
-            endrev = max(scmutil.revrange(repo, opts.get('rev'))) + 1
+            endrev = scmutil.revrange(repo, opts.get('rev')).max() + 1
         getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)
 
-    df = False
-    if opts.get("date"):
-        df = util.matchdate(opts["date"])
-
-    branches = opts.get('branch', []) + opts.get('only_branch', [])
-    opts['branch'] = [repo.lookupbranch(b) for b in branches]
-
-    displayer = cmdutil.show_changeset(ui, repo, opts, True)
-    def prep(ctx, fns):
-        rev = ctx.rev()
-        parents = [p for p in repo.changelog.parentrevs(rev)
-                   if p != nullrev]
-        if opts.get('no_merges') and len(parents) == 2:
-            return
-        if opts.get('only_merges') and len(parents) != 2:
-            return
-        if opts.get('branch') and ctx.branch() not in opts['branch']:
-            return
-        if df and not df(ctx.date()[0]):
-            return
-
-        lower = encoding.lower
-        if opts.get('user'):
-            luser = lower(ctx.user())
-            for k in [lower(x) for x in opts['user']]:
-                if (k in luser):
-                    break
-            else:
-                return
-        if opts.get('keyword'):
-            luser = lower(ctx.user())
-            ldesc = lower(ctx.description())
-            lfiles = lower(" ".join(ctx.files()))
-            for k in [lower(x) for x in opts['keyword']]:
-                if (k in luser or k in ldesc or k in lfiles):
-                    break
-            else:
-                return
-
+    displayer = cmdutil.show_changeset(ui, repo, opts, buffered=True)
+    for rev in revs:
+        if count == limit:
+            break
+        ctx = repo[rev]
         copies = None
         if getrenamed is not None and rev:
             copies = []
@@ -4127,22 +4105,11 @@
                 rename = getrenamed(fn, rev)
                 if rename:
                     copies.append((fn, rename[0]))
-
-        revmatchfn = None
-        if opts.get('patch') or opts.get('stat'):
-            if opts.get('follow') or opts.get('follow_first'):
-                # note: this might be wrong when following through merges
-                revmatchfn = scmutil.match(repo[None], fns, default='path')
-            else:
-                revmatchfn = matchfn
-
+        revmatchfn = filematcher and filematcher(ctx.rev()) or None
         displayer.show(ctx, copies=copies, matchfn=revmatchfn)
-
-    for ctx in cmdutil.walkchangerevs(repo, matchfn, opts, prep):
-        if displayer.flush(ctx.rev()):
+        if displayer.flush(rev):
             count += 1
-        if count == limit:
-            break
+
     displayer.close()
 
 @command('manifest',
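
One detail worth noting in the log rewrite above: scmutil.revrange is
assumed to hand back a smartset-style object here, so the caller asks it for
.max() rather than computing max() over a materialized list:

    revs = scmutil.revrange(repo, opts.get('rev'))  # e.g. ['2::5']
    endrev = revs.max() + 1
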
@@ -4319,10 +4286,10 @@
 
     try:
         # ui.forcemerge is an internal variable, do not document
-        repo.ui.setconfig('ui', 'forcemerge', opts.get('tool', ''))
+        repo.ui.setconfig('ui', 'forcemerge', opts.get('tool', ''), 'merge')
         return hg.merge(repo, node, force=opts.get('force'))
     finally:
-        ui.setconfig('ui', 'forcemerge', '')
+        ui.setconfig('ui', 'forcemerge', '', 'merge')
 
 @command('outgoing|out',
     [('f', 'force', None, _('run even when the destination is unrelated')),
@@ -4347,8 +4314,9 @@
     """
     if opts.get('graph'):
         cmdutil.checkunsupportedgraphflags([], opts)
-        o = hg._outgoing(ui, repo, dest, opts)
-        if o is None:
+        o, other = hg._outgoing(ui, repo, dest, opts)
+        if not o:
+            cmdutil.outgoinghooks(ui, repo, other, opts, o)
             return
 
         revdag = cmdutil.graphrevs(repo, o, opts)
@@ -4356,6 +4324,7 @@
         showparents = [ctx.node() for ctx in repo[None].parents()]
         cmdutil.displaygraph(ui, revdag, displayer, showparents,
                              graphmod.asciiedges)
+        cmdutil.outgoinghooks(ui, repo, other, opts, o)
         return 0
 
     if opts.get('bookmarks'):
@@ -4702,7 +4671,7 @@
     """
 
     if opts.get('bookmark'):
-        ui.setconfig('bookmarks', 'pushing', opts['bookmark'])
+        ui.setconfig('bookmarks', 'pushing', opts['bookmark'], 'push')
         for b in opts['bookmark']:
             # translate -B options to -r so changesets get pushed
             if b in repo._bookmarks:
@@ -4716,7 +4685,15 @@
     dest, branches = hg.parseurl(dest, opts.get('branch'))
     ui.status(_('pushing to %s\n') % util.hidepassword(dest))
     revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
-    other = hg.peer(repo, opts, dest)
+    try:
+        other = hg.peer(repo, opts, dest)
+    except error.RepoError:
+        if dest == "default-push":
+            raise util.Abort(_("default repository not configured!"),
+                    hint=_('see the "path" section in "hg help config"'))
+        else:
+            raise
+
     if revs:
         revs = [repo.lookup(r) for r in scmutil.revrange(repo, revs)]
 
@@ -4726,8 +4703,9 @@
         c = repo['']
         subs = c.substate # only repos that are committed
         for s in sorted(subs):
-            if c.sub(s).push(opts) == 0:
-                return False
+            result = c.sub(s).push(opts)
+            if result == 0:
+                return not result
     finally:
         del repo._subtoppath
     result = repo.push(other, opts.get('force'), revs=revs,
@@ -4970,11 +4948,12 @@
 
                 try:
                     # resolve file
-                    ui.setconfig('ui', 'forcemerge', opts.get('tool', ''))
+                    ui.setconfig('ui', 'forcemerge', opts.get('tool', ''),
+                                 'resolve')
                     if ms.resolve(f, wctx):
                         ret = 1
                 finally:
-                    ui.setconfig('ui', 'forcemerge', '')
+                    ui.setconfig('ui', 'forcemerge', '', 'resolve')
                     ms.commit()
 
                 # replace filemerge's .orig file with our resolve file
@@ -5177,7 +5156,6 @@
         s.serve_forever()
 
     if opts["cmdserver"]:
-        checkrepo()
         s = commandserver.server(ui, repo, opts["cmdserver"])
         return s.serve()
 
@@ -5192,9 +5170,9 @@
         val = opts.get(o, '')
         if val in (None, ''): # should check against default options instead
             continue
-        baseui.setconfig("web", o, val)
+        baseui.setconfig("web", o, val, 'serve')
         if repo and repo.ui != baseui:
-            repo.ui.setconfig("web", o, val)
+            repo.ui.setconfig("web", o, val, 'serve')
 
     o = opts.get('web_conf') or opts.get('webdir_conf')
     if not o:
@@ -5249,52 +5227,6 @@
         self.httpd.serve_forever()
 
 
-@command('showconfig|debugconfig',
-    [('u', 'untrusted', None, _('show untrusted configuration options'))],
-    _('[-u] [NAME]...'))
-def showconfig(ui, repo, *values, **opts):
-    """show combined config settings from all hgrc files
-
-    With no arguments, print names and values of all config items.
-
-    With one argument of the form section.name, print just the value
-    of that config item.
-
-    With multiple arguments, print names and values of all config
-    items with matching section names.
-
-    With --debug, the source (filename and line number) is printed
-    for each config item.
-
-    Returns 0 on success.
-    """
-
-    for f in scmutil.rcpath():
-        ui.debug('read config from: %s\n' % f)
-    untrusted = bool(opts.get('untrusted'))
-    if values:
-        sections = [v for v in values if '.' not in v]
-        items = [v for v in values if '.' in v]
-        if len(items) > 1 or items and sections:
-            raise util.Abort(_('only one config item permitted'))
-    for section, name, value in ui.walkconfig(untrusted=untrusted):
-        value = str(value).replace('\n', '\\n')
-        sectname = section + '.' + name
-        if values:
-            for v in values:
-                if v == section:
-                    ui.debug('%s: ' %
-                             ui.configsource(section, name, untrusted))
-                    ui.write('%s=%s\n' % (sectname, value))
-                elif v == sectname:
-                    ui.debug('%s: ' %
-                             ui.configsource(section, name, untrusted))
-                    ui.write(value, '\n')
-        else:
-            ui.debug('%s: ' %
-                     ui.configsource(section, name, untrusted))
-            ui.write('%s=%s\n' % (sectname, value))
-
 @command('^status|st',
     [('A', 'all', None, _('show status of all files')),
     ('m', 'modified', None, _('show only modified files')),
@@ -5345,7 +5277,7 @@
       ! = missing (deleted by non-hg command, but still tracked)
       ? = not tracked
       I = ignored
-        = origin of the previous file listed as A (added)
+        = origin of the previous file (with --copies)
 
     .. container:: verbose
 
@@ -5553,38 +5485,82 @@
     cmdutil.summaryhooks(ui, repo)
 
     if opts.get('remote'):
-        t = []
+        needsincoming, needsoutgoing = True, True
+    else:
+        needsincoming, needsoutgoing = False, False
+        for i, o in cmdutil.summaryremotehooks(ui, repo, opts, None):
+            if i:
+                needsincoming = True
+            if o:
+                needsoutgoing = True
+        if not needsincoming and not needsoutgoing:
+            return
+
+    def getincoming():
         source, branches = hg.parseurl(ui.expandpath('default'))
         sbranch = branches[0]
-        other = hg.peer(repo, {}, source)
+        try:
+            other = hg.peer(repo, {}, source)
+        except error.RepoError:
+            if opts.get('remote'):
+                raise
+            return source, sbranch, None, None, None
         revs, checkout = hg.addbranchrevs(repo, other, branches, None)
         if revs:
             revs = [other.lookup(rev) for rev in revs]
         ui.debug('comparing with %s\n' % util.hidepassword(source))
         repo.ui.pushbuffer()
         commoninc = discovery.findcommonincoming(repo, other, heads=revs)
-        _common, incoming, _rheads = commoninc
         repo.ui.popbuffer()
-        if incoming:
-            t.append(_('1 or more incoming'))
-
+        return source, sbranch, other, commoninc, commoninc[1]
+
+    if needsincoming:
+        source, sbranch, sother, commoninc, incoming = getincoming()
+    else:
+        source = sbranch = sother = commoninc = incoming = None
+
+    def getoutgoing():
         dest, branches = hg.parseurl(ui.expandpath('default-push', 'default'))
         dbranch = branches[0]
         revs, checkout = hg.addbranchrevs(repo, repo, branches, None)
         if source != dest:
-            other = hg.peer(repo, {}, dest)
+            try:
+                dother = hg.peer(repo, {}, dest)
+            except error.RepoError:
+                if opts.get('remote'):
+                    raise
+                return dest, dbranch, None, None
             ui.debug('comparing with %s\n' % util.hidepassword(dest))
+        elif sother is None:
+            # there is no explicit destination peer, but source one is invalid
+            return dest, dbranch, None, None
+        else:
+            dother = sother
         if (source != dest or (sbranch is not None and sbranch != dbranch)):
-            commoninc = None
+            common = None
+        else:
+            common = commoninc
         if revs:
             revs = [repo.lookup(rev) for rev in revs]
         repo.ui.pushbuffer()
-        outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs,
-                                                commoninc=commoninc)
+        outgoing = discovery.findcommonoutgoing(repo, dother, onlyheads=revs,
+                                                commoninc=common)
         repo.ui.popbuffer()
+        return dest, dbranch, dother, outgoing
+
+    if needsoutgoing:
+        dest, dbranch, dother, outgoing = getoutgoing()
+    else:
+        dest = dbranch = dother = outgoing = None
+
+    if opts.get('remote'):
+        t = []
+        if incoming:
+            t.append(_('1 or more incoming'))
         o = outgoing.missing
         if o:
             t.append(_('%d outgoing') % len(o))
+        other = dother or sother
         if 'bookmarks' in other.listkeys('namespaces'):
             lmarks = repo.listkeys('bookmarks')
             rmarks = other.listkeys('bookmarks')
@@ -5602,6 +5578,10 @@
             # i18n: column positioning for "hg summary"
             ui.status(_('remote: (synced)\n'))
 
+    cmdutil.summaryremotehooks(ui, repo, opts,
+                               ((source, sbranch, sother, commoninc),
+                                (dest, dbranch, dother, outgoing)))
+
 @command('tag',
     [('f', 'force', None, _('force tag')),
     ('l', 'local', None, _('make the tag local')),
@@ -5788,8 +5768,9 @@
     try:
         for fname in fnames:
             f = hg.openpath(ui, fname)
-            gen = changegroup.readbundle(f, fname)
-            modheads = repo.addchangegroup(gen, 'unbundle', 'bundle:' + fname)
+            gen = exchange.readbundle(ui, f, fname)
+            modheads = changegroup.addchangegroup(repo, gen, 'unbundle',
+                                                  'bundle:' + fname)
     finally:
         lock.release()
     bookmarks.updatecurrentbookmark(repo, wc.node(), wc.branch())
@@ -5933,7 +5914,7 @@
 norepo = ("clone init version help debugcommands debugcomplete"
           " debugdate debuginstall debugfsinfo debugpushkey debugwireargs"
           " debugknown debuggetbundle debugbundle")
-optionalrepo = ("identify paths serve showconfig debugancestor debugdag"
+optionalrepo = ("identify paths serve config showconfig debugancestor debugdag"
                 " debugdata debugindex debugindexdot debugrevlog")
 inferrepo = ("add addremove annotate cat commit diff grep forget log parents"
              " remove resolve status debugwalk")
--- a/mercurial/commandserver.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/commandserver.py	Thu Apr 17 19:36:17 2014 -0400
@@ -142,11 +142,15 @@
             else:
                 logfile = open(logpath, 'a')
 
-        # the ui here is really the repo ui so take its baseui so we don't end
-        # up with its local configuration
-        self.ui = repo.baseui
-        self.repo = repo
-        self.repoui = repo.ui
+        if repo:
+            # the ui here is really the repo ui so take its baseui so we don't
+            # end up with its local configuration
+            self.ui = repo.baseui
+            self.repo = repo
+            self.repoui = repo.ui
+        else:
+            self.ui = ui
+            self.repo = self.repoui = None
 
         if mode == 'pipe':
             self.cerr = channeledoutput(sys.stderr, sys.stdout, 'e')
@@ -183,18 +187,18 @@
         # copy the uis so changes (e.g. --config or --verbose) don't
         # persist between requests
         copiedui = self.ui.copy()
-        self.repo.baseui = copiedui
-        # clone ui without using ui.copy because this is protected
-        repoui = self.repoui.__class__(self.repoui)
-        repoui.copy = copiedui.copy # redo copy protection
-        self.repo.ui = self.repo.dirstate._ui = repoui
-        self.repo.invalidate()
-        self.repo.invalidatedirstate()
+        if self.repo:
+            self.repo.baseui = copiedui
+            # clone ui without using ui.copy because this is protected
+            repoui = self.repoui.__class__(self.repoui)
+            repoui.copy = copiedui.copy # redo copy protection
+            self.repo.ui = self.repo.dirstate._ui = repoui
+            self.repo.invalidateall()
 
         req = dispatch.request(args[:], copiedui, self.repo, self.cin,
                                self.cout, self.cerr)
 
-        ret = dispatch.dispatch(req) or 0 # might return None
+        ret = (dispatch.dispatch(req) or 0) & 255 # might return None
 
         # restore old cwd
         if '--cwd' in args:
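
Together with the checkrepo() call dropped from the serve command earlier in
this patch, making repo optional here should allow a command server to be
started outside any repository, e.g.:

    $ hg serve --cmdserver pipe
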
--- a/mercurial/config.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/config.py	Thu Apr 17 19:36:17 2014 -0400
@@ -93,7 +93,8 @@
         if section not in self:
             self._data[section] = sortdict()
         self._data[section][item] = value
-        self._source[(section, item)] = source
+        if source:
+            self._source[(section, item)] = source
 
     def restore(self, data):
         """restore data returned by self.backup"""
--- a/mercurial/context.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/context.py	Thu Apr 17 19:36:17 2014 -0400
@@ -7,11 +7,12 @@
 
 from node import nullid, nullrev, short, hex, bin
 from i18n import _
-import ancestor, mdiff, error, util, scmutil, subrepo, patch, encoding, phases
+import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
 import match as matchmod
 import os, errno, stat
 import obsolete as obsmod
 import repoview
+import fileset
 
 propertycache = util.propertycache
 
@@ -79,6 +80,9 @@
     def mutable(self):
         return self.phase() > phases.public
 
+    def getfileset(self, expr):
+        return fileset.getfileset(self, expr)
+
     def obsolete(self):
         """True if the changeset is obsolete"""
         return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
@@ -392,14 +396,32 @@
 
     def ancestor(self, c2):
         """
-        return the ancestor context of self and c2
+        return the "best" ancestor context of self and c2
         """
         # deal with workingctxs
         n2 = c2._node
         if n2 is None:
             n2 = c2._parents[0]._node
-        n = self._repo.changelog.ancestor(self._node, n2)
-        return changectx(self._repo, n)
+        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
+        if not cahs:
+            anc = nullid
+        elif len(cahs) == 1:
+            anc = cahs[0]
+        else:
+            for r in self._repo.ui.configlist('merge', 'preferancestor'):
+                ctx = changectx(self._repo, r)
+                anc = ctx.node()
+                if anc in cahs:
+                    break
+            else:
+                anc = self._repo.changelog.ancestor(self._node, n2)
+            self._repo.ui.status(
+                (_("note: using %s as ancestor of %s and %s\n") %
+                 (short(anc), short(self._node), short(n2))) +
+                ''.join(_("      alternatively, use --config "
+                          "merge.preferancestor=%s\n") %
+                        short(n) for n in sorted(cahs) if n != anc))
+        return changectx(self._repo, anc)
 
     def descendant(self, other):
         """True if other is descendant of this changeset"""
@@ -429,8 +451,7 @@
             if fn in self._dirs:
                 # specified pattern is a directory
                 continue
-            if match.bad(fn, _('no such file in rev %s') % self) and match(fn):
-                yield fn
+            match.bad(fn, _('no such file in rev %s') % self)
 
 class basefilectx(object):
     """A filecontext object represents the common logic for its children:
@@ -684,55 +705,6 @@
 
         return zip(hist[base][0], hist[base][1].splitlines(True))
 
-    def ancestor(self, fc2, actx):
-        """
-        find the common ancestor file context, if any, of self, and fc2
-
-        actx must be the changectx of the common ancestor
-        of self's and fc2's respective changesets.
-        """
-
-        # the easy case: no (relevant) renames
-        if fc2.path() == self.path() and self.path() in actx:
-            return actx[self.path()]
-
-        # the next easiest cases: unambiguous predecessor (name trumps
-        # history)
-        if self.path() in actx and fc2.path() not in actx:
-            return actx[self.path()]
-        if fc2.path() in actx and self.path() not in actx:
-            return actx[fc2.path()]
-
-        # prime the ancestor cache for the working directory
-        acache = {}
-        for c in (self, fc2):
-            if c.filenode() is None:
-                pl = [(n.path(), n.filenode()) for n in c.parents()]
-                acache[(c._path, None)] = pl
-
-        flcache = {self._repopath:self._filelog, fc2._repopath:fc2._filelog}
-        def parents(vertex):
-            if vertex in acache:
-                return acache[vertex]
-            f, n = vertex
-            if f not in flcache:
-                flcache[f] = self._repo.file(f)
-            fl = flcache[f]
-            pl = [(f, p) for p in fl.parents(n) if p != nullid]
-            re = fl.renamed(n)
-            if re:
-                pl.append(re)
-            acache[vertex] = pl
-            return pl
-
-        a, b = (self._path, self._filenode), (fc2._path, fc2._filenode)
-        v = ancestor.genericancestor(a, b, parents)
-        if v:
-            f, n = v
-            return filectx(self._repo, f, fileid=n, filelog=flcache[f])
-
-        return None
-
     def ancestors(self, followfirst=False):
         visit = {}
         c = self
--- a/mercurial/copies.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/copies.py	Thu Apr 17 19:36:17 2014 -0400
@@ -228,9 +228,6 @@
     fullcopy = {}
     diverge = {}
 
-    def _checkcopies(f, m1, m2):
-        checkcopies(ctx, f, m1, m2, ca, limit, diverge, copy, fullcopy)
-
     repo.ui.debug("  searching for copies back to rev %d\n" % limit)
 
     u1 = _nonoverlap(m1, m2, ma)
@@ -244,9 +241,10 @@
                       % "\n   ".join(u2))
 
     for f in u1:
-        _checkcopies(f, m1, m2)
+        checkcopies(ctx, f, m1, m2, ca, limit, diverge, copy, fullcopy)
+
     for f in u2:
-        _checkcopies(f, m2, m1)
+        checkcopies(ctx, f, m2, m1, ca, limit, diverge, copy, fullcopy)
 
     renamedelete = {}
     renamedelete2 = set()
@@ -262,7 +260,19 @@
         else:
             diverge2.update(fl) # reverse map for below
 
-    if fullcopy:
+    bothnew = sorted([d for d in m1 if d in m2 and d not in ma])
+    if bothnew:
+        repo.ui.debug("  unmatched files new in both:\n   %s\n"
+                      % "\n   ".join(bothnew))
+    bothdiverge, _copy, _fullcopy = {}, {}, {}
+    for f in bothnew:
+        checkcopies(ctx, f, m1, m2, ca, limit, bothdiverge, _copy, _fullcopy)
+        checkcopies(ctx, f, m2, m1, ca, limit, bothdiverge, _copy, _fullcopy)
+    for of, fl in bothdiverge.items():
+        if len(fl) == 2 and fl[0] == fl[1]:
+            copy[fl[0]] = of # not actually divergent, just matching renames
+
+    if fullcopy and repo.ui.debugflag:
         repo.ui.debug("  all copies found (* = to merge, ! = divergent, "
                       "% = renamed and deleted):\n")
         for f in sorted(fullcopy):
--- a/mercurial/demandimport.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/demandimport.py	Thu Apr 17 19:36:17 2014 -0400
@@ -24,7 +24,7 @@
   b = __import__(a)
 '''
 
-import __builtin__
+import __builtin__, os
 _origimport = __import__
 
 nothing = object()
@@ -167,7 +167,8 @@
 
 def enable():
     "enable global demand-loading of modules"
-    __builtin__.__import__ = _demandimport
+    if os.environ.get('HGDEMANDIMPORT') != 'disable':
+        __builtin__.__import__ = _demandimport
 
 def disable():
     "disable global demand-loading of modules"
--- a/mercurial/dirstate.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/dirstate.py	Thu Apr 17 19:36:17 2014 -0400
@@ -4,7 +4,6 @@
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
-import errno
 
 from node import nullid
 from i18n import _
@@ -504,17 +503,13 @@
         if not self._dirty:
             return
         st = self._opener("dirstate", "w", atomictemp=True)
-
-        def finish(s):
-            st.write(s)
-            st.close()
-            self._lastnormaltime = 0
-            self._dirty = self._dirtypl = False
-
         # use the modification time of the newly created temporary file as the
         # filesystem's notion of 'now'
         now = util.fstat(st).st_mtime
-        finish(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
+        st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
+        st.close()
+        self._lastnormaltime = 0
+        self._dirty = self._dirtypl = False
 
     def _dirignore(self, f):
         if f == '.':
@@ -600,7 +595,7 @@
                 kind = getkind(st.st_mode)
                 if kind == dirkind:
                     if nf in dmap:
-                        #file deleted on disk but still in dirstate
+                        # file replaced by dir on disk but still in dirstate
                         results[nf] = None
                     if matchedir:
                         matchedir(nf)
@@ -611,10 +606,10 @@
                     badfn(ff, badtype(kind))
                     if nf in dmap:
                         results[nf] = None
-            except OSError, inst:
-                if nf in dmap: # does it exactly match a file?
+            except OSError, inst: # nf not found on disk - it is dirstate only
+                if nf in dmap: # does it exactly match a missing file?
                     results[nf] = None
-                else: # does it match a directory?
+                else: # does it match a missing directory?
                     prefix = nf + "/"
                     for fn in dmap:
                         if fn.startswith(prefix):
@@ -642,17 +637,14 @@
         # implementation doesn't use it at all. This satisfies the contract
         # because we only guarantee a "maybe".
 
-        def fwarn(f, msg):
-            self._ui.warn('%s: %s\n' % (self.pathto(f), msg))
-            return False
-
-        ignore = self._ignore
-        dirignore = self._dirignore
         if ignored:
             ignore = util.never
             dirignore = util.never
-        elif not unknown:
-            # if unknown and ignored are False, skip step 2
+        elif unknown:
+            ignore = self._ignore
+            dirignore = self._dirignore
+        else:
+            # if not unknown and not ignored, drop dir recursion and step 2
             ignore = util.always
             dirignore = util.always
 
@@ -699,7 +691,7 @@
                 entries = listdir(join(nd), stat=True, skip=skip)
             except OSError, inst:
                 if inst.errno in (errno.EACCES, errno.ENOENT):
-                    fwarn(nd, inst.strerror)
+                    match.bad(self.pathto(nd), inst.strerror)
                     continue
                 raise
             for f, kind, st in entries:
@@ -728,8 +720,11 @@
             del results[s]
         del results['.hg']
 
-        # step 3: report unseen items in the dmap hash
+        # step 3: visit remaining files from dmap
         if not skipstep3 and not exact:
+            # If a dmap file is not in results yet, it was either
+            # a) not matching matchfn, b) ignored, c) missing, or d) under a
+            # symlink directory.
             if not results and matchalways:
                 visit = dmap.keys()
             else:
@@ -737,9 +732,10 @@
             visit.sort()
 
             if unknown:
-                # unknown == True means we walked the full directory tree above.
-                # So if a file is not seen it was either a) not matching matchfn
-                # b) ignored, c) missing, or d) under a symlink directory.
+                # unknown == True means we walked all dirs under the roots
+                # that weren't ignored, and everything that matched was stat'ed
+                # and is already in results.
+                # The rest must thus be ignored or under a symlink.
                 audit_path = pathutil.pathauditor(self._root)
 
                 for nf in iter(visit):
@@ -748,15 +744,17 @@
                     if audit_path.check(nf):
                         try:
                             results[nf] = lstat(join(nf))
+                            # file was just ignored, no links, and exists
                         except OSError:
                             # file doesn't exist
                             results[nf] = None
                     else:
                         # It's either missing or under a symlink directory
+                        # which in this case we report as missing
                         results[nf] = None
             else:
                 # We may not have walked the full directory tree above,
-                # so stat everything we missed.
+                # so stat and check everything we missed.
                 nf = iter(visit).next
                 for st in util.statfiles([join(i) for i in visit]):
                     results[nf()] = st
--- a/mercurial/discovery.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/discovery.py	Thu Apr 17 19:36:17 2014 -0400
@@ -154,7 +154,7 @@
 
     - branch: the branch name
     - remoteheads: the list of remote heads known locally
-                   None is the branch is new
+                   None if the branch is new
     - newheads: the new remote heads (known locally) with outgoing pushed
     - unsyncedheads: the list of remote heads unknown locally.
     """
@@ -250,8 +250,7 @@
                          hint=_("use 'hg push --new-branch' to create"
                                 " new remote branches"))
 
-    # 2 compute newly pushed bookmarks. We
-    # we don't warned about bookmarked heads.
+    # 2. Compute newly pushed bookmarks. We don't warn about bookmarked heads.
     localbookmarks = repo._bookmarks
     remotebookmarks = remote.listkeys('bookmarks')
     bookmarkedheads = set()
@@ -269,23 +268,23 @@
     # If there are more heads after the push than before, a suitable
     # error message, depending on unsynced status, is displayed.
     error = None
-    unsynced = False
     allmissing = set(outgoing.missing)
     allfuturecommon = set(c.node() for c in repo.set('%ld', outgoing.common))
     allfuturecommon.update(allmissing)
     for branch, heads in sorted(headssum.iteritems()):
-        candidate_newhs = set(heads[1])
+        remoteheads, newheads, unsyncedheads = heads
+        candidate_newhs = set(newheads)
         # add unsynced data
-        if heads[0] is None:
+        if remoteheads is None:
             oldhs = set()
         else:
-            oldhs = set(heads[0])
-        oldhs.update(heads[2])
-        candidate_newhs.update(heads[2])
-        dhs = None
+            oldhs = set(remoteheads)
+        oldhs.update(unsyncedheads)
+        candidate_newhs.update(unsyncedheads)
+        dhs = None # delta heads, the new heads on branch
         discardedheads = set()
         if repo.obsstore:
-            # remove future heads which are actually obsolete by another
+            # remove future heads which are actually obsoleted by another
             # pushed element:
             #
             # XXX as above, There are several cases this case does not handle
@@ -297,8 +296,8 @@
             # (2) if the new heads have ancestors which are not obsolete and
             #     not ancestors of any other heads we will have a new head too.
             #
-            # This two case will be easy to handle for know changeset but much
-            # more tricky for unsynced changes.
+            # These two cases will be easy to handle for known changeset but
+            # much more tricky for unsynced changes.
             newhs = set()
             for nh in candidate_newhs:
                 if nh in repo and repo[nh].phase() <= phases.public:
@@ -312,10 +311,17 @@
                         newhs.add(nh)
         else:
             newhs = candidate_newhs
-        if [h for h in heads[2] if h not in discardedheads]:
-            unsynced = True
-        if heads[0] is None:
-            if 1 < len(newhs):
+        unsynced = sorted(h for h in unsyncedheads if h not in discardedheads)
+        if unsynced:
+            heads = ' '.join(short(h) for h in unsynced)
+            if branch is None:
+                repo.ui.status(_("remote has heads that are "
+                                 "not known locally: %s\n") % heads)
+            else:
+                repo.ui.status(_("remote has heads on branch '%s' that are "
+                                 "not known locally: %s\n") % (branch, heads))
+        if remoteheads is None:
+            if len(newhs) > 1:
                 dhs = list(newhs)
                 if error is None:
                     error = (_("push creates new branch '%s' "
@@ -324,7 +330,7 @@
                              " see \"hg help push\" for details about"
                              " pushing new heads")
         elif len(newhs) > len(oldhs):
-            # strip updates to existing remote heads from the new heads list
+            # remove bookmarked or existing remote heads from the new heads list
             dhs = sorted(newhs - bookmarkedheads - oldhs)
         if dhs:
             if error is None:
@@ -334,7 +340,7 @@
                 else:
                     error = _("push creates new remote head %s!"
                               ) % short(dhs[0])
-                if heads[2]: # unsynced
+                if unsyncedheads:
                     hint = _("pull and merge or"
                              " see \"hg help push\" for details about"
                              " pushing new heads")
@@ -350,7 +356,3 @@
                 repo.ui.note((" %s\n") % short(h))
     if error:
         raise util.Abort(error, hint=hint)
-
-    # 6. Check for unsynced changes on involved branches.
-    if unsynced:
-        repo.ui.warn(_("note: unsynced remote changes!\n"))
--- a/mercurial/dispatch.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/dispatch.py	Thu Apr 17 19:36:17 2014 -0400
@@ -40,7 +40,7 @@
         if not req.ui:
             req.ui = uimod.ui()
         if '--traceback' in req.args:
-            req.ui.setconfig('ui', 'traceback', 'on')
+            req.ui.setconfig('ui', 'traceback', 'on', '--traceback')
 
         # set ui streams from the request
         if req.fin:
@@ -103,8 +103,8 @@
             if req.repo:
                 # copy configs that were passed on the cmdline (--config) to
                 # the repo ui
-                for cfg in cfgs:
-                    req.repo.ui.setconfig(*cfg)
+                for sec, name, val in cfgs:
+                    req.repo.ui.setconfig(sec, name, val, source='--config')
 
             # if we are in HGPLAIN mode, then disable custom debugging
             debugger = ui.config("ui", "debugger")
@@ -522,7 +522,7 @@
             section, name = name.split('.', 1)
             if not section or not name:
                 raise IndexError
-            ui.setconfig(section, name, value)
+            ui.setconfig(section, name, value, '--config')
             configs.append((section, name, value))
         except (IndexError, ValueError):
             raise util.Abort(_('malformed --config option: %r '
@@ -739,19 +739,19 @@
         for opt in ('verbose', 'debug', 'quiet'):
             val = str(bool(options[opt]))
             for ui_ in uis:
-                ui_.setconfig('ui', opt, val)
+                ui_.setconfig('ui', opt, val, '--' + opt)
 
     if options['traceback']:
         for ui_ in uis:
-            ui_.setconfig('ui', 'traceback', 'on')
+            ui_.setconfig('ui', 'traceback', 'on', '--traceback')
 
     if options['noninteractive']:
         for ui_ in uis:
-            ui_.setconfig('ui', 'interactive', 'off')
+            ui_.setconfig('ui', 'interactive', 'off', '-y')
 
     if cmdoptions.get('insecure', False):
         for ui_ in uis:
-            ui_.setconfig('web', 'cacerts', '')
+            ui_.setconfig('web', 'cacerts', '', '--insecure')
 
     if options['version']:
         return commands.version_(ui)
@@ -777,7 +777,7 @@
                 repo = hg.repository(ui, path=path)
                 if not repo.local():
                     raise util.Abort(_("repository '%s' is not local") % path)
-                repo.ui.setconfig("bundle", "mainreporoot", repo.root)
+                repo.ui.setconfig("bundle", "mainreporoot", repo.root, 'repo')
             except error.RequirementError:
                 raise
             except error.RepoError:
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/exchange.py	Thu Apr 17 19:36:17 2014 -0400
@@ -0,0 +1,757 @@
+# exchange.py - utility to exchange data between repos.
+#
+# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from i18n import _
+from node import hex, nullid
+import errno, urllib
+import util, scmutil, changegroup, base85
+import discovery, phases, obsolete, bookmarks, bundle2
+
+def readbundle(ui, fh, fname, vfs=None):
+    header = changegroup.readexactly(fh, 4)
+
+    alg = None
+    if not fname:
+        fname = "stream"
+        if not header.startswith('HG') and header.startswith('\0'):
+            fh = changegroup.headerlessfixup(fh, header)
+            header = "HG10"
+            alg = 'UN'
+    elif vfs:
+        fname = vfs.join(fname)
+
+    magic, version = header[0:2], header[2:4]
+
+    if magic != 'HG':
+        raise util.Abort(_('%s: not a Mercurial bundle') % fname)
+    if version == '10':
+        if alg is None:
+            alg = changegroup.readexactly(fh, 2)
+        return changegroup.unbundle10(fh, alg)
+    elif version == '2X':
+        return bundle2.unbundle20(ui, fh, header=magic + version)
+    else:
+        raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
+
+
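+# Usage sketch (mirroring the unbundle change in commands.py above):
+#
+#     f = hg.openpath(ui, fname)
+#     gen = exchange.readbundle(ui, f, fname)
+#     modheads = changegroup.addchangegroup(repo, gen, 'unbundle',
+#                                           'bundle:' + fname)
+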
+class pushoperation(object):
+    """A object that represent a single push operation
+
+    It purpose is to carry push related state and very common operation.
+
+    A new should be created at the beginning of each push and discarded
+    afterward.
+    """
+
+    def __init__(self, repo, remote, force=False, revs=None, newbranch=False):
+        # repo we push from
+        self.repo = repo
+        self.ui = repo.ui
+        # repo we push to
+        self.remote = remote
+        # force option provided
+        self.force = force
+        # revs to be pushed (None is "all")
+        self.revs = revs
+        # allow push of new branch
+        self.newbranch = newbranch
+        # did a local lock get acquired?
+        self.locallocked = None
+        # Integer version of the push result
+        # - None means nothing to push
+        # - 0 means HTTP error
+        # - 1 means we pushed and remote head count is unchanged *or*
+        #   we have outgoing changesets but refused to push
+        # - other values as described by addchangegroup()
+        self.ret = None
+        # discovery.outgoing object (contains common and outgoing data)
+        self.outgoing = None
+        # all remote heads before the push
+        self.remoteheads = None
+        # testable as a boolean indicating if any nodes are missing locally.
+        self.incoming = None
+        # set of all heads common after changeset bundle push
+        self.commonheads = None
+
+def push(repo, remote, force=False, revs=None, newbranch=False):
+    '''Push outgoing changesets (limited by revs) from a local
+    repository to remote. Return an integer:
+      - None means nothing to push
+      - 0 means HTTP error
+      - 1 means we pushed and remote head count is unchanged *or*
+        we have outgoing changesets but refused to push
+      - other values as described by addchangegroup()
+    '''
+    pushop = pushoperation(repo, remote, force, revs, newbranch)
+    if pushop.remote.local():
+        missing = (set(pushop.repo.requirements)
+                   - pushop.remote.local().supported)
+        if missing:
+            msg = _("required features are not"
+                    " supported in the destination:"
+                    " %s") % (', '.join(sorted(missing)))
+            raise util.Abort(msg)
+
+    # there are two ways to push to remote repo:
+    #
+    # addchangegroup assumes local user can lock remote
+    # repo (local filesystem, old ssh servers).
+    #
+    # unbundle assumes local user cannot lock remote repo (new ssh
+    # servers, http servers).
+
+    if not pushop.remote.canpush():
+        raise util.Abort(_("destination does not support push"))
+    # get local lock as we might write phase data
+    locallock = None
+    try:
+        locallock = pushop.repo.lock()
+        pushop.locallocked = True
+    except IOError, err:
+        pushop.locallocked = False
+        if err.errno != errno.EACCES:
+            raise
+        # source repo cannot be locked.
+        # We do not abort the push, but just disable the local phase
+        # synchronisation.
+        msg = 'cannot lock source repository: %s\n' % err
+        pushop.ui.debug(msg)
+    try:
+        pushop.repo.checkpush(pushop)
+        lock = None
+        unbundle = pushop.remote.capable('unbundle')
+        if not unbundle:
+            lock = pushop.remote.lock()
+        try:
+            _pushdiscovery(pushop)
+            if _pushcheckoutgoing(pushop):
+                pushop.repo.prepushoutgoinghooks(pushop.repo,
+                                                 pushop.remote,
+                                                 pushop.outgoing)
+                if (pushop.repo.ui.configbool('experimental', 'bundle2-exp',
+                                              False)
+                    and pushop.remote.capable('bundle2-exp')):
+                    _pushbundle2(pushop)
+                else:
+                    _pushchangeset(pushop)
+            _pushcomputecommonheads(pushop)
+            _pushsyncphase(pushop)
+            _pushobsolete(pushop)
+        finally:
+            if lock is not None:
+                lock.release()
+    finally:
+        if locallock is not None:
+            locallock.release()
+
+    _pushbookmark(pushop)
+    return pushop.ret
+
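+# Caller sketch: push() is the module entry point; a caller is expected to
+# drive it roughly as
+#
+#     ret = exchange.push(repo, remote, force=False, revs=None,
+#                         newbranch=False)
+#
+# with remote typically obtained from hg.peer(), and ret following the
+# integer convention documented on pushoperation.ret above.
+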
+def _pushdiscovery(pushop):
+    # discovery
+    unfi = pushop.repo.unfiltered()
+    fci = discovery.findcommonincoming
+    commoninc = fci(unfi, pushop.remote, force=pushop.force)
+    common, inc, remoteheads = commoninc
+    fco = discovery.findcommonoutgoing
+    outgoing = fco(unfi, pushop.remote, onlyheads=pushop.revs,
+                   commoninc=commoninc, force=pushop.force)
+    pushop.outgoing = outgoing
+    pushop.remoteheads = remoteheads
+    pushop.incoming = inc
+
+def _pushcheckoutgoing(pushop):
+    outgoing = pushop.outgoing
+    unfi = pushop.repo.unfiltered()
+    if not outgoing.missing:
+        # nothing to push
+        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
+        return False
+    # something to push
+    if not pushop.force:
+        # if repo.obsstore is empty --> no obsolete changesets,
+        # so we can skip the iteration
+        if unfi.obsstore:
+            # these messages are defined here because of the 80-char limit
+            mso = _("push includes obsolete changeset: %s!")
+            mst = "push includes %s changeset: %s!"
+            # plain versions for i18n tool to detect them
+            _("push includes unstable changeset: %s!")
+            _("push includes bumped changeset: %s!")
+            _("push includes divergent changeset: %s!")
+            # If we push and there is at least one obsolete or
+            # unstable changeset in missing, then at least one of the
+            # missing heads will be obsolete or unstable. So checking
+            # heads only is ok.
+            for node in outgoing.missingheads:
+                ctx = unfi[node]
+                if ctx.obsolete():
+                    raise util.Abort(mso % ctx)
+                elif ctx.troubled():
+                    raise util.Abort(_(mst)
+                                     % (ctx.troubles()[0],
+                                        ctx))
+        newbm = pushop.ui.configlist('bookmarks', 'pushing')
+        discovery.checkheads(unfi, pushop.remote, outgoing,
+                             pushop.remoteheads,
+                             pushop.newbranch,
+                             bool(pushop.incoming),
+                             newbm)
+    return True
+
+def _pushbundle2(pushop):
+    """push data to the remote using bundle2
+
+    The only currently supported type of data is changegroup but this will
+    evolve in the future."""
+    # Send known head to the server for race detection.
+    capsblob = urllib.unquote(pushop.remote.capable('bundle2-exp'))
+    caps = bundle2.decodecaps(capsblob)
+    bundler = bundle2.bundle20(pushop.ui, caps)
+    # create reply capability
+    capsblob = bundle2.encodecaps(pushop.repo.bundle2caps)
+    bundler.addpart(bundle2.bundlepart('b2x:replycaps', data=capsblob))
+    if not pushop.force:
+        part = bundle2.bundlepart('B2X:CHECK:HEADS',
+                                  data=iter(pushop.remoteheads))
+        bundler.addpart(part)
+    extrainfo = _pushbundle2extraparts(pushop, bundler)
+    # add the changegroup bundle
+    cg = changegroup.getlocalbundle(pushop.repo, 'push', pushop.outgoing)
+    cgpart = bundle2.bundlepart('B2X:CHANGEGROUP', data=cg.getchunks())
+    bundler.addpart(cgpart)
+    stream = util.chunkbuffer(bundler.getchunks())
+    reply = pushop.remote.unbundle(stream, ['force'], 'push')
+    try:
+        op = bundle2.processbundle(pushop.repo, reply)
+    except KeyError, exc:
+        raise util.Abort('missing support for %s' % exc)
+    cgreplies = op.records.getreplies(cgpart.id)
+    assert len(cgreplies['changegroup']) == 1
+    pushop.ret = cgreplies['changegroup'][0]['return']
+    _pushbundle2extrareply(pushop, op, extrainfo)
+
+def _pushbundle2extraparts(pushop, bundler):
+    """hook function to let extensions add parts
+
+    Return a dict to let extensions pass data to the reply processing.
+    """
+    return {}
+
+def _pushbundle2extrareply(pushop, op, extrainfo):
+    """hook function to let extensions react to part replies
+
+    The dict from _pushbundle2extrareply is fed to this function.
+    """
+    pass
+
+def _pushchangeset(pushop):
+    """Make the actual push of changeset bundle to remote repo"""
+    outgoing = pushop.outgoing
+    unbundle = pushop.remote.capable('unbundle')
+    # TODO: get bundlecaps from remote
+    bundlecaps = None
+    # create a changegroup from local
+    if pushop.revs is None and not (outgoing.excluded
+                            or pushop.repo.changelog.filteredrevs):
+        # push everything,
+        # use the fast path, no race possible on push
+        bundler = changegroup.bundle10(pushop.repo, bundlecaps)
+        cg = changegroup.getsubset(pushop.repo,
+                                   outgoing,
+                                   bundler,
+                                   'push',
+                                   fastpath=True)
+    else:
+        cg = changegroup.getlocalbundle(pushop.repo, 'push', outgoing,
+                                        bundlecaps)
+
+    # apply changegroup to remote
+    if unbundle:
+        # local repo finds heads on server, finds out what
+        # revs it must push. Once revs are transferred, if the
+        # server finds it has different heads (someone else won
+        # commit/push race), server aborts.
+        if pushop.force:
+            remoteheads = ['force']
+        else:
+            remoteheads = pushop.remoteheads
+        # ssh: return remote's addchangegroup()
+        # http: return remote's addchangegroup() or 0 for error
+        pushop.ret = pushop.remote.unbundle(cg, remoteheads,
+                                            'push')
+    else:
+        # we return an integer indicating remote head count
+        # change
+        pushop.ret = pushop.remote.addchangegroup(cg, 'push', pushop.repo.url())
+
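
The unbundle-versus-addchangegroup split above is a capability-gated
fallback: prefer the newer wire command when the peer advertises it, keep
the legacy path otherwise. A toy sketch of the pattern, with hypothetical
peer classes standing in for real remotes::

    class oldpeer(object):
        caps = set()
        def capable(self, name):
            return name in self.caps
        def addchangegroup(self, cg, source, url):
            return 1  # integer head count change

    class newpeer(oldpeer):
        caps = set(['unbundle'])
        def unbundle(self, cg, heads, source):
            return 1  # remote addchangegroup() result

    def pushcg(peer, cg, remoteheads, force=False):
        if peer.capable('unbundle'):
            heads = ['force'] if force else remoteheads
            return peer.unbundle(cg, heads, 'push')
        return peer.addchangegroup(cg, 'push', 'http://example.org/repo')

    assert pushcg(newpeer(), 'cg', ['deadbeef']) == 1
    assert pushcg(oldpeer(), 'cg', ['deadbeef']) == 1
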
+def _pushcomputecommonheads(pushop):
+    unfi = pushop.repo.unfiltered()
+    if pushop.ret:
+        # push succeeded, synchronize target of the push
+        cheads = pushop.outgoing.missingheads
+    elif pushop.revs is None:
+        # All-out push failed; synchronize all common
+        cheads = pushop.outgoing.commonheads
+    else:
+        # I want cheads = heads(::missingheads and ::commonheads)
+        # (missingheads is revs with secret changesets filtered out)
+        #
+        # This can be expressed as:
+        #     cheads = ( (missingheads and ::commonheads)
+        #              + (commonheads and ::missingheads))
+        #
+        # while trying to push we already computed the following:
+        #     common = (::commonheads)
+        #     missing = ((commonheads::missingheads) - commonheads)
+        #
+        # We can pick:
+        # * missingheads part of common (::commonheads)
+        common = set(pushop.outgoing.common)
+        nm = pushop.repo.changelog.nodemap
+        cheads = [node for node in pushop.revs if nm[node] in common]
+        # and
+        # * commonheads parents on missing
+        revset = unfi.set('%ln and parents(roots(%ln))',
+                         pushop.outgoing.commonheads,
+                         pushop.outgoing.missing)
+        cheads.extend(c.node() for c in revset)
+    pushop.commonheads = cheads
+
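
The revset identities in the comments above can be sanity-checked on a toy
DAG with plain set operations. The sketch below assumes a bare
{node: parents} dict and involves none of Mercurial's revlog machinery::

    def ancestors(parents, node):
        """node plus all of its ancestors in a {node: parents} DAG"""
        seen, stack = set(), [node]
        while stack:
            n = stack.pop()
            if n not in seen:
                seen.add(n)
                stack.extend(parents.get(n, []))
        return seen

    def heads(parents, nodes):
        """members of nodes that no other member descends from"""
        nodes = set(nodes)
        inner = set()
        for n in nodes:
            inner |= ancestors(parents, n) - set([n])
        return nodes - inner

    # linear history a <- b <- c, with an extra head d forked off b
    parents = {'b': ['a'], 'c': ['b'], 'd': ['b']}
    common = ancestors(parents, 'b')            # ::commonheads, commonheads=[b]
    missing = ancestors(parents, 'c') - common  # (commonheads::missingheads) - common
    assert heads(parents, common | missing) == set(['c'])
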
+def _pushsyncphase(pushop):
+    """synchronise phase information locally and remotely"""
+    unfi = pushop.repo.unfiltered()
+    cheads = pushop.commonheads
+    if pushop.ret:
+        # push succeeded, synchronize target of the push
+        cheads = pushop.outgoing.missingheads
+    elif pushop.revs is None:
+        # All-out push failed; synchronize all common
+        cheads = pushop.outgoing.commonheads
+    else:
+        # I want cheads = heads(::missingheads and ::commonheads)
+        # (missingheads is revs with secret changesets filtered out)
+        #
+        # This can be expressed as:
+        #     cheads = ( (missingheads and ::commonheads)
+        #              + (commonheads and ::missingheads))
+        #
+        # while trying to push we already computed the following:
+        #     common = (::commonheads)
+        #     missing = ((commonheads::missingheads) - commonheads)
+        #
+        # We can pick:
+        # * missingheads part of common (::commonheads)
+        common = set(pushop.outgoing.common)
+        nm = pushop.repo.changelog.nodemap
+        cheads = [node for node in pushop.revs if nm[node] in common]
+        # and
+        # * commonheads parents on missing
+        revset = unfi.set('%ln and parents(roots(%ln))',
+                         pushop.outgoing.commonheads,
+                         pushop.outgoing.missing)
+        cheads.extend(c.node() for c in revset)
+    pushop.commonheads = cheads
+    # even when we don't push, exchanging phase data is useful
+    remotephases = pushop.remote.listkeys('phases')
+    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
+        and remotephases    # server supports phases
+        and pushop.ret is None # nothing was pushed
+        and remotephases.get('publishing', False)):
+        # When:
+        # - this is a subrepo push
+        # - and the remote supports phases
+        # - and no changeset was pushed
+        # - and the remote is publishing
+        # we may be in the issue 3871 case!
+        # We drop the courtesy phase synchronisation that would
+        # otherwise publish changesets which may still be draft
+        # on the remote.
+        remotephases = {'publishing': 'True'}
+    if not remotephases: # old server or public-only reply from non-publishing
+        _localphasemove(pushop, cheads)
+        # don't push any phase data as there is nothing to push
+    else:
+        ana = phases.analyzeremotephases(pushop.repo, cheads,
+                                         remotephases)
+        pheads, droots = ana
+        ### Apply remote phase on local
+        if remotephases.get('publishing', False):
+            _localphasemove(pushop, cheads)
+        else: # publish = False
+            _localphasemove(pushop, pheads)
+            _localphasemove(pushop, cheads, phases.draft)
+        ### Apply local phase on remote
+
+        # Get the list of all revs that are draft on the remote
+        # but public here.
+        # XXX Beware that the revset breaks if droots is not strictly
+        # XXX roots; we may want to ensure it is, but that is costly.
+        outdated = unfi.set('heads((%ln::%ln) and public())',
+                            droots, cheads)
+        for newremotehead in outdated:
+            r = pushop.remote.pushkey('phases',
+                                      newremotehead.hex(),
+                                      str(phases.draft),
+                                      str(phases.public))
+            if not r:
+                pushop.ui.warn(_('updating %s to public failed!\n')
+                                       % newremotehead)
+
+def _localphasemove(pushop, nodes, phase=phases.public):
+    """move <nodes> to <phase> in the local source repo"""
+    if pushop.locallocked:
+        phases.advanceboundary(pushop.repo, phase, nodes)
+    else:
+        # repo is not locked, do not change any phases!
+        # Inform the user that phases should have been moved when
+        # applicable.
+        actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
+        phasestr = phases.phasenames[phase]
+        if actualmoves:
+            pushop.ui.status(_('cannot lock source repo, skipping '
+                               'local %s phase update\n') % phasestr)
+
+def _pushobsolete(pushop):
+    """utility function to push obsolete markers to a remote"""
+    pushop.ui.debug('try to push obsolete markers to remote\n')
+    repo = pushop.repo
+    remote = pushop.remote
+    if (obsolete._enabled and repo.obsstore and
+        'obsolete' in remote.listkeys('namespaces')):
+        rslts = []
+        remotedata = repo.listkeys('obsolete')
+        for key in sorted(remotedata, reverse=True):
+            # reverse sort to ensure we end with dump0
+            data = remotedata[key]
+            rslts.append(remote.pushkey('obsolete', key, '', data))
+        if [r for r in rslts if not r]:
+            msg = _('failed to push some obsolete markers!\n')
+            repo.ui.warn(msg)
+
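
The marker exchange above is a plain pushkey loop: send every key
(reverse-sorted so dump0 goes last), record each boolean result, and warn
once if anything failed. The same pattern against a hypothetical in-memory
keystore::

    def pushkeys(store, data):
        """push every key; return the keys the store rejected"""
        failed = []
        for key in sorted(data, reverse=True):  # reverse sort: end with dump0
            if not store.pushkey(key, data[key]):
                failed.append(key)
        return failed

    class keystore(object):
        """toy store that refuses to overwrite, like a failing pushkey"""
        def __init__(self):
            self.data = {}
        def pushkey(self, key, value):
            if key in self.data:
                return False
            self.data[key] = value
            return True

    store = keystore()
    assert pushkeys(store, {'dump0': 'aa', 'dump1': 'bb'}) == []
    assert pushkeys(store, {'dump0': 'cc'}) == ['dump0']
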
+def _pushbookmark(pushop):
+    """Update bookmark position on remote"""
+    ui = pushop.ui
+    repo = pushop.repo.unfiltered()
+    remote = pushop.remote
+    ui.debug("checking for updated bookmarks\n")
+    revnums = map(repo.changelog.rev, pushop.revs or [])
+    ancestors = [a for a in repo.changelog.ancestors(revnums, inclusive=True)]
+    (addsrc, adddst, advsrc, advdst, diverge, differ, invalid
+     ) = bookmarks.compare(repo, repo._bookmarks, remote.listkeys('bookmarks'),
+                           srchex=hex)
+
+    for b, scid, dcid in advsrc:
+        if ancestors and repo[scid].rev() not in ancestors:
+            continue
+        if remote.pushkey('bookmarks', b, dcid, scid):
+            ui.status(_("updating bookmark %s\n") % b)
+        else:
+            ui.warn(_('updating bookmark %s failed!\n') % b)
+
+class pulloperation(object):
+    """A object that represent a single pull operation
+
+    It purpose is to carry push related state and very common operation.
+
+    A new should be created at the beginning of each pull and discarded
+    afterward.
+    """
+
+    def __init__(self, repo, remote, heads=None, force=False):
+        # repo we pull into
+        self.repo = repo
+        # repo we pull from
+        self.remote = remote
+        # revisions we try to pull (None means "all")
+        self.heads = heads
+        # do we force pull?
+        self.force = force
+        # the name of the pull transaction
+        self._trname = 'pull\n' + util.hidepassword(remote.url())
+        # hold the transaction once created
+        self._tr = None
+        # set of common changesets between local and remote before pull
+        self.common = None
+        # set of pulled heads
+        self.rheads = None
+        # list of missing changesets to fetch remotely
+        self.fetch = None
+        # result of changegroup pulling (used as return code by pull)
+        self.cgresult = None
+        # list of steps remaining to do (related to future bundle2 usage)
+        self.todosteps = set(['changegroup', 'phases', 'obsmarkers'])
+
+    @util.propertycache
+    def pulledsubset(self):
+        """heads of the set of changeset target by the pull"""
+        # compute target subset
+        if self.heads is None:
+            # We pulled everything possible
+            # sync on everything common
+            c = set(self.common)
+            ret = list(self.common)
+            for n in self.rheads:
+                if n not in c:
+                    ret.append(n)
+            return ret
+        else:
+            # We pulled a specific subset
+            # sync on this subset
+            return self.heads
+
+    def gettransaction(self):
+        """get appropriate pull transaction, creating it if needed"""
+        if self._tr is None:
+            self._tr = self.repo.transaction(self._trname)
+        return self._tr
+
+    def closetransaction(self):
+        """close transaction if created"""
+        if self._tr is not None:
+            self._tr.close()
+
+    def releasetransaction(self):
+        """release transaction if created"""
+        if self._tr is not None:
+            self._tr.release()
+
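
pulloperation shows a pattern this refactoring leans on: gather all per-run
state into one object, create the transaction lazily on first use, and pair
closetransaction with releasetransaction in a try/finally (release after a
successful close is a no-op). A stripped-down sketch with a toy transaction
instead of a repo::

    class toytransaction(object):
        def __init__(self):
            self.state = 'open'
        def close(self):
            self.state = 'committed'
        def release(self):
            if self.state == 'open':
                self.state = 'aborted'

    class operation(object):
        def __init__(self):
            self._tr = None
        def gettransaction(self):
            if self._tr is None:  # lazy: only opened if some step needs it
                self._tr = toytransaction()
            return self._tr
        def closetransaction(self):
            if self._tr is not None:
                self._tr.close()
        def releasetransaction(self):
            if self._tr is not None:
                self._tr.release()

    op = operation()
    try:
        op.gettransaction()   # some step pulled data
        op.closetransaction()
    finally:
        op.releasetransaction()
    assert op._tr.state == 'committed'
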
+def pull(repo, remote, heads=None, force=False):
+    pullop = pulloperation(repo, remote, heads, force)
+    if pullop.remote.local():
+        missing = set(pullop.remote.requirements) - pullop.repo.supported
+        if missing:
+            msg = _("required features are not"
+                    " supported in the destination:"
+                    " %s") % (', '.join(sorted(missing)))
+            raise util.Abort(msg)
+
+    lock = pullop.repo.lock()
+    try:
+        _pulldiscovery(pullop)
+        if (pullop.repo.ui.configbool('server', 'bundle2', False)
+            and pullop.remote.capable('bundle2-exp')):
+            _pullbundle2(pullop)
+        if 'changegroup' in pullop.todosteps:
+            _pullchangeset(pullop)
+        if 'phases' in pullop.todosteps:
+            _pullphase(pullop)
+        if 'obsmarkers' in pullop.todosteps:
+            _pullobsolete(pullop)
+        pullop.closetransaction()
+    finally:
+        pullop.releasetransaction()
+        lock.release()
+
+    return pullop.cgresult
+
+def _pulldiscovery(pullop):
+    """discovery phase for the pull
+
+    Currently handles changeset discovery only; it will change to handle all
+    discovery at some point."""
+    tmp = discovery.findcommonincoming(pullop.repo.unfiltered(),
+                                       pullop.remote,
+                                       heads=pullop.heads,
+                                       force=pullop.force)
+    pullop.common, pullop.fetch, pullop.rheads = tmp
+
+def _pullbundle2(pullop):
+    """pull data using bundle2
+
+    For now, the only supported data is the changegroup."""
+    kwargs = {'bundlecaps': set(['HG2X'])}
+    capsblob = bundle2.encodecaps(pullop.repo.bundle2caps)
+    kwargs['bundlecaps'].add('bundle2=' + urllib.quote(capsblob))
+    # pulling changegroup
+    pullop.todosteps.remove('changegroup')
+    if not pullop.fetch:
+        pullop.repo.ui.status(_("no changes found\n"))
+        pullop.cgresult = 0
+    else:
+        kwargs['common'] = pullop.common
+        kwargs['heads'] = pullop.heads or pullop.rheads
+        if pullop.heads is None and list(pullop.common) == [nullid]:
+            pullop.repo.ui.status(_("requesting all changes\n"))
+    _pullbundle2extraprepare(pullop, kwargs)
+    if kwargs.keys() == ['format']:
+        return # nothing to pull
+    bundle = pullop.remote.getbundle('pull', **kwargs)
+    try:
+        op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
+    except KeyError, exc:
+        raise util.Abort('missing support for %s' % exc)
+    assert len(op.records['changegroup']) == 1
+    pullop.cgresult = op.records['changegroup'][0]['return']
+
+def _pullbundle2extraprepare(pullop, kwargs):
+    """hook function so that extensions can extend the getbundle call"""
+    pass
+
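
Capabilities travel as a percent-quoted blob packed into a single
'bundle2=...' token so they survive the space-separated capability list.
encodecaps/decodecaps are Mercurial's own; the sketch below substitutes a
hypothetical one-capability-per-line encoding purely to show the quoting
round-trip::

    import urllib

    def encodecaps(caps):
        """hypothetical encoding: one 'name=v1,v2' line per capability"""
        lines = []
        for name, values in sorted(caps.items()):
            lines.append('%s=%s' % (name, ','.join(values)))
        return '\n'.join(lines)

    def decodecaps(blob):
        caps = {}
        for line in blob.splitlines():
            name, values = line.split('=', 1)
            caps[name] = values.split(',') if values else []
        return caps

    caps = {'HG2X': [], 'b2x:listkeys': ['phases', 'bookmarks']}
    token = 'bundle2=' + urllib.quote(encodecaps(caps))
    blob = urllib.unquote(token[len('bundle2='):])
    assert decodecaps(blob) == caps
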
+def _pullchangeset(pullop):
+    """pull changeset from unbundle into the local repo"""
+    # We delay opening the transaction as late as possible so we don't
+    # open one for nothing; opening it too early would break a future
+    # useful rollback call
+    pullop.todosteps.remove('changegroup')
+    if not pullop.fetch:
+        pullop.repo.ui.status(_("no changes found\n"))
+        pullop.cgresult = 0
+        return
+    pullop.gettransaction()
+    if pullop.heads is None and list(pullop.common) == [nullid]:
+        pullop.repo.ui.status(_("requesting all changes\n"))
+    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
+        # issue1320, avoid a race if remote changed after discovery
+        pullop.heads = pullop.rheads
+
+    if pullop.remote.capable('getbundle'):
+        # TODO: get bundlecaps from remote
+        cg = pullop.remote.getbundle('pull', common=pullop.common,
+                                     heads=pullop.heads or pullop.rheads)
+    elif pullop.heads is None:
+        cg = pullop.remote.changegroup(pullop.fetch, 'pull')
+    elif not pullop.remote.capable('changegroupsubset'):
+        raise util.Abort(_("partial pull cannot be done because "
+                                   "other repository doesn't support "
+                                   "changegroupsubset."))
+    else:
+        cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
+    pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
+                                                 pullop.remote.url())
+
+def _pullphase(pullop):
+    # Get remote phases data from remote
+    pullop.todosteps.remove('phases')
+    remotephases = pullop.remote.listkeys('phases')
+    publishing = bool(remotephases.get('publishing', False))
+    if remotephases and not publishing:
+        # remote is new and non-publishing
+        pheads, _dr = phases.analyzeremotephases(pullop.repo,
+                                                 pullop.pulledsubset,
+                                                 remotephases)
+        phases.advanceboundary(pullop.repo, phases.public, pheads)
+        phases.advanceboundary(pullop.repo, phases.draft,
+                               pullop.pulledsubset)
+    else:
+        # Remote is old or publishing; all common changesets
+        # should be seen as public
+        phases.advanceboundary(pullop.repo, phases.public,
+                               pullop.pulledsubset)
+
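
Both branches above rely on advanceboundary being monotonic: it only ever
moves changesets toward public, never back. A toy model, assuming
Mercurial's integer phase ordering (0 public < 1 draft < 2 secret) and
ignoring the ancestor propagation the real function performs::

    public, draft, secret = 0, 1, 2

    def advanceboundary(phaseof, targetphase, nodes):
        """lower (publish) phases, never raise them"""
        for n in nodes:
            if phaseof.get(n, draft) > targetphase:
                phaseof[n] = targetphase

    phaseof = {'a': draft, 'b': draft, 'c': secret}
    advanceboundary(phaseof, public, ['a'])  # remote reports 'a' public
    advanceboundary(phaseof, draft, ['c'])   # pulled subset is at most draft
    assert phaseof == {'a': public, 'b': draft, 'c': draft}
    advanceboundary(phaseof, draft, ['a'])   # never demotes public to draft
    assert phaseof['a'] == public
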
+def _pullobsolete(pullop):
+    """utility function to pull obsolete markers from a remote
+
+    The `gettransaction` function returns the pull transaction, creating one
+    if necessary. We return the transaction to inform the calling code that
+    a new transaction has been created (when applicable).
+
+    Exists mostly to allow overriding for experimentation purposes"""
+    pullop.todosteps.remove('obsmarkers')
+    tr = None
+    if obsolete._enabled:
+        pullop.repo.ui.debug('fetching remote obsolete markers\n')
+        remoteobs = pullop.remote.listkeys('obsolete')
+        if 'dump0' in remoteobs:
+            tr = pullop.gettransaction()
+            for key in sorted(remoteobs, reverse=True):
+                if key.startswith('dump'):
+                    data = base85.b85decode(remoteobs[key])
+                    pullop.repo.obsstore.mergemarkers(tr, data)
+            pullop.repo.invalidatevolatilesets()
+    return tr
+
+def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
+              **kwargs):
+    """return a full bundle (with potentially multiple kind of parts)
+
+    Could be a bundle HG10 or a bundle HG2X depending on bundlecaps
+    passed. For now, the bundle can contain only changegroup, but this will
+    changes when more part type will be available for bundle2.
+
+    This is different from changegroup.getbundle that only returns an HG10
+    changegroup bundle. They may eventually get reunited in the future when we
+    have a clearer idea of the API we what to query different data.
+
+    The implementation is at a very early stage and will get massive rework
+    when the API of bundle is refined.
+    """
+    # build bundle here.
+    cg = changegroup.getbundle(repo, source, heads=heads,
+                               common=common, bundlecaps=bundlecaps)
+    if bundlecaps is None or 'HG2X' not in bundlecaps:
+        return cg
+    # very crude first implementation,
+    # the bundle API will change and the generation will be done lazily.
+    b2caps = {}
+    for bcaps in bundlecaps:
+        if bcaps.startswith('bundle2='):
+            blob = urllib.unquote(bcaps[len('bundle2='):])
+            b2caps.update(bundle2.decodecaps(blob))
+    bundler = bundle2.bundle20(repo.ui, b2caps)
+    part = bundle2.bundlepart('b2x:changegroup', data=cg.getchunks())
+    bundler.addpart(part)
+    _getbundleextrapart(bundler, repo, source, heads=None, common=None,
+                        bundlecaps=None, **kwargs)
+    return util.chunkbuffer(bundler.getchunks())
+
+def _getbundleextrapart(bundler, repo, source, heads=None, common=None,
+                        bundlecaps=None, **kwargs):
+    """hook function to let extensions add parts to the requested bundle"""
+    pass
+
+class PushRaced(RuntimeError):
+    """An exception raised during unbundling that indicate a push race"""
+
+def check_heads(repo, their_heads, context):
+    """check if the heads of a repo have been modified
+
+    Used by peer for unbundling.
+    """
+    heads = repo.heads()
+    heads_hash = util.sha1(''.join(sorted(heads))).digest()
+    if not (their_heads == ['force'] or their_heads == heads or
+            their_heads == ['hashed', heads_hash]):
+        # someone else committed/pushed/unbundled while we
+        # were transferring data
+        raise PushRaced('repository changed while %s - '
+                        'please try again' % context)
+
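
This hash comparison is the entire race check: the client sends either the
exact head list or a digest of it, and the server recomputes the digest
over its current heads at unbundle time (util.sha1 is essentially
hashlib.sha1). The digest computation is reproducible with the stdlib
alone::

    import hashlib

    def headsdigest(heads):
        """digest of the sorted binary heads, as in check_heads"""
        return hashlib.sha1(b''.join(sorted(heads))).digest()

    serverheads = [b'\x11' * 20, b'\x22' * 20]
    clientview = ['hashed', headsdigest([b'\x22' * 20, b'\x11' * 20])]
    # same heads in any order -> same digest -> no PushRaced
    assert clientview[1] == headsdigest(serverheads)
    # a head added on the server while data was in flight changes the digest
    assert headsdigest(serverheads + [b'\x33' * 20]) != clientview[1]
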
+def unbundle(repo, cg, heads, source, url):
+    """Apply a bundle to a repo.
+
+    This function makes sure the repo is locked during the application and has
+    a mechanism to check that no push race occurred between the creation of
+    the bundle and its application.
+
+    If the push was raced, a PushRaced exception is raised."""
+    r = 0
+    # need a transaction when processing a bundle2 stream
+    tr = None
+    lock = repo.lock()
+    try:
+        check_heads(repo, heads, 'uploading changes')
+        # push can proceed
+        if util.safehasattr(cg, 'params'):
+            tr = repo.transaction('unbundle')
+            tr.hookargs['bundle2-exp'] = '1'
+            r = bundle2.processbundle(repo, cg, lambda: tr).reply
+            cl = repo.unfiltered().changelog
+            p = cl.writepending() and repo.root or ""
+            repo.hook('b2x-pretransactionclose', throw=True, source=source,
+                      url=url, pending=p, **tr.hookargs)
+            tr.close()
+            repo.hook('b2x-transactionclose', source=source, url=url,
+                      **tr.hookargs)
+        else:
+            r = changegroup.addchangegroup(repo, cg, source, url)
+    finally:
+        if tr is not None:
+            tr.release()
+        lock.release()
+    return r
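
The finally block above encodes an ordering worth spelling out: take the
lock, open the transaction, run the pre-close hook (which may still abort
everything), close, then release transaction and lock in reverse order. A
skeletal sketch with stand-in objects (none of this is the real repo
API)::

    events = []

    class toytr(object):
        closed = False
        def close(self):
            self.closed = True
            events.append('tr.close')
        def release(self):
            if not self.closed:  # abort only if never committed
                events.append('tr.abort')

    class toylock(object):
        def release(self):
            events.append('lock.release')

    def applybundle(hookfails=False):
        tr = None
        lock = toylock()
        try:
            tr = toytr()
            if hookfails:  # stands in for a failing pre-close hook
                raise RuntimeError('hook failed')
            tr.close()
        finally:
            if tr is not None:
                tr.release()
            lock.release()

    applybundle()
    assert events == ['tr.close', 'lock.release']
    del events[:]
    try:
        applybundle(hookfails=True)
    except RuntimeError:
        pass
    assert events == ['tr.abort', 'lock.release']
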
--- a/mercurial/extensions.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/extensions.py	Thu Apr 17 19:36:17 2014 -0400
@@ -11,7 +11,7 @@
 
 _extensions = {}
 _order = []
-_ignore = ['hbisect', 'bookmarks', 'parentrevspec', 'interhg']
+_ignore = ['hbisect', 'bookmarks', 'parentrevspec', 'interhg', 'inotify']
 
 def extensions(ui=None):
     if ui:
@@ -43,10 +43,10 @@
 
 def loadpath(path, module_name):
     module_name = module_name.replace('.', '_')
-    path = util.expandpath(path)
+    path = util.normpath(util.expandpath(path))
     if os.path.isdir(path):
         # module/__init__.py style
-        d, f = os.path.split(path.rstrip('/'))
+        d, f = os.path.split(path)
         fd, fpath, desc = imp.find_module(f, [d])
         return imp.load_module(module_name, fd, fpath, desc)
     else:
--- a/mercurial/filemerge.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/filemerge.py	Thu Apr 17 19:36:17 2014 -0400
@@ -248,20 +248,21 @@
         tool, toolpath, binary, symlink = toolconf
         a, b, c, back = files
         out = ""
-        env = dict(HG_FILE=fcd.path(),
-                   HG_MY_NODE=short(mynode),
-                   HG_OTHER_NODE=str(fco.changectx()),
-                   HG_BASE_NODE=str(fca.changectx()),
-                   HG_MY_ISLINK='l' in fcd.flags(),
-                   HG_OTHER_ISLINK='l' in fco.flags(),
-                   HG_BASE_ISLINK='l' in fca.flags())
+        env = {'HG_FILE': fcd.path(),
+               'HG_MY_NODE': short(mynode),
+               'HG_OTHER_NODE': str(fco.changectx()),
+               'HG_BASE_NODE': str(fca.changectx()),
+               'HG_MY_ISLINK': 'l' in fcd.flags(),
+               'HG_OTHER_ISLINK': 'l' in fco.flags(),
+               'HG_BASE_ISLINK': 'l' in fca.flags(),
+               }
 
         ui = repo.ui
 
         args = _toolstr(ui, tool, "args", '$local $base $other')
         if "$output" in args:
             out, a = a, back # read input from backup, write to original
-        replace = dict(local=a, base=b, other=c, output=out)
+        replace = {'local': a, 'base': b, 'other': c, 'output': out}
         args = util.interpolate(r'\$', replace, args,
                                 lambda s: util.shellquote(util.localpath(s)))
         r = util.system(toolpath + ' ' + args, cwd=repo.root, environ=env,
@@ -333,10 +334,10 @@
             if onfailure:
                 ui.warn(onfailure % fd)
         else:
-            os.unlink(back)
+            util.unlink(back)
 
-        os.unlink(b)
-        os.unlink(c)
+        util.unlink(b)
+        util.unlink(c)
         return r
 
     if not r and (_toolbool(ui, tool, "checkconflicts") or
@@ -367,10 +368,10 @@
         if onfailure:
             ui.warn(onfailure % fd)
     else:
-        os.unlink(back)
+        util.unlink(back)
 
-    os.unlink(b)
-    os.unlink(c)
+    util.unlink(b)
+    util.unlink(c)
     return r
 
 # tell hggettext to extract docstrings from these functions:
--- a/mercurial/graphmod.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/graphmod.py	Thu Apr 17 19:36:17 2014 -0400
@@ -34,10 +34,10 @@
         return
 
     cl = repo.changelog
-    lowestrev = min(revs)
+    lowestrev = revs.min()
     gpcache = {}
 
-    knownrevs = set(revs)
+    knownrevs = revs.set()
     for rev in revs:
         ctx = repo[rev]
         parents = sorted(set([p.rev() for p in ctx.parents()
--- a/mercurial/help.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/help.py	Thu Apr 17 19:36:17 2014 -0400
@@ -12,18 +12,21 @@
 import encoding, util, minirst
 import cmdutil
 
-def listexts(header, exts, indent=1):
+def listexts(header, exts, indent=1, showdeprecated=False):
     '''return a text listing of the given extensions'''
     rst = []
     if exts:
         rst.append('\n%s\n\n' % header)
         for name, desc in sorted(exts.iteritems()):
+            if '(DEPRECATED)' in desc and not showdeprecated:
+                continue
             rst.append('%s:%s: %s\n' % (' ' * indent, name, desc))
     return rst
 
 def extshelp():
     rst = loaddoc('extensions')().splitlines(True)
-    rst.extend(listexts(_('enabled extensions:'), extensions.enabled()))
+    rst.extend(listexts(
+        _('enabled extensions:'), extensions.enabled(), showdeprecated=True))
     rst.extend(listexts(_('disabled extensions:'), extensions.disabled()))
     doc = ''.join(rst)
     return doc
@@ -38,7 +41,7 @@
             shortopt, longopt, default, desc = option
             optlabel = _("VALUE") # default label
 
-        if _("DEPRECATED") in desc and not verbose:
+        if not verbose and ("DEPRECATED" in desc or _("DEPRECATED") in desc):
             continue
 
         so = ''
@@ -89,8 +92,6 @@
             results['topics'].append((names[0], header))
     import commands # avoid cycle
     for cmd, entry in commands.table.iteritems():
-        if cmd.startswith('debug'):
-            continue
         if len(entry) == 3:
             summary = entry[2]
         else:
@@ -308,6 +309,8 @@
         # list of commands
         if name == "shortlist":
             header = _('basic commands:\n\n')
+        elif name == "debug":
+            header = _('debug commands (internal and unsupported):\n\n')
         else:
             header = _('list of commands:\n\n')
 
@@ -323,7 +326,7 @@
             if name == "shortlist" and not f.startswith("^"):
                 continue
             f = f.lstrip("^")
-            if not ui.debugflag and f.startswith("debug"):
+            if not ui.debugflag and f.startswith("debug") and name != "debug":
                 continue
             doc = e[0].__doc__
             if doc and 'DEPRECATED' in doc and not ui.verbose:
--- a/mercurial/help/config.txt	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/help/config.txt	Thu Apr 17 19:36:17 2014 -0400
@@ -85,7 +85,9 @@
     be read.  Mercurial checks each of these locations in the specified
     order until one or more configuration files are detected.
 
-.. note:: The registry key ``HKEY_LOCAL_MACHINE\SOFTWARE\Wow6432Node\Mercurial``
+.. note::
+
+   The registry key ``HKEY_LOCAL_MACHINE\SOFTWARE\Wow6432Node\Mercurial``
    is used when running 32-bit Python on 64-bit Windows.
 
 Syntax
@@ -204,7 +206,9 @@
 
     stable5 = latest -b stable
 
-.. note:: It is possible to create aliases with the same names as
+.. note::
+
+   It is possible to create aliases with the same names as
    existing commands, which will then override the original
    definitions. This is almost always a bad idea!
 
@@ -235,7 +239,9 @@
 ``$HG_ARGS`` expands to the arguments given to Mercurial. In the ``hg
 echo foo`` call above, ``$HG_ARGS`` would expand to ``echo foo``.
 
-.. note:: Some global configuration options such as ``-R`` are
+.. note::
+
+   Some global configuration options such as ``-R`` are
    processed before shell aliases and will thus not be passed to
    aliases.
 
@@ -362,7 +368,9 @@
 of an empty temporary file, where the filtered data must be written by
 the command.
 
-.. note:: The tempfile mechanism is recommended for Windows systems,
+.. note::
+
+   The tempfile mechanism is recommended for Windows systems,
    where the standard shell I/O redirection operators often have
    strange effects and may corrupt the contents of your files.
 
@@ -708,13 +716,17 @@
   in ``$HG_PARENT2``. If the update succeeded, ``$HG_ERROR=0``. If the
   update failed (e.g. because conflicts not resolved), ``$HG_ERROR=1``.
 
-.. note:: It is generally better to use standard hooks rather than the
+.. note::
+
+   It is generally better to use standard hooks rather than the
    generic pre- and post- command hooks as they are guaranteed to be
    called in the appropriate contexts for influencing transactions.
    Also, hooks like "commit" will be called in all contexts that
    generate a commit (e.g. tag) and not just the commit command.
 
-.. note:: Environment variables with empty values may not be passed to
+.. note::
+
+   Environment variables with empty values may not be passed to
    hooks on platforms such as Windows. As an example, ``$HG_PARENT2``
    will have an empty value under Unix-like platforms for non-merge
    changesets, while it will not be available at all under Windows.
--- a/mercurial/help/hgignore.txt	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/help/hgignore.txt	Thu Apr 17 19:36:17 2014 -0400
@@ -69,6 +69,7 @@
 regexp pattern, start it with ``^``.
 
 .. note::
+
   Patterns specified in other than ``.hgignore`` are always rooted.
   Please see :hg:`help patterns` for details.
 
--- a/mercurial/help/merge-tools.txt	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/help/merge-tools.txt	Thu Apr 17 19:36:17 2014 -0400
@@ -73,6 +73,7 @@
 8. The merge of the file fails and must be resolved before commit.
 
 .. note::
+
    After selecting a merge program, Mercurial will by default attempt
    to merge the files using a simple merge algorithm first. Only if it doesn't
    succeed because of conflicting changes Mercurial will actually execute the
--- a/mercurial/help/patterns.txt	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/help/patterns.txt	Thu Apr 17 19:36:17 2014 -0400
@@ -7,6 +7,7 @@
 Alternate pattern notations must be specified explicitly.
 
 .. note::
+
   Patterns specified in ``.hgignore`` are not rooted.
   Please see :hg:`help hgignore` for details.
 
--- a/mercurial/help/phases.txt	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/help/phases.txt	Thu Apr 17 19:36:17 2014 -0400
@@ -42,6 +42,7 @@
  - secret changesets are neither pushed, pulled, or cloned
 
 .. note::
+
   Pulling a draft changeset from a publishing server does not mark it
   as public on the server side due to the read-only nature of pull.
 
@@ -55,10 +56,12 @@
 See :hg:`help config` for more information on configuration files.
 
 .. note::
+
   Servers running older versions of Mercurial are treated as
   publishing.
 
 .. note::
+
    Changesets in secret phase are not exchanged with the server. This
    applies to their content: file names, file contents, and changeset
    metadata. For technical reasons, the identifier (e.g. d825e4025e39)
--- a/mercurial/help/subrepos.txt	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/help/subrepos.txt	Thu Apr 17 19:36:17 2014 -0400
@@ -39,6 +39,7 @@
    repositories states when committing in the parent repository.
 
    .. note::
+
       The ``.hgsubstate`` file should not be edited manually.
 
 
@@ -83,6 +84,9 @@
 :archive: archive does not recurse in subrepositories unless
     -S/--subrepos is specified.
 
+:cat: cat currently only handles exact file matches in subrepos.
+    Git and Subversion subrepositories are currently ignored.
+
 :commit: commit creates a consistent snapshot of the state of the
     entire project and its subrepositories. If any subrepositories
     have been modified, Mercurial will abort.  Mercurial can be made
--- a/mercurial/help/templates.txt	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/help/templates.txt	Thu Apr 17 19:36:17 2014 -0400
@@ -52,14 +52,20 @@
 
 - if(expr, then[, else])
 
+- ifcontains(expr, expr, then[, else])
+
 - ifeq(expr, expr, then[, else])
 
 - join(list, sep)
 
 - label(label, expr)
 
+- revset(query[, formatargs])
+
 - rstdoc(text, style)
 
+- shortest(node)
+
 - strip(text[, chars])
 
 - sub(pat, repl, expr)
@@ -106,3 +112,11 @@
 - Display the contents of the 'extra' field, one per line::
 
    $ hg log -r 0 --template "{join(extras, '\n')}\n"
+
+- Mark the current bookmark with '*'::
+
+   $ hg log --template "{bookmarks % '{bookmark}{ifeq(bookmark, current, \"*\")} '}\n"
+
+- Mark the working copy parent with '@'::
+
+   $ hg log --template "{ifcontains(rev, revset('.'), '@')}\n"
--- a/mercurial/hg.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/hg.py	Thu Apr 17 19:36:17 2014 -0400
@@ -129,8 +129,25 @@
     return _peerorrepo(rui, path, create).peer()
 
 def defaultdest(source):
-    '''return default destination of clone if none is given'''
-    return os.path.basename(os.path.normpath(util.url(source).path or ''))
+    '''return default destination of clone if none is given
+
+    >>> defaultdest('foo')
+    'foo'
+    >>> defaultdest('/foo/bar')
+    'bar'
+    >>> defaultdest('/')
+    ''
+    >>> defaultdest('')
+    ''
+    >>> defaultdest('http://example.org/')
+    ''
+    >>> defaultdest('http://example.org/foo/')
+    'foo'
+    '''
+    path = util.url(source).path
+    if not path:
+        return ''
+    return os.path.basename(os.path.normpath(path))
 
 def share(ui, source, dest=None, update=True):
     '''create a shared repository'''
@@ -284,7 +301,8 @@
 
     if dest is None:
         dest = defaultdest(source)
-        ui.status(_("destination directory: %s\n") % dest)
+        if dest:
+            ui.status(_("destination directory: %s\n") % dest)
     else:
         dest = ui.expandpath(dest)
 
@@ -413,7 +431,7 @@
             fp.write("default = %s\n" % defaulturl)
             fp.close()
 
-            destrepo.ui.setconfig('paths', 'default', defaulturl)
+            destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')
 
             if update:
                 if update is not True:
@@ -567,8 +585,7 @@
     o = outgoing.missing
     if not o:
         scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
-        return None
-    return o
+    return o, other
 
 def outgoing(ui, repo, dest, opts):
     def recurse():
@@ -581,8 +598,9 @@
         return ret
 
     limit = cmdutil.loglimit(opts)
-    o = _outgoing(ui, repo, dest, opts)
-    if o is None:
+    o, other = _outgoing(ui, repo, dest, opts)
+    if not o:
+        cmdutil.outgoinghooks(ui, repo, other, opts, o)
         return recurse()
 
     if opts.get('newest_first'):
@@ -598,6 +616,7 @@
         count += 1
         displayer.show(repo[n])
     displayer.close()
+    cmdutil.outgoinghooks(ui, repo, other, opts, o)
     recurse()
     return 0 # exit code is zero since we found outgoing changes
 
@@ -621,19 +640,19 @@
     for o in 'ssh', 'remotecmd':
         v = opts.get(o) or src.config('ui', o)
         if v:
-            dst.setconfig("ui", o, v)
+            dst.setconfig("ui", o, v, 'copied')
 
     # copy bundle-specific options
     r = src.config('bundle', 'mainreporoot')
     if r:
-        dst.setconfig('bundle', 'mainreporoot', r)
+        dst.setconfig('bundle', 'mainreporoot', r, 'copied')
 
     # copy selected local settings to the remote ui
     for sect in ('auth', 'hostfingerprints', 'http_proxy'):
         for key, val in src.configitems(sect):
-            dst.setconfig(sect, key, val)
+            dst.setconfig(sect, key, val, 'copied')
     v = src.config('web', 'cacerts')
     if v:
-        dst.setconfig('web', 'cacerts', util.expandpath(v))
+        dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')
 
     return dst
--- a/mercurial/hgweb/hgweb_mod.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/hgweb/hgweb_mod.py	Thu Apr 17 19:36:17 2014 -0400
@@ -64,10 +64,10 @@
             r = repo
 
         r = self._getview(r)
-        r.ui.setconfig('ui', 'report_untrusted', 'off')
-        r.baseui.setconfig('ui', 'report_untrusted', 'off')
-        r.ui.setconfig('ui', 'nontty', 'true')
-        r.baseui.setconfig('ui', 'nontty', 'true')
+        r.ui.setconfig('ui', 'report_untrusted', 'off', 'hgweb')
+        r.baseui.setconfig('ui', 'report_untrusted', 'off', 'hgweb')
+        r.ui.setconfig('ui', 'nontty', 'true', 'hgweb')
+        r.baseui.setconfig('ui', 'nontty', 'true', 'hgweb')
         self.repo = r
         hook.redirect(True)
         self.mtime = -1
--- a/mercurial/hgweb/hgwebdir_mod.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/hgweb/hgwebdir_mod.py	Thu Apr 17 19:36:17 2014 -0400
@@ -96,8 +96,8 @@
             u = self.baseui.copy()
         else:
             u = ui.ui()
-            u.setconfig('ui', 'report_untrusted', 'off')
-            u.setconfig('ui', 'nontty', 'true')
+            u.setconfig('ui', 'report_untrusted', 'off', 'hgwebdir')
+            u.setconfig('ui', 'nontty', 'true', 'hgwebdir')
 
         if not isinstance(self.conf, (dict, list, tuple)):
             map = {'paths': 'hgweb-paths'}
@@ -308,17 +308,17 @@
 
                     # add '/' to the name to make it obvious that
                     # the entry is a directory, not a regular repository
-                    row = dict(contact="",
-                               contact_sort="",
-                               name=name + '/',
-                               name_sort=name,
-                               url=url,
-                               description="",
-                               description_sort="",
-                               lastchange=d,
-                               lastchange_sort=d[1]-d[0],
-                               archives=[],
-                               isdirectory=True)
+                    row = {'contact': "",
+                           'contact_sort': "",
+                           'name': name + '/',
+                           'name_sort': name,
+                           'url': url,
+                           'description': "",
+                           'description_sort': "",
+                           'lastchange': d,
+                           'lastchange_sort': d[1]-d[0],
+                           'archives': [],
+                           'isdirectory': True}
 
                     seendirs.add(name)
                     yield row
@@ -356,17 +356,18 @@
                 contact = get_contact(get)
                 description = get("web", "description", "")
                 name = get("web", "name", name)
-                row = dict(contact=contact or "unknown",
-                           contact_sort=contact.upper() or "unknown",
-                           name=name,
-                           name_sort=name,
-                           url=url,
-                           description=description or "unknown",
-                           description_sort=description.upper() or "unknown",
-                           lastchange=d,
-                           lastchange_sort=d[1]-d[0],
-                           archives=archivelist(u, "tip", url),
-                           isdirectory=None)
+                row = {'contact': contact or "unknown",
+                       'contact_sort': contact.upper() or "unknown",
+                       'name': name,
+                       'name_sort': name,
+                       'url': url,
+                       'description': description or "unknown",
+                       'description_sort': description.upper() or "unknown",
+                       'lastchange': d,
+                       'lastchange_sort': d[1]-d[0],
+                       'archives': archivelist(u, "tip", url),
+                       'isdirectory': None,
+                       }
 
                 seenrepos.add(name)
                 yield row
--- a/mercurial/hgweb/protocol.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/hgweb/protocol.py	Thu Apr 17 19:36:17 2014 -0400
@@ -12,7 +12,7 @@
 HGTYPE = 'application/mercurial-0.1'
 HGERRTYPE = 'application/hg-error'
 
-class webproto(object):
+class webproto(wireproto.abstractserverproto):
     def __init__(self, req, ui):
         self.req = req
         self.response = ''
--- a/mercurial/hgweb/webcommands.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/hgweb/webcommands.py	Thu Apr 17 19:36:17 2014 -0400
@@ -8,7 +8,7 @@
 import os, mimetypes, re, cgi, copy
 import webutil
 from mercurial import error, encoding, archival, templater, templatefilters
-from mercurial.node import short, hex, nullid
+from mercurial.node import short, hex
 from mercurial import util
 from common import paritygen, staticfile, get_contact, ErrorResponse
 from common import HTTP_OK, HTTP_FORBIDDEN, HTTP_NOT_FOUND
@@ -187,7 +187,7 @@
 
         mfunc = revset.match(web.repo.ui, revdef)
         try:
-            revs = mfunc(web.repo, list(web.repo))
+            revs = mfunc(web.repo, revset.baseset(web.repo))
             return MODE_REVSET, revs
             # ParseError: wrongly placed tokens, wrongs arguments, etc
             # RepoLookupError: no such revision, e.g. in 'revision:'
@@ -712,28 +712,22 @@
             return [_('(binary file %s, hash: %s)') % (mt, hex(f.filenode()))]
         return f.data().splitlines()
 
+    parent = ctx.p1()
+    leftrev = parent.rev()
+    leftnode = parent.node()
+    rightrev = ctx.rev()
+    rightnode = ctx.node()
     if path in ctx:
         fctx = ctx[path]
-        rightrev = fctx.filerev()
-        rightnode = fctx.filenode()
         rightlines = filelines(fctx)
-        parents = fctx.parents()
-        if not parents:
-            leftrev = -1
-            leftnode = nullid
+        if path not in parent:
             leftlines = ()
         else:
-            pfctx = parents[0]
-            leftrev = pfctx.filerev()
-            leftnode = pfctx.filenode()
+            pfctx = parent[path]
             leftlines = filelines(pfctx)
     else:
-        rightrev = -1
-        rightnode = nullid
         rightlines = ()
         fctx = ctx.parents()[0][path]
-        leftrev = fctx.filerev()
-        leftnode = fctx.filenode()
         leftlines = filelines(fctx)
 
     comparison = webutil.compare(tmpl, context, leftlines, rightlines)
@@ -982,7 +976,11 @@
             if len(revs) >= revcount:
                 break
 
-        dag = graphmod.dagwalker(web.repo, revs)
+        # We have to feed a baseset to dagwalker as it is expecting a
+        # smartset object. This does not have a big impact on hgweb
+        # performance itself since hgweb graphing code is not itself lazy yet.
+        dag = graphmod.dagwalker(web.repo, revset.baseset(revs))
+        # As we said one line above... not lazy.
         tree = list(graphmod.colored(dag, web.repo))
 
     def getcolumns(tree):
@@ -1018,26 +1016,26 @@
                              [cgi.escape(x) for x in ctx.tags()],
                              [cgi.escape(x) for x in ctx.bookmarks()]))
             else:
-                edgedata = [dict(col=edge[0], nextcol=edge[1],
-                                 color=(edge[2] - 1) % 6 + 1,
-                                 width=edge[3], bcolor=edge[4])
+                edgedata = [{'col': edge[0], 'nextcol': edge[1],
+                             'color': (edge[2] - 1) % 6 + 1,
+                             'width': edge[3], 'bcolor': edge[4]}
                             for edge in edges]
 
                 data.append(
-                    dict(node=node,
-                         col=vtx[0],
-                         color=(vtx[1] - 1) % 6 + 1,
-                         edges=edgedata,
-                         row=row,
-                         nextrow=row + 1,
-                         desc=desc,
-                         user=user,
-                         age=age,
-                         bookmarks=webutil.nodebookmarksdict(
-                            web.repo, ctx.node()),
-                         branches=webutil.nodebranchdict(web.repo, ctx),
-                         inbranch=webutil.nodeinbranch(web.repo, ctx),
-                         tags=webutil.nodetagsdict(web.repo, ctx.node())))
+                    {'node': node,
+                     'col': vtx[0],
+                     'color': (vtx[1] - 1) % 6 + 1,
+                     'edges': edgedata,
+                     'row': row,
+                     'nextrow': row + 1,
+                     'desc': desc,
+                     'user': user,
+                     'age': age,
+                     'bookmarks': webutil.nodebookmarksdict(
+                         web.repo, ctx.node()),
+                     'branches': webutil.nodebranchdict(web.repo, ctx),
+                     'inbranch': webutil.nodeinbranch(web.repo, ctx),
+                     'tags': webutil.nodetagsdict(web.repo, ctx.node())})
 
             row += 1
 
--- a/mercurial/hgweb/webutil.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/hgweb/webutil.py	Thu Apr 17 19:36:17 2014 -0400
@@ -7,7 +7,7 @@
 # GNU General Public License version 2 or any later version.
 
 import os, copy
-from mercurial import match, patch, error, ui, util, pathutil
+from mercurial import match, patch, error, ui, util, pathutil, context
 from mercurial.i18n import _
 from mercurial.node import hex, nullid
 from common import ErrorResponse
@@ -138,6 +138,9 @@
         yield d
 
 def parents(ctx, hide=None):
+    if (isinstance(ctx, context.basefilectx) and
+        ctx.changectx().rev() != ctx.linkrev()):
+        return _siblings([ctx._repo[ctx.linkrev()]], hide)
     return _siblings(ctx.parents(), hide)
 
 def children(ctx, hide=None):
@@ -146,7 +149,7 @@
 def renamelink(fctx):
     r = fctx.renamed()
     if r:
-        return [dict(file=r[0], node=hex(r[1]))]
+        return [{'file': r[0], 'node': hex(r[1])}]
     return []
 
 def nodetagsdict(repo, node):
--- a/mercurial/hook.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/hook.py	Thu Apr 17 19:36:17 2014 -0400
@@ -6,7 +6,7 @@
 # GNU General Public License version 2 or any later version.
 
 from i18n import _
-import os, sys, time, types
+import os, sys, time
 import extensions, util, demandimport
 
 def _pythonhook(ui, repo, name, hname, funcname, args, throw):
@@ -19,11 +19,10 @@
     unmodified commands (e.g. mercurial.commands.update) can
     be run as hooks without wrappers to convert return values.'''
 
-    ui.note(_("calling hook %s: %s\n") % (hname, funcname))
-    starttime = time.time()
-
-    obj = funcname
-    if not util.safehasattr(obj, '__call__'):
+    if util.safehasattr(funcname, '__call__'):
+        obj = funcname
+        funcname = obj.__module__ + "." + obj.__name__
+    else:
         d = funcname.rfind('.')
         if d == -1:
             raise util.Abort(_('%s hook is invalid ("%s" not in '
@@ -75,6 +74,10 @@
             raise util.Abort(_('%s hook is invalid '
                                '("%s" is not callable)') %
                              (hname, funcname))
+
+    ui.note(_("calling hook %s: %s\n") % (hname, funcname))
+    starttime = time.time()
+
     try:
         try:
             # redirect IO descriptors to the ui descriptors so hooks
@@ -100,11 +103,8 @@
     finally:
         sys.stdout, sys.stderr, sys.stdin = old
         duration = time.time() - starttime
-        readablefunc = funcname
-        if isinstance(funcname, types.FunctionType):
-            readablefunc = funcname.__module__ + "." + funcname.__name__
         ui.log('pythonhook', 'pythonhook-%s: %s finished in %0.2f seconds\n',
-               name, readablefunc, duration)
+               name, funcname, duration)
     if r:
         if throw:
             raise util.Abort(_('%s hook failed') % hname)
--- a/mercurial/httppeer.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/httppeer.py	Thu Apr 17 19:36:17 2014 -0400
@@ -8,6 +8,7 @@
 
 from node import nullid
 from i18n import _
+import tempfile
 import changegroup, statichttprepo, error, httpconnection, url, util, wireproto
 import os, urllib, urllib2, zlib, httplib
 import errno, socket
@@ -211,10 +212,29 @@
             fp.close()
             os.unlink(tempname)
 
-    def _abort(self, exception):
-        raise exception
+    def _calltwowaystream(self, cmd, fp, **args):
+        fh = None
+        filename = None
+        try:
+            # dump bundle to disk
+            fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
+            fh = os.fdopen(fd, "wb")
+            d = fp.read(4096)
+            while d:
+                fh.write(d)
+                d = fp.read(4096)
+            fh.close()
+            # start http push
+            fp = httpconnection.httpsendfile(self.ui, filename, "rb")
+            headers = {'Content-Type': 'application/mercurial-0.1'}
+            return self._callstream(cmd, data=fp, headers=headers, **args)
+        finally:
+            if fh is not None:
+                fh.close()
+                os.unlink(filename)
 
-    def _decompress(self, stream):
+    def _callcompressable(self, cmd, **args):
+        stream = self._callstream(cmd, **args)
         return util.chunkbuffer(zgenerator(stream))
 
 class httpspeer(httppeer):
--- a/mercurial/localrepo.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/localrepo.py	Thu Apr 17 19:36:17 2014 -0400
@@ -6,10 +6,11 @@
 # GNU General Public License version 2 or any later version.
 from node import hex, nullid, short
 from i18n import _
-import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview
+import urllib
+import peer, changegroup, subrepo, pushkey, obsolete, repoview
 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
 import lock as lockmod
-import transaction, store, encoding
+import transaction, store, encoding, exchange, bundle2
 import scmutil, util, extensions, hook, error, revset
 import match as matchmod
 import merge as mergemod
@@ -62,13 +63,14 @@
         return orig(repo.unfiltered(), *args, **kwargs)
     return wrapper
 
-MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
-LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
+moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
+                  'unbundle'))
+legacycaps = moderncaps.union(set(['changegroupsubset']))
 
 class localpeer(peer.peerrepository):
     '''peer for a local repo; reflects only the most recent API'''
 
-    def __init__(self, repo, caps=MODERNCAPS):
+    def __init__(self, repo, caps=moderncaps):
         peer.peerrepository.__init__(self)
         self._repo = repo.filtered('served')
         self.ui = repo.ui
@@ -103,18 +105,42 @@
     def known(self, nodes):
         return self._repo.known(nodes)
 
-    def getbundle(self, source, heads=None, common=None, bundlecaps=None):
-        return self._repo.getbundle(source, heads=heads, common=common,
-                                    bundlecaps=None)
+    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
+                  format='HG10', **kwargs):
+        cg = exchange.getbundle(self._repo, source, heads=heads,
+                                common=common, bundlecaps=bundlecaps, **kwargs)
+        if bundlecaps is not None and 'HG2X' in bundlecaps:
+            # When requesting a bundle2, getbundle returns a stream to make the
+            # wire level function happier. We need to build a proper object
+            # from it in local peer.
+            cg = bundle2.unbundle20(self.ui, cg)
+        return cg
 
     # TODO We might want to move the next two calls into legacypeer and add
     # unbundle instead.
 
+    def unbundle(self, cg, heads, url):
+        """apply a bundle on a repo
+
+        This function handles the repo locking itself."""
+        try:
+            cg = exchange.readbundle(self.ui, cg, None)
+            ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
+            if util.safehasattr(ret, 'getchunks'):
+                # This is a bundle20 object, turn it into an unbundler.
+                # This little dance should be dropped eventually when the API
+                # is finally improved.
+                stream = util.chunkbuffer(ret.getchunks())
+                ret = bundle2.unbundle20(self.ui, stream)
+            return ret
+        except exchange.PushRaced, exc:
+            raise error.ResponseError(_('push failed:'), exc.message)
+
     def lock(self):
         return self._repo.lock()
 
     def addchangegroup(self, cg, source, url):
-        return self._repo.addchangegroup(cg, source, url)
+        return changegroup.addchangegroup(self._repo, cg, source, url)
 
     def pushkey(self, namespace, key, old, new):
         return self._repo.pushkey(namespace, key, old, new)
@@ -131,7 +157,7 @@
     restricted capabilities'''
 
     def __init__(self, repo):
-        localpeer.__init__(self, repo, caps=LEGACYCAPS)
+        localpeer.__init__(self, repo, caps=legacycaps)
 
     def branches(self, nodes):
         return self._repo.branches(nodes)
@@ -140,10 +166,10 @@
         return self._repo.between(pairs)
 
     def changegroup(self, basenodes, source):
-        return self._repo.changegroup(basenodes, source)
+        return changegroup.changegroup(self._repo, basenodes, source)
 
     def changegroupsubset(self, bases, heads, source):
-        return self._repo.changegroupsubset(bases, heads, source)
+        return changegroup.changegroupsubset(self._repo, bases, heads, source)
 
 class localrepository(object):
 
@@ -154,6 +180,8 @@
     requirements = ['revlogv1']
     filtername = None
 
+    bundle2caps = {'HG2X': ()}
+
     # a list of (ui, featureset) functions.
     # only functions defined in module of enabled extensions are invoked
     featuresetupfuncs = set()
@@ -275,6 +303,12 @@
         pass
 
     def _restrictcapabilities(self, caps):
+        # bundle2 is not ready for prime time, drop it unless explicitly
+        # required by the tests (or some brave tester)
+        if self.ui.configbool('experimental', 'bundle2-exp', False):
+            caps = set(caps)
+            capsblob = bundle2.encodecaps(self.bundle2caps)
+            caps.add('bundle2-exp=' + urllib.quote(capsblob))
         return caps
 
     def _applyrequirements(self, requirements):
@@ -428,7 +462,7 @@
         '''Return a list of revisions matching the given revset'''
         expr = revset.formatspec(expr, *args)
         m = revset.match(None, expr)
-        return [r for r in m(self, list(self))]
+        return m(self, revset.spanset(self))
 
     def set(self, expr, *args):
         '''
@@ -823,13 +857,17 @@
             raise error.RepoError(
                 _("abandoned transaction found - run hg recover"))
 
+        def onclose():
+            self.store.write(tr)
+
         self._writejournal(desc)
         renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
         rp = report and report or self.ui.warn
         tr = transaction.transaction(rp, self.sopener,
                                      "journal",
                                      aftertrans(renames),
-                                     self.store.createmode)
+                                     self.store.createmode,
+                                     onclose)
         self._transref = weakref.ref(tr)
         return tr
 
@@ -842,7 +880,7 @@
                 (self.svfs, 'journal.phaseroots'))
 
     def undofiles(self):
-        return [vfs.join(undoname(x)) for vfs, x in self._journalfiles()]
+        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
 
     def _writejournal(self, desc):
         self.opener.write("journal.dirstate",
@@ -992,6 +1030,14 @@
             except AttributeError:
                 pass
         self.invalidatecaches()
+        self.store.invalidatecaches()
+
+    def invalidateall(self):
+        '''Fully invalidates both store and non-store parts, causing the
+        subsequent operation to reread any outside changes.'''
+        # extension should hook this to invalidate its caches
+        self.invalidate()
+        self.invalidatedirstate()
 
     def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
         try:
@@ -1005,6 +1051,7 @@
             l = lockmod.lock(vfs, lockname,
                              int(self.ui.config("ui", "timeout", "600")),
                              releasefn, desc=desc)
+            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
         if acquirefn:
             acquirefn()
         return l
@@ -1029,7 +1076,6 @@
             return l
 
         def unlock():
-            self.store.write()
             if hasunfilteredcache(self, '_phasecache'):
                 self._phasecache.write()
             for k, ce in self._filecache.items():
@@ -1122,12 +1168,14 @@
                 self.ui.warn(_("warning: can't find ancestor for '%s' "
                                "copied from '%s'!\n") % (fname, cfname))
 
+        elif fparent1 == nullid:
+            fparent1, fparent2 = fparent2, nullid
         elif fparent2 != nullid:
             # is one parent an ancestor of the other?
-            fparentancestor = flog.ancestor(fparent1, fparent2)
-            if fparentancestor == fparent1:
+            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
+            if fparent1 in fparentancestors:
                 fparent1, fparent2 = fparent2, nullid
-            elif fparentancestor == fparent2:
+            elif fparent2 in fparentancestors:
                 fparent2 = nullid
 
         # is the file changed?
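flog.ancestor() returned a single common ancestor, so the old equality test could miss the fast path when two file revisions have several equally good ancestors; testing membership in commonancestorsheads() covers them all. The decision in miniature (hypothetical helper, mirroring the branches above):

    def pickparents(p1, p2, cahs):
        '''cahs: all heads of the common ancestors of p1 and p2'''
        if p1 in cahs:       # local side adds nothing new
            return p2, None
        elif p2 in cahs:     # remote side adds nothing new
            return p1, None
        return p1, p2        # genuinely divergent: keep both parents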
@@ -1183,10 +1231,9 @@
             # only manage subrepos and .hgsubstate if .hgsub is present
             if '.hgsub' in wctx:
                 # we'll decide whether to track this ourselves, thanks
-                if '.hgsubstate' in changes[0]:
-                    changes[0].remove('.hgsubstate')
-                if '.hgsubstate' in changes[2]:
-                    changes[2].remove('.hgsubstate')
+                for c in changes[:3]:
+                    if '.hgsubstate' in c:
+                        c.remove('.hgsubstate')
 
                 # compare current state to last committed state
                 # build new substate based on last committed state
@@ -1578,7 +1625,7 @@
         r = modified, added, removed, deleted, unknown, ignored, clean
 
         if listsubrepos:
-            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
+            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                 if working:
                     rev2 = None
                 else:
@@ -1658,623 +1705,24 @@
         return r
 
     def pull(self, remote, heads=None, force=False):
-        if remote.local():
-            missing = set(remote.requirements) - self.supported
-            if missing:
-                msg = _("required features are not"
-                        " supported in the destination:"
-                        " %s") % (', '.join(sorted(missing)))
-                raise util.Abort(msg)
-
-        # don't open transaction for nothing or you break future useful
-        # rollback call
-        tr = None
-        trname = 'pull\n' + util.hidepassword(remote.url())
-        lock = self.lock()
-        try:
-            tmp = discovery.findcommonincoming(self.unfiltered(), remote,
-                                               heads=heads, force=force)
-            common, fetch, rheads = tmp
-            if not fetch:
-                self.ui.status(_("no changes found\n"))
-                result = 0
-            else:
-                tr = self.transaction(trname)
-                if heads is None and list(common) == [nullid]:
-                    self.ui.status(_("requesting all changes\n"))
-                elif heads is None and remote.capable('changegroupsubset'):
-                    # issue1320, avoid a race if remote changed after discovery
-                    heads = rheads
-
-                if remote.capable('getbundle'):
-                    # TODO: get bundlecaps from remote
-                    cg = remote.getbundle('pull', common=common,
-                                          heads=heads or rheads)
-                elif heads is None:
-                    cg = remote.changegroup(fetch, 'pull')
-                elif not remote.capable('changegroupsubset'):
-                    raise util.Abort(_("partial pull cannot be done because "
-                                           "other repository doesn't support "
-                                           "changegroupsubset."))
-                else:
-                    cg = remote.changegroupsubset(fetch, heads, 'pull')
-                result = self.addchangegroup(cg, 'pull', remote.url())
+        return exchange.pull(self, remote, heads, force)
 
-            # compute target subset
-            if heads is None:
-                # We pulled every thing possible
-                # sync on everything common
-                subset = common + rheads
-            else:
-                # We pulled a specific subset
-                # sync on this subset
-                subset = heads
-
-            # Get remote phases data from remote
-            remotephases = remote.listkeys('phases')
-            publishing = bool(remotephases.get('publishing', False))
-            if remotephases and not publishing:
-                # remote is new and unpublishing
-                pheads, _dr = phases.analyzeremotephases(self, subset,
-                                                         remotephases)
-                phases.advanceboundary(self, phases.public, pheads)
-                phases.advanceboundary(self, phases.draft, subset)
-            else:
-                # Remote is old or publishing all common changesets
-                # should be seen as public
-                phases.advanceboundary(self, phases.public, subset)
-
-            def gettransaction():
-                if tr is None:
-                    return self.transaction(trname)
-                return tr
-
-            obstr = obsolete.syncpull(self, remote, gettransaction)
-            if obstr is not None:
-                tr = obstr
-
-            if tr is not None:
-                tr.close()
-        finally:
-            if tr is not None:
-                tr.release()
-            lock.release()
-
-        return result
-
-    def checkpush(self, force, revs):
+    def checkpush(self, pushop):
         """Extensions can override this function if additional checks have
         to be performed before pushing, or call it if they override push
         command.
         """
         pass
 
-    def push(self, remote, force=False, revs=None, newbranch=False):
-        '''Push outgoing changesets (limited by revs) from the current
-        repository to remote. Return an integer:
-          - None means nothing to push
-          - 0 means HTTP error
-          - 1 means we pushed and remote head count is unchanged *or*
-            we have outgoing changesets but refused to push
-          - other values as described by addchangegroup()
-        '''
-        if remote.local():
-            missing = set(self.requirements) - remote.local().supported
-            if missing:
-                msg = _("required features are not"
-                        " supported in the destination:"
-                        " %s") % (', '.join(sorted(missing)))
-                raise util.Abort(msg)
-
-        # there are two ways to push to remote repo:
-        #
-        # addchangegroup assumes local user can lock remote
-        # repo (local filesystem, old ssh servers).
-        #
-        # unbundle assumes local user cannot lock remote repo (new ssh
-        # servers, http servers).
-
-        if not remote.canpush():
-            raise util.Abort(_("destination does not support push"))
-        unfi = self.unfiltered()
-        def localphasemove(nodes, phase=phases.public):
-            """move <nodes> to <phase> in the local source repo"""
-            if locallock is not None:
-                phases.advanceboundary(self, phase, nodes)
-            else:
-                # repo is not locked, do not change any phases!
-                # Informs the user that phases should have been moved when
-                # applicable.
-                actualmoves = [n for n in nodes if phase < self[n].phase()]
-                phasestr = phases.phasenames[phase]
-                if actualmoves:
-                    self.ui.status(_('cannot lock source repo, skipping local'
-                                     ' %s phase update\n') % phasestr)
-        # get local lock as we might write phase data
-        locallock = None
-        try:
-            locallock = self.lock()
-        except IOError, err:
-            if err.errno != errno.EACCES:
-                raise
-            # source repo cannot be locked.
-            # We do not abort the push, but just disable the local phase
-            # synchronisation.
-            msg = 'cannot lock source repository: %s\n' % err
-            self.ui.debug(msg)
-        try:
-            self.checkpush(force, revs)
-            lock = None
-            unbundle = remote.capable('unbundle')
-            if not unbundle:
-                lock = remote.lock()
-            try:
-                # discovery
-                fci = discovery.findcommonincoming
-                commoninc = fci(unfi, remote, force=force)
-                common, inc, remoteheads = commoninc
-                fco = discovery.findcommonoutgoing
-                outgoing = fco(unfi, remote, onlyheads=revs,
-                               commoninc=commoninc, force=force)
-
-
-                if not outgoing.missing:
-                    # nothing to push
-                    scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
-                    ret = None
-                else:
-                    # something to push
-                    if not force:
-                        # if self.obsstore == False --> no obsolete
-                        # then, save the iteration
-                        if unfi.obsstore:
-                            # this message are here for 80 char limit reason
-                            mso = _("push includes obsolete changeset: %s!")
-                            mst = "push includes %s changeset: %s!"
-                            # plain versions for i18n tool to detect them
-                            _("push includes unstable changeset: %s!")
-                            _("push includes bumped changeset: %s!")
-                            _("push includes divergent changeset: %s!")
-                            # If we are to push if there is at least one
-                            # obsolete or unstable changeset in missing, at
-                            # least one of the missinghead will be obsolete or
-                            # unstable. So checking heads only is ok
-                            for node in outgoing.missingheads:
-                                ctx = unfi[node]
-                                if ctx.obsolete():
-                                    raise util.Abort(mso % ctx)
-                                elif ctx.troubled():
-                                    raise util.Abort(_(mst)
-                                                     % (ctx.troubles()[0],
-                                                        ctx))
-                        newbm = self.ui.configlist('bookmarks', 'pushing')
-                        discovery.checkheads(unfi, remote, outgoing,
-                                             remoteheads, newbranch,
-                                             bool(inc), newbm)
-
-                    # TODO: get bundlecaps from remote
-                    bundlecaps = None
-                    # create a changegroup from local
-                    if revs is None and not (outgoing.excluded
-                                             or self.changelog.filteredrevs):
-                        # push everything,
-                        # use the fast path, no race possible on push
-                        bundler = changegroup.bundle10(self, bundlecaps)
-                        cg = self._changegroupsubset(outgoing,
-                                                     bundler,
-                                                     'push',
-                                                     fastpath=True)
-                    else:
-                        cg = self.getlocalbundle('push', outgoing, bundlecaps)
-
-                    # apply changegroup to remote
-                    if unbundle:
-                        # local repo finds heads on server, finds out what
-                        # revs it must push. once revs transferred, if server
-                        # finds it has different heads (someone else won
-                        # commit/push race), server aborts.
-                        if force:
-                            remoteheads = ['force']
-                        # ssh: return remote's addchangegroup()
-                        # http: return remote's addchangegroup() or 0 for error
-                        ret = remote.unbundle(cg, remoteheads, 'push')
-                    else:
-                        # we return an integer indicating remote head count
-                        # change
-                        ret = remote.addchangegroup(cg, 'push', self.url())
-
-                if ret:
-                    # push succeed, synchronize target of the push
-                    cheads = outgoing.missingheads
-                elif revs is None:
-                    # All out push fails. synchronize all common
-                    cheads = outgoing.commonheads
-                else:
-                    # I want cheads = heads(::missingheads and ::commonheads)
-                    # (missingheads is revs with secret changeset filtered out)
-                    #
-                    # This can be expressed as:
-                    #     cheads = ( (missingheads and ::commonheads)
-                    #              + (commonheads and ::missingheads))"
-                    #              )
-                    #
-                    # while trying to push we already computed the following:
-                    #     common = (::commonheads)
-                    #     missing = ((commonheads::missingheads) - commonheads)
-                    #
-                    # We can pick:
-                    # * missingheads part of common (::commonheads)
-                    common = set(outgoing.common)
-                    nm = self.changelog.nodemap
-                    cheads = [node for node in revs if nm[node] in common]
-                    # and
-                    # * commonheads parents on missing
-                    revset = unfi.set('%ln and parents(roots(%ln))',
-                                     outgoing.commonheads,
-                                     outgoing.missing)
-                    cheads.extend(c.node() for c in revset)
-                # even when we don't push, exchanging phase data is useful
-                remotephases = remote.listkeys('phases')
-                if (self.ui.configbool('ui', '_usedassubrepo', False)
-                    and remotephases    # server supports phases
-                    and ret is None # nothing was pushed
-                    and remotephases.get('publishing', False)):
-                    # When:
-                    # - this is a subrepo push
-                    # - and remote support phase
-                    # - and no changeset was pushed
-                    # - and remote is publishing
-                    # We may be in issue 3871 case!
-                    # We drop the possible phase synchronisation done by
-                    # courtesy to publish changesets possibly locally draft
-                    # on the remote.
-                    remotephases = {'publishing': 'True'}
-                if not remotephases: # old server or public only repo
-                    localphasemove(cheads)
-                    # don't push any phase data as there is nothing to push
-                else:
-                    ana = phases.analyzeremotephases(self, cheads, remotephases)
-                    pheads, droots = ana
-                    ### Apply remote phase on local
-                    if remotephases.get('publishing', False):
-                        localphasemove(cheads)
-                    else: # publish = False
-                        localphasemove(pheads)
-                        localphasemove(cheads, phases.draft)
-                    ### Apply local phase on remote
-
-                    # Get the list of all revs draft on remote by public here.
-                    # XXX Beware that revset break if droots is not strictly
-                    # XXX root we may want to ensure it is but it is costly
-                    outdated =  unfi.set('heads((%ln::%ln) and public())',
-                                         droots, cheads)
-                    for newremotehead in outdated:
-                        r = remote.pushkey('phases',
-                                           newremotehead.hex(),
-                                           str(phases.draft),
-                                           str(phases.public))
-                        if not r:
-                            self.ui.warn(_('updating %s to public failed!\n')
-                                            % newremotehead)
-                self.ui.debug('try to push obsolete markers to remote\n')
-                obsolete.syncpush(self, remote)
-            finally:
-                if lock is not None:
-                    lock.release()
-        finally:
-            if locallock is not None:
-                locallock.release()
-
-        bookmarks.updateremote(self.ui, unfi, remote, revs)
-        return ret
-
-    def changegroupinfo(self, nodes, source):
-        if self.ui.verbose or source == 'bundle':
-            self.ui.status(_("%d changesets found\n") % len(nodes))
-        if self.ui.debugflag:
-            self.ui.debug("list of changesets:\n")
-            for node in nodes:
-                self.ui.debug("%s\n" % hex(node))
-
-    def changegroupsubset(self, bases, heads, source):
-        """Compute a changegroup consisting of all the nodes that are
-        descendants of any of the bases and ancestors of any of the heads.
-        Return a chunkbuffer object whose read() method will return
-        successive changegroup chunks.
-
-        It is fairly complex as determining which filenodes and which
-        manifest nodes need to be included for the changeset to be complete
-        is non-trivial.
-
-        Another wrinkle is doing the reverse, figuring out which changeset in
-        the changegroup a particular filenode or manifestnode belongs to.
+    @unfilteredpropertycache
+    def prepushoutgoinghooks(self):
+        """Return util.hooks consists of "(repo, remote, outgoing)"
+        functions, which are called before pushing changesets.
         """
-        cl = self.changelog
-        if not bases:
-            bases = [nullid]
-        # TODO: remove call to nodesbetween.
-        csets, bases, heads = cl.nodesbetween(bases, heads)
-        discbases = []
-        for n in bases:
-            discbases.extend([p for p in cl.parents(n) if p != nullid])
-        outgoing = discovery.outgoing(cl, discbases, heads)
-        bundler = changegroup.bundle10(self)
-        return self._changegroupsubset(outgoing, bundler, source)
-
-    def getlocalbundle(self, source, outgoing, bundlecaps=None):
-        """Like getbundle, but taking a discovery.outgoing as an argument.
-
-        This is only implemented for local repos and reuses potentially
-        precomputed sets in outgoing."""
-        if not outgoing.missing:
-            return None
-        bundler = changegroup.bundle10(self, bundlecaps)
-        return self._changegroupsubset(outgoing, bundler, source)
+        return util.hooks()
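Extensions subscribe to this hook point with a callable taking (repo, remote, outgoing); a hedged sketch, assuming util.hooks keeps its add(source, hook) registration method:

    def reposetup(ui, repo):
        def checkoutgoing(repo, remote, outgoing):
            # inspect outgoing.missing; raise util.Abort to veto the push
            for node in outgoing.missing:
                pass
        repo.prepushoutgoinghooks.add('myextension', checkoutgoing)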
 
-    def getbundle(self, source, heads=None, common=None, bundlecaps=None):
-        """Like changegroupsubset, but returns the set difference between the
-        ancestors of heads and the ancestors common.
-
-        If heads is None, use the local heads. If common is None, use [nullid].
-
-        The nodes in common might not all be known locally due to the way the
-        current discovery protocol works.
-        """
-        cl = self.changelog
-        if common:
-            hasnode = cl.hasnode
-            common = [n for n in common if hasnode(n)]
-        else:
-            common = [nullid]
-        if not heads:
-            heads = cl.heads()
-        return self.getlocalbundle(source,
-                                   discovery.outgoing(cl, common, heads),
-                                   bundlecaps=bundlecaps)
-
-    @unfilteredmethod
-    def _changegroupsubset(self, outgoing, bundler, source,
-                           fastpath=False):
-        commonrevs = outgoing.common
-        csets = outgoing.missing
-        heads = outgoing.missingheads
-        # We go through the fast path if we get told to, or if all (unfiltered
-        # heads have been requested (since we then know there all linkrevs will
-        # be pulled by the client).
-        heads.sort()
-        fastpathlinkrev = fastpath or (
-                self.filtername is None and heads == sorted(self.heads()))
-
-        self.hook('preoutgoing', throw=True, source=source)
-        self.changegroupinfo(csets, source)
-        gengroup = bundler.generate(commonrevs, csets, fastpathlinkrev, source)
-        return changegroup.unbundle10(util.chunkbuffer(gengroup), 'UN')
-
-    def changegroup(self, basenodes, source):
-        # to avoid a race we use changegroupsubset() (issue1320)
-        return self.changegroupsubset(basenodes, self.heads(), source)
-
-    @unfilteredmethod
-    def addchangegroup(self, source, srctype, url, emptyok=False):
-        """Add the changegroup returned by source.read() to this repo.
-        srctype is a string like 'push', 'pull', or 'unbundle'.  url is
-        the URL of the repo where this changegroup is coming from.
-
-        Return an integer summarizing the change to this repo:
-        - nothing changed or no source: 0
-        - more heads than before: 1+added heads (2..n)
-        - fewer heads than before: -1-removed heads (-2..-n)
-        - number of heads stays the same: 1
-        """
-        def csmap(x):
-            self.ui.debug("add changeset %s\n" % short(x))
-            return len(cl)
-
-        def revmap(x):
-            return cl.rev(x)
-
-        if not source:
-            return 0
-
-        self.hook('prechangegroup', throw=True, source=srctype, url=url)
-
-        changesets = files = revisions = 0
-        efiles = set()
-
-        # write changelog data to temp files so concurrent readers will not see
-        # inconsistent view
-        cl = self.changelog
-        cl.delayupdate()
-        oldheads = cl.heads()
-
-        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
-        try:
-            trp = weakref.proxy(tr)
-            # pull off the changeset group
-            self.ui.status(_("adding changesets\n"))
-            clstart = len(cl)
-            class prog(object):
-                step = _('changesets')
-                count = 1
-                ui = self.ui
-                total = None
-                def __call__(self):
-                    self.ui.progress(self.step, self.count, unit=_('chunks'),
-                                     total=self.total)
-                    self.count += 1
-            pr = prog()
-            source.callback = pr
-
-            source.changelogheader()
-            srccontent = cl.addgroup(source, csmap, trp)
-            if not (srccontent or emptyok):
-                raise util.Abort(_("received changelog group is empty"))
-            clend = len(cl)
-            changesets = clend - clstart
-            for c in xrange(clstart, clend):
-                efiles.update(self[c].files())
-            efiles = len(efiles)
-            self.ui.progress(_('changesets'), None)
-
-            # pull off the manifest group
-            self.ui.status(_("adding manifests\n"))
-            pr.step = _('manifests')
-            pr.count = 1
-            pr.total = changesets # manifests <= changesets
-            # no need to check for empty manifest group here:
-            # if the result of the merge of 1 and 2 is the same in 3 and 4,
-            # no new manifest will be created and the manifest group will
-            # be empty during the pull
-            source.manifestheader()
-            self.manifest.addgroup(source, revmap, trp)
-            self.ui.progress(_('manifests'), None)
-
-            needfiles = {}
-            if self.ui.configbool('server', 'validate', default=False):
-                # validate incoming csets have their manifests
-                for cset in xrange(clstart, clend):
-                    mfest = self.changelog.read(self.changelog.node(cset))[0]
-                    mfest = self.manifest.readdelta(mfest)
-                    # store file nodes we must see
-                    for f, n in mfest.iteritems():
-                        needfiles.setdefault(f, set()).add(n)
-
-            # process the files
-            self.ui.status(_("adding file changes\n"))
-            pr.step = _('files')
-            pr.count = 1
-            pr.total = efiles
-            source.callback = None
-
-            newrevs, newfiles = self.addchangegroupfiles(source, revmap, trp,
-                                                         pr, needfiles)
-            revisions += newrevs
-            files += newfiles
-
-            dh = 0
-            if oldheads:
-                heads = cl.heads()
-                dh = len(heads) - len(oldheads)
-                for h in heads:
-                    if h not in oldheads and self[h].closesbranch():
-                        dh -= 1
-            htext = ""
-            if dh:
-                htext = _(" (%+d heads)") % dh
-
-            self.ui.status(_("added %d changesets"
-                             " with %d changes to %d files%s\n")
-                             % (changesets, revisions, files, htext))
-            self.invalidatevolatilesets()
-
-            if changesets > 0:
-                p = lambda: cl.writepending() and self.root or ""
-                self.hook('pretxnchangegroup', throw=True,
-                          node=hex(cl.node(clstart)), source=srctype,
-                          url=url, pending=p)
-
-            added = [cl.node(r) for r in xrange(clstart, clend)]
-            publishing = self.ui.configbool('phases', 'publish', True)
-            if srctype == 'push':
-                # Old server can not push the boundary themself.
-                # New server won't push the boundary if changeset already
-                # existed locally as secrete
-                #
-                # We should not use added here but the list of all change in
-                # the bundle
-                if publishing:
-                    phases.advanceboundary(self, phases.public, srccontent)
-                else:
-                    phases.advanceboundary(self, phases.draft, srccontent)
-                    phases.retractboundary(self, phases.draft, added)
-            elif srctype != 'strip':
-                # publishing only alter behavior during push
-                #
-                # strip should not touch boundary at all
-                phases.retractboundary(self, phases.draft, added)
-
-            # make changelog see real files again
-            cl.finalize(trp)
-
-            tr.close()
-
-            if changesets > 0:
-                if srctype != 'strip':
-                    # During strip, branchcache is invalid but coming call to
-                    # `destroyed` will repair it.
-                    # In other case we can safely update cache on disk.
-                    branchmap.updatecache(self.filtered('served'))
-                def runhooks():
-                    # These hooks run when the lock releases, not when the
-                    # transaction closes. So it's possible for the changelog
-                    # to have changed since we last saw it.
-                    if clstart >= len(self):
-                        return
-
-                    # forcefully update the on-disk branch cache
-                    self.ui.debug("updating the branch cache\n")
-                    self.hook("changegroup", node=hex(cl.node(clstart)),
-                              source=srctype, url=url)
-
-                    for n in added:
-                        self.hook("incoming", node=hex(n), source=srctype,
-                                  url=url)
-
-                    newheads = [h for h in self.heads() if h not in oldheads]
-                    self.ui.log("incoming",
-                                "%s incoming changes - new heads: %s\n",
-                                len(added),
-                                ', '.join([hex(c[:6]) for c in newheads]))
-                self._afterlock(runhooks)
-
-        finally:
-            tr.release()
-        # never return 0 here:
-        if dh < 0:
-            return dh - 1
-        else:
-            return dh + 1
-
-    def addchangegroupfiles(self, source, revmap, trp, pr, needfiles):
-        revisions = 0
-        files = 0
-        while True:
-            chunkdata = source.filelogheader()
-            if not chunkdata:
-                break
-            f = chunkdata["filename"]
-            self.ui.debug("adding %s revisions\n" % f)
-            pr()
-            fl = self.file(f)
-            o = len(fl)
-            if not fl.addgroup(source, revmap, trp):
-                raise util.Abort(_("received file revlog group is empty"))
-            revisions += len(fl) - o
-            files += 1
-            if f in needfiles:
-                needs = needfiles[f]
-                for new in xrange(o, len(fl)):
-                    n = fl.node(new)
-                    if n in needs:
-                        needs.remove(n)
-                    else:
-                        raise util.Abort(
-                            _("received spurious file revlog entry"))
-                if not needs:
-                    del needfiles[f]
-        self.ui.progress(_('files'), None)
-
-        for f, needs in needfiles.iteritems():
-            fl = self.file(f)
-            for n in needs:
-                try:
-                    fl.rev(n)
-                except error.LookupError:
-                    raise util.Abort(
-                        _('missing file data for %s:%s - run hg verify') %
-                        (f, hex(n)))
-
-        return revisions, files
+    def push(self, remote, force=False, revs=None, newbranch=False):
+        return exchange.push(self, remote, force, revs, newbranch)
 
     def stream_in(self, remote, requirements):
         lock = self.lock()
@@ -2310,26 +1758,36 @@
             handled_bytes = 0
             self.ui.progress(_('clone'), 0, total=total_bytes)
             start = time.time()
-            for i in xrange(total_files):
-                # XXX doesn't support '\n' or '\r' in filenames
-                l = fp.readline()
-                try:
-                    name, size = l.split('\0', 1)
-                    size = int(size)
-                except (ValueError, TypeError):
-                    raise error.ResponseError(
-                        _('unexpected response from remote server:'), l)
-                if self.ui.debugflag:
-                    self.ui.debug('adding %s (%s)\n' %
-                                  (name, util.bytecount(size)))
-                # for backwards compat, name was partially encoded
-                ofp = self.sopener(store.decodedir(name), 'w')
-                for chunk in util.filechunkiter(fp, limit=size):
-                    handled_bytes += len(chunk)
-                    self.ui.progress(_('clone'), handled_bytes,
-                                     total=total_bytes)
-                    ofp.write(chunk)
-                ofp.close()
+
+            tr = self.transaction(_('clone'))
+            try:
+                for i in xrange(total_files):
+                    # XXX doesn't support '\n' or '\r' in filenames
+                    l = fp.readline()
+                    try:
+                        name, size = l.split('\0', 1)
+                        size = int(size)
+                    except (ValueError, TypeError):
+                        raise error.ResponseError(
+                            _('unexpected response from remote server:'), l)
+                    if self.ui.debugflag:
+                        self.ui.debug('adding %s (%s)\n' %
+                                      (name, util.bytecount(size)))
+                    # for backwards compat, name was partially encoded
+                    ofp = self.sopener(store.decodedir(name), 'w')
+                    for chunk in util.filechunkiter(fp, limit=size):
+                        handled_bytes += len(chunk)
+                        self.ui.progress(_('clone'), handled_bytes,
+                                         total=total_bytes)
+                        ofp.write(chunk)
+                    ofp.close()
+                tr.close()
+            finally:
+                tr.release()
+
+            # Writing straight to files circumvented the inmemory caches
+            self.invalidate()
+
             elapsed = time.time() - start
             if elapsed <= 0:
                 elapsed = 0.001
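Two things changed in stream_in: the streamed files are now written inside a transaction, and the in-memory caches are invalidated afterwards because writing through sopener bypasses them. The pattern in outline, with the file-writing loop abstracted into a hypothetical helper:

    tr = repo.transaction('clone')
    try:
        copy_streamed_files(fp, repo.sopener)  # hypothetical helper
        tr.close()
    finally:
        tr.release()
    repo.invalidate()  # cached changelog/manifest may now be stale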
--- a/mercurial/lock.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/lock.py	Thu Apr 17 19:36:17 2014 -0400
@@ -38,7 +38,7 @@
         self.desc = desc
         self.postrelease  = []
         self.pid = os.getpid()
-        self.lock()
+        self.delay = self.lock()
 
     def __del__(self):
         if self.held:
@@ -57,7 +57,7 @@
         while True:
             try:
                 self.trylock()
-                return 1
+                return self.timeout - timeout
             except error.LockHeld, inst:
                 if timeout != 0:
                     time.sleep(1)
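lock() now reports how long acquisition took: the retry loop sleeps one second per attempt and decrements timeout, so self.timeout - timeout is the wait in whole seconds. The accounting in isolation (simplified; the real code also treats a negative timeout as "wait forever"):

    import time

    class LockHeldError(Exception):
        pass

    def acquire(trylock, timeout=600):
        remaining = timeout
        while True:
            try:
                trylock()
                return timeout - remaining  # delay in seconds
            except LockHeldError:
                if remaining <= 0:
                    raise
                time.sleep(1)
                remaining -= 1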
--- a/mercurial/match.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/match.py	Thu Apr 17 19:36:17 2014 -0400
@@ -6,30 +6,32 @@
 # GNU General Public License version 2 or any later version.
 
 import re
-import util, fileset, pathutil
+import util, pathutil
 from i18n import _
 
-def _rematcher(pat):
-    m = util.compilere(pat)
+def _rematcher(regex):
+    '''compile the regexp with the best available regexp engine and return a
+    matcher function'''
+    m = util.compilere(regex)
     try:
         # slightly faster, provided by facebook's re2 bindings
         return m.test_match
     except AttributeError:
         return m.match
 
-def _expandsets(pats, ctx):
-    '''convert set: patterns into a list of files in the given context'''
+def _expandsets(kindpats, ctx):
+    '''Returns the kindpats list with the 'set' patterns expanded.'''
     fset = set()
     other = []
 
-    for kind, expr in pats:
+    for kind, pat in kindpats:
         if kind == 'set':
             if not ctx:
                 raise util.Abort("fileset expression with no context")
-            s = fileset.getfileset(ctx, expr)
+            s = ctx.getfileset(pat)
             fset.update(s)
             continue
-        other.append((kind, expr))
+        other.append((kind, pat))
     return fset, other
 
 class match(object):
@@ -41,10 +43,10 @@
         root - the canonical root of the tree you're matching against
         cwd - the current working directory, if relevant
         patterns - patterns to find
-        include - patterns to include
-        exclude - patterns to exclude
-        default - if a pattern in names has no explicit type, assume this one
-        exact - patterns are actually literals
+        include - patterns to include (unless they are excluded)
+        exclude - patterns to exclude (even if they are included)
+        default - if a pattern in patterns has no explicit type, assume this one
+        exact - patterns are actually filenames (include/exclude still apply)
 
         a pattern is one of:
         'glob:<glob>' - a glob relative to cwd
@@ -59,17 +61,17 @@
 
         self._root = root
         self._cwd = cwd
-        self._files = []
+        self._files = [] # exact files and roots of patterns
         self._anypats = bool(include or exclude)
         self._ctx = ctx
         self._always = False
 
         if include:
-            pats = _normalize(include, 'glob', root, cwd, auditor)
-            self.includepat, im = _buildmatch(ctx, pats, '(?:/|$)')
+            kindpats = _normalize(include, 'glob', root, cwd, auditor)
+            self.includepat, im = _buildmatch(ctx, kindpats, '(?:/|$)')
         if exclude:
-            pats = _normalize(exclude, 'glob', root, cwd, auditor)
-            self.excludepat, em = _buildmatch(ctx, pats, '(?:/|$)')
+            kindpats = _normalize(exclude, 'glob', root, cwd, auditor)
+            self.excludepat, em = _buildmatch(ctx, kindpats, '(?:/|$)')
         if exact:
             if isinstance(patterns, list):
                 self._files = patterns
@@ -77,10 +79,10 @@
                 self._files = list(patterns)
             pm = self.exact
         elif patterns:
-            pats = _normalize(patterns, default, root, cwd, auditor)
-            self._files = _roots(pats)
-            self._anypats = self._anypats or _anypats(pats)
-            self.patternspat, pm = _buildmatch(ctx, pats, '$')
+            kindpats = _normalize(patterns, default, root, cwd, auditor)
+            self._files = _roots(kindpats)
+            self._anypats = self._anypats or _anypats(kindpats)
+            self.patternspat, pm = _buildmatch(ctx, kindpats, '$')
 
         if patterns or exact:
             if include:
@@ -114,28 +116,45 @@
     def __iter__(self):
         for f in self._files:
             yield f
+
+    # Callbacks related to how the matcher is used by dirstate.walk.
+    # Subscribers to these events must monkeypatch the matcher object.
     def bad(self, f, msg):
-        '''callback for each explicit file that can't be
-        found/accessed, with an error message
-        '''
+        '''Callback from dirstate.walk for each explicit file that can't be
+        found/accessed, with an error message.'''
         pass
-    # If this is set, it will be called when an explicitly listed directory is
-    # visited.
+
+    # If an explicitdir is set, it will be called when an explicitly listed
+    # directory is visited.
     explicitdir = None
-    # If this is set, it will be called when a directory discovered by recursive
-    # traversal is visited.
+
+    # If a traversedir is set, it will be called when a directory discovered
+    # by recursive traversal is visited.
     traversedir = None
-    def missing(self, f):
-        pass
-    def exact(self, f):
-        return f in self._fmap
+
     def rel(self, f):
+        '''Convert a repo path back to a path relative to the cwd of this
+        matcher.'''
         return util.pathto(self._root, self._cwd, f)
+
     def files(self):
+        '''Explicitly listed files or patterns or roots:
+        if no patterns or .always(): empty list,
+        if exact: list exact files,
+        if not .anypats(): list all files and dirs,
+        else: optimal roots'''
         return self._files
+
+    def exact(self, f):
+        '''Returns True if f is in .files().'''
+        return f in self._fmap
+
     def anypats(self):
+        '''Matcher uses patterns or include/exclude.'''
         return self._anypats
+
     def always(self):
+        '''Matcher will match everything and .files() will be empty;
+        optimization might be both possible and necessary.'''
         return self._always
 
 class exact(match):
@@ -191,21 +210,36 @@
     def bad(self, f, msg):
         self._matcher.bad(self._path + "/" + f, msg)
 
-def patkind(pat):
-    return _patsplit(pat, None)[0]
+def patkind(pattern, default=None):
+    '''If pattern is 'kind:pat' with a known kind, return kind.'''
+    return _patsplit(pattern, default)[0]
 
-def _patsplit(pat, default):
-    """Split a string into an optional pattern kind prefix and the
-    actual pattern."""
-    if ':' in pat:
-        kind, val = pat.split(':', 1)
+def _patsplit(pattern, default):
+    """Split a string into the optional pattern kind prefix and the actual
+    pattern."""
+    if ':' in pattern:
+        kind, pat = pattern.split(':', 1)
         if kind in ('re', 'glob', 'path', 'relglob', 'relpath', 'relre',
                     'listfile', 'listfile0', 'set'):
-            return kind, val
-    return default, pat
+            return kind, pat
+    return default, pattern
 
 def _globre(pat):
-    "convert a glob pattern into a regexp"
+    r'''Convert an extended glob string to a regexp string.
+
+    >>> print _globre(r'?')
+    .
+    >>> print _globre(r'*')
+    [^/]*
+    >>> print _globre(r'**')
+    .*
+    >>> print _globre(r'[a*?!^][^b][!c]')
+    [a*?!^][\^b][^c]
+    >>> print _globre(r'{a,b}')
+    (?:a|b)
+    >>> print _globre(r'.\*\?')
+    \.\*\?
+    '''
     i, n = 0, len(pat)
     res = ''
     group = 0
@@ -260,99 +294,115 @@
             res += escape(c)
     return res
 
-def _regex(kind, name, tail):
-    '''convert a pattern into a regular expression'''
-    if not name:
+def _regex(kind, pat, globsuffix):
+    '''Convert a (normalized) pattern of any kind into a regular expression.
+    globsuffix is appended to the regexp of globs.'''
+    if not pat:
         return ''
     if kind == 're':
-        return name
-    elif kind == 'path':
-        return '^' + re.escape(name) + '(?:/|$)'
-    elif kind == 'relglob':
-        return '(?:|.*/)' + _globre(name) + tail
-    elif kind == 'relpath':
-        return re.escape(name) + '(?:/|$)'
-    elif kind == 'relre':
-        if name.startswith('^'):
-            return name
-        return '.*' + name
-    return _globre(name) + tail
+        return pat
+    if kind == 'path':
+        return '^' + re.escape(pat) + '(?:/|$)'
+    if kind == 'relglob':
+        return '(?:|.*/)' + _globre(pat) + globsuffix
+    if kind == 'relpath':
+        return re.escape(pat) + '(?:/|$)'
+    if kind == 'relre':
+        if pat.startswith('^'):
+            return pat
+        return '.*' + pat
+    return _globre(pat) + globsuffix
 
-def _buildmatch(ctx, pats, tail):
-    fset, pats = _expandsets(pats, ctx)
-    if not pats:
+def _buildmatch(ctx, kindpats, globsuffix):
+    '''Return regexp string and a matcher function for kindpats.
+    globsuffix is appended to the regexp of globs.'''
+    fset, kindpats = _expandsets(kindpats, ctx)
+    if not kindpats:
         return "", fset.__contains__
 
-    pat, mf = _buildregexmatch(pats, tail)
+    regex, mf = _buildregexmatch(kindpats, globsuffix)
     if fset:
-        return pat, lambda f: f in fset or mf(f)
-    return pat, mf
+        return regex, lambda f: f in fset or mf(f)
+    return regex, mf
 
-def _buildregexmatch(pats, tail):
-    """build a matching function from a set of patterns"""
+def _buildregexmatch(kindpats, globsuffix):
+    """Build a match function from a list of kinds and kindpats,
+    return regexp string and a matcher function."""
     try:
-        pat = '(?:%s)' % '|'.join([_regex(k, p, tail) for (k, p) in pats])
-        if len(pat) > 20000:
+        regex = '(?:%s)' % '|'.join([_regex(k, p, globsuffix)
+                                     for (k, p) in kindpats])
+        if len(regex) > 20000:
             raise OverflowError
-        return pat, _rematcher(pat)
+        return regex, _rematcher(regex)
     except OverflowError:
         # We're using a Python with a tiny regex engine and we
         # made it explode, so we'll divide the pattern list in two
         # until it works
-        l = len(pats)
+        l = len(kindpats)
         if l < 2:
             raise
-        pata, a = _buildregexmatch(pats[:l//2], tail)
-        patb, b = _buildregexmatch(pats[l//2:], tail)
+        regexa, a = _buildregexmatch(kindpats[:l//2], globsuffix)
+        regexb, b = _buildregexmatch(kindpats[l//2:], globsuffix)
-        return pat, lambda s: a(s) or b(s)
+        # 'regex' is still the (overlong) combined pattern from the try block
+        return regex, lambda s: a(s) or b(s)
     except re.error:
-        for k, p in pats:
+        for k, p in kindpats:
             try:
-                _rematcher('(?:%s)' % _regex(k, p, tail))
+                _rematcher('(?:%s)' % _regex(k, p, globsuffix))
             except re.error:
                 raise util.Abort(_("invalid pattern (%s): %s") % (k, p))
         raise util.Abort(_("invalid pattern"))
 
-def _normalize(names, default, root, cwd, auditor):
-    pats = []
-    for kind, name in [_patsplit(p, default) for p in names]:
+def _normalize(patterns, default, root, cwd, auditor):
+    '''Convert 'kind:pat' entries from the patterns list into (kind, pat)
+    tuples, with patterns normalized and rooted and listfile patterns
+    expanded.'''
+    kindpats = []
+    for kind, pat in [_patsplit(p, default) for p in patterns]:
         if kind in ('glob', 'relpath'):
-            name = pathutil.canonpath(root, cwd, name, auditor)
+            pat = pathutil.canonpath(root, cwd, pat, auditor)
         elif kind in ('relglob', 'path'):
-            name = util.normpath(name)
+            pat = util.normpath(pat)
         elif kind in ('listfile', 'listfile0'):
             try:
-                files = util.readfile(name)
+                files = util.readfile(pat)
                 if kind == 'listfile0':
                     files = files.split('\0')
                 else:
                     files = files.splitlines()
                 files = [f for f in files if f]
             except EnvironmentError:
-                raise util.Abort(_("unable to read file list (%s)") % name)
-            pats += _normalize(files, default, root, cwd, auditor)
+                raise util.Abort(_("unable to read file list (%s)") % pat)
+            kindpats += _normalize(files, default, root, cwd, auditor)
             continue
+        # else: re or relre - which cannot be normalized
+        kindpats.append((kind, pat))
+    return kindpats
+
+def _roots(kindpats):
+    '''return roots and exact explicitly listed files from patterns
 
-        pats.append((kind, name))
-    return pats
-
-def _roots(patterns):
+    >>> _roots([('glob', 'g/*'), ('glob', 'g'), ('glob', 'g*')])
+    ['g', 'g', '.']
+    >>> _roots([('relpath', 'r'), ('path', 'p/p'), ('path', '')])
+    ['r', 'p/p', '.']
+    >>> _roots([('relglob', 'rg*'), ('re', 're/'), ('relre', 'rr')])
+    ['.', '.', '.']
+    '''
     r = []
-    for kind, name in patterns:
+    for kind, pat in kindpats:
         if kind == 'glob': # find the non-glob prefix
             root = []
-            for p in name.split('/'):
+            for p in pat.split('/'):
                 if '[' in p or '{' in p or '*' in p or '?' in p:
                     break
                 root.append(p)
             r.append('/'.join(root) or '.')
         elif kind in ('relpath', 'path'):
-            r.append(name or '.')
+            r.append(pat or '.')
         else: # relglob, re, relre
             r.append('.')
     return r
 
-def _anypats(patterns):
-    for kind, name in patterns:
+def _anypats(kindpats):
+    for kind, pat in kindpats:
         if kind in ('glob', 're', 'relglob', 'relre', 'set'):
             return True
--- a/mercurial/merge.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/merge.py	Thu Apr 17 19:36:17 2014 -0400
@@ -34,7 +34,7 @@
         [type][length][content]
 
     Type is a single character, length is a 4 bytes integer, content is an
-    arbitrary suites of bytes of lenght `length`.
+    arbitrary sequence of bytes of length `length`.
 
     Type should be a letter. Capital letter are mandatory record, Mercurial
     should abort if they are unknown. lower case record can be safely ignored.
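The v2 record format described above is a simple type-length-value encoding. A round-trip sketch of the same '>sI' layout that _writerecordsv2 and _readrecordsv2 use further down in this patch (standalone Python 2, hypothetical record values):

    import struct

    def packrecord(rtype, data):
        # 1-byte type, 4-byte big-endian length, then the raw payload
        return struct.pack('>sI%is' % len(data), rtype, len(data), data)

    def unpackrecords(blob):
        off, records = 0, []
        while off < len(blob):
            rtype = blob[off]
            off += 1
            length = struct.unpack('>I', blob[off:off + 4])[0]
            off += 4
            records.append((rtype, blob[off:off + length]))
            off += length
        return records

    # unpackrecords(packrecord('L', 'abcd')) == [('L', 'abcd')]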
@@ -47,10 +47,12 @@
     '''
     statepathv1 = "merge/state"
     statepathv2 = "merge/state2"
+
     def __init__(self, repo):
         self._repo = repo
         self._dirty = False
         self._read()
+
     def reset(self, node=None, other=None):
         self._state = {}
         if node:
@@ -58,7 +60,13 @@
             self._other = other
         shutil.rmtree(self._repo.join("merge"), True)
         self._dirty = False
+
     def _read(self):
+        """Analyse each record content to restore a serialized state from disk
+
+        This function process "record" entry produced by the de-serialization
+        of on disk file.
+        """
         self._state = {}
         records = self._readrecords()
         for rtype, record in records:
@@ -73,7 +81,21 @@
                 raise util.Abort(_('unsupported merge state record: %s')
                                    % rtype)
         self._dirty = False
+
     def _readrecords(self):
+        """Read merge state from disk and return a list of record (TYPE, data)
+
+        We read data from both v1 and v2 files and decide which one to use.
+
+        V1 has been used by version prior to 2.9.1 and contains less data than
+        v2. We read both versions and check if no data in v2 contradicts
+        v1. If there is not contradiction we can safely assume that both v1
+        and v2 were written at the same time and use the extract data in v2. If
+        there is contradiction we ignore v2 content as we assume an old version
+        of Mercurial has overwritten the mergestate file and left an old v2
+        file around.
+
+        returns list of record [(TYPE, data), ...]"""
         v1records = self._readrecordsv1()
         v2records = self._readrecordsv2()
         oldv2 = set() # old format version of v2 record
@@ -101,7 +123,15 @@
                 return v1records
         else:
             return v2records
+
     def _readrecordsv1(self):
+        """read on disk merge state for version 1 file
+
+        returns list of record [(TYPE, data), ...]
+
+        Note: the "F" data from this file are one entry short
+              (no "other file node" entry)
+        """
         records = []
         try:
             f = self._repo.opener(self.statepathv1)
@@ -115,7 +145,12 @@
             if err.errno != errno.ENOENT:
                 raise
         return records
+
     def _readrecordsv2(self):
+        """read on disk merge state for version 2 file
+
+        returns list of record [(TYPE, data), ...]
+        """
         records = []
         try:
             f = self._repo.opener(self.statepathv2)
@@ -125,17 +160,19 @@
             while off < end:
                 rtype = data[off]
                 off += 1
-                lenght = _unpack('>I', data[off:(off + 4)])[0]
+                length = _unpack('>I', data[off:(off + 4)])[0]
                 off += 4
-                record = data[off:(off + lenght)]
-                off += lenght
+                record = data[off:(off + length)]
+                off += length
                 records.append((rtype, record))
             f.close()
         except IOError, err:
             if err.errno != errno.ENOENT:
                 raise
         return records
+
     def commit(self):
+        """Write current state on disk (if necessary)"""
         if self._dirty:
             records = []
             records.append(("L", hex(self._local)))
@@ -144,10 +181,14 @@
                 records.append(("F", "\0".join([d] + v)))
             self._writerecords(records)
             self._dirty = False
+
     def _writerecords(self, records):
+        """Write current state on disk (both v1 and v2)"""
         self._writerecordsv1(records)
         self._writerecordsv2(records)
+
     def _writerecordsv1(self, records):
+        """Write current state on disk in a version 1 file"""
         f = self._repo.opener(self.statepathv1, "w")
         irecords = iter(records)
         lrecords = irecords.next()
@@ -157,14 +198,25 @@
             if rtype == "F":
                 f.write("%s\n" % _droponode(data))
         f.close()
+
     def _writerecordsv2(self, records):
+        """Write current state on disk in a version 2 file"""
         f = self._repo.opener(self.statepathv2, "w")
         for key, data in records:
             assert len(key) == 1
             format = ">sI%is" % len(data)
             f.write(_pack(format, key, len(data), data))
         f.close()
+
     def add(self, fcl, fco, fca, fd):
+        """add a new (potentially?) conflicting file the merge state
+        fcl: file context for local,
+        fco: file context for remote,
+        fca: file context for ancestors,
+        fd:  file path of the resulting merge.
+
+        note: also write the local version to the `.hg/merge` directory.
+        """
         hash = util.sha1(fcl.path()).hexdigest()
         self._repo.opener.write("merge/" + hash, fcl.data())
         self._state[fd] = ['u', hash, fcl.path(),
@@ -172,21 +224,28 @@
                            fco.path(), hex(fco.filenode()),
                            fcl.flags()]
         self._dirty = True
+
     def __contains__(self, dfile):
         return dfile in self._state
+
     def __getitem__(self, dfile):
         return self._state[dfile][0]
+
     def __iter__(self):
         l = self._state.keys()
         l.sort()
         for f in l:
             yield f
+
     def files(self):
         return self._state.keys()
+
     def mark(self, dfile, state):
         self._state[dfile][0] = state
         self._dirty = True
+
     def resolve(self, dfile, wctx):
+        """rerun merge process for file path `dfile`"""
         if self[dfile] == 'r':
             return 0
         stateentry = self._state[dfile]
@@ -212,6 +271,7 @@
         if r is None:
             # no real conflict
             del self._state[dfile]
+            self._dirty = True
         elif not r:
             self.mark(dfile, 'r')
         return r
@@ -263,7 +323,7 @@
 
     return actions
 
-def _checkcollision(repo, wmf, actions, prompts):
+def _checkcollision(repo, wmf, actions):
     # build provisional merged manifest up
     pmmf = set(wmf)
 
@@ -274,20 +334,23 @@
     def nop(f, args):
         pass
 
-    def renameop(f, args):
-        f2, fd, flags = args
-        if f:
-            pmmf.discard(f)
-        pmmf.add(fd)
+    def renamemoveop(f, args):
+        f2, flags = args
+        pmmf.discard(f2)
+        pmmf.add(f)
+    def renamegetop(f, args):
+        f2, flags = args
+        pmmf.add(f)
     def mergeop(f, args):
-        f2, fd, move = args
+        f1, f2, fa, move, anc = args
         if move:
-            pmmf.discard(f)
-        pmmf.add(fd)
+            pmmf.discard(f1)
+        pmmf.add(f)
 
     opmap = {
         "a": addop,
-        "d": renameop,
+        "dm": renamemoveop,
+        "dg": renamegetop,
         "dr": nop,
         "e": nop,
         "f": addop, # untracked file should be kept in working directory
@@ -295,21 +358,14 @@
         "m": mergeop,
         "r": removeop,
         "rd": nop,
+        "cd": addop,
+        "dc": addop,
     }
     for f, m, args, msg in actions:
         op = opmap.get(m)
         assert op, m
         op(f, args)
 
-    opmap = {
-        "cd": addop,
-        "dc": addop,
-    }
-    for f, m in prompts:
-        op = opmap.get(m)
-        assert op, m
-        op(f, None)
-
     # check case-folding collision in provisional merged manifest
     foldmap = {}
     for f in sorted(pmmf):
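For reference, the collision check that follows normalizes every path in the provisional manifest and aborts on a clash. The idea in standalone form (a sketch; Mercurial uses util.normcase rather than plain str.lower):

    def checkcasecollision(paths):
        foldmap = {}
        for f in sorted(paths):
            fold = f.lower()
            if fold in foldmap:
                raise ValueError('case-folding collision between %s and %s'
                                 % (f, foldmap[fold]))
            foldmap[fold] = f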
@@ -320,7 +376,7 @@
         foldmap[fold] = f
 
 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, partial,
-                  acceptremote=False):
+                  acceptremote, followcopies):
     """
     Merge p1 and p2 with ancestor pa and generate merge action list
 
@@ -329,19 +385,8 @@
     acceptremote = accept the incoming changes without prompting
     """
 
-    overwrite = force and not branchmerge
     actions, copy, movewithdir = [], {}, {}
 
-    followcopies = False
-    if overwrite:
-        pa = wctx
-    elif pa == p2: # backwards
-        pa = wctx.p1()
-    elif not branchmerge and not wctx.dirty(missing=True):
-        pass
-    elif pa and repo.ui.configbool("merge", "followcopies", True):
-        followcopies = True
-
     # manifests fetched in order are going to be faster, so prime the caches
     [x.manifest() for x in
      sorted(wctx.parents() + [p2, pa], key=lambda x: x.rev())]
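manifestmerge() no longer decides whether to follow copies; the conditions removed above presumably move to its caller, along these lines (a sketch reconstructing the removed block, since the destination is not shown in this hunk):

    followcopies = False
    if overwrite:
        pa = wctx
    elif pa == p2:  # backwards
        pa = wctx.p1()
    elif not branchmerge and not wctx.dirty(missing=True):
        pass
    elif pa and repo.ui.configbool('merge', 'followcopies', True):
        followcopies = True
    actions = manifestmerge(repo, wctx, p2, pa, branchmerge, force,
                            partial, acceptremote, followcopies)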
@@ -370,7 +415,7 @@
                 m1['.hgsubstate'] += "+"
                 break
 
-    aborts, prompts = [], []
+    aborts = []
     # Compare manifests
     fdiff = dicthelpers.diff(m1, m2)
     flagsdiff = m1.flagsdiff(m2)
@@ -395,11 +440,16 @@
         if partial and not partial(f):
             continue
         if n1 and n2:
-            fla = ma.flags(f)
-            nol = 'l' not in fl1 + fl2 + fla
+            fa = f
             a = ma.get(f, nullid)
+            if a == nullid:
+                fa = copy.get(f, f)
+                # Note: f as default is wrong - we can't really make a 3-way
+                # merge without an ancestor file.
+            fla = ma.flags(fa)
+            nol = 'l' not in fl1 + fl2 + fla
             if n2 == a and fl2 == fla:
-                pass # remote unchanged - keep local
+                actions.append((f, "k", (), "keep")) # remote unchanged
             elif n1 == a and fl1 == fla: # local unchanged - use remote
                 if n1 == n2: # optimization: keep local content
                     actions.append((f, "e", (fl2,), "update permissions"))
@@ -410,36 +460,40 @@
             elif nol and n1 == a: # local only changed 'x'
                 actions.append((f, "g", (fl1,), "remote is newer"))
             else: # both changed something
-                actions.append((f, "m", (f, f, False), "versions differ"))
+                actions.append((f, "m", (f, f, fa, False, pa.node()),
+                               "versions differ"))
         elif f in copied: # files we'll deal with on m2 side
             pass
-        elif n1 and f in movewithdir: # directory rename
+        elif n1 and f in movewithdir: # directory rename, move local
             f2 = movewithdir[f]
-            actions.append((f, "d", (None, f2, fl1),
-                            "remote renamed directory to " + f2))
+            actions.append((f2, "dm", (f, fl1),
+                            "remote directory rename - move from " + f))
         elif n1 and f in copy:
             f2 = copy[f]
-            actions.append((f, "m", (f2, f, False),
-                            "local copied/moved to " + f2))
+            actions.append((f, "m", (f, f2, f2, False, pa.node()),
+                            "local copied/moved from " + f2))
         elif n1 and f in ma: # clean, a different, no remote
             if n1 != ma[f]:
-                prompts.append((f, "cd")) # prompt changed/deleted
+                if acceptremote:
+                    actions.append((f, "r", None, "remote delete"))
+                else:
+                    actions.append((f, "cd", None, "prompt changed/deleted"))
             elif n1[20:] == "a": # added, no remote
                 actions.append((f, "f", None, "remote deleted"))
             else:
                 actions.append((f, "r", None, "other deleted"))
         elif n2 and f in movewithdir:
             f2 = movewithdir[f]
-            actions.append((None, "d", (f, f2, fl2),
-                            "local renamed directory to " + f2))
+            actions.append((f2, "dg", (f, fl2),
+                            "local directory rename - get from " + f))
         elif n2 and f in copy:
             f2 = copy[f]
             if f2 in m2:
-                actions.append((f2, "m", (f, f, False),
-                                "remote copied to " + f))
+                actions.append((f, "m", (f2, f, f2, False, pa.node()),
+                                "remote copied from " + f2))
             else:
-                actions.append((f2, "m", (f, f, True),
-                                "remote moved to " + f))
+                actions.append((f, "m", (f2, f, f2, True, pa.node()),
+                                "remote moved from " + f2))
         elif n2 and f not in ma:
             # local unknown, remote created: the logic is described by the
             # following table:
@@ -458,7 +512,8 @@
             else:
                 different = _checkunknownfile(repo, wctx, p2, f)
                 if force and branchmerge and different:
-                    actions.append((f, "m", (f, f, False),
+                    # FIXME: This is wrong - f is not in ma ...
+                    actions.append((f, "m", (f, f, f, False, pa.node()),
                                     "remote differs from untracked local"))
                 elif not force and different:
                     aborts.append((f, "ud"))
@@ -470,7 +525,12 @@
                 aborts.append((f, "ud"))
             else:
                 # if different: old untracked f may be overwritten and lost
-                prompts.append((f, "dc")) # prompt deleted/changed
+                if acceptremote:
+                    actions.append((f, "g", (m2.flags(f),),
+                                   "remote recreating"))
+                else:
+                    actions.append((f, "dc", (m2.flags(f),),
+                                   "prompt deleted/changed"))
 
     for f, m in sorted(aborts):
         if m == "ud":
@@ -484,30 +544,10 @@
         # check collision between files only in p2 for clean update
         if (not branchmerge and
             (force or not wctx.dirty(missing=True, branch=False))):
-            _checkcollision(repo, m2, [], [])
+            _checkcollision(repo, m2, [])
         else:
-            _checkcollision(repo, m1, actions, prompts)
+            _checkcollision(repo, m1, actions)
 
-    for f, m in sorted(prompts):
-        if m == "cd":
-            if acceptremote:
-                actions.append((f, "r", None, "remote delete"))
-            elif repo.ui.promptchoice(
-                _("local changed %s which remote deleted\n"
-                  "use (c)hanged version or (d)elete?"
-                  "$$ &Changed $$ &Delete") % f, 0):
-                actions.append((f, "r", None, "prompt delete"))
-            else:
-                actions.append((f, "a", None, "prompt keep"))
-        elif m == "dc":
-            if acceptremote:
-                actions.append((f, "g", (m2.flags(f),), "remote recreating"))
-            elif repo.ui.promptchoice(
-                _("remote changed %s which local deleted\n"
-                  "use (c)hanged version or leave (d)eleted?"
-                  "$$ &Changed $$ &Deleted") % f, 0) == 0:
-                actions.append((f, "g", (m2.flags(f),), "prompt recreating"))
-        else: assert False, m
     return actions
 
 def actionkey(a):
@@ -549,12 +589,11 @@
     if i > 0:
         yield i, f
 
-def applyupdates(repo, actions, wctx, mctx, actx, overwrite):
+def applyupdates(repo, actions, wctx, mctx, overwrite):
     """apply the merge action list to the working directory
 
     wctx is the working copy context
     mctx is the context to be merged into the working copy
-    actx is the context of the common ancestor
 
     Return a tuple of counts (updated, merged, removed, unresolved) that
     describes how many files were affected by the update.
@@ -571,24 +610,20 @@
         f, m, args, msg = a
         repo.ui.debug(" %s: %s -> %s\n" % (f, msg, m))
         if m == "m": # merge
-            f2, fd, move = args
-            if fd == '.hgsubstate': # merged internally
+            f1, f2, fa, move, anc = args
+            if f == '.hgsubstate': # merged internally
                 continue
-            repo.ui.debug("  preserving %s for resolve of %s\n" % (f, fd))
-            fcl = wctx[f]
+            repo.ui.debug("  preserving %s for resolve of %s\n" % (f1, f))
+            fcl = wctx[f1]
             fco = mctx[f2]
-            if mctx == actx: # backwards, use working dir parent as ancestor
-                if fcl.parents():
-                    fca = fcl.p1()
-                else:
-                    fca = repo.filectx(f, fileid=nullrev)
+            actx = repo[anc]
+            if fa in actx:
+                fca = actx[fa]
             else:
-                fca = fcl.ancestor(fco, actx)
-            if not fca:
-                fca = repo.filectx(f, fileid=nullrev)
-            ms.add(fcl, fco, fca, fd)
-            if f != fd and move:
-                moves.append(f)
+                fca = repo.filectx(f1, fileid=nullrev)
+            ms.add(fcl, fco, fca, f)
+            if f1 != f and move:
+                moves.append(f1)
 
     audit = repo.wopener.audit
 
@@ -599,13 +634,13 @@
             audit(f)
             util.unlinkpath(repo.wjoin(f))
 
-    numupdates = len(actions)
+    numupdates = len([a for a in actions if a[1] != 'k'])
     workeractions = [a for a in actions if a[1] in 'gr']
     updateactions = [a for a in workeractions if a[1] == 'g']
     updated = len(updateactions)
     removeactions = [a for a in workeractions if a[1] == 'r']
     removed = len(removeactions)
-    actions = [a for a in actions if a[1] not in 'gr']
+    actions = [a for a in actions if a[1] not in 'grk']
 
     hgsub = [a[1] for a in workeractions if a[0] == '.hgsubstate']
     if hgsub and hgsub[0] == 'r':
@@ -636,13 +671,13 @@
         f, m, args, msg = a
         progress(_updating, z + i + 1, item=f, total=numupdates, unit=_files)
         if m == "m": # merge
-            f2, fd, move = args
-            if fd == '.hgsubstate': # subrepo states need updating
+            f1, f2, fa, move, anc = args
+            if f == '.hgsubstate': # subrepo states need updating
                 subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
                                  overwrite)
                 continue
-            audit(fd)
-            r = ms.resolve(fd, wctx)
+            audit(f)
+            r = ms.resolve(f, wctx)
             if r is not None and r > 0:
                 unresolved += 1
             else:
@@ -650,16 +685,17 @@
                     updated += 1
                 else:
                     merged += 1
-        elif m == "d": # directory rename
-            f2, fd, flags = args
-            if f:
-                repo.ui.note(_("moving %s to %s\n") % (f, fd))
-                audit(fd)
-                repo.wwrite(fd, wctx.filectx(f).data(), flags)
-                util.unlinkpath(repo.wjoin(f))
-            if f2:
-                repo.ui.note(_("getting %s to %s\n") % (f2, fd))
-                repo.wwrite(fd, mctx.filectx(f2).data(), flags)
+        elif m == "dm": # directory rename, move local
+            f0, flags = args
+            repo.ui.note(_("moving %s to %s\n") % (f0, f))
+            audit(f)
+            repo.wwrite(f, wctx.filectx(f0).data(), flags)
+            util.unlinkpath(repo.wjoin(f0))
+            updated += 1
+        elif m == "dg": # local directory rename, get
+            f0, flags = args
+            repo.ui.note(_("getting %s to %s\n") % (f0, f))
+            repo.wwrite(f, mctx.filectx(f0).data(), flags)
             updated += 1
         elif m == "dr": # divergent renames
             fl, = args
@@ -683,17 +719,104 @@
 
     return updated, merged, removed, unresolved
 
-def calculateupdates(repo, tctx, mctx, ancestor, branchmerge, force, partial,
-                     acceptremote=False):
-    "Calculate the actions needed to merge mctx into tctx"
-    actions = []
-    actions += manifestmerge(repo, tctx, mctx,
-                             ancestor,
-                             branchmerge, force,
-                             partial, acceptremote)
-    if tctx.rev() is None:
-        actions += _forgetremoved(tctx, mctx, branchmerge)
-    return actions
+def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force, partial,
+                     acceptremote, followcopies):
+    "Calculate the actions needed to merge mctx into wctx using ancestors"
+
+    if len(ancestors) == 1: # default
+        actions = manifestmerge(repo, wctx, mctx, ancestors[0],
+                                branchmerge, force,
+                                partial, acceptremote, followcopies)
+
+    else: # only when merge.preferancestor=* - experimentalish code
+        # Call for bids
+        fbids = {} # mapping filename to list of action bids
+        for ancestor in ancestors:
+            repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
+            actions = manifestmerge(repo, wctx, mctx, ancestor,
+                                    branchmerge, force,
+                                    partial, acceptremote, followcopies)
+            for a in sorted(actions):
+                repo.ui.debug(' %s: %s\n' % (a[0], a[1]))
+                f = a[0]
+                if f in fbids:
+                    fbids[f].append(a)
+                else:
+                    fbids[f] = [a]
+
+        # Pick the best bid for each file
+        repo.ui.note(_('\nauction for merging merge bids\n'))
+        actions = []
+        for f, bidsl in sorted(fbids.items()):
+            # Consensus?
+            a0 = bidsl[0]
+            if util.all(a == a0 for a in bidsl[1:]): # len(bidsl) is > 1
+                repo.ui.note(" %s: consensus for %s\n" % (f, a0[1]))
+                actions.append(a0)
+                continue
+            # Group bids by kind of action
+            bids = {}
+            for a in bidsl:
+                m = a[1]
+                if m in bids:
+                    bids[m].append(a)
+                else:
+                    bids[m] = [a]
+            # If keep is an option, just do it.
+            if "k" in bids:
+                repo.ui.note(" %s: picking 'keep' action\n" % f)
+                actions.append(bids["k"][0])
+                continue
+            # If all gets agree [how could they not?], just do it.
+            if "g" in bids:
+                ga0 = bids["g"][0]
+                if util.all(a == ga0 for a in bids["g"][1:]):
+                    repo.ui.note(" %s: picking 'get' action\n" % f)
+                    actions.append(ga0)
+                    continue
+            # TODO: Consider other simple actions such as mode changes
+            # Handle inefficient democrazy.
+            repo.ui.note(_(' %s: multiple merge bids:\n') % f)
+            for a in bidsl:
+                repo.ui.note('  %s: %s\n' % (f, a[1]))
+            # Pick random action. TODO: Instead, prompt user when resolving
+            a0 = bidsl[0]
+            repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
+                         (f, a0[1]))
+            actions.append(a0)
+            continue
+        repo.ui.note(_('end of auction\n\n'))
+
+    # Filter out prompts.
+    newactions, prompts = [], []
+    for a in actions:
+        if a[1] in ("cd", "dc"):
+            prompts.append(a)
+        else:
+            newactions.append(a)
+    # Prompt and create actions. TODO: Move this towards resolve phase.
+    for f, m, args, msg in sorted(prompts):
+        if m == "cd":
+            if repo.ui.promptchoice(
+                _("local changed %s which remote deleted\n"
+                  "use (c)hanged version or (d)elete?"
+                  "$$ &Changed $$ &Delete") % f, 0):
+                newactions.append((f, "r", None, "prompt delete"))
+            else:
+                newactions.append((f, "a", None, "prompt keep"))
+        elif m == "dc":
+            flags, = args
+            if repo.ui.promptchoice(
+                _("remote changed %s which local deleted\n"
+                  "use (c)hanged version or leave (d)eleted?"
+                  "$$ &Changed $$ &Deleted") % f, 0) == 0:
+                newactions.append((f, "g", (flags,), "prompt recreating"))
+        else: assert False, m
+
+    if wctx.rev() is None:
+        newactions += _forgetremoved(wctx, mctx, branchmerge)
+
+    return newactions
 
 def recordupdates(repo, actions, branchmerge):
     "record merge actions to the dirstate"
@@ -712,50 +835,55 @@
             repo.dirstate.drop(f)
         elif m == "e": # exec change
             repo.dirstate.normallookup(f)
+        elif m == "k": # keep
+            pass
         elif m == "g": # get
             if branchmerge:
                 repo.dirstate.otherparent(f)
             else:
                 repo.dirstate.normal(f)
         elif m == "m": # merge
-            f2, fd, move = args
+            f1, f2, fa, move, anc = args
             if branchmerge:
                 # We've done a branch merge, mark this file as merged
                 # so that we properly record the merger later
-                repo.dirstate.merge(fd)
-                if f != f2: # copy/rename
+                repo.dirstate.merge(f)
+                if f1 != f2: # copy/rename
                     if move:
-                        repo.dirstate.remove(f)
-                    if f != fd:
-                        repo.dirstate.copy(f, fd)
+                        repo.dirstate.remove(f1)
+                    if f1 != f:
+                        repo.dirstate.copy(f1, f)
                     else:
-                        repo.dirstate.copy(f2, fd)
+                        repo.dirstate.copy(f2, f)
             else:
                 # We've update-merged a locally modified file, so
                 # we set the dirstate to emulate a normal checkout
                 # of that file some time in the past. Thus our
                 # merge will appear as a normal local file
                 # modification.
-                if f2 == fd: # file not locally copied/moved
-                    repo.dirstate.normallookup(fd)
+                if f2 == f: # file not locally copied/moved
+                    repo.dirstate.normallookup(f)
                 if move:
-                    repo.dirstate.drop(f)
-        elif m == "d": # directory rename
-            f2, fd, flag = args
-            if not f2 and f not in repo.dirstate:
+                    repo.dirstate.drop(f1)
+        elif m == "dm": # directory rename, move local
+            f0, flag = args
+            if f0 not in repo.dirstate:
                 # untracked file moved
                 continue
             if branchmerge:
-                repo.dirstate.add(fd)
-                if f:
-                    repo.dirstate.remove(f)
-                    repo.dirstate.copy(f, fd)
-                if f2:
-                    repo.dirstate.copy(f2, fd)
+                repo.dirstate.add(f)
+                repo.dirstate.remove(f0)
+                repo.dirstate.copy(f0, f)
             else:
-                repo.dirstate.normal(fd)
-                if f:
-                    repo.dirstate.drop(f)
+                repo.dirstate.normal(f)
+                repo.dirstate.drop(f0)
+        elif m == "dg": # directory rename, get
+            f0, flag = args
+            if branchmerge:
+                repo.dirstate.add(f)
+                repo.dirstate.copy(f0, f)
+            else:
+                repo.dirstate.normal(f)
 
 def update(repo, node, branchmerge, force, partial, ancestor=None,
            mergeancestor=False):
@@ -808,9 +936,9 @@
         wc = repo[None]
         pl = wc.parents()
         p1 = pl[0]
-        pa = None
+        pas = [None]
         if ancestor:
-            pa = repo[ancestor]
+            pas = [repo[ancestor]]
 
         if node is None:
             # Here is where we should consider bookmarks, divergent bookmarks,
@@ -849,13 +977,17 @@
                     # get the max revision for the given successors set,
                     # i.e. the 'tip' of a set
                     node = repo.revs("max(%ln)", successors)[0]
-                    pa = p1
+                    pas = [p1]
 
         overwrite = force and not branchmerge
 
         p2 = repo[node]
-        if pa is None:
-            pa = p1.ancestor(p2)
+        if pas[0] is None:
+            if repo.ui.config("merge", "preferancestor") == '*':
+                cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
+                pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
+            else:
+                pas = [p1.ancestor(p2)]
 
         fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
 
@@ -863,10 +995,10 @@
         if not overwrite and len(pl) > 1:
             raise util.Abort(_("outstanding uncommitted merges"))
         if branchmerge:
-            if pa == p2:
+            if pas == [p2]:
                 raise util.Abort(_("merging with a working directory ancestor"
                                    " has no effect"))
-            elif pa == p1:
+            elif pas == [p1]:
                 if not mergeancestor and p1.branch() == p2.branch():
                     raise util.Abort(_("nothing to merge"),
                                      hint=_("use 'hg update' "
@@ -886,7 +1018,7 @@
                 repo.hook('update', parent1=xp2, parent2='', error=0)
                 return 0, 0, 0, 0
 
-            if pa not in (p1, p2):  # nonlinear
+            if pas not in ([p1], [p2]):  # nonlinear
                 dirty = wc.dirty(missing=True)
                 if dirty or onode is None:
                     # Branching is a bit strange to ensure we do the minimal
@@ -894,7 +1026,7 @@
                     foreground = obsolete.foreground(repo, [p1.node()])
                     # note: the <node> variable contains a random identifier
                     if repo[node].node() in foreground:
-                        pa = p1  # allow updating to successors
+                        pas = [p1]  # allow updating to successors
                     elif dirty:
                         msg = _("uncommitted changes")
                         if onode is None:
@@ -910,11 +1042,21 @@
                         raise util.Abort(msg, hint=hint)
                 else:
                     # Allow jumping branches if clean and specific rev given
-                    pa = p1
+                    pas = [p1]
+
+        followcopies = False
+        if overwrite:
+            pas = [wc]
+        elif pas == [p2]: # backwards
+            pas = [wc.p1()]
+        elif not branchmerge and not wc.dirty(missing=True):
+            pass
+        elif pas[0] and repo.ui.configbool("merge", "followcopies", True):
+            followcopies = True
 
         ### calculate phase
-        actions = calculateupdates(repo, wc, p2, pa,
-                                   branchmerge, force, partial, mergeancestor)
+        actions = calculateupdates(repo, wc, p2, pas, branchmerge, force,
+                                   partial, mergeancestor, followcopies)
 
         ### apply phase
         if not branchmerge: # just jump to the new rev
@@ -924,7 +1066,7 @@
             # note that we're in the middle of an update
             repo.vfs.write('updatestate', p2.hex())
 
-        stats = applyupdates(repo, actions, wc, p2, pa, overwrite)
+        stats = applyupdates(repo, actions, wc, p2, overwrite)
 
         if not partial:
             repo.setparents(fp1, fp2)
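A note on the action format changed throughout this file: an "m" action's
args grew from (f2, fd, move) to (f1, f2, fa, move, anc) - local name, other
name, ancestor name, move flag, and ancestor node - with the merged filename
now carried as the action's own f. A hypothetical consumer of the new layout:

    def describe(action):
        # unpack a new-style merge action; the tuple layout follows the
        # "m" branches above, the sample data below is made up
        f, m, args, msg = action
        if m == "m":
            f1, f2, fa, move, anc = args
            return "merge into %s (local %s, other %s, ancestor %s)" % (
                f, f1, f2, fa)
        return "%s: %s" % (m, f)

    print(describe(("a", "m", ("a", "b", "b", False, "0" * 40),
                    "remote copied from b")))
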
--- a/mercurial/minirst.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/minirst.py	Thu Apr 17 19:36:17 2014 -0400
@@ -73,7 +73,7 @@
         if lines:
             indent = min((len(l) - len(l.lstrip())) for l in lines)
             lines = [l[indent:] for l in lines]
-            blocks.append(dict(indent=indent, lines=lines))
+            blocks.append({'indent': indent, 'lines': lines})
     return blocks
 
 def findliteralblocks(blocks):
@@ -109,7 +109,7 @@
             elif len(blocks[i]['lines']) == 1 and \
                  blocks[i]['lines'][0].lstrip(' ').startswith('.. ') and \
                  blocks[i]['lines'][0].find(' ', 3) == -1:
-                # directive on its onw line, not a literal block
+                # directive on its own line, not a literal block
                 i += 1
                 continue
             else:
@@ -174,8 +174,8 @@
                     items = []
                     for j, line in enumerate(lines):
                         if match(lines, j, itemre, singleline):
-                            items.append(dict(type=type, lines=[],
-                                              indent=blocks[i]['indent']))
+                            items.append({'type': type, 'lines': [],
+                                          'indent': blocks[i]['indent']})
                         items[-1]['lines'].append(line)
                     blocks[i:i + 1] = items
                     break
@@ -382,10 +382,10 @@
             blocks[i]['type'] in ('bullet', 'option', 'field')):
             i += 1
         elif not blocks[i - 1]['lines']:
-            # no lines in previous block, do not seperate
+            # no lines in previous block, do not separate
             i += 1
         else:
-            blocks.insert(i, dict(lines=[''], indent=0, type='margin'))
+            blocks.insert(i, {'lines': [''], 'indent': 0, 'type': 'margin'})
             i += 2
     return blocks
 
@@ -697,6 +697,10 @@
     for row in data:
         l = []
         for w, v in zip(widths, row):
+            if '\n' in v:
+                # only remove line breaks and indentation, long lines are
+                # handled by the next tool
+                v = ' '.join(e.lstrip() for e in v.split('\n'))
             pad = ' ' * (w - encoding.colwidth(v))
             l.append(v + pad)
         out.append(indent + ' '.join(l) + "\n")
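The table change above collapses embedded line breaks in a cell to single
spaces before padding, leaving long-line wrapping to later stages. The
transformation in isolation:

    def flattencell(v):
        # only remove line breaks and the indentation following them;
        # wrapping over-long lines is a later stage's job
        if '\n' in v:
            v = ' '.join(e.lstrip() for e in v.split('\n'))
        return v

    print(flattencell('first line\n   second line'))
    # 'first line second line'
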
--- a/mercurial/obsolete.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/obsolete.py	Thu Apr 17 19:36:17 2014 -0400
@@ -176,7 +176,7 @@
         if ':' in key or '\0' in key:
             raise ValueError("':' and '\0' are forbidden in metadata key'")
         if '\0' in value:
-            raise ValueError("':' are forbidden in metadata value'")
+            raise ValueError("'\\0' is forbidden in metadata value")
     return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
 
 def decodemeta(data):
@@ -247,6 +247,9 @@
     def __iter__(self):
         return iter(self._all)
 
+    def __len__(self):
+        return len(self._all)
+
     def __nonzero__(self):
         return bool(self._all)
 
@@ -256,6 +259,12 @@
         * ensuring it is hashable
         * check mandatory metadata
         * encode metadata
+
+        If you are a human writing code creating markers you want to use the
+        `createmarkers` function in this module instead.
+
+        Return True if a new marker has been added, False if the marker
+        already existed (no-op).
         """
         if metadata is None:
             metadata = {}
@@ -267,7 +276,7 @@
             if len(succ) != 20:
                 raise ValueError(succ)
         marker = (str(prec), tuple(succs), int(flag), encodemeta(metadata))
-        self.add(transaction, [marker])
+        return bool(self.add(transaction, [marker]))
 
     def add(self, transaction, markers):
         """Add new markers to the store
@@ -343,14 +352,15 @@
 # - the base85 encoding
 _maxpayload = 5300
 
-def listmarkers(repo):
-    """List markers over pushkey"""
-    if not repo.obsstore:
-        return {}
+def _pushkeyescape(markers):
+    """encode markers into a dict suitable for pushkey exchange
+
+    - binary data is base85 encoded
+    - split into chunks smaller than 5300 bytes"""
     keys = {}
     parts = []
     currentlen = _maxpayload * 2  # ensure we create a new part
-    for marker in  repo.obsstore:
+    for marker in markers:
         nextdata = _encodeonemarker(marker)
         if (len(nextdata) + currentlen > _maxpayload):
             currentpart = []
@@ -363,13 +373,19 @@
         keys['dump%i' % idx] = base85.b85encode(data)
     return keys
 
+def listmarkers(repo):
+    """List markers over pushkey"""
+    if not repo.obsstore:
+        return {}
+    return _pushkeyescape(repo.obsstore)
+
 def pushmarker(repo, key, old, new):
     """Push markers over pushkey"""
     if not key.startswith('dump'):
         repo.ui.warn(_('unknown key: %r') % key)
         return 0
     if old:
-        repo.ui.warn(_('unexpected old value') % key)
+        repo.ui.warn(_('unexpected old value for %r') % key)
         return 0
     data = base85.b85decode(new)
     lock = repo.lock()
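_pushkeyescape above packs encoded markers into parts that each stay under
the 5300-byte pushkey payload limit. The greedy chunking idea in isolation,
with an illustrative identity encoder in place of _encodeonemarker:

    def chunk(items, encode, maxpayload=5300):
        # start "full" so the first item always opens a new part
        parts, currentlen = [], maxpayload * 2
        for item in items:
            data = encode(item)
            if len(data) + currentlen > maxpayload:
                currentpart = []
                parts.append(currentpart)
                currentlen = 0
            currentpart.append(data)
            currentlen += len(data)
        return [''.join(p) for p in parts]

    print([len(p) for p in chunk(['x' * 3000, 'y' * 3000, 'z' * 100],
                                 lambda s: s)])
    # [3000, 3100]: the second and third items share a part
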
@@ -384,43 +400,6 @@
     finally:
         lock.release()
 
-def syncpush(repo, remote):
-    """utility function to push obsolete markers to a remote
-
-    Exist mostly to allow overriding for experimentation purpose"""
-    if (_enabled and repo.obsstore and
-        'obsolete' in remote.listkeys('namespaces')):
-        rslts = []
-        remotedata = repo.listkeys('obsolete')
-        for key in sorted(remotedata, reverse=True):
-            # reverse sort to ensure we end with dump0
-            data = remotedata[key]
-            rslts.append(remote.pushkey('obsolete', key, '', data))
-        if [r for r in rslts if not r]:
-            msg = _('failed to push some obsolete markers!\n')
-            repo.ui.warn(msg)
-
-def syncpull(repo, remote, gettransaction):
-    """utility function to pull obsolete markers from a remote
-
-    The `gettransaction` is function that return the pull transaction, creating
-    one if necessary. We return the transaction to inform the calling code that
-    a new transaction have been created (when applicable).
-
-    Exists mostly to allow overriding for experimentation purpose"""
-    tr = None
-    if _enabled:
-        repo.ui.debug('fetching remote obsolete markers\n')
-        remoteobs = remote.listkeys('obsolete')
-        if 'dump0' in remoteobs:
-            tr = gettransaction()
-            for key in sorted(remoteobs, reverse=True):
-                if key.startswith('dump'):
-                    data = base85.b85decode(remoteobs[key])
-                    repo.obsstore.mergemarkers(tr, data)
-            repo.invalidatevolatilesets()
-    return tr
-
 def allmarkers(repo):
     """all obsolete markers known in a repository"""
     for markerdata in repo.obsstore:
@@ -673,7 +652,7 @@
                 # Within a marker, a successor may have divergent successors
                 # sets. In such a case, the marker will contribute multiple
                 # divergent successors sets. If multiple successors have
-                # divergent successors sets, a cartesian product is used.
+                # divergent successors sets, a Cartesian product is used.
                 #
                 # At the end we post-process successors sets to remove
                 # duplicated entry and successors set that are strict subset of
@@ -800,7 +779,7 @@
 def _computebumpedset(repo):
     """the set of revs trying to obsolete public revisions"""
     bumped = set()
-    # utils function (avoid attribut lookup in the loop)
+    # util function (avoid attribute lookup in the loop)
     phase = repo._phasecache.phase # would be faster to grab the full list
     public = phases.public
     cl = repo.changelog
@@ -845,8 +824,10 @@
 def createmarkers(repo, relations, flag=0, metadata=None):
     """Add obsolete markers between changesets in a repo
 
-    <relations> must be an iterable of (<old>, (<new>, ...)) tuple.
-    `old` and `news` are changectx.
+    <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
+    tuple. `old` and `news` are changectx. metadata is an optional dictionary
+    containing metadata for this marker only. It is merged with the global
+    metadata specified through the `metadata` argument of this function,
 
     Trying to obsolete a public changeset will raise an exception.
 
@@ -865,7 +846,13 @@
         metadata['user'] = repo.ui.username()
     tr = repo.transaction('add-obsolescence-marker')
     try:
-        for prec, sucs in relations:
+        for rel in relations:
+            prec = rel[0]
+            sucs = rel[1]
+            localmetadata = metadata.copy()
+            if 2 < len(rel):
+                localmetadata.update(rel[2])
+
             if not prec.mutable():
                 raise util.Abort("cannot obsolete immutable changeset: %s"
                                  % prec)
@@ -873,7 +860,7 @@
             nsucs = tuple(s.node() for s in sucs)
             if nprec in nsucs:
                 raise util.Abort("changeset %s cannot obsolete itself" % prec)
-            repo.obsstore.create(tr, nprec, nsucs, flag, metadata)
+            repo.obsstore.create(tr, nprec, nsucs, flag, localmetadata)
             repo.filteredrevcache.clear()
         tr.close()
     finally:
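createmarkers now accepts an optional third element per relation, merged
over the function-wide metadata for that marker only. The merge rule on its
own, with made-up sample data:

    def relationmeta(rel, metadata):
        # rel is (<old>, (<new>, ...)) with an optional trailing metadata
        # dict that overrides the global metadata for this marker only
        localmetadata = metadata.copy()
        if 2 < len(rel):
            localmetadata.update(rel[2])
        return localmetadata

    print(relationmeta(('old', ('new',)), {'user': 'alice'}))
    # {'user': 'alice'}
    print(relationmeta(('old', ('new',), {'note': 'amend'}),
                       {'user': 'alice'}))
    # both 'user' and 'note' present
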
--- a/mercurial/parser.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/parser.py	Thu Apr 17 19:36:17 2014 -0400
@@ -75,9 +75,12 @@
                     if len(infix) == 3:
                         self._match(infix[2], pos)
         return expr
-    def parse(self, message):
+    def parse(self, message, lookup=None):
         'generate a parse tree from a message'
-        self._iter = self._tokenizer(message)
+        if lookup:
+            self._iter = self._tokenizer(message, lookup)
+        else:
+            self._iter = self._tokenizer(message)
         self._advance()
         res = self._parse()
         token, value, pos = self.current
--- a/mercurial/parsers.c	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/parsers.c	Thu Apr 17 19:36:17 2014 -0400
@@ -14,6 +14,8 @@
 
 #include "util.h"
 
+static char *versionerrortext = "Python minor version mismatch";
+
 static int8_t hextable[256] = {
 	-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
 	-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
@@ -1208,7 +1210,7 @@
 	const bitmask allseen = (1ull << revcount) - 1;
 	const bitmask poison = 1ull << revcount;
 	PyObject *gca = PyList_New(0);
-	int i, v, interesting, left;
+	int i, v, interesting;
 	int maxrev = -1;
 	long sp;
 	bitmask *seen;
@@ -1230,7 +1232,7 @@
 	for (i = 0; i < revcount; i++)
 		seen[revs[i]] = 1ull << i;
 
-	interesting = left = revcount;
+	interesting = revcount;
 
 	for (v = maxrev; v >= 0 && interesting; v--) {
 		long sv = seen[v];
@@ -1251,11 +1253,8 @@
 				}
 				sv |= poison;
 				for (i = 0; i < revcount; i++) {
-					if (revs[i] == v) {
-						if (--left <= 1)
-							goto done;
-						break;
-					}
+					if (revs[i] == v)
+						goto done;
 				}
 			}
 		}
@@ -1529,10 +1528,6 @@
 		ret = gca;
 		Py_INCREF(gca);
 	}
-	else if (PyList_GET_SIZE(gca) == 1) {
-		ret = PyList_GET_ITEM(gca, 0);
-		Py_INCREF(ret);
-	}
 	else ret = find_deepest(self, gca);
 
 done:
@@ -1549,6 +1544,97 @@
 }
 
 /*
+ * Given a (possibly overlapping) set of revs, return all the
+ * common ancestors heads: heads(::args[0] and ::args[1] and ...)
+ */
+static PyObject *index_commonancestorsheads(indexObject *self, PyObject *args)
+{
+	PyObject *ret = NULL;
+	Py_ssize_t argcount, i, len;
+	bitmask repeat = 0;
+	int revcount = 0;
+	int *revs;
+
+	argcount = PySequence_Length(args);
+	revs = malloc(argcount * sizeof(*revs));
+	if (argcount > 0 && revs == NULL)
+		return PyErr_NoMemory();
+	len = index_length(self) - 1;
+
+	for (i = 0; i < argcount; i++) {
+		static const int capacity = 24;
+		PyObject *obj = PySequence_GetItem(args, i);
+		bitmask x;
+		long val;
+
+		if (!PyInt_Check(obj)) {
+			PyErr_SetString(PyExc_TypeError,
+					"arguments must all be ints");
+			goto bail;
+		}
+		val = PyInt_AsLong(obj);
+		if (val == -1) {
+			ret = PyList_New(0);
+			goto done;
+		}
+		if (val < 0 || val >= len) {
+			PyErr_SetString(PyExc_IndexError,
+					"index out of range");
+			goto bail;
+		}
+		/* this cheesy bloom filter lets us avoid some more
+		 * expensive duplicate checks in the common set-is-disjoint
+		 * case */
+		x = 1ull << (val & 0x3f);
+		if (repeat & x) {
+			int k;
+			for (k = 0; k < revcount; k++) {
+				if (val == revs[k])
+					goto duplicate;
+			}
+		}
+		else repeat |= x;
+		if (revcount >= capacity) {
+			PyErr_Format(PyExc_OverflowError,
+				     "bitset size (%d) > capacity (%d)",
+				     revcount, capacity);
+			goto bail;
+		}
+		revs[revcount++] = (int)val;
+	duplicate:;
+	}
+
+	if (revcount == 0) {
+		ret = PyList_New(0);
+		goto done;
+	}
+	if (revcount == 1) {
+		PyObject *obj;
+		ret = PyList_New(1);
+		if (ret == NULL)
+			goto bail;
+		obj = PyInt_FromLong(revs[0]);
+		if (obj == NULL)
+			goto bail;
+		PyList_SET_ITEM(ret, 0, obj);
+		goto done;
+	}
+
+	ret = find_gca_candidates(self, revs, revcount);
+	if (ret == NULL)
+		goto bail;
+
+done:
+	free(revs);
+	return ret;
+
+bail:
+	free(revs);
+	Py_XDECREF(ret);
+	return NULL;
+}
+
+/*
  * Invalidate any trie entries introduced by added revs.
  */
 static void nt_invalidate_added(indexObject *self, Py_ssize_t start)
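The "cheesy bloom filter" in index_commonancestorsheads keeps a 64-bit mask
of val & 0x3f bits so the O(n) duplicate scan only runs when two inputs
collide modulo 64, the common disjoint case being a single mask test. The
same trick sketched in Python:

    def dedup(vals):
        repeat, out = 0, []
        for val in vals:
            x = 1 << (val & 0x3f)
            if repeat & x:       # possible duplicate - verify the hard way
                if val in out:
                    continue
            else:
                repeat |= x      # first value seen in this bucket
            out.append(val)
        return out

    print(dedup([5, 69, 5, 12]))  # [5, 69, 12]; 69 collides with 5 mod 64
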
@@ -1792,6 +1878,9 @@
 static PyMethodDef index_methods[] = {
 	{"ancestors", (PyCFunction)index_ancestors, METH_VARARGS,
 	 "return the gca set of the given revs"},
+	{"commonancestorsheads", (PyCFunction)index_commonancestorsheads,
+	  METH_VARARGS,
+	  "return the heads of the common ancestors of the given revs"},
 	{"clearcaches", (PyCFunction)index_clearcaches, METH_NOARGS,
 	 "clear the index caches"},
 	{"get", (PyCFunction)index_m_get, METH_VARARGS,
@@ -1918,6 +2007,16 @@
 
 static void module_init(PyObject *mod)
 {
+	/* This module constant has two purposes.  First, it lets us unit test
+	 * the ImportError raised without hard-coding any error text.  This
+	 * means we can change the text in the future without breaking tests,
+	 * even across changesets without a recompile.  Second, its presence
+	 * can be used to determine whether the version-checking logic is
+	 * present, which also helps in testing across changesets without a
+	 * recompile.  Note that this means the pure-Python version of parsers
+	 * should not have this module constant. */
+	PyModule_AddStringConstant(mod, "versionerrortext", versionerrortext);
+
 	dirs_module_init(mod);
 
 	indexType.tp_new = PyType_GenericNew;
@@ -1935,6 +2034,24 @@
 	dirstate_unset = Py_BuildValue("ciii", 'n', 0, -1, -1);
 }
 
+static int check_python_version(void)
+{
+	PyObject *sys = PyImport_ImportModule("sys");
+	long hexversion = PyInt_AsLong(PyObject_GetAttrString(sys, "hexversion"));
+	/* sys.hexversion is a 32-bit number by default, so the -1 case
+	 * should only occur in unusual circumstances (e.g. if sys.hexversion
+	 * is manually set to an invalid value). */
+	if ((hexversion == -1) || (hexversion >> 16 != PY_VERSION_HEX >> 16)) {
+		PyErr_Format(PyExc_ImportError, "%s: The Mercurial extension "
+			"modules were compiled with Python " PY_VERSION ", but "
+			"Mercurial is currently using Python with sys.hexversion=%ld: "
+			"Python %s\n at: %s", versionerrortext, hexversion,
+			Py_GetVersion(), Py_GetProgramFullPath());
+		return -1;
+	}
+	return 0;
+}
+
 #ifdef IS_PY3K
 static struct PyModuleDef parsers_module = {
 	PyModuleDef_HEAD_INIT,
@@ -1946,14 +2063,22 @@
 
 PyMODINIT_FUNC PyInit_parsers(void)
 {
-	PyObject *mod = PyModule_Create(&parsers_module);
+	PyObject *mod;
+
+	if (check_python_version() == -1)
+		return;
+	mod = PyModule_Create(&parsers_module);
 	module_init(mod);
 	return mod;
 }
 #else
 PyMODINIT_FUNC initparsers(void)
 {
-	PyObject *mod = Py_InitModule3("parsers", methods, parsers_doc);
+	PyObject *mod;
+
+	if (check_python_version() == -1)
+		return;
+	mod = Py_InitModule3("parsers", methods, parsers_doc);
 	module_init(mod);
 }
 #endif
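check_python_version compares only the upper halves of the hex version
numbers, so any micro release of the same major.minor line is accepted. The
comparison seen from the Python side:

    import sys

    def sameminor(hexversion, expected):
        # sys.hexversion packs major.minor.micro as 0xMMmmuu00; shifting
        # out the low 16 bits leaves just major and minor to compare
        return hexversion >> 16 == expected >> 16

    print(sameminor(0x02070600, 0x02070300))  # True: 2.7.6 vs 2.7.3
    print(sameminor(0x02060900, 0x02070600))  # False: 2.6 vs 2.7
    print(sameminor(sys.hexversion, sys.hexversion))  # trivially True
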
--- a/mercurial/patch.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/patch.py	Thu Apr 17 19:36:17 2014 -0400
@@ -1859,7 +1859,7 @@
             # set numbers to 0 anyway when starting new file
             adds, removes, isbinary = 0, 0, False
             if line.startswith('diff --git a/'):
-                filename = gitre.search(line).group(1)
+                filename = gitre.search(line).group(2)
             elif line.startswith('diff -r'):
                 # format: "diff -r ... -r ... filename"
                 filename = diffre.search(line).group(1)
--- a/mercurial/phases.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/phases.py	Thu Apr 17 19:36:17 2014 -0400
@@ -258,7 +258,7 @@
         filtered = False
         nodemap = repo.changelog.nodemap # to filter unknown nodes
         for phase, nodes in enumerate(self.phaseroots):
-            missing = [node for node in nodes if node not in nodemap]
+            missing = sorted(node for node in nodes if node not in nodemap)
             if missing:
                 for mnode in missing:
                     repo.ui.debug(
--- a/mercurial/repair.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/repair.py	Thu Apr 17 19:36:17 2014 -0400
@@ -6,24 +6,24 @@
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
-from mercurial import changegroup
+from mercurial import changegroup, exchange
 from mercurial.node import short
 from mercurial.i18n import _
-import os
 import errno
 
 def _bundle(repo, bases, heads, node, suffix, compress=True):
     """create a bundle with the specified revisions as a backup"""
-    cg = repo.changegroupsubset(bases, heads, 'strip')
-    backupdir = repo.join("strip-backup")
-    if not os.path.isdir(backupdir):
-        os.mkdir(backupdir)
-    name = os.path.join(backupdir, "%s-%s.hg" % (short(node), suffix))
+    cg = changegroup.changegroupsubset(repo, bases, heads, 'strip')
+    backupdir = "strip-backup"
+    vfs = repo.vfs
+    if not vfs.isdir(backupdir):
+        vfs.mkdir(backupdir)
+    name = "%s/%s-%s.hg" % (backupdir, short(node), suffix)
     if compress:
         bundletype = "HG10BZ"
     else:
         bundletype = "HG10UN"
-    return changegroup.writebundle(cg, name, bundletype)
+    return changegroup.writebundle(cg, name, bundletype, vfs)
 
 def _collectfiles(repo, striprev):
     """find out the filelogs affected by the strip"""
@@ -108,10 +108,13 @@
 
     # create a changegroup for all the branches we need to keep
     backupfile = None
+    vfs = repo.vfs
     if backup == "all":
         backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
-        repo.ui.status(_("saved backup bundle to %s\n") % backupfile)
-        repo.ui.log("backupbundle", "saved backup bundle to %s\n", backupfile)
+        repo.ui.status(_("saved backup bundle to %s\n") %
+                       vfs.join(backupfile))
+        repo.ui.log("backupbundle", "saved backup bundle to %s\n",
+                    vfs.join(backupfile))
     if saveheads or savebases:
         # do not compress partial bundle if we remove it from disk later
         chgrpfile = _bundle(repo, savebases, saveheads, node, 'temp',
@@ -134,6 +137,8 @@
             for i in xrange(offset, len(tr.entries)):
                 file, troffset, ignore = tr.entries[i]
                 repo.sopener(file, 'a').truncate(troffset)
+                if troffset == 0:
+                    repo.store.markremoved(file)
             tr.close()
         except: # re-raises
             tr.abort()
@@ -141,25 +146,27 @@
 
         if saveheads or savebases:
             ui.note(_("adding branch\n"))
-            f = open(chgrpfile, "rb")
-            gen = changegroup.readbundle(f, chgrpfile)
+            f = vfs.open(chgrpfile, "rb")
+            gen = exchange.readbundle(ui, f, chgrpfile, vfs)
             if not repo.ui.verbose:
                 # silence internal shuffling chatter
                 repo.ui.pushbuffer()
-            repo.addchangegroup(gen, 'strip', 'bundle:' + chgrpfile, True)
+            changegroup.addchangegroup(repo, gen, 'strip',
+                                       'bundle:' + vfs.join(chgrpfile), True)
             if not repo.ui.verbose:
                 repo.ui.popbuffer()
             f.close()
             if not keeppartialbundle:
-                os.unlink(chgrpfile)
+                vfs.unlink(chgrpfile)
 
         # remove undo files
-        for undofile in repo.undofiles():
+        for undovfs, undofile in repo.undofiles():
             try:
-                os.unlink(undofile)
+                undovfs.unlink(undofile)
             except OSError, e:
                 if e.errno != errno.ENOENT:
-                    ui.warn(_('error removing %s: %s\n') % (undofile, str(e)))
+                    ui.warn(_('error removing %s: %s\n') %
+                            (undovfs.join(undofile), str(e)))
 
         for m in updatebm:
             bm[m] = repo[newbmtarget].node()
@@ -167,10 +174,10 @@
     except: # re-raises
         if backupfile:
             ui.warn(_("strip failed, full bundle stored in '%s'\n")
-                    % backupfile)
+                    % vfs.join(backupfile))
         elif saveheads:
             ui.warn(_("strip failed, partial bundle stored in '%s'\n")
-                    % chgrpfile)
+                    % vfs.join(chgrpfile))
         raise
 
     repo.destroyed()
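repair.py now keeps backup bundle paths relative to the repository vfs and
only joins them against the vfs root for user-facing messages. A stand-in
vfs (hypothetical, far simpler than Mercurial's) showing the split:

    import os

    class fakevfs(object):
        # minimal stand-in: Mercurial's vfs also audits paths, opens
        # files, creates directories, and more
        def __init__(self, base):
            self.base = base
        def join(self, path):
            return os.path.join(self.base, path)

    vfs = fakevfs('/repo/.hg')
    backupfile = 'strip-backup/1234abcd-backup.hg'  # stored vfs-relative
    print('saved backup bundle to %s' % vfs.join(backupfile))
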
--- a/mercurial/repoview.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/repoview.py	Thu Apr 17 19:36:17 2014 -0400
@@ -9,7 +9,8 @@
 import copy
 import phases
 import util
-import obsolete, revset
+import obsolete
+import tags as tagsmod
 
 
 def hideablerevs(repo):
@@ -18,6 +19,31 @@
     This is a standalone function to help extensions to wrap it."""
     return obsolete.getrevs(repo, 'obsolete')
 
+def _gethiddenblockers(repo):
+    """Get revisions that will block hidden changesets from being filtered
+
+    This is a standalone function to help extensions to wrap it."""
+    assert not repo.changelog.filteredrevs
+    hideable = hideablerevs(repo)
+    blockers = []
+    if hideable:
+        # We use cl to avoid recursive lookup from repo[xxx]
+        cl = repo.changelog
+        firsthideable = min(hideable)
+        revs = cl.revs(start=firsthideable)
+        tofilter = repo.revs(
+            '(%ld) and children(%ld)', list(revs), list(hideable))
+        blockers = [r for r in tofilter if r not in hideable]
+        for par in repo[None].parents():
+            blockers.append(par.rev())
+        for bm in repo._bookmarks.values():
+            blockers.append(cl.rev(bm))
+        tags = {}
+        tagsmod.readlocaltags(repo.ui, repo, tags, {})
+        if tags:
+            blockers.extend(cl.rev(t[0]) for t in tags.values())
+    return blockers
+
 def computehidden(repo):
     """compute the set of hidden revision to filter
 
@@ -26,15 +52,7 @@
     hideable = hideablerevs(repo)
     if hideable:
         cl = repo.changelog
-        firsthideable = min(hideable)
-        revs = cl.revs(start=firsthideable)
-        blockers = [r for r in revset._children(repo, revs, hideable)
-                      if r not in hideable]
-        for par in repo[None].parents():
-            blockers.append(par.rev())
-        for bm in repo._bookmarks.values():
-            blockers.append(repo[bm].rev())
-        blocked = cl.ancestors(blockers, inclusive=True)
+        blocked = cl.ancestors(_gethiddenblockers(repo), inclusive=True)
         return frozenset(r for r in hideable if r not in blocked)
     return frozenset()
 
@@ -95,7 +113,7 @@
 
 # function to compute filtered set
 #
-# When addding a new filter you MUST update the table at:
+# When adding a new filter you MUST update the table at:
 #     mercurial.branchmap.subsettable
 # Otherwise your filter will have to recompute all its branches cache
 # from scratch (very slow).
--- a/mercurial/revlog.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/revlog.py	Thu Apr 17 19:36:17 2014 -0400
@@ -734,6 +734,15 @@
                 break
         return False
 
+    def commonancestorsheads(self, a, b):
+        """calculate all the heads of the common ancestors of nodes a and b"""
+        a, b = self.rev(a), self.rev(b)
+        try:
+            ancs = self.index.commonancestorsheads(a, b)
+        except (AttributeError, OverflowError): # C implementation failed
+            ancs = ancestor.commonancestorsheads(self.parentrevs, a, b)
+        return map(self.node, ancs)
+
     def ancestor(self, a, b):
         """calculate the least common ancestor of nodes a and b"""
 
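commonancestorsheads in revlog.py follows the usual C-with-Python-fallback
pattern: AttributeError covers an older C module lacking the new method,
OverflowError covers inputs beyond the C implementation's capacity. The
shape of that dispatch, with stand-in implementations:

    def commonancestorsheads(index, purepy, a, b):
        try:
            return index.commonancestorsheads(a, b)
        except (AttributeError, OverflowError):  # C implementation failed
            return purepy(a, b)

    class oldindex(object):
        pass  # simulates a C module predating commonancestorsheads

    print(commonancestorsheads(oldindex(), lambda a, b: [min(a, b)], 3, 5))
    # [3], from the pure-Python fallback
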
--- a/mercurial/revset.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/revset.py	Thu Apr 17 19:36:17 2014 -0400
@@ -8,7 +8,9 @@
 import re
 import parser, util, error, discovery, hbisect, phases
 import node
+import heapq
 import match as matchmod
+import ancestor as ancestormod
 from i18n import _
 import encoding
 import obsolete as obsmod
@@ -19,43 +21,64 @@
     """Like revlog.ancestors(), but supports followfirst."""
     cut = followfirst and 1 or None
     cl = repo.changelog
-    visit = util.deque(revs)
-    seen = set([node.nullrev])
-    while visit:
-        for parent in cl.parentrevs(visit.popleft())[:cut]:
-            if parent not in seen:
-                visit.append(parent)
-                seen.add(parent)
-                yield parent
+
+    def iterate():
+        revqueue, revsnode = None, None
+        h = []
+
+        revs.descending()
+        revqueue = util.deque(revs)
+        if revqueue:
+            revsnode = revqueue.popleft()
+            heapq.heappush(h, -revsnode)
+
+        seen = set([node.nullrev])
+        while h:
+            current = -heapq.heappop(h)
+            if current not in seen:
+                if revsnode and current == revsnode:
+                    if revqueue:
+                        revsnode = revqueue.popleft()
+                        heapq.heappush(h, -revsnode)
+                seen.add(current)
+                yield current
+                for parent in cl.parentrevs(current)[:cut]:
+                    if parent != node.nullrev:
+                        heapq.heappush(h, -parent)
+
+    return _descgeneratorset(iterate())
 
 def _revdescendants(repo, revs, followfirst):
     """Like revlog.descendants() but supports followfirst."""
     cut = followfirst and 1 or None
-    cl = repo.changelog
-    first = min(revs)
-    nullrev = node.nullrev
-    if first == nullrev:
-        # Are there nodes with a null first parent and a non-null
-        # second one? Maybe. Do we care? Probably not.
-        for i in cl:
-            yield i
-        return
-
-    seen = set(revs)
-    for i in cl.revs(first + 1):
-        for x in cl.parentrevs(i)[:cut]:
-            if x != nullrev and x in seen:
-                seen.add(i)
+
+    def iterate():
+        cl = repo.changelog
+        first = min(revs)
+        nullrev = node.nullrev
+        if first == nullrev:
+            # Are there nodes with a null first parent and a non-null
+            # second one? Maybe. Do we care? Probably not.
+            for i in cl:
                 yield i
-                break
+        else:
+            seen = set(revs)
+            for i in cl.revs(first + 1):
+                for x in cl.parentrevs(i)[:cut]:
+                    if x != nullrev and x in seen:
+                        seen.add(i)
+                        yield i
+                        break
+
+    return _ascgeneratorset(iterate())
 
 def _revsbetween(repo, roots, heads):
     """Return all paths between roots and heads, inclusive of both endpoint
     sets."""
     if not roots:
-        return []
+        return baseset([])
     parentrevs = repo.changelog.parentrevs
-    visit = heads[:]
+    visit = baseset(heads)
     reachable = set()
     seen = {}
     minroot = min(roots)
@@ -72,12 +95,12 @@
             if parent >= minroot and parent not in seen:
                 visit.append(parent)
     if not reachable:
-        return []
+        return baseset([])
     for rev in sorted(seen):
         for parent in seen[rev]:
             if parent in reachable:
                 reachable.add(rev)
-    return sorted(reachable)
+    return baseset(sorted(reachable))
 
 elements = {
     "(": (20, ("group", 1, ")"), ("func", 1, ")")),
@@ -105,7 +128,7 @@
 
 keywords = set(['and', 'or', 'not'])
 
-def tokenize(program):
+def tokenize(program, lookup=None):
     '''
     Parse a revset statement into a stream of tokens
 
@@ -155,7 +178,7 @@
             pos += 1
             while pos < l: # find end of symbol
                 d = program[pos]
-                if not (d.isalnum() or d in "._/@" or ord(d) > 127):
+                if not (d.isalnum() or d in "-._/@" or ord(d) > 127):
                     break
                 if d == '.' and program[pos - 1] == '.': # special case for ..
                     pos -= 1
@@ -164,6 +187,22 @@
             sym = program[s:pos]
             if sym in keywords: # operator keywords
                 yield (sym, None, s)
+            elif '-' in sym:
+                # some jerk gave us foo-bar-baz, try to check if it's a symbol
+                if lookup and lookup(sym):
+                    # looks like a real symbol
+                    yield ('symbol', sym, s)
+                else:
+                    # looks like an expression
+                    parts = sym.split('-')
+                    for p in parts[:-1]:
+                        if p: # possible consecutive -
+                            yield ('symbol', p, s)
+                        s += len(p)
+                        yield ('-', None, pos)
+                        s += 1
+                    if parts[-1]: # possible trailing -
+                        yield ('symbol', parts[-1], s)
             else:
                 yield ('symbol', sym, s)
             pos -= 1
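The tokenizer change above disambiguates hyphenated names: if the optional
lookup says foo-bar is a known symbol it stays whole, otherwise it splits
into symbols around '-' operators. The splitting logic in isolation
(positions omitted for brevity):

    def splitsym(sym, lookup=None):
        if lookup and lookup(sym):
            return [('symbol', sym)]  # looks like a real symbol
        tokens = []
        parts = sym.split('-')
        for p in parts[:-1]:
            if p:  # empty parts come from consecutive '-'
                tokens.append(('symbol', p))
            tokens.append(('-', None))
        if parts[-1]:  # absent for a trailing '-'
            tokens.append(('symbol', parts[-1]))
        return tokens

    print(splitsym('foo-bar', lookup={'foo-bar'}.__contains__))
    # [('symbol', 'foo-bar')]
    print(splitsym('foo-bar'))
    # [('symbol', 'foo'), ('-', None), ('symbol', 'bar')]
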
@@ -195,7 +234,10 @@
 def getset(repo, subset, x):
     if not x:
         raise error.ParseError(_("missing argument"))
-    return methods[x[0]](repo, subset, *x[1:])
+    s = methods[x[0]](repo, subset, *x[1:])
+    if util.safehasattr(s, 'set'):
+        return s
+    return baseset(s)
 
 def _getrevsource(repo, r):
     extra = repo[r].extra()
@@ -212,10 +254,10 @@
 def stringset(repo, subset, x):
     x = repo[x].rev()
     if x == -1 and len(subset) == len(repo):
-        return [-1]
+        return baseset([-1])
     if len(subset) == len(repo) or x in subset:
-        return [x]
-    return []
+        return baseset([x])
+    return baseset([])
 
 def symbolset(repo, subset, x):
     if x in symbols:
@@ -223,39 +265,36 @@
     return stringset(repo, subset, x)
 
 def rangeset(repo, subset, x, y):
-    cl = repo.changelog
+    cl = baseset(repo.changelog)
     m = getset(repo, cl, x)
     n = getset(repo, cl, y)
 
     if not m or not n:
-        return []
+        return baseset([])
     m, n = m[0], n[-1]
 
     if m < n:
-        r = range(m, n + 1)
+        r = spanset(repo, m, n + 1)
     else:
-        r = range(m, n - 1, -1)
-    s = set(subset)
-    return [x for x in r if x in s]
+        r = spanset(repo, m, n - 1)
+    return r & subset
 
 def dagrange(repo, subset, x, y):
-    r = list(repo)
+    r = spanset(repo)
     xs = _revsbetween(repo, getset(repo, r, x), getset(repo, r, y))
-    s = set(subset)
-    return [r for r in xs if r in s]
+    s = subset.set()
+    return xs.filter(lambda r: r in s)
 
 def andset(repo, subset, x, y):
     return getset(repo, getset(repo, subset, x), y)
 
 def orset(repo, subset, x, y):
     xl = getset(repo, subset, x)
-    s = set(xl)
-    yl = getset(repo, [r for r in subset if r not in s], y)
+    yl = getset(repo, subset - xl, y)
     return xl + yl
 
 def notset(repo, subset, x):
-    s = set(getset(repo, subset, x))
-    return [r for r in subset if r not in s]
+    return subset - getset(repo, subset, x)
 
 def listset(repo, subset, a, b):
     raise error.ParseError(_("can't use a list in this context"))
@@ -281,7 +320,7 @@
 
 def ancestor(repo, subset, x):
     """``ancestor(*changeset)``
-    Greatest common ancestor of the changesets.
+    A greatest common ancestor of the changesets.
 
     Accepts 0 or more changesets.
     Will return empty list when passed no args.
@@ -289,30 +328,27 @@
     """
     # i18n: "ancestor" is a keyword
     l = getlist(x)
-    rl = list(repo)
+    rl = spanset(repo)
     anc = None
 
     # (getset(repo, rl, i) for i in l) generates a list of lists
-    rev = repo.changelog.rev
-    ancestor = repo.changelog.ancestor
-    node = repo.changelog.node
     for revs in (getset(repo, rl, i) for i in l):
         for r in revs:
             if anc is None:
-                anc = r
+                anc = repo[r]
             else:
-                anc = rev(ancestor(node(anc), node(r)))
-
-    if anc is not None and anc in subset:
-        return [anc]
-    return []
+                anc = anc.ancestor(repo[r])
+
+    if anc is not None and anc.rev() in subset:
+        return baseset([anc.rev()])
+    return baseset([])
 
 def _ancestors(repo, subset, x, followfirst=False):
-    args = getset(repo, list(repo), x)
+    args = getset(repo, spanset(repo), x)
     if not args:
-        return []
-    s = set(_revancestors(repo, args, followfirst)) | set(args)
-    return [r for r in subset if r in s]
+        return baseset([])
+    s = _revancestors(repo, args, followfirst)
+    return subset.filter(lambda r: r in s)
 
 def ancestors(repo, subset, x):
     """``ancestors(set)``
@@ -336,11 +372,11 @@
         raise error.ParseError(_("~ expects a number"))
     ps = set()
     cl = repo.changelog
-    for r in getset(repo, cl, x):
+    for r in getset(repo, baseset(cl), x):
         for i in range(n):
             r = cl.parentrevs(r)[0]
         ps.add(r)
-    return [r for r in subset if r in ps]
+    return subset.filter(lambda r: r in ps)
 
 def author(repo, subset, x):
     """``author(string)``
@@ -349,7 +385,27 @@
     # i18n: "author" is a keyword
     n = encoding.lower(getstring(x, _("author requires a string")))
     kind, pattern, matcher = _substringmatcher(n)
-    return [r for r in subset if matcher(encoding.lower(repo[r].user()))]
+    return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())))
+
+def only(repo, subset, x):
+    """``only(set, [set])``
+    Changesets that are ancestors of the first set that are not ancestors
+    of any other head in the repo. If a second set is specified, the result
+    is ancestors of the first set that are not ancestors of the second set
+    (i.e. ::<set1> - ::<set2>).
+    """
+    cl = repo.changelog
+    args = getargs(x, 1, 2, _('only takes one or two arguments'))
+    include = getset(repo, spanset(repo), args[0]).set()
+    if len(args) == 1:
+        descendants = set(_revdescendants(repo, include, False))
+        exclude = [rev for rev in cl.headrevs()
+            if rev not in descendants and rev not in include]
+    else:
+        exclude = getset(repo, spanset(repo), args[1])
+
+    results = set(ancestormod.missingancestors(include, exclude, cl.parentrevs))
+    return lazyset(subset, lambda x: x in results)
 
 def bisect(repo, subset, x):
     """``bisect(string)``
@@ -366,7 +422,7 @@
     # i18n: "bisect" is a keyword
     status = getstring(x, _("bisect requires a string")).lower()
     state = set(hbisect.get(repo, status))
-    return [r for r in subset if r in state]
+    return subset.filter(lambda r: r in state)
 
 # Backward-compatibility
 # - no help entry so that we do not advertise it any more
@@ -393,7 +449,7 @@
             if not bmrev:
                 raise util.Abort(_("bookmark '%s' does not exist") % bm)
             bmrev = repo[bmrev].rev()
-            return [r for r in subset if r == bmrev]
+            return subset.filter(lambda r: r == bmrev)
         else:
             matchrevs = set()
             for name, bmrev in repo._bookmarks.iteritems():
@@ -405,11 +461,11 @@
             bmrevs = set()
             for bmrev in matchrevs:
                 bmrevs.add(repo[bmrev].rev())
-            return [r for r in subset if r in bmrevs]
+            return subset & bmrevs
 
     bms = set([repo[r].rev()
                for r in repo._bookmarks.values()])
-    return [r for r in subset if r in bms]
+    return subset.filter(lambda r: r in bms)
 
 def branch(repo, subset, x):
     """``branch(string or set)``
@@ -431,16 +487,16 @@
             # note: falls through to the revspec case if no branch with
             # this name exists
             if pattern in repo.branchmap():
-                return [r for r in subset if matcher(repo[r].branch())]
+                return subset.filter(lambda r: matcher(repo[r].branch()))
         else:
-            return [r for r in subset if matcher(repo[r].branch())]
-
-    s = getset(repo, list(repo), x)
+            return subset.filter(lambda r: matcher(repo[r].branch()))
+
+    s = getset(repo, spanset(repo), x)
     b = set()
     for r in s:
         b.add(repo[r].branch())
-    s = set(s)
-    return [r for r in subset if r in s or repo[r].branch() in b]
+    s = s.set()
+    return subset.filter(lambda r: r in s or repo[r].branch() in b)
 
 def bumped(repo, subset, x):
     """``bumped()``
@@ -451,7 +507,7 @@
     # i18n: "bumped" is a keyword
     getargs(x, 0, 0, _("bumped takes no arguments"))
     bumped = obsmod.getrevs(repo, 'bumped')
-    return [r for r in subset if r in bumped]
+    return subset & bumped
 
 def bundle(repo, subset, x):
     """``bundle()``
@@ -463,43 +519,43 @@
         bundlerevs = repo.changelog.bundlerevs
     except AttributeError:
         raise util.Abort(_("no bundle provided - specify with -R"))
-    return [r for r in subset if r in bundlerevs]
+    return subset & bundlerevs
 
 def checkstatus(repo, subset, pat, field):
-    m = None
-    s = []
     hasset = matchmod.patkind(pat) == 'set'
-    fname = None
-    for r in subset:
-        c = repo[r]
+
+    def matches(x):
+        m = None
+        fname = None
+        c = repo[x]
         if not m or hasset:
             m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
             if not m.anypats() and len(m.files()) == 1:
                 fname = m.files()[0]
         if fname is not None:
             if fname not in c.files():
-                continue
+                return False
         else:
             for f in c.files():
                 if m(f):
                     break
             else:
-                continue
+                return False
         files = repo.status(c.p1().node(), c.node())[field]
         if fname is not None:
             if fname in files:
-                s.append(r)
+                return True
         else:
             for f in files:
                 if m(f):
-                    s.append(r)
-                    break
-    return s
+                    return True
+
+    return subset.filter(matches)
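Most conversions in this file follow the same pattern: an eager
`[r for r in subset if pred(r)]` becomes `subset.filter(pred)`, so the
predicate only runs when the result is actually iterated. A standalone sketch
of that difference using a plain generator (not the real lazyset class):

    calls = []

    def pred(r):
        calls.append(r)
        return r % 2 == 0

    subset = list(range(10))
    eager = [r for r in subset if pred(r)]  # pred ran for all 10 revisions
    assert calls == list(range(10))
    del calls[:]
    lazy = (r for r in subset if pred(r))   # generator: nothing evaluated yet
    assert calls == []
    next(iter(lazy))                        # evaluates only up to the first hit
    assert calls == [0]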
 
 def _children(repo, narrow, parentset):
     cs = set()
     if not parentset:
-        return cs
+        return baseset(cs)
     pr = repo.changelog.parentrevs
     minrev = min(parentset)
     for r in narrow:
@@ -508,15 +564,15 @@
         for p in pr(r):
             if p in parentset:
                 cs.add(r)
-    return cs
+    return baseset(cs)
 
 def children(repo, subset, x):
     """``children(set)``
     Child changesets of changesets in set.
     """
-    s = set(getset(repo, list(repo), x))
+    s = getset(repo, baseset(repo), x).set()
     cs = _children(repo, subset, s)
-    return [r for r in subset if r in cs]
+    return subset & cs
 
 def closed(repo, subset, x):
     """``closed()``
@@ -524,7 +580,7 @@
     """
     # i18n: "closed" is a keyword
     getargs(x, 0, 0, _("closed takes no arguments"))
-    return [r for r in subset if repo[r].closesbranch()]
+    return subset.filter(lambda r: repo[r].closesbranch())
 
 def contains(repo, subset, x):
     """``contains(pattern)``
@@ -537,23 +593,21 @@
     """
     # i18n: "contains" is a keyword
     pat = getstring(x, _("contains requires a pattern"))
-    s = []
-    if not matchmod.patkind(pat):
-        pat = pathutil.canonpath(repo.root, repo.getcwd(), pat)
-        for r in subset:
-            if pat in repo[r]:
-                s.append(r)
-    else:
-        m = None
-        for r in subset:
-            c = repo[r]
-            if not m or matchmod.patkind(pat) == 'set':
-                m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
+
+    def matches(x):
+        if not matchmod.patkind(pat):
+            pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
+            if pats in repo[x]:
+                return True
+        else:
+            c = repo[x]
+            m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
             for f in c.manifest():
                 if m(f):
-                    s.append(r)
-                    break
-    return s
+                    return True
+        return False
+
+    return subset.filter(matches)
 
 def converted(repo, subset, x):
     """``converted([id])``
@@ -575,7 +629,7 @@
         source = repo[r].extra().get('convert_revision', None)
         return source is not None and (rev is None or source.startswith(rev))
 
-    return [r for r in subset if _matchvalue(r)]
+    return subset.filter(lambda r: _matchvalue(r))
 
 def date(repo, subset, x):
     """``date(interval)``
@@ -584,7 +638,7 @@
     # i18n: "date" is a keyword
     ds = getstring(x, _("date requires a string"))
     dm = util.matchdate(ds)
-    return [r for r in subset if dm(repo[r].date()[0])]
+    return subset.filter(lambda x: dm(repo[x].date()[0]))
 
 def desc(repo, subset, x):
     """``desc(string)``
@@ -592,19 +646,30 @@
     """
     # i18n: "desc" is a keyword
     ds = encoding.lower(getstring(x, _("desc requires a string")))
-    l = []
-    for r in subset:
-        c = repo[r]
-        if ds in encoding.lower(c.description()):
-            l.append(r)
-    return l
+
+    def matches(x):
+        c = repo[x]
+        return ds in encoding.lower(c.description())
+
+    return subset.filter(matches)
 
 def _descendants(repo, subset, x, followfirst=False):
-    args = getset(repo, list(repo), x)
+    args = getset(repo, spanset(repo), x)
     if not args:
-        return []
-    s = set(_revdescendants(repo, args, followfirst)) | set(args)
-    return [r for r in subset if r in s]
+        return baseset([])
+    s = _revdescendants(repo, args, followfirst)
+
+    # Both sets need to be ascending in order to lazily return the union
+    # in the correct order.
+    args.ascending()
+
+    subsetset = subset.set()
+    result = (orderedlazyset(s, subsetset.__contains__, ascending=True) +
+              orderedlazyset(args, subsetset.__contains__, ascending=True))
+
+    # Wrap result in a lazyset since it's an _addset, which doesn't implement
+    # all the necessary functions to be consumed by callers.
+    return orderedlazyset(result, lambda r: True, ascending=True)
 
 def descendants(repo, subset, x):
     """``descendants(set)``
@@ -624,9 +689,9 @@
     is the same as passing all().
     """
     if x is not None:
-        args = set(getset(repo, list(repo), x))
+        args = getset(repo, spanset(repo), x).set()
     else:
-        args = set(getall(repo, list(repo), x))
+        args = getall(repo, spanset(repo), x).set()
 
     dests = set()
 
@@ -659,7 +724,7 @@
             r = src
             src = _getrevsource(repo, r)
 
-    return [r for r in subset if r in dests]
+    return subset.filter(lambda r: r in dests)
 
 def divergent(repo, subset, x):
     """``divergent()``
@@ -668,7 +733,7 @@
     # i18n: "divergent" is a keyword
     getargs(x, 0, 0, _("divergent takes no arguments"))
     divergent = obsmod.getrevs(repo, 'divergent')
-    return [r for r in subset if r in divergent]
+    return subset.filter(lambda r: r in divergent)
 
 def draft(repo, subset, x):
     """``draft()``
@@ -676,7 +741,7 @@
     # i18n: "draft" is a keyword
     getargs(x, 0, 0, _("draft takes no arguments"))
     pc = repo._phasecache
-    return [r for r in subset if pc.phase(repo, r) == phases.draft]
+    return subset.filter(lambda r: pc.phase(repo, r) == phases.draft)
 
 def extinct(repo, subset, x):
     """``extinct()``
@@ -685,7 +750,7 @@
     # i18n: "extinct" is a keyword
     getargs(x, 0, 0, _("extinct takes no arguments"))
     extincts = obsmod.getrevs(repo, 'extinct')
-    return [r for r in subset if r in extincts]
+    return subset & extincts
 
 def extra(repo, subset, x):
     """``extra(label, [value])``
@@ -712,7 +777,7 @@
         extra = repo[r].extra()
         return label in extra and (value is None or matcher(extra[label]))
 
-    return [r for r in subset if _matchvalue(r)]
+    return subset.filter(lambda r: _matchvalue(r))
 
 def filelog(repo, subset, x):
     """``filelog(pattern)``
@@ -744,7 +809,7 @@
                 for fr in fl:
                     s.add(fl.linkrev(fr))
 
-    return [r for r in subset if r in s]
+    return subset.filter(lambda r: r in s)
 
 def first(repo, subset, x):
     """``first(set, [n])``
@@ -763,11 +828,11 @@
             # include the revision responsible for the most recent version
             s.add(cx.linkrev())
         else:
-            return []
+            return baseset([])
     else:
-        s = set(_revancestors(repo, [c.rev()], followfirst)) | set([c.rev()])
-
-    return [r for r in subset if r in s]
+        s = _revancestors(repo, baseset([c.rev()]), followfirst)
+
+    return subset.filter(lambda r: r in s)
 
 def follow(repo, subset, x):
     """``follow([file])``
@@ -802,14 +867,15 @@
         gr = re.compile(getstring(x, _("grep requires a string")))
     except re.error, e:
         raise error.ParseError(_('invalid match pattern: %s') % e)
-    l = []
-    for r in subset:
-        c = repo[r]
+
+    def matches(x):
+        c = repo[x]
         for e in c.files() + [c.user(), c.description()]:
             if gr.search(e):
-                l.append(r)
-                break
-    return l
+                return True
+        return False
+
+    return subset.filter(matches)
 
 def _matchfiles(repo, subset, x):
     # _matchfiles takes a revset list of prefixed arguments:
@@ -858,10 +924,10 @@
             hasset = True
     if not default:
         default = 'glob'
-    m = None
-    s = []
-    for r in subset:
-        c = repo[r]
+
+    def matches(x):
+        m = None
+        c = repo[x]
         if not m or (hasset and rev is None):
             ctx = c
             if rev is not None:
@@ -870,9 +936,10 @@
                                exclude=exc, ctx=ctx, default=default)
         for f in c.files():
             if m(f):
-                s.append(r)
-                break
-    return s
+                return True
+        return False
+
+    return subset.filter(matches)
 
 def hasfile(repo, subset, x):
     """``file(pattern)``
@@ -896,15 +963,15 @@
     hs = set()
     for b, ls in repo.branchmap().iteritems():
         hs.update(repo[h].rev() for h in ls)
-    return [r for r in subset if r in hs]
+    return baseset(hs).filter(subset.__contains__)
 
 def heads(repo, subset, x):
     """``heads(set)``
     Members of set with no children in set.
     """
     s = getset(repo, subset, x)
-    ps = set(parents(repo, subset, x))
-    return [r for r in s if r not in ps]
+    ps = parents(repo, subset, x)
+    return s - ps
 
 def hidden(repo, subset, x):
     """``hidden()``
@@ -913,7 +980,7 @@
     # i18n: "hidden" is a keyword
     getargs(x, 0, 0, _("hidden takes no arguments"))
     hiddenrevs = repoview.filterrevs(repo, 'visible')
-    return [r for r in subset if r in hiddenrevs]
+    return subset & hiddenrevs
 
 def keyword(repo, subset, x):
     """``keyword(string)``
@@ -922,13 +989,13 @@
     """
     # i18n: "keyword" is a keyword
     kw = encoding.lower(getstring(x, _("keyword requires a string")))
-    l = []
-    for r in subset:
+
+    def matches(r):
         c = repo[r]
-        if util.any(kw in encoding.lower(t)
-                    for t in c.files() + [c.user(), c.description()]):
-            l.append(r)
-    return l
+        return util.any(kw in encoding.lower(t) for t in c.files() + [c.user(),
+            c.description()])
+
+    return subset.filter(matches)
 
 def limit(repo, subset, x):
     """``limit(set, [n])``
@@ -944,9 +1011,18 @@
     except (TypeError, ValueError):
         # i18n: "limit" is a keyword
         raise error.ParseError(_("limit expects a number"))
-    ss = set(subset)
-    os = getset(repo, list(repo), l[0])[:lim]
-    return [r for r in os if r in ss]
+    ss = subset.set()
+    os = getset(repo, spanset(repo), l[0])
+    bs = baseset([])
+    it = iter(os)
+    for x in xrange(lim):
+        try:
+            y = it.next()
+            if y in ss:
+                bs.append(y)
+        except StopIteration:
+            break
+    return bs
 
 def last(repo, subset, x):
     """``last(set, [n])``
@@ -962,20 +1038,30 @@
     except (TypeError, ValueError):
         # i18n: "last" is a keyword
         raise error.ParseError(_("last expects a number"))
-    ss = set(subset)
-    os = getset(repo, list(repo), l[0])[-lim:]
-    return [r for r in os if r in ss]
+    ss = subset.set()
+    os = getset(repo, spanset(repo), l[0])
+    os.reverse()
+    bs = baseset([])
+    it = iter(os)
+    for x in xrange(lim):
+        try:
+            y = it.next()
+            if y in ss:
+                bs.append(y)
+        except StopIteration:
+            break
+    return bs
 
 def maxrev(repo, subset, x):
     """``max(set)``
     Changeset with highest revision number in set.
     """
-    os = getset(repo, list(repo), x)
+    os = getset(repo, spanset(repo), x)
     if os:
-        m = max(os)
+        m = os.max()
         if m in subset:
-            return [m]
-    return []
+            return baseset([m])
+    return baseset([])
 
 def merge(repo, subset, x):
     """``merge()``
@@ -984,7 +1070,7 @@
     # i18n: "merge" is a keyword
     getargs(x, 0, 0, _("merge takes no arguments"))
     cl = repo.changelog
-    return [r for r in subset if cl.parentrevs(r)[1] != -1]
+    return subset.filter(lambda r: cl.parentrevs(r)[1] != -1)
 
 def branchpoint(repo, subset, x):
     """``branchpoint()``
@@ -994,25 +1080,35 @@
     getargs(x, 0, 0, _("branchpoint takes no arguments"))
     cl = repo.changelog
     if not subset:
-        return []
+        return baseset([])
     baserev = min(subset)
     parentscount = [0]*(len(repo) - baserev)
     for r in cl.revs(start=baserev + 1):
         for p in cl.parentrevs(r):
             if p >= baserev:
                 parentscount[p - baserev] += 1
-    return [r for r in subset if (parentscount[r - baserev] > 1)]
+    return subset.filter(lambda r: parentscount[r - baserev] > 1)
 
 def minrev(repo, subset, x):
     """``min(set)``
     Changeset with lowest revision number in set.
     """
-    os = getset(repo, list(repo), x)
+    os = getset(repo, spanset(repo), x)
     if os:
-        m = min(os)
+        m = os.min()
         if m in subset:
-            return [m]
-    return []
+            return baseset([m])
+    return baseset([])
+
+def _missingancestors(repo, subset, x):
+    # i18n: "_missingancestors" is a keyword
+    revs, bases = getargs(x, 2, 2,
+                          _("_missingancestors requires two arguments"))
+    rs = baseset(repo)
+    revs = getset(repo, rs, revs)
+    bases = getset(repo, rs, bases)
+    missing = set(repo.changelog.findmissingrevs(bases, revs))
+    return baseset([r for r in subset if r in missing])
 
 def modifies(repo, subset, x):
     """``modifies(pattern)``
@@ -1042,7 +1138,7 @@
         if pm is not None:
             rn = repo.changelog.rev(pm)
 
-    return [r for r in subset if r == rn]
+    return subset.filter(lambda r: r == rn)
 
 def obsolete(repo, subset, x):
     """``obsolete()``
@@ -1050,7 +1146,7 @@
     # i18n: "obsolete" is a keyword
     getargs(x, 0, 0, _("obsolete takes no arguments"))
     obsoletes = obsmod.getrevs(repo, 'obsolete')
-    return [r for r in subset if r in obsoletes]
+    return subset & obsoletes
 
 def origin(repo, subset, x):
     """``origin([set])``
@@ -1061,9 +1157,9 @@
     for the first operation is selected.
     """
     if x is not None:
-        args = set(getset(repo, list(repo), x))
+        args = getset(repo, spanset(repo), x).set()
     else:
-        args = set(getall(repo, list(repo), x))
+        args = getall(repo, spanset(repo), x).set()
 
     def _firstsrc(rev):
         src = _getrevsource(repo, rev)
@@ -1078,7 +1174,7 @@
             src = prev
 
     o = set([_firstsrc(r) for r in args])
-    return [r for r in subset if r in o]
+    return subset.filter(lambda r: r in o)
 
 def outgoing(repo, subset, x):
     """``outgoing([path])``
@@ -1101,7 +1197,7 @@
     repo.ui.popbuffer()
     cl = repo.changelog
     o = set([cl.rev(r) for r in outgoing.missing])
-    return [r for r in subset if r in o]
+    return subset.filter(lambda r: r in o)
 
 def p1(repo, subset, x):
     """``p1([set])``
@@ -1109,13 +1205,13 @@
     """
     if x is None:
         p = repo[x].p1().rev()
-        return [r for r in subset if r == p]
+        return subset.filter(lambda r: r == p)
 
     ps = set()
     cl = repo.changelog
-    for r in getset(repo, list(repo), x):
+    for r in getset(repo, spanset(repo), x):
         ps.add(cl.parentrevs(r)[0])
-    return [r for r in subset if r in ps]
+    return subset & ps
 
 def p2(repo, subset, x):
     """``p2([set])``
@@ -1125,15 +1221,15 @@
         ps = repo[x].parents()
         try:
             p = ps[1].rev()
-            return [r for r in subset if r == p]
+            return subset.filter(lambda r: r == p)
         except IndexError:
-            return []
+            return baseset([])
 
     ps = set()
     cl = repo.changelog
-    for r in getset(repo, list(repo), x):
+    for r in getset(repo, spanset(repo), x):
         ps.add(cl.parentrevs(r)[1])
-    return [r for r in subset if r in ps]
+    return subset & ps
 
 def parents(repo, subset, x):
     """``parents([set])``
@@ -1141,13 +1237,13 @@
     """
     if x is None:
         ps = tuple(p.rev() for p in repo[x].parents())
-        return [r for r in subset if r in ps]
+        return subset & ps
 
     ps = set()
     cl = repo.changelog
-    for r in getset(repo, list(repo), x):
+    for r in getset(repo, spanset(repo), x):
         ps.update(cl.parentrevs(r))
-    return [r for r in subset if r in ps]
+    return subset & ps
 
 def parentspec(repo, subset, x, n):
     """``set^0``
@@ -1163,7 +1259,7 @@
         raise error.ParseError(_("^ expects a number 0, 1, or 2"))
     ps = set()
     cl = repo.changelog
-    for r in getset(repo, cl, x):
+    for r in getset(repo, baseset(cl), x):
         if n == 0:
             ps.add(r)
         elif n == 1:
@@ -1172,7 +1268,7 @@
             parents = cl.parentrevs(r)
             if len(parents) > 1:
                 ps.add(parents[1])
-    return [r for r in subset if r in ps]
+    return subset & ps
 
 def present(repo, subset, x):
     """``present(set)``
@@ -1186,7 +1282,7 @@
     try:
         return getset(repo, subset, x)
     except error.RepoLookupError:
-        return []
+        return baseset([])
 
 def public(repo, subset, x):
     """``public()``
@@ -1194,7 +1290,7 @@
     # i18n: "public" is a keyword
     getargs(x, 0, 0, _("public takes no arguments"))
     pc = repo._phasecache
-    return [r for r in subset if pc.phase(repo, r) == phases.public]
+    return subset.filter(lambda r: pc.phase(repo, r) == phases.public)
 
 def remote(repo, subset, x):
     """``remote([id [,path]])``
@@ -1228,8 +1324,8 @@
     if n in repo:
         r = repo[n].rev()
         if r in subset:
-            return [r]
-    return []
+            return baseset([r])
+    return baseset([])
 
 def removes(repo, subset, x):
     """``removes(pattern)``
@@ -1255,7 +1351,7 @@
     except (TypeError, ValueError):
         # i18n: "rev" is a keyword
         raise error.ParseError(_("rev expects a number"))
-    return [r for r in subset if r == l]
+    return subset.filter(lambda r: r == l)
 
 def matching(repo, subset, x):
     """``matching(revision [, field])``
@@ -1285,7 +1381,7 @@
     # i18n: "matching" is a keyword
     l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
 
-    revs = getset(repo, repo.changelog, l[0])
+    revs = getset(repo, baseset(repo.changelog), l[0])
 
     fieldlist = ['metadata']
     if len(l) > 1:
@@ -1356,26 +1452,24 @@
     # is only one field to match)
     getinfo = lambda r: [f(r) for f in getfieldfuncs]
 
-    matches = set()
-    for rev in revs:
-        target = getinfo(rev)
-        for r in subset:
+    def matches(x):
+        for rev in revs:
+            target = getinfo(rev)
             match = True
             for n, f in enumerate(getfieldfuncs):
-                if target[n] != f(r):
+                if target[n] != f(x):
                     match = False
-                    break
             if match:
-                matches.add(r)
-    return [r for r in subset if r in matches]
+                return True
+        return False
+
+    return subset.filter(matches)
 
 def reverse(repo, subset, x):
     """``reverse(set)``
     Reverse order of set.
     """
     l = getset(repo, subset, x)
-    if not isinstance(l, list):
-        l = list(l)
     l.reverse()
     return l
 
@@ -1383,10 +1477,10 @@
     """``roots(set)``
     Changesets in set with no parent changeset in set.
     """
-    s = set(getset(repo, repo.changelog, x))
-    subset = [r for r in subset if r in s]
+    s = getset(repo, spanset(repo), x).set()
+    subset = baseset([r for r in s if r in subset.set()])
     cs = _children(repo, subset, s)
-    return [r for r in subset if r not in cs]
+    return subset - cs
 
 def secret(repo, subset, x):
     """``secret()``
@@ -1394,7 +1488,7 @@
     # i18n: "secret" is a keyword
     getargs(x, 0, 0, _("secret takes no arguments"))
     pc = repo._phasecache
-    return [r for r in subset if pc.phase(repo, r) == phases.secret]
+    return subset.filter(lambda x: pc.phase(repo, x) == phases.secret)
 
 def sort(repo, subset, x):
     """``sort(set[, [-]key...])``
@@ -1421,7 +1515,14 @@
     l = []
     def invert(s):
         return "".join(chr(255 - ord(c)) for c in s)
-    for r in getset(repo, subset, s):
+    revs = getset(repo, subset, s)
+    if keys == ["rev"]:
+        revs.sort()
+        return revs
+    elif keys == ["-rev"]:
+        revs.sort(reverse=True)
+        return revs
+    for r in revs:
         c = repo[r]
         e = []
         for k in keys:
@@ -1450,7 +1551,7 @@
         e.append(r)
         l.append(e)
     l.sort()
-    return [e[-1] for e in l]
+    return baseset([e[-1] for e in l])
 
 def _stringmatcher(pattern):
     """
@@ -1523,7 +1624,7 @@
             s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
     else:
         s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
-    return [r for r in subset if r in s]
+    return subset & s
 
 def tagged(repo, subset, x):
     return tag(repo, subset, x)
@@ -1535,7 +1636,7 @@
     # i18n: "unstable" is a keyword
     getargs(x, 0, 0, _("unstable takes no arguments"))
     unstables = obsmod.getrevs(repo, 'unstable')
-    return [r for r in subset if r in unstables]
+    return subset & unstables
 
 
 def user(repo, subset, x):
@@ -1552,11 +1653,29 @@
 def _list(repo, subset, x):
     s = getstring(x, "internal error")
     if not s:
-        return []
-    if not isinstance(subset, set):
-        subset = set(subset)
+        return baseset([])
     ls = [repo[r].rev() for r in s.split('\0')]
-    return [r for r in ls if r in subset]
+    s = subset.set()
+    return baseset([r for r in ls if r in s])
+
+# for internal use
+def _intlist(repo, subset, x):
+    s = getstring(x, "internal error")
+    if not s:
+        return baseset([])
+    ls = [int(r) for r in s.split('\0')]
+    s = subset.set()
+    return baseset([r for r in ls if r in s])
+
+# for internal use
+def _hexlist(repo, subset, x):
+    s = getstring(x, "internal error")
+    if not s:
+        return baseset([])
+    cl = repo.changelog
+    ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
+    s = subset.set()
+    return baseset([r for r in ls if r in s])
 
 symbols = {
     "adds": adds,
@@ -1565,6 +1684,7 @@
     "ancestors": ancestors,
     "_firstancestors": _firstancestors,
     "author": author,
+    "only": only,
     "bisect": bisect,
     "bisected": bisected,
     "bookmark": bookmark,
@@ -1602,6 +1722,7 @@
     "max": maxrev,
     "merge": merge,
     "min": minrev,
+    "_missingancestors": _missingancestors,
     "modifies": modifies,
     "obsolete": obsolete,
     "origin": origin,
@@ -1624,6 +1745,8 @@
     "user": user,
     "unstable": unstable,
     "_list": _list,
+    "_intlist": _intlist,
+    "_hexlist": _hexlist,
 }
 
 # symbols which can't be used for a DoS attack for any given input
@@ -1671,6 +1794,7 @@
     "max",
     "merge",
     "min",
+    "_missingancestors",
     "modifies",
     "obsolete",
     "origin",
@@ -1693,6 +1817,8 @@
     "user",
     "unstable",
     "_list",
+    "_intlist",
+    "_hexlist",
 ])
 
 methods = {
@@ -1737,7 +1863,24 @@
     elif op == 'and':
         wa, ta = optimize(x[1], True)
         wb, tb = optimize(x[2], True)
+
+        # (::x and not ::y)/(not ::y and ::x) have a fast path
+        def ismissingancestors(revs, bases):
+            return (
+                revs[0] == 'func'
+                and getstring(revs[1], _('not a symbol')) == 'ancestors'
+                and bases[0] == 'not'
+                and bases[1][0] == 'func'
+                and getstring(bases[1][1], _('not a symbol')) == 'ancestors')
+
         w = min(wa, wb)
+        if ismissingancestors(ta, tb):
+            return w, ('func', ('symbol', '_missingancestors'),
+                       ('list', ta[2], tb[1][2]))
+        if ismissingancestors(tb, ta):
+            return w, ('func', ('symbol', '_missingancestors'),
+                       ('list', tb[2], ta[1][2]))
+
         if wa > wb:
             return w, (op, tb, ta)
         return w, (op, ta, tb)
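A hedged illustration of the rewrite this fast path performs, with parse
trees shaped like the tuples matched above (layout inferred from this hunk,
not taken from the parser itself):

    # trees for "::a and not ::b"
    ta = ('func', ('symbol', 'ancestors'), ('symbol', 'a'))
    tb = ('not', ('func', ('symbol', 'ancestors'), ('symbol', 'b')))

    # ismissingancestors(ta, tb) holds, so the 'and' node is rewritten to:
    rewritten = ('func', ('symbol', '_missingancestors'),
                 ('list', ta[2], tb[1][2]))
    print(rewritten)
    # ('func', ('symbol', '_missingancestors'),
    #  ('list', ('symbol', 'a'), ('symbol', 'b')))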
@@ -1907,21 +2050,26 @@
         aliases[alias.name] = alias
     return _expandaliases(aliases, tree, [], {})
 
-def parse(spec):
+def parse(spec, lookup=None):
     p = parser.parser(tokenize, elements)
-    return p.parse(spec)
-
-def match(ui, spec):
+    return p.parse(spec, lookup=lookup)
+
+def match(ui, spec, repo=None):
     if not spec:
         raise error.ParseError(_("empty query"))
-    tree, pos = parse(spec)
+    lookup = None
+    if repo:
+        lookup = repo.__contains__
+    tree, pos = parse(spec, lookup)
     if (pos != len(spec)):
         raise error.ParseError(_("invalid token"), pos)
     if ui:
         tree = findaliases(ui, tree)
     weight, tree = optimize(tree, True)
     def mfunc(repo, subset):
-        return getset(repo, subset, tree)
+        if util.safehasattr(subset, 'set'):
+            return getset(repo, subset, tree)
+        return getset(repo, baseset(subset), tree)
     return mfunc
 
 def formatspec(expr, *args):
@@ -1980,11 +2128,11 @@
         elif l == 1:
             return argtype(t, s[0])
         elif t == 'd':
-            return "_list('%s')" % "\0".join(str(int(a)) for a in s)
+            return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
         elif t == 's':
             return "_list('%s')" % "\0".join(s)
         elif t == 'n':
-            return "_list('%s')" % "\0".join(node.hex(a) for a in s)
+            return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
         elif t == 'b':
             return "_list('%s')" % "\0".join(a.branch() for a in s)
 
@@ -2050,5 +2198,662 @@
             funcs.add(tree[1][1])
         return funcs
 
+class baseset(list):
+    """Basic data structure that represents a revset and contains the basic
+    operations that it should be able to perform.
+
+    Every method in this class should be implemented by any smartset class.
+    """
+    def __init__(self, data=()):
+        super(baseset, self).__init__(data)
+        self._set = None
+
+    def ascending(self):
+        """Sorts the set in ascending order (in place).
+
+        This is part of the mandatory API for smartset."""
+        self.sort()
+
+    def descending(self):
+        """Sorts the set in descending order (in place).
+
+        This is part of the mandatory API for smartset."""
+        self.sort(reverse=True)
+
+    def min(self):
+        return min(self)
+
+    def max(self):
+        return max(self)
+
+    def set(self):
+        """Returns a set or a smartset containing all the elements.
+
+        The returned structure should be the fastest option for membership
+        testing.
+
+        This is part of the mandatory API for smartset."""
+        if not self._set:
+            self._set = set(self)
+        return self._set
+
+    def __sub__(self, other):
+        """Returns a new object with the substraction of the two collections.
+
+        This is part of the mandatory API for smartset."""
+        if isinstance(other, baseset):
+            s = other.set()
+        else:
+            s = set(other)
+        return baseset(self.set() - s)
+
+    def __and__(self, other):
+        """Returns a new object with the intersection of the two collections.
+
+        This is part of the mandatory API for smartset."""
+        if isinstance(other, baseset):
+            other = other.set()
+        return baseset([y for y in self if y in other])
+
+    def __add__(self, other):
+        """Returns a new object with the union of the two collections.
+
+        This is part of the mandatory API for smartset."""
+        s = self.set()
+        l = [r for r in other if r not in s]
+        return baseset(list(self) + l)
+
+    def isascending(self):
+        """Returns True if the collection is ascending order, False if not.
+
+        This is part of the mandatory API for smartset."""
+        return False
+
+    def isdescending(self):
+        """Returns True if the collection is descending order, False if not.
+
+        This is part of the mandatory API for smartset."""
+        return False
+
+    def filter(self, condition):
+        """Returns this smartset filtered by condition as a new smartset.
+
+        `condition` is a callable which takes a revision number and returns a
+        boolean.
+
+        This is part of the mandatory API for smartset."""
+        return lazyset(self, condition)
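Since baseset subclasses list and caches a set() for membership tests, the
operators above behave like set algebra while keeping list semantics. A quick
demonstration, assuming this module is importable as mercurial.revset:

    from mercurial.revset import baseset

    a = baseset([3, 1, 2])
    b = baseset([2, 4])
    print(list(a - b))  # elements of a not in b; order comes from a raw set
    print(list(a & b))  # [2] -- filters a, so a's order is kept
    print(list(a + b))  # [3, 1, 2, 4] -- union, duplicates from b dropped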
+
+class _orderedsetmixin(object):
+    """Mixin class with utility methods for smartsets
+
+    This should be extended by smartsets which have the isascending(),
+    isdescending() and reverse() methods"""
+
+    def _first(self):
+        """return the first revision in the set"""
+        for r in self:
+            return r
+        raise ValueError('arg is an empty sequence')
+
+    def _last(self):
+        """return the last revision in the set"""
+        self.reverse()
+        m = self._first()
+        self.reverse()
+        return m
+
+    def min(self):
+        """return the smallest element in the set"""
+        if self.isascending():
+            return self._first()
+        return self._last()
+
+    def max(self):
+        """return the largest element in the set"""
+        if self.isascending():
+            return self._last()
+        return self._first()
+
+class lazyset(object):
+    """Duck type for baseset class which iterates lazily over the revisions in
+    the subset and contains a function which tests for membership in the
+    revset
+    """
+    def __init__(self, subset, condition=lambda x: True):
+        """
+        condition: a function that decides whether a revision in the subset
+                   belongs to the revset or not.
+        """
+        self._subset = subset
+        self._condition = condition
+        self._cache = {}
+
+    def ascending(self):
+        self._subset.sort()
+
+    def descending(self):
+        self._subset.sort(reverse=True)
+
+    def min(self):
+        return min(self)
+
+    def max(self):
+        return max(self)
+
+    def __contains__(self, x):
+        c = self._cache
+        if x not in c:
+            c[x] = x in self._subset and self._condition(x)
+        return c[x]
+
+    def __iter__(self):
+        cond = self._condition
+        for x in self._subset:
+            if cond(x):
+                yield x
+
+    def __and__(self, x):
+        return lazyset(self, lambda r: r in x)
+
+    def __sub__(self, x):
+        return lazyset(self, lambda r: r not in x)
+
+    def __add__(self, x):
+        return _addset(self, x)
+
+    def __nonzero__(self):
+        for r in self:
+            return True
+        return False
+
+    def __len__(self):
+        # Basic implementation to be changed in future patches.
+        l = baseset([r for r in self])
+        return len(l)
+
+    def __getitem__(self, x):
+        # Basic implementation to be changed in future patches.
+        l = baseset([r for r in self])
+        return l[x]
+
+    def sort(self, reverse=False):
+        if not util.safehasattr(self._subset, 'sort'):
+            self._subset = baseset(self._subset)
+        self._subset.sort(reverse=reverse)
+
+    def reverse(self):
+        self._subset.reverse()
+
+    def set(self):
+        return set([r for r in self])
+
+    def isascending(self):
+        return False
+
+    def isdescending(self):
+        return False
+
+    def filter(self, l):
+        return lazyset(self, l)
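The interesting property here is that `__contains__` memoizes per revision
while `__iter__` stays a plain generator over the subset. A stripped-down
stand-in showing just that caching behavior (not the real class):

    class tinylazyset(object):
        def __init__(self, subset, condition):
            self._subset, self._condition, self._cache = subset, condition, {}
        def __contains__(self, x):
            if x not in self._cache:
                self._cache[x] = x in self._subset and self._condition(x)
            return self._cache[x]

    s = tinylazyset([1, 2, 3], lambda r: r > 1)
    assert 2 in s and 0 not in s
    assert s._cache == {2: True, 0: False}  # both answers now memoized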
+
+class orderedlazyset(_orderedsetmixin, lazyset):
+    """Subclass of lazyset which subset can be ordered either ascending or
+    descendingly
+    """
+    def __init__(self, subset, condition, ascending=True):
+        super(orderedlazyset, self).__init__(subset, condition)
+        self._ascending = ascending
+
+    def filter(self, l):
+        return orderedlazyset(self, l, ascending=self._ascending)
+
+    def ascending(self):
+        if not self._ascending:
+            self.reverse()
+
+    def descending(self):
+        if self._ascending:
+            self.reverse()
+
+    def __and__(self, x):
+        return orderedlazyset(self, lambda r: r in x,
+                ascending=self._ascending)
+
+    def __sub__(self, x):
+        return orderedlazyset(self, lambda r: r not in x,
+                ascending=self._ascending)
+
+    def __add__(self, x):
+        kwargs = {}
+        if self.isascending() and x.isascending():
+            kwargs['ascending'] = True
+        if self.isdescending() and x.isdescending():
+            kwargs['ascending'] = False
+        return _addset(self, x, **kwargs)
+
+    def sort(self, reverse=False):
+        if reverse:
+            if self._ascending:
+                self._subset.sort(reverse=reverse)
+        else:
+            if not self._ascending:
+                self._subset.sort(reverse=reverse)
+        self._ascending = not reverse
+
+    def isascending(self):
+        return self._ascending
+
+    def isdescending(self):
+        return not self._ascending
+
+    def reverse(self):
+        self._subset.reverse()
+        self._ascending = not self._ascending
+
+class _addset(_orderedsetmixin):
+    """Represent the addition of two sets
+
+    Wrapper structure for lazily adding two structures without losing much
+    performance on the __contains__ method
+
+    If the ascending attribute is set, that means the two structures are
+    ordered in either an ascending or descending way. Therefore, we can add
+    them maintaining the order by iterating over both at the same time
+
+    This class does not duck-type baseset and it's only supposed to be used
+    internally
+    """
+    def __init__(self, revs1, revs2, ascending=None):
+        self._r1 = revs1
+        self._r2 = revs2
+        self._iter = None
+        self._ascending = ascending
+        self._genlist = None
+
+    def __len__(self):
+        return len(self._list)
+
+    @util.propertycache
+    def _list(self):
+        if not self._genlist:
+            self._genlist = baseset(self._iterator())
+        return self._genlist
+
+    def filter(self, condition):
+        if self._ascending is not None:
+            return orderedlazyset(self, condition, ascending=self._ascending)
+        return lazyset(self, condition)
+
+    def ascending(self):
+        if self._ascending is None:
+            self.sort()
+            self._ascending = True
+        else:
+            if not self._ascending:
+                self.reverse()
+
+    def descending(self):
+        if self._ascending is None:
+            self.sort(reverse=True)
+            self._ascending = False
+        else:
+            if self._ascending:
+                self.reverse()
+
+    def __and__(self, other):
+        filterfunc = other.__contains__
+        if self._ascending is not None:
+            return orderedlazyset(self, filterfunc, ascending=self._ascending)
+        return lazyset(self, filterfunc)
+
+    def __sub__(self, other):
+        filterfunc = lambda r: r not in other
+        if self._ascending is not None:
+            return orderedlazyset(self, filterfunc, ascending=self._ascending)
+        return lazyset(self, filterfunc)
+
+    def __add__(self, other):
+        """When both collections are ascending or descending, preserve the order
+        """
+        kwargs = {}
+        if self._ascending is not None:
+            if self.isascending() and other.isascending():
+                kwargs['ascending'] = True
+            if self.isdescending() and other.isdescending():
+                kwargs['ascending'] = False
+        return _addset(self, other, **kwargs)
+
+    def _iterator(self):
+        """Iterate over both collections without repeating elements
+
+        If the ascending attribute is not set, iterate over the first one and
+        then over the second one, checking for membership in the first one so
+        we don't yield any duplicates.
+
+        If the ascending attribute is set, iterate over both collections at the
+        same time, yielding only one value at a time in the given order.
+        """
+        if not self._iter:
+            def gen():
+                if self._ascending is None:
+                    for r in self._r1:
+                        yield r
+                    s = self._r1.set()
+                    for r in self._r2:
+                        if r not in s:
+                            yield r
+                else:
+                    iter1 = iter(self._r1)
+                    iter2 = iter(self._r2)
+
+                    val1 = None
+                    val2 = None
+
+                    choice = max
+                    if self._ascending:
+                        choice = min
+                    try:
+                        # Consume both iterators in an ordered way until one is
+                        # empty
+                        while True:
+                            if val1 is None:
+                                val1 = iter1.next()
+                            if val2 is None:
+                                val2 = iter2.next()
+                            next = choice(val1, val2)
+                            yield next
+                            if val1 == next:
+                                val1 = None
+                            if val2 == next:
+                                val2 = None
+                    except StopIteration:
+                        # Flush any remaining values and consume the other one
+                        it = iter2
+                        if val1 is not None:
+                            yield val1
+                            it = iter1
+                        elif val2 is not None:
+                            # might have been equality and both are empty
+                            yield val2
+                        for val in it:
+                            yield val
+
+            self._iter = _generatorset(gen())
+
+        return self._iter
+
+    def __iter__(self):
+        if self._genlist:
+            return iter(self._genlist)
+        return iter(self._iterator())
+
+    def __contains__(self, x):
+        return x in self._r1 or x in self._r2
+
+    def set(self):
+        return self
+
+    def sort(self, reverse=False):
+        """Sort the added set
+
+        For this we use the cached list with all the generated values and if we
+        know they are ascending or descending we can sort them in a smart way.
+        """
+        if self._ascending is None:
+            self._list.sort(reverse=reverse)
+            self._ascending = not reverse
+        else:
+            if bool(self._ascending) == bool(reverse):
+                self.reverse()
+
+    def isascending(self):
+        return self._ascending is not None and self._ascending
+
+    def isdescending(self):
+        return self._ascending is not None and not self._ascending
+
+    def reverse(self):
+        self._list.reverse()
+        if self._ascending is not None:
+            self._ascending = not self._ascending
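The ordered branch of `_iterator` above is a two-pointer merge that also
collapses equal heads so no revision is yielded twice. The same control flow
in isolation (standalone sketch; uses next() instead of Py2's .next()):

    def mergedup(a, b):
        # merge two sorted ascending iterables, dropping duplicates
        it1, it2 = iter(a), iter(b)
        val1 = val2 = None
        try:
            while True:
                if val1 is None:
                    val1 = next(it1)
                if val2 is None:
                    val2 = next(it2)
                smallest = min(val1, val2)
                yield smallest
                if val1 == smallest:
                    val1 = None
                if val2 == smallest:
                    val2 = None
        except StopIteration:
            # one side ran dry: flush the held value, then the other side
            rest = it2
            if val1 is not None:
                yield val1
                rest = it1
            elif val2 is not None:
                yield val2
            for val in rest:
                yield val

    print(list(mergedup([1, 3, 5], [2, 3, 6])))  # [1, 2, 3, 5, 6]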
+
+class _generatorset(object):
+    """Wrap a generator for lazy iteration
+
+    Wrapper structure for generators that provides lazy membership and can
+    be iterated more than once.
+    When asked for membership it generates values until either it finds the
+    requested one or has gone through all the elements in the generator
+
+    This class does not duck-type baseset and it's only supposed to be used
+    internally
+    """
+    def __init__(self, gen):
+        """
+        gen: a generator producing the values for the generatorset.
+        """
+        self._gen = gen
+        self._cache = {}
+        self._genlist = baseset([])
+        self._finished = False
+
+    def __contains__(self, x):
+        if x in self._cache:
+            return self._cache[x]
+
+        # Use new values only, as existing values would be cached.
+        for l in self._consumegen():
+            if l == x:
+                return True
+
+        self._cache[x] = False
+        return False
+
+    def __iter__(self):
+        if self._finished:
+            for x in self._genlist:
+                yield x
+            return
+
+        i = 0
+        genlist = self._genlist
+        consume = self._consumegen()
+        while True:
+            if i < len(genlist):
+                yield genlist[i]
+            else:
+                yield consume.next()
+            i += 1
+
+    def _consumegen(self):
+        for item in self._gen:
+            self._cache[item] = True
+            self._genlist.append(item)
+            yield item
+        self._finished = True
+
+    def set(self):
+        return self
+
+    def sort(self, reverse=False):
+        if not self._finished:
+            for i in self:
+                continue
+        self._genlist.sort(reverse=reverse)
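_generatorset trades memory for restartability: consumed values are appended
to a cached list, so iteration can restart and a membership probe only
consumes as much of the generator as needed. A sketch of that
consume-and-cache idea (standalone, not the real class):

    produced = []

    def gen():
        for r in (5, 7, 9):
            produced.append(r)
            yield r

    cache = []
    for r in gen():
        cache.append(r)
        if r == 7:          # a membership probe for 7 can stop here
            break
    print(produced)         # [5, 7] -- the tail (9) was never generated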
+
+class _ascgeneratorset(_generatorset):
+    """Wrap a generator of ascending elements for lazy iteration
+
+    Same structure as _generatorset but, when asked for membership, stops
+    iterating once it has gone past the requested value without finding it
+
+    This class does not duck-type baseset and it's only supposed to be used
+    internally
+    """
+    def __contains__(self, x):
+        if x in self._cache:
+            return self._cache[x]
+
+        # Use new values only, as existing values would be cached.
+        for l in self._consumegen():
+            if l == x:
+                return True
+            if l > x:
+                break
+
+        self._cache[x] = False
+        return False
+
+class _descgeneratorset(_generatorset):
+    """Wrap a generator of descending elements for lazy iteration
+
+    Same structure as _generatorset but, when asked for membership, stops
+    iterating once it has gone past the requested value without finding it
+
+    This class does not duck-type baseset and it's only supposed to be used
+    internally
+    """
+    def __contains__(self, x):
+        if x in self._cache:
+            return self._cache[x]
+
+        # Use new values only, as existing values would be cached.
+        for l in self._consumegen():
+            if l == x:
+                return True
+            if l < x:
+                break
+
+        self._cache[x] = False
+        return False
+
+class spanset(_orderedsetmixin):
+    """Duck type for baseset class which represents a range of revisions and
+    can work lazily, without having the whole range in memory
+
+    Note that spanset(x, y) behaves almost like xrange(x, y) except for two
+    notable points:
+    - when x > y the set is automatically descending,
+    - revisions filtered out by the current repoview are skipped.
+
+    """
+    def __init__(self, repo, start=0, end=None):
+        """
+        start: first revision included in the set
+               (defaults to 0)
+        end:   first revision excluded (last+1)
+               (defaults to len(repo))
+
+        Spanset will be descending if `end` < `start`.
+        """
+        self._start = start
+        if end is not None:
+            self._end = end
+        else:
+            self._end = len(repo)
+        self._hiddenrevs = repo.changelog.filteredrevs
+
+    def ascending(self):
+        if self._start > self._end:
+            self.reverse()
+
+    def descending(self):
+        if self._start < self._end:
+            self.reverse()
+
+    def _contained(self, rev):
+        return (rev <= self._start and rev > self._end) or (rev >= self._start
+                and rev < self._end)
+
+    def __iter__(self):
+        if self._start <= self._end:
+            iterrange = xrange(self._start, self._end)
+        else:
+            iterrange = xrange(self._start, self._end, -1)
+
+        if self._hiddenrevs:
+            s = self._hiddenrevs
+            for r in iterrange:
+                if r not in s:
+                    yield r
+        else:
+            for r in iterrange:
+                yield r
+
+    def __contains__(self, x):
+        return self._contained(x) and not (self._hiddenrevs and x in
+                self._hiddenrevs)
+
+    def __nonzero__(self):
+        for r in self:
+            return True
+        return False
+
+    def __and__(self, x):
+        if isinstance(x, baseset):
+            x = x.set()
+        if self._start <= self._end:
+            return orderedlazyset(self, lambda r: r in x)
+        else:
+            return orderedlazyset(self, lambda r: r in x, ascending=False)
+
+    def __sub__(self, x):
+        if isinstance(x, baseset):
+            x = x.set()
+        if self._start <= self._end:
+            return orderedlazyset(self, lambda r: r not in x)
+        else:
+            return orderedlazyset(self, lambda r: r not in x, ascending=False)
+
+    def __add__(self, x):
+        kwargs = {}
+        if self.isascending() and x.isascending():
+            kwargs['ascending'] = True
+        if self.isdescending() and x.isdescending():
+            kwargs['ascending'] = False
+        return _addset(self, x, **kwargs)
+
+    def __len__(self):
+        if not self._hiddenrevs:
+            return abs(self._end - self._start)
+        else:
+            count = 0
+            for rev in self._hiddenrevs:
+                if self._contained(rev):
+                    count += 1
+            return abs(self._end - self._start) - count
+
+    def __getitem__(self, x):
+        # Basic implementation to be changed in future patches.
+        l = baseset([r for r in self])
+        return l[x]
+
+    def sort(self, reverse=False):
+        if bool(reverse) != (self._start > self._end):
+            self.reverse()
+
+    def reverse(self):
+        # Just switch the _start and _end parameters
+        if self._start <= self._end:
+            self._start, self._end = self._end - 1, self._start - 1
+        else:
+            self._start, self._end = self._end + 1, self._start + 1
+
+    def set(self):
+        return self
+
+    def isascending(self):
+        return self._start < self._end
+
+    def isdescending(self):
+        return self._start > self._end
+
+    def filter(self, l):
+        if self._start <= self._end:
+            return orderedlazyset(self, l)
+        else:
+            return orderedlazyset(self, l, ascending=False)
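Because spanset stores only its endpoints, reverse() is O(1) endpoint
arithmetic rather than a list reversal. A quick standalone check of the
swap-and-shift used above (plain ranges, no repo involved):

    start, end = 0, 5
    print(list(range(start, end)))       # [0, 1, 2, 3, 4] ascending span
    start, end = end - 1, start - 1      # reverse(): becomes 4 .. -1
    print(list(range(start, end, -1)))   # [4, 3, 2, 1, 0] descending span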
+
 # tell hggettext to extract docstrings from these functions:
 i18nfunctions = symbols.values()
--- a/mercurial/scmutil.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/scmutil.py	Thu Apr 17 19:36:17 2014 -0400
@@ -10,7 +10,7 @@
 import util, error, osutil, revset, similar, encoding, phases, parsers
 import pathutil
 import match as matchmod
-import os, errno, re, glob
+import os, errno, re, glob, tempfile
 
 if os.name == 'nt':
     import scmwindows as scmplatform
@@ -20,6 +20,16 @@
 systemrcpath = scmplatform.systemrcpath
 userrcpath = scmplatform.userrcpath
 
+def itersubrepos(ctx1, ctx2):
+    """find subrepos in ctx1 or ctx2"""
+    # Create a (subpath, ctx) mapping where we prefer subpaths from
+    # ctx1. The subpaths from ctx2 are important when the .hgsub file
+    # has been modified (in ctx2) but not yet committed (in ctx1).
+    subpaths = dict.fromkeys(ctx2.substate, ctx2)
+    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
+    for subpath, ctx in sorted(subpaths.iteritems()):
+        yield subpath, ctx.sub(subpath)
+
 def nochangesfound(ui, repo, excluded=None):
     '''Report no changes for push/pull, excluded is None or a list of
     nodes excluded from the push/pull.
@@ -183,6 +193,15 @@
     def mkdir(self, path=None):
         return os.mkdir(self.join(path))
 
+    def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
+        fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
+                                    dir=self.join(dir), text=text)
+        dname, fname = util.split(name)
+        if dir:
+            return fd, os.path.join(dir, fname)
+        else:
+            return fd, fname
+
     def readdir(self, path=None, stat=None, skip=None):
         return osutil.listdir(self.join(path), stat, skip)
 
@@ -460,15 +479,26 @@
 
     l = revrange(repo, revs)
 
-    if len(l) == 0:
-        if revs:
-            raise util.Abort(_('empty revision range'))
-        return repo.dirstate.p1(), None
+    if not l:
+        first = second = None
+    elif l.isascending():
+        first = l.min()
+        second = l.max()
+    elif l.isdescending():
+        first = l.max()
+        second = l.min()
+    else:
+        l = list(l)
+        first = l[0]
+        second = l[-1]
 
-    if len(l) == 1 and len(revs) == 1 and _revrangesep not in revs[0]:
-        return repo.lookup(l[0]), None
+    if first is None:
+        raise util.Abort(_('empty revision range'))
 
-    return repo.lookup(l[0]), repo.lookup(l[-1])
+    if first == second and len(revs) == 1 and _revrangesep not in revs[0]:
+        return repo.lookup(first), None
+
+    return repo.lookup(first), repo.lookup(second)
 
 _revrangesep = ':'
 
@@ -480,7 +510,7 @@
             return defval
         return repo[val].rev()
 
-    seen, l = set(), []
+    seen, l = set(), revset.baseset([])
     for spec in revs:
         if l and not seen:
             seen = set(l)
@@ -489,7 +519,7 @@
         try:
             if isinstance(spec, int):
                 seen.add(spec)
-                l.append(spec)
+                l = l + revset.baseset([spec])
                 continue
 
             if _revrangesep in spec:
@@ -501,7 +531,7 @@
                 rangeiter = repo.changelog.revs(start, end)
                 if not seen and not l:
                     # by far the most common case: revs = ["-1:0"]
-                    l = list(rangeiter)
+                    l = revset.baseset(rangeiter)
                     # defer syncing seen until next iteration
                     continue
                 newrevs = set(rangeiter)
@@ -510,44 +540,51 @@
                     seen.update(newrevs)
                 else:
                     seen = newrevs
-                l.extend(sorted(newrevs, reverse=start > end))
+                l = l + revset.baseset(sorted(newrevs, reverse=start > end))
                 continue
             elif spec and spec in repo: # single unquoted rev
                 rev = revfix(repo, spec, None)
                 if rev in seen:
                     continue
                 seen.add(rev)
-                l.append(rev)
+                l = l + revset.baseset([rev])
                 continue
         except error.RepoLookupError:
             pass
 
         # fall through to new-style queries if old-style fails
-        m = revset.match(repo.ui, spec)
-        dl = [r for r in m(repo, list(repo)) if r not in seen]
-        l.extend(dl)
-        seen.update(dl)
+        m = revset.match(repo.ui, spec, repo)
+        if seen or l:
+            dl = [r for r in m(repo, revset.spanset(repo)) if r not in seen]
+            l = l + revset.baseset(dl)
+            seen.update(dl)
+        else:
+            l = m(repo, revset.spanset(repo))
 
     return l
 
 def expandpats(pats):
+    '''Expand bare globs when running on Windows.
+    On POSIX we assume it has already been done by sh.'''
     if not util.expandglobs:
         return list(pats)
     ret = []
-    for p in pats:
-        kind, name = matchmod._patsplit(p, None)
+    for kindpat in pats:
+        kind, pat = matchmod._patsplit(kindpat, None)
         if kind is None:
             try:
-                globbed = glob.glob(name)
+                globbed = glob.glob(pat)
             except re.error:
-                globbed = [name]
+                globbed = [pat]
             if globbed:
                 ret.extend(globbed)
                 continue
-        ret.append(p)
+        ret.append(kindpat)
     return ret
 
 def matchandpats(ctx, pats=[], opts={}, globbed=False, default='relpath'):
+    '''Return a matcher and the patterns that were used.
+    The matcher will warn about bad matches.'''
     if pats == ("",):
         pats = []
     if not globbed and default == 'relpath':
@@ -561,12 +598,15 @@
     return m, pats
 
 def match(ctx, pats=[], opts={}, globbed=False, default='relpath'):
+    '''Return a matcher that will warn about bad matches.'''
     return matchandpats(ctx, pats, opts, globbed, default)[0]
 
 def matchall(repo):
+    '''Return a matcher that will efficiently match everything.'''
     return matchmod.always(repo.root, repo.getcwd())
 
 def matchfiles(repo, files):
+    '''Return a matcher that will efficiently match exactly these files.'''
     return matchmod.exact(repo.root, repo.getcwd(), files)
 
 def addremove(repo, pats=[], opts={}, dry_run=None, similarity=None):
@@ -721,8 +761,10 @@
     missings.sort()
     if missings:
         raise error.RequirementError(
-            _("unknown repository format: requires features '%s' (upgrade "
-              "Mercurial)") % "', '".join(missings))
+            _("repository requires features unknown to this Mercurial: %s")
+            % " ".join(missings),
+            hint=_("see http://mercurial.selenic.com/wiki/MissingRequirement"
+                   " for more information"))
     return requirements
 
 class filecachesubentry(object):
--- a/mercurial/setdiscovery.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/setdiscovery.py	Thu Apr 17 19:36:17 2014 -0400
@@ -5,6 +5,40 @@
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
+"""
+Algorithm works in the following way. You have two repository: local and
+remote. They both contains a DAG of changelists.
+
+The goal of the discovery protocol is to find one set of node *common*,
+the set of nodes shared by local and remote.
+
+One of the issue with the original protocol was latency, it could
+potentially require lots of roundtrips to discover that the local repo was a
+subset of remote (which is a very common case, you usually have few changes
+compared to upstream, while upstream probably had lots of development).
+
+The new protocol only requires one interface for the remote repo: `known()`,
+which given a set of changelists tells you if they are present in the DAG.
+
+The algorithm then works as follow:
+
+ - We will be using three sets, `common`, `missing`, `unknown`. Originally
+ all nodes are in `unknown`.
+ - Take a sample from `unknown`, call `remote.known(sample)`
+   - For each node that remote knows, move it and all its ancestors to `common`
+   - For each node that remote doesn't know, move it and all its descendants
+   to `missing`
+ - Iterate until `unknown` is empty
+
+There are a couple optimizations, first is instead of starting with a random
+sample of missing, start by sending all heads, in the case where the local
+repo is a subset, you computed the answer in one round trip.
+
+Then you can do something similar to the bisecting strategy used when
+finding faulty changesets. Instead of random samples, you can try picking
+nodes that will maximize the number of nodes that will be
+classified with it (since all ancestors or descendants will be marked as well).
+"""
 
 from node import nullid
 from i18n import _
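
To make the description above concrete, here is a minimal, non-normative
sketch of the client side of the exchange. `local` and `remote` are
duck-typed stand-ins for repository objects, and the naive random sampler
replaces the biased sampling this module actually implements:

    import random

    def takesample(nodes, size=200):
        # toy sampler; the real code prefers nodes whose answer will
        # classify the most other nodes
        nodes = list(nodes)
        random.shuffle(nodes)
        return nodes[:size]

    def findcommon(local, remote):
        unknown = set(local.allnodes())
        common, missing = set(), set()
        # optimization: the first sample is the local heads, so a local
        # repo that is a subset of remote resolves in one round trip
        sample = local.heads()
        while unknown:
            for node, known in zip(sample, remote.known(sample)):
                if known:
                    # remote has it: it and its ancestors are common
                    common.update(local.ancestors(node))
                    common.add(node)
                else:
                    # remote lacks it: it and its descendants are missing
                    missing.update(local.descendants(node))
                    missing.add(node)
            unknown -= common
            unknown -= missing
            sample = takesample(unknown)
        return common

Only `known()` here corresponds to the wire interface described above; the
other helpers are assumptions made for the sketch.
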
--- a/mercurial/sshpeer.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/sshpeer.py	Thu Apr 17 19:36:17 2014 -0400
@@ -51,7 +51,7 @@
             cmd = '%s %s %s' % (sshcmd, args,
                 util.shellquote("%s init %s" %
                     (_serverquote(remotecmd), _serverquote(self.path))))
-            ui.note(_('running %s\n') % cmd)
+            ui.debug('running %s\n' % cmd)
             res = util.system(cmd)
             if res != 0:
                 self._abort(error.RepoError(_("could not create remote repo")))
@@ -68,7 +68,7 @@
         cmd = '%s %s %s' % (sshcmd, args,
             util.shellquote("%s -R %s serve --stdio" %
                 (_serverquote(remotecmd), _serverquote(self.path))))
-        self.ui.note(_('running %s\n') % cmd)
+        self.ui.debug('running %s\n' % cmd)
         cmd = util.quotecommand(cmd)
 
         # while self.subprocess isn't used, having it allows the subprocess to
@@ -157,6 +157,9 @@
 
         return self.pipei
 
+    def _callcompressable(self, cmd, **args):
+        return self._callstream(cmd, **args)
+
     def _call(self, cmd, **args):
         self._callstream(cmd, **args)
         return self._recv()
@@ -176,8 +179,18 @@
             return '', r
         return self._recv(), ''
 
-    def _decompress(self, stream):
-        return stream
+    def _calltwowaystream(self, cmd, fp, **args):
+        r = self._call(cmd, **args)
+        if r:
+            # XXX needs to be made better
+            raise util.Abort('unexpected remote reply: %s' % r)
+        while True:
+            d = fp.read(4096)
+            if not d:
+                break
+            self._send(d)
+        self._send("", flush=True)
+        return self.pipei
 
     def _recv(self):
         l = self.pipei.readline()
--- a/mercurial/sshserver.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/sshserver.py	Thu Apr 17 19:36:17 2014 -0400
@@ -9,7 +9,7 @@
 import util, hook, wireproto, changegroup
 import os, sys
 
-class sshserver(object):
+class sshserver(wireproto.abstractserverproto):
     def __init__(self, ui, repo):
         self.ui = ui
         self.repo = repo
@@ -143,7 +143,7 @@
 
         self.sendresponse("")
         cg = changegroup.unbundle10(self.fin, "UN")
-        r = self.repo.addchangegroup(cg, 'serve', self._client())
+        r = changegroup.addchangegroup(self.repo, cg, 'serve', self._client())
         self.lock.release()
         return str(r)
 
--- a/mercurial/statichttprepo.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/statichttprepo.py	Thu Apr 17 19:36:17 2014 -0400
@@ -143,6 +143,7 @@
         self.decodepats = None
 
     def _restrictcapabilities(self, caps):
+        caps = super(statichttprepository, self)._restrictcapabilities(caps)
         return caps.difference(["pushkey"])
 
     def url(self):
--- a/mercurial/store.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/store.py	Thu Apr 17 19:36:17 2014 -0400
@@ -337,7 +337,13 @@
     def copylist(self):
         return ['requires'] + _data.split()
 
-    def write(self):
+    def write(self, tr):
+        pass
+
+    def invalidatecaches(self):
+        pass
+
+    def markremoved(self, fn):
         pass
 
     def __contains__(self, path):
@@ -402,20 +408,14 @@
                     raise util.Abort(t)
         fp.close()
 
-    def _write(self, files, atomictemp):
-        fp = self.vfs('fncache', mode='wb', atomictemp=atomictemp)
-        if files:
-            fp.write(encodedir('\n'.join(files) + '\n'))
-        fp.close()
-        self._dirty = False
-
-    def rewrite(self, files):
-        self._write(files, False)
-        self.entries = set(files)
-
-    def write(self):
+    def write(self, tr):
         if self._dirty:
-            self._write(self.entries, True)
+            tr.addbackup('fncache')
+            fp = self.vfs('fncache', mode='wb', atomictemp=True)
+            if self.entries:
+                fp.write(encodedir('\n'.join(self.entries) + '\n'))
+            fp.close()
+            self._dirty = False
 
     def add(self, fn):
         if self.entries is None:
@@ -424,6 +424,15 @@
             self._dirty = True
             self.entries.add(fn)
 
+    def remove(self, fn):
+        if self.entries is None:
+            self._load()
+        try:
+            self.entries.remove(fn)
+            self._dirty = True
+        except KeyError:
+            pass
+
     def __contains__(self, fn):
         if self.entries is None:
             self._load()
@@ -476,22 +485,13 @@
         return self.rawvfs.stat(path).st_size
 
     def datafiles(self):
-        rewrite = False
-        existing = []
         for f in sorted(self.fncache):
             ef = self.encode(f)
             try:
                 yield f, ef, self.getsize(ef)
-                existing.append(f)
             except OSError, err:
                 if err.errno != errno.ENOENT:
                     raise
-                # nonexistent entry
-                rewrite = True
-        if rewrite:
-            # rewrite fncache to remove nonexistent entries
-            # (may be caused by rollback / strip)
-            self.fncache.rewrite(existing)
 
     def copylist(self):
         d = ('data dh fncache phaseroots obsstore'
@@ -499,8 +499,14 @@
         return (['requires', '00changelog.i'] +
                 ['store/' + f for f in d.split()])
 
-    def write(self):
-        self.fncache.write()
+    def write(self, tr):
+        self.fncache.write(tr)
+
+    def invalidatecaches(self):
+        self.fncache.entries = None
+
+    def markremoved(self, fn):
+        self.fncache.remove(fn)
 
     def _exists(self, f):
         ef = self.encode(f)
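
The `write(tr)` change above ties the fncache rewrite to the transaction.
A hedged sketch of the resulting calling convention, with `vfs`, `tr` and
`entries` standing in for the attributes used in the code above:

    tr.addbackup('fncache')   # hardlinks fncache to journal.fncache first
    fp = vfs('fncache', mode='wb', atomictemp=True)
    fp.write(encodedir('\n'.join(entries) + '\n'))
    fp.close()
    # if the transaction aborts, _playback() copies journal.fncache back

Stale entries are no longer pruned as a side effect of datafiles();
markremoved() drops them explicitly and invalidatecaches() forces a reload.
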
--- a/mercurial/subrepo.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/subrepo.py	Thu Apr 17 19:36:17 2014 -0400
@@ -326,16 +326,6 @@
                 os.unlink(os.path.join(dirname, f))
     os.walk(path, v, None)
 
-def itersubrepos(ctx1, ctx2):
-    """find subrepos in ctx1 or ctx2"""
-    # Create a (subpath, ctx) mapping where we prefer subpaths from
-    # ctx1. The subpaths from ctx2 are important when the .hgsub file
-    # has been modified (in ctx2) but not yet committed (in ctx1).
-    subpaths = dict.fromkeys(ctx2.substate, ctx2)
-    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
-    for subpath, ctx in sorted(subpaths.iteritems()):
-        yield subpath, ctx.sub(subpath)
-
 def subrepo(ctx, path):
     """return instance of the right subrepo class for subrepo in path"""
     # subrepo inherently violates our import layering rules
@@ -449,6 +439,9 @@
     def add(self, ui, match, dryrun, listsubrepos, prefix, explicitonly):
         return []
 
+    def cat(self, ui, match, prefix, **opts):
+        return 1
+
     def status(self, rev2, **opts):
         return [], [], [], [], [], [], []
 
@@ -522,8 +515,8 @@
         for s, k in [('ui', 'commitsubrepos')]:
             v = r.ui.config(s, k)
             if v:
-                self._repo.ui.setconfig(s, k, v)
-        self._repo.ui.setconfig('ui', '_usedassubrepo', 'True')
+                self._repo.ui.setconfig(s, k, v, 'subrepo')
+        self._repo.ui.setconfig('ui', '_usedassubrepo', 'True', 'subrepo')
         self._initrepo(r, state[0], create)
 
     def storeclean(self, path):
@@ -604,7 +597,7 @@
             def addpathconfig(key, value):
                 if value:
                     fp.write('%s = %s\n' % (key, value))
-                    self._repo.ui.setconfig('paths', key, value)
+                    self._repo.ui.setconfig('paths', key, value, 'subrepo')
 
             defpath = _abssource(self._repo, abort=False)
             defpushpath = _abssource(self._repo, True, abort=False)
@@ -619,6 +612,12 @@
                            os.path.join(prefix, self._path), explicitonly)
 
     @annotatesubrepoerror
+    def cat(self, ui, match, prefix, **opts):
+        rev = self._state[1]
+        ctx = self._repo[rev]
+        return cmdutil.cat(ui, self._repo, ctx, match, prefix, **opts)
+
+    @annotatesubrepoerror
     def status(self, rev2, **opts):
         try:
             rev1 = self._state[1]
@@ -1117,18 +1116,50 @@
                 raise
             self._gitexecutable = 'git.cmd'
             out, err = self._gitnodir(['--version'])
+        versionstatus = self._checkversion(out)
+        if versionstatus == 'unknown':
+            self._ui.warn(_('cannot retrieve git version\n'))
+        elif versionstatus == 'abort':
+            raise util.Abort(_('git subrepo requires at least 1.6.0 or later'))
+        elif versionstatus == 'warning':
+            self._ui.warn(_('git subrepo requires at least 1.6.0 or later\n'))
+
+    @staticmethod
+    def _checkversion(out):
+        '''ensure git version is new enough
+
+        >>> _checkversion = gitsubrepo._checkversion
+        >>> _checkversion('git version 1.6.0')
+        'ok'
+        >>> _checkversion('git version 1.8.5')
+        'ok'
+        >>> _checkversion('git version 1.4.0')
+        'abort'
+        >>> _checkversion('git version 1.5.0')
+        'warning'
+        >>> _checkversion('git version 1.9-rc0')
+        'ok'
+        >>> _checkversion('git version 1.9.0.265.g81cdec2')
+        'ok'
+        >>> _checkversion('git version 1.9.0.GIT')
+        'ok'
+        >>> _checkversion('git version 12345')
+        'unknown'
+        >>> _checkversion('no')
+        'unknown'
+        '''
         m = re.search(r'^git version (\d+)\.(\d+)', out)
         if not m:
-            self._ui.warn(_('cannot retrieve git version\n'))
-            return
+            return 'unknown'
         version = (int(m.group(1)), int(m.group(2)))
         # git 1.4.0 can't work at all, but 1.5.X can in at least some cases,
         # despite the docstring comment.  For now, error on 1.4.0, warn on
         # 1.5.0 but attempt to continue.
         if version < (1, 5):
-            raise util.Abort(_('git subrepo requires at least 1.6.0 or later'))
+            return 'abort'
         elif version < (1, 6):
-            self._ui.warn(_('git subrepo requires at least 1.6.0 or later\n'))
+            return 'warning'
+        return 'ok'
 
     def _gitcommand(self, commands, env=None, stream=False):
         return self._gitdir(commands, env=env, stream=stream)[0]
@@ -1441,8 +1472,8 @@
                 return False
             self._ui.status(_('pushing branch %s of subrepo %s\n') %
                             (current.split('/', 2)[2], self._relpath))
-            self._gitcommand(cmd + ['origin', current])
-            return True
+            ret = self._gitdir(cmd + ['origin', current])
+            return ret[1] == 0
         else:
             self._ui.warn(_('no branch checked out in subrepo %s\n'
                             'cannot push revision %s\n') %
--- a/mercurial/tags.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/tags.py	Thu Apr 17 19:36:17 2014 -0400
@@ -15,6 +15,7 @@
 import encoding
 import error
 import errno
+import time
 
 def findglobaltags(ui, repo, alltags, tagtypes):
     '''Find global tags in repo by reading .hgtags from every head that
@@ -234,6 +235,8 @@
         # potentially expensive search.
         return (repoheads, cachefnode, None, True)
 
+    starttime = time.time()
+
     newheads = [head
                 for head in repoheads
                 if head not in set(cacheheads)]
@@ -251,6 +254,12 @@
             # no .hgtags file on this head
             pass
 
+    duration = time.time() - starttime
+    ui.log('tagscache',
+           'resolved %d tags cache entries from %d manifests in %0.4f '
+           'seconds\n',
+           len(cachefnode), len(newheads), duration)
+
     # Caller has to iterate over all heads, but can use the filenodes in
     # cachefnode to get to each .hgtags revision quickly.
     return (repoheads, cachefnode, None, True)
@@ -262,6 +271,9 @@
     except (OSError, IOError):
         return
 
+    ui.log('tagscache', 'writing tags cache file with %d heads and %d tags\n',
+            len(heads), len(cachetags))
+
     realheads = repo.heads()            # for sanity checks below
     for head in heads:
         # temporary sanity checks; these can probably be removed
--- a/mercurial/templatekw.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/templatekw.py	Thu Apr 17 19:36:17 2014 -0400
@@ -195,8 +195,12 @@
     """:bookmarks: List of strings. Any bookmarks associated with the
     changeset.
     """
+    repo = args['ctx']._repo
     bookmarks = args['ctx'].bookmarks()
-    return showlist('bookmark', bookmarks, **args)
+    hybrid = showlist('bookmark', bookmarks, **args)
+    for value in hybrid.values:
+        value['current'] = repo._bookmarkcurrent
+    return hybrid
 
 def showchildren(**args):
     """:children: List of strings. The children of the changeset."""
@@ -297,8 +301,8 @@
 def showmanifest(**args):
     repo, ctx, templ = args['repo'], args['ctx'], args['templ']
     args = args.copy()
-    args.update(dict(rev=repo.manifest.rev(ctx.changeset()[0]),
-                     node=hex(ctx.changeset()[0])))
+    args.update({'rev': repo.manifest.rev(ctx.changeset()[0]),
+                 'node': hex(ctx.changeset()[0])})
     return templ('manifest', **args)
 
 def shownode(repo, ctx, templ, **args):
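
Because each list member now carries a `current` key naming the active
bookmark, templates can highlight it with the existing ifeq() function; an
illustrative invocation (output values are hypothetical):

    $ hg log -r . --template "{bookmarks % '{bookmark}{ifeq(bookmark, current, \"*\")} '}\n"
    featureX* stable
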
--- a/mercurial/templater.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/templater.py	Thu Apr 17 19:36:17 2014 -0400
@@ -7,7 +7,7 @@
 
 from i18n import _
 import sys, os, re
-import util, config, templatefilters, parser, error
+import util, config, templatefilters, templatekw, parser, error
 import types
 import minirst
 
@@ -245,6 +245,31 @@
 
     return templatefilters.fill(text, width, initindent, hangindent)
 
+def pad(context, mapping, args):
+    """usage: pad(text, width, fillchar=' ', right=False)
+    """
+    if not (2 <= len(args) <= 4):
+        raise error.ParseError(_("pad() expects two to four arguments"))
+
+    width = int(args[1][1])
+
+    text = stringify(args[0][0](context, mapping, args[0][1]))
+    if args[0][0] == runstring:
+        text = stringify(runtemplate(context, mapping,
+            compiletemplate(text, context)))
+
+    right = False
+    fillchar = ' '
+    if len(args) > 2:
+        fillchar = stringify(args[2][0](context, mapping, args[2][1]))
+    if len(args) > 3:
+        right = util.parsebool(args[3][1])
+
+    if right:
+        return text.rjust(width, fillchar)
+    else:
+        return text.ljust(width, fillchar)
+
 def get(context, mapping, args):
     if len(args) != 2:
         # i18n: "get" is a keyword
@@ -277,6 +302,19 @@
     elif len(args) == 3:
         yield _evalifliteral(args[2], context, mapping)
 
+def ifcontains(context, mapping, args):
+    if not (3 <= len(args) <= 4):
+        # i18n: "ifcontains" is a keyword
+        raise error.ParseError(_("ifcontains expects three or four arguments"))
+
+    item = stringify(args[0][0](context, mapping, args[0][1]))
+    items = args[1][0](context, mapping, args[1][1])
+
+    if item in items:
+        yield _evalifliteral(args[2], context, mapping)
+    elif len(args) == 4:
+        yield _evalifliteral(args[3], context, mapping)
+
 def ifeq(context, mapping, args):
     if not (3 <= len(args) <= 4):
         # i18n: "ifeq" is a keyword
@@ -319,6 +357,32 @@
     # ignore args[0] (the label string) since this is supposed to be a no-op
     yield _evalifliteral(args[1], context, mapping)
 
+def revset(context, mapping, args):
+    """usage: revset(query[, formatargs...])
+    """
+    if not len(args) > 0:
+        # i18n: "revset" is a keyword
+        raise error.ParseError(_("revset expects one or more arguments"))
+
+    raw = args[0][1]
+    ctx = mapping['ctx']
+    repo = ctx._repo
+
+    if len(args) > 1:
+        formatargs = [a[0](context, mapping, a[1]) for a in args[1:]]
+        revs = repo.revs(raw, *formatargs)
+        revs = [str(r) for r in revs]
+    else:
+        revsetcache = mapping['cache'].setdefault("revsetcache", {})
+        if raw in revsetcache:
+            revs = revsetcache[raw]
+        else:
+            revs = repo.revs(raw)
+            revs = [str(r) for r in revs]
+            revsetcache[raw] = revs
+
+    return templatekw.showlist("revision", revs, **mapping)
+
 def rstdoc(context, mapping, args):
     if len(args) != 2:
         # i18n: "rstdoc" is a keyword
@@ -329,6 +393,57 @@
 
     return minirst.format(text, style=style, keep=['verbose'])
 
+def shortest(context, mapping, args):
+    """usage: shortest(node, minlength=4)
+    """
+    if not (1 <= len(args) <= 2):
+        raise error.ParseError(_("shortest() expects one or two arguments"))
+
+    node = stringify(args[0][0](context, mapping, args[0][1]))
+
+    minlength = 4
+    if len(args) > 1:
+        minlength = int(args[1][1])
+
+    cl = mapping['ctx']._repo.changelog
+    def isvalid(test):
+        try:
+            try:
+                cl.index.partialmatch(test)
+            except AttributeError:
+                # Pure mercurial doesn't support partialmatch on the index.
+                # Fallback to the slow way.
+                if cl._partialmatch(test) is None:
+                    return False
+
+            try:
+                i = int(test)
+                # if we are a pure int, then starting with zero will not be
+                # confused as a rev; or, obviously, if the int is larger than
+                # the value of the tip rev
+                if test[0] == '0' or i > len(cl):
+                    return True
+                return False
+            except ValueError:
+                return True
+        except error.RevlogError:
+            return False
+
+    shortest = node
+    startlength = max(6, minlength)
+    length = startlength
+    while True:
+        test = node[:length]
+        if isvalid(test):
+            shortest = test
+            if length == minlength or length > startlength:
+                return shortest
+            length -= 1
+        else:
+            length += 1
+            if len(shortest) <= length:
+                return shortest
+
 def strip(context, mapping, args):
     if not (1 <= len(args) <= 2):
         raise error.ParseError(_("strip expects one or two arguments"))
@@ -365,10 +480,14 @@
     "fill": fill,
     "get": get,
     "if": if_,
+    "ifcontains": ifcontains,
     "ifeq": ifeq,
     "join": join,
     "label": label,
+    "pad": pad,
+    "revset": revset,
     "rstdoc": rstdoc,
+    "shortest": shortest,
     "strip": strip,
     "sub": sub,
 }
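
The new template functions compose with existing keywords; an illustrative
command line exercising three of them (revset() results are cached per
template run unless format arguments are supplied):

    $ hg log -r . --template '{pad(rev, 8)}{shortest(node)} {ifcontains(rev, revset("head()"), "head", "")}\n'

pad() left-justifies unless right=True, shortest() trims the node to the
shortest unambiguous prefix of at least minlength (default 4) characters,
and ifcontains() selects a branch based on membership in the revset result.
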
--- a/mercurial/templates/atom/changelogentry.tmpl	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/templates/atom/changelogentry.tmpl	Thu Apr 17 19:36:17 2014 -0400
@@ -1,5 +1,5 @@
  <entry>
-  <title>{desc|strip|firstline|strip|escape|nonempty}</title>
+  <title>{inbranch%"{if(name, '[{name|escape}] ')}"}{branches%"{if(name, '[{name|escape}] ')}"}{desc|strip|firstline|strip|escape|nonempty}</title>
   <id>{urlbase}{url|urlescape}#changeset-{node}</id>
   <link href="{urlbase}{url|urlescape}rev/{node|short}"/>
   <author>
@@ -9,8 +9,35 @@
   <updated>{date|rfc3339date}</updated>
   <published>{date|rfc3339date}</published>
   <content type="xhtml">
-   <div xmlns="http://www.w3.org/1999/xhtml">
-    <pre xml:space="preserve">{desc|escape|nonempty}</pre>
-   </div>
+	<table xmlns="http://www.w3.org/1999/xhtml">
+	<tr>
+		<th style="text-align:left;">changeset</th>
+		<td>{node|short}</td>
+	</tr>
+	<tr>
+		<th style="text-align:left;">branch</th>
+		<td>{inbranch%"{name|escape}"}{branches%"{name|escape}"}</td>
+	</tr>
+	<tr>
+		<th style="text-align:left;">bookmark</th>
+		<td>{bookmarks%"{name|escape}"}</td>
+	</tr>
+	<tr>
+		<th style="text-align:left;">tag</th>
+		<td>{tags%"{name|escape}"}</td>
+	</tr>
+	<tr>
+		<th style="text-align:left;">user</th>
+		<td>{author|obfuscate}</td>
+	</tr>
+	<tr>
+		<th style="text-align:left;vertical-align:top;">description</th>
+		<td>{desc|strip|escape|addbreaks|nonempty}</td>
+	</tr>
+	<tr>
+		<th style="text-align:left;vertical-align:top;">files</th>
+		<td>{files}</td>
+	</tr>
+	</table>
   </content>
  </entry>
--- a/mercurial/templates/atom/map	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/templates/atom/map	Thu Apr 17 19:36:17 2014 -0400
@@ -13,3 +13,7 @@
 branches = branches.tmpl
 branchentry = branchentry.tmpl
 error = error.tmpl
+filedifflink = '{file|escape}<br />'
+fileellipses = '{file|escape}<br />'
+filenodelink = '{file|escape}<br />'
+filenolink = '{file|escape}<br />'
--- a/mercurial/templates/paper/filerevision.tmpl	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/templates/paper/filerevision.tmpl	Thu Apr 17 19:36:17 2014 -0400
@@ -13,6 +13,7 @@
 <li><a href="{url|urlescape}shortlog/{node|short}{sessionvars%urlparameter}">log</a></li>
 <li><a href="{url|urlescape}graph/{node|short}{sessionvars%urlparameter}">graph</a></li>
 <li><a href="{url|urlescape}tags{sessionvars%urlparameter}">tags</a></li>
+<li><a href="{url|urlescape}bookmarks{sessionvars%urlparameter}">bookmarks</a></li>
 <li><a href="{url|urlescape}branches{sessionvars%urlparameter}">branches</a></li>
 </ul>
 <ul>
--- a/mercurial/templates/rss/changelogentry.tmpl	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/templates/rss/changelogentry.tmpl	Thu Apr 17 19:36:17 2014 -0400
@@ -1,7 +1,40 @@
 <item>
-    <title>{desc|strip|firstline|strip|escape}</title>
+    <title>{inbranch%"{if(name, '[{name|escape}] ')}"}{branches%"{if(name, '[{name|escape}] ')}"}{desc|strip|firstline|strip|escape}</title>
     <guid isPermaLink="true">{urlbase}{url|urlescape}rev/{node|short}</guid>
-    <description><![CDATA[{desc|strip|escape|addbreaks|nonempty}]]></description>
+    <link>{urlbase}{url|urlescape}rev/{node|short}</link>
+    <description>
+    <![CDATA[
+	<table>
+	<tr>
+		<th style="text-align:left;">changeset</th>
+		<td>{node|short}</td>
+	</tr>
+	<tr>
+		<th style="text-align:left;">branch</th>
+		<td>{inbranch%"{name|escape}"}{branches%"{name|escape}"}</td>
+	</tr>
+	<tr>
+		<th style="text-align:left;">bookmark</th>
+		<td>{bookmarks%"{name|escape}"}</td>
+	</tr>
+	<tr>
+		<th style="text-align:left;">tag</th>
+		<td>{tags%"{name|escape}"}</td>
+	</tr>
+	<tr>
+		<th style="text-align:left;vertical-align:top;">user</th>
+		<td>{author|obfuscate}</td>
+	</tr>
+	<tr>
+		<th style="text-align:left;vertical-align:top;">description</th>
+		<td>{desc|strip|escape|addbreaks|nonempty}</td>
+	</tr>
+	<tr>
+		<th style="text-align:left;vertical-align:top;">files</th>
+		<td>{files}</td>
+	</tr>
+	</table>
+	]]></description>
     <author>{author|obfuscate}</author>
     <pubDate>{date|rfc822date}</pubDate>
 </item>
--- a/mercurial/templates/rss/map	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/templates/rss/map	Thu Apr 17 19:36:17 2014 -0400
@@ -12,3 +12,7 @@
 branches = branches.tmpl
 branchentry = branchentry.tmpl
 error = error.tmpl
+filedifflink = '{file|escape}<br />'
+fileellipses = '{file|escape}<br />'
+filenodelink = '{file|escape}<br />'
+filenolink = '{file|escape}<br />'
--- a/mercurial/templates/spartan/changelogentry.tmpl	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/templates/spartan/changelogentry.tmpl	Thu Apr 17 19:36:17 2014 -0400
@@ -1,6 +1,6 @@
 <table class="logEntry parity{parity}">
  <tr>
-  <th><span class="age">{date|rfc822date}</span>:</th>
+  <th class="label"><span class="age">{date|rfc822date}</span>:</th>
   <th class="firstline">{desc|strip|firstline|escape|nonempty}</th>
  </tr>
  <tr>
--- a/mercurial/templates/spartan/filelogentry.tmpl	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/templates/spartan/filelogentry.tmpl	Thu Apr 17 19:36:17 2014 -0400
@@ -1,6 +1,6 @@
 <table class="logEntry parity{parity}">
  <tr>
-  <th><span class="age">{date|rfc822date}</span>:</th>
+  <th class="label"><span class="age">{date|rfc822date}</span>:</th>
   <th class="firstline"><a href="{url|urlescape}rev/{node|short}{sessionvars%urlparameter}">{desc|strip|firstline|escape|nonempty}</a></th>
  </tr>
  <tr>
--- a/mercurial/templates/static/mercurial.js	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/templates/static/mercurial.js	Thu Apr 17 19:36:17 2014 -0400
@@ -327,6 +327,7 @@
     };
 
     xfr.open(method, url);
+    xfr.overrideMimeType("text/xhtml; charset=" + document.characterSet.toLowerCase());
     xfr.send();
     onstart();
     return xfr;
--- a/mercurial/templates/static/style.css	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/templates/static/style.css	Thu Apr 17 19:36:17 2014 -0400
@@ -41,6 +41,7 @@
 /* Changelog/Filelog entries */
 .logEntry { width: 100%; }
 .logEntry .age { width: 15%; }
+.logEntry th.label { width: 16em; }
 .logEntry th { font-weight: normal; text-align: right; vertical-align: top; }
 .logEntry th.age, .logEntry th.firstline { font-weight: bold; }
 .logEntry th.firstline { text-align: left; width: inherit; }
--- a/mercurial/transaction.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/transaction.py	Thu Apr 17 19:36:17 2014 -0400
@@ -13,7 +13,7 @@
 
 from i18n import _
 import errno
-import error
+import error, util
 
 def active(func):
     def _active(self, *args, **kwds):
@@ -23,7 +23,7 @@
         return func(self, *args, **kwds)
     return _active
 
-def _playback(journal, report, opener, entries, unlink=True):
+def _playback(journal, report, opener, entries, backupentries, unlink=True):
     for f, o, ignore in entries:
         if o or not unlink:
             try:
@@ -39,23 +39,62 @@
             except (IOError, OSError), inst:
                 if inst.errno != errno.ENOENT:
                     raise
+
+    backupfiles = []
+    for f, b, ignore in backupentries:
+        filepath = opener.join(f)
+        backuppath = opener.join(b)
+        try:
+            util.copyfile(backuppath, filepath)
+            backupfiles.append(b)
+        except IOError:
+            report(_("failed to recover %s\n") % f)
+            raise
+
     opener.unlink(journal)
+    backuppath = "%s.backupfiles" % journal
+    if opener.exists(backuppath):
+        opener.unlink(backuppath)
+    for f in backupfiles:
+        opener.unlink(f)
 
 class transaction(object):
-    def __init__(self, report, opener, journal, after=None, createmode=None):
+    def __init__(self, report, opener, journal, after=None, createmode=None,
+            onclose=None, onabort=None):
+        """Begin a new transaction
+
+        Begins a new transaction that allows rolling back writes in the event of
+        an exception.
+
+        * `after`: called after the transaction has been committed
+        * `createmode`: the mode of the journal file that will be created
+        * `onclose`: called as the transaction is closing, but before it is
+        closed
+        * `onabort`: called as the transaction is aborting, but before any files
+        have been truncated
+        """
         self.count = 1
         self.usages = 1
         self.report = report
         self.opener = opener
         self.after = after
+        self.onclose = onclose
+        self.onabort = onabort
         self.entries = []
+        self.backupentries = []
         self.map = {}
+        self.backupmap = {}
         self.journal = journal
         self._queue = []
+        # a dict of arguments to be passed to hooks
+        self.hookargs = {}
 
+        self.backupjournal = "%s.backupfiles" % journal
         self.file = opener.open(self.journal, "w")
+        self.backupsfile = opener.open(self.backupjournal, 'w')
         if createmode is not None:
             opener.chmod(self.journal, createmode & 0666)
+            opener.chmod(self.backupjournal, createmode & 0666)
 
     def __del__(self):
         if self.journal:
@@ -63,22 +102,36 @@
 
     @active
     def startgroup(self):
-        self._queue.append([])
+        self._queue.append(([], []))
 
     @active
     def endgroup(self):
         q = self._queue.pop()
-        d = ''.join(['%s\0%d\n' % (x[0], x[1]) for x in q])
-        self.entries.extend(q)
+        self.entries.extend(q[0])
+        self.backupentries.extend(q[1])
+
+        offsets = []
+        backups = []
+        for f, o, _ in q[0]:
+            offsets.append((f, o))
+
+        for f, b, _ in q[1]:
+            backups.append((f, b))
+
+        d = ''.join(['%s\0%d\n' % (f, o) for f, o in offsets])
         self.file.write(d)
         self.file.flush()
 
+        d = ''.join(['%s\0%s\0' % (f, b) for f, b in backups])
+        self.backupsfile.write(d)
+        self.backupsfile.flush()
+
     @active
     def add(self, file, offset, data=None):
-        if file in self.map:
+        if file in self.map or file in self.backupmap:
             return
         if self._queue:
-            self._queue[-1].append((file, offset, data))
+            self._queue[-1][0].append((file, offset, data))
             return
 
         self.entries.append((file, offset, data))
@@ -88,9 +141,43 @@
         self.file.flush()
 
     @active
+    def addbackup(self, file, hardlink=True):
+        """Adds a backup of the file to the transaction
+
+        Calling addbackup() creates a hardlink backup of the specified file
+        that is used to recover the file in the event of the transaction
+        aborting.
+
+        * `file`: the file path, relative to .hg/store
+        * `hardlink`: use a hardlink to quickly create the backup
+        """
+
+        if file in self.map or file in self.backupmap:
+            return
+        backupfile = "journal.%s" % file
+        if self.opener.exists(file):
+            filepath = self.opener.join(file)
+            backuppath = self.opener.join(backupfile)
+            util.copyfiles(filepath, backuppath, hardlink=hardlink)
+        else:
+            self.add(file, 0)
+            return
+
+        if self._queue:
+            self._queue[-1][1].append((file, backupfile))
+            return
+
+        self.backupentries.append((file, backupfile, None))
+        self.backupmap[file] = len(self.backupentries) - 1
+        self.backupsfile.write("%s\0%s\0" % (file, backupfile))
+        self.backupsfile.flush()
+
+    @active
     def find(self, file):
         if file in self.map:
             return self.entries[self.map[file]]
+        if file in self.backupmap:
+            return self.backupentries[self.backupmap[file]]
         return None
 
     @active
@@ -126,6 +213,9 @@
     @active
     def close(self):
         '''commit the transaction'''
+        if self.count == 1 and self.onclose is not None:
+            self.onclose()
+
         self.count -= 1
         if self.count != 0:
             return
@@ -135,6 +225,11 @@
             self.after()
         if self.opener.isfile(self.journal):
             self.opener.unlink(self.journal)
+        if self.opener.isfile(self.backupjournal):
+            self.opener.unlink(self.backupjournal)
+            for f, b, _ in self.backupentries:
+                self.opener.unlink(b)
+        self.backupentries = []
         self.journal = None
 
     @active
@@ -149,17 +244,22 @@
         self.usages = 0
         self.file.close()
 
+        if self.onabort is not None:
+            self.onabort()
+
         try:
-            if not self.entries:
+            if not self.entries and not self.backupentries:
                 if self.journal:
                     self.opener.unlink(self.journal)
+                if self.backupjournal:
+                    self.opener.unlink(self.backupjournal)
                 return
 
             self.report(_("transaction abort!\n"))
 
             try:
                 _playback(self.journal, self.report, self.opener,
-                          self.entries, False)
+                          self.entries, self.backupentries, False)
                 self.report(_("rollback completed\n"))
             except Exception:
                 self.report(_("rollback failed - please run hg recover\n"))
@@ -168,13 +268,38 @@
 
 
 def rollback(opener, file, report):
+    """Rolls back the transaction contained in the given file
+
+    Reads the entries in the specified file, and the corresponding
+    '*.backupfiles' file, to recover from an incomplete transaction.
+
+    * `file`: a file containing a list of entries, specifying where
+    to truncate each file.  The file should contain a list of
+    file\0offset pairs, delimited by newlines. The corresponding
+    '*.backupfiles' file should contain a list of file\0backupfile
+    pairs, delimited by \0.
+    """
     entries = []
+    backupentries = []
 
     fp = opener.open(file)
     lines = fp.readlines()
     fp.close()
     for l in lines:
-        f, o = l.split('\0')
-        entries.append((f, int(o), None))
+        try:
+            f, o = l.split('\0')
+            entries.append((f, int(o), None))
+        except ValueError:
+            report(_("couldn't read journal entry %r!\n") % l)
 
-    _playback(file, report, opener, entries)
+    backupjournal = "%s.backupfiles" % file
+    if opener.exists(backupjournal):
+        fp = opener.open(backupjournal)
+        data = fp.read()
+        if len(data) > 0:
+            parts = data.split('\0')
+            # each entry was written as "file\0backup\0", so the split
+            # leaves a trailing empty element; stop one short of it
+            for i in xrange(0, len(parts) - 1, 2):
+                f, b = parts[i:i + 2]
+                backupentries.append((f, b, None))
+
+    _playback(file, report, opener, entries, backupentries)
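
A hedged sketch of how a caller combines the new pieces; `report`, `vfs`,
`store` and `clstart` are placeholders for what a repository would pass in:

    def flushstore():
        # hypothetical onclose callback: persist caches while the
        # journal still covers their writes
        store.write(tr)

    tr = transaction(report, vfs, 'journal', onclose=flushstore)
    tr.addbackup('fncache')            # whole file: hardlink backup
    tr.add('00changelog.i', clstart)   # append-only: truncation offset
    tr.close()                         # runs onclose, then cleans up

On abort, or on a later rollback(), _playback() truncates each entry in
`entries` to its recorded offset and copies every journal.* backup over
its original file.
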
--- a/mercurial/ui.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/ui.py	Thu Apr 17 19:36:17 2014 -0400
@@ -8,10 +8,14 @@
 from i18n import _
 import errno, getpass, os, socket, sys, tempfile, traceback
 import config, scmutil, util, error, formatter
+from node import hex
 
 class ui(object):
     def __init__(self, src=None):
+        # _buffers: used for temporary capture of output
         self._buffers = []
+        # _bufferstates: should the temporary capture include stderr?
+        self._bufferstates = []
         self.quiet = self.verbose = self.debugflag = self.tracebackflag = False
         self._reportuntrusted = True
         self._ocfg = config.config() # overlay
@@ -156,11 +160,9 @@
         self._tcfg.restore(data[1])
         self._ucfg.restore(data[2])
 
-    def setconfig(self, section, name, value, overlay=True):
-        if overlay:
-            self._ocfg.set(section, name, value)
-        self._tcfg.set(section, name, value)
-        self._ucfg.set(section, name, value)
+    def setconfig(self, section, name, value, source=''):
+        for cfg in (self._ocfg, self._tcfg, self._ucfg):
+            cfg.set(section, name, value, source)
         self.fixconfig(section=section)
 
     def _data(self, untrusted):
@@ -449,7 +451,9 @@
             except KeyError:
                 pass
         if not user:
-            raise util.Abort(_('no username supplied (see "hg help config")'))
+            raise util.Abort(_('no username supplied'),
+                             hint=_('use "hg config --edit" '
+                                    'to set your username'))
         if "\n" in user:
             raise util.Abort(_("username %s contains a newline\n") % repr(user))
         return user
@@ -470,8 +474,12 @@
             path = self.config('paths', default)
         return path or loc
 
-    def pushbuffer(self):
+    def pushbuffer(self, error=False):
+        """install a buffer to capture standar output of the ui object
+
+        If error is True, the error output will be captured too."""
         self._buffers.append([])
+        self._bufferstates.append(error)
 
     def popbuffer(self, labeled=False):
         '''pop the last buffer and return the buffered output
@@ -483,6 +491,7 @@
         is being buffered so it can be captured and parsed or
         processed, labeled should not be set to True.
         '''
+        self._bufferstates.pop()
         return "".join(self._buffers.pop())
 
     def write(self, *args, **opts):
@@ -510,6 +519,8 @@
 
     def write_err(self, *args, **opts):
         try:
+            if self._bufferstates and self._bufferstates[-1]:
+                return self.write(*args, **opts)
             if not getattr(self.fout, 'closed', False):
                 self.fout.flush()
             for a in args:
@@ -712,7 +723,7 @@
         if self.debugflag:
             opts['label'] = opts.get('label', '') + ' ui.debug'
             self.write(*msg, **opts)
-    def edit(self, text, user):
+    def edit(self, text, user, extra={}):
         (fd, name) = tempfile.mkstemp(prefix="hg-editor-", suffix=".txt",
                                       text=True)
         try:
@@ -720,10 +731,18 @@
             f.write(text)
             f.close()
 
+            environ = {'HGUSER': user}
+            if 'transplant_source' in extra:
+                environ.update({'HGREVISION': hex(extra['transplant_source'])})
+            for label in ('source', 'rebase_source'):
+                if label in extra:
+                    environ.update({'HGREVISION': extra[label]})
+                    break
+
             editor = self.geteditor()
 
             util.system("%s \"%s\"" % (editor, name),
-                        environ={'HGUSER': user},
+                        environ=environ,
                         onerr=util.Abort, errprefix=_("edit failed"),
                         out=self.fout)
 
--- a/mercurial/unionrepo.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/unionrepo.py	Thu Apr 17 19:36:17 2014 -0400
@@ -170,7 +170,7 @@
 class unionrepository(localrepo.localrepository):
     def __init__(self, ui, path, path2):
         localrepo.localrepository.__init__(self, ui, path)
-        self.ui.setconfig('phases', 'publish', False)
+        self.ui.setconfig('phases', 'publish', False, 'unionrepo')
 
         self._url = 'union:%s+%s' % (util.expandpath(path),
                                      util.expandpath(path2))
--- a/mercurial/url.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/url.py	Thu Apr 17 19:36:17 2014 -0400
@@ -7,7 +7,7 @@
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
-import urllib, urllib2, httplib, os, socket, cStringIO
+import urllib, urllib2, httplib, os, socket, cStringIO, base64
 from i18n import _
 import keepalive, util, sslutil
 import httpconnection as httpconnectionmod
@@ -422,9 +422,22 @@
 
 class httpbasicauthhandler(urllib2.HTTPBasicAuthHandler):
     def __init__(self, *args, **kwargs):
+        self.auth = None
         urllib2.HTTPBasicAuthHandler.__init__(self, *args, **kwargs)
         self.retried_req = None
 
+    def http_request(self, request):
+        if self.auth:
+            request.add_unredirected_header(self.auth_header, self.auth)
+
+        return request
+
+    def https_request(self, request):
+        if self.auth:
+            request.add_unredirected_header(self.auth_header, self.auth)
+
+        return request
+
     def reset_retry_count(self):
         # Python 2.6.5 will call this on 401 or 407 errors and thus loop
         # forever. We disable reset_retry_count completely and reset in
@@ -439,6 +452,19 @@
         return urllib2.HTTPBasicAuthHandler.http_error_auth_reqed(
                         self, auth_header, host, req, headers)
 
+    def retry_http_basic_auth(self, host, req, realm):
+        user, pw = self.passwd.find_user_password(realm, req.get_full_url())
+        if pw is not None:
+            raw = "%s:%s" % (user, pw)
+            auth = 'Basic %s' % base64.b64encode(raw).strip()
+            if req.headers.get(self.auth_header, None) == auth:
+                return None
+            self.auth = auth
+            req.add_unredirected_header(self.auth_header, auth)
+            return self.parent.open(req)
+        else:
+            return None
+
 handlerfuncs = []
 
 def opener(ui, authinfo=None):
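
Keeping `self.auth` gives preemptive authentication: once a 401 retry has
succeeded, http_request()/https_request() attach the header to every later
request, saving a 401 round trip each time. The header value mirrors what
retry_http_basic_auth() computes above (credentials are hypothetical):

    import base64
    raw = '%s:%s' % ('user', 'secret')
    auth = 'Basic %s' % base64.b64encode(raw).strip()
    # then: request.add_unredirected_header('Authorization', auth)
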
--- a/mercurial/util.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/util.py	Thu Apr 17 19:36:17 2014 -0400
@@ -188,6 +188,13 @@
 def cachefunc(func):
     '''cache the result of function calls'''
     # XXX doesn't handle keywords args
+    if func.func_code.co_argcount == 0:
+        cache = []
+        def f():
+            if len(cache) == 0:
+                cache.append(func())
+            return cache[0]
+        return f
     cache = {}
     if func.func_code.co_argcount == 1:
         # we gain a small amount of time because
@@ -961,13 +968,15 @@
         self.iter = splitbig(in_iter)
         self._queue = deque()
 
-    def read(self, l):
+    def read(self, l=None):
         """Read L bytes of data from the iterator of chunks of data.
-        Returns less than L bytes if the iterator runs dry."""
+        Returns less than L bytes if the iterator runs dry.
+
+        If the size parameter is omitted, read everything."""
         left = l
         buf = []
         queue = self._queue
-        while left > 0:
+        while left is None or left > 0:
             # refill the queue
             if not queue:
                 target = 2**18
@@ -980,8 +989,9 @@
                     break
 
             chunk = queue.popleft()
-            left -= len(chunk)
-            if left < 0:
+            if left is not None:
+                left -= len(chunk)
+            if left is not None and left < 0:
                 queue.appendleft(chunk[left:])
                 buf.append(chunk[:left])
             else:
@@ -1198,11 +1208,11 @@
     """
 
     def lower(date):
-        d = dict(mb="1", d="1")
+        d = {'mb': "1", 'd': "1"}
         return parsedate(date, extendeddateformats, d)[0]
 
     def upper(date):
-        d = dict(mb="12", HI="23", M="59", S="59")
+        d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
         for days in ("31", "30", "29"):
             try:
                 d["d"] = days
@@ -1986,15 +1996,19 @@
 
     def __call__(self, *args):
         self._hooks.sort(key=lambda x: x[0])
+        results = []
         for source, hook in self._hooks:
-            hook(*args)
+            results.append(hook(*args))
+        return results
 
-def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr):
+def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
     '''Writes a message to f (stderr) with a nicely formatted stacktrace.
-    Skips the 'skip' last entries.
+    Skips the 'skip' last entries. By default it will flush stdout first.
     It can be used everywhere and intentionally does not require a ui object.
     Not to be used in production code but very convenient while developing.
     '''
+    if otherf:
+        otherf.flush()
     f.write('%s at:\n' % msg)
     entries = [('%s:%s' % (fn, ln), func)
         for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]]
@@ -2002,6 +2016,7 @@
         fnmax = max(len(entry[0]) for entry in entries)
         for fnln, func in entries:
             f.write(' %-*s in %s\n' % (fnmax, fnln, func))
+    f.flush()
 
 # convenient shortcut
 dst = debugstacktrace
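
The new zero-argument branch of cachefunc() memoizes a thunk on its first
call; a toy demonstration:

    calls = []
    def expensive():
        calls.append(1)
        return 42

    f = cachefunc(expensive)
    f(); f()
    assert f() == 42 and len(calls) == 1   # the body ran exactly once
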
--- a/mercurial/win32.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/win32.py	Thu Apr 17 19:36:17 2014 -0400
@@ -24,6 +24,7 @@
 
 # GetLastError
 _ERROR_SUCCESS = 0
+_ERROR_SHARING_VIOLATION = 32
 _ERROR_INVALID_PARAMETER = 87
 _ERROR_INSUFFICIENT_BUFFER = 122
 
@@ -59,7 +60,9 @@
 
 _OPEN_EXISTING = 3
 
+_FILE_FLAG_OPEN_REPARSE_POINT = 0x00200000
 _FILE_FLAG_BACKUP_SEMANTICS = 0x02000000
+_FILE_FLAG_DELETE_ON_CLOSE = 0x04000000
 
 # SetFileAttributes
 _FILE_ATTRIBUTE_NORMAL = 0x80
@@ -119,6 +122,27 @@
 
 _STD_ERROR_HANDLE = _DWORD(-12).value
 
+# CreateToolhelp32Snapshot, Process32First, Process32Next
+_TH32CS_SNAPPROCESS = 0x00000002
+_MAX_PATH = 260
+
+class _tagPROCESSENTRY32(ctypes.Structure):
+    _fields_ = [('dwsize', _DWORD),
+                ('cntUsage', _DWORD),
+                ('th32ProcessID', _DWORD),
+                ('th32DefaultHeapID', ctypes.c_void_p),
+                ('th32ModuleID', _DWORD),
+                ('cntThreads', _DWORD),
+                ('th32ParentProcessID', _DWORD),
+                ('pcPriClassBase', _LONG),
+                ('dwFlags', _DWORD),
+                ('szExeFile', ctypes.c_char * _MAX_PATH)]
+
+    def __init__(self):
+        super(_tagPROCESSENTRY32, self).__init__()
+        self.dwsize = ctypes.sizeof(self)
+
+
 # types of parameters of C functions used (required by pypy)
 
 _kernel32.CreateFileA.argtypes = [_LPCSTR, _DWORD, _DWORD, ctypes.c_void_p,
@@ -186,6 +210,15 @@
 _user32.EnumWindows.argtypes = [_WNDENUMPROC, _LPARAM]
 _user32.EnumWindows.restype = _BOOL
 
+_kernel32.CreateToolhelp32Snapshot.argtypes = [_DWORD, _DWORD]
+_kernel32.CreateToolhelp32Snapshot.restype = _HANDLE
+
+_kernel32.Process32First.argtypes = [_HANDLE, ctypes.c_void_p]
+_kernel32.Process32First.restype = _BOOL
+
+_kernel32.Process32Next.argtypes = [_HANDLE, ctypes.c_void_p]
+_kernel32.Process32Next.restype = _BOOL
+
 def _raiseoserror(name):
     err = ctypes.WinError()
     raise OSError(err.errno, '%s: %s' % (name, err.strerror))
@@ -309,6 +342,51 @@
     width = csbi.srWindow.Right - csbi.srWindow.Left
     return width
 
+def _1stchild(pid):
+    '''return the 1st found child of the given pid
+
+    None is returned when no child is found'''
+    pe = _tagPROCESSENTRY32()
+
+    # create handle to list all processes
+    ph = _kernel32.CreateToolhelp32Snapshot(_TH32CS_SNAPPROCESS, 0)
+    if ph == _INVALID_HANDLE_VALUE:
+        raise ctypes.WinError()
+    try:
+        r = _kernel32.Process32First(ph, ctypes.byref(pe))
+        # loop over all processes
+        while r:
+            if pe.th32ParentProcessID == pid:
+                # return first child found
+                return pe.th32ProcessID
+            r = _kernel32.Process32Next(ph, ctypes.byref(pe))
+    finally:
+        _kernel32.CloseHandle(ph)
+    if _kernel32.GetLastError() != _ERROR_NO_MORE_FILES:
+        raise ctypes.WinError()
+    return None # no child found
+
+class _tochildpid(int): # pid is _DWORD, which always matches in an int
+    '''helper for spawndetached, returns the child pid on conversion to string
+
+    Does not resolve the child pid immediately because the child may not yet be
+    started.
+    '''
+    def childpid(self):
+        '''returns the child pid of the first found child of the process
+        with this pid'''
+        return _1stchild(self)
+    def __str__(self):
+        # run when the pid is written to the file
+        ppid = self.childpid()
+        if ppid is None:
+            # race, child has exited since check
+            # fall back to this pid. Its process will also have disappeared,
+            # raising the same error type later as when the child pid would
+            # be returned.
+            return " %d" % self
+        return str(ppid)
+
 def spawndetached(args):
     # No standard library function really spawns a fully detached
     # process under win32 because they allocate pipes or other objects
@@ -339,16 +417,24 @@
     if not res:
         raise ctypes.WinError()
 
-    return pi.dwProcessId
+    # _tochildpid because the process is the child of COMSPEC
+    return _tochildpid(pi.dwProcessId)
 
 def unlink(f):
     '''try to implement POSIX' unlink semantics on Windows'''
 
-    if os.path.isdir(f):
-        # use EPERM because it is POSIX prescribed value, even though
-        # unlink(2) on directories returns EISDIR on Linux
-        raise IOError(errno.EPERM,
-                      "Unlinking directory not permitted: '%s'" % f)
+    # If we can open f exclusively, no other process can have an open handle
+    # for it and we can expect its name to be deleted immediately when we
+    # close the handle, unless we have another one in the same process.  We
+    # also expect to simply fail to open f if it is a directory.
+    fh = _kernel32.CreateFileA(f, 0, 0, None, _OPEN_EXISTING,
+        _FILE_FLAG_OPEN_REPARSE_POINT | _FILE_FLAG_DELETE_ON_CLOSE, None)
+    if fh != _INVALID_HANDLE_VALUE:
+        _kernel32.CloseHandle(fh)
+        return
+    error = _kernel32.GetLastError()
+    if error != _ERROR_SHARING_VIOLATION:
+        raise ctypes.WinError(error)
 
     # POSIX allows to unlink and rename open files. Windows has serious
     # problems with doing that:
--- a/mercurial/wireproto.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/mercurial/wireproto.py	Thu Apr 17 19:36:17 2014 -0400
@@ -8,8 +8,55 @@
 import urllib, tempfile, os, sys
 from i18n import _
 from node import bin, hex
-import changegroup as changegroupmod
-import peer, error, encoding, util, store
+import changegroup as changegroupmod, bundle2
+import peer, error, encoding, util, store, exchange
+
+
+class abstractserverproto(object):
+    """abstract class that summarizes the protocol API
+
+    Used as reference and documentation.
+    """
+
+    def getargs(self, args):
+        """return the value for arguments in <args>
+
+        returns a list of values (same order as <args>)"""
+        raise NotImplementedError()
+
+    def getfile(self, fp):
+        """write the whole content of a file into a file like object
+
+        The file is in the form::
+
+            (<chunk-size>\n<chunk>)+0\n
+
+        The chunk size is the ASCII version of the int.
+        """
+        raise NotImplementedError()
+
+    def redirect(self):
+        """may setup interception for stdout and stderr
+
+        See also the `restore` method."""
+        raise NotImplementedError()
+
+    # If the `redirect` function does install interception, the `restore`
+    # function MUST be defined. If interception is not used, this function
+    # MUST NOT be defined.
+    #
+    # left commented here on purpose
+    #
+    #def restore(self):
+    #    """reinstall previous stdout and stderr and return intercepted stdout
+    #    """
+    #    raise NotImplementedError()
+
+    def groupchunks(self, cg):
+        """return 4096 chunks from a changegroup object
+
+        Some protocols may have compressed the contents."""
+        raise NotImplementedError()
 
 # abstract batching support
 
@@ -145,9 +192,6 @@
 
 # client side
 
-def todict(**args):
-    return args
-
 class wirepeer(peer.peerrepository):
 
     def batch(self):
@@ -166,7 +210,7 @@
     def lookup(self, key):
         self.requirecap('lookup', _('look up remote revision'))
         f = future()
-        yield todict(key=encoding.fromlocal(key)), f
+        yield {'key': encoding.fromlocal(key)}, f
         d = f.value
         success, data = d[:-1].split(" ", 1)
         if int(success):
@@ -186,7 +230,7 @@
     @batchable
     def known(self, nodes):
         f = future()
-        yield todict(nodes=encodelist(nodes)), f
+        yield {'nodes': encodelist(nodes)}, f
         d = f.value
         try:
             yield [bool(int(f)) for f in d]
@@ -236,10 +280,10 @@
             yield False, None
         f = future()
         self.ui.debug('preparing pushkey for "%s:%s"\n' % (namespace, key))
-        yield todict(namespace=encoding.fromlocal(namespace),
-                     key=encoding.fromlocal(key),
-                     old=encoding.fromlocal(old),
-                     new=encoding.fromlocal(new)), f
+        yield {'namespace': encoding.fromlocal(namespace),
+               'key': encoding.fromlocal(key),
+               'old': encoding.fromlocal(old),
+               'new': encoding.fromlocal(new)}, f
         d = f.value
         d, output = d.split('\n', 1)
         try:
@@ -257,7 +301,7 @@
             yield {}, None
         f = future()
         self.ui.debug('preparing listkeys for "%s"\n' % namespace)
-        yield todict(namespace=encoding.fromlocal(namespace)), f
+        yield {'namespace': encoding.fromlocal(namespace)}, f
         d = f.value
         r = {}
         for l in d.splitlines():
@@ -270,18 +314,19 @@
 
     def changegroup(self, nodes, kind):
         n = encodelist(nodes)
-        f = self._callstream("changegroup", roots=n)
-        return changegroupmod.unbundle10(self._decompress(f), 'UN')
+        f = self._callcompressable("changegroup", roots=n)
+        return changegroupmod.unbundle10(f, 'UN')
 
     def changegroupsubset(self, bases, heads, kind):
         self.requirecap('changegroupsubset', _('look up remote changes'))
         bases = encodelist(bases)
         heads = encodelist(heads)
-        f = self._callstream("changegroupsubset",
-                             bases=bases, heads=heads)
-        return changegroupmod.unbundle10(self._decompress(f), 'UN')
+        f = self._callcompressable("changegroupsubset",
+                                   bases=bases, heads=heads)
+        return changegroupmod.unbundle10(f, 'UN')
 
-    def getbundle(self, source, heads=None, common=None, bundlecaps=None):
+    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
+                  **kwargs):
         self.requirecap('getbundle', _('look up remote changes'))
         opts = {}
         if heads is not None:
@@ -290,14 +335,22 @@
             opts['common'] = encodelist(common)
         if bundlecaps is not None:
             opts['bundlecaps'] = ','.join(bundlecaps)
-        f = self._callstream("getbundle", **opts)
-        return changegroupmod.unbundle10(self._decompress(f), 'UN')
+        opts.update(kwargs)
+        f = self._callcompressable("getbundle", **opts)
+        if bundlecaps is not None and 'HG2X' in bundlecaps:
+            return bundle2.unbundle20(self.ui, f)
+        else:
+            return changegroupmod.unbundle10(f, 'UN')
 
     def unbundle(self, cg, heads, source):
         '''Send cg (a readable file-like object representing the
         changegroup to push, typically a chunkbuffer object) to the
-        remote server as a bundle. Return an integer indicating the
-        result of the push (see localrepository.addchangegroup()).'''
+        remote server as a bundle.
+
+        When pushing a bundle10 stream, return an integer indicating the
+        result of the push (see localrepository.addchangegroup()).
+
+        When pushing a bundle20 stream, return a bundle20 stream.'''
 
         if heads != ['force'] and self.capable('unbundlehash'):
             heads = encodelist(['hashed',
@@ -305,18 +358,24 @@
         else:
             heads = encodelist(heads)
 
-        ret, output = self._callpush("unbundle", cg, heads=heads)
-        if ret == "":
-            raise error.ResponseError(
-                _('push failed:'), output)
-        try:
-            ret = int(ret)
-        except ValueError:
-            raise error.ResponseError(
-                _('push failed (unexpected response):'), ret)
+        if util.safehasattr(cg, 'deltaheader'):
+            # this is a bundle10, do the old style call sequence
+            ret, output = self._callpush("unbundle", cg, heads=heads)
+            if ret == "":
+                raise error.ResponseError(
+                    _('push failed:'), output)
+            try:
+                ret = int(ret)
+            except ValueError:
+                raise error.ResponseError(
+                    _('push failed (unexpected response):'), ret)
 
-        for l in output.splitlines(True):
-            self.ui.status(_('remote: '), l)
+            for l in output.splitlines(True):
+                self.ui.status(_('remote: '), l)
+        else:
+            # bundle2 push. Send a stream, fetch a stream.
+            stream = self._calltwowaystream('unbundle', cg, heads=heads)
+            ret = bundle2.unbundle20(self.ui, stream)
         return ret
 
     def debugwireargs(self, one, two, three=None, four=None, five=None):
@@ -328,21 +387,92 @@
             opts['four'] = four
         return self._call('debugwireargs', one=one, two=two, **opts)
 
+    def _call(self, cmd, **args):
+        """execute <cmd> on the server
+
+        The command is expected to return a simple string.
+
+        returns the server reply as a string."""
+        raise NotImplementedError()
+
+    def _callstream(self, cmd, **args):
+        """execute <cmd> on the server
+
+        The command is expected to return a stream.
+
+        returns the server reply as a file-like object."""
+        raise NotImplementedError()
+
+    def _callcompressable(self, cmd, **args):
+        """execute <cmd> on the server
+
+        The command is expected to return a stream.
+
+        The stream may have been compressed by some implementations. This
+        function takes care of the decompression; that is the only difference
+        from _callstream.
+
+        returns the server reply as a file-like object.
+        """
+        raise NotImplementedError()
+
+    def _callpush(self, cmd, fp, **args):
+        """execute a <cmd> on server
+
+        The command is expected to be related to a push. Push has a special
+        return method.
+
+        returns the server reply as a (ret, output) tuple. ret is either
+        empty (error) or a stringified int.
+        """
+        raise NotImplementedError()
+
+    def _calltwowaystream(self, cmd, fp, **args):
+        """execute <cmd> on server
+
+        The command will send a stream to the server and get a stream in reply.
+        """
+        raise NotImplementedError()
+
+    def _abort(self, exception):
+        """clearly abort the wire protocol connection and raise the exception
+        """
+        raise NotImplementedError()
+
 # server side
 
+# a wire protocol command can either return a string or one of these classes.
 class streamres(object):
+    """wireproto reply: binary stream
+
+    The call was successful and the result is a stream.
+    Iterate over the `self.gen` attribute to retrieve chunks.
+    """
     def __init__(self, gen):
         self.gen = gen
 
 class pushres(object):
+    """wireproto reply: success with simple integer return
+
+    The call was successful and returned an integer contained in `self.res`.
+    """
     def __init__(self, res):
         self.res = res
 
 class pusherr(object):
+    """wireproto reply: failure
+
+    The call failed. The `self.res` attribute contains the error message.
+    """
     def __init__(self, res):
         self.res = res
 
 class ooberror(object):
+    """wireproto reply: failure of a batch of operation
+
+    Something failed during a batch call. The error message is stored in
+    `self.message`.
+    """
     def __init__(self, message):
         self.message = message
 
@@ -363,6 +493,17 @@
                          % (cmd, ",".join(others)))
     return opts
 
+# list of commands
+commands = {}
+
+def wireprotocommand(name, args=''):
+    """decorator for wire protocol command"""
+    def register(func):
+        commands[name] = (func, args)
+        return func
+    return register
+
+@wireprotocommand('batch', 'cmds *')
 def batch(repo, proto, cmds, others):
     repo = repo.filtered("served")
     res = []
@@ -394,6 +535,7 @@
         res.append(escapearg(result))
     return ';'.join(res)
 
+@wireprotocommand('between', 'pairs')
 def between(repo, proto, pairs):
     pairs = [decodelist(p, '-') for p in pairs.split(" ")]
     r = []
@@ -401,6 +543,7 @@
         r.append(encodelist(b) + "\n")
     return "".join(r)
 
+@wireprotocommand('branchmap')
 def branchmap(repo, proto):
     branchmap = repo.branchmap()
     heads = []
@@ -410,6 +553,7 @@
         heads.append('%s %s' % (branchname, branchnodes))
     return '\n'.join(heads)
 
+@wireprotocommand('branches', 'nodes')
 def branches(repo, proto, nodes):
     nodes = decodelist(nodes)
     r = []
@@ -417,9 +561,22 @@
         r.append(encodelist(b) + "\n")
     return "".join(r)
 
-def capabilities(repo, proto):
-    caps = ('lookup changegroupsubset branchmap pushkey known getbundle '
-            'unbundlehash batch').split()
+
+wireprotocaps = ['lookup', 'changegroupsubset', 'branchmap', 'pushkey',
+                 'known', 'getbundle', 'unbundlehash', 'batch']
+
+def _capabilities(repo, proto):
+    """return a list of capabilities for a repo
+
+    This function exists to allow extensions to easily wrap the capabilities
+    computation:
+
+    - returns a list: easy to alter
+    - changes done here are propagated to both the `capabilities` and `hello`
+      commands without any other action needed.
+    """
+    # copy to prevent modification of the global list
+    caps = list(wireprotocaps)
     if _allowstream(repo.ui):
         if repo.ui.configbool('server', 'preferuncompressed', False):
             caps.append('stream-preferred')
@@ -430,26 +587,39 @@
         # otherwise, add 'streamreqs' detailing our local revlog format
         else:
             caps.append('streamreqs=%s' % ','.join(requiredformats))
+    if repo.ui.configbool('experimental', 'bundle2-exp', False):
+        capsblob = bundle2.encodecaps(repo.bundle2caps)
+        caps.append('bundle2-exp=' + urllib.quote(capsblob))
     caps.append('unbundle=%s' % ','.join(changegroupmod.bundlepriority))
     caps.append('httpheader=1024')
-    return ' '.join(caps)
+    return caps
 
+# If you are writing an extension and consider wrapping this function, wrap
+# `_capabilities` instead.
+@wireprotocommand('capabilities')
+def capabilities(repo, proto):
+    return ' '.join(_capabilities(repo, proto))
+
+@wireprotocommand('changegroup', 'roots')
 def changegroup(repo, proto, roots):
     nodes = decodelist(roots)
-    cg = repo.changegroup(nodes, 'serve')
+    cg = changegroupmod.changegroup(repo, nodes, 'serve')
     return streamres(proto.groupchunks(cg))
 
+@wireprotocommand('changegroupsubset', 'bases heads')
 def changegroupsubset(repo, proto, bases, heads):
     bases = decodelist(bases)
     heads = decodelist(heads)
-    cg = repo.changegroupsubset(bases, heads, 'serve')
+    cg = changegroupmod.changegroupsubset(repo, bases, heads, 'serve')
     return streamres(proto.groupchunks(cg))
 
+@wireprotocommand('debugwireargs', 'one two *')
 def debugwireargs(repo, proto, one, two, others):
     # only accept optional args from the known set
     opts = options('debugwireargs', ['three', 'four'], others)
     return repo.debugwireargs(one, two, **opts)
 
+@wireprotocommand('getbundle', '*')
 def getbundle(repo, proto, others):
     opts = options('getbundle', ['heads', 'common', 'bundlecaps'], others)
     for k, v in opts.iteritems():
@@ -457,13 +627,15 @@
             opts[k] = decodelist(v)
         elif k == 'bundlecaps':
             opts[k] = set(v.split(','))
-    cg = repo.getbundle('serve', **opts)
+    cg = exchange.getbundle(repo, 'serve', **opts)
     return streamres(proto.groupchunks(cg))
 
+@wireprotocommand('heads')
 def heads(repo, proto):
     h = repo.heads()
     return encodelist(h) + "\n"
 
+@wireprotocommand('hello')
 def hello(repo, proto):
     '''the hello command returns a set of lines describing various
     interesting things about the server, in an RFC822-like format.
@@ -474,12 +646,14 @@
     '''
     return "capabilities: %s\n" % (capabilities(repo, proto))
 
+@wireprotocommand('listkeys', 'namespace')
 def listkeys(repo, proto, namespace):
     d = repo.listkeys(encoding.tolocal(namespace)).items()
     t = '\n'.join(['%s\t%s' % (encoding.fromlocal(k), encoding.fromlocal(v))
                    for k, v in d])
     return t
 
+@wireprotocommand('lookup', 'key')
 def lookup(repo, proto, key):
     try:
         k = encoding.tolocal(key)
@@ -491,9 +665,11 @@
         success = 0
     return "%s %s\n" % (success, r)
 
+@wireprotocommand('known', 'nodes *')
 def known(repo, proto, nodes, others):
     return ''.join(b and "1" or "0" for b in repo.known(decodelist(nodes)))
 
+@wireprotocommand('pushkey', 'namespace key old new')
 def pushkey(repo, proto, namespace, key, old, new):
     # compatibility with pre-1.8 clients which were accidentally
     # sending raw binary nodes rather than utf-8-encoded hex
@@ -532,6 +708,7 @@
     # this is it's own function so extensions can override it
     return repo.store.walk()
 
+@wireprotocommand('stream_out')
 def stream(repo, proto):
     '''If the server supports streaming clone, it advertises the "stream"
     capability with a value representing the version and flags of the repo
@@ -540,7 +717,7 @@
     The format is simple: the server writes out a line with the amount
     of files, then the total amount of bytes to be transferred (separated
     by a space). Then, for each file, the server first writes the filename
-    and filesize (separated by the null character), then the file contents.
+    and file size (separated by the null character), then the file contents.
     '''
 
     if not _allowstream(repo.ui):
@@ -598,68 +775,40 @@
 
     return streamres(streamer(repo, entries, total_bytes))
 
+@wireprotocommand('unbundle', 'heads')
 def unbundle(repo, proto, heads):
     their_heads = decodelist(heads)
 
-    def check_heads():
-        heads = repo.heads()
-        heads_hash = util.sha1(''.join(sorted(heads))).digest()
-        return (their_heads == ['force'] or their_heads == heads or
-                their_heads == ['hashed', heads_hash])
-
-    proto.redirect()
-
-    # fail early if possible
-    if not check_heads():
-        return pusherr('repository changed while preparing changes - '
-                       'please try again')
+    try:
+        proto.redirect()
 
-    # write bundle data to temporary file because it can be big
-    fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-')
-    fp = os.fdopen(fd, 'wb+')
-    r = 0
-    try:
-        proto.getfile(fp)
-        lock = repo.lock()
-        try:
-            if not check_heads():
-                # someone else committed/pushed/unbundled while we
-                # were transferring data
-                return pusherr('repository changed while uploading changes - '
-                               'please try again')
-
-            # push can proceed
-            fp.seek(0)
-            gen = changegroupmod.readbundle(fp, None)
+        exchange.check_heads(repo, their_heads, 'preparing changes')
 
-            try:
-                r = repo.addchangegroup(gen, 'serve', proto._client())
-            except util.Abort, inst:
-                sys.stderr.write("abort: %s\n" % inst)
-        finally:
-            lock.release()
-        return pushres(r)
-
-    finally:
-        fp.close()
-        os.unlink(tempname)
+        # write bundle data to temporary file because it can be big
+        fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-')
+        fp = os.fdopen(fd, 'wb+')
+        r = 0
+        try:
+            proto.getfile(fp)
+            fp.seek(0)
+            gen = exchange.readbundle(repo.ui, fp, None)
+            r = exchange.unbundle(repo, gen, their_heads, 'serve',
+                                  proto._client())
+            if util.safehasattr(r, 'addpart'):
+                # The return looks streamable; we are in the bundle2 case and
+                # should return a stream.
+                return streamres(r.getchunks())
+            return pushres(r)
 
-commands = {
-    'batch': (batch, 'cmds *'),
-    'between': (between, 'pairs'),
-    'branchmap': (branchmap, ''),
-    'branches': (branches, 'nodes'),
-    'capabilities': (capabilities, ''),
-    'changegroup': (changegroup, 'roots'),
-    'changegroupsubset': (changegroupsubset, 'bases heads'),
-    'debugwireargs': (debugwireargs, 'one two *'),
-    'getbundle': (getbundle, '*'),
-    'heads': (heads, ''),
-    'hello': (hello, ''),
-    'known': (known, 'nodes *'),
-    'listkeys': (listkeys, 'namespace'),
-    'lookup': (lookup, 'key'),
-    'pushkey': (pushkey, 'namespace key old new'),
-    'stream_out': (stream, ''),
-    'unbundle': (unbundle, 'heads'),
-}
+        finally:
+            fp.close()
+            os.unlink(tempname)
+    except util.Abort, inst:
+        # The old code we moved used sys.stderr directly.
+        # We did not change it, to minimise the code change.
+        # This needs to be moved to something proper.
+        # Feel free to do it.
+        sys.stderr.write("abort: %s\n" % inst)
+        return pushres(0)
+    except exchange.PushRaced, exc:
+        return pusherr(str(exc))
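
The `wireprotocommand` decorator and the `_capabilities` indirection added
above are the extension points this change introduces on the server side. A
minimal sketch of how a third-party extension might use them; the command
name `mycmd` and its capability string are hypothetical, and the wrapping
assumes the usual `mercurial.extensions` API:

    # hypothetical extension sketch, not part of this change
    from mercurial import extensions, wireproto

    @wireproto.wireprotocommand('mycmd', 'key')
    def mycmd(repo, proto, key):
        # a command returns a string, or one of the reply classes
        # (streamres, pushres, pusherr, ooberror) defined above
        return 'value for %s\n' % key

    def _mycaps(orig, repo, proto):
        # wrap _capabilities rather than capabilities, so the new
        # capability is advertised by both `capabilities` and `hello`
        caps = orig(repo, proto)
        caps.append('mycmd')
        return caps

    def extsetup(ui):
        extensions.wrapfunction(wireproto, '_capabilities', _mycaps)

Because `_capabilities` returns a list instead of the pre-formatted string
the old `capabilities` function produced, the wrapper only needs to append
to it.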
--- a/setup.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/setup.py	Thu Apr 17 19:36:17 2014 -0400
@@ -13,11 +13,18 @@
         '''A helper function to emulate 2.6+ bytes literals using string
         literals.'''
         return s.encode('latin1')
+    printf = eval('print')
+    libdir_escape = 'unicode_escape'
 else:
+    libdir_escape = 'string_escape'
     def b(s):
         '''A helper function to emulate 2.6+ bytes literals using string
         literals.'''
         return s
+    def printf(*args, **kwargs):
+        f = kwargs.get('file', sys.stdout)
+        end = kwargs.get('end', '\n')
+        f.write(b(' ').join(args) + end)
 
 # Solaris Python packaging brain damage
 try:
@@ -54,6 +61,7 @@
             "Couldn't import standard bz2 (incomplete Python install).")
 
 import os, subprocess, time
+import re
 import shutil
 import tempfile
 from distutils import log
@@ -64,10 +72,9 @@
 from distutils.command.build_py import build_py
 from distutils.command.install_scripts import install_scripts
 from distutils.spawn import spawn, find_executable
-from distutils.ccompiler import new_compiler
 from distutils import cygwinccompiler
 from distutils.errors import CCompilerError, DistutilsExecError
-from distutils.sysconfig import get_python_inc
+from distutils.sysconfig import get_python_inc, get_config_var
 from distutils.version import StrictVersion
 
 convert2to3 = '--c2to3' in sys.argv
@@ -152,8 +159,8 @@
               and not e.startswith(b('warning: Not importing')) \
               and not e.startswith(b('obsolete feature not enabled'))]
     if err:
-        print >> sys.stderr, "stderr from '%s':" % (' '.join(cmd))
-        print >> sys.stderr, '\n'.join(['  ' + e for e in err])
+        printf("stderr from '%s':" % (' '.join(cmd)), file=sys.stderr)
+        printf(b('\n').join([b('  ') + e for e in err]), file=sys.stderr)
         return ''
     return out
 
@@ -403,7 +410,7 @@
             if b('\0') in data:
                 continue
 
-            data = data.replace('@LIBDIR@', libdir.encode('string_escape'))
+            data = data.replace(b('@LIBDIR@'), libdir.encode(libdir_escape))
             fp = open(outfile, 'wb')
             fp.write(data)
             fp.close()
@@ -467,20 +474,6 @@
 
 cygwinccompiler.Mingw32CCompiler = HackedMingw32CCompiler
 
-if sys.platform.startswith('linux') and os.uname()[2] > '2.6':
-    # The inotify extension is only usable with Linux 2.6 kernels.
-    # You also need a reasonably recent C library.
-    # In any case, if it fails to build the error will be skipped ('optional').
-    cc = new_compiler()
-    if hasfunction(cc, 'inotify_add_watch'):
-        inotify = Extension('hgext.inotify.linux._inotify',
-                            ['hgext/inotify/linux/_inotify.c'],
-                            ['mercurial'],
-                            depends=common_depends)
-        inotify.optional = True
-        extmodules.append(inotify)
-        packages.extend(['hgext.inotify', 'hgext.inotify.linux'])
-
 packagedata = {'mercurial': ['locale/*/LC_MESSAGES/hg.mo',
                              'help/*.txt']}
 
@@ -513,22 +506,36 @@
     setupversion = version.split('+', 1)[0]
 
 if sys.platform == 'darwin' and os.path.exists('/usr/bin/xcodebuild'):
-    # XCode 4.0 dropped support for ppc architecture, which is hardcoded in
-    # distutils.sysconfig
     version = runcmd(['/usr/bin/xcodebuild', '-version'], {})[0].splitlines()
     if version:
         version = version[0]
         xcode4 = (version.startswith('Xcode') and
                   StrictVersion(version.split()[1]) >= StrictVersion('4.0'))
+        xcode51 = re.match(r'^Xcode\s+5\.1\.', version) is not None
     else:
         # xcodebuild returns empty on OS X Lion with XCode 4.3 not
         # installed, but instead with only command-line tools. Assume
         # that only happens on >= Lion, thus no PPC support.
         xcode4 = True
+        xcode51 = False
 
+    # Xcode 4.0 dropped support for ppc architecture, which is hardcoded in
+    # distutils.sysconfig
     if xcode4:
         os.environ['ARCHFLAGS'] = ''
 
+    # Xcode 5.1 changes clang such that it now fails to compile if the
+    # -mno-fused-madd flag is passed, but the version of Python shipped with
+    # OS X 10.9 Mavericks includes this flag. This causes problems in all
+    # C extension modules, and a bug has been filed upstream at
+    # http://bugs.python.org/issue21244. We also need to patch this here
+    # so Mercurial can continue to compile in the meantime.
+    if xcode51:
+        cflags = get_config_var('CFLAGS')
+        if re.search(r'-mno-fused-madd\b', cflags) is not None:
+            os.environ['CFLAGS'] = (
+                os.environ.get('CFLAGS', '') + ' -Qunused-arguments')
+
 setup(name='mercurial',
       version=setupversion,
       author='Matt Mackall and many others',
@@ -569,9 +576,11 @@
       package_data=packagedata,
       cmdclass=cmdclass,
       distclass=hgdist,
-      options=dict(py2exe=dict(packages=['hgext', 'email']),
-                   bdist_mpkg=dict(zipdist=True,
-                                   license='COPYING',
-                                   readme='contrib/macosx/Readme.html',
-                                   welcome='contrib/macosx/Welcome.html')),
+      options={'py2exe': {'packages': ['hgext', 'email']},
+               'bdist_mpkg': {'zipdist': True,
+                              'license': 'COPYING',
+                              'readme': 'contrib/macosx/Readme.html',
+                              'welcome': 'contrib/macosx/Welcome.html',
+                              },
+               },
       **extra)
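
The `printf` helper introduced at the top of setup.py exists because the
file must at least parse under both Python 2 and Python 3, where `print` is
a statement and a function respectively. A standalone sketch of the same
shim pattern, using only the standard library:

    import sys

    if sys.version_info[0] >= 3:
        # `printf = print` is a syntax error when py2 compiles this file,
        # so the function reference is obtained through eval instead
        printf = eval('print')
    else:
        def printf(*args, **kwargs):
            f = kwargs.get('file', sys.stdout)
            end = kwargs.get('end', '\n')
            f.write(' '.join(args) + end)

    printf('hello', 'world', file=sys.stderr)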
--- a/tests/blacklists/inotify-failures	Tue Apr 15 03:21:59 2014 +0900
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,21 +0,0 @@
-# When --inotify is activated, help output and config changes:
-test-debugcomplete
-test-empty
-test-fncache
-test-globalopts
-test-help
-test-hgrc
-test-inherit-mode
-test-qrecord
-test-strict
-
-# --inotify activates de facto the inotify extension. It does not play well
-# with inotify-specific tests, which activate/deactivate inotify at will:
-test-inotify
-test-inotify-debuginotify
-test-inotify-dirty-dirstate
-test-inotify-issue1208
-test-inotify-issue1371
-test-inotify-issue1542
-test-inotify-issue1556
-test-inotify-lookup
--- a/tests/blacklists/linux-vfat	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/blacklists/linux-vfat	Thu Apr 17 19:36:17 2014 -0400
@@ -8,14 +8,6 @@
 
 # no sockets or fifos
 test-hup.t
-test-inotify-debuginotify.t
-test-inotify-dirty-dirstate.t
-test-inotify-issue1208.t
-test-inotify-issue1371.t
-test-inotify-issue1542.t
-test-inotify-lookup.t
-test-inotify.t
-test-inotify-issue1556.t
 
 # no hardlinks
 test-hardlinks.t
--- a/tests/bundles/rebase.sh	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/bundles/rebase.sh	Thu Apr 17 19:36:17 2014 -0400
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 hg init rebase
 cd rebase
 
--- a/tests/bundles/remote.sh	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/bundles/remote.sh	Thu Apr 17 19:36:17 2014 -0400
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 hg init remote
 cd remote
 
--- a/tests/hghave.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/hghave.py	Thu Apr 17 19:36:17 2014 -0400
@@ -1,4 +1,4 @@
-import os, stat, socket
+import os, stat
 import re
 import sys
 import tempfile
@@ -96,21 +96,6 @@
     finally:
         os.remove(path)
 
-def has_inotify():
-    try:
-        import hgext.inotify.linux.watcher
-    except ImportError:
-        return False
-    name = tempfile.mktemp(dir='.', prefix=tempprefix)
-    sock = socket.socket(socket.AF_UNIX)
-    try:
-        sock.bind(name)
-    except socket.error:
-        return False
-    sock.close()
-    os.unlink(name)
-    return True
-
 def has_fifo():
     if getattr(os, "mkfifo", None) is None:
         return False
@@ -248,6 +233,9 @@
     except ImportError:
         return False
 
+def has_python243():
+    return sys.version_info >= (2, 4, 3)
+
 def has_outer_repo():
     # failing for other reasons than 'no repo' imply that there is a repo
     return not matchoutput('hg root 2>&1',
@@ -312,7 +300,6 @@
     "gpg": (has_gpg, "gpg client"),
     "hardlink": (has_hardlink, "hardlinks"),
     "icasefs": (has_icasefs, "case insensitive file system"),
-    "inotify": (has_inotify, "inotify extension support"),
     "killdaemons": (has_killdaemons, 'killdaemons.py support'),
     "lsprof": (has_lsprof, "python lsprof module"),
     "mtn": (has_mtn, "monotone client (>= 1.0)"),
@@ -320,6 +307,7 @@
     "p4": (has_p4, "Perforce server and client"),
     "pyflakes": (has_pyflakes, "Pyflakes python linter"),
     "pygments": (has_pygments, "Pygments source highlighting library"),
+    "python243": (has_python243, "python >= 2.4.3"),
     "root": (has_root, "root permissions"),
     "serve": (has_serve, "platform and python can manage 'hg serve -d'"),
     "ssl": (has_ssl, "python >= 2.6 ssl module and python OpenSSL"),
--- a/tests/killdaemons.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/killdaemons.py	Thu Apr 17 19:36:17 2014 -0400
@@ -4,13 +4,51 @@
 
 if os.name =='nt':
     import ctypes
+
+    def _check(ret, expectederr=None):
+        if ret == 0:
+            winerrno = ctypes.GetLastError()
+            if winerrno == expectederr:
+                return True
+            raise ctypes.WinError(winerrno)
+
     def kill(pid, logfn, tryhard=True):
         logfn('# Killing daemon process %d' % pid)
         PROCESS_TERMINATE = 1
+        PROCESS_QUERY_INFORMATION = 0x400
+        SYNCHRONIZE = 0x00100000
+        WAIT_OBJECT_0 = 0
+        WAIT_TIMEOUT = 258
         handle = ctypes.windll.kernel32.OpenProcess(
-                PROCESS_TERMINATE, False, pid)
-        ctypes.windll.kernel32.TerminateProcess(handle, -1)
-        ctypes.windll.kernel32.CloseHandle(handle)
+                PROCESS_TERMINATE|SYNCHRONIZE|PROCESS_QUERY_INFORMATION,
+                False, pid)
+        if handle == 0:
+            _check(0, 87) # err 87 (ERROR_INVALID_PARAMETER): process not found
+            return # process not found, already finished
+        try:
+            r = ctypes.windll.kernel32.WaitForSingleObject(handle, 100)
+            if r == WAIT_OBJECT_0:
+                pass # terminated, but process handle still available
+            elif r == WAIT_TIMEOUT:
+                _check(ctypes.windll.kernel32.TerminateProcess(handle, -1))
+            else:
+                _check(r)
+
+            # TODO: forcefully kill on timeout,
+            #       and perhaps use a shorter waiting time when tryhard==True
+            r = ctypes.windll.kernel32.WaitForSingleObject(handle, 100)
+                                                       # timeout = 100 ms
+            if r == WAIT_OBJECT_0:
+                pass # process is terminated
+            elif r == WAIT_TIMEOUT:
+                logfn('# Daemon process %d is stuck' % pid)
+            else:
+                _check(r) # any error
+        except: # re-raises
+            ctypes.windll.kernel32.CloseHandle(handle) # no _check, keep error
+            raise
+        _check(ctypes.windll.kernel32.CloseHandle(handle))
+
 else:
     def kill(pid, logfn, tryhard=True):
         try:
@@ -51,4 +89,3 @@
 if __name__ == '__main__':
     path, = sys.argv[1:]
     killdaemons(path)
-
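
For contrast with the Windows branch above, an illustrative POSIX analogue
of the same probe-terminate-wait sequence. This is not the code in
killdaemons.py (whose POSIX branch is elided from the hunk above), just a
sketch of the equivalent logic:

    import errno, os, signal, time

    def kill_posix(pid, logfn):
        try:
            os.kill(pid, signal.SIGTERM)
        except OSError, err:
            if err.errno == errno.ESRCH:
                return  # process not found, already finished
            raise
        for _ in xrange(10):  # ~1 second grace, like the 100 ms waits above
            time.sleep(0.1)
            try:
                os.kill(pid, 0)  # probe only, sends no signal
            except OSError:
                return  # terminated
        logfn('# Daemon process %d is stuck' % pid)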
--- a/tests/run-tests.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/run-tests.py	Thu Apr 17 19:36:17 2014 -0400
@@ -133,7 +133,7 @@
         f.close()
     return entries
 
-def parseargs():
+def getparser():
     parser = optparse.OptionParser("%prog [options] [tests]")
 
     # keep these sorted
@@ -141,19 +141,19 @@
         help="skip tests listed in the specified blacklist file")
     parser.add_option("--whitelist", action="append",
         help="always run tests listed in the specified whitelist file")
+    parser.add_option("--changed", type="string",
+        help="run tests that are changed in parent rev or working directory")
     parser.add_option("-C", "--annotate", action="store_true",
         help="output files annotated with coverage")
     parser.add_option("-c", "--cover", action="store_true",
         help="print a test coverage report")
     parser.add_option("-d", "--debug", action="store_true",
         help="debug mode: write output of test scripts to console"
-             " rather than capturing and diff'ing it (disables timeout)")
+             " rather than capturing and diffing it (disables timeout)")
     parser.add_option("-f", "--first", action="store_true",
         help="exit on the first test failure")
     parser.add_option("-H", "--htmlcov", action="store_true",
         help="create an HTML report of the coverage of the files")
-    parser.add_option("--inotify", action="store_true",
-        help="enable inotify extension when running tests")
     parser.add_option("-i", "--interactive", action="store_true",
         help="prompt to accept changed output")
     parser.add_option("-j", "--jobs", type="int",
@@ -210,7 +210,11 @@
     for option, (envvar, default) in defaults.items():
         defaults[option] = type(default)(os.environ.get(envvar, default))
     parser.set_defaults(**defaults)
-    (options, args) = parser.parse_args()
+
+    return parser
+
+def parseargs(args, parser):
+    (options, args) = parser.parse_args(args)
 
     # jython is always pure
     if 'java' in sys.platform or '__pypy__' in sys.modules:
@@ -300,8 +304,14 @@
 
 def showdiff(expected, output, ref, err):
     print
+    servefail = False
     for line in difflib.unified_diff(expected, output, ref, err):
         sys.stdout.write(line)
+        if not servefail and line.startswith(
+                             '+  abort: child process failed to start'):
+            servefail = True
+    return {'servefail': servefail}
+
 
 verbose = False
 def vlog(*msg):
@@ -344,12 +354,6 @@
     hgrc.write('commit = -d "0 0"\n')
     hgrc.write('shelve = --date "0 0"\n')
     hgrc.write('tag = -d "0 0"\n')
-    if options.inotify:
-        hgrc.write('[extensions]\n')
-        hgrc.write('inotify=\n')
-        hgrc.write('[inotify]\n')
-        hgrc.write('pidfile=daemon.pids')
-        hgrc.write('appendpid=True\n')
     if options.extra_config_opt:
         for opt in options.extra_config_opt:
             section, key = opt.split('.', 1)
@@ -434,7 +438,7 @@
     if getattr(os, 'symlink', None):
         vlog("# Making python executable in test path a symlink to '%s'" %
              sys.executable)
-        mypython = os.path.join(BINDIR, pyexename)
+        mypython = os.path.join(TMPBINDIR, pyexename)
         try:
             if os.readlink(mypython) == sys.executable:
                 return
@@ -487,10 +491,10 @@
            ' build %(compiler)s --build-base="%(base)s"'
            ' install --force --prefix="%(prefix)s" --install-lib="%(libdir)s"'
            ' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1'
-           % dict(exe=sys.executable, py3=py3, pure=pure, compiler=compiler,
-                  base=os.path.join(HGTMP, "build"),
-                  prefix=INST, libdir=PYTHONDIR, bindir=BINDIR,
-                  nohome=nohome, logfile=installerrs))
+           % {'exe': sys.executable, 'py3': py3, 'pure': pure,
+              'compiler': compiler, 'base': os.path.join(HGTMP, "build"),
+              'prefix': INST, 'libdir': PYTHONDIR, 'bindir': BINDIR,
+              'nohome': nohome, 'logfile': installerrs})
     vlog("# Running", cmd)
     if os.system(cmd) == 0:
         if not options.verbose:
@@ -602,7 +606,7 @@
 
 def globmatch(el, l):
     # The only supported special characters are * and ? plus / which also
-    # matches \ on windows. Escaping of these caracters is supported.
+    # matches \ on windows. Escaping of these characters is supported.
     if el + '\n' == l:
         if os.altsep:
             # matching on "/" is not needed for this line
@@ -660,7 +664,7 @@
     after = {}
     pos = prepos = -1
 
-    # Expected shellscript output
+    # Expected shell script output
     expected = {}
 
     # We keep track of whether or not we're in a Python block so we
@@ -698,9 +702,12 @@
         if not l.endswith('\n'):
             l += '\n'
         if l.startswith('#if'):
+            lsplit = l.split()
+            if len(lsplit) < 2 or lsplit[0] != '#if':
+                after.setdefault(pos, []).append('  !!! invalid #if\n')
             if skipping is not None:
                 after.setdefault(pos, []).append('  !!! nested #if\n')
-            skipping = not hghave(l.split()[1:])
+            skipping = not hghave(lsplit[1:])
             after.setdefault(pos, []).append(l)
         elif l.startswith('#else'):
             if skipping is None:
@@ -776,9 +783,11 @@
 
     # Merge the script output back into a unified test
 
+    warnonly = 1 # warn only? 1: not yet, 2: yes, 3: for sure not
+    if exitcode != 0: # failure has been reported
+        warnonly = 3 # set to "for sure not"
     pos = -1
     postout = []
-    ret = 0
     for l in output:
         lout, lcmd = l, None
         if salt in l:
@@ -797,11 +806,10 @@
             if isinstance(r, str):
                 if r == '+glob':
                     lout = el[:-1] + ' (glob)\n'
-                    r = False
+                    r = '' # warn only this line
                 elif r == '-glob':
-                    log('\ninfo, unnecessary glob in %s (after line %d):'
-                        ' %s (glob)\n' % (test, pos, el[-1]))
-                    r = True # pass on unnecessary glob
+                    lout = ''.join(el.rsplit(' (glob)', 1))
+                    r = '' # warn only this line
                 else:
                     log('\ninfo, unknown linematch result: %r\n' % r)
                     r = False
@@ -811,6 +819,10 @@
                 if needescape(lout):
                     lout = stringescape(lout.rstrip('\n')) + " (esc)\n"
                 postout.append("  " + lout) # let diff deal with it
+                if r != '': # if line failed
+                    warnonly = 3 # set to "for sure not"
+                elif warnonly == 1: # is "not yet" (and line is warn only)
+                    warnonly = 2 # set to "yes" do warn
 
         if lcmd:
             # add on last return code
@@ -825,6 +837,8 @@
     if pos in after:
         postout += after.pop(pos)
 
+    if warnonly == 2:
+        exitcode = False # set exitcode to warned
     return exitcode, postout
 
 wifexited = getattr(os, "WIFEXITED", lambda x: False)
@@ -882,8 +896,9 @@
         return 's', test, msg
 
     def fail(msg, ret):
+        warned = ret is False
         if not options.nodiff:
-            log("\nERROR: %s %s" % (testpath, msg))
+            log("\n%s: %s %s" % (warned and 'Warning' or 'ERROR', test, msg))
         if (not ret and options.interactive
             and os.path.exists(testpath + ".err")):
             iolock.acquire()
@@ -896,7 +911,7 @@
                 else:
                     rename(testpath + ".err", testpath + ".out")
                 return '.', test, ''
-        return '!', test, msg
+        return warned and '~' or '!', test, msg
 
     def success():
         return '.', test, ''
@@ -933,7 +948,7 @@
                 else:
                     return ignore("doesn't match keyword")
 
-    if not lctest.startswith("test-"):
+    if not os.path.basename(lctest).startswith("test-"):
         return skip("not a test file")
     for ext, func, out in testtypes:
         if lctest.endswith(ext):
@@ -1022,17 +1037,21 @@
     elif ret == 'timeout':
         result = fail("timed out", ret)
     elif out != refout:
+        info = {}
         if not options.nodiff:
             iolock.acquire()
             if options.view:
                 os.system("%s %s %s" % (options.view, ref, err))
             else:
-                showdiff(refout, out, ref, err)
+                info = showdiff(refout, out, ref, err)
             iolock.release()
+        msg = ""
+        if info.get('servefail'):
+            msg += "serve failed and "
         if ret:
-            result = fail("output changed and " + describe(ret), ret)
+            msg += "output changed and " + describe(ret)
         else:
-            result = fail("output changed", ret)
+            msg += "output changed"
+        result = fail(msg, ret)
     elif ret:
         result = fail(describe(ret), ret)
     else:
@@ -1075,7 +1094,7 @@
                          '         (expected %s)\n'
                          % (verb, actualhg, expecthg))
 
-results = {'.':[], '!':[], 's':[], 'i':[]}
+results = {'.':[], '!':[], '~': [], 's':[], 'i':[]}
 times = []
 iolock = threading.Lock()
 abort = False
@@ -1139,7 +1158,8 @@
         scheduletests(options, tests)
 
         failed = len(results['!'])
-        tested = len(results['.']) + failed
+        warned = len(results['~'])
+        tested = len(results['.']) + failed + warned
         skipped = len(results['s'])
         ignored = len(results['i'])
 
@@ -1147,11 +1167,13 @@
         if not options.noskips:
             for s in results['s']:
                 print "Skipped %s: %s" % s
+        for s in results['~']:
+            print "Warned %s: %s" % s
         for s in results['!']:
             print "Failed %s: %s" % s
         _checkhglib("Tested")
-        print "# Ran %d tests, %d skipped, %d failed." % (
-            tested, skipped + ignored, failed)
+        print "# Ran %d tests, %d skipped, %d warned, %d failed." % (
+            tested, skipped + ignored, warned, failed)
         if results['!']:
             print 'python hash seed:', os.environ['PYTHONHASHSEED']
         if options.time:
@@ -1164,23 +1186,32 @@
         print "\ninterrupted!"
 
     if failed:
-        sys.exit(1)
+        return 1
+    if warned:
+        return 80
 
 testtypes = [('.py', pytest, '.out'),
              ('.t', tsttest, '')]
 
-def main():
-    (options, args) = parseargs()
+def main(args, parser=None):
+    parser = parser or getparser()
+    (options, args) = parseargs(args, parser)
     os.umask(022)
 
     checktools()
 
-    if len(args) == 0:
-        args = [t for t in os.listdir(".")
-                if t.startswith("test-")
-                and (t.endswith(".py") or t.endswith(".t"))]
+    if not args:
+        if options.changed:
+            proc = Popen4('hg st --rev "%s" -man0 .' % options.changed,
+                          None, 0)
+            stdout, stderr = proc.communicate()
+            args = stdout.strip('\0').split('\0')
+        else:
+            args = os.listdir(".")
 
-    tests = args
+    tests = [t for t in args
+             if os.path.basename(t).startswith("test-")
+                 and (t.endswith(".py") or t.endswith(".t"))]
 
     if options.random:
         random.shuffle(tests)
@@ -1206,7 +1237,7 @@
         # we do the randomness ourself to know what seed is used
         os.environ['PYTHONHASHSEED'] = str(random.getrandbits(32))
 
-    global TESTDIR, HGTMP, INST, BINDIR, PYTHONDIR, COVERAGE_FILE
+    global TESTDIR, HGTMP, INST, BINDIR, TMPBINDIR, PYTHONDIR, COVERAGE_FILE
     TESTDIR = os.environ["TESTDIR"] = os.getcwd()
     if options.tmpdir:
         options.keep_tmpdir = True
@@ -1215,7 +1246,8 @@
             # Meaning of tmpdir has changed since 1.3: we used to create
             # HGTMP inside tmpdir; now HGTMP is tmpdir.  So fail if
             # tmpdir already exists.
-            sys.exit("error: temp dir %r already exists" % tmpdir)
+            print "error: temp dir %r already exists" % tmpdir
+            return 1
 
             # Automatically removing tmpdir sounds convenient, but could
             # really annoy anyone in the habit of using "--tmpdir=/tmp"
@@ -1235,6 +1267,8 @@
     if options.with_hg:
         INST = None
         BINDIR = os.path.dirname(os.path.realpath(options.with_hg))
+        TMPBINDIR = os.path.join(HGTMP, 'install', 'bin')
+        os.makedirs(TMPBINDIR)
 
         # This looks redundant with how Python initializes sys.path from
         # the location of the script being executed.  Needed because the
@@ -1245,18 +1279,22 @@
     else:
         INST = os.path.join(HGTMP, "install")
         BINDIR = os.environ["BINDIR"] = os.path.join(INST, "bin")
+        TMPBINDIR = BINDIR
         PYTHONDIR = os.path.join(INST, "lib", "python")
 
     os.environ["BINDIR"] = BINDIR
     os.environ["PYTHON"] = PYTHON
 
     path = [BINDIR] + os.environ["PATH"].split(os.pathsep)
+    if TMPBINDIR != BINDIR:
+        path = [TMPBINDIR] + path
     os.environ["PATH"] = os.pathsep.join(path)
 
     # Include TESTDIR in PYTHONPATH so that out-of-tree extensions
     # can run .../tests/run-tests.py test-foo where test-foo
-    # adds an extension to HGRC
-    pypath = [PYTHONDIR, TESTDIR]
+    # adds an extension to HGRC. Also include the run-tests.py directory, to
+    # import modules like heredoctest.
+    pypath = [PYTHONDIR, TESTDIR, os.path.abspath(os.path.dirname(__file__))]
     # We have to augment PYTHONPATH, rather than simply replacing
     # it, in case external libraries are only available via current
     # PYTHONPATH.  (In particular, the Subversion bindings on OS X
@@ -1274,10 +1312,10 @@
     vlog("# Using", IMPL_PATH, os.environ[IMPL_PATH])
 
     try:
-        runtests(options, tests)
+        return runtests(options, tests) or 0
     finally:
         time.sleep(.1)
         cleanup(options)
 
 if __name__ == '__main__':
-    main()
+    sys.exit(main(sys.argv[1:]))
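
The getparser()/parseargs()/main(args) split above turns the test runner
into something callable from other scripts instead of a command-line-only
tool. A sketch of a driver, under the assumption that run-tests.py has been
made importable (its hyphenated filename is not a valid module name, so
`runtests` here is a hypothetical alias):

    import sys
    import runtests  # hypothetical alias for run-tests.py

    # main() parses the arguments itself, via the supplied parser or a
    # default one, and returns 0 on success, 1 on failure, 80 when tests
    # only produced warnings
    sys.exit(runtests.main(['-j', '2', 'test-basic.t']))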
--- a/tests/test-acl.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-acl.t	Thu Apr 17 19:36:17 2014 -0400
@@ -88,7 +88,6 @@
   ef1ea85a6374b77d6da9dcda9541f498f2d17df7
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
-  adding changesets
   bundling: 1/3 changesets (33.33%)
   bundling: 2/3 changesets (66.67%)
   bundling: 3/3 changesets (100.00%)
@@ -98,6 +97,7 @@
   bundling: foo/Bar/file.txt 1/3 files (33.33%)
   bundling: foo/file.txt 2/3 files (66.67%)
   bundling: quux/file.py 3/3 files (100.00%)
+  adding changesets
   changesets: 1 chunks
   add changeset ef1ea85a6374
   changesets: 2 chunks
@@ -116,9 +116,9 @@
   adding quux/file.py revisions
   files: 3/3 chunks (100.00%)
   added 3 changesets with 3 changes to 3 files
+  updating the branch cache
   listing keys for "phases"
   try to push obsolete markers to remote
-  updating the branch cache
   checking for updated bookmarks
   listing keys for "bookmarks"
   repository tip rolled back to revision 0 (undo push)
@@ -147,7 +147,6 @@
   ef1ea85a6374b77d6da9dcda9541f498f2d17df7
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
-  adding changesets
   bundling: 1/3 changesets (33.33%)
   bundling: 2/3 changesets (66.67%)
   bundling: 3/3 changesets (100.00%)
@@ -157,6 +156,7 @@
   bundling: foo/Bar/file.txt 1/3 files (33.33%)
   bundling: foo/file.txt 2/3 files (66.67%)
   bundling: quux/file.py 3/3 files (100.00%)
+  adding changesets
   changesets: 1 chunks
   add changeset ef1ea85a6374
   changesets: 2 chunks
@@ -177,9 +177,9 @@
   added 3 changesets with 3 changes to 3 files
   calling hook pretxnchangegroup.acl: hgext.acl.hook
   acl: changes have source "push" - skipping
+  updating the branch cache
   listing keys for "phases"
   try to push obsolete markers to remote
-  updating the branch cache
   checking for updated bookmarks
   listing keys for "bookmarks"
   repository tip rolled back to revision 0 (undo push)
@@ -209,7 +209,6 @@
   ef1ea85a6374b77d6da9dcda9541f498f2d17df7
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
-  adding changesets
   bundling: 1/3 changesets (33.33%)
   bundling: 2/3 changesets (66.67%)
   bundling: 3/3 changesets (100.00%)
@@ -219,6 +218,7 @@
   bundling: foo/Bar/file.txt 1/3 files (33.33%)
   bundling: foo/file.txt 2/3 files (66.67%)
   bundling: quux/file.py 3/3 files (100.00%)
+  adding changesets
   changesets: 1 chunks
   add changeset ef1ea85a6374
   changesets: 2 chunks
@@ -249,9 +249,9 @@
   acl: path access granted: "f9cafe1212c8"
   acl: branch access granted: "911600dab2ae" on branch "default"
   acl: path access granted: "911600dab2ae"
+  updating the branch cache
   listing keys for "phases"
   try to push obsolete markers to remote
-  updating the branch cache
   checking for updated bookmarks
   listing keys for "bookmarks"
   repository tip rolled back to revision 0 (undo push)
@@ -281,7 +281,6 @@
   ef1ea85a6374b77d6da9dcda9541f498f2d17df7
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
-  adding changesets
   bundling: 1/3 changesets (33.33%)
   bundling: 2/3 changesets (66.67%)
   bundling: 3/3 changesets (100.00%)
@@ -291,6 +290,7 @@
   bundling: foo/Bar/file.txt 1/3 files (33.33%)
   bundling: foo/file.txt 2/3 files (66.67%)
   bundling: quux/file.py 3/3 files (100.00%)
+  adding changesets
   changesets: 1 chunks
   add changeset ef1ea85a6374
   changesets: 2 chunks
@@ -348,7 +348,6 @@
   ef1ea85a6374b77d6da9dcda9541f498f2d17df7
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
-  adding changesets
   bundling: 1/3 changesets (33.33%)
   bundling: 2/3 changesets (66.67%)
   bundling: 3/3 changesets (100.00%)
@@ -358,6 +357,7 @@
   bundling: foo/Bar/file.txt 1/3 files (33.33%)
   bundling: foo/file.txt 2/3 files (66.67%)
   bundling: quux/file.py 3/3 files (100.00%)
+  adding changesets
   changesets: 1 chunks
   add changeset ef1ea85a6374
   changesets: 2 chunks
@@ -420,7 +420,6 @@
   ef1ea85a6374b77d6da9dcda9541f498f2d17df7
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
-  adding changesets
   bundling: 1/3 changesets (33.33%)
   bundling: 2/3 changesets (66.67%)
   bundling: 3/3 changesets (100.00%)
@@ -430,6 +429,7 @@
   bundling: foo/Bar/file.txt 1/3 files (33.33%)
   bundling: foo/file.txt 2/3 files (66.67%)
   bundling: quux/file.py 3/3 files (100.00%)
+  adding changesets
   changesets: 1 chunks
   add changeset ef1ea85a6374
   changesets: 2 chunks
@@ -489,7 +489,6 @@
   ef1ea85a6374b77d6da9dcda9541f498f2d17df7
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
-  adding changesets
   bundling: 1/3 changesets (33.33%)
   bundling: 2/3 changesets (66.67%)
   bundling: 3/3 changesets (100.00%)
@@ -499,6 +498,7 @@
   bundling: foo/Bar/file.txt 1/3 files (33.33%)
   bundling: foo/file.txt 2/3 files (66.67%)
   bundling: quux/file.py 3/3 files (100.00%)
+  adding changesets
   changesets: 1 chunks
   add changeset ef1ea85a6374
   changesets: 2 chunks
@@ -563,7 +563,6 @@
   ef1ea85a6374b77d6da9dcda9541f498f2d17df7
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
-  adding changesets
   bundling: 1/3 changesets (33.33%)
   bundling: 2/3 changesets (66.67%)
   bundling: 3/3 changesets (100.00%)
@@ -573,6 +572,7 @@
   bundling: foo/Bar/file.txt 1/3 files (33.33%)
   bundling: foo/file.txt 2/3 files (66.67%)
   bundling: quux/file.py 3/3 files (100.00%)
+  adding changesets
   changesets: 1 chunks
   add changeset ef1ea85a6374
   changesets: 2 chunks
@@ -634,7 +634,6 @@
   ef1ea85a6374b77d6da9dcda9541f498f2d17df7
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
-  adding changesets
   bundling: 1/3 changesets (33.33%)
   bundling: 2/3 changesets (66.67%)
   bundling: 3/3 changesets (100.00%)
@@ -644,6 +643,7 @@
   bundling: foo/Bar/file.txt 1/3 files (33.33%)
   bundling: foo/file.txt 2/3 files (66.67%)
   bundling: quux/file.py 3/3 files (100.00%)
+  adding changesets
   changesets: 1 chunks
   add changeset ef1ea85a6374
   changesets: 2 chunks
@@ -707,7 +707,6 @@
   ef1ea85a6374b77d6da9dcda9541f498f2d17df7
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
-  adding changesets
   bundling: 1/3 changesets (33.33%)
   bundling: 2/3 changesets (66.67%)
   bundling: 3/3 changesets (100.00%)
@@ -717,6 +716,7 @@
   bundling: foo/Bar/file.txt 1/3 files (33.33%)
   bundling: foo/file.txt 2/3 files (66.67%)
   bundling: quux/file.py 3/3 files (100.00%)
+  adding changesets
   changesets: 1 chunks
   add changeset ef1ea85a6374
   changesets: 2 chunks
@@ -747,9 +747,9 @@
   acl: path access granted: "f9cafe1212c8"
   acl: branch access granted: "911600dab2ae" on branch "default"
   acl: path access granted: "911600dab2ae"
+  updating the branch cache
   listing keys for "phases"
   try to push obsolete markers to remote
-  updating the branch cache
   checking for updated bookmarks
   listing keys for "bookmarks"
   repository tip rolled back to revision 0 (undo push)
@@ -786,7 +786,6 @@
   ef1ea85a6374b77d6da9dcda9541f498f2d17df7
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
-  adding changesets
   bundling: 1/3 changesets (33.33%)
   bundling: 2/3 changesets (66.67%)
   bundling: 3/3 changesets (100.00%)
@@ -796,6 +795,7 @@
   bundling: foo/Bar/file.txt 1/3 files (33.33%)
   bundling: foo/file.txt 2/3 files (66.67%)
   bundling: quux/file.py 3/3 files (100.00%)
+  adding changesets
   changesets: 1 chunks
   add changeset ef1ea85a6374
   changesets: 2 chunks
@@ -866,7 +866,6 @@
   ef1ea85a6374b77d6da9dcda9541f498f2d17df7
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
-  adding changesets
   bundling: 1/3 changesets (33.33%)
   bundling: 2/3 changesets (66.67%)
   bundling: 3/3 changesets (100.00%)
@@ -876,6 +875,7 @@
   bundling: foo/Bar/file.txt 1/3 files (33.33%)
   bundling: foo/file.txt 2/3 files (66.67%)
   bundling: quux/file.py 3/3 files (100.00%)
+  adding changesets
   changesets: 1 chunks
   add changeset ef1ea85a6374
   changesets: 2 chunks
@@ -941,7 +941,6 @@
   ef1ea85a6374b77d6da9dcda9541f498f2d17df7
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
-  adding changesets
   bundling: 1/3 changesets (33.33%)
   bundling: 2/3 changesets (66.67%)
   bundling: 3/3 changesets (100.00%)
@@ -951,6 +950,7 @@
   bundling: foo/Bar/file.txt 1/3 files (33.33%)
   bundling: foo/file.txt 2/3 files (66.67%)
   bundling: quux/file.py 3/3 files (100.00%)
+  adding changesets
   changesets: 1 chunks
   add changeset ef1ea85a6374
   changesets: 2 chunks
@@ -1027,7 +1027,6 @@
   ef1ea85a6374b77d6da9dcda9541f498f2d17df7
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
-  adding changesets
   bundling: 1/3 changesets (33.33%)
   bundling: 2/3 changesets (66.67%)
   bundling: 3/3 changesets (100.00%)
@@ -1037,6 +1036,7 @@
   bundling: foo/Bar/file.txt 1/3 files (33.33%)
   bundling: foo/file.txt 2/3 files (66.67%)
   bundling: quux/file.py 3/3 files (100.00%)
+  adding changesets
   changesets: 1 chunks
   add changeset ef1ea85a6374
   changesets: 2 chunks
@@ -1067,9 +1067,9 @@
   acl: path access granted: "f9cafe1212c8"
   acl: branch access granted: "911600dab2ae" on branch "default"
   acl: path access granted: "911600dab2ae"
+  updating the branch cache
   listing keys for "phases"
   try to push obsolete markers to remote
-  updating the branch cache
   checking for updated bookmarks
   listing keys for "bookmarks"
   repository tip rolled back to revision 0 (undo push)
@@ -1107,7 +1107,6 @@
   ef1ea85a6374b77d6da9dcda9541f498f2d17df7
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
-  adding changesets
   bundling: 1/3 changesets (33.33%)
   bundling: 2/3 changesets (66.67%)
   bundling: 3/3 changesets (100.00%)
@@ -1117,6 +1116,7 @@
   bundling: foo/Bar/file.txt 1/3 files (33.33%)
   bundling: foo/file.txt 2/3 files (66.67%)
   bundling: quux/file.py 3/3 files (100.00%)
+  adding changesets
   changesets: 1 chunks
   add changeset ef1ea85a6374
   changesets: 2 chunks
@@ -1147,9 +1147,9 @@
   acl: path access granted: "f9cafe1212c8"
   acl: branch access granted: "911600dab2ae" on branch "default"
   acl: path access granted: "911600dab2ae"
+  updating the branch cache
   listing keys for "phases"
   try to push obsolete markers to remote
-  updating the branch cache
   checking for updated bookmarks
   listing keys for "bookmarks"
   repository tip rolled back to revision 0 (undo push)
@@ -1183,7 +1183,6 @@
   ef1ea85a6374b77d6da9dcda9541f498f2d17df7
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
-  adding changesets
   bundling: 1/3 changesets (33.33%)
   bundling: 2/3 changesets (66.67%)
   bundling: 3/3 changesets (100.00%)
@@ -1193,6 +1192,7 @@
   bundling: foo/Bar/file.txt 1/3 files (33.33%)
   bundling: foo/file.txt 2/3 files (66.67%)
   bundling: quux/file.py 3/3 files (100.00%)
+  adding changesets
   changesets: 1 chunks
   add changeset ef1ea85a6374
   changesets: 2 chunks
@@ -1259,7 +1259,6 @@
   ef1ea85a6374b77d6da9dcda9541f498f2d17df7
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
-  adding changesets
   bundling: 1/3 changesets (33.33%)
   bundling: 2/3 changesets (66.67%)
   bundling: 3/3 changesets (100.00%)
@@ -1269,6 +1268,7 @@
   bundling: foo/Bar/file.txt 1/3 files (33.33%)
   bundling: foo/file.txt 2/3 files (66.67%)
   bundling: quux/file.py 3/3 files (100.00%)
+  adding changesets
   changesets: 1 chunks
   add changeset ef1ea85a6374
   changesets: 2 chunks
@@ -1300,9 +1300,9 @@
   acl: path access granted: "f9cafe1212c8"
   acl: branch access granted: "911600dab2ae" on branch "default"
   acl: path access granted: "911600dab2ae"
+  updating the branch cache
   listing keys for "phases"
   try to push obsolete markers to remote
-  updating the branch cache
   checking for updated bookmarks
   listing keys for "bookmarks"
   repository tip rolled back to revision 0 (undo push)
@@ -1336,7 +1336,6 @@
   ef1ea85a6374b77d6da9dcda9541f498f2d17df7
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
-  adding changesets
   bundling: 1/3 changesets (33.33%)
   bundling: 2/3 changesets (66.67%)
   bundling: 3/3 changesets (100.00%)
@@ -1346,6 +1345,7 @@
   bundling: foo/Bar/file.txt 1/3 files (33.33%)
   bundling: foo/file.txt 2/3 files (66.67%)
   bundling: quux/file.py 3/3 files (100.00%)
+  adding changesets
   changesets: 1 chunks
   add changeset ef1ea85a6374
   changesets: 2 chunks
@@ -1451,7 +1451,6 @@
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
   e8fc755d4d8217ee5b0c2bb41558c40d43b92c01
-  adding changesets
   bundling: 1/4 changesets (25.00%)
   bundling: 2/4 changesets (50.00%)
   bundling: 3/4 changesets (75.00%)
@@ -1464,6 +1463,7 @@
   bundling: foo/Bar/file.txt 2/4 files (50.00%)
   bundling: foo/file.txt 3/4 files (75.00%)
   bundling: quux/file.py 4/4 files (100.00%)
+  adding changesets
   changesets: 1 chunks
   add changeset ef1ea85a6374
   changesets: 2 chunks
@@ -1501,9 +1501,9 @@
   acl: path access granted: "911600dab2ae"
   acl: branch access granted: "e8fc755d4d82" on branch "foobar"
   acl: path access granted: "e8fc755d4d82"
+  updating the branch cache
   listing keys for "phases"
   try to push obsolete markers to remote
-  updating the branch cache
   checking for updated bookmarks
   listing keys for "bookmarks"
   repository tip rolled back to revision 2 (undo push)
@@ -1534,7 +1534,6 @@
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
   e8fc755d4d8217ee5b0c2bb41558c40d43b92c01
-  adding changesets
   bundling: 1/4 changesets (25.00%)
   bundling: 2/4 changesets (50.00%)
   bundling: 3/4 changesets (75.00%)
@@ -1547,6 +1546,7 @@
   bundling: foo/Bar/file.txt 2/4 files (50.00%)
   bundling: foo/file.txt 3/4 files (75.00%)
   bundling: quux/file.py 4/4 files (100.00%)
+  adding changesets
   changesets: 1 chunks
   add changeset ef1ea85a6374
   changesets: 2 chunks
@@ -1613,7 +1613,6 @@
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
   e8fc755d4d8217ee5b0c2bb41558c40d43b92c01
-  adding changesets
   bundling: 1/4 changesets (25.00%)
   bundling: 2/4 changesets (50.00%)
   bundling: 3/4 changesets (75.00%)
@@ -1626,6 +1625,7 @@
   bundling: foo/Bar/file.txt 2/4 files (50.00%)
   bundling: foo/file.txt 3/4 files (75.00%)
   bundling: quux/file.py 4/4 files (100.00%)
+  adding changesets
   changesets: 1 chunks
   add changeset ef1ea85a6374
   changesets: 2 chunks
@@ -1688,7 +1688,6 @@
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
   e8fc755d4d8217ee5b0c2bb41558c40d43b92c01
-  adding changesets
   bundling: 1/4 changesets (25.00%)
   bundling: 2/4 changesets (50.00%)
   bundling: 3/4 changesets (75.00%)
@@ -1701,6 +1700,7 @@
   bundling: foo/Bar/file.txt 2/4 files (50.00%)
   bundling: foo/file.txt 3/4 files (75.00%)
   bundling: quux/file.py 4/4 files (100.00%)
+  adding changesets
   changesets: 1 chunks
   add changeset ef1ea85a6374
   changesets: 2 chunks
@@ -1757,7 +1757,6 @@
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
   e8fc755d4d8217ee5b0c2bb41558c40d43b92c01
-  adding changesets
   bundling: 1/4 changesets (25.00%)
   bundling: 2/4 changesets (50.00%)
   bundling: 3/4 changesets (75.00%)
@@ -1770,6 +1769,7 @@
   bundling: foo/Bar/file.txt 2/4 files (50.00%)
   bundling: foo/file.txt 3/4 files (75.00%)
   bundling: quux/file.py 4/4 files (100.00%)
+  adding changesets
   changesets: 1 chunks
   add changeset ef1ea85a6374
   changesets: 2 chunks
@@ -1807,9 +1807,9 @@
   acl: path access granted: "911600dab2ae"
   acl: branch access granted: "e8fc755d4d82" on branch "foobar"
   acl: path access granted: "e8fc755d4d82"
+  updating the branch cache
   listing keys for "phases"
   try to push obsolete markers to remote
-  updating the branch cache
   checking for updated bookmarks
   listing keys for "bookmarks"
   repository tip rolled back to revision 2 (undo push)
@@ -1845,7 +1845,6 @@
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
   e8fc755d4d8217ee5b0c2bb41558c40d43b92c01
-  adding changesets
   bundling: 1/4 changesets (25.00%)
   bundling: 2/4 changesets (50.00%)
   bundling: 3/4 changesets (75.00%)
@@ -1858,6 +1857,7 @@
   bundling: foo/Bar/file.txt 2/4 files (50.00%)
   bundling: foo/file.txt 3/4 files (75.00%)
   bundling: quux/file.py 4/4 files (100.00%)
+  adding changesets
   changesets: 1 chunks
   add changeset ef1ea85a6374
   changesets: 2 chunks
@@ -1895,9 +1895,9 @@
   acl: path access granted: "911600dab2ae"
   acl: branch access granted: "e8fc755d4d82" on branch "foobar"
   acl: path access granted: "e8fc755d4d82"
+  updating the branch cache
   listing keys for "phases"
   try to push obsolete markers to remote
-  updating the branch cache
   checking for updated bookmarks
   listing keys for "bookmarks"
   repository tip rolled back to revision 2 (undo push)
@@ -1932,7 +1932,6 @@
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
   e8fc755d4d8217ee5b0c2bb41558c40d43b92c01
-  adding changesets
   bundling: 1/4 changesets (25.00%)
   bundling: 2/4 changesets (50.00%)
   bundling: 3/4 changesets (75.00%)
@@ -1945,6 +1944,7 @@
   bundling: foo/Bar/file.txt 2/4 files (50.00%)
   bundling: foo/file.txt 3/4 files (75.00%)
   bundling: quux/file.py 4/4 files (100.00%)
+  adding changesets
   changesets: 1 chunks
   add changeset ef1ea85a6374
   changesets: 2 chunks
@@ -2006,7 +2006,6 @@
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
   e8fc755d4d8217ee5b0c2bb41558c40d43b92c01
-  adding changesets
   bundling: 1/4 changesets (25.00%)
   bundling: 2/4 changesets (50.00%)
   bundling: 3/4 changesets (75.00%)
@@ -2019,6 +2018,7 @@
   bundling: foo/Bar/file.txt 2/4 files (50.00%)
   bundling: foo/file.txt 3/4 files (75.00%)
   bundling: quux/file.py 4/4 files (100.00%)
+  adding changesets
   changesets: 1 chunks
   add changeset ef1ea85a6374
   changesets: 2 chunks
@@ -2056,9 +2056,9 @@
   acl: path access granted: "911600dab2ae"
   acl: branch access granted: "e8fc755d4d82" on branch "foobar"
   acl: path access granted: "e8fc755d4d82"
+  updating the branch cache
   listing keys for "phases"
   try to push obsolete markers to remote
-  updating the branch cache
   checking for updated bookmarks
   listing keys for "bookmarks"
   repository tip rolled back to revision 2 (undo push)
@@ -2087,7 +2087,6 @@
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
   e8fc755d4d8217ee5b0c2bb41558c40d43b92c01
-  adding changesets
   bundling: 1/4 changesets (25.00%)
   bundling: 2/4 changesets (50.00%)
   bundling: 3/4 changesets (75.00%)
@@ -2100,6 +2099,7 @@
   bundling: foo/Bar/file.txt 2/4 files (50.00%)
   bundling: foo/file.txt 3/4 files (75.00%)
   bundling: quux/file.py 4/4 files (100.00%)
+  adding changesets
   changesets: 1 chunks
   add changeset ef1ea85a6374
   changesets: 2 chunks
--- a/tests/test-ancestor.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-ancestor.py	Thu Apr 17 19:36:17 2014 -0400
@@ -103,7 +103,7 @@
 
 
 # The C gca algorithm requires a real repo. These are textual descriptions of
-# dags that have been known to be problematic.
+# DAGs that have been known to be problematic.
 dagtests = [
     '+2*2*2/*3/2',
     '+3*3/*2*2/*4*4/*4/2*4/2*2',
--- a/tests/test-backout.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-backout.t	Thu Apr 17 19:36:17 2014 -0400
@@ -92,7 +92,7 @@
 should fail
 
   $ hg backout 1
-  abort: cannot backout change on a different branch
+  abort: cannot backout change that is not an ancestor
   [255]
   $ echo c > c
   $ hg ci -Am2
@@ -108,7 +108,7 @@
 should fail
 
   $ hg backout 1
-  abort: cannot backout change on a different branch
+  abort: cannot backout change that is not an ancestor
   [255]
   $ hg summary
   parent: 2:db815d6d32e6 tip
--- a/tests/test-basic.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-basic.t	Thu Apr 17 19:36:17 2014 -0400
@@ -1,5 +1,12 @@
 Create a repository:
 
+  $ hg config
+  defaults.backout=-d "0 0"
+  defaults.commit=-d "0 0"
+  defaults.shelve=--date "0 0"
+  defaults.tag=-d "0 0"
+  ui.slash=True
+  ui.interactive=False
   $ hg init t
   $ cd t
 
--- a/tests/test-blackbox.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-blackbox.t	Thu Apr 17 19:36:17 2014 -0400
@@ -55,8 +55,10 @@
   adding file changes
   added 1 changesets with 1 changes to 1 files
   (run 'hg update' to get a working copy)
-  $ hg blackbox -l 3
+  $ hg blackbox -l 5
   1970/01/01 00:00:00 bob> pull
+  1970/01/01 00:00:00 bob> updated served branch cache in ?.???? seconds (glob)
+  1970/01/01 00:00:00 bob> wrote served branch cache with 1 labels and 2 nodes
   1970/01/01 00:00:00 bob> 1 incoming changes - new heads: d02f48003e62
   1970/01/01 00:00:00 bob> pull exited 0 after * seconds (glob)
 
@@ -115,11 +117,25 @@
   $ hg strip tip
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
   saved backup bundle to $TESTTMP/blackboxtest2/.hg/strip-backup/*-backup.hg (glob)
-  $ hg blackbox -l 3
+  $ hg blackbox -l 5
   1970/01/01 00:00:00 bob> strip tip
   1970/01/01 00:00:00 bob> saved backup bundle to $TESTTMP/blackboxtest2/.hg/strip-backup/*-backup.hg (glob)
+  1970/01/01 00:00:00 bob> updated base branch cache in ?.???? seconds (glob)
+  1970/01/01 00:00:00 bob> wrote base branch cache with 1 labels and 2 nodes
   1970/01/01 00:00:00 bob> strip tip exited 0 after * seconds (glob)
 
+tags cache gets logged
+  $ hg up tip
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg tag -m 'create test tag' test-tag
+  $ hg tags
+  tip                                3:5b5562c08298
+  test-tag                           2:d02f48003e62
+  $ hg blackbox -l 3
+  1970/01/01 00:00:00 bob> resolved 1 tags cache entries from 1 manifests in ?.???? seconds (glob)
+  1970/01/01 00:00:00 bob> writing tags cache file with 2 heads and 1 tags
+  1970/01/01 00:00:00 bob> tags exited 0 after ?.?? seconds (glob)
+
 extension and python hooks - use the eol extension for a pythonhook
 
   $ echo '[extensions]' >> .hg/hgrc
@@ -128,7 +144,7 @@
   $ echo 'update = echo hooked' >> .hg/hgrc
   $ hg update
   hooked
-  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ hg blackbox -l 4
   1970/01/01 00:00:00 bob> update
   1970/01/01 00:00:00 bob> pythonhook-preupdate: hgext.eol.preupdate finished in * seconds (glob)
@@ -144,7 +160,7 @@
   $ hg status
   $ hg status
   $ hg tip -q
-  2:d02f48003e62
+  3:5b5562c08298
   $ ls .hg/blackbox.log*
   .hg/blackbox.log
   .hg/blackbox.log.1
--- a/tests/test-bookmarks-current.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-bookmarks-current.t	Thu Apr 17 19:36:17 2014 -0400
@@ -183,7 +183,7 @@
   [1]
 
 when a bookmark is active, hg up -r . is
-analogus to hg book -i <active bookmark>
+analogous to hg book -i <active bookmark>
 
   $ hg up -q X
   $ hg up -q .
--- a/tests/test-bundle.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-bundle.t	Thu Apr 17 19:36:17 2014 -0400
@@ -546,7 +546,7 @@
 
 test that verify bundle does not traceback
 
-partial history bundle, fails w/ unkown parent
+partial history bundle, fails w/ unknown parent
 
   $ hg -R bundle.hg verify
   abort: 00changelog.i@bbd179dfa0a7: unknown parent!
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-bundle2.t	Thu Apr 17 19:36:17 2014 -0400
@@ -0,0 +1,885 @@
+
+Create an extension to test the bundle2 API
+
+  $ cat > bundle2.py << EOF
+  > """A small extension to test the bundle2 implementation
+  > 
+  > The current bundle2 implementation is far too limited to be used in any
+  > core code. We still need to be able to test it while it grows up.
+  > """
+  > 
+  > import sys
+  > from mercurial import cmdutil
+  > from mercurial import util
+  > from mercurial import bundle2
+  > from mercurial import scmutil
+  > from mercurial import discovery
+  > from mercurial import changegroup
+  > cmdtable = {}
+  > command = cmdutil.command(cmdtable)
+  > 
+  > ELEPHANTSSONG = """Patali Dirapata, Cromda Cromda Ripalo, Pata Pata, Ko Ko Ko
+  > Bokoro Dipoulito, Rondi Rondi Pepino, Pata Pata, Ko Ko Ko
+  > Emana Karassoli, Loucra Loucra Ponponto, Pata Pata, Ko Ko Ko."""
+  > assert len(ELEPHANTSSONG) == 178 # future tests say 178 bytes, trust it.
+  > 
+  > @bundle2.parthandler('test:song')
+  > def songhandler(op, part):
+  >     """handle a "test:song" bundle2 part, printing the lyrics on stdout"""
+  >     op.ui.write('The choir starts singing:\n')
+  >     verses = 0
+  >     for line in part.read().split('\n'):
+  >         op.ui.write('    %s\n' % line)
+  >         verses += 1
+  >     op.records.add('song', {'verses': verses})
+  > 
+  > @bundle2.parthandler('test:ping')
+  > def pinghandler(op, part):
+  >     op.ui.write('received ping request (id %i)\n' % part.id)
+  >     if op.reply is not None and 'ping-pong' in op.reply.capabilities:
+  >         op.ui.write_err('replying to ping request (id %i)\n' % part.id)
+  >         rpart = bundle2.bundlepart('test:pong',
+  >                                    [('in-reply-to', str(part.id))])
+  >         op.reply.addpart(rpart)
+  > 
+  > @bundle2.parthandler('test:debugreply')
+  > def debugreply(op, part):
+  >     """print data about the capabilities of the bundle reply"""
+  >     if op.reply is None:
+  >         op.ui.write('debugreply: no reply\n')
+  >     else:
+  >         op.ui.write('debugreply: capabilities:\n')
+  >         for cap in sorted(op.reply.capabilities):
+  >             op.ui.write('debugreply:     %r\n' % cap)
+  >             for val in op.reply.capabilities[cap]:
+  >                 op.ui.write('debugreply:         %r\n' % val)
+  > 
+  > @command('bundle2',
+  >          [('', 'param', [], 'stream level parameter'),
+  >           ('', 'unknown', False, 'include an unknown mandatory part in the bundle'),
+  >           ('', 'parts', False, 'include some arbitrary parts to the bundle'),
+  >           ('', 'reply', False, 'produce a reply bundle'),
+  >           ('r', 'rev', [], 'include the specified changesets in the bundle'),],
+  >          '[OUTPUTFILE]')
+  > def cmdbundle2(ui, repo, path=None, **opts):
+  >     """write a bundle2 container to standard output"""
+  >     bundler = bundle2.bundle20(ui)
+  >     for p in opts['param']:
+  >         p = p.split('=', 1)
+  >         try:
+  >             bundler.addparam(*p)
+  >         except ValueError, exc:
+  >             raise util.Abort('%s' % exc)
+  > 
+  >     if opts['reply']:
+  >         capsstring = 'ping-pong\nelephants=babar,celeste\ncity%3D%21=celeste%2Cville'
+  >         bundler.addpart(bundle2.bundlepart('b2x:replycaps', data=capsstring))
+  > 
+  >     revs = opts['rev']
+  >     if 'rev' in opts:
+  >         revs = scmutil.revrange(repo, opts['rev'])
+  >         if revs:
+  >             # very crude version of a changegroup part creation
+  >             bundled = repo.revs('%ld::%ld', revs, revs)
+  >             headmissing = [c.node() for c in repo.set('heads(%ld)', revs)]
+  >             headcommon  = [c.node() for c in repo.set('parents(%ld) - %ld', revs, revs)]
+  >             outgoing = discovery.outgoing(repo.changelog, headcommon, headmissing)
+  >             cg = changegroup.getlocalbundle(repo, 'test:bundle2', outgoing, None)
+  >             part = bundle2.bundlepart('b2x:changegroup', data=cg.getchunks())
+  >             bundler.addpart(part)
+  > 
+  >     if opts['parts']:
+  >        part = bundle2.bundlepart('test:empty')
+  >        bundler.addpart(part)
+  >        # add a second one to make sure we handle multiple parts
+  >        part = bundle2.bundlepart('test:empty')
+  >        bundler.addpart(part)
+  >        part = bundle2.bundlepart('test:song', data=ELEPHANTSSONG)
+  >        bundler.addpart(part)
+  >        part = bundle2.bundlepart('test:debugreply')
+  >        bundler.addpart(part)
+  >        part = bundle2.bundlepart('test:math',
+  >                                  [('pi', '3.14'), ('e', '2.72')],
+  >                                  [('cooking', 'raw')],
+  >                                  '42')
+  >        bundler.addpart(part)
+  >     if opts['unknown']:
+  >        part = bundle2.bundlepart('test:UNKNOWN',
+  >                                  data='some random content')
+  >        bundler.addpart(part)
+  >     if opts['parts']:
+  >        part = bundle2.bundlepart('test:ping')
+  >        bundler.addpart(part)
+  > 
+  >     if path is None:
+  >        file = sys.stdout
+  >     else:
+  >         file = open(path, 'w')
+  > 
+  >     for chunk in bundler.getchunks():
+  >         file.write(chunk)
+  > 
+  > @command('unbundle2', [], '')
+  > def cmdunbundle2(ui, repo, replypath=None):
+  >     """process a bundle2 stream from stdin on the current repo"""
+  >     try:
+  >         tr = None
+  >         lock = repo.lock()
+  >         tr = repo.transaction('processbundle')
+  >         try:
+  >             unbundler = bundle2.unbundle20(ui, sys.stdin)
+  >             op = bundle2.processbundle(repo, unbundler, lambda: tr)
+  >             tr.close()
+  >         except KeyError, exc:
+  >             raise util.Abort('missing support for %s' % exc)
+  >     finally:
+  >         if tr is not None:
+  >             tr.release()
+  >         lock.release()
+  >         remains = sys.stdin.read()
+  >         ui.write('%i unread bytes\n' % len(remains))
+  >     if op.records['song']:
+  >         totalverses = sum(r['verses'] for r in op.records['song'])
+  >         ui.write('%i total verses sung\n' % totalverses)
+  >     for rec in op.records['changegroup']:
+  >         ui.write('addchangegroup return: %i\n' % rec['return'])
+  >     if op.reply is not None and replypath is not None:
+  >         file = open(replypath, 'w')
+  >         for chunk in op.reply.getchunks():
+  >             file.write(chunk)
+  > 
+  > @command('statbundle2', [], '')
+  > def cmdstatbundle2(ui, repo):
+  >     """print statistics on the bundle2 container read from stdin"""
+  >     unbundler = bundle2.unbundle20(ui, sys.stdin)
+  >     try:
+  >         params = unbundler.params
+  >     except KeyError, exc:
+  >        raise util.Abort('unknown parameters: %s' % exc)
+  >     ui.write('options count: %i\n' % len(params))
+  >     for key in sorted(params):
+  >         ui.write('- %s\n' % key)
+  >         value = params[key]
+  >         if value is not None:
+  >             ui.write('    %s\n' % value)
+  >     count = 0
+  >     for p in unbundler.iterparts():
+  >         count += 1
+  >         ui.write('  :%s:\n' % p.type)
+  >         ui.write('    mandatory: %i\n' % len(p.mandatoryparams))
+  >         ui.write('    advisory: %i\n' % len(p.advisoryparams))
+  >         ui.write('    payload: %i bytes\n' % len(p.read()))
+  >     ui.write('parts count:   %i\n' % count)
+  > EOF
+  $ cat >> $HGRCPATH << EOF
+  > [extensions]
+  > bundle2=$TESTTMP/bundle2.py
+  > [experimental]
+  > bundle2-exp=True
+  > [ui]
+  > ssh=python "$TESTDIR/dummyssh"
+  > [web]
+  > push_ssl = false
+  > allow_push = *
+  > EOF
+
+The extension requires a repo (currently unused)
+
+  $ hg init main
+  $ cd main
+  $ touch a
+  $ hg add a
+  $ hg commit -m 'a'
+
+
+Empty bundle
+=================
+
+- no option
+- no parts
+
+Test bundling
+
+  $ hg bundle2
+  HG2X\x00\x00\x00\x00 (no-eol) (esc)
+
+Test unbundling
+
+  $ hg bundle2 | hg statbundle2
+  options count: 0
+  parts count:   0
+
+Test that old-style bundles are detected and refused
+
+  $ hg bundle --all ../bundle.hg
+  1 changesets found
+  $ hg statbundle2 < ../bundle.hg
+  abort: unknown bundle version 10
+  [255]
+
+Test parameters
+=================
+
+- some options
+- no parts
+
+advisory parameters, no value
+-------------------------------
+
+Simplest possible parameter form
+
+Test generation of a simple option
+
+  $ hg bundle2 --param 'caution'
+  HG2X\x00\x07caution\x00\x00 (no-eol) (esc)
+
+Test unbundling
+
+  $ hg bundle2 --param 'caution' | hg statbundle2
+  options count: 1
+  - caution
+  parts count:   0
+
+Test generation of multiple options
+
+  $ hg bundle2 --param 'caution' --param 'meal'
+  HG2X\x00\x0ccaution meal\x00\x00 (no-eol) (esc)
+
+Test unbundling
+
+  $ hg bundle2 --param 'caution' --param 'meal' | hg statbundle2
+  options count: 2
+  - caution
+  - meal
+  parts count:   0
+
+advisory parameters, with value
+-------------------------------
+
+Test generation
+
+  $ hg bundle2 --param 'caution' --param 'meal=vegan' --param 'elephants'
+  HG2X\x00\x1ccaution meal=vegan elephants\x00\x00 (no-eol) (esc)
+
+Test unbundling
+
+  $ hg bundle2 --param 'caution' --param 'meal=vegan' --param 'elephants' | hg statbundle2
+  options count: 3
+  - caution
+  - elephants
+  - meal
+      vegan
+  parts count:   0
+
+parameter with special characters in name and value
+---------------------------------------------------
+
+Test generation
+
+  $ hg bundle2 --param 'e|! 7/=babar%#==tutu' --param simple
+  HG2X\x00)e%7C%21%207/=babar%25%23%3D%3Dtutu simple\x00\x00 (no-eol) (esc)
+
+Test unbundling
+
+  $ hg bundle2 --param 'e|! 7/=babar%#==tutu' --param simple | hg statbundle2
+  options count: 2
+  - e|! 7/
+      babar%#==tutu
+  - simple
+  parts count:   0
+
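For reference, the stream-level framing exercised above is small: the 'HG2X'
magic, a 16-bit big-endian length, then the space-separated parameter list,
with names and values URL-quoted (which is why 'e|! 7/' travels as
'e%7C%21%207/'). A minimal reader written only from what these transcripts
show -- 'readstreamparams' is an illustrative helper, not part of the bundle2
module:

    import struct, urllib

    def readstreamparams(fp):
        # 4-byte magic, then a 16-bit big-endian parameter-block length
        assert fp.read(4) == 'HG2X'
        size, = struct.unpack('>H', fp.read(2))
        params = {}
        blob = fp.read(size)
        for p in (blob.split(' ') if blob else []):
            if '=' in p:
                name, value = [urllib.unquote(s) for s in p.split('=', 1)]
            else:
                name, value = urllib.unquote(p), None
            params[name] = value
        return params

Fed the 'HG2X\x00\x1ccaution meal=vegan elephants' stream above, this yields
{'caution': None, 'meal': 'vegan', 'elephants': None}, matching the
statbundle2 listing.
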
+Test unknown mandatory option
+---------------------------------------------------
+
+  $ hg bundle2 --param 'Gravity' | hg statbundle2
+  abort: unknown parameters: 'Gravity'
+  [255]
+
+Test debug output
+---------------------------------------------------
+
+bundling debug
+
+  $ hg bundle2 --debug --param 'e|! 7/=babar%#==tutu' --param simple ../out.hg2
+  start emission of HG2X stream
+  bundle parameter: e%7C%21%207/=babar%25%23%3D%3Dtutu simple
+  start of parts
+  end of bundle
+
+file content is ok
+
+  $ cat ../out.hg2
+  HG2X\x00)e%7C%21%207/=babar%25%23%3D%3Dtutu simple\x00\x00 (no-eol) (esc)
+
+unbundling debug
+
+  $ hg statbundle2 --debug < ../out.hg2
+  start processing of HG2X stream
+  reading bundle2 stream parameters
+  ignoring unknown parameter 'e|! 7/'
+  ignoring unknown parameter 'simple'
+  options count: 2
+  - e|! 7/
+      babar%#==tutu
+  - simple
+  start extraction of bundle2 parts
+  part header size: 0
+  end of bundle2 stream
+  parts count:   0
+
+
+Test buggy input
+---------------------------------------------------
+
+empty parameter name
+
+  $ hg bundle2 --param '' --quiet
+  abort: empty parameter name
+  [255]
+
+bad parameter name
+
+  $ hg bundle2 --param 42babar
+  abort: non letter first character: '42babar'
+  [255]
+
+
+Test part
+=================
+
+  $ hg bundle2 --parts ../parts.hg2 --debug
+  start emission of HG2X stream
+  bundle parameter: 
+  start of parts
+  bundle part: "test:empty"
+  bundle part: "test:empty"
+  bundle part: "test:song"
+  bundle part: "test:debugreply"
+  bundle part: "test:math"
+  bundle part: "test:ping"
+  end of bundle
+
+  $ cat ../parts.hg2
+  HG2X\x00\x00\x00\x11 (esc)
+  test:empty\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11 (esc)
+  test:empty\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x10	test:song\x00\x00\x00\x02\x00\x00\x00\x00\x00\xb2Patali Dirapata, Cromda Cromda Ripalo, Pata Pata, Ko Ko Ko (esc)
+  Bokoro Dipoulito, Rondi Rondi Pepino, Pata Pata, Ko Ko Ko
+  Emana Karassoli, Loucra Loucra Ponponto, Pata Pata, Ko Ko Ko.\x00\x00\x00\x00\x00\x16\x0ftest:debugreply\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00+	test:math\x00\x00\x00\x04\x02\x01\x02\x04\x01\x04\x07\x03pi3.14e2.72cookingraw\x00\x00\x00\x0242\x00\x00\x00\x00\x00\x10	test:ping\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x00 (no-eol) (esc)
+
+
+  $ hg statbundle2 < ../parts.hg2
+  options count: 0
+    :test:empty:
+      mandatory: 0
+      advisory: 0
+      payload: 0 bytes
+    :test:empty:
+      mandatory: 0
+      advisory: 0
+      payload: 0 bytes
+    :test:song:
+      mandatory: 0
+      advisory: 0
+      payload: 178 bytes
+    :test:debugreply:
+      mandatory: 0
+      advisory: 0
+      payload: 0 bytes
+    :test:math:
+      mandatory: 2
+      advisory: 1
+      payload: 2 bytes
+    :test:ping:
+      mandatory: 0
+      advisory: 0
+      payload: 0 bytes
+  parts count:   6
+
+  $ hg statbundle2 --debug < ../parts.hg2
+  start processing of HG2X stream
+  reading bundle2 stream parameters
+  options count: 0
+  start extraction of bundle2 parts
+  part header size: 17
+  part type: "test:empty"
+  part id: "0"
+  part parameters: 0
+    :test:empty:
+      mandatory: 0
+      advisory: 0
+  payload chunk size: 0
+      payload: 0 bytes
+  part header size: 17
+  part type: "test:empty"
+  part id: "1"
+  part parameters: 0
+    :test:empty:
+      mandatory: 0
+      advisory: 0
+  payload chunk size: 0
+      payload: 0 bytes
+  part header size: 16
+  part type: "test:song"
+  part id: "2"
+  part parameters: 0
+    :test:song:
+      mandatory: 0
+      advisory: 0
+  payload chunk size: 178
+  payload chunk size: 0
+      payload: 178 bytes
+  part header size: 22
+  part type: "test:debugreply"
+  part id: "3"
+  part parameters: 0
+    :test:debugreply:
+      mandatory: 0
+      advisory: 0
+  payload chunk size: 0
+      payload: 0 bytes
+  part header size: 43
+  part type: "test:math"
+  part id: "4"
+  part parameters: 3
+    :test:math:
+      mandatory: 2
+      advisory: 1
+  payload chunk size: 2
+  payload chunk size: 0
+      payload: 2 bytes
+  part header size: 16
+  part type: "test:ping"
+  part id: "5"
+  part parameters: 0
+    :test:ping:
+      mandatory: 0
+      advisory: 0
+  payload chunk size: 0
+      payload: 0 bytes
+  part header size: 0
+  end of bundle2 stream
+  parts count:   6
+
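The debug listing pins down the part framing as well: a 16-bit header size
(zero terminates the stream); a header made of a one-byte type length, the
type string, a 32-bit part id, one-byte mandatory and advisory parameter
counts, per-parameter (key size, value size) byte pairs, and the concatenated
key/value strings; then the payload as 32-bit length-prefixed chunks ending
with an empty chunk. A rough walker assuming only that layout (an
illustrative sketch, not the bundle2 API; it expects the stream-level
parameters to have been consumed already):

    import struct

    def iterparts(fp):
        # illustrative sketch: walk HG2X parts after the stream parameters
        while True:
            hsize, = struct.unpack('>H', fp.read(2))  # 0 means end of stream
            if not hsize:
                return
            header = fp.read(hsize)
            typelen = ord(header[0])
            parttype = header[1:1 + typelen]
            pos = 1 + typelen
            partid, = struct.unpack('>I', header[pos:pos + 4])
            pos += 4
            mand, advi = ord(header[pos]), ord(header[pos + 1])
            pos += 2
            sizes = [(ord(header[pos + 2 * i]), ord(header[pos + 2 * i + 1]))
                     for i in xrange(mand + advi)]
            pos += 2 * (mand + advi)
            params = []
            for ksize, vsize in sizes:
                params.append((header[pos:pos + ksize],
                               header[pos + ksize:pos + ksize + vsize]))
                pos += ksize + vsize
            payload = []
            while True:
                csize, = struct.unpack('>I', fp.read(4))  # empty chunk ends it
                if not csize:
                    break
                payload.append(fp.read(csize))
            yield parttype, partid, params, ''.join(payload)

This accounts for every byte of the cat dump above: the 'test:math' header,
for instance, is 1 + 9 + 4 + 2 + 3 * 2 + 21 = 43 bytes, exactly the size the
debug output reports.
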
+Test actual unbundling of test parts
+=======================================
+
+Process the bundle
+
+  $ hg unbundle2 --debug < ../parts.hg2
+  start processing of HG2X stream
+  reading bundle2 stream parameters
+  start extraction of bundle2 parts
+  part header size: 17
+  part type: "test:empty"
+  part id: "0"
+  part parameters: 0
+  ignoring unknown advisory part 'test:empty'
+  payload chunk size: 0
+  part header size: 17
+  part type: "test:empty"
+  part id: "1"
+  part parameters: 0
+  ignoring unknown advisory part 'test:empty'
+  payload chunk size: 0
+  part header size: 16
+  part type: "test:song"
+  part id: "2"
+  part parameters: 0
+  found a handler for part 'test:song'
+  The choir starts singing:
+  payload chunk size: 178
+  payload chunk size: 0
+      Patali Dirapata, Cromda Cromda Ripalo, Pata Pata, Ko Ko Ko
+      Bokoro Dipoulito, Rondi Rondi Pepino, Pata Pata, Ko Ko Ko
+      Emana Karassoli, Loucra Loucra Ponponto, Pata Pata, Ko Ko Ko.
+  part header size: 22
+  part type: "test:debugreply"
+  part id: "3"
+  part parameters: 0
+  found a handler for part 'test:debugreply'
+  debugreply: no reply
+  payload chunk size: 0
+  part header size: 43
+  part type: "test:math"
+  part id: "4"
+  part parameters: 3
+  ignoring unknown advisory part 'test:math'
+  payload chunk size: 2
+  payload chunk size: 0
+  part header size: 16
+  part type: "test:ping"
+  part id: "5"
+  part parameters: 0
+  found a handler for part 'test:ping'
+  received ping request (id 5)
+  payload chunk size: 0
+  part header size: 0
+  end of bundle2 stream
+  0 unread bytes
+  3 total verses sung
+
+Unbundle with an unknown mandatory part
+(should abort)
+
+  $ hg bundle2 --parts --unknown ../unknown.hg2
+
+  $ hg unbundle2 < ../unknown.hg2
+  The choir starts singing:
+      Patali Dirapata, Cromda Cromda Ripalo, Pata Pata, Ko Ko Ko
+      Bokoro Dipoulito, Rondi Rondi Pepino, Pata Pata, Ko Ko Ko
+      Emana Karassoli, Loucra Loucra Ponponto, Pata Pata, Ko Ko Ko.
+  debugreply: no reply
+  0 unread bytes
+  abort: missing support for 'test:unknown'
+  [255]
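
One detail these two transcripts nail down: whether a consumer may skip an
unrecognized part is encoded in the case of its type name. The lowercase
unknowns above ('test:empty', 'test:math') are advisory and merely ignored,
while a name containing uppercase ('test:UNKNOWN') marks the part as
mandatory, so a consumer lacking a handler must abort. A sketch of that
consumer-side rule, as inferred here:

    # illustrative sketch of the rule the transcripts imply
    if parttype != parttype.lower():
        # any uppercase letter marks the part mandatory; the extension's
        # KeyError handler surfaces this as "missing support for ..."
        raise KeyError(parttype.lower())
    # all-lowercase type: advisory, safe to skip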
+
+unbundle with a reply
+
+  $ hg bundle2 --parts --reply ../parts-reply.hg2
+  $ hg unbundle2 ../reply.hg2 < ../parts-reply.hg2
+  0 unread bytes
+  3 total verses sung
+
+The reply is a bundle
+
+  $ cat ../reply.hg2
+  HG2X\x00\x00\x00\x1f (esc)
+  b2x:output\x00\x00\x00\x00\x00\x01\x0b\x01in-reply-to3\x00\x00\x00\xd9The choir starts singing: (esc)
+      Patali Dirapata, Cromda Cromda Ripalo, Pata Pata, Ko Ko Ko
+      Bokoro Dipoulito, Rondi Rondi Pepino, Pata Pata, Ko Ko Ko
+      Emana Karassoli, Loucra Loucra Ponponto, Pata Pata, Ko Ko Ko.
+  \x00\x00\x00\x00\x00\x1f (esc)
+  b2x:output\x00\x00\x00\x01\x00\x01\x0b\x01in-reply-to4\x00\x00\x00\xc9debugreply: capabilities: (esc)
+  debugreply:     'city=!'
+  debugreply:         'celeste,ville'
+  debugreply:     'elephants'
+  debugreply:         'babar'
+  debugreply:         'celeste'
+  debugreply:     'ping-pong'
+  \x00\x00\x00\x00\x00\x1e	test:pong\x00\x00\x00\x02\x01\x00\x0b\x01in-reply-to6\x00\x00\x00\x00\x00\x1f (esc)
+  b2x:output\x00\x00\x00\x03\x00\x01\x0b\x01in-reply-to6\x00\x00\x00=received ping request (id 6) (esc)
+  replying to ping request (id 6)
+  \x00\x00\x00\x00\x00\x00 (no-eol) (esc)
+
+The reply is valid
+
+  $ hg statbundle2 < ../reply.hg2
+  options count: 0
+    :b2x:output:
+      mandatory: 0
+      advisory: 1
+      payload: 217 bytes
+    :b2x:output:
+      mandatory: 0
+      advisory: 1
+      payload: 201 bytes
+    :test:pong:
+      mandatory: 1
+      advisory: 0
+      payload: 0 bytes
+    :b2x:output:
+      mandatory: 0
+      advisory: 1
+      payload: 61 bytes
+  parts count:   4
+
+Unbundle the reply to get the output:
+
+  $ hg unbundle2 < ../reply.hg2
+  remote: The choir starts singing:
+  remote:     Patali Dirapata, Cromda Cromda Ripalo, Pata Pata, Ko Ko Ko
+  remote:     Bokoro Dipoulito, Rondi Rondi Pepino, Pata Pata, Ko Ko Ko
+  remote:     Emana Karassoli, Loucra Loucra Ponponto, Pata Pata, Ko Ko Ko.
+  remote: debugreply: capabilities:
+  remote: debugreply:     'city=!'
+  remote: debugreply:         'celeste,ville'
+  remote: debugreply:     'elephants'
+  remote: debugreply:         'babar'
+  remote: debugreply:         'celeste'
+  remote: debugreply:     'ping-pong'
+  remote: received ping request (id 6)
+  remote: replying to ping request (id 6)
+  0 unread bytes
+
+Support for changegroup
+===================================
+
+  $ hg unbundle $TESTDIR/bundles/rebase.hg
+  adding changesets
+  adding manifests
+  adding file changes
+  added 8 changesets with 7 changes to 7 files (+3 heads)
+  (run 'hg heads' to see heads, 'hg merge' to merge)
+
+  $ hg log -G
+  o  changeset:   8:02de42196ebe
+  |  tag:         tip
+  |  parent:      6:24b6387c8c8c
+  |  user:        Nicolas Dumazet <nicdumz.commits@gmail.com>
+  |  date:        Sat Apr 30 15:24:48 2011 +0200
+  |  summary:     H
+  |
+  | o  changeset:   7:eea13746799a
+  |/|  parent:      6:24b6387c8c8c
+  | |  parent:      5:9520eea781bc
+  | |  user:        Nicolas Dumazet <nicdumz.commits@gmail.com>
+  | |  date:        Sat Apr 30 15:24:48 2011 +0200
+  | |  summary:     G
+  | |
+  o |  changeset:   6:24b6387c8c8c
+  | |  parent:      1:cd010b8cd998
+  | |  user:        Nicolas Dumazet <nicdumz.commits@gmail.com>
+  | |  date:        Sat Apr 30 15:24:48 2011 +0200
+  | |  summary:     F
+  | |
+  | o  changeset:   5:9520eea781bc
+  |/   parent:      1:cd010b8cd998
+  |    user:        Nicolas Dumazet <nicdumz.commits@gmail.com>
+  |    date:        Sat Apr 30 15:24:48 2011 +0200
+  |    summary:     E
+  |
+  | o  changeset:   4:32af7686d403
+  | |  user:        Nicolas Dumazet <nicdumz.commits@gmail.com>
+  | |  date:        Sat Apr 30 15:24:48 2011 +0200
+  | |  summary:     D
+  | |
+  | o  changeset:   3:5fddd98957c8
+  | |  user:        Nicolas Dumazet <nicdumz.commits@gmail.com>
+  | |  date:        Sat Apr 30 15:24:48 2011 +0200
+  | |  summary:     C
+  | |
+  | o  changeset:   2:42ccdea3bb16
+  |/   user:        Nicolas Dumazet <nicdumz.commits@gmail.com>
+  |    date:        Sat Apr 30 15:24:48 2011 +0200
+  |    summary:     B
+  |
+  o  changeset:   1:cd010b8cd998
+     parent:      -1:000000000000
+     user:        Nicolas Dumazet <nicdumz.commits@gmail.com>
+     date:        Sat Apr 30 15:24:48 2011 +0200
+     summary:     A
+  
+  @  changeset:   0:3903775176ed
+     user:        test
+     date:        Thu Jan 01 00:00:00 1970 +0000
+     summary:     a
+  
+
+  $ hg bundle2 --debug --rev '8+7+5+4' ../rev.hg2
+  4 changesets found
+  list of changesets:
+  32af7686d403cf45b5d95f2d70cebea587ac806a
+  9520eea781bcca16c1e15acc0ba14335a0e8e5ba
+  eea13746799a9e0bfd88f29d3c2e9dc9389f524f
+  02de42196ebee42ef284b6780a87cdc96e8eaab6
+  start emission of HG2X stream
+  bundle parameter: 
+  start of parts
+  bundle part: "b2x:changegroup"
+  bundling: 1/4 changesets (25.00%)
+  bundling: 2/4 changesets (50.00%)
+  bundling: 3/4 changesets (75.00%)
+  bundling: 4/4 changesets (100.00%)
+  bundling: 1/4 manifests (25.00%)
+  bundling: 2/4 manifests (50.00%)
+  bundling: 3/4 manifests (75.00%)
+  bundling: 4/4 manifests (100.00%)
+  bundling: D 1/3 files (33.33%)
+  bundling: E 2/3 files (66.67%)
+  bundling: H 3/3 files (100.00%)
+  end of bundle
+
+  $ cat ../rev.hg2
+  HG2X\x00\x00\x00\x16\x0fb2x:changegroup\x00\x00\x00\x00\x00\x00\x00\x00\x06\x13\x00\x00\x00\xa42\xafv\x86\xd4\x03\xcfE\xb5\xd9_-p\xce\xbe\xa5\x87\xac\x80j_\xdd\xd9\x89W\xc8\xa5JMCm\xfe\x1d\xa9\xd8\x7f!\xa1\xb9{\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x002\xafv\x86\xd4\x03\xcfE\xb5\xd9_-p\xce\xbe\xa5\x87\xac\x80j\x00\x00\x00\x00\x00\x00\x00)\x00\x00\x00)6e1f4c47ecb533ffd0c8e52cdc88afb6cd39e20c (esc)
+  \x00\x00\x00f\x00\x00\x00h\x00\x00\x00\x02D (esc)
+  \x00\x00\x00i\x00\x00\x00j\x00\x00\x00\x01D\x00\x00\x00\xa4\x95 \xee\xa7\x81\xbc\xca\x16\xc1\xe1Z\xcc\x0b\xa1C5\xa0\xe8\xe5\xba\xcd\x01\x0b\x8c\xd9\x98\xf3\x98\x1aZ\x81\x15\xf9O\x8d\xa4\xabP`\x89\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x95 \xee\xa7\x81\xbc\xca\x16\xc1\xe1Z\xcc\x0b\xa1C5\xa0\xe8\xe5\xba\x00\x00\x00\x00\x00\x00\x00)\x00\x00\x00)4dece9c826f69490507b98c6383a3009b295837d (esc)
+  \x00\x00\x00f\x00\x00\x00h\x00\x00\x00\x02E (esc)
+  \x00\x00\x00i\x00\x00\x00j\x00\x00\x00\x01E\x00\x00\x00\xa2\xee\xa17Fy\x9a\x9e\x0b\xfd\x88\xf2\x9d<.\x9d\xc98\x9fRO$\xb68|\x8c\x8c\xae7\x17\x88\x80\xf3\xfa\x95\xde\xd3\xcb\x1c\xf7\x85\x95 \xee\xa7\x81\xbc\xca\x16\xc1\xe1Z\xcc\x0b\xa1C5\xa0\xe8\xe5\xba\xee\xa17Fy\x9a\x9e\x0b\xfd\x88\xf2\x9d<.\x9d\xc98\x9fRO\x00\x00\x00\x00\x00\x00\x00)\x00\x00\x00)365b93d57fdf4814e2b5911d6bacff2b12014441 (esc)
+  \x00\x00\x00f\x00\x00\x00h\x00\x00\x00\x00\x00\x00\x00i\x00\x00\x00j\x00\x00\x00\x01G\x00\x00\x00\xa4\x02\xdeB\x19n\xbe\xe4.\xf2\x84\xb6x (esc)
+  \x87\xcd\xc9n\x8e\xaa\xb6$\xb68|\x8c\x8c\xae7\x17\x88\x80\xf3\xfa\x95\xde\xd3\xcb\x1c\xf7\x85\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\xdeB\x19n\xbe\xe4.\xf2\x84\xb6x (esc)
+  \x87\xcd\xc9n\x8e\xaa\xb6\x00\x00\x00\x00\x00\x00\x00)\x00\x00\x00)8bee48edc7318541fc0013ee41b089276a8c24bf (esc)
+  \x00\x00\x00f\x00\x00\x00f\x00\x00\x00\x02H (esc)
+  \x00\x00\x00g\x00\x00\x00h\x00\x00\x00\x01H\x00\x00\x00\x00\x00\x00\x00\x8bn\x1fLG\xec\xb53\xff\xd0\xc8\xe5,\xdc\x88\xaf\xb6\xcd9\xe2\x0cf\xa5\xa0\x18\x17\xfd\xf5#\x9c'8\x02\xb5\xb7a\x8d\x05\x1c\x89\xe4\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x002\xafv\x86\xd4\x03\xcfE\xb5\xd9_-p\xce\xbe\xa5\x87\xac\x80j\x00\x00\x00\x81\x00\x00\x00\x81\x00\x00\x00+D\x00c3f1ca2924c16a19b0656a84900e504e5b0aec2d (esc)
+  \x00\x00\x00\x8bM\xec\xe9\xc8&\xf6\x94\x90P{\x98\xc68:0	\xb2\x95\x83}\x00}\x8c\x9d\x88\x84\x13%\xf5\xc6\xb0cq\xb3[N\x8a+\x1a\x83\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x95 \xee\xa7\x81\xbc\xca\x16\xc1\xe1Z\xcc\x0b\xa1C5\xa0\xe8\xe5\xba\x00\x00\x00+\x00\x00\x00\xac\x00\x00\x00+E\x009c6fd0350a6c0d0c49d4a9c5017cf07043f54e58 (esc)
+  \x00\x00\x00\x8b6[\x93\xd5\x7f\xdfH\x14\xe2\xb5\x91\x1dk\xac\xff+\x12\x01DA(\xa5\x84\xc6^\xf1!\xf8\x9e\xb6j\xb7\xd0\xbc\x15=\x80\x99\xe7\xceM\xec\xe9\xc8&\xf6\x94\x90P{\x98\xc68:0	\xb2\x95\x83}\xee\xa17Fy\x9a\x9e\x0b\xfd\x88\xf2\x9d<.\x9d\xc98\x9fRO\x00\x00\x00V\x00\x00\x00V\x00\x00\x00+F\x0022bfcfd62a21a3287edbd4d656218d0f525ed76a (esc)
+  \x00\x00\x00\x97\x8b\xeeH\xed\xc71\x85A\xfc\x00\x13\xeeA\xb0\x89'j\x8c$\xbf(\xa5\x84\xc6^\xf1!\xf8\x9e\xb6j\xb7\xd0\xbc\x15=\x80\x99\xe7\xce\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\xdeB\x19n\xbe\xe4.\xf2\x84\xb6x (esc)
+  \x87\xcd\xc9n\x8e\xaa\xb6\x00\x00\x00+\x00\x00\x00V\x00\x00\x00\x00\x00\x00\x00\x81\x00\x00\x00\x81\x00\x00\x00+H\x008500189e74a9e0475e822093bc7db0d631aeb0b4 (esc)
+  \x00\x00\x00\x00\x00\x00\x00\x05D\x00\x00\x00b\xc3\xf1\xca)$\xc1j\x19\xb0ej\x84\x90\x0ePN[ (esc)
+  \xec-\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x002\xafv\x86\xd4\x03\xcfE\xb5\xd9_-p\xce\xbe\xa5\x87\xac\x80j\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02D (esc)
+  \x00\x00\x00\x00\x00\x00\x00\x05E\x00\x00\x00b\x9co\xd05 (esc)
+  l\r (no-eol) (esc)
+  \x0cI\xd4\xa9\xc5\x01|\xf0pC\xf5NX\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x95 \xee\xa7\x81\xbc\xca\x16\xc1\xe1Z\xcc\x0b\xa1C5\xa0\xe8\xe5\xba\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02E (esc)
+  \x00\x00\x00\x00\x00\x00\x00\x05H\x00\x00\x00b\x85\x00\x18\x9et\xa9\xe0G^\x82 \x93\xbc}\xb0\xd61\xae\xb0\xb4\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\xdeB\x19n\xbe\xe4.\xf2\x84\xb6x (esc)
+  \x87\xcd\xc9n\x8e\xaa\xb6\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02H (esc)
+  \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00 (no-eol) (esc)
+
+  $ hg unbundle2 < ../rev.hg2
+  adding changesets
+  adding manifests
+  adding file changes
+  added 0 changesets with 0 changes to 3 files
+  0 unread bytes
+  addchangegroup return: 1
+
+with reply
+
+  $ hg bundle2 --rev '8+7+5+4' --reply ../rev-rr.hg2
+  $ hg unbundle2 ../rev-reply.hg2 < ../rev-rr.hg2
+  0 unread bytes
+  addchangegroup return: 1
+
+  $ cat ../rev-reply.hg2
+  HG2X\x00\x00\x003\x15b2x:reply:changegroup\x00\x00\x00\x00\x00\x02\x0b\x01\x06\x01in-reply-to1return1\x00\x00\x00\x00\x00\x1f (esc)
+  b2x:output\x00\x00\x00\x01\x00\x01\x0b\x01in-reply-to1\x00\x00\x00dadding changesets (esc)
+  adding manifests
+  adding file changes
+  added 0 changesets with 0 changes to 3 files
+  \x00\x00\x00\x00\x00\x00 (no-eol) (esc)
+
+Real world exchange
+=====================
+
+
+clone --pull
+
+  $ cd ..
+  $ hg clone main other --pull --rev 9520eea781bc
+  adding changesets
+  adding manifests
+  adding file changes
+  added 2 changesets with 2 changes to 2 files
+  updating to branch default
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg -R other log -G
+  @  changeset:   1:9520eea781bc
+  |  tag:         tip
+  |  user:        Nicolas Dumazet <nicdumz.commits@gmail.com>
+  |  date:        Sat Apr 30 15:24:48 2011 +0200
+  |  summary:     E
+  |
+  o  changeset:   0:cd010b8cd998
+     user:        Nicolas Dumazet <nicdumz.commits@gmail.com>
+     date:        Sat Apr 30 15:24:48 2011 +0200
+     summary:     A
+  
+
+pull
+
+  $ hg -R other pull -r 24b6387c8c8c
+  pulling from $TESTTMP/main (glob)
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files (+1 heads)
+  (run 'hg heads' to see heads, 'hg merge' to merge)
+
+push
+
+  $ hg -R main push other --rev eea13746799a
+  pushing to other
+  searching for changes
+  remote: adding changesets
+  remote: adding manifests
+  remote: adding file changes
+  remote: added 1 changesets with 0 changes to 0 files (-1 heads)
+
+pull over ssh
+
+  $ hg -R other pull ssh://user@dummy/main -r 02de42196ebe --traceback
+  pulling from ssh://user@dummy/main
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files (+1 heads)
+  (run 'hg heads' to see heads, 'hg merge' to merge)
+
+pull over http
+
+  $ hg -R main serve -p $HGPORT -d --pid-file=main.pid -E main-error.log
+  $ cat main.pid >> $DAEMON_PIDS
+
+  $ hg -R other pull http://localhost:$HGPORT/ -r 42ccdea3bb16
+  pulling from http://localhost:$HGPORT/
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files (+1 heads)
+  (run 'hg heads .' to see heads, 'hg merge' to merge)
+  $ cat main-error.log
+
+push over ssh
+
+  $ hg -R main push ssh://user@dummy/other -r 5fddd98957c8
+  pushing to ssh://user@dummy/other
+  searching for changes
+  remote: adding changesets
+  remote: adding manifests
+  remote: adding file changes
+  remote: added 1 changesets with 1 changes to 1 files
+
+push over http
+
+  $ hg -R other serve -p $HGPORT2 -d --pid-file=other.pid -E other-error.log
+  $ cat other.pid >> $DAEMON_PIDS
+
+  $ hg -R main push http://localhost:$HGPORT2/ -r 32af7686d403
+  pushing to http://localhost:$HGPORT2/
+  searching for changes
+  remote: adding changesets
+  remote: adding manifests
+  remote: adding file changes
+  remote: added 1 changesets with 1 changes to 1 files
+  $ cat other-error.log
+
+Check final content.
+
+  $ hg -R other log -G
+  o  changeset:   7:32af7686d403
+  |  tag:         tip
+  |  user:        Nicolas Dumazet <nicdumz.commits@gmail.com>
+  |  date:        Sat Apr 30 15:24:48 2011 +0200
+  |  summary:     D
+  |
+  o  changeset:   6:5fddd98957c8
+  |  user:        Nicolas Dumazet <nicdumz.commits@gmail.com>
+  |  date:        Sat Apr 30 15:24:48 2011 +0200
+  |  summary:     C
+  |
+  o  changeset:   5:42ccdea3bb16
+  |  parent:      0:cd010b8cd998
+  |  user:        Nicolas Dumazet <nicdumz.commits@gmail.com>
+  |  date:        Sat Apr 30 15:24:48 2011 +0200
+  |  summary:     B
+  |
+  | o  changeset:   4:02de42196ebe
+  | |  parent:      2:24b6387c8c8c
+  | |  user:        Nicolas Dumazet <nicdumz.commits@gmail.com>
+  | |  date:        Sat Apr 30 15:24:48 2011 +0200
+  | |  summary:     H
+  | |
+  | | o  changeset:   3:eea13746799a
+  | |/|  parent:      2:24b6387c8c8c
+  | | |  parent:      1:9520eea781bc
+  | | |  user:        Nicolas Dumazet <nicdumz.commits@gmail.com>
+  | | |  date:        Sat Apr 30 15:24:48 2011 +0200
+  | | |  summary:     G
+  | | |
+  | o |  changeset:   2:24b6387c8c8c
+  |/ /   parent:      0:cd010b8cd998
+  | |    user:        Nicolas Dumazet <nicdumz.commits@gmail.com>
+  | |    date:        Sat Apr 30 15:24:48 2011 +0200
+  | |    summary:     F
+  | |
+  | @  changeset:   1:9520eea781bc
+  |/   user:        Nicolas Dumazet <nicdumz.commits@gmail.com>
+  |    date:        Sat Apr 30 15:24:48 2011 +0200
+  |    summary:     E
+  |
+  o  changeset:   0:cd010b8cd998
+     user:        Nicolas Dumazet <nicdumz.commits@gmail.com>
+     date:        Sat Apr 30 15:24:48 2011 +0200
+     summary:     A
+  
--- a/tests/test-cat.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-cat.t	Thu Apr 17 19:36:17 2014 -0400
@@ -32,3 +32,22 @@
   1
   3
 
+  $ mkdir tmp
+  $ hg cat --output tmp/HH_%H c
+  $ hg cat --output tmp/RR_%R c
+  $ hg cat --output tmp/h_%h c
+  $ hg cat --output tmp/r_%r c
+  $ hg cat --output tmp/%s_s c
+  $ hg cat --output tmp/%d%%_d c
+  $ hg cat --output tmp/%p_p c
+  $ hg log -r . --template "{rev}: {node|short}\n"
+  2: 45116003780e
+  $ find tmp -type f | sort
+  tmp/.%_d
+  tmp/HH_45116003780e3678b333fb2c99fa7d559c8457e9
+  tmp/RR_2
+  tmp/c_p
+  tmp/c_s
+  tmp/h_45116003780e
+  tmp/r_2
+
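The new cases cover cat's --output format specifiers: %H and %h expand to the
long and short changeset hash, %R and %r to the revision number, %s to the
file's basename, %d to its directory (hence the '.' in 'tmp/.%_d' for a file
at the repository root), %p to the repository-relative path, and %% to a
literal percent sign. A hypothetical re-implementation of the expansion,
matching the names listed by find above:

    import os

    def expandpattern(pat, node, rev, path):
        # hypothetical sketch of the '%' expansion this test exercises
        repl = {'%': '%',
                'H': node,                         # full 40-hex-digit hash
                'h': node[:12],                    # short-form hash
                'R': str(rev),                     # changeset revision number
                'r': str(rev),                     # zero-padded in the real code
                's': os.path.basename(path),
                'd': os.path.dirname(path) or '.',
                'p': path}
        out, i = [], 0
        while i < len(pat):
            if pat[i] == '%' and i + 1 < len(pat):
                out.append(repl.get(pat[i + 1], '%' + pat[i + 1]))
                i += 2
            else:
                out.append(pat[i])
                i += 1
        return ''.join(out)

For example, expandpattern('tmp/%d%%_d', node, 2, 'c') returns 'tmp/.%_d',
the first entry in the find output.
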
--- a/tests/test-check-code.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-check-code.t	Thu Apr 17 19:36:17 2014 -0400
@@ -123,6 +123,7 @@
   $ cat > python3-compat.py << EOF
   > foo <> bar
   > reduce(lambda a, b: a + b, [1, 2, 3, 4])
+  > dict(key=value)
   > EOF
   $ "$check_code" python3-compat.py
   python3-compat.py:1:
@@ -131,6 +132,9 @@
   python3-compat.py:2:
    > reduce(lambda a, b: a + b, [1, 2, 3, 4])
    reduce is not available in Python 3+
+  python3-compat.py:3:
+   > dict(key=value)
+   dict() is different in Py2 and 3 and is slower than {}
   [1]
 
   $ cat > is-op.py <<EOF
@@ -251,3 +255,32 @@
    warning: add two newlines after '.. note::'
   [1]
 
+  $ cat > ./map-inside-gettext.py <<EOF
+  > print _("map inside gettext %s" % v)
+  > 
+  > print _("concatenating " " by " " space %s" % v)
+  > print _("concatenating " + " by " + " '+' %s" % v)
+  > 
+  > print _("mapping operation on a different line %s"
+  >         % v)
+  > 
+  > print _(
+  >         "leading spaces inside of '(' %s" % v)
+  > EOF
+  $ "$check_code" ./map-inside-gettext.py
+  ./map-inside-gettext.py:1:
+   > print _("map inside gettext %s" % v)
+   don't use % inside _()
+  ./map-inside-gettext.py:3:
+   > print _("concatenating " " by " " space %s" % v)
+   don't use % inside _()
+  ./map-inside-gettext.py:4:
+   > print _("concatenating " + " by " + " '+' %s" % v)
+   don't use % inside _()
+  ./map-inside-gettext.py:6:
+   > print _("mapping operation on a different line %s"
+   don't use % inside _()
+  ./map-inside-gettext.py:9:
+   > print _(
+   don't use % inside _()
+  [1]
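
The rationale for the new warning is a standard gettext constraint: _() must
receive the literal msgid so the catalog lookup can find it; interpolating
first asks for a translation of an already-formatted string that no catalog
contains. The fix is always to translate first and interpolate afterwards:

    # translate the literal msgid first, then interpolate
    print _("map inside gettext %s") % v
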
--- a/tests/test-check-pyflakes.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-check-pyflakes.t	Thu Apr 17 19:36:17 2014 -0400
@@ -10,12 +10,10 @@
   setup.py:*: 'zlib' imported but unused (glob)
   setup.py:*: 'bz2' imported but unused (glob)
   setup.py:*: 'py2exe' imported but unused (glob)
-  tests/hghave.py:*: 'hgext' imported but unused (glob)
   tests/hghave.py:*: '_lsprof' imported but unused (glob)
   tests/hghave.py:*: 'publish_cmdline' imported but unused (glob)
   tests/hghave.py:*: 'pygments' imported but unused (glob)
   tests/hghave.py:*: 'ssl' imported but unused (glob)
-  contrib/win32/hgwebdir_wsgi.py:*: 'from isapi.install import *' used; unable to detect undefined names (glob)
-  hgext/inotify/linux/__init__.py:*: 'from _inotify import *' used; unable to detect undefined names (glob)
+  contrib/win32/hgwebdir_wsgi.py:93: 'from isapi.install import *' used; unable to detect undefined names (glob)
   
 
--- a/tests/test-command-template.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-command-template.t	Thu Apr 17 19:36:17 2014 -0400
@@ -45,7 +45,7 @@
 
   $ hg log --template '{join(file_copies, ",\n")}\n' -r .
   fourth (second)
-  $ hg log --template '{file_copies % "{source} -> {name}\n"}' -r .
+  $ hg log -T '{file_copies % "{source} -> {name}\n"}' -r .
   second -> fourth
 
 Quoting for ui.logtemplate
@@ -63,6 +63,29 @@
   $ echo 'logtemplate =' >> .hg/hgrc
   $ echo 'style =' >> .hg/hgrc
 
+Add some simple styles to settings
+
+  $ echo '[templates]' >> .hg/hgrc
+  $ printf 'simple = "{rev}\\n"\n' >> .hg/hgrc
+  $ printf 'simple2 = {rev}\\n\n' >> .hg/hgrc
+
+  $ hg log -l1 -Tsimple
+  8
+  $ hg log -l1 -Tsimple2
+  8
+
+Test templates and style maps in files:
+
+  $ echo "{rev}" > tmpl
+  $ hg log -l1 -T./tmpl
+  8
+  $ hg log -l1 -Tblah/blah
+  blah/blah (no-eol)
+
+  $ printf 'changeset = "{rev}\\n"\n' > map-simple
+  $ hg log -l1 -T./map-simple
+  8
+
 Default style is like normal output:
 
   $ hg log > log.out
@@ -84,7 +107,7 @@
 
 Compact style works:
 
-  $ hg log --style compact
+  $ hg log -Tcompact
   8[tip]   95c24699272e   2020-01-01 10:01 +0000   test
     third
   
@@ -1463,7 +1486,7 @@
   1: null+2
   0: null+1
 
-One common tag: longuest path wins:
+One common tag: longest path wins:
 
   $ hg tag -r 1 -m t1 -d '6 0' t1
   $ hg log --template '{rev}: {latesttag}+{latesttagdistance}\n'
@@ -1686,7 +1709,7 @@
   $ hg log -R a -r 8 --template '{files % r"{file}\n"}\n'
   fourth\nsecond\nthird\n
 
-Test string escapeing in nested expression:
+Test string escaping in nested expression:
 
   $ hg log -R a -r 8 --template '{ifeq(r"\x6e", if("1", "\x5c\x786e"), join(files, "\x5c\x786e"))}\n'
   fourth\x6esecond\x6ethird
@@ -1718,20 +1741,20 @@
   $ echo aa >> aa
   $ hg ci -u '{node|short}' -m 'desc to be wrapped desc to be wrapped'
 
-  $ hg log -r 1 --template '{fill(desc, "20", author, branch)}'
+  $ hg log -l1 --template '{fill(desc, "20", author, branch)}'
   {node|short}desc to
   text.{rev}be wrapped
   text.{rev}desc to be
   text.{rev}wrapped (no-eol)
-  $ hg log -r 1 --template '{fill(desc, "20", "{node|short}:", "text.{rev}:")}'
+  $ hg log -l1 --template '{fill(desc, "20", "{node|short}:", "text.{rev}:")}'
   bcc7ff960b8e:desc to
   text.1:be wrapped
   text.1:desc to be
   text.1:wrapped (no-eol)
 
-  $ hg log -r 1 --template '{sub(r"[0-9]", "-", author)}'
+  $ hg log -l 1 --template '{sub(r"[0-9]", "-", author)}'
   {node|short} (no-eol)
-  $ hg log -r 1 --template '{sub(r"[0-9]", "-", "{node|short}")}'
+  $ hg log -l 1 --template '{sub(r"[0-9]", "-", "{node|short}")}'
   bcc-ff---b-e (no-eol)
 
   $ cat >> .hg/hgrc <<EOF
@@ -1742,9 +1765,9 @@
   > text.{rev} = red
   > text.1 = green
   > EOF
-  $ hg log --color=always -r 1 --template '{label(branch, "text\n")}'
+  $ hg log --color=always -l 1 --template '{label(branch, "text\n")}'
   \x1b[0;31mtext\x1b[0m (esc)
-  $ hg log --color=always -r 1 --template '{label("text.{rev}", "text\n")}'
+  $ hg log --color=always -l 1 --template '{label("text.{rev}", "text\n")}'
   \x1b[0;32mtext\x1b[0m (esc)
 
 Test branches inside if statement:
@@ -1752,11 +1775,82 @@
   $ hg log -r 0 --template '{if(branches, "yes", "no")}\n'
   no
 
-  $ cd ..
+Test shortest(node) function:
+
+  $ echo b > b
+  $ hg ci -qAm b
+  $ hg log --template '{shortest(node)}\n'
+  e777
+  bcc7
+  f776
+  $ hg log --template '{shortest(node, 10)}\n'
+  e777603221
+  bcc7ff960b
+  f7769ec2ab
+
+Test pad function
+
+  $ hg log --template '{pad(rev, 20)} {author|user}\n'
+  2                    test
+  1                    {node|short}
+  0                    test
+
+  $ hg log --template '{pad(rev, 20, " ", True)} {author|user}\n'
+                     2 test
+                     1 {node|short}
+                     0 test
+
+  $ hg log --template '{pad(rev, 20, "-", False)} {author|user}\n'
+  2------------------- test
+  1------------------- {node|short}
+  0------------------- test
+
+Test ifcontains function
+
+  $ hg log --template '{rev} {ifcontains("a", file_adds, "added a", "did not add a")}\n'
+  2 did not add a
+  1 did not add a
+  0 added a
+
+Test revset function
+
+  $ hg log --template '{rev} {ifcontains(rev, revset("."), "current rev", "not current rev")}\n'
+  2 current rev
+  1 not current rev
+  0 not current rev
+
+  $ hg log --template '{rev} Parents: {revset("parents(%s)", rev)}\n'
+  2 Parents: 1
+  1 Parents: 0
+  0 Parents: 
+
+  $ hg log --template 'Rev: {rev}\n{revset("::%s", rev) % "Ancestor: {revision}\n"}\n'
+  Rev: 2
+  Ancestor: 0
+  Ancestor: 1
+  Ancestor: 2
+  
+  Rev: 1
+  Ancestor: 0
+  Ancestor: 1
+  
+  Rev: 0
+  Ancestor: 0
+  
+Test current bookmark templating
+
+  $ hg book foo
+  $ hg book bar
+  $ hg log --template "{rev} {bookmarks % '{bookmark}{ifeq(bookmark, current, \"*\")} '}\n"
+  2 bar* foo 
+  1 
+  0 
 
 Test stringify on sub expressions
 
+  $ cd ..
   $ hg log -R a -r 8 --template '{join(files, if("1", if("1", ", ")))}\n'
   fourth, second, third
   $ hg log -R a -r 8 --template '{strip(if("1", if("1", "-abc-")), if("1", if("1", "-")))}\n'
   abc
+
--- a/tests/test-commandserver.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-commandserver.py	Thu Apr 17 19:36:17 2014 -0400
@@ -51,7 +51,10 @@
         elif ch == 'L':
             writeblock(server, input.readline(data))
         elif ch == 'r':
-            return struct.unpack('>i', data)[0]
+            ret, = struct.unpack('>i', data)
+            if ret != 0:
+                print ' [%d]' % ret
+            return ret
         else:
             print "unexpected channel %c: %r" % (ch, data)
             if ch.isupper():
@@ -101,6 +104,9 @@
     # make sure --config doesn't stick
     runcommand(server, ['id'])
 
+    # negative return code should be masked
+    runcommand(server, ['id', '-runknown'])
+
 def inputeof(server):
     readchannel(server)
     server.stdin.write('runcommand\n')
@@ -267,12 +273,35 @@
 
     runcommand(server, ['up', 'null'])
     runcommand(server, ['phase', '-df', 'tip'])
-    os.system('hg debugobsolete `hg log -r tip --template {node}`')
+    cmd = 'hg debugobsolete `hg log -r tip --template {node}`'
+    if os.name == 'nt':
+        cmd = 'sh -c "%s"' % cmd # run in sh, not cmd.exe
+    os.system(cmd)
     runcommand(server, ['log', '--hidden'])
     runcommand(server, ['log'])
 
+def mqoutsidechanges(server):
+    readchannel(server)
+
+    # load repo.mq
+    runcommand(server, ['qapplied'])
+    os.system('hg qnew 0.diff')
+    # repo.mq should be invalidated
+    runcommand(server, ['qapplied'])
+
+    runcommand(server, ['qpop', '--all'])
+    os.system('hg qqueue --create foo')
+    # repo.mq should be recreated to point to new queue
+    runcommand(server, ['qqueue', '--active'])
+
+def startwithoutrepo(server):
+    readchannel(server)
+    runcommand(server, ['init', 'repo2'])
+    runcommand(server, ['id', '-R', 'repo2'])
+
 if __name__ == '__main__':
-    os.system('hg init')
+    os.system('hg init repo')
+    os.chdir('repo')
 
     check(hellomessage)
     check(unknowncommand)
@@ -301,3 +330,11 @@
     hgrc.write('[extensions]\nobs=obs.py\n')
     hgrc.close()
     check(obsolete)
+    hgrc = open('.hg/hgrc', 'a')
+    hgrc.write('[extensions]\nmq=\n')
+    hgrc.close()
+    check(mqoutsidechanges)
+
+    os.chdir('..')
+    check(hellomessage)
+    check(startwithoutrepo)
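
The client-side change above is what makes the non-zero exit statuses visible
in the .out expectations that follow: the command server's result channel
('r') carries one signed 32-bit big-endian integer. For reference, a sketch
of the framing the test's readchannel/writeblock helpers assume:

    import struct

    def readframe(fp):
        # one channel letter, a 32-bit big-endian length, then (for output
        # channels) that many payload bytes
        ch = fp.read(1)
        length, = struct.unpack('>I', fp.read(4))
        return ch, fp.read(length)

    # on the 'r' channel the payload itself is the return code:
    # ret, = struct.unpack('>i', data)

On the input channels ('I', 'L') the length is instead a request for up to
that many bytes from the client, which is why the test answers them with
writeblock rather than reading a payload.
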
--- a/tests/test-commandserver.py.out	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-commandserver.py.out	Thu Apr 17 19:36:17 2014 -0400
@@ -43,6 +43,9 @@
 000000000000
  runcommand id
 000000000000 tip
+ runcommand id -runknown
+abort: unknown revision 'unknown'!
+ [255]
 
 testing inputeof:
 
@@ -70,7 +73,7 @@
 testing localhgrc:
 
  runcommand showconfig
-bundle.mainreporoot=$TESTTMP
+bundle.mainreporoot=$TESTTMP/repo
 defaults.backout=-d "0 0"
 defaults.commit=-d "0 0"
 defaults.shelve=--date "0 0"
@@ -142,6 +145,7 @@
 
  runcommand phase -r . -p
 no phases changed
+ [1]
  runcommand commit -Am.
  runcommand rollback
 repository tip rolled back to revision 3 (undo commit)
@@ -222,3 +226,27 @@
 date:        Thu Jan 01 00:00:00 1970 +0000
 summary:     1
 
+
+testing mqoutsidechanges:
+
+ runcommand qapplied
+ runcommand qapplied
+0.diff
+ runcommand qpop --all
+popping 0.diff
+patch queue now empty
+ runcommand qqueue --active
+foo
+
+testing hellomessage:
+
+o, 'capabilities: getencoding runcommand\nencoding: ***'
+ runcommand id
+abort: there is no Mercurial repository here (.hg not found)
+ [255]
+
+testing startwithoutrepo:
+
+ runcommand init repo2
+ runcommand id -R repo2
+000000000000 tip
--- a/tests/test-commit-amend.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-commit-amend.t	Thu Apr 17 19:36:17 2014 -0400
@@ -827,3 +827,25 @@
   $ hg phase '.^::.'
   35: draft
   38: secret
+
+Test that amend with --edit invokes editor forcibly
+---------------------------------------------------
+
+  $ hg parents --template "{desc}\n"
+  amend as secret
+  $ HGEDITOR=cat hg commit --amend -m "editor should be suppressed"
+  $ hg parents --template "{desc}\n"
+  editor should be suppressed
+
+  $ HGEDITOR=cat hg commit --amend -m "editor should be invoked" --edit
+  editor should be invoked
+  
+  
+  HG: Enter commit message.  Lines beginning with 'HG:' are removed.
+  HG: Leave message empty to abort commit.
+  HG: --
+  HG: user: test
+  HG: branch 'silliness'
+  HG: changed obs.py
+  $ hg parents --template "{desc}\n"
+  editor should be invoked
--- a/tests/test-commit.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-commit.t	Thu Apr 17 19:36:17 2014 -0400
@@ -102,7 +102,8 @@
   $ echo foo >> foo
   $ echo fake >> .hg/requires
   $ hg commit -m bla
-  abort: unknown repository format: requires features 'fake' (upgrade Mercurial)!
+  abort: repository requires features unknown to this Mercurial: fake!
+  (see http://mercurial.selenic.com/wiki/MissingRequirement for more information)
   [255]
 
   $ cd ..
@@ -119,7 +120,18 @@
   $ hg add
   adding bar/bar (glob)
   adding foo/foo (glob)
-  $ hg ci -m commit-subdir-1 foo
+  $ HGEDITOR=cat hg ci -e -m commit-subdir-1 foo
+  commit-subdir-1
+  
+  
+  HG: Enter commit message.  Lines beginning with 'HG:' are removed.
+  HG: Leave message empty to abort commit.
+  HG: --
+  HG: user: test
+  HG: branch 'default'
+  HG: added foo/foo
+
+
   $ hg ci -m commit-subdir-2 bar
 
 subdir log 1
@@ -173,11 +185,23 @@
 dot and subdir commit test
 
   $ hg init test3
+  $ echo commit-foo-subdir > commit-log-test
   $ cd test3
   $ mkdir foo
   $ echo foo content > foo/plain-file
   $ hg add foo/plain-file
-  $ hg ci -m commit-foo-subdir foo
+  $ HGEDITOR=cat hg ci --edit -l ../commit-log-test foo
+  commit-foo-subdir
+  
+  
+  HG: Enter commit message.  Lines beginning with 'HG:' are removed.
+  HG: Leave message empty to abort commit.
+  HG: --
+  HG: user: test
+  HG: branch 'default'
+  HG: added foo/plain-file
+
+
   $ echo modified foo content > foo/plain-file
   $ hg ci -m commit-foo-dot .
 
--- a/tests/test-committer.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-committer.t	Thu Apr 17 19:36:17 2014 -0400
@@ -49,7 +49,8 @@
   $ echo "[ui]" > .hg/hgrc
   $ echo "username = " >> .hg/hgrc
   $ hg commit -m commit-1
-  abort: no username supplied (see "hg help config")
+  abort: no username supplied
+  (use "hg config --edit" to set your username)
   [255]
   $ rm .hg/hgrc
   $ hg commit -m commit-1 2>&1
--- a/tests/test-completion.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-completion.t	Thu Apr 17 19:36:17 2014 -0400
@@ -13,6 +13,7 @@
   cat
   clone
   commit
+  config
   copy
   diff
   export
@@ -43,7 +44,6 @@
   rollback
   root
   serve
-  showconfig
   status
   summary
   tag
@@ -199,7 +199,7 @@
   add: include, exclude, subrepos, dry-run
   annotate: rev, follow, no-follow, text, user, file, date, number, changeset, line-number, ignore-all-space, ignore-space-change, ignore-blank-lines, include, exclude
   clone: noupdate, updaterev, rev, branch, pull, uncompressed, ssh, remotecmd, insecure
-  commit: addremove, close-branch, amend, secret, include, exclude, message, logfile, date, user, subrepos
+  commit: addremove, close-branch, amend, secret, edit, include, exclude, message, logfile, date, user, subrepos
   diff: rev, change, text, git, nodates, show-function, reverse, ignore-all-space, ignore-space-change, ignore-blank-lines, unified, stat, include, exclude, subrepos
   export: output, switch-parent, rev, text, git, nodates
   forget: include, exclude
@@ -222,6 +222,7 @@
   branches: active, closed
   bundle: force, rev, branch, base, all, type, ssh, remotecmd, insecure
   cat: output, rev, decode, include, exclude
+  config: untrusted, edit, local, global
   copy: after, force, include, exclude, dry-run
   debugancestor: 
   debugbuilddag: mergeable-file, overwritten-file, new-file
@@ -250,7 +251,7 @@
   debugrebuilddirstate: rev
   debugrename: rev
   debugrevlog: changelog, manifest, dump
-  debugrevspec: 
+  debugrevspec: optimize
   debugsetparents: 
   debugsub: rev
   debugsuccessorssets: 
@@ -275,7 +276,6 @@
   revert: all, date, rev, no-backup, include, exclude, dry-run
   rollback: dry-run, force
   root: 
-  showconfig: untrusted
   tag: force, local, rev, remove, edit, message, date, user
   tags: 
   tip: patch, git, style, template
--- a/tests/test-convert-bzr.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-convert-bzr.t	Thu Apr 17 19:36:17 2014 -0400
@@ -102,7 +102,7 @@
   >   > ../bzr-timestamps
   $ cd ..
   $ hg -R source-hg log --template "{date|isodate}\n" > hg-timestamps
-  $ diff -u bzr-timestamps hg-timestamps
+  $ cmp bzr-timestamps hg-timestamps || diff -u bzr-timestamps hg-timestamps
   $ cd ..
 
 merge
--- a/tests/test-convert-filemap.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-convert-filemap.t	Thu Apr 17 19:36:17 2014 -0400
@@ -252,6 +252,24 @@
   $ hg --cwd foo-copied.repo debugrename copied
   copied renamed from foo:2ed2a3912a0b24502043eae84ee4b279c18b90dd
 
+verify the top-level 'include .' when there are no other includes:
+
+  $ echo "exclude something" > default.fmap
+  $ hg convert -q --filemap default.fmap -r1 source dummydest2
+  $ hg -R dummydest2 log --template '{rev} {node|short} {desc|firstline}\n'
+  1 61e22ca76c3b 1: add bar quux; copy foo to copied
+  0 c085cf2ee7fe 0: add foo baz dir/
+
+  $ echo "include somethingelse" >> default.fmap
+  $ hg convert -q --filemap default.fmap -r1 source dummydest3
+  $ hg -R dummydest3 log --template '{rev} {node|short} {desc|firstline}\n'
+
+  $ echo "include ." >> default.fmap
+  $ hg convert -q --filemap default.fmap -r1 source dummydest4
+  $ hg -R dummydest4 log --template '{rev} {node|short} {desc|firstline}\n'
+  1 61e22ca76c3b 1: add bar quux; copy foo to copied
+  0 c085cf2ee7fe 0: add foo baz dir/
+
 ensure that the filemap contains duplicated slashes (issue3612)
 
   $ cat > renames.fmap <<EOF
--- a/tests/test-convert-hg-sink.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-convert-hg-sink.t	Thu Apr 17 19:36:17 2014 -0400
@@ -119,7 +119,7 @@
   0 add baz
   $ cd new-filemap
   $ hg tags
-  tip                                2:6f4fd1df87fb
+  tip                                2:3c74706b1ff8
   some-tag                           0:ba8636729451
   $ cd ..
 
--- a/tests/test-convert-hg-svn.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-convert-hg-svn.t	Thu Apr 17 19:36:17 2014 -0400
@@ -103,3 +103,14 @@
   scanning source...
   sorting...
   converting...
+
+verify which shamap format we are storing and must be able to handle
+
+  $ cat svn-repo-hg/.hg/shamap
+  svn:????????-????-????-????-????????????@1 ???????????????????????????????????????? (glob)
+  svn:????????-????-????-????-????????????@2 ???????????????????????????????????????? (glob)
+  svn:????????-????-????-????-????????????@2 ???????????????????????????????????????? (glob)
+  $ cat svn-repo-wc/.svn/hg-shamap
+  ???????????????????????????????????????? 1 (glob)
+  ???????????????????????????????????????? svn:????????-????-????-????-????????????@2 (glob)
+  ???????????????????????????????????????? svn:????????-????-????-????-????????????@2 (glob)
--- a/tests/test-convert-svn-source.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-convert-svn-source.t	Thu Apr 17 19:36:17 2014 -0400
@@ -198,11 +198,12 @@
   extra:       convert_revision=svn:........-....-....-....-............/proj B/mytrunk@1 (re)
   $ cd ..
 
-Test converting empty heads (issue3347)
+Test converting empty heads (issue3347).
+Also tests getting logs directly without debugsvnlog.
 
   $ svnadmin create svn-empty
   $ svnadmin load -q svn-empty < "$TESTDIR/svn/empty.svndump"
-  $ hg --config convert.svn.trunk= convert svn-empty
+  $ hg --config convert.svn.trunk= --config convert.svn.debugsvnlog=0 convert svn-empty
   assuming destination svn-empty-hg
   initializing destination svn-empty-hg repository
   scanning source...
--- a/tests/test-copy-move-merge.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-copy-move-merge.t	Thu Apr 17 19:36:17 2014 -0400
@@ -31,17 +31,17 @@
   resolving manifests
    branchmerge: True, force: False, partial: False
    ancestor: b8bf91eeebbc, local: add3f11052fa+, remote: 17c05bb7fcb6
-   a: remote moved to b -> m
+   b: remote moved from a -> m
     preserving a for resolve of b
-   a: remote moved to c -> m
+   c: remote moved from a -> m
     preserving a for resolve of c
   removing a
-  updating: a 1/2 files (50.00%)
+  updating: b 1/2 files (50.00%)
   picked tool 'internal:merge' for b (binary False symlink False)
   merging a and b to b
   my b@add3f11052fa+ other b@17c05bb7fcb6 ancestor a@b8bf91eeebbc
    premerge successful
-  updating: a 2/2 files (100.00%)
+  updating: c 2/2 files (100.00%)
   picked tool 'internal:merge' for c (binary False symlink False)
   merging a and c to c
   my c@add3f11052fa+ other c@17c05bb7fcb6 ancestor a@b8bf91eeebbc
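The expected-output changes above follow from re-keying merge actions by
the file being produced rather than the copy source, so the progress
lines name the destination file. A hedged illustration of the new shape
(the real merge code carries richer tuples than this):

    # 'actions' maps destination file -> (action, copy source); the names
    # mirror the test above, not mercurial.merge internals.
    actions = {
        'b': ('m', 'a'),  # b: remote moved from a
        'c': ('m', 'a'),  # c: remote moved from a
    }
    for i, dest in enumerate(sorted(actions), 1):
        print('updating: %s %d/%d files' % (dest, i, len(actions)))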
--- a/tests/test-default-push.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-default-push.t	Thu Apr 17 19:36:17 2014 -0400
@@ -15,6 +15,16 @@
   $ echo b >> b/a
   $ hg --cwd b ci -mb
 
+Push should provide a hint when neither 'default' nor 'default-push' is set:
+  $ cd c
+  $ hg push --config paths.default=
+  pushing to default-push
+  abort: default repository not configured!
+  (see the "path" section in "hg help config")
+  [255]
+
+  $ cd ..
+
 Push should push to 'default' when 'default-push' not set:
 
   $ hg --cwd b push
--- a/tests/test-demandimport.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-demandimport.py	Thu Apr 17 19:36:17 2014 -0400
@@ -37,3 +37,9 @@
 print "re =", f(re)
 print "re.stderr =", f(re.stderr)
 print "re =", f(re)
+
+demandimport.disable()
+os.environ['HGDEMANDIMPORT'] = 'disable'
+demandimport.enable()
+from mercurial import node
+print "node =", f(node)
--- a/tests/test-demandimport.py.out	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-demandimport.py.out	Thu Apr 17 19:36:17 2014 -0400
@@ -13,3 +13,4 @@
 re = <unloaded module 'sys'>
 re.stderr = <open file '<whatever>', mode 'w' at 0x?>
 re = <proxied module 'sys'>
+node = <module 'mercurial.node' from '?'>
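The new test lines check that HGDEMANDIMPORT=disable turns
demandimport.enable() into a no-op, so later imports yield real modules
rather than lazy proxies. A standalone sketch of the same check,
assuming a Mercurial installation on sys.path:

    import os
    from mercurial import demandimport

    demandimport.disable()
    os.environ['HGDEMANDIMPORT'] = 'disable'
    demandimport.enable()          # no-op while the variable is set
    from mercurial import node
    print(repr(node))              # a real module, not an unloaded proxy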
--- a/tests/test-doctest.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-doctest.py	Thu Apr 17 19:36:17 2014 -0400
@@ -15,11 +15,13 @@
 testmod('mercurial.dagparser', optionflags=doctest.NORMALIZE_WHITESPACE)
 testmod('mercurial.dispatch')
 testmod('mercurial.encoding')
+testmod('mercurial.hg')
 testmod('mercurial.hgweb.hgwebdir_mod')
 testmod('mercurial.match')
 testmod('mercurial.minirst')
 testmod('mercurial.revset')
 testmod('mercurial.store')
+testmod('mercurial.subrepo')
 testmod('mercurial.templatefilters')
 testmod('mercurial.ui')
 testmod('mercurial.url')
@@ -27,3 +29,4 @@
 testmod('mercurial.util', testtarget='platform')
 testmod('hgext.convert.cvsps')
 testmod('hgext.convert.filemap')
+testmod('hgext.convert.subversion')
--- a/tests/test-double-merge.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-double-merge.t	Thu Apr 17 19:36:17 2014 -0400
@@ -35,11 +35,11 @@
   resolving manifests
    branchmerge: True, force: False, partial: False
    ancestor: e6dc8efe11cc, local: 6a0df1dad128+, remote: 484bf6903104
-   foo: remote copied to bar -> m
+   bar: remote copied from foo -> m
     preserving foo for resolve of bar
    foo: versions differ -> m
     preserving foo for resolve of foo
-  updating: foo 1/2 files (50.00%)
+  updating: bar 1/2 files (50.00%)
   picked tool 'internal:merge' for bar (binary False symlink False)
   merging foo and bar to bar
   my bar@6a0df1dad128+ other bar@484bf6903104 ancestor foo@e6dc8efe11cc
--- a/tests/test-duplicateoptions.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-duplicateoptions.py	Thu Apr 17 19:36:17 2014 -0400
@@ -1,7 +1,7 @@
 import os
 from mercurial import ui, commands, extensions
 
-ignore = set(['highlight', 'inotify', 'win32text', 'factotum'])
+ignore = set(['highlight', 'win32text', 'factotum'])
 
 if os.name != 'nt':
     ignore.add('win32mbcs')
--- a/tests/test-encoding-textwrap.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-encoding-textwrap.t	Thu Apr 17 19:36:17 2014 -0400
@@ -238,7 +238,7 @@
   
   use "hg -v help show_ambig_ru" to show the global options
 
-(2-2-4) display Russian ambiguous-width charactes in utf-8
+(2-2-4) display Russian ambiguous-width characters in utf-8
 
   $ COLUMNS=60 HGENCODINGAMBIGUOUS=wide hg --encoding utf-8 --config extensions.show=./show.py help show_ambig_ru
   hg show_ambig_ru
--- a/tests/test-filelog.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-filelog.py	Thu Apr 17 19:36:17 2014 -0400
@@ -13,7 +13,7 @@
 def addrev(text, renamed=False):
     if renamed:
         # data doesn't matter. Just make sure filelog.renamed() returns True
-        meta = dict(copyrev=hex(nullid), copy='bar')
+        meta = {'copyrev': hex(nullid), 'copy': 'bar'}
     else:
         meta = {}
 
--- a/tests/test-fncache.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-fncache.t	Thu Apr 17 19:36:17 2014 -0400
@@ -177,4 +177,62 @@
 
   $ cd ..
 
+Aborting lock does not prevent fncache writes
 
+  $ cat > exceptionext.py <<EOF
+  > import os
+  > from mercurial import commands, util
+  > from mercurial.extensions import wrapfunction
+  > 
+  > def lockexception(orig, vfs, lockname, wait, releasefn, acquirefn, desc):
+  >     def releasewrap():
+  >         raise util.Abort("forced lock failure")
+  >     return orig(vfs, lockname, wait, releasewrap, acquirefn, desc)
+  > 
+  > def reposetup(ui, repo):
+  >     wrapfunction(repo, '_lock', lockexception)
+  > 
+  > cmdtable = {}
+  > 
+  > EOF
+  $ extpath=`pwd`/exceptionext.py
+  $ hg init fncachetxn
+  $ cd fncachetxn
+  $ printf "[extensions]\nexceptionext=$extpath\n" >> .hg/hgrc
+  $ touch y
+  $ hg ci -qAm y
+  abort: forced lock failure
+  [255]
+  $ cat .hg/store/fncache
+  data/y.i
+
+Aborting transaction prevents fncache change
+
+  $ cat > ../exceptionext.py <<EOF
+  > import os
+  > from mercurial import commands, util, transaction
+  > from mercurial.extensions import wrapfunction
+  > 
+  > def wrapper(orig, self, *args, **kwargs):
+  >     origonclose = self.onclose
+  >     def onclose():
+  >         origonclose()
+  >         raise util.Abort("forced transaction failure")
+  >     self.onclose = onclose
+  >     return orig(self, *args, **kwargs)
+  > 
+  > def uisetup(ui):
+  >     wrapfunction(transaction.transaction, 'close', wrapper)
+  > 
+  > cmdtable = {}
+  > 
+  > EOF
+  $ rm "${extpath}c"
+  $ touch z
+  $ hg ci -qAm z
+  transaction abort!
+  rollback completed
+  abort: forced transaction failure
+  [255]
+  $ cat .hg/store/fncache
+  data/y.i
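Both injected extensions above rely on the same wrapping primitive:
mercurial.extensions.wrapfunction replaces an attribute with a wrapper
that receives the original callable as its first argument. A generic
sketch of the pattern; the wrapped target is arbitrary here, chosen only
because util.version takes no arguments:

    from mercurial import extensions, util

    def wrapper(orig, *args, **kwargs):
        # observe or alter arguments here, then call through
        return orig(*args, **kwargs)

    def uisetup(ui):
        extensions.wrapfunction(util, 'version', wrapper)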
--- a/tests/test-gendoc.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-gendoc.t	Thu Apr 17 19:36:17 2014 -0400
@@ -3,7 +3,7 @@
   $ "$TESTDIR/hghave" docutils || exit 80
   $ HGENCODING=UTF-8
   $ export HGENCODING
-  $ { echo C; find "$TESTDIR/../i18n" -name "*.po" | sort; } | while read PO; do
+  $ { echo C; ls "$TESTDIR/../i18n"/*.po | sort; } | while read PO; do
   >     LOCALE=`basename "$PO" .po`
   >     echo
   >     echo "% extracting documentation from $LOCALE"
--- a/tests/test-globalopts.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-globalopts.t	Thu Apr 17 19:36:17 2014 -0400
@@ -297,6 +297,7 @@
    cat           output the current or given revision of files
    clone         make a copy of an existing repository
    commit        commit the specified files or all outstanding changes
+   config        show combined config settings from all hgrc files
    copy          mark files as copied for the next commit
    diff          diff repository (or selected files)
    export        dump the header and diffs for one or more changesets
@@ -326,7 +327,6 @@
    revert        restore files to their checkout state
    root          print the root (top) of the current working directory
    serve         start stand-alone webserver
-   showconfig    show combined config settings from all hgrc files
    status        show changed files in the working directory
    summary       summarize working directory state
    tag           add one or more tags for the current or given revision
@@ -379,6 +379,7 @@
    cat           output the current or given revision of files
    clone         make a copy of an existing repository
    commit        commit the specified files or all outstanding changes
+   config        show combined config settings from all hgrc files
    copy          mark files as copied for the next commit
    diff          diff repository (or selected files)
    export        dump the header and diffs for one or more changesets
@@ -408,7 +409,6 @@
    revert        restore files to their checkout state
    root          print the root (top) of the current working directory
    serve         start stand-alone webserver
-   showconfig    show combined config settings from all hgrc files
    status        show changed files in the working directory
    summary       summarize working directory state
    tag           add one or more tags for the current or given revision
--- a/tests/test-glog.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-glog.t	Thu Apr 17 19:36:17 2014 -0400
@@ -1853,7 +1853,7 @@
   |  date:        Thu Jan 01 00:00:00 1970 +0000
   |  summary:     copy a b
   |
-  |   a |  0
+  |   b |  0
   |   1 files changed, 0 insertions(+), 0 deletions(-)
   |
 
@@ -1886,7 +1886,7 @@
   |  date:        Thu Jan 01 00:00:00 1970 +0000
   |  summary:     copy a b
   |
-  |   a |  0
+  |   b |  0
   |   1 files changed, 0 insertions(+), 0 deletions(-)
   |
   o  changeset:   0:f8035bb17114
@@ -1972,8 +1972,9 @@
   +++ glog.nodes	* (glob)
   @@ -1,3 +1,3 @@
   -nodetag 6
+  -nodetag 7
    nodetag 8
-   nodetag 7
+  +nodetag 7
   +nodetag 6
 
 Test --follow-first and forward --rev
@@ -1988,8 +1989,9 @@
   +++ glog.nodes	* (glob)
   @@ -1,3 +1,3 @@
   -nodetag 6
+  -nodetag 7
    nodetag 8
-   nodetag 7
+  +nodetag 7
   +nodetag 6
 
 Test --follow and backward --rev
--- a/tests/test-graft.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-graft.t	Thu Apr 17 19:36:17 2014 -0400
@@ -137,7 +137,7 @@
   resolving manifests
    branchmerge: True, force: True, partial: False
    ancestor: 68795b066622, local: ef0ef43d49e7+, remote: 5d205f8b35b6
-   b: local copied/moved to a -> m
+   b: local copied/moved from a -> m
     preserving b for resolve of b
   updating: b 1/1 files (100.00%)
   picked tool 'internal:merge' for b (binary False symlink False)
@@ -150,6 +150,7 @@
   resolving manifests
    branchmerge: True, force: True, partial: False
    ancestor: 4c60f11aa304, local: 6b9e5368ca4e+, remote: 97f8bfe72746
+   b: keep -> k
    e: remote is newer -> g
   getting e
   updating: e 1/1 files (100.00%)
@@ -159,6 +160,7 @@
   resolving manifests
    branchmerge: True, force: True, partial: False
    ancestor: 4c60f11aa304, local: 1905859650ec+, remote: 9c233e8e184d
+   b: keep -> k
    d: remote is newer -> g
    e: versions differ -> m
     preserving e for resolve of e
--- a/tests/test-grep.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-grep.t	Thu Apr 17 19:36:17 2014 -0400
@@ -38,7 +38,7 @@
   >     --color=always port port
   \x1b[0;35mport\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;32m4\x1b[0m\x1b[0;36m:\x1b[0mex\x1b[0;31;1mport\x1b[0m (esc)
   \x1b[0;35mport\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;32m4\x1b[0m\x1b[0;36m:\x1b[0mva\x1b[0;31;1mport\x1b[0might (esc)
-  \x1b[0;35mport\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;32m4\x1b[0m\x1b[0;36m:\x1b[0mim\x1b[0;31;1mport\x1b[0m/export (esc)
+  \x1b[0;35mport\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;32m4\x1b[0m\x1b[0;36m:\x1b[0mim\x1b[0;31;1mport\x1b[0m/ex\x1b[0;31;1mport\x1b[0m (esc)
 
 all
 
@@ -118,7 +118,7 @@
 
   $ cd ..
 
-Issue685: trackback in grep -r after rename
+Issue685: traceback in grep -r after rename
 
 Got a traceback when using grep on a single
 revision with renamed files.
--- a/tests/test-help.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-help.t	Thu Apr 17 19:36:17 2014 -0400
@@ -62,6 +62,7 @@
    cat           output the current or given revision of files
    clone         make a copy of an existing repository
    commit        commit the specified files or all outstanding changes
+   config        show combined config settings from all hgrc files
    copy          mark files as copied for the next commit
    diff          diff repository (or selected files)
    export        dump the header and diffs for one or more changesets
@@ -91,7 +92,6 @@
    revert        restore files to their checkout state
    root          print the root (top) of the current working directory
    serve         start stand-alone webserver
-   showconfig    show combined config settings from all hgrc files
    status        show changed files in the working directory
    summary       summarize working directory state
    tag           add one or more tags for the current or given revision
@@ -138,6 +138,7 @@
    cat           output the current or given revision of files
    clone         make a copy of an existing repository
    commit        commit the specified files or all outstanding changes
+   config        show combined config settings from all hgrc files
    copy          mark files as copied for the next commit
    diff          diff repository (or selected files)
    export        dump the header and diffs for one or more changesets
@@ -167,7 +168,6 @@
    revert        restore files to their checkout state
    root          print the root (top) of the current working directory
    serve         start stand-alone webserver
-   showconfig    show combined config settings from all hgrc files
    status        show changed files in the working directory
    summary       summarize working directory state
    tag           add one or more tags for the current or given revision
@@ -198,6 +198,86 @@
    templating    Template Usage
    urls          URL Paths
 
+Test extension help:
+  $ hg help extensions --config extensions.rebase= --config extensions.children=
+  Using Additional Features
+  """""""""""""""""""""""""
+  
+      Mercurial has the ability to add new features through the use of
+      extensions. Extensions may add new commands, add options to existing
+      commands, change the default behavior of commands, or implement hooks.
+  
+      To enable the "foo" extension, either shipped with Mercurial or in the
+      Python search path, create an entry for it in your configuration file,
+      like this:
+  
+        [extensions]
+        foo =
+  
+      You may also specify the full path to an extension:
+  
+        [extensions]
+        myfeature = ~/.hgext/myfeature.py
+  
+      See "hg help config" for more information on configuration files.
+  
+      Extensions are not loaded by default for a variety of reasons: they can
+      increase startup overhead; they may be meant for advanced usage only; they
+      may provide potentially dangerous abilities (such as letting you destroy
+      or modify history); they might not be ready for prime time; or they may
+      alter some usual behaviors of stock Mercurial. It is thus up to the user
+      to activate extensions as needed.
+  
+      To explicitly disable an extension enabled in a configuration file of
+      broader scope, prepend its path with !:
+  
+        [extensions]
+        # disabling extension bar residing in /path/to/extension/bar.py
+        bar = !/path/to/extension/bar.py
+        # ditto, but no path was supplied for extension baz
+        baz = !
+  
+      enabled extensions:
+  
+       children      command to display child changesets (DEPRECATED)
+       rebase        command to move sets of revisions to a different ancestor
+  
+      disabled extensions:
+  
+       acl           hooks for controlling repository access
+       blackbox      log repository events to a blackbox for debugging
+       bugzilla      hooks for integrating with the Bugzilla bug tracker
+       churn         command to display statistics about repository history
+       color         colorize output from some commands
+       convert       import revisions from foreign VCS repositories into
+                     Mercurial
+       eol           automatically manage newlines in repository files
+       extdiff       command to allow external programs to compare revisions
+       factotum      http authentication with factotum
+       gpg           commands to sign and verify changesets
+       hgcia         hooks for integrating with the CIA.vc notification service
+       hgk           browse the repository in a graphical way
+       highlight     syntax highlighting for hgweb (requires Pygments)
+       histedit      interactive history editing
+       keyword       expand keywords in tracked files
+       largefiles    track large binary files
+       mq            manage a stack of patches
+       notify        hooks for sending email push notifications
+       pager         browse command output with an external pager
+       patchbomb     command to send changesets as (a series of) patch emails
+       progress      show progress bars for some actions
+       purge         command to delete untracked files from the working
+                     directory
+       record        commands to interactively select changes for
+                     commit/qrefresh
+       relink        recreates hardlinks between repository clones
+       schemes       extend schemes with shortcuts to repository swarms
+       share         share a common history between several working directories
+       shelve        save and restore changes to the working directory
+       strip         strip changesets and their descendents from history
+       transplant    command to transplant changesets from another branch
+       win32mbcs     allow the use of MBCS paths with problematic encodings
+       zeroconf      discover and advertise repositories on the local network
 Test short command list with verbose option
 
   $ hg -v help shortlist
@@ -483,7 +563,7 @@
         ! = missing (deleted by non-hg command, but still tracked)
         ? = not tracked
         I = ignored
-          = origin of the previous file listed as A (added)
+          = origin of the previous file (with --copies)
   
       Returns 0 on success.
   
@@ -569,6 +649,7 @@
   use "hg help" for the full list of commands or "hg -v" for details
   [255]
 
+
   $ cat > helpext.py <<EOF
   > import os
   > from mercurial import commands
@@ -577,7 +658,11 @@
   >     pass
   > 
   > cmdtable = {
-  >     "nohelp": (nohelp, [], "hg nohelp"),
+  >     "debugoptDEP": (nohelp, [('', 'dopt', None, 'option is DEPRECATED')],),
+  >     "nohelp": (nohelp, [('', 'longdesc', 3, 'x'*90),
+  >                         ('n', '', None, 'normal desc'),
+  >                         ('', 'newline', '', 'line1\nline2'),
+  >                        ], "hg nohelp"),
   > }
   > 
   > commands.norepo += ' nohelp'
@@ -592,6 +677,13 @@
   
   (no help text available)
   
+  options:
+  
+      --longdesc VALUE xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+                       xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx (default: 3)
+   -n --               normal desc
+      --newline VALUE  line1 line2
+  
   use "hg -v help nohelp" to show the global options
 
   $ hg help -k nohelp
@@ -623,6 +715,7 @@
    cat           output the current or given revision of files
    clone         make a copy of an existing repository
    commit        commit the specified files or all outstanding changes
+   config        show combined config settings from all hgrc files
    copy          mark files as copied for the next commit
    diff          diff repository (or selected files)
    export        dump the header and diffs for one or more changesets
@@ -652,7 +745,6 @@
    revert        restore files to their checkout state
    root          print the root (top) of the current working directory
    serve         start stand-alone webserver
-   showconfig    show combined config settings from all hgrc files
    status        show changed files in the working directory
    summary       summarize working directory state
    tag           add one or more tags for the current or given revision
@@ -690,6 +782,67 @@
   use "hg -v help" to show builtin aliases and global options
 
 
+Test list of internal help commands
+
+  $ hg help debug
+  debug commands (internal and unsupported):
+  
+   debugancestor
+                 find the ancestor revision of two revisions in a given index
+   debugbuilddag
+                 builds a repo with a given DAG from scratch in the current
+                 empty repo
+   debugbundle   lists the contents of a bundle
+   debugcheckstate
+                 validate the correctness of the current dirstate
+   debugcommands
+                 list all available commands and options
+   debugcomplete
+                 returns the completion list associated with the given command
+   debugdag      format the changelog or an index DAG as a concise textual
+                 description
+   debugdata     dump the contents of a data file revision
+   debugdate     parse and display a date
+   debugdirstate
+                 show the contents of the current dirstate
+   debugdiscovery
+                 runs the changeset discovery protocol in isolation
+   debugfileset  parse and apply a fileset specification
+   debugfsinfo   show information detected about current filesystem
+   debuggetbundle
+                 retrieves a bundle from a repo
+   debugignore   display the combined ignore pattern
+   debugindex    dump the contents of an index file
+   debugindexdot
+                 dump an index DAG as a graphviz dot file
+   debuginstall  test Mercurial installation
+   debugknown    test whether node ids are known to a repo
+   debuglabelcomplete
+                 complete "labels" - tags, open branch names, bookmark names
+   debugobsolete
+                 create arbitrary obsolete marker
+   debugoptDEP   (no help text available)
+   debugpathcomplete
+                 complete part or all of a tracked path
+   debugpushkey  access the pushkey key/value protocol
+   debugpvec     (no help text available)
+   debugrebuilddirstate
+                 rebuild the dirstate as it would look like for the given
+                 revision
+   debugrename   dump rename information
+   debugrevlog   show data and statistics about a revlog
+   debugrevspec  parse and apply a revision specification
+   debugsetparents
+                 manually set the parents of the current working directory
+   debugsub      (no help text available)
+   debugsuccessorssets
+                 show set of successors for revision
+   debugwalk     show how files match on given patterns
+   debugwireargs
+                 (no help text available)
+  
+  use "hg -v help debug" to show builtin aliases and global options
+
 
 Test list of commands with command with no help text
 
@@ -702,6 +855,34 @@
   
   use "hg -v help helpext" to show builtin aliases and global options
 
+
+test deprecated option is hidden in command help
+  $ hg help debugoptDEP
+  hg debugoptDEP
+  
+  (no help text available)
+  
+  options:
+  
+  use "hg -v help debugoptDEP" to show the global options
+
+test deprecated option is shown with -v
+  $ hg help -v debugoptDEP | grep dopt
+    --dopt option is DEPRECATED
+
+#if gettext
+test deprecated option is hidden in translated help when its description is untranslated
+(use many globs to avoid failing on changed translations)
+  $ LANGUAGE=sv hg help debugoptDEP
+  hg debugoptDEP
+  
+  (*) (glob)
+  
+  flaggor:
+  
+  *"hg -v help debugoptDEP"* (glob)
+#endif
+
 Test a help topic
 
   $ hg help revs
@@ -1259,6 +1440,13 @@
   output the current or given revision of files
   </td></tr>
   <tr><td>
+  <a href="/help/config">
+  config
+  </a>
+  </td><td>
+  show combined config settings from all hgrc files
+  </td></tr>
+  <tr><td>
   <a href="/help/copy">
   copy
   </a>
@@ -1399,13 +1587,6 @@
   print the root (top) of the current working directory
   </td></tr>
   <tr><td>
-  <a href="/help/showconfig">
-  showconfig
-  </a>
-  </td><td>
-  show combined config settings from all hgrc files
-  </td></tr>
-  <tr><td>
   <a href="/help/tag">
   tag
   </a>
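The debugoptDEP checks above rest on a help-formatting convention: an
option whose description contains the string 'DEPRECATED' is omitted
from plain 'hg help <cmd>' output and shown only under -v. A sketch of a
cmdtable entry using that convention (command and flag names are made up
for illustration):

    def mycmd(ui):
        pass

    cmdtable = {
        "mycmd": (mycmd,
                  [('', 'oldflag', None, 'option is DEPRECATED'),
                   ('n', 'newflag', None, 'replacement flag')],
                  "hg mycmd"),
    }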
--- a/tests/test-hgrc.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-hgrc.t	Thu Apr 17 19:36:17 2014 -0400
@@ -141,7 +141,7 @@
   $ hg showconfig
   read config from: $TESTTMP/hgrc
   $TESTTMP/hgrc:13: alias.log=log -g
-  none: bundle.mainreporoot=$TESTTMP
+  repo: bundle.mainreporoot=$TESTTMP
   $TESTTMP/hgrc:11: defaults.identify=-n
   $TESTTMP/hgrc:2: ui.debug=true
   $TESTTMP/hgrc:3: ui.fallbackencoding=ASCII
@@ -157,11 +157,11 @@
   $ HGPLAIN=; export HGPLAIN
   $ hg showconfig --config ui.traceback=True --debug
   read config from: $TESTTMP/hgrc
-  none: bundle.mainreporoot=$TESTTMP
-  none: ui.traceback=True
-  none: ui.verbose=False
-  none: ui.debug=True
-  none: ui.quiet=False
+  repo: bundle.mainreporoot=$TESTTMP
+  --config: ui.traceback=True
+  --verbose: ui.verbose=False
+  --debug: ui.debug=True
+  --quiet: ui.quiet=False
 
 plain mode with exceptions
 
@@ -175,29 +175,40 @@
   $ hg showconfig --config ui.traceback=True --debug
   plain: True
   read config from: $TESTTMP/hgrc
-  none: bundle.mainreporoot=$TESTTMP
+  repo: bundle.mainreporoot=$TESTTMP
   $TESTTMP/hgrc:15: extensions.plain=./plain.py
-  none: ui.traceback=True
-  none: ui.verbose=False
-  none: ui.debug=True
-  none: ui.quiet=False
+  --config: ui.traceback=True
+  --verbose: ui.verbose=False
+  --debug: ui.debug=True
+  --quiet: ui.quiet=False
   $ unset HGPLAIN
   $ hg showconfig --config ui.traceback=True --debug
   plain: True
   read config from: $TESTTMP/hgrc
-  none: bundle.mainreporoot=$TESTTMP
+  repo: bundle.mainreporoot=$TESTTMP
   $TESTTMP/hgrc:15: extensions.plain=./plain.py
-  none: ui.traceback=True
-  none: ui.verbose=False
-  none: ui.debug=True
-  none: ui.quiet=False
+  --config: ui.traceback=True
+  --verbose: ui.verbose=False
+  --debug: ui.debug=True
+  --quiet: ui.quiet=False
   $ HGPLAINEXCEPT=i18n; export HGPLAINEXCEPT
   $ hg showconfig --config ui.traceback=True --debug
   plain: True
   read config from: $TESTTMP/hgrc
-  none: bundle.mainreporoot=$TESTTMP
+  repo: bundle.mainreporoot=$TESTTMP
   $TESTTMP/hgrc:15: extensions.plain=./plain.py
-  none: ui.traceback=True
-  none: ui.verbose=False
-  none: ui.debug=True
-  none: ui.quiet=False
+  --config: ui.traceback=True
+  --verbose: ui.verbose=False
+  --debug: ui.debug=True
+  --quiet: ui.quiet=False
+
+source of paths is not mangled
+
+  $ cat >> $HGRCPATH <<EOF
+  > [paths]
+  > foo = bar
+  > EOF
+  $ hg showconfig --debug paths
+  plain: True
+  read config from: $TESTTMP/hgrc
+  $TESTTMP/hgrc:17: paths.foo=$TESTTMP/bar
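The showconfig output now labels where each value originated instead of
printing 'none': file:line for hgrc entries, 'repo' for values set by
the repository, and flag names like '--config' or '--debug' for values
set on the command line. A hedged sketch of the underlying mechanism,
assuming ui.setconfig accepts a source argument as in this release:

    from mercurial import ui as uimod

    u = uimod.ui()
    u.setconfig('bundle', 'mainreporoot', '/repo', 'repo')
    print(u.configsource('bundle', 'mainreporoot'))  # -> repo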
--- a/tests/test-hgweb-commands.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-hgweb-commands.t	Thu Apr 17 19:36:17 2014 -0400
@@ -62,7 +62,7 @@
    <updated>1970-01-01T00:00:00+00:00</updated>
   
    <entry>
-    <title>branch commit with null character: </title>
+    <title>[unstable] branch commit with null character: </title>
     <id>http://*:$HGPORT/#changeset-cad8025a2e87f88c06259790adfa15acb4080123</id> (glob)
     <link href="http://*:$HGPORT/rev/cad8025a2e87"/> (glob)
     <author>
@@ -72,13 +72,40 @@
     <updated>1970-01-01T00:00:00+00:00</updated>
     <published>1970-01-01T00:00:00+00:00</published>
     <content type="xhtml">
-     <div xmlns="http://www.w3.org/1999/xhtml">
-      <pre xml:space="preserve">branch commit with null character: </pre>
-     </div>
+  	<table xmlns="http://www.w3.org/1999/xhtml">
+  	<tr>
+  		<th style="text-align:left;">changeset</th>
+  		<td>cad8025a2e87</td>
+                </tr>
+                <tr>
+                                <th style="text-align:left;">branch</th>
+                                <td>unstable</td>
+                </tr>
+                <tr>
+                                <th style="text-align:left;">bookmark</th>
+  		<td>something</td>
+  	</tr>
+  	<tr>
+  		<th style="text-align:left;">tag</th>
+  		<td>tip</td>
+  	</tr>
+  	<tr>
+  		<th style="text-align:left;">user</th>
+  		<td>&#116;&#101;&#115;&#116;</td>
+  	</tr>
+  	<tr>
+  		<th style="text-align:left;vertical-align:top;">description</th>
+  		<td>branch commit with null character: </td>
+  	</tr>
+  	<tr>
+  		<th style="text-align:left;vertical-align:top;">files</th>
+  		<td></td>
+  	</tr>
+  	</table>
     </content>
    </entry>
    <entry>
-    <title>branch</title>
+    <title>[stable] branch</title>
     <id>http://*:$HGPORT/#changeset-1d22e65f027e5a0609357e7d8e7508cd2ba5d2fe</id> (glob)
     <link href="http://*:$HGPORT/rev/1d22e65f027e"/> (glob)
     <author>
@@ -88,13 +115,40 @@
     <updated>1970-01-01T00:00:00+00:00</updated>
     <published>1970-01-01T00:00:00+00:00</published>
     <content type="xhtml">
-     <div xmlns="http://www.w3.org/1999/xhtml">
-      <pre xml:space="preserve">branch</pre>
-     </div>
+  	<table xmlns="http://www.w3.org/1999/xhtml">
+  	<tr>
+  		<th style="text-align:left;">changeset</th>
+  		<td>1d22e65f027e</td>
+                </tr>
+                <tr>
+                                <th style="text-align:left;">branch</th>
+                                <td>stable</td>
+                </tr>
+                <tr>
+                                <th style="text-align:left;">bookmark</th>
+  		<td></td>
+  	</tr>
+  	<tr>
+  		<th style="text-align:left;">tag</th>
+  		<td></td>
+  	</tr>
+  	<tr>
+  		<th style="text-align:left;">user</th>
+  		<td>&#116;&#101;&#115;&#116;</td>
+  	</tr>
+  	<tr>
+  		<th style="text-align:left;vertical-align:top;">description</th>
+  		<td>branch</td>
+  	</tr>
+  	<tr>
+  		<th style="text-align:left;vertical-align:top;">files</th>
+  		<td>foo<br /></td>
+  	</tr>
+  	</table>
     </content>
    </entry>
    <entry>
-    <title>Added tag 1.0 for changeset 2ef0ac749a14</title>
+    <title>[default] Added tag 1.0 for changeset 2ef0ac749a14</title>
     <id>http://*:$HGPORT/#changeset-a4f92ed23982be056b9852de5dfe873eaac7f0de</id> (glob)
     <link href="http://*:$HGPORT/rev/a4f92ed23982"/> (glob)
     <author>
@@ -104,9 +158,36 @@
     <updated>1970-01-01T00:00:00+00:00</updated>
     <published>1970-01-01T00:00:00+00:00</published>
     <content type="xhtml">
-     <div xmlns="http://www.w3.org/1999/xhtml">
-      <pre xml:space="preserve">Added tag 1.0 for changeset 2ef0ac749a14</pre>
-     </div>
+  	<table xmlns="http://www.w3.org/1999/xhtml">
+  	<tr>
+  		<th style="text-align:left;">changeset</th>
+  		<td>a4f92ed23982</td>
+                </tr>
+                <tr>
+                                <th style="text-align:left;">branch</th>
+                                <td>default</td>
+                </tr>
+                <tr>
+                                <th style="text-align:left;">bookmark</th>
+  		<td></td>
+  	</tr>
+  	<tr>
+  		<th style="text-align:left;">tag</th>
+  		<td></td>
+  	</tr>
+  	<tr>
+  		<th style="text-align:left;">user</th>
+  		<td>&#116;&#101;&#115;&#116;</td>
+  	</tr>
+  	<tr>
+  		<th style="text-align:left;vertical-align:top;">description</th>
+  		<td>Added tag 1.0 for changeset 2ef0ac749a14</td>
+  	</tr>
+  	<tr>
+  		<th style="text-align:left;vertical-align:top;">files</th>
+  		<td>.hgtags<br /></td>
+  	</tr>
+  	</table>
     </content>
    </entry>
    <entry>
@@ -120,9 +201,36 @@
     <updated>1970-01-01T00:00:00+00:00</updated>
     <published>1970-01-01T00:00:00+00:00</published>
     <content type="xhtml">
-     <div xmlns="http://www.w3.org/1999/xhtml">
-      <pre xml:space="preserve">base</pre>
-     </div>
+  	<table xmlns="http://www.w3.org/1999/xhtml">
+  	<tr>
+  		<th style="text-align:left;">changeset</th>
+  		<td>2ef0ac749a14</td>
+                </tr>
+                <tr>
+                                <th style="text-align:left;">branch</th>
+                                <td></td>
+                </tr>
+                <tr>
+                                <th style="text-align:left;">bookmark</th>
+  		<td>anotherthing</td>
+  	</tr>
+  	<tr>
+  		<th style="text-align:left;">tag</th>
+  		<td>1.0</td>
+  	</tr>
+  	<tr>
+  		<th style="text-align:left;">user</th>
+  		<td>&#116;&#101;&#115;&#116;</td>
+  	</tr>
+  	<tr>
+  		<th style="text-align:left;vertical-align:top;">description</th>
+  		<td>base</td>
+  	</tr>
+  	<tr>
+  		<th style="text-align:left;vertical-align:top;">files</th>
+  		<td>da/foo<br />foo<br /></td>
+  	</tr>
+  	</table>
     </content>
    </entry>
   
@@ -140,7 +248,7 @@
    <updated>1970-01-01T00:00:00+00:00</updated>
   
    <entry>
-    <title>Added tag 1.0 for changeset 2ef0ac749a14</title>
+    <title>[default] Added tag 1.0 for changeset 2ef0ac749a14</title>
     <id>http://*:$HGPORT/#changeset-a4f92ed23982be056b9852de5dfe873eaac7f0de</id> (glob)
     <link href="http://*:$HGPORT/rev/a4f92ed23982"/> (glob)
     <author>
@@ -150,9 +258,36 @@
     <updated>1970-01-01T00:00:00+00:00</updated>
     <published>1970-01-01T00:00:00+00:00</published>
     <content type="xhtml">
-     <div xmlns="http://www.w3.org/1999/xhtml">
-      <pre xml:space="preserve">Added tag 1.0 for changeset 2ef0ac749a14</pre>
-     </div>
+  	<table xmlns="http://www.w3.org/1999/xhtml">
+  	<tr>
+  		<th style="text-align:left;">changeset</th>
+  		<td>a4f92ed23982</td>
+                </tr>
+                <tr>
+                                <th style="text-align:left;">branch</th>
+                                <td>default</td>
+                </tr>
+                <tr>
+                                <th style="text-align:left;">bookmark</th>
+  		<td></td>
+  	</tr>
+  	<tr>
+  		<th style="text-align:left;">tag</th>
+  		<td></td>
+  	</tr>
+  	<tr>
+  		<th style="text-align:left;">user</th>
+  		<td>&#116;&#101;&#115;&#116;</td>
+  	</tr>
+  	<tr>
+  		<th style="text-align:left;vertical-align:top;">description</th>
+  		<td>Added tag 1.0 for changeset 2ef0ac749a14</td>
+  	</tr>
+  	<tr>
+  		<th style="text-align:left;vertical-align:top;">files</th>
+  		<td>.hgtags<br /></td>
+  	</tr>
+  	</table>
     </content>
    </entry>
    <entry>
@@ -166,9 +301,36 @@
     <updated>1970-01-01T00:00:00+00:00</updated>
     <published>1970-01-01T00:00:00+00:00</published>
     <content type="xhtml">
-     <div xmlns="http://www.w3.org/1999/xhtml">
-      <pre xml:space="preserve">base</pre>
-     </div>
+  	<table xmlns="http://www.w3.org/1999/xhtml">
+  	<tr>
+  		<th style="text-align:left;">changeset</th>
+  		<td>2ef0ac749a14</td>
+                </tr>
+                <tr>
+                                <th style="text-align:left;">branch</th>
+                                <td></td>
+                </tr>
+                <tr>
+                                <th style="text-align:left;">bookmark</th>
+  		<td>anotherthing</td>
+  	</tr>
+  	<tr>
+  		<th style="text-align:left;">tag</th>
+  		<td>1.0</td>
+  	</tr>
+  	<tr>
+  		<th style="text-align:left;">user</th>
+  		<td>&#116;&#101;&#115;&#116;</td>
+  	</tr>
+  	<tr>
+  		<th style="text-align:left;vertical-align:top;">description</th>
+  		<td>base</td>
+  	</tr>
+  	<tr>
+  		<th style="text-align:left;vertical-align:top;">files</th>
+  		<td>da/foo<br />foo<br /></td>
+  	</tr>
+  	</table>
     </content>
    </entry>
   
@@ -194,9 +356,36 @@
     <updated>1970-01-01T00:00:00+00:00</updated>
     <published>1970-01-01T00:00:00+00:00</published>
     <content type="xhtml">
-     <div xmlns="http://www.w3.org/1999/xhtml">
-      <pre xml:space="preserve">base</pre>
-     </div>
+  	<table xmlns="http://www.w3.org/1999/xhtml">
+  	<tr>
+  		<th style="text-align:left;">changeset</th>
+  		<td>2ef0ac749a14</td>
+                </tr>
+                <tr>
+                                <th style="text-align:left;">branch</th>
+                                <td></td>
+                </tr>
+                <tr>
+                                <th style="text-align:left;">bookmark</th>
+  		<td>anotherthing</td>
+  	</tr>
+  	<tr>
+  		<th style="text-align:left;">tag</th>
+  		<td>1.0</td>
+  	</tr>
+  	<tr>
+  		<th style="text-align:left;">user</th>
+  		<td>&#116;&#101;&#115;&#116;</td>
+  	</tr>
+  	<tr>
+  		<th style="text-align:left;vertical-align:top;">description</th>
+  		<td>base</td>
+  	</tr>
+  	<tr>
+  		<th style="text-align:left;vertical-align:top;">files</th>
+  		<td></td>
+  	</tr>
+  	</table>
     </content>
    </entry>
   
@@ -739,6 +928,13 @@
   -rw-r--r-- 4 foo
   
   
+  $ hg log --template "{file_mods}\n" -r 1
+  
+  $ hg parents --template "{node|short}\n" -r 1
+  2ef0ac749a14
+  $ hg parents --template "{node|short}\n" -r 1 foo
+  2ef0ac749a14
+
   $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'file/1/foo'
   200 Script output follows
   
@@ -764,6 +960,7 @@
   <li><a href="/shortlog/a4f92ed23982">log</a></li>
   <li><a href="/graph/a4f92ed23982">graph</a></li>
   <li><a href="/tags">tags</a></li>
+  <li><a href="/bookmarks">bookmarks</a></li>
   <li><a href="/branches">branches</a></li>
   </ul>
   <ul>
@@ -808,7 +1005,7 @@
   </tr>
   <tr>
    <th class="author">parents</th>
-   <td class="author"></td>
+   <td class="author"><a href="/file/2ef0ac749a14/foo">2ef0ac749a14</a> </td>
   </tr>
   <tr>
    <th class="author">children</th>
@@ -855,6 +1052,109 @@
   
   
 
+  $ hg log --template "{file_mods}\n" -r 2
+  foo
+  $ hg parents --template "{node|short}\n" -r 2
+  a4f92ed23982
+  $ hg parents --template "{node|short}\n" -r 2 foo
+  2ef0ac749a14
+
+  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'file/2/foo'
+  200 Script output follows
+  
+  <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
+  <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en-US">
+  <head>
+  <link rel="icon" href="/static/hgicon.png" type="image/png" />
+  <meta name="robots" content="index, nofollow" />
+  <link rel="stylesheet" href="/static/style-paper.css" type="text/css" />
+  <script type="text/javascript" src="/static/mercurial.js"></script>
+  
+  <title>test: 1d22e65f027e foo</title>
+  </head>
+  <body>
+  
+  <div class="container">
+  <div class="menu">
+  <div class="logo">
+  <a href="http://mercurial.selenic.com/">
+  <img src="/static/hglogo.png" alt="mercurial" /></a>
+  </div>
+  <ul>
+  <li><a href="/shortlog/1d22e65f027e">log</a></li>
+  <li><a href="/graph/1d22e65f027e">graph</a></li>
+  <li><a href="/tags">tags</a></li>
+  <li><a href="/bookmarks">bookmarks</a></li>
+  <li><a href="/branches">branches</a></li>
+  </ul>
+  <ul>
+  <li><a href="/rev/1d22e65f027e">changeset</a></li>
+  <li><a href="/file/1d22e65f027e/">browse</a></li>
+  </ul>
+  <ul>
+  <li class="active">file</li>
+  <li><a href="/file/tip/foo">latest</a></li>
+  <li><a href="/diff/1d22e65f027e/foo">diff</a></li>
+  <li><a href="/comparison/1d22e65f027e/foo">comparison</a></li>
+  <li><a href="/annotate/1d22e65f027e/foo">annotate</a></li>
+  <li><a href="/log/1d22e65f027e/foo">file log</a></li>
+  <li><a href="/raw-file/1d22e65f027e/foo">raw</a></li>
+  </ul>
+  <ul>
+  <li><a href="/help">help</a></li>
+  </ul>
+  </div>
+  
+  <div class="main">
+  <h2 class="breadcrumb"><a href="/">Mercurial</a> </h2>
+  <h3>view foo @ 2:1d22e65f027e</h3>
+  
+  <form class="search" action="/log">
+  
+  <p><input name="rev" id="search1" type="text" size="30" /></p>
+  <div id="hint">Find changesets by keywords (author, files, the commit message), revision
+  number or hash, or <a href="/help/revsets">revset expression</a>.</div>
+  </form>
+  
+  <div class="description">branch</div>
+  
+  <table id="changesetEntry">
+  <tr>
+   <th class="author">author</th>
+   <td class="author">&#116;&#101;&#115;&#116;</td>
+  </tr>
+  <tr>
+   <th class="date">date</th>
+   <td class="date age">Thu, 01 Jan 1970 00:00:00 +0000</td>
+  </tr>
+  <tr>
+   <th class="author">parents</th>
+   <td class="author"><a href="/file/2ef0ac749a14/foo">2ef0ac749a14</a> </td>
+  </tr>
+  <tr>
+   <th class="author">children</th>
+   <td class="author"></td>
+  </tr>
+  </table>
+  
+  <div class="overflow">
+  <div class="sourcefirst linewraptoggle">line wrap: <a class="linewraplink" href="javascript:toggleLinewrap()">on</a></div>
+  <div class="sourcefirst"> line source</div>
+  <pre class="sourcelines stripes4 wrap">
+  <span id="l1">another</span><a href="#l1"></a></pre>
+  <div class="sourcelast"></div>
+  </div>
+  </div>
+  </div>
+  
+  <script type="text/javascript">process_dates()</script>
+  
+  
+  </body>
+  </html>
+  
+
+
 Overviews
 
   $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'raw-tags'
@@ -1366,6 +1666,7 @@
   /* Changelog/Filelog entries */
   .logEntry { width: 100%; }
   .logEntry .age { width: 15%; }
+  .logEntry th.label { width: 16em; }
   .logEntry th { font-weight: normal; text-align: right; vertical-align: top; }
   .logEntry th.age, .logEntry th.firstline { font-weight: bold; }
   .logEntry th.firstline { text-align: left; width: inherit; }
--- a/tests/test-hgweb-diffs.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-hgweb-diffs.t	Thu Apr 17 19:36:17 2014 -0400
@@ -193,6 +193,14 @@
 
 diff removed file
 
+  $ hg log --template "{file_mods}\n{file_dels}\n" -r tip
+  a
+  b
+  $ hg parents --template "{node|short}\n" -r tip
+  0cd96de13884
+  $ hg parents --template "{node|short}\n" -r tip b
+  0cd96de13884
+
   $ "$TESTDIR/get-with-headers.py" localhost:$HGPORT 'diff/tip/b'
   200 Script output follows
   
@@ -459,7 +467,15 @@
   +b
   
 
-diff removed file
+diff modified file
+
+  $ hg log --template "{file_mods}\n{file_dels}\n" -r tip
+  a
+  b
+  $ hg parents --template "{node|short}\n" -r tip
+  0cd96de13884
+  $ hg parents --template "{node|short}\n" -r tip a
+  0cd96de13884
 
   $ "$TESTDIR/get-with-headers.py" localhost:$HGPORT 'diff/tip/a'
   200 Script output follows
@@ -531,7 +547,7 @@
   </tr>
   <tr>
    <th>parents</th>
-   <td></td>
+   <td><a href="/file/0cd96de13884/a">0cd96de13884</a> </td>
   </tr>
   <tr>
    <th>children</th>
@@ -560,6 +576,10 @@
 
 comparison new file
 
+  $ hg parents --template "{rev}:{node|short}\n" -r 0
+  $ hg log --template "{rev}:{node|short}\n" -r 0
+  0:0cd96de13884
+
   $ "$TESTDIR/get-with-headers.py" localhost:$HGPORT 'comparison/0/a'
   200 Script output follows
   
@@ -651,7 +671,7 @@
     <thead class="header">
       <tr>
         <th>-1:000000000000</th>
-        <th>0:b789fdd96dc2</th>
+        <th>0:0cd96de13884</th>
       </tr>
     </thead>
     
@@ -681,6 +701,12 @@
   1 files updated, 0 files merged, 1 files removed, 0 files unresolved
   $ echo a >> a
   $ hg ci -mc
+
+  $ hg parents --template "{rev}:{node|short}\n" -r tip
+  1:559edbd9ed20
+  $ hg log --template "{rev}:{node|short}\n" -r tip
+  2:d73db4d812ff
+
   $ "$TESTDIR/get-with-headers.py" localhost:$HGPORT 'comparison/tip/a'
   200 Script output follows
   
@@ -771,8 +797,8 @@
   <table class="bigtable">
     <thead class="header">
       <tr>
-        <th>0:b789fdd96dc2</th>
-        <th>1:a80d06849b33</th>
+        <th>1:559edbd9ed20</th>
+        <th>2:d73db4d812ff</th>
       </tr>
     </thead>
     
@@ -804,6 +830,12 @@
 
   $ hg rm a
   $ hg ci -md
+
+  $ hg parents --template "{rev}:{node|short}\n" -r tip
+  2:d73db4d812ff
+  $ hg log --template "{rev}:{node|short}\n" -r tip
+  3:20e80271eb7a
+
   $ "$TESTDIR/get-with-headers.py" localhost:$HGPORT 'comparison/tip/a'
   200 Script output follows
   
@@ -894,8 +926,8 @@
   <table class="bigtable">
     <thead class="header">
       <tr>
-        <th>1:a80d06849b33</th>
-        <th>-1:000000000000</th>
+        <th>2:d73db4d812ff</th>
+        <th>3:20e80271eb7a</th>
       </tr>
     </thead>
     
@@ -923,6 +955,129 @@
   </html>
   
 
+comparison not-modified file
+
+  $ echo e > e
+  $ hg add e
+  $ hg ci -m e
+  $ echo f > f
+  $ hg add f
+  $ hg ci -m f
+  $ hg tip --template "{rev}:{node|short}\n"
+  5:41d9fc4a6ae1
+  $ hg diff -c tip e
+  $ hg parents --template "{rev}:{node|short}\n" -r tip
+  4:402bea3b0976
+  $ hg parents --template "{rev}:{node|short}\n" -r tip e
+  4:402bea3b0976
+
+  $ "$TESTDIR/get-with-headers.py" localhost:$HGPORT 'comparison/tip/e'
+  200 Script output follows
+  
+  <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
+  <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en-US">
+  <head>
+  <link rel="icon" href="/static/hgicon.png" type="image/png" />
+  <meta name="robots" content="index, nofollow" />
+  <link rel="stylesheet" href="/static/style-paper.css" type="text/css" />
+  <script type="text/javascript" src="/static/mercurial.js"></script>
+  
+  <title>test: e comparison</title>
+  </head>
+  <body>
+  
+  <div class="container">
+  <div class="menu">
+  <div class="logo">
+  <a href="http://mercurial.selenic.com/">
+  <img src="/static/hglogo.png" alt="mercurial" /></a>
+  </div>
+  <ul>
+  <li><a href="/shortlog/41d9fc4a6ae1">log</a></li>
+  <li><a href="/graph/41d9fc4a6ae1">graph</a></li>
+  <li><a href="/tags">tags</a></li>
+  <li><a href="/bookmarks">bookmarks</a></li>
+  <li><a href="/branches">branches</a></li>
+  </ul>
+  <ul>
+  <li><a href="/rev/41d9fc4a6ae1">changeset</a></li>
+  <li><a href="/file/41d9fc4a6ae1">browse</a></li>
+  </ul>
+  <ul>
+  <li><a href="/file/41d9fc4a6ae1/e">file</a></li>
+  <li><a href="/file/tip/e">latest</a></li>
+  <li><a href="/diff/41d9fc4a6ae1/e">diff</a></li>
+  <li class="active">comparison</li>
+  <li><a href="/annotate/41d9fc4a6ae1/e">annotate</a></li>
+  <li><a href="/log/41d9fc4a6ae1/e">file log</a></li>
+  <li><a href="/raw-file/41d9fc4a6ae1/e">raw</a></li>
+  </ul>
+  <ul>
+  <li><a href="/help">help</a></li>
+  </ul>
+  </div>
+  
+  <div class="main">
+  <h2 class="breadcrumb"><a href="/">Mercurial</a> </h2>
+  <h3>comparison e @ 5:41d9fc4a6ae1</h3>
+  
+  <form class="search" action="/log">
+  <p></p>
+  <p><input name="rev" id="search1" type="text" size="30" /></p>
+  <div id="hint">Find changesets by keywords (author, files, the commit message), revision
+  number or hash, or <a href="/help/revsets">revset expression</a>.</div>
+  </form>
+  
+  <div class="description">f</div>
+  
+  <table id="changesetEntry">
+  <tr>
+   <th>author</th>
+   <td>&#116;&#101;&#115;&#116;</td>
+  </tr>
+  <tr>
+   <th>date</th>
+   <td class="date age">Thu, 01 Jan 1970 00:00:00 +0000</td>
+  </tr>
+  <tr>
+   <th>parents</th>
+   <td><a href="/file/402bea3b0976/e">402bea3b0976</a> </td>
+  </tr>
+  <tr>
+   <th>children</th>
+   <td></td>
+  </tr>
+  </table>
+  
+  <div class="overflow">
+  <div class="sourcefirst">   comparison</div>
+  <div class="legend">
+    <span class="legendinfo equal">equal</span>
+    <span class="legendinfo delete">deleted</span>
+    <span class="legendinfo insert">inserted</span>
+    <span class="legendinfo replace">replaced</span>
+  </div>
+  
+  <table class="bigtable">
+    <thead class="header">
+      <tr>
+        <th>4:402bea3b0976</th>
+        <th>5:41d9fc4a6ae1</th>
+      </tr>
+    </thead>
+    
+  </table>
+  
+  </div>
+  </div>
+  </div>
+  
+  <script type="text/javascript">process_dates()</script>
+  
+  
+  </body>
+  </html>
+  
   $ cd ..
 
 test import rev as raw-rev
--- a/tests/test-hgweb-filelog.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-hgweb-filelog.t	Thu Apr 17 19:36:17 2014 -0400
@@ -639,7 +639,7 @@
   
   <table class="logEntry parity0">
    <tr>
-    <th><span class="age">Thu, 01 Jan 1970 00:00:00 +0000</span>:</th>
+    <th class="label"><span class="age">Thu, 01 Jan 1970 00:00:00 +0000</span>:</th>
     <th class="firstline"><a href="/rev/b7682196df1c?style=spartan">change c</a></th>
    </tr>
    <tr>
@@ -664,7 +664,7 @@
   
   <table class="logEntry parity1">
    <tr>
-    <th><span class="age">Thu, 01 Jan 1970 00:00:00 +0000</span>:</th>
+    <th class="label"><span class="age">Thu, 01 Jan 1970 00:00:00 +0000</span>:</th>
     <th class="firstline"><a href="/rev/1a6696706df2?style=spartan">mv b</a></th>
    </tr>
    <tr>
@@ -762,9 +762,36 @@
     <updated>1970-01-01T00:00:00+00:00</updated>
     <published>1970-01-01T00:00:00+00:00</published>
     <content type="xhtml">
-     <div xmlns="http://www.w3.org/1999/xhtml">
-      <pre xml:space="preserve">second a</pre>
-     </div>
+  	<table xmlns="http://www.w3.org/1999/xhtml">
+  	<tr>
+  		<th style="text-align:left;">changeset</th>
+  		<td>01de2d66a28d</td>
+                </tr>
+                <tr>
+                                <th style="text-align:left;">branch</th>
+                                <td></td>
+                </tr>
+                <tr>
+                                <th style="text-align:left;">bookmark</th>
+  		<td></td>
+  	</tr>
+  	<tr>
+  		<th style="text-align:left;">tag</th>
+  		<td></td>
+  	</tr>
+  	<tr>
+  		<th style="text-align:left;">user</th>
+  		<td>&#116;&#101;&#115;&#116;</td>
+  	</tr>
+  	<tr>
+  		<th style="text-align:left;vertical-align:top;">description</th>
+  		<td>second a</td>
+  	</tr>
+  	<tr>
+  		<th style="text-align:left;vertical-align:top;">files</th>
+  		<td></td>
+  	</tr>
+  	</table>
     </content>
    </entry>
    <entry>
@@ -778,9 +805,36 @@
     <updated>1970-01-01T00:00:00+00:00</updated>
     <published>1970-01-01T00:00:00+00:00</published>
     <content type="xhtml">
-     <div xmlns="http://www.w3.org/1999/xhtml">
-      <pre xml:space="preserve">first a</pre>
-     </div>
+  	<table xmlns="http://www.w3.org/1999/xhtml">
+  	<tr>
+  		<th style="text-align:left;">changeset</th>
+  		<td>5ed941583260</td>
+                </tr>
+                <tr>
+                                <th style="text-align:left;">branch</th>
+                                <td></td>
+                </tr>
+                <tr>
+                                <th style="text-align:left;">bookmark</th>
+  		<td></td>
+  	</tr>
+  	<tr>
+  		<th style="text-align:left;">tag</th>
+  		<td></td>
+  	</tr>
+  	<tr>
+  		<th style="text-align:left;">user</th>
+  		<td>&#116;&#101;&#115;&#116;</td>
+  	</tr>
+  	<tr>
+  		<th style="text-align:left;vertical-align:top;">description</th>
+  		<td>first a</td>
+  	</tr>
+  	<tr>
+  		<th style="text-align:left;vertical-align:top;">files</th>
+  		<td></td>
+  	</tr>
+  	</table>
     </content>
    </entry>
   
--- a/tests/test-hgweb-no-path-info.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-hgweb-no-path-info.t	Thu Apr 17 19:36:17 2014 -0400
@@ -78,7 +78,7 @@
    <updated>1970-01-01T00:00:00+00:00</updated>
   
    <entry>
-    <title>test</title>
+    <title>[default] test</title>
     <id>http://127.0.0.1:$HGPORT/#changeset-61c9426e69fef294feed5e2bbfc97d39944a5b1c</id>
     <link href="http://127.0.0.1:$HGPORT/rev/61c9426e69fe"/>
     <author>
@@ -88,9 +88,36 @@
     <updated>1970-01-01T00:00:00+00:00</updated>
     <published>1970-01-01T00:00:00+00:00</published>
     <content type="xhtml">
-     <div xmlns="http://www.w3.org/1999/xhtml">
-      <pre xml:space="preserve">test</pre>
-     </div>
+  	<table xmlns="http://www.w3.org/1999/xhtml">
+  	<tr>
+  		<th style="text-align:left;">changeset</th>
+  		<td>61c9426e69fe</td>
+                </tr>
+                <tr>
+                                <th style="text-align:left;">branch</th>
+                                <td>default</td>
+                </tr>
+                <tr>
+                                <th style="text-align:left;">bookmark</th>
+  		<td></td>
+  	</tr>
+  	<tr>
+  		<th style="text-align:left;">tag</th>
+  		<td>tip</td>
+  	</tr>
+  	<tr>
+  		<th style="text-align:left;">user</th>
+  		<td>&#116;&#101;&#115;&#116;</td>
+  	</tr>
+  	<tr>
+  		<th style="text-align:left;vertical-align:top;">description</th>
+  		<td>test</td>
+  	</tr>
+  	<tr>
+  		<th style="text-align:left;vertical-align:top;">files</th>
+  		<td>bar<br /></td>
+  	</tr>
+  	</table>
     </content>
    </entry>
   
--- a/tests/test-hgweb-no-request-uri.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-hgweb-no-request-uri.t	Thu Apr 17 19:36:17 2014 -0400
@@ -89,7 +89,7 @@
    <updated>1970-01-01T00:00:00+00:00</updated>
   
    <entry>
-    <title>test</title>
+    <title>[default] test</title>
     <id>http://127.0.0.1:$HGPORT/#changeset-61c9426e69fef294feed5e2bbfc97d39944a5b1c</id>
     <link href="http://127.0.0.1:$HGPORT/rev/61c9426e69fe"/>
     <author>
@@ -99,9 +99,36 @@
     <updated>1970-01-01T00:00:00+00:00</updated>
     <published>1970-01-01T00:00:00+00:00</published>
     <content type="xhtml">
-     <div xmlns="http://www.w3.org/1999/xhtml">
-      <pre xml:space="preserve">test</pre>
-     </div>
+  	<table xmlns="http://www.w3.org/1999/xhtml">
+  	<tr>
+  		<th style="text-align:left;">changeset</th>
+  		<td>61c9426e69fe</td>
+                </tr>
+                <tr>
+                                <th style="text-align:left;">branch</th>
+                                <td>default</td>
+                </tr>
+                <tr>
+                                <th style="text-align:left;">bookmark</th>
+  		<td></td>
+  	</tr>
+  	<tr>
+  		<th style="text-align:left;">tag</th>
+  		<td>tip</td>
+  	</tr>
+  	<tr>
+  		<th style="text-align:left;">user</th>
+  		<td>&#116;&#101;&#115;&#116;</td>
+  	</tr>
+  	<tr>
+  		<th style="text-align:left;vertical-align:top;">description</th>
+  		<td>test</td>
+  	</tr>
+  	<tr>
+  		<th style="text-align:left;vertical-align:top;">files</th>
+  		<td>bar<br /></td>
+  	</tr>
+  	</table>
     </content>
    </entry>
   
--- a/tests/test-hgwebdir.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-hgwebdir.t	Thu Apr 17 19:36:17 2014 -0400
@@ -745,7 +745,7 @@
    <updated>1970-01-01T00:00:01+00:00</updated>
   
    <entry>
-    <title>a</title>
+    <title>[default] a</title>
     <id>http://*:$HGPORT1/t/a/#changeset-8580ff50825a50c8f716709acdf8de0deddcd6ab</id> (glob)
     <link href="http://*:$HGPORT1/t/a/rev/8580ff50825a"/> (glob)
     <author>
@@ -755,9 +755,36 @@
     <updated>1970-01-01T00:00:01+00:00</updated>
     <published>1970-01-01T00:00:01+00:00</published>
     <content type="xhtml">
-     <div xmlns="http://www.w3.org/1999/xhtml">
-      <pre xml:space="preserve">a</pre>
-     </div>
+  	<table xmlns="http://www.w3.org/1999/xhtml">
+  	<tr>
+  		<th style="text-align:left;">changeset</th>
+  		<td>8580ff50825a</td>
+                </tr>
+                <tr>
+                                <th style="text-align:left;">branch</th>
+                                <td>default</td>
+                </tr>
+                <tr>
+                                <th style="text-align:left;">bookmark</th>
+  		<td></td>
+  	</tr>
+  	<tr>
+  		<th style="text-align:left;">tag</th>
+  		<td>tip</td>
+  	</tr>
+  	<tr>
+  		<th style="text-align:left;">user</th>
+  		<td>&#116;&#101;&#115;&#116;</td>
+  	</tr>
+  	<tr>
+  		<th style="text-align:left;vertical-align:top;">description</th>
+  		<td>a</td>
+  	</tr>
+  	<tr>
+  		<th style="text-align:left;vertical-align:top;">files</th>
+  		<td>a<br /></td>
+  	</tr>
+  	</table>
     </content>
    </entry>
   
@@ -775,7 +802,7 @@
    <updated>1970-01-01T00:00:01+00:00</updated>
   
    <entry>
-    <title>a</title>
+    <title>[default] a</title>
     <id>http://*:$HGPORT1/t/a/#changeset-8580ff50825a50c8f716709acdf8de0deddcd6ab</id> (glob)
     <link href="http://*:$HGPORT1/t/a/rev/8580ff50825a"/> (glob)
     <author>
@@ -785,9 +812,36 @@
     <updated>1970-01-01T00:00:01+00:00</updated>
     <published>1970-01-01T00:00:01+00:00</published>
     <content type="xhtml">
-     <div xmlns="http://www.w3.org/1999/xhtml">
-      <pre xml:space="preserve">a</pre>
-     </div>
+  	<table xmlns="http://www.w3.org/1999/xhtml">
+  	<tr>
+  		<th style="text-align:left;">changeset</th>
+  		<td>8580ff50825a</td>
+                </tr>
+                <tr>
+                                <th style="text-align:left;">branch</th>
+                                <td>default</td>
+                </tr>
+                <tr>
+                                <th style="text-align:left;">bookmark</th>
+  		<td></td>
+  	</tr>
+  	<tr>
+  		<th style="text-align:left;">tag</th>
+  		<td>tip</td>
+  	</tr>
+  	<tr>
+  		<th style="text-align:left;">user</th>
+  		<td>&#116;&#101;&#115;&#116;</td>
+  	</tr>
+  	<tr>
+  		<th style="text-align:left;vertical-align:top;">description</th>
+  		<td>a</td>
+  	</tr>
+  	<tr>
+  		<th style="text-align:left;vertical-align:top;">files</th>
+  		<td>a<br /></td>
+  	</tr>
+  	</table>
     </content>
    </entry>
   
--- a/tests/test-highlight.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-highlight.t	Thu Apr 17 19:36:17 2014 -0400
@@ -82,6 +82,7 @@
   <li><a href="/shortlog/853dcd4de2a6">log</a></li>
   <li><a href="/graph/853dcd4de2a6">graph</a></li>
   <li><a href="/tags">tags</a></li>
+  <li><a href="/bookmarks">bookmarks</a></li>
   <li><a href="/branches">branches</a></li>
   </ul>
   <ul>
@@ -522,7 +523,7 @@
   $ echo "" >> b
   $ echo "" >> b
   $ echo "" >> b
-  $ diff -u b a
+  $ cmp b a || diff -u b a
 
 hgweb filerevision, raw
 
@@ -531,7 +532,7 @@
   $ echo "200 Script output follows" > b
   $ echo "" >> b
   $ hg cat primes.py >> b
-  $ diff -u b a
+  $ cmp b a || diff -u b a
 
 hgweb highlightcss friendly
 
--- a/tests/test-histedit-arguments.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-histedit-arguments.t	Thu Apr 17 19:36:17 2014 -0400
@@ -51,10 +51,12 @@
   
   # Edit history between eb57da33312f and 08d98a8350f3
   #
+  # Commits are listed from least to most recent
+  #
   # Commands:
   #  p, pick = use commit
   #  e, edit = use commit, but stop for amending
-  #  f, fold = use commit, but fold into previous commit (combines N and N-1)
+  #  f, fold = use commit, but combine it with the one above
   #  d, drop = remove commit from history
   #  m, mess = edit message without changing commit content
   #
@@ -70,6 +72,26 @@
   [255]
   $ hg up --quiet
 
+
+Test that we pick the minimum of a revrange
+---------------------------------------
+
+  $ HGEDITOR=cat hg histedit '2::' --commands - << EOF
+  > pick eb57da33312f 2 three
+  > pick c8e68270e35a 3 four
+  > pick 08d98a8350f3 4 five
+  > EOF
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg up --quiet
+
+  $ HGEDITOR=cat hg histedit 'tip:2' --commands - << EOF
+  > pick eb57da33312f 2 three
+  > pick c8e68270e35a 3 four
+  > pick 08d98a8350f3 4 five
+  > EOF
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg up --quiet
+
 Run on a revision not descendants of the initial parent
 --------------------------------------------------------------------
 
@@ -196,3 +218,12 @@
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   0 files updated, 0 files merged, 0 files removed, 0 files unresolved
   saved backup bundle to $TESTTMP/foo/.hg/strip-backup/*-backup.hg (glob)
+
+  $ hg update -q 2
+  $ echo x > x
+  $ hg add x
+  $ hg commit -m'x' x
+  created new head
+  $ hg histedit -r 'heads(all())'
+  abort: The specified revisions must have exactly one common root
+  [255]
--- a/tests/test-histedit-bookmark-motion.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-histedit-bookmark-motion.t	Thu Apr 17 19:36:17 2014 -0400
@@ -67,10 +67,12 @@
   
   # Edit history between d2ae7f538514 and 652413bf663e
   #
+  # Commits are listed from least to most recent
+  #
   # Commands:
   #  p, pick = use commit
   #  e, edit = use commit, but stop for amending
-  #  f, fold = use commit, but fold into previous commit (combines N and N-1)
+  #  f, fold = use commit, but combine it with the one above
   #  d, drop = remove commit from history
   #  m, mess = edit message without changing commit content
   #
@@ -125,10 +127,12 @@
   
   # Edit history between b346ab9a313d and cacdfd884a93
   #
+  # Commits are listed from least to most recent
+  #
   # Commands:
   #  p, pick = use commit
   #  e, edit = use commit, but stop for amending
-  #  f, fold = use commit, but fold into previous commit (combines N and N-1)
+  #  f, fold = use commit, but combine it with the one above
   #  d, drop = remove commit from history
   #  m, mess = edit message without changing commit content
   #
--- a/tests/test-histedit-commute.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-histedit-commute.t	Thu Apr 17 19:36:17 2014 -0400
@@ -61,10 +61,12 @@
   
   # Edit history between 177f92b77385 and 652413bf663e
   #
+  # Commits are listed from least to most recent
+  #
   # Commands:
   #  p, pick = use commit
   #  e, edit = use commit, but stop for amending
-  #  f, fold = use commit, but fold into previous commit (combines N and N-1)
+  #  f, fold = use commit, but combine it with the one above
   #  d, drop = remove commit from history
   #  m, mess = edit message without changing commit content
   #
--- a/tests/test-histedit-fold.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-histedit-fold.t	Thu Apr 17 19:36:17 2014 -0400
@@ -1,4 +1,4 @@
-Test histedit extention: Fold commands
+Test histedit extension: Fold commands
 ======================================
 
 This test file is dedicated to testing the fold command in non conflicting
@@ -173,7 +173,7 @@
 folding and creating no new change doesn't break:
 -------------------------------------------------
 
-folded content is dropped during a merge. The folded commit should properly disapear.
+folded content is dropped during a merge. The folded commit should properly disappear.
 
   $ mkdir fold-to-empty-test
   $ cd fold-to-empty-test
--- a/tests/test-histedit-obsolete.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-histedit-obsolete.t	Thu Apr 17 19:36:17 2014 -0400
@@ -51,10 +51,12 @@
   
   # Edit history between d2ae7f538514 and 652413bf663e
   #
+  # Commits are listed from least to most recent
+  #
   # Commands:
   #  p, pick = use commit
   #  e, edit = use commit, but stop for amending
-  #  f, fold = use commit, but fold into previous commit (combines N and N-1)
+  #  f, fold = use commit, but combine it with the one above
   #  d, drop = remove commit from history
   #  m, mess = edit message without changing commit content
   #
--- a/tests/test-histedit-outgoing.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-histedit-outgoing.t	Thu Apr 17 19:36:17 2014 -0400
@@ -43,10 +43,12 @@
   
   # Edit history between 055a42cdd887 and 652413bf663e
   #
+  # Commits are listed from least to most recent
+  #
   # Commands:
   #  p, pick = use commit
   #  e, edit = use commit, but stop for amending
-  #  f, fold = use commit, but fold into previous commit (combines N and N-1)
+  #  f, fold = use commit, but combine it with the one above
   #  d, drop = remove commit from history
   #  m, mess = edit message without changing commit content
   #
@@ -72,10 +74,12 @@
   
   # Edit history between 2a4042b45417 and 51281e65ba79
   #
+  # Commits are listed from least to most recent
+  #
   # Commands:
   #  p, pick = use commit
   #  e, edit = use commit, but stop for amending
-  #  f, fold = use commit, but fold into previous commit (combines N and N-1)
+  #  f, fold = use commit, but combine it with the one above
   #  d, drop = remove commit from history
   #  m, mess = edit message without changing commit content
   #
@@ -93,10 +97,12 @@
   
   # Edit history between f26599ee3441 and f26599ee3441
   #
+  # Commits are listed from least to most recent
+  #
   # Commands:
   #  p, pick = use commit
   #  e, edit = use commit, but stop for amending
-  #  f, fold = use commit, but fold into previous commit (combines N and N-1)
+  #  f, fold = use commit, but combine it with the one above
   #  d, drop = remove commit from history
   #  m, mess = edit message without changing commit content
   #
--- a/tests/test-hook.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-hook.t	Thu Apr 17 19:36:17 2014 -0400
@@ -499,7 +499,7 @@
   $ echo >> foo
   $ hg ci --debug -d '0 0' -m 'change foo'
   foo
-  calling hook commit.auto: <function autohook at *> (glob)
+  calling hook commit.auto: hgext_hookext.autohook
   Automatically installed hook
   committed changeset 1:52998019f6252a2b893452765fcb0a47351a5708
 
--- a/tests/test-http-clone-r.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-http-clone-r.t	Thu Apr 17 19:36:17 2014 -0400
@@ -197,4 +197,11 @@
   checking files
   4 files, 9 changesets, 7 total revisions
   $ cd ..
+
+no default destination if url has no path:
+
+  $ hg clone http://localhost:$HGPORT/
+  abort: empty destination path is not valid
+  [255]
+
   $ cat error.log
--- a/tests/test-http.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-http.t	Thu Apr 17 19:36:17 2014 -0400
@@ -153,7 +153,8 @@
   >     common.permhooks.insert(0, perform_authentication)
   > EOT
   $ hg --config extensions.x=userpass.py serve -p $HGPORT2 -d --pid-file=pid \
-  >    --config server.preferuncompressed=True
+  >    --config server.preferuncompressed=True \
+  >    --config web.push_ssl=False --config web.allow_push=* -A ../access.log
   $ cat pid >> $DAEMON_PIDS
 
   $ cat << EOF > get_pass.py
@@ -163,6 +164,7 @@
   > getpass.getpass = newgetpass
   > EOF
 
+#if python243
   $ hg id http://localhost:$HGPORT2/
   abort: http authorization required for http://localhost:$HGPORT2/
   [255]
@@ -176,6 +178,7 @@
   password: 5fed3813f7f5
   $ hg id http://user:pass@localhost:$HGPORT2/
   5fed3813f7f5
+#endif
   $ echo '[auth]' >> .hg/hgrc
   $ echo 'l.schemes=http' >> .hg/hgrc
   $ echo 'l.prefix=lo' >> .hg/hgrc
@@ -187,6 +190,7 @@
   5fed3813f7f5
   $ hg id http://user@localhost:$HGPORT2/
   5fed3813f7f5
+#if python243
   $ hg clone http://user:pass@localhost:$HGPORT2/ dest 2>&1
   streaming all changes
   7 files to transfer, 916 bytes of data
@@ -201,6 +205,71 @@
   abort: HTTP Error 403: no
   [255]
 
+  $ hg -R dest tag -r tip top
+  $ hg -R dest push http://user:pass@localhost:$HGPORT2/
+  pushing to http://user:***@localhost:$HGPORT2/
+  searching for changes
+  remote: adding changesets
+  remote: adding manifests
+  remote: adding file changes
+  remote: added 1 changesets with 1 changes to 1 files
+  $ hg rollback -q
+
+  $ cut -c38- ../access.log
+  "GET /?cmd=capabilities HTTP/1.1" 200 -
+  "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
+  "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
+  "GET /?cmd=capabilities HTTP/1.1" 200 -
+  "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
+  "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
+  "GET /?cmd=capabilities HTTP/1.1" 200 -
+  "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
+  "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
+  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces
+  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
+  "GET /?cmd=capabilities HTTP/1.1" 200 -
+  "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
+  "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
+  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces
+  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
+  "GET /?cmd=capabilities HTTP/1.1" 200 -
+  "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
+  "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
+  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces
+  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
+  "GET /?cmd=capabilities HTTP/1.1" 200 -
+  "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
+  "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
+  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces
+  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
+  "GET /?cmd=capabilities HTTP/1.1" 200 -
+  "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
+  "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
+  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces
+  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
+  "GET /?cmd=capabilities HTTP/1.1" 200 -
+  "GET /?cmd=branchmap HTTP/1.1" 200 -
+  "GET /?cmd=stream_out HTTP/1.1" 401 -
+  "GET /?cmd=stream_out HTTP/1.1" 200 -
+  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
+  "GET /?cmd=capabilities HTTP/1.1" 200 -
+  "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
+  "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
+  "GET /?cmd=capabilities HTTP/1.1" 200 -
+  "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
+  "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
+  "GET /?cmd=listkeys HTTP/1.1" 403 - x-hgarg-1:namespace=namespaces
+  "GET /?cmd=capabilities HTTP/1.1" 200 -
+  "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D7f4e523d01f2cc3765ac8934da3d14db775ff872
+  "GET /?cmd=branchmap HTTP/1.1" 200 -
+  "GET /?cmd=branchmap HTTP/1.1" 200 -
+  "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=bookmarks
+  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
+  "POST /?cmd=unbundle HTTP/1.1" 200 - x-hgarg-1:heads=686173686564+5eb5abfefeea63c80dd7553bcc3783f37e0c5524
+  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases
+  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
+
+#endif
   $ cd ..
 
 clone of serve with repo in root and unserved subrepo (issue2970)
--- a/tests/test-hup.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-hup.t	Thu Apr 17 19:36:17 2014 -0400
@@ -11,7 +11,16 @@
   $ (
   > echo lock
   > echo addchangegroup
-  > while [ ! -s .hg/store/journal ]; do sleep 0; done
+  > start=`date +%s`
+  > # 10 seconds should be more than enough for the server to catch up
+  > deadline=`expr $start + 10`
+  > while [ ! -s .hg/store/journal ]; do
+  >     sleep 0;
+  >     if [ `date +%s` -gt $deadline ]; then
+  >         echo "transaction did not start after 10 seconds" >&2;
+  >         exit 1;
+  >     fi
+  > done
   > kill -HUP $P
   > ) > p
 
--- a/tests/test-i18n.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-i18n.t	Thu Apr 17 19:36:17 2014 -0400
@@ -13,7 +13,7 @@
   abortado: n?o foi encontrado um reposit?rio em '$TESTTMP' (.hg n?o encontrado)!
   [255]
 
-Using a more accomodating encoding:
+Using a more accommodating encoding:
 
   $ HGENCODING=UTF-8 LANGUAGE=pt_BR hg tip
   abortado: n\xc3\xa3o foi encontrado um reposit\xc3\xb3rio em '$TESTTMP' (.hg n\xc3\xa3o encontrado)! (esc)
--- a/tests/test-identify.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-identify.t	Thu Apr 17 19:36:17 2014 -0400
@@ -113,12 +113,14 @@
 
   $ echo fake >> .hg/requires
   $ hg id
-  abort: unknown repository format: requires features 'fake' (upgrade Mercurial)!
+  abort: repository requires features unknown to this Mercurial: fake!
+  (see http://mercurial.selenic.com/wiki/MissingRequirement for more information)
   [255]
 
   $ cd ..
 #if no-outer-repo
   $ hg id test
-  abort: unknown repository format: requires features 'fake' (upgrade Mercurial)!
+  abort: repository requires features unknown to this Mercurial: fake!
+  (see http://mercurial.selenic.com/wiki/MissingRequirement for more information)
   [255]
 #endif
--- a/tests/test-import-git.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-import-git.t	Thu Apr 17 19:36:17 2014 -0400
@@ -321,7 +321,7 @@
   a874b471193996e7cb034bb301cac7bdaf3e3f46 644   mbinary2
 
 Binary file and delta hunk (we build the patch using this sed hack to
-avoid an unquoted ^, which check-code says breaks sh on solaris):
+avoid an unquoted ^, which check-code says breaks sh on Solaris):
 
   $ sed 's/ caret /^/g;s/ dollarparen /$(/g' > quote-hack.patch <<'EOF'
   > diff --git a/delta b/delta
--- a/tests/test-inotify-debuginotify.t	Tue Apr 15 03:21:59 2014 +0900
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,41 +0,0 @@
-
-  $ "$TESTDIR/hghave" inotify || exit 80
-  $ hg init
-  $ echo "[extensions]" >> $HGRCPATH
-  $ echo "inotify=" >> $HGRCPATH
-
-inserve
-
-  $ hg inserve -d --pid-file=hg.pid
-  $ cat hg.pid >> "$DAEMON_PIDS"
-
-let the daemon finish its stuff
-
-  $ sleep 1
-
-empty
-
-  $ hg debuginotify
-  directories being watched:
-    /
-    .hg/
-  $ mkdir a
-  $ sleep 1
-
-only 'a'
-
-  $ hg debuginotify
-  directories being watched:
-    /
-    .hg/
-    a/
-  $ rmdir a
-  $ sleep 1
-
-empty again
-
-  $ hg debuginotify
-  directories being watched:
-    /
-    .hg/
-  $ "$TESTDIR/killdaemons.py" hg.pid
--- a/tests/test-inotify-dirty-dirstate.t	Tue Apr 15 03:21:59 2014 +0900
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,72 +0,0 @@
-issues when status queries are issued when dirstate is dirty
-
-  $ "$TESTDIR/hghave" inotify || exit 80
-  $ echo "[extensions]" >> $HGRCPATH
-  $ echo "inotify=" >> $HGRCPATH
-  $ echo "fetch=" >> $HGRCPATH
-
-issue1810: inotify and fetch
-
-  $ hg init test; cd test
-  $ hg inserve -d --pid-file=../hg.pid
-  $ cat ../hg.pid >> "$DAEMON_PIDS"
-  $ echo foo > foo
-  $ hg add
-  adding foo
-  $ hg ci -m foo
-  $ cd ..
-  $ hg --config "inotify.pidfile=../hg2.pid" clone test test2
-  updating to branch default
-  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  $ cat ../hg2.pid >> "$DAEMON_PIDS"
-  $ cd test2
-  $ echo bar > bar
-  $ hg add
-  adding bar
-  $ hg ci -m bar
-  $ cd ../test
-  $ echo spam > spam
-  $ hg add
-  adding spam
-  $ hg ci -m spam
-  $ cd ../test2
-  $ hg st
-
-abort, outstanding changes
-
-  $ hg fetch -q
-  $ hg st
-  $ cd ..
-
-issue1719: inotify and mq
-
-  $ echo "mq=" >> $HGRCPATH
-  $ hg init test-1719
-  $ cd test-1719
-
-inserve
-
-  $ hg inserve -d --pid-file=../hg-test-1719.pid
-  $ cat ../hg-test-1719.pid >> "$DAEMON_PIDS"
-  $ echo content > file
-  $ hg add file
-  $ hg qnew -f test.patch
-  $ hg status
-  $ hg qpop
-  popping test.patch
-  patch queue now empty
-
-st should not output anything
-
-  $ hg status
-  $ hg qpush
-  applying test.patch
-  now at: test.patch
-
-st should not output anything
-
-  $ hg status
-  $ hg qrefresh
-  $ hg status
-
-  $ cd ..
--- a/tests/test-inotify-issue1371.t	Tue Apr 15 03:21:59 2014 +0900
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,44 +0,0 @@
-
-  $ "$TESTDIR/hghave" inotify || exit 80
-  $ hg init
-  $ touch a b c d e f
-  $ echo "[extensions]" >> $HGRCPATH
-  $ echo "inotify=" >> $HGRCPATH
-
-inserve
-
-  $ hg inserve -d --pid-file=hg.pid 2>&1
-  $ cat hg.pid >> "$DAEMON_PIDS"
-  $ hg ci -Am m
-  adding a
-  adding b
-  adding c
-  adding d
-  adding e
-  adding f
-  adding hg.pid
-
-let the daemon finish its stuff
-
-  $ sleep 1
-
-need to test all file operations
-
-  $ hg rm a
-  $ rm b
-  $ echo c >> c
-  $ touch g
-  $ hg add g
-  $ hg mv e h
-  $ hg status
-  M c
-  A g
-  A h
-  R a
-  R e
-  ! b
-  $ sleep 1
-
-Are we able to kill the service? if not, the service died on some error
-
-  $ "$TESTDIR/killdaemons.py" hg.pid
--- a/tests/test-inotify-issue1542.t	Tue Apr 15 03:21:59 2014 +0900
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,36 +0,0 @@
-
-  $ "$TESTDIR/hghave" inotify || exit 80
-  $ hg init
-  $ touch a
-  $ mkdir dir
-  $ touch dir/b
-  $ touch dir/c
-  $ echo "[extensions]" >> $HGRCPATH
-  $ echo "inotify=" >> $HGRCPATH
-  $ hg add dir/c
-
-inserve
-
-  $ hg inserve -d --pid-file=hg.pid 2>&1
-  $ cat hg.pid >> "$DAEMON_PIDS"
-  $ hg st
-  A dir/c
-  ? a
-  ? dir/b
-  ? hg.pid
-
-moving dir out
-
-  $ mv dir ../tmp-test-inotify-issue1542
-
-status
-
-  $ hg st
-  ! dir/c
-  ? a
-  ? hg.pid
-  $ sleep 1
-
-Are we able to kill the service? if not, the service died on some error
-
-  $ "$TESTDIR/killdaemons.py" hg.pid
--- a/tests/test-inotify-issue1556.t	Tue Apr 15 03:21:59 2014 +0900
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,31 +0,0 @@
-
-  $ "$TESTDIR/hghave" inotify || exit 80
-  $ hg init
-  $ touch a b
-  $ hg add a b
-  $ rm b
-
-status without inotify
-
-  $ hg st
-  A a
-  ! b
-  $ echo "[extensions]" >> $HGRCPATH
-  $ echo "inotify=" >> $HGRCPATH
-
-inserve
-
-  $ hg inserve -d --pid-file=hg.pid 2>&1
-  $ cat hg.pid >> "$DAEMON_PIDS"
-
-status
-
-  $ hg st
-  A a
-  ! b
-  ? hg.pid
-  $ sleep 1
-
-Are we able to kill the service? if not, the service died on some error
-
-  $ "$TESTDIR/killdaemons.py" hg.pid
--- a/tests/test-inotify-lookup.t	Tue Apr 15 03:21:59 2014 +0900
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,14 +0,0 @@
-
-  $ "$TESTDIR/hghave" inotify || exit 80
-  $ hg init
-  $ echo "[extensions]" > .hg/hgrc
-  $ echo "inotify=" >> .hg/hgrc
-  $ hg inserve -d --pid-file .hg/inotify.pid
-  $ echo a > a
-  $ hg ci -Aqm0
-  $ hg co -q null
-  $ hg co -q
-  $ hg st
-  $ cat a
-  a
-  $ "$TESTDIR/killdaemons.py" .hg/inotify.pid
--- a/tests/test-inotify.t	Tue Apr 15 03:21:59 2014 +0900
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,182 +0,0 @@
-
-  $ "$TESTDIR/hghave" inotify || exit 80
-  $ hg init repo1
-  $ cd repo1
-  $ touch a b c d e
-  $ mkdir dir
-  $ mkdir dir/bar
-  $ touch dir/x dir/y dir/bar/foo
-  $ hg ci -Am m
-  adding a
-  adding b
-  adding c
-  adding d
-  adding dir/bar/foo
-  adding dir/x
-  adding dir/y
-  adding e
-  $ cd ..
-  $ hg clone repo1 repo2
-  updating to branch default
-  8 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  $ echo "[extensions]" >> $HGRCPATH
-  $ echo "inotify=" >> $HGRCPATH
-  $ cd repo2
-  $ echo b >> a
-
-check that daemon started automatically works correctly
-and make sure that inotify.pidfile works
-
-  $ hg --config "inotify.pidfile=../hg2.pid" status
-  M a
-
-make sure that pidfile worked. Output should be silent.
-
-  $ "$TESTDIR/killdaemons.py" ../hg2.pid
-  $ cd ../repo1
-
-inserve
-
-  $ hg inserve -d --pid-file=hg.pid
-  $ cat hg.pid >> "$DAEMON_PIDS"
-
-let the daemon finish its stuff
-
-  $ sleep 1
-
-cannot start, already bound
-
-  $ hg inserve
-  abort: inotify-server: cannot start: socket is already bound
-  [255]
-
-issue907
-
-  $ hg status
-  ? hg.pid
-
-clean
-
-  $ hg status -c
-  C a
-  C b
-  C c
-  C d
-  C dir/bar/foo
-  C dir/x
-  C dir/y
-  C e
-
-all
-
-  $ hg status -A
-  ? hg.pid
-  C a
-  C b
-  C c
-  C d
-  C dir/bar/foo
-  C dir/x
-  C dir/y
-  C e
-
-path patterns
-
-  $ echo x > dir/x
-  $ hg status .
-  M dir/x
-  ? hg.pid
-  $ hg status dir
-  M dir/x
-  $ cd dir
-  $ hg status .
-  M x
-  $ cd ..
-
-issue 1375
-testing that we can remove a folder and then add a file with the same name
-issue 1375
-
-  $ mkdir h
-  $ echo h > h/h
-  $ hg ci -Am t
-  adding h/h
-  adding hg.pid
-  $ hg rm h
-  removing h/h
-  $ echo h >h
-  $ hg add h
-  $ hg status
-  A h
-  R h/h
-  $ hg ci -m0
-
-Test for issue1735: inotify watches files in .hg/merge
-
-  $ hg st
-  $ echo a > a
-  $ hg ci -Am a
-  $ hg st
-  $ echo b >> a
-  $ hg ci -m ab
-  $ hg st
-  $ echo c >> a
-  $ hg st
-  M a
-  $ HGMERGE=internal:local hg up 0
-  1 files updated, 1 files merged, 2 files removed, 0 files unresolved
-  $ hg st
-  M a
-  $ HGMERGE=internal:local hg up
-  3 files updated, 1 files merged, 0 files removed, 0 files unresolved
-  $ hg st
-  M a
-
-Test for 1844: "hg ci folder" will not commit all changes beneath "folder"
-
-  $ mkdir 1844
-  $ echo a > 1844/foo
-  $ hg add 1844
-  adding 1844/foo
-  $ hg ci -m 'working'
-  $ echo b >> 1844/foo
-  $ hg ci 1844 -m 'broken'
-
-Test for issue884: "Build products not ignored until .hgignore is touched"
-
-  $ echo '^build$' > .hgignore
-  $ hg add .hgignore
-  $ hg ci .hgignore -m 'ignorelist'
-
-Now, lets add some build products...
-
-  $ mkdir build
-  $ touch build/x
-  $ touch build/y
-
-build/x & build/y shouldn't appear in "hg st"
-
-  $ hg st
-  $ "$TESTDIR/killdaemons.py" hg.pid
-
-  $ cd ..
-
-Ensure that if the repo is in a directory whose name is too long, the
-unix domain socket is reached through a symlink (issue1208).
-
-  $ mkdir 0_3456789_10_456789_20_456789_30_456789_40_456789_50_45678_
-  $ cd 0_3456789_10_456789_20_456789_30_456789_40_456789_50_45678_
-  $ mkdir 60_456789_70_456789_80_456789_90_456789_100_56789_
-  $ cd 60_456789_70_456789_80_456789_90_456789_100_56789_
-
-  $ hg --config inotify.pidfile=hg3.pid clone -q ../../repo1
-  $ readlink repo1/.hg/inotify.sock
-  */inotify.sock (glob)
-
-Trying to start the server a second time should fail as usual.
-
-  $ hg --cwd repo1 inserve
-  abort: inotify-server: cannot start: socket is already bound
-  [255]
-
-  $ "$TESTDIR/killdaemons.py" hg3.pid
--- a/tests/test-install.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-install.t	Thu Apr 17 19:36:17 2014 -0400
@@ -1,6 +1,8 @@
 hg debuginstall
   $ hg debuginstall
   checking encoding (ascii)...
+  checking Python executable (*) (glob)
+  checking Python version (2.*) (glob)
   checking Python lib (*lib*)... (glob)
   checking installed modules (*mercurial)... (glob)
   checking templates (*mercurial?templates)... (glob)
@@ -11,12 +13,14 @@
 hg debuginstall with no username
   $ HGUSER= hg debuginstall
   checking encoding (ascii)...
+  checking Python executable (*) (glob)
+  checking Python version (2.*) (glob)
   checking Python lib (*lib*)... (glob)
   checking installed modules (*mercurial)... (glob)
   checking templates (*mercurial?templates)... (glob)
   checking commit editor...
   checking username...
-   no username supplied (see "hg help config")
+   no username supplied
    (specify a username in your configuration file)
   1 problems detected, please check your install!
   [1]
--- a/tests/test-issue672.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-issue672.t	Thu Apr 17 19:36:17 2014 -0400
@@ -36,6 +36,7 @@
    ancestor: 81f4b099af3d, local: c64f439569a9+, remote: c12dcd37c90a
    1: other deleted -> r
    1a: remote created -> g
+   2: keep -> k
   removing 1
   updating: 1 1/2 files (50.00%)
   getting 1a
@@ -65,7 +66,7 @@
   resolving manifests
    branchmerge: True, force: False, partial: False
    ancestor: c64f439569a9, local: e327dca35ac8+, remote: 746e9549ea96
-   1a: local copied/moved to 1 -> m
+   1a: local copied/moved from 1 -> m
     preserving 1a for resolve of 1a
   updating: 1a 1/1 files (100.00%)
   picked tool 'internal:merge' for 1a (binary False symlink False)
@@ -88,10 +89,10 @@
   resolving manifests
    branchmerge: True, force: False, partial: False
    ancestor: c64f439569a9, local: 746e9549ea96+, remote: e327dca35ac8
-   1: remote moved to 1a -> m
+   1a: remote moved from 1 -> m
     preserving 1 for resolve of 1a
   removing 1
-  updating: 1 1/1 files (100.00%)
+  updating: 1a 1/1 files (100.00%)
   picked tool 'internal:merge' for 1a (binary False symlink False)
   merging 1 and 1a to 1a
   my 1a@746e9549ea96+ other 1a@e327dca35ac8 ancestor 1@81f4b099af3d
--- a/tests/test-largefiles-cache.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-largefiles-cache.t	Thu Apr 17 19:36:17 2014 -0400
@@ -27,7 +27,7 @@
 
 Create mirror repo, and pull from source without largefile:
 "pull" is used instead of "clone" for suppression of (1) updating to
-tip (= cahcing largefile from source repo), and (2) recording source
+tip (= caching largefile from source repo), and (2) recording source
 repo as "default" path in .hg/hgrc.
 
   $ hg init mirror
@@ -47,7 +47,7 @@
 
   $ hg update -r0
   getting changed largefiles
-  large: largefile 7f7097b041ccf68cc5561e9600da4655d21c6d18 not available from file://$TESTTMP/mirror (glob)
+  large: largefile 7f7097b041ccf68cc5561e9600da4655d21c6d18 not available from file:/*/$TESTTMP/mirror (glob)
   0 largefiles updated, 0 removed
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ hg status
@@ -64,7 +64,7 @@
 
   $ hg update -r0
   getting changed largefiles
-  large: largefile 7f7097b041ccf68cc5561e9600da4655d21c6d18 not available from file://$TESTTMP/mirror (glob)
+  large: largefile 7f7097b041ccf68cc5561e9600da4655d21c6d18 not available from file:/*/$TESTTMP/mirror (glob)
   0 largefiles updated, 0 removed
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ hg status
--- a/tests/test-largefiles.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-largefiles.t	Thu Apr 17 19:36:17 2014 -0400
@@ -43,7 +43,7 @@
   n 644         41 .hglf/sub/large2
   n 644          8 normal1
   n 644          8 sub/normal2
-  $ hg debugstate --large
+  $ hg debugstate --large --nodates
   n 644          7 large1
   n 644          7 sub/large2
   $ echo normal11 > normal1
@@ -489,8 +489,8 @@
   $ cat sub2/large7
   large7
 
-Test addremove: verify that files that should be added as largfiles are added as
-such and that already-existing largfiles are not added as normal files by
+Test addremove: verify that files that should be added as largefiles are added as
+such and that already-existing largefiles are not added as normal files by
 accident.
 
   $ rm normal3
@@ -703,7 +703,6 @@
   date:        Thu Jan 01 00:00:00 1970 +0000
   summary:     this used to not notice the rm
   
-  searching for changes
   largefiles to upload:
   foo
   large
@@ -730,6 +729,26 @@
   0:30d30fe6a5be  add files
   $ cat normal3
   normal33
+
+Test graph log
+
+  $ hg log -G --template '{rev}:{node|short}  {desc|firstline}\n'
+  @  7:daea875e9014  add/edit more largefiles
+  |
+  o  6:4355d653f84f  edit files yet again
+  |
+  o  5:9d5af5072dbd  edit files again
+  |
+  o  4:74c02385b94c  move files
+  |
+  o  3:9e8fbc4bce62  copy files
+  |
+  o  2:51a0ae4d5864  remove files
+  |
+  o  1:ce8896473775  edit files
+  |
+  o  0:30d30fe6a5be  add files
+  
   $ cat sub/normal4
   normal44
   $ cat sub/large4
@@ -740,6 +759,9 @@
   large7
   $ hg log -qf sub2/large7
   7:daea875e9014
+  $ hg log -Gqf sub2/large7
+  @  7:daea875e9014
+  |
   $ cd ..
   $ hg clone a -r 3 c
   adding changesets
@@ -900,7 +922,7 @@
   adding manifests
   adding file changes
   added 6 changesets with 16 changes to 8 files
-  calling hook changegroup.lfiles: <function checkrequireslfiles at *> (glob)
+  calling hook changegroup.lfiles: hgext.largefiles.reposetup.checkrequireslfiles
   (run 'hg update' to get a working copy)
   pulling largefiles for revision 7
   found 971fb41e78fea4f8e0ba5244784239371cb00591 in store
@@ -990,6 +1012,27 @@
   2:51a0ae4d5864  remove files
   1:ce8896473775  edit files
   0:30d30fe6a5be  add files
+  $ hg log -G --template '{rev}:{node|short}  {desc|firstline}\n'
+  @  9:598410d3eb9a  modify normal file largefile in repo d
+  |
+  o  8:a381d2c8c80e  modify normal file and largefile in repo b
+  |
+  o  7:daea875e9014  add/edit more largefiles
+  |
+  o  6:4355d653f84f  edit files yet again
+  |
+  o  5:9d5af5072dbd  edit files again
+  |
+  o  4:74c02385b94c  move files
+  |
+  o  3:9e8fbc4bce62  copy files
+  |
+  o  2:51a0ae4d5864  remove files
+  |
+  o  1:ce8896473775  edit files
+  |
+  o  0:30d30fe6a5be  add files
+  
   $ cat normal3
   normal3-modified
   $ cat sub/normal4
@@ -1044,11 +1087,29 @@
   6:4355d653f84f  edit files yet again
   5:9d5af5072dbd  edit files again
   4:74c02385b94c  move files
+  $ hg log -G --template '{rev}:{node|short}  {desc|firstline}\n' .hglf/sub/large4
+  o  8:a381d2c8c80e  modify normal file and largefile in repo b
+  |
+  o  6:4355d653f84f  edit files yet again
+  |
+  o  5:9d5af5072dbd  edit files again
+  |
+  o  4:74c02385b94c  move files
+  |
   $ hg log --template '{rev}:{node|short}  {desc|firstline}\n' sub/large4
   8:a381d2c8c80e  modify normal file and largefile in repo b
   6:4355d653f84f  edit files yet again
   5:9d5af5072dbd  edit files again
   4:74c02385b94c  move files
+  $ hg log -G --template '{rev}:{node|short}  {desc|firstline}\n' .hglf/sub/large4
+  o  8:a381d2c8c80e  modify normal file and largefile in repo b
+  |
+  o  6:4355d653f84f  edit files yet again
+  |
+  o  5:9d5af5072dbd  edit files again
+  |
+  o  4:74c02385b94c  move files
+  |
 
 - .hglf only matches largefiles, without .hglf it matches 9 bco sub/normal
   $ hg log --template '{rev}:{node|short}  {desc|firstline}\n' .hglf/sub
@@ -1058,6 +1119,19 @@
   4:74c02385b94c  move files
   1:ce8896473775  edit files
   0:30d30fe6a5be  add files
+  $ hg log -G --template '{rev}:{node|short}  {desc|firstline}\n' .hglf/sub
+  o  8:a381d2c8c80e  modify normal file and largefile in repo b
+  |
+  o  6:4355d653f84f  edit files yet again
+  |
+  o  5:9d5af5072dbd  edit files again
+  |
+  o  4:74c02385b94c  move files
+  |
+  o  1:ce8896473775  edit files
+  |
+  o  0:30d30fe6a5be  add files
+  
   $ hg log --template '{rev}:{node|short}  {desc|firstline}\n' sub
   9:598410d3eb9a  modify normal file largefile in repo d
   8:a381d2c8c80e  modify normal file and largefile in repo b
@@ -1066,7 +1140,21 @@
   4:74c02385b94c  move files
   1:ce8896473775  edit files
   0:30d30fe6a5be  add files
-
+  $ hg log -G --template '{rev}:{node|short}  {desc|firstline}\n' sub
+  @  9:598410d3eb9a  modify normal file largefile in repo d
+  |
+  o  8:a381d2c8c80e  modify normal file and largefile in repo b
+  |
+  o  6:4355d653f84f  edit files yet again
+  |
+  o  5:9d5af5072dbd  edit files again
+  |
+  o  4:74c02385b94c  move files
+  |
+  o  1:ce8896473775  edit files
+  |
+  o  0:30d30fe6a5be  add files
+  
 - globbing gives same result
   $ hg log --template '{rev}:{node|short}  {desc|firstline}\n' 'glob:sub/*'
   9:598410d3eb9a  modify normal file largefile in repo d
@@ -1076,7 +1164,21 @@
   4:74c02385b94c  move files
   1:ce8896473775  edit files
   0:30d30fe6a5be  add files
-
+  $ hg log -G --template '{rev}:{node|short}  {desc|firstline}\n' 'glob:sub/*'
+  @  9:598410d3eb9a  modify normal file largefile in repo d
+  |
+  o  8:a381d2c8c80e  modify normal file and largefile in repo b
+  |
+  o  6:4355d653f84f  edit files yet again
+  |
+  o  5:9d5af5072dbd  edit files again
+  |
+  o  4:74c02385b94c  move files
+  |
+  o  1:ce8896473775  edit files
+  |
+  o  0:30d30fe6a5be  add files
+  
 Rollback on largefiles.
 
   $ echo large4-modified-again > sub/large4
@@ -1112,6 +1214,12 @@
 
   $ echo mistake > sub2/large7
   $ hg revert sub2/large7
+  $ cat sub2/large7
+  large7
+  $ cat sub2/large7.orig
+  mistake
+  $ test ! -f .hglf/sub2/large7.orig
+
   $ hg -q update --clean -r null
   $ hg update --clean
   getting changed largefiles
@@ -1129,18 +1237,16 @@
   large7
   $ cat sub2/large7.orig
   mistake
-  $ cat .hglf/sub2/large7.orig
-  9dbfb2c79b1c40981b258c3efa1b10b03f18ad31
+  $ test ! -f .hglf/sub2/large7.orig
 
-demonstrate misfeature: .orig file is overwritten on every update -C,
-also when clean:
+verify that the largefile .orig file is no longer overwritten on every update -C:
   $ hg update --clean
   getting changed largefiles
   0 largefiles updated, 0 removed
   0 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ cat sub2/large7.orig
-  large7
-  $ rm sub2/large7.orig .hglf/sub2/large7.orig
+  mistake
+  $ rm sub2/large7.orig
 
 Now "update check" is happy.
   $ hg update --check 8
@@ -1280,7 +1386,7 @@
   $ rm ${USERCACHE}/7838695e10da2bb75ac1156565f40a2595fa2fa0
   $ hg up -r 6
   getting changed largefiles
-  large3: largefile 7838695e10da2bb75ac1156565f40a2595fa2fa0 not available from file://$TESTTMP/d (glob)
+  large3: largefile 7838695e10da2bb75ac1156565f40a2595fa2fa0 not available from file:/*/$TESTTMP/d (glob)
   1 largefiles updated, 2 removed
   4 files updated, 0 files merged, 2 files removed, 0 files unresolved
   $ rm normal3
@@ -1301,7 +1407,7 @@
   ! normal3
   $ hg up -Cr.
   getting changed largefiles
-  large3: largefile 7838695e10da2bb75ac1156565f40a2595fa2fa0 not available from file://$TESTTMP/d (glob)
+  large3: largefile 7838695e10da2bb75ac1156565f40a2595fa2fa0 not available from file:/*/$TESTTMP/d (glob)
   0 largefiles updated, 0 removed
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ hg st
@@ -1323,7 +1429,7 @@
   4 files updated, 0 files merged, 0 files removed, 0 files unresolved
   (branch merge, don't forget to commit)
   getting changed largefiles
-  large3: largefile 7838695e10da2bb75ac1156565f40a2595fa2fa0 not available from file://$TESTTMP/d (glob)
+  large3: largefile 7838695e10da2bb75ac1156565f40a2595fa2fa0 not available from file:/*/$TESTTMP/d (glob)
   1 largefiles updated, 0 removed
 
   $ hg rollback -q
@@ -1503,6 +1609,12 @@
   large4-modified
   $ hg --cwd sub cat -r '.^' ../normal3
   normal3-modified
+Cat a standin
+  $ hg cat .hglf/sub/large4
+  e166e74c7303192238d60af5a9c4ce9bef0b7928
+  $ hg cat .hglf/normal3
+  .hglf/normal3: no such file in rev 598410d3eb9a
+  [1]
 
 Test that renaming a largefile results in correct output for status
 
@@ -1593,7 +1705,6 @@
   $ hg push ../dest
   pushing to ../dest
   searching for changes
-  searching for changes
   adding changesets
   adding manifests
   adding file changes
@@ -1687,7 +1798,6 @@
   $ hg push -R r7 http://localhost:$HGPORT1
   pushing to http://localhost:$HGPORT1/
   searching for changes
-  searching for changes
   remote: adding changesets
   remote: adding manifests
   remote: adding file changes
@@ -1711,7 +1821,6 @@
   $ hg push -R r8 http://localhost:$HGPORT2/#default
   pushing to http://localhost:$HGPORT2/
   searching for changes
-  searching for changes
   remote: adding changesets
   remote: adding manifests
   remote: adding file changes
@@ -2097,6 +2206,13 @@
   date:        Thu Jan 01 00:00:00 1970 +0000
   summary:     anotherlarge
   
+  $ hg log -G anotherlarge
+  @  changeset:   1:9627a577c5e9
+  |  tag:         tip
+  |  user:        test
+  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  summary:     anotherlarge
+  |
   $ echo more >> anotherlarge
   $ hg st .
   M anotherlarge
@@ -2152,16 +2268,20 @@
   branch: default
   commit: (clean)
   update: (current)
-  searching for changes
   largefiles: (no files to upload)
   $ hg -R clone2 outgoing --large
   comparing with $TESTTMP/issue3651/src (glob)
   searching for changes
   no changes found
-  searching for changes
   largefiles: no files to upload
   [1]
 
+  $ hg -R clone2 outgoing --large --graph --template "{rev}"
+  comparing with $TESTTMP/issue3651/src (glob)
+  searching for changes
+  no changes found
+  largefiles: no files to upload
+
 check messages when there are files to upload:
 
   $ echo b > clone2/b
@@ -2175,7 +2295,6 @@
   branch: default
   commit: (clean)
   update: (current)
-  searching for changes
   largefiles: 1 to upload
   $ hg -R clone2 outgoing --large
   comparing with $TESTTMP/issue3651/src (glob)
@@ -2186,7 +2305,14 @@
   date:        Thu Jan 01 00:00:00 1970 +0000
   summary:     #1
   
+  largefiles to upload:
+  b
+  
+  $ hg -R clone2 outgoing --large --graph --template "{rev}"
+  comparing with $TESTTMP/issue3651/src
   searching for changes
+  @  1
+  
   largefiles to upload:
   b
   
@@ -2226,6 +2352,79 @@
   0 largefiles updated, 0 removed
   $ cd ..
 
+
+Merge conflicts:
+
+  $ hg init merge
+  $ cd merge
+  $ echo 0 > f-different
+  $ echo 0 > f-same
+  $ echo 0 > f-unchanged-1
+  $ echo 0 > f-unchanged-2
+  $ hg add --large *
+  $ hg ci -m0
+  Invoking status precommit hook
+  A f-different
+  A f-same
+  A f-unchanged-1
+  A f-unchanged-2
+  $ echo tmp1 > f-unchanged-1
+  $ echo tmp1 > f-unchanged-2
+  $ echo tmp1 > f-same
+  $ hg ci -m1
+  Invoking status precommit hook
+  M f-same
+  M f-unchanged-1
+  M f-unchanged-2
+  $ echo 2 > f-different
+  $ echo 0 > f-unchanged-1
+  $ echo 1 > f-unchanged-2
+  $ echo 1 > f-same
+  $ hg ci -m2
+  Invoking status precommit hook
+  M f-different
+  M f-same
+  M f-unchanged-1
+  M f-unchanged-2
+  $ hg up -qr0
+  $ echo tmp2 > f-unchanged-1
+  $ echo tmp2 > f-unchanged-2
+  $ echo tmp2 > f-same
+  $ hg ci -m3
+  Invoking status precommit hook
+  M f-same
+  M f-unchanged-1
+  M f-unchanged-2
+  created new head
+  $ echo 1 > f-different
+  $ echo 1 > f-unchanged-1
+  $ echo 0 > f-unchanged-2
+  $ echo 1 > f-same
+  $ hg ci -m4
+  Invoking status precommit hook
+  M f-different
+  M f-same
+  M f-unchanged-1
+  M f-unchanged-2
+  $ hg merge
+  largefile f-different has a merge conflict
+  ancestor was 09d2af8dd22201dd8d48e5dcfcaed281ff9422c7
+  keep (l)ocal e5fa44f2b31c1fb553b6021e7360d07d5d91ff5e or
+  take (o)ther 7448d8798a4380162d4b56f9b452e2f6f9e24e7a? l
+  0 files updated, 4 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  getting changed largefiles
+  1 largefiles updated, 0 removed
+  $ cat f-different
+  1
+  $ cat f-same
+  1
+  $ cat f-unchanged-1
+  1
+  $ cat f-unchanged-2
+  1
+  $ cd ..
+
 Check whether "largefiles" feature is supported only in repositories
 enabling largefiles extension.
 
@@ -2259,7 +2458,8 @@
   $ hg -R enabledlocally root
   $TESTTMP/individualenabling/enabledlocally (glob)
   $ hg -R notenabledlocally root
-  abort: unknown repository format: requires features 'largefiles' (upgrade Mercurial)!
+  abort: repository requires features unknown to this Mercurial: largefiles!
+  (see http://mercurial.selenic.com/wiki/MissingRequirement for more information)
   [255]
 
   $ hg init push-dst
@@ -2275,7 +2475,8 @@
   [255]
 
   $ hg clone enabledlocally clone-dst
-  abort: unknown repository format: requires features 'largefiles' (upgrade Mercurial)!
+  abort: repository requires features unknown to this Mercurial: largefiles!
+  (see http://mercurial.selenic.com/wiki/MissingRequirement for more information)
   [255]
   $ test -d clone-dst
   [1]
--- a/tests/test-lfconvert.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-lfconvert.t	Thu Apr 17 19:36:17 2014 -0400
@@ -342,7 +342,7 @@
   $ rm largefiles-repo/.hg/largefiles/*
   $ hg lfconvert --to-normal issue3519 normalized3519
   initializing destination normalized3519
-  large: largefile 2e000fa7e85759c7f4c254d4d9c33ef481e459a7 not available from file://$TESTTMP/largefiles-repo (glob)
+  large: largefile 2e000fa7e85759c7f4c254d4d9c33ef481e459a7 not available from file:/*/$TESTTMP/largefiles-repo (glob)
   abort: missing largefile 'large' from revision d4892ec57ce212905215fad1d9018f56b99202ad
   [255]
 
--- a/tests/test-lock-badness.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-lock-badness.t	Thu Apr 17 19:36:17 2014 -0400
@@ -1,4 +1,7 @@
-#if unix-permissions no-root
+#if unix-permissions no-root no-windows
+
+Prepare
+
   $ hg init a
   $ echo a > a/a
   $ hg -R a ci -A -m a
@@ -8,14 +11,30 @@
   updating to branch default
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 
+One process waiting for another
+
+  $ cat > hooks.py << EOF
+  > import time
+  > def sleepone(**x): time.sleep(1)
+  > def sleephalf(**x): time.sleep(0.5)
+  > EOF
   $ echo b > b/b
-  $ hg -R b ci -A -m b
+  $ hg -R b ci -A -m b --config hooks.precommit="python:`pwd`/hooks.py:sleepone" > stdout &
+  $ hg -R b up -q --config hooks.pre-update="python:`pwd`/hooks.py:sleephalf"
+  waiting for lock on working directory of b held by '*:*' (glob)
+  got lock after ? seconds (glob)
+  warning: ignoring unknown working parent d2ae7f538514!
+  $ wait
+  $ cat stdout
   adding b
 
+Pushing to a local read-only repo that can't be locked
+
   $ chmod 100 a/.hg/store
 
   $ hg -R b push a
   pushing to a
+  searching for changes
   abort: could not lock repository a: Permission denied
   [255]
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-merge-criss-cross.t	Thu Apr 17 19:36:17 2014 -0400
@@ -0,0 +1,267 @@
+Criss cross merging
+
+  $ hg init criss-cross
+  $ cd criss-cross
+  $ echo '0 base' > f1
+  $ echo '0 base' > f2
+  $ hg ci -Aqm '0 base'
+
+  $ echo '1 first change' > f1
+  $ hg ci -m '1 first change f1'
+
+  $ hg up -qr0
+  $ echo '2 first change' > f2
+  $ hg ci -qm '2 first change f2'
+
+  $ hg merge -qr 1
+  $ hg ci -m '3 merge'
+
+  $ hg up -qr2
+  $ hg merge -qr1
+  $ hg ci -qm '4 merge'
+
+  $ echo '5 second change' > f1
+  $ hg ci -m '5 second change f1'
+
+  $ hg up -r3
+  note: using 0f6b37dbe527 as ancestor of adfe50279922 and cf89f02107e5
+        alternatively, use --config merge.preferancestor=40663881a6dd
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ echo '6 second change' > f2
+  $ hg ci -m '6 second change f2'
+
+  $ hg log -G
+  @  changeset:   6:3b08d01b0ab5
+  |  tag:         tip
+  |  parent:      3:cf89f02107e5
+  |  user:        test
+  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  summary:     6 second change f2
+  |
+  | o  changeset:   5:adfe50279922
+  | |  user:        test
+  | |  date:        Thu Jan 01 00:00:00 1970 +0000
+  | |  summary:     5 second change f1
+  | |
+  | o    changeset:   4:7d3e55501ae6
+  | |\   parent:      2:40663881a6dd
+  | | |  parent:      1:0f6b37dbe527
+  | | |  user:        test
+  | | |  date:        Thu Jan 01 00:00:00 1970 +0000
+  | | |  summary:     4 merge
+  | | |
+  o---+  changeset:   3:cf89f02107e5
+  | | |  parent:      2:40663881a6dd
+  |/ /   parent:      1:0f6b37dbe527
+  | |    user:        test
+  | |    date:        Thu Jan 01 00:00:00 1970 +0000
+  | |    summary:     3 merge
+  | |
+  | o  changeset:   2:40663881a6dd
+  | |  parent:      0:40494bf2444c
+  | |  user:        test
+  | |  date:        Thu Jan 01 00:00:00 1970 +0000
+  | |  summary:     2 first change f2
+  | |
+  o |  changeset:   1:0f6b37dbe527
+  |/   user:        test
+  |    date:        Thu Jan 01 00:00:00 1970 +0000
+  |    summary:     1 first change f1
+  |
+  o  changeset:   0:40494bf2444c
+     user:        test
+     date:        Thu Jan 01 00:00:00 1970 +0000
+     summary:     0 base
+  
+
+  $ hg merge -v --debug --tool internal:dump 5
+  note: using 0f6b37dbe527 as ancestor of 3b08d01b0ab5 and adfe50279922
+        alternatively, use --config merge.preferancestor=40663881a6dd
+    searching for copies back to rev 3
+  resolving manifests
+   branchmerge: True, force: False, partial: False
+   ancestor: 0f6b37dbe527, local: 3b08d01b0ab5+, remote: adfe50279922
+   f1: remote is newer -> g
+   f2: versions differ -> m
+    preserving f2 for resolve of f2
+  getting f1
+  updating: f1 1/2 files (50.00%)
+  updating: f2 2/2 files (100.00%)
+  picked tool 'internal:dump' for f2 (binary False symlink False)
+  merging f2
+  my f2@3b08d01b0ab5+ other f2@adfe50279922 ancestor f2@40494bf2444c
+  1 files updated, 0 files merged, 0 files removed, 1 files unresolved
+  use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
+  [1]
+
+  $ head *
+  ==> f1 <==
+  5 second change
+  
+  ==> f2 <==
+  6 second change
+  
+  ==> f2.base <==
+  0 base
+  
+  ==> f2.local <==
+  6 second change
+  
+  ==> f2.orig <==
+  6 second change
+  
+  ==> f2.other <==
+  2 first change
+
+  $ hg up -qC .
+  $ hg merge -v --tool internal:dump 5 --config merge.preferancestor="null 40663881 3b08d"
+  note: using 40663881a6dd as ancestor of 3b08d01b0ab5 and adfe50279922
+        alternatively, use --config merge.preferancestor=0f6b37dbe527
+  resolving manifests
+  merging f1
+  0 files updated, 0 files merged, 0 files removed, 1 files unresolved
+  use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
+  [1]
+
+Redo merge with merge.preferancestor="*" to enable bid merge
+
+  $ rm f*
+  $ hg up -qC .
+  $ hg merge -v --debug --tool internal:dump 5 --config merge.preferancestor="*"
+  
+  calculating bids for ancestor 0f6b37dbe527
+    searching for copies back to rev 3
+  resolving manifests
+   branchmerge: True, force: False, partial: False
+   ancestor: 0f6b37dbe527, local: 3b08d01b0ab5+, remote: adfe50279922
+   f1: g
+   f2: m
+  
+  calculating bids for ancestor 40663881a6dd
+    searching for copies back to rev 3
+  resolving manifests
+   branchmerge: True, force: False, partial: False
+   ancestor: 40663881a6dd, local: 3b08d01b0ab5+, remote: adfe50279922
+   f1: m
+   f2: k
+  
+  auction for merging merge bids
+   f1: picking 'get' action
+   f2: picking 'keep' action
+  end of auction
+  
+   f1: remote is newer -> g
+   f2: keep -> k
+  getting f1
+  updating: f1 1/1 files (100.00%)
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+
+  $ head *
+  ==> f1 <==
+  5 second change
+  
+  ==> f2 <==
+  6 second change
+
+
+The other way around:
+
+  $ hg up -C -r5
+  note: using 0f6b37dbe527 as ancestor of 3b08d01b0ab5 and adfe50279922
+        alternatively, use --config merge.preferancestor=40663881a6dd
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg merge -v --debug --config merge.preferancestor="*"
+  
+  calculating bids for ancestor 0f6b37dbe527
+    searching for copies back to rev 3
+  resolving manifests
+   branchmerge: True, force: False, partial: False
+   ancestor: 0f6b37dbe527, local: adfe50279922+, remote: 3b08d01b0ab5
+   f1: k
+   f2: m
+  
+  calculating bids for ancestor 40663881a6dd
+    searching for copies back to rev 3
+  resolving manifests
+   branchmerge: True, force: False, partial: False
+   ancestor: 40663881a6dd, local: adfe50279922+, remote: 3b08d01b0ab5
+   f1: m
+   f2: g
+  
+  auction for merging merge bids
+   f1: picking 'keep' action
+   f2: picking 'get' action
+  end of auction
+  
+   f1: keep -> k
+   f2: remote is newer -> g
+  getting f2
+  updating: f2 1/1 files (100.00%)
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+
+  $ head *
+  ==> f1 <==
+  5 second change
+  
+  ==> f2 <==
+  6 second change
+
+Verify how the output looks and how verbose it is:
+
+  $ hg up -qC
+  $ hg merge --config merge.preferancestor="*"
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+
+  $ hg up -qC
+  $ hg merge -v --config merge.preferancestor="*"
+  
+  calculating bids for ancestor 0f6b37dbe527
+  resolving manifests
+  
+  calculating bids for ancestor 40663881a6dd
+  resolving manifests
+  
+  auction for merging merge bids
+   f1: picking 'get' action
+   f2: picking 'keep' action
+  end of auction
+  
+  getting f1
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+
+  $ hg up -qC
+  $ hg merge -v --debug --config merge.preferancestor="*"
+  
+  calculating bids for ancestor 0f6b37dbe527
+    searching for copies back to rev 3
+  resolving manifests
+   branchmerge: True, force: False, partial: False
+   ancestor: 0f6b37dbe527, local: 3b08d01b0ab5+, remote: adfe50279922
+   f1: g
+   f2: m
+  
+  calculating bids for ancestor 40663881a6dd
+    searching for copies back to rev 3
+  resolving manifests
+   branchmerge: True, force: False, partial: False
+   ancestor: 40663881a6dd, local: 3b08d01b0ab5+, remote: adfe50279922
+   f1: m
+   f2: k
+  
+  auction for merging merge bids
+   f1: picking 'get' action
+   f2: picking 'keep' action
+  end of auction
+  
+   f1: remote is newer -> g
+   f2: keep -> k
+  getting f1
+  updating: f1 1/1 files (100.00%)
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+
+  $ cd ..
--- a/tests/test-merge-types.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-merge-types.t	Thu Apr 17 19:36:17 2014 -0400
@@ -268,6 +268,7 @@
   merging b
   warning: conflicts during merge.
   merging b incomplete! (edit conflicts, then use 'hg resolve --mark')
+  warning: cannot merge flags for c
   merging d
   warning: internal:merge cannot merge symlinks for d
   merging d incomplete! (edit conflicts, then use 'hg resolve --mark')
@@ -328,6 +329,7 @@
   merging b
   warning: conflicts during merge.
   merging b incomplete! (edit conflicts, then use 'hg resolve --mark')
+  warning: cannot merge flags for c
   merging d
   warning: internal:merge cannot merge symlinks for d
   merging d incomplete! (edit conflicts, then use 'hg resolve --mark')
@@ -355,7 +357,7 @@
   2
   >>>>>>> other
   $ tellmeabout c
-  c is a plain file with content:
+  c is an executable file with content:
   x
   $ tellmeabout d
   d is an executable file with content:
--- a/tests/test-minirst.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-minirst.py	Thu Apr 17 19:36:17 2014 -0400
@@ -244,3 +244,14 @@
 print table
 
 debugformats('table', table)
+
+data = [['s', 'long', 'line\ngoes on here'],
+        ['', 'xy', 'tried to fix here\n        by indenting']]
+
+rst = minirst.maketable(data, 1, False)
+table = ''.join(rst)
+
+print table
+
+debugformats('table+nl', table)
+
--- a/tests/test-minirst.py.out	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-minirst.py.out	Thu Apr 17 19:36:17 2014 -0400
@@ -773,3 +773,34 @@
 </table>
 ----------------------------------------------------------------------
 
+ = ==== ======================================
+ s long line goes on here                     
+   xy   tried to fix here by indenting        
+ = ==== ======================================
+
+== table+nl ==
+60 column format:
+----------------------------------------------------------------------
+ s long line goes on here
+   xy   tried to fix here by indenting
+----------------------------------------------------------------------
+
+30 column format:
+----------------------------------------------------------------------
+ s long line goes on here
+   xy   tried to fix here by
+        indenting
+----------------------------------------------------------------------
+
+html format:
+----------------------------------------------------------------------
+<table>
+<tr><td>s</td>
+<td>long</td>
+<td>line goes on here</td></tr>
+<tr><td></td>
+<td>xy</td>
+<td>tried to fix here by indenting</td></tr>
+</table>
+----------------------------------------------------------------------
+
--- a/tests/test-module-imports.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-module-imports.t	Thu Apr 17 19:36:17 2014 -0400
@@ -22,18 +22,20 @@
 hidden by deduplication algorithm in the cycle detector, so fixing
 these may expose other cycles.
 
-  $ hg locate 'mercurial/**.py' | xargs python "$import_checker"
-  mercurial/dispatch.py mixed stdlib and relative imports:
-     commands, error, extensions, fancyopts, hg, hook, util
-  mercurial/fileset.py mixed stdlib and relative imports:
-     error, merge, parser, util
-  mercurial/revset.py mixed stdlib and relative imports:
-     discovery, error, hbisect, parser, phases, util
-  mercurial/templater.py mixed stdlib and relative imports:
-     config, error, parser, templatefilters, util
-  mercurial/ui.py mixed stdlib and relative imports:
-     config, error, formatter, scmutil, util
-  Import cycle: mercurial.cmdutil -> mercurial.subrepo -> mercurial.cmdutil
-  Import cycle: mercurial.repoview -> mercurial.revset -> mercurial.repoview
-  Import cycle: mercurial.fileset -> mercurial.merge -> mercurial.subrepo -> mercurial.match -> mercurial.fileset
-  Import cycle: mercurial.filemerge -> mercurial.match -> mercurial.fileset -> mercurial.merge -> mercurial.filemerge
+  $ hg locate 'mercurial/**.py' | sed 's-\\-/-g' | xargs python "$import_checker"
+  mercurial/dispatch.py mixed imports
+     stdlib:    commands
+     relative:  error, extensions, fancyopts, hg, hook, util
+  mercurial/fileset.py mixed imports
+     stdlib:    parser
+     relative:  error, merge, util
+  mercurial/revset.py mixed imports
+     stdlib:    parser
+     relative:  discovery, error, hbisect, phases, util
+  mercurial/templater.py mixed imports
+     stdlib:    parser
+     relative:  config, error, templatefilters, templatekw, util
+  mercurial/ui.py mixed imports
+     stdlib:    formatter
+     relative:  config, error, scmutil, util
+  Import cycle: mercurial.cmdutil -> mercurial.context -> mercurial.subrepo -> mercurial.cmdutil -> mercurial.cmdutil
--- a/tests/test-mq-guards.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-mq-guards.t	Thu Apr 17 19:36:17 2014 -0400
@@ -441,7 +441,7 @@
   \x1b[0;31;1mb.patch\x1b[0m (esc)
 
 
-excercise cornercases in "qselect --reapply"
+exercise corner cases in "qselect --reapply"
 
   $ hg qpop -a
   popping c.patch
--- a/tests/test-mq-subrepo.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-mq-subrepo.t	Thu Apr 17 19:36:17 2014 -0400
@@ -94,32 +94,32 @@
 handle subrepos safely on qnew
 
   $ mkrepo repo-2499-qnew
-  $ testadd qnew -m0 0.diff
+  $ testadd qnew -X path:no-effect -m0 0.diff
   adding a
   % abort when adding .hgsub w/dirty subrepo
   A .hgsub
   A sub/a
-  % qnew -m0 0.diff
+  % qnew -X path:no-effect -m0 0.diff
   abort: uncommitted changes in subrepository sub
   [255]
   % update substate when adding .hgsub w/clean updated subrepo
   A .hgsub
-  % qnew -m0 0.diff
+  % qnew -X path:no-effect -m0 0.diff
   path sub
    source   sub
    revision b2fdb12cd82b021c3b7053d67802e77b6eeaee31
 
-  $ testmod qnew -m1 1.diff
+  $ testmod qnew --cwd .. -R repo-2499-qnew -X path:no-effect -m1 1.diff
   adding a
   % abort when modifying .hgsub w/dirty subrepo
   M .hgsub
   A sub2/a
-  % qnew -m1 1.diff
+  % qnew --cwd .. -R repo-2499-qnew -X path:no-effect -m1 1.diff
   abort: uncommitted changes in subrepository sub2
   [255]
   % update substate when modifying .hgsub w/clean updated subrepo
   M .hgsub
-  % qnew -m1 1.diff
+  % qnew --cwd .. -R repo-2499-qnew -X path:no-effect -m1 1.diff
   path sub
    source   sub
    revision b2fdb12cd82b021c3b7053d67802e77b6eeaee31
@@ -407,12 +407,12 @@
   $ cat .hgsubstate
   b6f6e9c41f3dfd374a6d2ed4535c87951cf979cf sub
   $ hg diff -c tip
-  diff -r f499373e340c -r b20ffac88564 .hgsub
+  diff -r f499373e340c -r f69e96d86e75 .hgsub
   --- /dev/null
   +++ b/.hgsub
   @@ -0,0 +1,1 @@
   +sub = sub
-  diff -r f499373e340c -r b20ffac88564 .hgsubstate
+  diff -r f499373e340c -r f69e96d86e75 .hgsubstate
   --- /dev/null
   +++ b/.hgsubstate
   @@ -0,0 +1,1 @@
@@ -423,22 +423,46 @@
   # User test
   # Date 0 0
   
-  diff -r f499373e340c -r b20ffac88564 .hgsub
+  diff -r f499373e340c -r f69e96d86e75 .hgsub
   --- /dev/null
   +++ b/.hgsub
   @@ -0,0 +1,1 @@
   +sub = sub
-  diff -r f499373e340c -r b20ffac88564 .hgsubstate
+  diff -r f499373e340c -r f69e96d86e75 .hgsubstate
   --- /dev/null
   +++ b/.hgsubstate
   @@ -0,0 +1,1 @@
   +b6f6e9c41f3dfd374a6d2ed4535c87951cf979cf sub
+  $ hg parents --template '{node}\n'
+  f69e96d86e75a6d4fd88285dc9697acb23951041
+  $ hg parents --template '{files}\n'
+  .hgsub .hgsubstate
+
+check also whether qnew not including ".hgsubstate" explicitly produces
+the same result (in node hash) as one including it.
+
+  $ hg qpop -a -q
+  patch queue now empty
+  $ hg qdelete import-at-qnew
+  $ echo 'sub = sub' > .hgsub
+  $ hg add .hgsub
+  $ rm -f .hgsubstate
+  $ hg qnew -u test -d '0 0' import-at-qnew
+  $ hg parents --template '{node}\n'
+  f69e96d86e75a6d4fd88285dc9697acb23951041
+  $ hg parents --template '{files}\n'
+  .hgsub .hgsubstate
+
+check whether qrefresh imports updated .hgsubstate correctly
+
   $ hg qpop
   popping import-at-qnew
   patch queue now empty
   $ hg qpush
   applying import-at-qnew
   now at: import-at-qnew
+  $ hg parents --template '{files}\n'
+  .hgsub .hgsubstate
 
   $ hg qnew import-at-qrefresh
   $ echo sb > sub/sb
@@ -450,7 +474,7 @@
   $ cat .hgsubstate
   88ac1bef5ed43b689d1d200b59886b675dec474b sub
   $ hg diff -c tip
-  diff -r 44f846335325 -r b3e8c5fa3aaa .hgsubstate
+  diff -r 05b056bb9c8c -r d987bec230f4 .hgsubstate
   --- a/.hgsubstate
   +++ b/.hgsubstate
   @@ -1,1 +1,1 @@
@@ -460,20 +484,22 @@
   # HG changeset patch
   # Date 0 0
   # User test
-  # Parent 44f846335325209be6be35dc2c9a4be107278c09
+  # Parent 05b056bb9c8c05ff15258b84fd42ab3527271033
   
-  diff -r 44f846335325 .hgsubstate
+  diff -r 05b056bb9c8c .hgsubstate
   --- a/.hgsubstate
   +++ b/.hgsubstate
   @@ -1,1 +1,1 @@
   -b6f6e9c41f3dfd374a6d2ed4535c87951cf979cf sub
   +88ac1bef5ed43b689d1d200b59886b675dec474b sub
+  $ hg parents --template '{files}\n'
+  .hgsubstate
 
   $ hg qrefresh -u test -d '0 0'
   $ cat .hgsubstate
   88ac1bef5ed43b689d1d200b59886b675dec474b sub
   $ hg diff -c tip
-  diff -r 44f846335325 -r b3e8c5fa3aaa .hgsubstate
+  diff -r 05b056bb9c8c -r d987bec230f4 .hgsubstate
   --- a/.hgsubstate
   +++ b/.hgsubstate
   @@ -1,1 +1,1 @@
@@ -483,14 +509,16 @@
   # HG changeset patch
   # Date 0 0
   # User test
-  # Parent 44f846335325209be6be35dc2c9a4be107278c09
+  # Parent 05b056bb9c8c05ff15258b84fd42ab3527271033
   
-  diff -r 44f846335325 .hgsubstate
+  diff -r 05b056bb9c8c .hgsubstate
   --- a/.hgsubstate
   +++ b/.hgsubstate
   @@ -1,1 +1,1 @@
   -b6f6e9c41f3dfd374a6d2ed4535c87951cf979cf sub
   +88ac1bef5ed43b689d1d200b59886b675dec474b sub
+  $ hg parents --template '{files}\n'
+  .hgsubstate
 
   $ hg update -C tip
   0 files updated, 0 files merged, 0 files removed, 0 files unresolved
@@ -536,6 +564,37 @@
   @@ -1,1 +1,1 @@
   -b6f6e9c41f3dfd374a6d2ed4535c87951cf979cf sub
   +88ac1bef5ed43b689d1d200b59886b675dec474b sub
+  $ hg parents --template '{files}\n'
+  .hgsubstate
+
+check whether qrefresh not including ".hgsubstate" explicitly gives
+the same result (in node hash) as one including it.
+
+  $ hg update -C -q 0
+  $ hg qpop -a -q
+  patch queue now empty
+  $ hg qnew -u test -d '0 0' add-hgsub-at-qrefresh
+  $ echo 'sub = sub' > .hgsub
+  $ echo > .hgsubstate
+  $ hg add .hgsub .hgsubstate
+  $ hg qrefresh -u test -d '0 0'
+  $ hg parents --template '{node}\n'
+  7c48c35501aae6770ed9c2517014628615821a8e
+  $ hg parents --template '{files}\n'
+  .hgsub .hgsubstate
+
+  $ hg qpop -a -q
+  patch queue now empty
+  $ hg qdelete add-hgsub-at-qrefresh
+  $ hg qnew -u test -d '0 0' add-hgsub-at-qrefresh
+  $ echo 'sub = sub' > .hgsub
+  $ hg add .hgsub
+  $ rm -f .hgsubstate
+  $ hg qrefresh -u test -d '0 0'
+  $ hg parents --template '{node}\n'
+  7c48c35501aae6770ed9c2517014628615821a8e
+  $ hg parents --template '{files}\n'
+  .hgsub .hgsubstate
 
   $ cd ..
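
Why the node hashes checked above can match: a changeset id is
content-addressed. A rough sketch (plain Python, not Mercurial's revlog code;
the function name and sample text are illustrative) of the hashing scheme,
sha1(min(p1, p2) + max(p1, p2) + text):

    import hashlib

    nullid = b"\x00" * 20  # node of the null revision

    def changesetnode(p1, p2, text):
        # Two commits with the same parents and the same changelog text get
        # the same node, no matter how .hgsubstate ended up in the commit.
        a, b = sorted([p1, p2])
        return hashlib.sha1(a + b + text).hexdigest()

    print(changesetnode(nullid, nullid, b"...changelog entry..."))
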
 
--- a/tests/test-newbranch.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-newbranch.t	Thu Apr 17 19:36:17 2014 -0400
@@ -210,7 +210,7 @@
   abort: branch foobar not found
   [255]
 
-Fastforward merge:
+Fast-forward merge:
 
   $ hg branch ff
   marked working directory as branch ff
--- a/tests/test-obsolete-divergent.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-obsolete-divergent.t	Thu Apr 17 19:36:17 2014 -0400
@@ -247,7 +247,7 @@
       392fd25390da
   $ hg log -r 'divergent()'
 
-Even when subsequente rewriting happen
+Even when subsequent rewriting happens
 
   $ mkcommit A_3
   created new head
--- a/tests/test-obsolete.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-obsolete.t	Thu Apr 17 19:36:17 2014 -0400
@@ -81,6 +81,12 @@
   $ hg debugobsolete --flag 12 `getid original_c`  `getid new_c` -d '56 12'
   $ hg log -r 'hidden()' --template '{rev}:{node|short} {desc}\n' --hidden
   2:245bde4270cd add original_c
+  $ hg debugrevlog -cd
+  # rev p1rev p2rev start   end deltastart base   p1   p2 rawsize totalsize compression heads
+      0    -1    -1     0    59          0    0    0    0      58        58           0     1
+      1     0    -1    59   118         59   59    0    0      58       116           0     1
+      2     1    -1   118   204         59   59   59    0      76       192           0     1
+      3     1    -1   204   271        204  204   59    0      66       258           0     2
   $ hg debugobsolete
   245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C {'date': '56 12', 'user': 'test'}
 
@@ -884,4 +890,20 @@
   no changes found
   [1]
 
+Test that a local tag blocks a changeset from being hidden
 
+  $ hg tag -l visible -r 0 --hidden
+  $ hg log -G
+  @  changeset:   2:3816541e5485
+     tag:         tip
+     parent:      -1:000000000000
+     user:        test
+     date:        Thu Jan 01 00:00:00 1970 +0000
+     summary:     A
+  
+  x  changeset:   0:193e9254ce7e
+     tag:         visible
+     user:        test
+     date:        Thu Jan 01 00:00:00 1970 +0000
+     summary:     A
+  
--- a/tests/test-parseindex2.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-parseindex2.py	Thu Apr 17 19:36:17 2014 -0400
@@ -1,8 +1,13 @@
-"""This unit test tests parsers.parse_index2()."""
+"""This unit test primarily tests parsers.parse_index2().
+
+It also checks certain aspects of the parsers module as a whole.
+"""
 
 from mercurial import parsers
 from mercurial.node import nullid, nullrev
 import struct
+import subprocess
+import sys
 
 # original python implementation
 def gettype(q):
@@ -95,7 +100,70 @@
     index, chunkcache = parsers.parse_index2(data, inline)
     return list(index), chunkcache
 
+def importparsers(hexversion):
+    """Import mercurial.parsers with the given sys.hexversion."""
+    # The file parsers.c inspects sys.hexversion to determine the version
+    # of the currently-running Python interpreter, so we monkey-patch
+    # sys.hexversion to simulate using different versions.
+    code = ("import sys; sys.hexversion=%s; "
+            "import mercurial.parsers" % hexversion)
+    cmd = "python -c \"%s\"" % code
+    # We need to do these tests inside a subprocess because parser.c's
+    # version-checking code happens inside the module init function, and
+    # when using reload() to reimport an extension module, "The init function
+    # of extension modules is not called a second time"
+    # (from http://docs.python.org/2/library/functions.html?#reload).
+    p = subprocess.Popen(cmd, shell=True,
+                         stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+    return p.communicate()  # returns stdout, stderr
+
+def printhexfail(testnumber, hexversion, stdout, expected):
+    try:
+        hexstring = hex(hexversion)
+    except TypeError:
+        hexstring = None
+    print ("FAILED: version test #%s with Python %s and patched "
+           "sys.hexversion %r (%r):\n Expected %s but got:\n-->'%s'\n" %
+           (testnumber, sys.version_info, hexversion, hexstring, expected,
+            stdout))
+
+def testversionokay(testnumber, hexversion):
+    stdout, stderr = importparsers(hexversion)
+    if stdout:
+        printhexfail(testnumber, hexversion, stdout, expected="no stdout")
+
+def testversionfail(testnumber, hexversion):
+    stdout, stderr = importparsers(hexversion)
+    # We include versionerrortext to distinguish from other ImportErrors.
+    errtext = "ImportError: %s" % parsers.versionerrortext
+    if errtext not in stdout:
+        printhexfail(testnumber, hexversion, stdout,
+                     expected="stdout to contain %r" % errtext)
+
+def makehex(major, minor, micro):
+    return int("%x%02x%02x00" % (major, minor, micro), 16)
+
+def runversiontests():
+    """Check the version-detection logic when importing parsers."""
+    info = sys.version_info
+    major, minor, micro = info[0], info[1], info[2]
+    # Test same major-minor versions.
+    testversionokay(1, makehex(major, minor, micro))
+    testversionokay(2, makehex(major, minor, micro + 1))
+    # Test different major-minor versions.
+    testversionfail(3, makehex(major + 1, minor, micro))
+    testversionfail(4, makehex(major, minor + 1, micro))
+    testversionfail(5, "'foo'")
+
 def runtest() :
+    # Only test the version-detection logic if it is present.
+    try:
+        parsers.versionerrortext
+    except AttributeError:
+        pass
+    else:
+        runversiontests()
+
     # Check that parse_index2() raises TypeError on bad arguments.
     try:
         parse_index2(0, True)
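
For context on makehex() above: sys.hexversion packs the interpreter version
as major << 24 | minor << 16 | micro << 8 | (releaselevel << 4 | serial), and
makehex() builds the same layout with a zero low byte. A minimal sketch:

    import sys

    def makehex(major, minor, micro):
        # same packing as the test helper: 0xMMmmuu00
        return int("%x%02x%02x00" % (major, minor, micro), 16)

    # e.g. Python 2.7.6 final reports sys.hexversion == 0x020706f0; only the
    # low releaselevel/serial byte differs from makehex(2, 7, 6) == 0x02070600
    assert sys.hexversion >> 8 == makehex(*sys.version_info[:3]) >> 8
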
--- a/tests/test-patchbomb.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-patchbomb.t	Thu Apr 17 19:36:17 2014 -0400
@@ -1805,7 +1805,9 @@
   +b
   
 
-test single flag for single patch:
+test single flag for single patch (and no warning when not mailing dirty rev):
+  $ hg up -qr1
+  $ echo dirt > a
   $ hg email --date '1970-1-1 0:1' -n --flag fooFlag -f quux -t foo -c bar -s test \
   >  -r 2
   this patch series consists of 1 patches.
@@ -1839,9 +1841,10 @@
   +c
   
 
-test single flag for multiple patches:
+test single flag for multiple patches (and warning when mailing dirty rev):
   $ hg email --date '1970-1-1 0:1' -n --flag fooFlag -f quux -t foo -c bar -s test \
   >  -r 0:1
+  warning: working directory has uncommitted changes
   this patch series consists of 2 patches.
   
   
@@ -1919,6 +1922,8 @@
   @@ -0,0 +1,1 @@
   +b
   
+  $ hg revert --no-b a
+  $ hg up -q
 
 test multiple flags for single patch:
   $ hg email --date '1970-1-1 0:1' -n --flag fooFlag --flag barFlag -f quux -t foo \
--- a/tests/test-permissions.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-permissions.t	Thu Apr 17 19:36:17 2014 -0400
@@ -1,4 +1,4 @@
-#ifdef unix-permissions no-root
+#if unix-permissions no-root
 
   $ hg init t
   $ cd t
--- a/tests/test-phases-exchange.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-phases-exchange.t	Thu Apr 17 19:36:17 2014 -0400
@@ -753,6 +753,69 @@
   
 
 
+Bare push with next changeset and common changeset needing sync (issue3575)
+
+(reset some state on the remote repo so as not to confuse other tests)
+
+  $ hg -R ../alpha --config extensions.strip= strip --no-backup 967b449fbc94
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ hg phase --force --draft b740e3e5c05d 967b449fbc94
+  $ hg push -fv ../alpha
+  pushing to ../alpha
+  searching for changes
+  1 changesets found
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files (+1 heads)
+  $ hgph
+  o  9 public a-H - 967b449fbc94
+  |
+  | o  8 public a-F - b740e3e5c05d
+  | |
+  | o  7 public a-E - e9f537e46dea
+  | |
+  +---o  6 public n-B - 145e75495359
+  | |
+  o |  5 public n-A - d6bcb4f74035
+  | |
+  | o  4 public a-D - b555f63b6063
+  | |
+  | o  3 public a-C - 54acac6f23ab
+  | |
+  o |  2 public b-A - f54f1bb90ff3
+  |/
+  o  1 public a-B - 548a3d25dbf0
+  |
+  o  0 public a-A - 054250a37db4
+  
+
+  $ hg -R ../alpha update 967b449fbc94 #for later test consistency
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hgph -R ../alpha
+  @  10 public a-H - 967b449fbc94
+  |
+  | o  9 draft a-G - 3e27b6f1eee1
+  | |
+  | o  8 public a-F - b740e3e5c05d
+  | |
+  | o  7 public a-E - e9f537e46dea
+  | |
+  +---o  6 public n-B - 145e75495359
+  | |
+  o |  5 public n-A - d6bcb4f74035
+  | |
+  o |  4 public b-A - f54f1bb90ff3
+  | |
+  | o  3 public a-D - b555f63b6063
+  | |
+  | o  2 public a-C - 54acac6f23ab
+  |/
+  o  1 public a-B - 548a3d25dbf0
+  |
+  o  0 public a-A - 054250a37db4
+  
+
 Discovery locally secret changeset on a remote repository:
 
 - should make it non-secret
@@ -845,10 +908,9 @@
   o  0 public a-A - 054250a37db4
   
 
-pushing a locally public and draft changesets remotly secret should make them
+pushing locally public and draft changesets that are remotely secret should make them
 appear on the remote side.
 
-
   $ hg -R ../mu phase --secret --force 967b449fbc94
   $ hg push -r 435b5d83910c ../mu
   pushing to ../mu
--- a/tests/test-phases.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-phases.t	Thu Apr 17 19:36:17 2014 -0400
@@ -148,7 +148,7 @@
 (Issue3303)
 Check that remote secret changeset are ignore when checking creation of remote heads
 
-We add a secret head into the push destination.  This secreat head shadow a
+We add a secret head into the push destination. This secret head shadows a
 visible shared between the initial repo and the push destination.
 
   $ hg up -q 4 # B'
@@ -156,8 +156,8 @@
   $ hg phase .
   5: secret
 
-# We now try to push a new public changeset that descend from the common public
-# head shadowed by the remote secret head.
+We now try to push a new public changeset that descends from the common public
+head shadowed by the remote secret head.
 
   $ cd ../initialrepo
   $ hg up -q 6 #B'
--- a/tests/test-propertycache.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-propertycache.py	Thu Apr 17 19:36:17 2014 -0400
@@ -39,8 +39,8 @@
 mercurial.localrepo.localrepository.testcachedunfifoobar = testcachedunfifoobar
 
 
-# create an empty repo. and instanciate it. It is important to run
-# those test on the real object to detect regression.
+# Create an empty repo and instantiate it. It is important to run
+# these tests on the real object to detect regression.
 repopath = os.path.join(os.environ['TESTTMP'], 'repo')
 assert subprocess.call(['hg', 'init', repopath]) == 0
 ui = uimod.ui()
--- a/tests/test-push-http.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-push-http.t	Thu Apr 17 19:36:17 2014 -0400
@@ -68,7 +68,6 @@
   remote: adding file changes
   remote: added 1 changesets with 1 changes to 1 files
   remote: changegroup hook: HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_URL=remote:http:*: (glob)
-  remote: pushkey hook: HG_KEY=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NAMESPACE=phases HG_NEW=0 HG_OLD=1 HG_RET=1
   % serve errors
   $ hg rollback
   repository tip rolled back to revision 0 (undo serve)
@@ -85,7 +84,6 @@
   remote: adding file changes
   remote: added 1 changesets with 1 changes to 1 files
   remote: changegroup hook: HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_URL=remote:http:*: (glob)
-  remote: pushkey hook: HG_KEY=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NAMESPACE=phases HG_NEW=0 HG_OLD=1 HG_RET=1
   % serve errors
   $ hg rollback
   repository tip rolled back to revision 0 (undo serve)
@@ -102,7 +100,6 @@
   remote: adding file changes
   remote: added 1 changesets with 1 changes to 1 files
   remote: changegroup hook: HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_URL=remote:http:*: (glob)
-  remote: pushkey hook: HG_KEY=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NAMESPACE=phases HG_NEW=0 HG_OLD=1 HG_RET=1
   % serve errors
   $ hg rollback
   repository tip rolled back to revision 0 (undo serve)
@@ -123,8 +120,6 @@
   remote: adding manifests
   remote: adding file changes
   remote: added 1 changesets with 1 changes to 1 files
-  remote: prepushkey hook: HG_KEY=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NAMESPACE=phases HG_NEW=0 HG_OLD=1
-  updating ba677d0156c1 to public failed!
   % serve errors
 
 expect phase change success
@@ -134,7 +129,6 @@
   pushing to http://localhost:$HGPORT/
   searching for changes
   no changes found
-  remote: prepushkey hook: HG_KEY=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NAMESPACE=phases HG_NEW=0 HG_OLD=1
   % serve errors
   [1]
   $ hg rollback
--- a/tests/test-push-warn.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-push-warn.t	Thu Apr 17 19:36:17 2014 -0400
@@ -22,6 +22,7 @@
   $ hg push ../a
   pushing to ../a
   searching for changes
+  remote has heads on branch 'default' that are not known locally: 1c9246a22a0a
   abort: push creates new remote head 1e108cc5548c!
   (pull and merge or see "hg help push" for details about pushing new heads)
   [255]
@@ -35,6 +36,7 @@
   query 2; still undecided: 1, sample size is: 1
   2 total queries
   listing keys for "bookmarks"
+  remote has heads on branch 'default' that are not known locally: 1c9246a22a0a
   new remote heads on branch 'default':
    1e108cc5548c
   abort: push creates new remote head 1e108cc5548c!
@@ -351,7 +353,7 @@
   adding file changes
   added 1 changesets with 1 changes to 1 files
 
-Pushing muliple headed new branch:
+Pushing a multi-headed new branch:
 
   $ echo 14 > foo
   $ hg -q branch f
@@ -405,6 +407,7 @@
   $ hg -R i push h
   pushing to h
   searching for changes
+  remote has heads on branch 'default' that are not known locally: ce4212fc8847
   abort: push creates new remote head 97bd0c84d346!
   (pull and merge or see "hg help push" for details about pushing new heads)
   [255]
--- a/tests/test-qrecord.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-qrecord.t	Thu Apr 17 19:36:17 2014 -0400
@@ -62,6 +62,7 @@
                             list
       --amend               amend the parent of the working dir
    -s --secret              use the secret phase for committing
+   -e --edit                further edit commit message already specified
    -I --include PATTERN [+] include names matching the given patterns
    -X --exclude PATTERN [+] exclude names matching the given patterns
    -m --message TEXT        use text as commit message
--- a/tests/test-rebase-collapse.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-rebase-collapse.t	Thu Apr 17 19:36:17 2014 -0400
@@ -632,8 +632,8 @@
   merging a and d to d
   merging b and e to e
   merging c and f to f
+  merging f and c to c
   merging e and g to g
-  merging f and c to c
   saved backup bundle to $TESTTMP/copies/.hg/strip-backup/*-backup.hg (glob)
   $ hg st
   $ hg st --copies --change tip
--- a/tests/test-rebase-conflicts.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-rebase-conflicts.t	Thu Apr 17 19:36:17 2014 -0400
@@ -210,7 +210,7 @@
      summary:     added default.txt
   
   $ hg rebase -s9 -d2 --debug # use debug to really check merge base used
-  rebase onto 2 starting from [<changectx e31216eec445>]
+  rebase onto 2 starting from e31216eec445
   rebasing: 9:e31216eec445 5/6 changesets (83.33%)
    future parents are 2 and -1
   rebase status stored
--- a/tests/test-rebase-named-branches.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-rebase-named-branches.t	Thu Apr 17 19:36:17 2014 -0400
@@ -239,6 +239,17 @@
   |/
   @  0: 'A'
   
+
+Reopen branch by rebase
+
+  $ hg up -qr3
+  $ hg branch -q b
+  $ hg ci -m 'create b'
+  $ hg ci -m 'close b' --close
+  $ hg rebase -b 8 -d b
+  reopening closed branch head ea9de14a36c6
+  saved backup bundle to $TESTTMP/a1/.hg/strip-backup/*-backup.hg (glob)
+
   $ cd ..
 
 Rebase to other head on branch
--- a/tests/test-rebase-scenario-global.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-rebase-scenario-global.t	Thu Apr 17 19:36:17 2014 -0400
@@ -650,7 +650,7 @@
   o  0: 'A'
   
 
-Test that rebase is not confused by $CWD disapearing during rebase (issue 4121)
+Test that rebase is not confused by $CWD disappearing during rebase (issue 4121)
 
   $ cd ..
   $ hg init cwd-vanish
--- a/tests/test-record.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-record.t	Thu Apr 17 19:36:17 2014 -0400
@@ -251,7 +251,8 @@
   $ echo 11 >> plain
   $ unset HGUSER
   $ hg record --config ui.username= -d '8 0' -m end plain
-  abort: no username supplied (see "hg help config")
+  abort: no username supplied
+  (use "hg config --edit" to set your username)
   [255]
 
 
--- a/tests/test-rename-dir-merge.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-rename-dir-merge.t	Thu Apr 17 19:36:17 2014 -0400
@@ -41,16 +41,16 @@
    ancestor: f9b20c0d4c51, local: ce36d17b18fb+, remote: 397f8b00a740
    a/a: other deleted -> r
    a/b: other deleted -> r
-   a/c: remote renamed directory to b/c -> d
    b/a: remote created -> g
    b/b: remote created -> g
+   b/c: remote directory rename - move from a/c -> dm
   removing a/a
   removing a/b
   updating: a/b 2/5 files (40.00%)
   getting b/a
   getting b/b
   updating: b/b 4/5 files (80.00%)
-  updating: a/c 5/5 files (100.00%)
+  updating: b/c 5/5 files (100.00%)
   moving a/c to b/c (glob)
   3 files updated, 0 files merged, 2 files removed, 0 files unresolved
   (branch merge, don't forget to commit)
@@ -88,8 +88,8 @@
   resolving manifests
    branchmerge: True, force: False, partial: False
    ancestor: f9b20c0d4c51, local: 397f8b00a740+, remote: ce36d17b18fb
-   None: local renamed directory to b/c -> d
-  updating:None 1/1 files (100.00%)
+   b/c: local directory rename - get from a/c -> dg
+  updating: b/c 1/1 files (100.00%)
   getting a/c to b/c
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   (branch merge, don't forget to commit)
--- a/tests/test-rename-merge1.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-rename-merge1.t	Thu Apr 17 19:36:17 2014 -0400
@@ -36,22 +36,22 @@
   resolving manifests
    branchmerge: True, force: False, partial: False
    ancestor: af1939970a1c, local: 044f8520aeeb+, remote: 85c198ef2f6c
-   a: remote moved to b -> m
+   a2: divergent renames -> dr
+   b: remote moved from a -> m
     preserving a for resolve of b
-   a2: divergent renames -> dr
    b2: remote created -> g
   removing a
   getting b2
   updating: b2 1/3 files (33.33%)
-  updating: a 2/3 files (66.67%)
+  updating: a2 2/3 files (66.67%)
+  note: possible conflict - a2 was renamed multiple times to:
+   c2
+   b2
+  updating: b 3/3 files (100.00%)
   picked tool 'internal:merge' for b (binary False symlink False)
   merging a and b to b
   my b@044f8520aeeb+ other b@85c198ef2f6c ancestor a@af1939970a1c
    premerge successful
-  updating: a2 3/3 files (100.00%)
-  note: possible conflict - a2 was renamed multiple times to:
-   c2
-   b2
   1 files updated, 1 files merged, 0 files removed, 0 files unresolved
   (branch merge, don't forget to commit)
 
--- a/tests/test-rename-merge2.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-rename-merge2.t	Thu Apr 17 19:36:17 2014 -0400
@@ -86,11 +86,12 @@
   resolving manifests
    branchmerge: True, force: False, partial: False
    ancestor: 924404dff337, local: e300d1c794ec+, remote: 4ce40f5aca24
-   a: remote copied to b -> m
+   a: keep -> k
+   b: remote copied from a -> m
     preserving a for resolve of b
    rev: versions differ -> m
     preserving rev for resolve of rev
-  updating: a 1/2 files (50.00%)
+  updating: b 1/2 files (50.00%)
   picked tool 'python ../merge' for b (binary False symlink False)
   merging a and b to b
   my b@e300d1c794ec+ other b@4ce40f5aca24 ancestor a@924404dff337
@@ -122,7 +123,7 @@
    branchmerge: True, force: False, partial: False
    ancestor: 924404dff337, local: 86a2aa42fc76+, remote: f4db7e329e71
    a: remote is newer -> g
-   b: local copied/moved to a -> m
+   b: local copied/moved from a -> m
     preserving b for resolve of b
    rev: versions differ -> m
     preserving rev for resolve of rev
@@ -159,12 +160,12 @@
   resolving manifests
    branchmerge: True, force: False, partial: False
    ancestor: 924404dff337, local: e300d1c794ec+, remote: bdb19105162a
-   a: remote moved to b -> m
+   b: remote moved from a -> m
     preserving a for resolve of b
    rev: versions differ -> m
     preserving rev for resolve of rev
   removing a
-  updating: a 1/2 files (50.00%)
+  updating: b 1/2 files (50.00%)
   picked tool 'python ../merge' for b (binary False symlink False)
   merging a and b to b
   my b@e300d1c794ec+ other b@bdb19105162a ancestor a@924404dff337
@@ -194,7 +195,7 @@
   resolving manifests
    branchmerge: True, force: False, partial: False
    ancestor: 924404dff337, local: 02963e448370+, remote: f4db7e329e71
-   b: local copied/moved to a -> m
+   b: local copied/moved from a -> m
     preserving b for resolve of b
    rev: versions differ -> m
     preserving rev for resolve of rev
@@ -335,6 +336,8 @@
   test L:um a b R:um a b W:       - 9  do merge with ancestor in a
   --------------
     searching for copies back to rev 1
+    unmatched files new in both:
+     b
   resolving manifests
    branchmerge: True, force: False, partial: False
    ancestor: 924404dff337, local: 62e7bf090eba+, remote: 49b6d8032493
@@ -403,6 +406,8 @@
   test L:nc a b R:up b   W:       - 12 merge b no ancestor
   --------------
     searching for copies back to rev 1
+    unmatched files new in both:
+     b
   resolving manifests
    branchmerge: True, force: False, partial: False
    ancestor: 924404dff337, local: 86a2aa42fc76+, remote: af30c7647fc7
@@ -431,6 +436,8 @@
   test L:up b   R:nm a b W:       - 13 merge b no ancestor
   --------------
     searching for copies back to rev 1
+    unmatched files new in both:
+     b
   resolving manifests
    branchmerge: True, force: False, partial: False
    ancestor: 924404dff337, local: 59318016310c+, remote: bdb19105162a
@@ -461,6 +468,8 @@
   test L:nc a b R:up a b W:       - 14 merge b no ancestor
   --------------
     searching for copies back to rev 1
+    unmatched files new in both:
+     b
   resolving manifests
    branchmerge: True, force: False, partial: False
    ancestor: 924404dff337, local: 86a2aa42fc76+, remote: 8dbce441892a
@@ -492,6 +501,8 @@
   test L:up b   R:nm a b W:       - 15 merge b no ancestor, remove a
   --------------
     searching for copies back to rev 1
+    unmatched files new in both:
+     b
   resolving manifests
    branchmerge: True, force: False, partial: False
    ancestor: 924404dff337, local: 59318016310c+, remote: bdb19105162a
@@ -522,6 +533,8 @@
   test L:nc a b R:up a b W:       - 16 get a, merge b no ancestor
   --------------
     searching for copies back to rev 1
+    unmatched files new in both:
+     b
   resolving manifests
    branchmerge: True, force: False, partial: False
    ancestor: 924404dff337, local: 86a2aa42fc76+, remote: 8dbce441892a
@@ -553,9 +566,12 @@
   test L:up a b R:nc a b W:       - 17 keep a, merge b no ancestor
   --------------
     searching for copies back to rev 1
+    unmatched files new in both:
+     b
   resolving manifests
    branchmerge: True, force: False, partial: False
    ancestor: 924404dff337, local: 0b76e65c8289+, remote: 4ce40f5aca24
+   a: keep -> k
    b: versions differ -> m
     preserving b for resolve of b
    rev: versions differ -> m
@@ -581,6 +597,8 @@
   test L:nm a b R:up a b W:       - 18 merge b no ancestor
   --------------
     searching for copies back to rev 1
+    unmatched files new in both:
+     b
   resolving manifests
    branchmerge: True, force: False, partial: False
    ancestor: 924404dff337, local: 02963e448370+, remote: 8dbce441892a
@@ -614,6 +632,8 @@
   test L:up a b R:nm a b W:       - 19 merge b no ancestor, prompt remove a
   --------------
     searching for copies back to rev 1
+    unmatched files new in both:
+     b
   resolving manifests
    branchmerge: True, force: False, partial: False
    ancestor: 924404dff337, local: 0b76e65c8289+, remote: bdb19105162a
@@ -654,12 +674,12 @@
   resolving manifests
    branchmerge: True, force: False, partial: False
    ancestor: 924404dff337, local: e300d1c794ec+, remote: 49b6d8032493
-   a: remote moved to b -> m
+   b: remote moved from a -> m
     preserving a for resolve of b
    rev: versions differ -> m
     preserving rev for resolve of rev
   removing a
-  updating: a 1/2 files (50.00%)
+  updating: b 1/2 files (50.00%)
   picked tool 'python ../merge' for b (binary False symlink False)
   merging a and b to b
   my b@e300d1c794ec+ other b@49b6d8032493 ancestor a@924404dff337
@@ -688,7 +708,7 @@
   resolving manifests
    branchmerge: True, force: False, partial: False
    ancestor: 924404dff337, local: 62e7bf090eba+, remote: f4db7e329e71
-   b: local copied/moved to a -> m
+   b: local copied/moved from a -> m
     preserving b for resolve of b
    rev: versions differ -> m
     preserving rev for resolve of rev
@@ -726,7 +746,7 @@
   resolving manifests
    branchmerge: True, force: False, partial: False
    ancestor: 924404dff337, local: 02963e448370+, remote: 2b958612230f
-   b: local copied/moved to a -> m
+   b: local copied/moved from a -> m
     preserving b for resolve of b
    c: remote created -> g
    rev: versions differ -> m
@@ -752,3 +772,201 @@
   
 
   $ cd ..
+
+
+Systematic and terse testing of merge merges and ancestor calculation:
+
+Expected result:
+
+\  a  m1  m2  dst
+0  -   f   f   f   "versions differ"
+1  f   g   g   g   "versions differ"
+2  f   f   f   f   "versions differ"
+3  f   f   g  f+g  "remote copied to " + f
+4  f   f   g   g   "remote moved to " + f
+5  f   g   f  f+g  "local copied to " + f2
+6  f   g   f   g   "local moved to " + f2
+7  -  (f)  f   f   "remote differs from untracked local"
+8  f  (f)  f   f   "remote differs from untracked local"
+
+  $ hg init ancestortest
+  $ cd ancestortest
+  $ for x in 1 2 3 4 5 6 8; do mkdir $x; echo a > $x/f; done
+  $ hg ci -Aqm "a"
+  $ mkdir 0
+  $ touch 0/f
+  $ hg mv 1/f 1/g
+  $ hg cp 5/f 5/g
+  $ hg mv 6/f 6/g
+  $ hg rm 8/f
+  $ for x in */*; do echo m1 > $x; done
+  $ hg ci -Aqm "m1"
+  $ hg up -qr0
+  $ mkdir 0 7
+  $ touch 0/f 7/f
+  $ hg mv 1/f 1/g
+  $ hg cp 3/f 3/g
+  $ hg mv 4/f 4/g
+  $ for x in */*; do echo m2 > $x; done
+  $ hg ci -Aqm "m2"
+  $ hg up -qr1
+  $ mkdir 7 8
+  $ echo m > 7/f
+  $ echo m > 8/f
+  $ hg merge -f --tool internal:dump -v --debug -r2 | sed '/^updating:/,$d' 2> /dev/null
+    searching for copies back to rev 1
+    unmatched files in local:
+     5/g
+     6/g
+    unmatched files in other:
+     3/g
+     4/g
+     7/f
+    unmatched files new in both:
+     0/f
+     1/g
+    all copies found (* = to merge, ! = divergent, % = renamed and deleted):
+     src: '3/f' -> dst: '3/g' *
+     src: '4/f' -> dst: '4/g' *
+     src: '5/f' -> dst: '5/g' *
+     src: '6/f' -> dst: '6/g' *
+    checking for directory renames
+  resolving manifests
+   branchmerge: True, force: True, partial: False
+   ancestor: e6cb3cf11019, local: ec44bf929ab5+, remote: c62e34d0b898
+  remote changed 8/f which local deleted
+  use (c)hanged version or leave (d)eleted? c
+   0/f: versions differ -> m
+    preserving 0/f for resolve of 0/f
+   1/g: versions differ -> m
+    preserving 1/g for resolve of 1/g
+   2/f: versions differ -> m
+    preserving 2/f for resolve of 2/f
+   3/f: versions differ -> m
+    preserving 3/f for resolve of 3/f
+   3/g: remote copied from 3/f -> m
+    preserving 3/f for resolve of 3/g
+   4/g: remote moved from 4/f -> m
+    preserving 4/f for resolve of 4/g
+   5/f: versions differ -> m
+    preserving 5/f for resolve of 5/f
+   5/g: local copied/moved from 5/f -> m
+    preserving 5/g for resolve of 5/g
+   6/g: local copied/moved from 6/f -> m
+    preserving 6/g for resolve of 6/g
+   7/f: remote differs from untracked local -> m
+    preserving 7/f for resolve of 7/f
+   8/f: prompt recreating -> g
+  removing 4/f
+  getting 8/f
+  $ hg mani
+  0/f
+  1/g
+  2/f
+  3/f
+  4/f
+  5/f
+  5/g
+  6/g
+  $ for f in */*; do echo $f:; cat $f; done
+  0/f:
+  m1
+  0/f.base:
+  0/f.local:
+  m1
+  0/f.orig:
+  m1
+  0/f.other:
+  m2
+  1/g:
+  m1
+  1/g.base:
+  a
+  1/g.local:
+  m1
+  1/g.orig:
+  m1
+  1/g.other:
+  m2
+  2/f:
+  m1
+  2/f.base:
+  a
+  2/f.local:
+  m1
+  2/f.orig:
+  m1
+  2/f.other:
+  m2
+  3/f:
+  m1
+  3/f.base:
+  a
+  3/f.local:
+  m1
+  3/f.orig:
+  m1
+  3/f.other:
+  m2
+  3/g:
+  m1
+  3/g.base:
+  a
+  3/g.local:
+  m1
+  3/g.orig:
+  m1
+  3/g.other:
+  m2
+  4/g:
+  m1
+  4/g.base:
+  a
+  4/g.local:
+  m1
+  4/g.orig:
+  m1
+  4/g.other:
+  m2
+  5/f:
+  m1
+  5/f.base:
+  a
+  5/f.local:
+  m1
+  5/f.orig:
+  m1
+  5/f.other:
+  m2
+  5/g:
+  m1
+  5/g.base:
+  a
+  5/g.local:
+  m1
+  5/g.orig:
+  m1
+  5/g.other:
+  m2
+  6/g:
+  m1
+  6/g.base:
+  a
+  6/g.local:
+  m1
+  6/g.orig:
+  m1
+  6/g.other:
+  m2
+  7/f:
+  m
+  7/f.base:
+  7/f.local:
+  m
+  7/f.orig:
+  m
+  7/f.other:
+  m2
+  8/f:
+  m2
+  $ cd ..
--- a/tests/test-requires.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-requires.t	Thu Apr 17 19:36:17 2014 -0400
@@ -9,11 +9,13 @@
   [255]
   $ echo indoor-pool > .hg/requires
   $ hg tip
-  abort: unknown repository format: requires features 'indoor-pool' (upgrade Mercurial)!
+  abort: repository requires features unknown to this Mercurial: indoor-pool!
+  (see http://mercurial.selenic.com/wiki/MissingRequirement for more information)
   [255]
   $ echo outdoor-pool >> .hg/requires
   $ hg tip
-  abort: unknown repository format: requires features 'indoor-pool', 'outdoor-pool' (upgrade Mercurial)!
+  abort: repository requires features unknown to this Mercurial: indoor-pool outdoor-pool!
+  (see http://mercurial.selenic.com/wiki/MissingRequirement for more information)
   [255]
   $ cd ..
 
@@ -60,7 +62,8 @@
   [255]
 
   $ hg clone supported clone-dst
-  abort: unknown repository format: requires features 'featuresetup-test' (upgrade Mercurial)!
+  abort: repository requires features unknown to this Mercurial: featuresetup-test!
+  (see http://mercurial.selenic.com/wiki/MissingRequirement for more information)
   [255]
   $ hg clone --pull supported clone-dst
   abort: required features are not supported in the destination: featuresetup-test
--- a/tests/test-revset.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-revset.t	Thu Apr 17 19:36:17 2014 -0400
@@ -124,6 +124,7 @@
   [255]
   $ log -a-b-c- # succeeds with fallback
   4
+
   $ try -- -a-b-c--a # complains
   (minus
     (minus
@@ -140,6 +141,13 @@
   ('symbol', '\xc3\xa9')
   9
 
+no quoting needed
+
+  $ log ::a-b-c-
+  0
+  1
+  2
+
 quoting needed
 
   $ try '"-a-b-c-"-a'
@@ -347,10 +355,46 @@
   $ log 'matching(6:7, "phase parents user date branch summary files description substate")'
   6
   7
+
+Testing min and max
+
+max: simple
+
   $ log 'max(contains(a))'
   5
+
+max: simple on unordered set
+
+  $ log 'max((4+0+2+5+7) and contains(a))'
+  5
+
+max: no result
+
+  $ log 'max(contains(stringthatdoesnotappearanywhere))'
+
+max: no result on unordered set
+
+  $ log 'max((4+0+2+5+7) and contains(stringthatdoesnotappearanywhere))'
+
+min: simple
+
   $ log 'min(contains(a))'
   0
+
+min: simple on unordered set
+
+  $ log 'min((4+0+2+5+7) and contains(a))'
+  0
+
+min: empty
+
+  $ log 'min(contains(stringthatdoesnotappearanywhere))'
+
+min: empty on unordered set
+
+  $ log 'min((4+0+2+5+7) and contains(stringthatdoesnotappearanywhere))'
+
+
   $ log 'merge()'
   6
   $ log 'branchpoint()'
@@ -367,6 +411,22 @@
   4
   $ log 'id(5)'
   2
+  $ log 'only(9)'
+  8
+  9
+  $ log 'only(8)'
+  8
+  $ log 'only(9, 5)'
+  2
+  4
+  8
+  9
+  $ log 'only(7 + 9, 5 + 2)'
+  4
+  6
+  7
+  8
+  9
   $ log 'outgoing()'
   8
   9
@@ -434,6 +494,138 @@
   $ log 'tag(tip)'
   9
 
+test sort revset
+--------------------------------------------
+
+test when adding two unordered revsets
+
+  $ log 'sort(keyword(issue) or modifies(b))'
+  4
+  6
+
+test when sorting a reversed collection in the same way it is sorted
+
+  $ log 'sort(reverse(all()), -rev)'
+  9
+  8
+  7
+  6
+  5
+  4
+  3
+  2
+  1
+  0
+
+test when sorting a reversed collection
+
+  $ log 'sort(reverse(all()), rev)'
+  0
+  1
+  2
+  3
+  4
+  5
+  6
+  7
+  8
+  9
+
+
+test sorting two sorted collections in different orders
+
+  $ log 'sort(outgoing() or reverse(removes(a)), rev)'
+  2
+  6
+  8
+  9
+
+test sorting two sorted collections in different orders backwards
+
+  $ log 'sort(outgoing() or reverse(removes(a)), -rev)'
+  9
+  8
+  6
+  2
+
+test subtracting something from an addset
+
+  $ log '(outgoing() or removes(a)) - removes(a)'
+  8
+  9
+
+test intersecting something with an addset
+
+  $ log 'parents(outgoing() or removes(a))'
+  1
+  4
+  5
+  8
+
+check that conversion to _missingancestors works
+  $ try --optimize '::3 - ::1'
+  (minus
+    (dagrangepre
+      ('symbol', '3'))
+    (dagrangepre
+      ('symbol', '1')))
+  * optimized:
+  (func
+    ('symbol', '_missingancestors')
+    (list
+      ('symbol', '3')
+      ('symbol', '1')))
+  3
+  $ try --optimize 'ancestors(1) - ancestors(3)'
+  (minus
+    (func
+      ('symbol', 'ancestors')
+      ('symbol', '1'))
+    (func
+      ('symbol', 'ancestors')
+      ('symbol', '3')))
+  * optimized:
+  (func
+    ('symbol', '_missingancestors')
+    (list
+      ('symbol', '1')
+      ('symbol', '3')))
+  $ try --optimize 'not ::2 and ::6'
+  (and
+    (not
+      (dagrangepre
+        ('symbol', '2')))
+    (dagrangepre
+      ('symbol', '6')))
+  * optimized:
+  (func
+    ('symbol', '_missingancestors')
+    (list
+      ('symbol', '6')
+      ('symbol', '2')))
+  3
+  4
+  5
+  6
+  $ try --optimize 'ancestors(6) and not ancestors(4)'
+  (and
+    (func
+      ('symbol', 'ancestors')
+      ('symbol', '6'))
+    (not
+      (func
+        ('symbol', 'ancestors')
+        ('symbol', '4'))))
+  * optimized:
+  (func
+    ('symbol', '_missingancestors')
+    (list
+      ('symbol', '6')
+      ('symbol', '4')))
+  3
+  5
+  6
+
 we can use patterns when searching for tags
 
   $ log 'tag("1..*")'
@@ -568,6 +760,53 @@
   hg: parse error: ^ expects a number 0, 1, or 2
   [255]
 
+multiple revspecs
+
+  $ hg log -r 'tip~1:tip' -r 'tip~2:tip~1' --template '{rev}\n'
+  8
+  9
+  4
+  5
+  6
+  7
+
+test usage in revpair (with "+")
+
+(real pair)
+
+  $ hg diff -r 'tip^^' -r 'tip'
+  diff -r 2326846efdab -r 24286f4ae135 .hgtags
+  --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+  +++ b/.hgtags	Thu Jan 01 00:00:00 1970 +0000
+  @@ -0,0 +1,1 @@
+  +e0cc66ef77e8b6f711815af4e001a6594fde3ba5 1.0
+  $ hg diff -r 'tip^^::tip'
+  diff -r 2326846efdab -r 24286f4ae135 .hgtags
+  --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+  +++ b/.hgtags	Thu Jan 01 00:00:00 1970 +0000
+  @@ -0,0 +1,1 @@
+  +e0cc66ef77e8b6f711815af4e001a6594fde3ba5 1.0
+
+(single rev)
+
+  $ hg diff -r 'tip^' -r 'tip^'
+  $ hg diff -r 'tip^::tip^ or tip^'
+
+(single rev that does not look like a range)
+
+  $ hg diff -r 'tip^ or tip^'
+  diff -r d5d0dcbdc4d9 .hgtags
+  --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+  +++ b/.hgtags	* (glob)
+  @@ -0,0 +1,1 @@
+  +e0cc66ef77e8b6f711815af4e001a6594fde3ba5 1.0
+
+(no rev)
+
+  $ hg diff -r 'author("babar") or author("celeste")'
+  abort: empty revision range
+  [255]
+
 aliases:
 
   $ echo '[revsetalias]' >> .hg/hgrc
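
Both only() and the _missingancestors rewrite exercised above compute the same
set: ancestors of one set (inclusive) minus ancestors of another. A toy sketch
on a hand-built DAG (the parents dict below is invented for illustration, not
this test's repo):

    def ancestors(parents, rev):
        # inclusive ancestor set via an iterative DAG walk
        seen, stack = set(), [rev]
        while stack:
            r = stack.pop()
            if r not in seen:
                seen.add(r)
                stack.extend(parents[r])
        return seen

    # 0 <- 1 <- 2 <- 5 <- 6 and 1 <- 3 <- 4 <- 6 (6 is a merge)
    parents = {0: [], 1: [0], 2: [1], 3: [1], 4: [3], 5: [2], 6: [4, 5]}
    # '::6 - ::2' keeps revisions reachable from 6 but not from 2
    assert ancestors(parents, 6) - ancestors(parents, 2) == {3, 4, 5, 6}
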
--- a/tests/test-rollback.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-rollback.t	Thu Apr 17 19:36:17 2014 -0400
@@ -184,4 +184,14 @@
   $ cat a
   a
 
-  $ cd ..
+corrupt journal test
+  $ echo "foo" > .hg/store/journal
+  $ hg recover
+  rolling back interrupted transaction
+  couldn't read journal entry 'foo\n'!
+  checking changesets
+  checking manifests
+  crosschecking files in changesets and manifests
+  checking files
+  1 files, 2 changesets, 2 total revisions
+
--- a/tests/test-run-tests.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-run-tests.t	Thu Apr 17 19:36:17 2014 -0400
@@ -65,7 +65,7 @@
 Combining esc with other markups - and handling lines ending with \r instead of \n:
 
   $ printf 'foo/bar\r'
-  foo/bar\r (no-eol) (glob) (esc)
+  fo?/bar\r (no-eol) (glob) (esc)
 #if windows
   $ printf 'foo\\bar\r'
   foo/bar\r (no-eol) (glob) (esc)
--- a/tests/test-shelve.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-shelve.t	Thu Apr 17 19:36:17 2014 -0400
@@ -23,10 +23,6 @@
 
   $ hg unshelve
   unshelving change 'default'
-  adding changesets
-  adding manifests
-  adding file changes
-  added 1 changesets with 5 changes to 5 files
 
   $ hg commit -q -m 'initial commit'
 
@@ -81,11 +77,11 @@
 ensure that our shelved changes exist
 
   $ hg shelve -l
-  default-01      (*)    [mq]: second.patch (glob)
-  default         (*)    [mq]: second.patch (glob)
+  default-01      (*)    changes to '[mq]: second.patch' (glob)
+  default         (*)    changes to '[mq]: second.patch' (glob)
 
   $ hg shelve -l -p default
-  default         (*)    [mq]: second.patch (glob)
+  default         (*)    changes to '[mq]: second.patch' (glob)
   
   diff --git a/a/a b/a/a
   --- a/a/a
@@ -104,10 +100,8 @@
   $ printf "z\na\n" > a/a
   $ hg unshelve --keep
   unshelving change 'default-01'
-  adding changesets
-  adding manifests
-  adding file changes
-  added 1 changesets with 3 changes to 8 files (+1 heads)
+  temporarily committing pending changes (restore with 'hg unshelve --abort')
+  rebasing shelved changes
   merging a/a
 
   $ hg revert --all -q
@@ -117,10 +111,6 @@
 
   $ hg unshelve
   unshelving change 'default-01'
-  adding changesets
-  adding manifests
-  adding file changes
-  added 1 changesets with 3 changes to 8 files
   $ hg status -C
   M a/a
   A b.rename/b
@@ -192,10 +182,8 @@
 
   $ hg unshelve
   unshelving change 'default'
-  adding changesets
-  adding manifests
-  adding file changes
-  added 1 changesets with 3 changes to 8 files (+1 heads)
+  temporarily committing pending changes (restore with 'hg unshelve --abort')
+  rebasing shelved changes
   merging a/a
   warning: conflicts during merge.
   merging a/a incomplete! (edit conflicts, then use 'hg resolve --mark')
@@ -379,10 +367,8 @@
 
   $ HGMERGE=true hg unshelve
   unshelving change 'default'
-  adding changesets
-  adding manifests
-  adding file changes
-  added 1 changesets with 1 changes to 6 files (+1 heads)
+  temporarily committing pending changes (restore with 'hg unshelve --abort')
+  rebasing shelved changes
   merging a/a
   $ hg parents -q
   4:33f7f61e6c5e
@@ -400,15 +386,11 @@
   shelved as default
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
   $ hg shelve --list
-  default         (*)    create conflict (glob)
+  default         (*)    changes to 'create conflict' (glob)
   $ hg unshelve --keep
   unshelving change 'default'
-  adding changesets
-  adding manifests
-  adding file changes
-  added 1 changesets with 1 changes to 7 files
   $ hg shelve --list
-  default         (*)    create conflict (glob)
+  default         (*)    changes to 'create conflict' (glob)
   $ hg shelve --cleanup
   $ hg shelve --list
 
@@ -424,10 +406,6 @@
    * test                      4:33f7f61e6c5e
   $ hg unshelve
   unshelving change 'test'
-  adding changesets
-  adding manifests
-  adding file changes
-  added 1 changesets with 1 changes to 7 files
   $ hg bookmark
    * test                      4:33f7f61e6c5e
 
@@ -437,13 +415,9 @@
   shelved as test
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
   $ hg --config extensions.mq=! shelve --list
-  test            (1s ago)    create conflict
+  test            (*)    changes to 'create conflict' (glob)
   $ hg --config extensions.mq=! unshelve
   unshelving change 'test'
-  adding changesets
-  adding manifests
-  adding file changes
-  added 1 changesets with 1 changes to 7 files
 
 shelve should leave dirstate clean (issue 4055)
 
@@ -468,10 +442,7 @@
   saved backup bundle to $TESTTMP/shelverebase/.hg/strip-backup/323bfa07f744-backup.hg (glob)
   $ hg unshelve
   unshelving change 'default'
-  adding changesets
-  adding manifests
-  adding file changes
-  added 2 changesets with 2 changes to 2 files (+1 heads)
+  rebasing shelved changes
   $ hg status
   M z
 
@@ -497,10 +468,7 @@
   $ hg up -q 1
   $ hg unshelve
   unshelving change 'default'
-  adding changesets
-  adding manifests
-  adding file changes
-  added 1 changesets with 1 changes to 3 files
+  rebasing shelved changes
   $ hg status
   A d
 
@@ -513,10 +481,7 @@
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
   $ hg unshelve
   unshelving change 'default'
-  adding changesets
-  adding manifests
-  adding file changes
-  added 1 changesets with 1 changes to 3 files
+  rebasing shelved changes
   $ hg status
   A d
 
@@ -534,10 +499,6 @@
   $ hg debugobsolete `hg --debug id -i -r 1`
   $ hg unshelve
   unshelving change 'default'
-  adding changesets
-  adding manifests
-  adding file changes
-  added 1 changesets with 1 changes to 2 files (+1 heads)
 
 unshelve should leave unknown files alone (issue4113)
 
@@ -549,10 +510,6 @@
   ? e
   $ hg unshelve
   unshelving change 'default'
-  adding changesets
-  adding manifests
-  adding file changes
-  added 1 changesets with 1 changes to 2 files (+1 heads)
   $ hg status
   A d
   ? e
@@ -568,13 +525,142 @@
   $ echo z > e
   $ hg unshelve
   unshelving change 'default'
-  adding changesets
-  adding manifests
-  adding file changes
-  added 1 changesets with 2 changes to 3 files (+1 heads)
   $ cat e
   e
   $ cat e.orig
   z
 
+
+unshelve and conflicts with tracked and untracked files
+
+ preparing:
+
+  $ rm *.orig
+  $ hg ci -qm 'commit stuff'
+  $ hg phase -p null:
+
+ no other changes - no merge:
+
+  $ echo f > f
+  $ hg add f
+  $ hg shelve
+  shelved as default
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ echo g > f
+  $ hg unshelve
+  unshelving change 'default'
+  $ hg st
+  A f
+  ? f.orig
+  $ cat f
+  f
+  $ cat f.orig
+  g
+
+ other uncommitted changes - merge:
+
+  $ hg st
+  A f
+  ? f.orig
+  $ hg shelve
+  shelved as default
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ hg log -G --template '{rev}  {desc|firstline}  {author}' -R bundle://.hg/shelved/default.hg -r 'bundle()'
+  o  4  changes to 'commit stuff'  shelve@localhost
+  |
+  $ hg log -G --template '{rev}  {desc|firstline}  {author}'
+  @  3  commit stuff  test
+  |
+  | o  2  c  test
+  |/
+  o  0  a  test
+  
+  $ mv f.orig f
+  $ echo 1 > a
+  $ hg unshelve --date '1073741824 0'
+  unshelving change 'default'
+  temporarily committing pending changes (restore with 'hg unshelve --abort')
+  rebasing shelved changes
+  merging f
+  warning: conflicts during merge.
+  merging f incomplete! (edit conflicts, then use 'hg resolve --mark')
+  unresolved conflicts (see 'hg resolve', then 'hg unshelve --continue')
+  [1]
+  $ hg log -G --template '{rev}  {desc|firstline}  {author}  {date|isodate}'
+  @  5  changes to 'commit stuff'  shelve@localhost  1970-01-01 00:00 +0000
+  |
+  | @  4  pending changes temporary commit  shelve@localhost  2004-01-10 13:37 +0000
+  |/
+  o  3  commit stuff  test  1970-01-01 00:00 +0000
+  |
+  | o  2  c  test  1970-01-01 00:00 +0000
+  |/
+  o  0  a  test  1970-01-01 00:00 +0000
+  
+  $ hg st
+  M f
+  ? f.orig
+  $ cat f
+  <<<<<<< local
+  g
+  =======
+  f
+  >>>>>>> other
+  $ cat f.orig
+  g
+  $ hg unshelve --abort
+  rebase aborted
+  unshelve of 'default' aborted
+  $ hg st
+  M a
+  ? f.orig
+  $ cat f.orig
+  g
+  $ hg unshelve
+  unshelving change 'default'
+  temporarily committing pending changes (restore with 'hg unshelve --abort')
+  rebasing shelved changes
+  $ hg st
+  M a
+  A f
+  ? f.orig
+
+ other committed changes - merge:
+
+  $ hg shelve f
+  shelved as default
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ hg ci a -m 'intermediate other change'
+  $ mv f.orig f
+  $ hg unshelve
+  unshelving change 'default'
+  rebasing shelved changes
+  merging f
+  warning: conflicts during merge.
+  merging f incomplete! (edit conflicts, then use 'hg resolve --mark')
+  unresolved conflicts (see 'hg resolve', then 'hg unshelve --continue')
+  [1]
+  $ hg st
+  M f
+  ? f.orig
+  $ cat f
+  <<<<<<< local
+  g
+  =======
+  f
+  >>>>>>> other
+  $ cat f.orig
+  g
+  $ hg unshelve --abort
+  rebase aborted
+  no changes needed to a
+  no changes needed to d
+  no changes needed to e
+  unshelve of 'default' aborted
+  $ hg st
+  ? f.orig
+  $ cat f.orig
+  g
+  $ hg shelve --delete default
+
   $ cd ..
--- a/tests/test-ssh.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-ssh.t	Thu Apr 17 19:36:17 2014 -0400
@@ -223,7 +223,7 @@
   $ hg push
   pushing to ssh://user@dummy/remote
   searching for changes
-  note: unsynced remote changes!
+  remote has heads on branch 'default' that are not known locally: 6c0482d977a3
   remote: adding changesets
   remote: adding manifests
   remote: adding file changes
--- a/tests/test-status-color.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-status-color.t	Thu Apr 17 19:36:17 2014 -0400
@@ -1,5 +1,3 @@
-  $ "$TESTDIR/hghave" tic || exit 80
-
   $ echo "[extensions]" >> $HGRCPATH
   $ echo "color=" >> $HGRCPATH
   $ echo "[color]" >> $HGRCPATH
@@ -186,8 +184,11 @@
   \x1b[0;0mC \x1b[0m\x1b[0;0m.hgignore\x1b[0m (esc)
   \x1b[0;0mC \x1b[0m\x1b[0;0mmodified\x1b[0m (esc)
 
+
 hg status -A (with terminfo color):
 
+#if tic
+
   $ mkdir "$TESTTMP/terminfo"
   $ TERMINFO="$TESTTMP/terminfo" tic "$TESTDIR/hgterm.ti"
   $ TERM=hgterm TERMINFO="$TESTTMP/terminfo" hg status --config color.mode=terminfo --color=always -A
@@ -201,6 +202,8 @@
   \x1b[30m\x1b[30mC \x1b[30m\x1b[30m\x1b[30m.hgignore\x1b[30m (esc)
   \x1b[30m\x1b[30mC \x1b[30m\x1b[30m\x1b[30mmodified\x1b[30m (esc)
 
+#endif
+
 
   $ echo "^ignoreddir$" > .hgignore
   $ mkdir ignoreddir
--- a/tests/test-strip.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-strip.t	Thu Apr 17 19:36:17 2014 -0400
@@ -336,6 +336,19 @@
   saved backup bundle to $TESTTMP/test/.hg/strip-backup/*-backup.hg (glob)
   $ restore
 
+verify fncache is kept up-to-date
+
+  $ touch a
+  $ hg ci -qAm a
+  $ cat .hg/store/fncache | sort
+  data/a.i
+  data/bar.i
+  $ hg strip tip
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  saved backup bundle to $TESTTMP/test/.hg/strip-backup/*-backup.hg (glob)
+  $ cat .hg/store/fncache
+  data/bar.i
+
 stripping an empty revset
 
   $ hg strip "1 and not 1"
--- a/tests/test-subrepo-git.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-subrepo-git.t	Thu Apr 17 19:36:17 2014 -0400
@@ -452,7 +452,7 @@
   da5f5b1d8ffcf62fb8327bcd3c89a4367a6018e7
   $ cd ..
 
-Sticky subrepositorys, file changes
+Sticky subrepositories, file changes
   $ touch s/f1
   $ cd s
   $ git add f1
--- a/tests/test-subrepo-svn.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-subrepo-svn.t	Thu Apr 17 19:36:17 2014 -0400
@@ -306,7 +306,7 @@
   2
   $ cd ..
 
-Sticky subrepositorys, file changes
+Sticky subrepositories, file changes
   $ touch s/f1
   $ cd s
   $ svn add f1
--- a/tests/test-subrepo.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-subrepo.t	Thu Apr 17 19:36:17 2014 -0400
@@ -35,6 +35,18 @@
   update: (current)
   $ hg ci -m1
 
+test handling .hgsubstate "added" explicitly.
+
+  $ hg parents --template '{node}\n{files}\n'
+  7cf8cfea66e410e8e3336508dfeec07b3192de51
+  .hgsub .hgsubstate
+  $ hg rollback -q
+  $ hg add .hgsubstate
+  $ hg ci -m1
+  $ hg parents --template '{node}\n{files}\n'
+  7cf8cfea66e410e8e3336508dfeec07b3192de51
+  .hgsub .hgsubstate
+
 Revert subrepo and test subrepo fileset keyword:
 
   $ echo b > s/a
@@ -99,6 +111,19 @@
   commit: (clean)
   update: (current)
 
+test handling .hgsubstate "modified" explicitly.
+
+  $ hg parents --template '{node}\n{files}\n'
+  df30734270ae757feb35e643b7018e818e78a9aa
+  .hgsubstate
+  $ hg rollback -q
+  $ hg status -A .hgsubstate
+  M .hgsubstate
+  $ hg ci -m2
+  $ hg parents --template '{node}\n{files}\n'
+  df30734270ae757feb35e643b7018e818e78a9aa
+  .hgsubstate
+
 bump sub rev (and check it is ignored by ui.commitsubrepos)
 
   $ echo b > s/a
@@ -184,6 +209,18 @@
 
   $ hg ci -m8 # remove sub
 
+test handling .hgsubstate "removed" explicitly.
+
+  $ hg parents --template '{node}\n{files}\n'
+  96615c1dad2dc8e3796d7332c77ce69156f7b78e
+  .hgsub .hgsubstate
+  $ hg rollback -q
+  $ hg remove .hgsubstate
+  $ hg ci -m8
+  $ hg parents --template '{node}\n{files}\n'
+  96615c1dad2dc8e3796d7332c77ce69156f7b78e
+  .hgsub .hgsubstate
+
 merge tests
 
   $ hg co -C 3
@@ -755,6 +792,19 @@
   $ echo test >> sub/repo/foo
   $ hg ci -mtest
   committing subrepository sub/repo (glob)
+  $ hg cat sub/repo/foo
+  test
+  test
+  $ mkdir -p tmp/sub/repo
+  $ hg cat -r 0 --output tmp/%p_p sub/repo/foo
+  $ cat tmp/sub/repo/foo_p
+  test
+  $ mv sub/repo sub_
+  $ hg cat sub/repo/baz
+  skipping missing subrepository: sub/repo
+  [1]
+  $ rm -rf sub/repo
+  $ mv sub_ sub/repo
   $ cd ..
 
 Create repo without default path, pull top repo, and see what happens on update
@@ -911,7 +961,7 @@
   $ hg -R t id
   e95bcfa18a35
 
-Sticky subrepositorys, file changes
+Sticky subrepositories, file changes
   $ touch s/f1
   $ touch t/f1
   $ hg add -S s/f1
@@ -1296,7 +1346,7 @@
   $ cd ..
 
 
-Test that comit --secret works on both repo and subrepo (issue4182)
+Test that commit --secret works on both repo and subrepo (issue4182)
 
   $ cd main
   $ echo secret >> b
--- a/tests/test-transplant.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-transplant.t	Thu Apr 17 19:36:17 2014 -0400
@@ -71,7 +71,7 @@
       "transplanted([set])"
         Transplanted changesets in set, or all transplanted changesets.
 
-test tranplanted keyword
+test transplanted keyword
 
   $ hg log --template '{rev} {transplanted}\n'
   7 a53251cdf717679d1907b289f991534be05c997a
@@ -414,7 +414,7 @@
   $ hg ci -m appendd
   created new head
 
-tranplant
+transplant
 
   $ hg transplant -m 1
   applying 42dc4432fd35
--- a/tests/test-up-local-change.t	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-up-local-change.t	Thu Apr 17 19:36:17 2014 -0400
@@ -176,6 +176,8 @@
   [255]
   $ hg --debug merge -f
     searching for copies back to rev 1
+    unmatched files new in both:
+     b
   resolving manifests
    branchmerge: True, force: True, partial: False
    ancestor: c19d34741b0a, local: 1e71731e6fbb+, remote: 83c51d0caff4
--- a/tests/test-url.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-url.py	Thu Apr 17 19:36:17 2014 -0400
@@ -5,7 +5,7 @@
         print (a, b)
 
 def cert(cn):
-    return dict(subject=((('commonName', cn),),))
+    return {'subject': ((('commonName', cn),),)}
 
 from mercurial.sslutil import _verifycert
 
--- a/tests/test-wireproto.py	Tue Apr 15 03:21:59 2014 +0900
+++ b/tests/test-wireproto.py	Thu Apr 17 19:36:17 2014 -0400
@@ -18,7 +18,7 @@
     @wireproto.batchable
     def greet(self, name):
         f = wireproto.future()
-        yield wireproto.todict(name=mangle(name)), f
+        yield {'name': mangle(name)}, f
         yield unmangle(f.value)
 
 class serverrepo(object):