merge with stable
author Matt Mackall <mpm@selenic.com>
date Sat, 12 May 2012 13:20:26 +0200
changeset 16674 76c744e0c5bb
parent 16665 e410be860393 (diff)
parent 16673 775a8d33e6f0 (current diff)
child 16675 f29f187ee73d
files:
tests/test-check-code-hg.t
tests/test-convert-baz
tests/test-convert-baz.out
tests/test-convert-p4
tests/test-convert-p4-filetypes
tests/test-convert-p4-filetypes.out
tests/test-convert-p4.out
--- a/hgext/bugzilla.py	Wed Apr 25 01:35:39 2012 +0200
+++ b/hgext/bugzilla.py	Sat May 12 13:20:26 2012 +0200
@@ -585,11 +585,13 @@
 # inheritance with a new-style class.
 class cookietransport(cookietransportrequest, xmlrpclib.Transport):
     def __init__(self, use_datetime=0):
-        xmlrpclib.Transport.__init__(self, use_datetime)
+        if util.safehasattr(xmlrpclib.Transport, "__init__"):
+            xmlrpclib.Transport.__init__(self, use_datetime)
 
 class cookiesafetransport(cookietransportrequest, xmlrpclib.SafeTransport):
     def __init__(self, use_datetime=0):
-        xmlrpclib.SafeTransport.__init__(self, use_datetime)
+        if util.safehasattr(xmlrpclib.Transport, "__init__"):
+            xmlrpclib.SafeTransport.__init__(self, use_datetime)
 
 class bzxmlrpc(bzaccess):
     """Support for access to Bugzilla via the Bugzilla XMLRPC API.
--- a/hgext/largefiles/overrides.py	Wed Apr 25 01:35:39 2012 +0200
+++ b/hgext/largefiles/overrides.py	Sat May 12 13:20:26 2012 +0200
@@ -552,7 +552,8 @@
         for lfile in modified:
             lfutil.updatestandin(repo, lfutil.standin(lfile))
         for lfile in missing:
-            os.unlink(repo.wjoin(lfutil.standin(lfile)))
+            if os.path.exists(repo.wjoin(lfutil.standin(lfile))):
+                os.unlink(repo.wjoin(lfutil.standin(lfile)))
 
         try:
             ctx = repo[opts.get('rev')]
@@ -696,6 +697,33 @@
         ui.status(_("%d largefiles cached\n") % numcached)
     return result
 
+def overrideclone(orig, ui, source, dest=None, **opts):
+    result = hg.clone(ui, opts, source, dest,
+                      pull=opts.get('pull'),
+                      stream=opts.get('uncompressed'),
+                      rev=opts.get('rev'),
+                      update=True, # required for successful walkchangerevs
+                      branch=opts.get('branch'))
+    if result is None:
+        return True
+    totalsuccess = 0
+    totalmissing = 0
+    if opts.get('all_largefiles'):
+        sourcerepo, destrepo = result
+        matchfn = scmutil.match(destrepo[None],
+                                [destrepo.wjoin(lfutil.shortname)], {})
+        def prepare(ctx, fns):
+            pass
+        for ctx in cmdutil.walkchangerevs(destrepo, matchfn, {'rev' : None},
+                                          prepare):
+            success, missing = lfcommands.cachelfiles(ui, destrepo, ctx.node())
+            totalsuccess += len(success)
+            totalmissing += len(missing)
+        ui.status(_("%d additional largefiles cached\n") % totalsuccess)
+        if totalmissing > 0:
+            ui.status(_("%d largefiles failed to download\n") % totalmissing)
+    return totalmissing != 0
+
 def overriderebase(orig, ui, repo, **opts):
     repo._isrebasing = True
     try:
@@ -953,6 +981,8 @@
             ui.status(_('largefiles: %d to upload\n') % len(toupload))
 
 def overrideaddremove(orig, ui, repo, *pats, **opts):
+    if not lfutil.islfilesrepo(repo):
+        return orig(ui, repo, *pats, **opts)
     # Get the list of missing largefiles so we can remove them
     lfdirstate = lfutil.openlfdirstate(ui, repo)
     s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
--- a/hgext/largefiles/uisetup.py	Wed Apr 25 01:35:39 2012 +0200
+++ b/hgext/largefiles/uisetup.py	Sat May 12 13:20:26 2012 +0200
@@ -70,6 +70,12 @@
                                    overrides.overrideupdate)
     entry = extensions.wrapcommand(commands.table, 'pull',
                                    overrides.overridepull)
+    entry = extensions.wrapcommand(commands.table, 'clone',
+                                   overrides.overrideclone)
+    cloneopt = [('', 'all-largefiles', None,
+                 _('download all versions of all largefiles'))]
+
+    entry[1].extend(cloneopt)
     entry = extensions.wrapcommand(commands.table, 'cat',
                                    overrides.overridecat)
     entry = extensions.wrapfunction(merge, '_checkunknownfile',
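
extensions.wrapcommand returns the wrapped entry from the command table; its second element is the option list, which is why entry[1].extend(...) is enough to grow a new flag on a core command. A minimal sketch of the same wiring (the wrapper body here is illustrative and simply defers to the original clone; note that --all-largefiles surfaces as opts['all_largefiles']):

    from mercurial import commands, extensions
    from mercurial.i18n import _

    def overrideclone(orig, ui, source, dest=None, **opts):
        # illustrative wrapper: peek at the new flag, then defer to the
        # original clone implementation
        if opts.get('all_largefiles'):
            ui.note(_('will cache all largefile revisions\n'))
        return orig(ui, source, dest, **opts)

    def uisetup(ui):
        entry = extensions.wrapcommand(commands.table, 'clone', overrideclone)
        # entry[1] is the flag list of (short, long, default, help) tuples
        entry[1].extend([('', 'all-largefiles', None,
                          _('download all versions of all largefiles'))])
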
--- a/hgext/mq.py	Wed Apr 25 01:35:39 2012 +0200
+++ b/hgext/mq.py	Sat May 12 13:20:26 2012 +0200
@@ -46,6 +46,17 @@
 
 You will by default be managing a patch queue named "patches". You can
 create other, independent patch queues with the :hg:`qqueue` command.
+
+If the working directory contains uncommitted files, qpush, qpop and
+qgoto abort immediately. If -f/--force is used, the changes are
+discarded. Setting:
+
+  [mq]
+  check = True
+
+makes them behave as if -c/--check were passed, and non-conflicting
+local changes will be tolerated and preserved. If incompatible options
+such as -f/--force or --exact are passed, this setting is ignored.
 '''
 
 from mercurial.i18n import _
@@ -280,6 +291,9 @@
         if phase is not None:
             repo.ui.restoreconfig(backup)
 
+class AbortNoCleanup(error.Abort):
+    pass
+
 class queue(object):
     def __init__(self, ui, path, patchdir=None):
         self.basepath = path
@@ -554,6 +568,18 @@
         except OSError, inst:
             self.ui.warn(_('error removing undo: %s\n') % str(inst))
 
+    def backup(self, repo, files, copy=False):
+        # backup local changes in --force case
+        for f in sorted(files):
+            absf = repo.wjoin(f)
+            if os.path.lexists(absf):
+                self.ui.note(_('saving current version of %s as %s\n') %
+                             (f, f + '.orig'))
+                if copy:
+                    util.copyfile(absf, absf + '.orig')
+                else:
+                    util.rename(absf, absf + '.orig')
+
     def printdiff(self, repo, diffopts, node1, node2=None, files=None,
                   fp=None, changes=None, opts={}):
         stat = opts.get('stat')
@@ -668,7 +694,8 @@
             return (False, list(files), False)
 
     def apply(self, repo, series, list=False, update_status=True,
-              strict=False, patchdir=None, merge=None, all_files=None):
+              strict=False, patchdir=None, merge=None, all_files=None,
+              tobackup=None, check=False):
         wlock = lock = tr = None
         try:
             wlock = repo.wlock()
@@ -676,10 +703,15 @@
             tr = repo.transaction("qpush")
             try:
                 ret = self._apply(repo, series, list, update_status,
-                                  strict, patchdir, merge, all_files=all_files)
+                                  strict, patchdir, merge, all_files=all_files,
+                                  tobackup=tobackup, check=check)
                 tr.close()
                 self.savedirty()
                 return ret
+            except AbortNoCleanup:
+                tr.close()
+                self.savedirty()
+                return 2, repo.dirstate.p1()
             except:
                 try:
                     tr.abort()
@@ -693,9 +725,14 @@
             self.removeundo(repo)
 
     def _apply(self, repo, series, list=False, update_status=True,
-               strict=False, patchdir=None, merge=None, all_files=None):
-        '''returns (error, hash)
-        error = 1 for unable to read, 2 for patch failed, 3 for patch fuzz'''
+               strict=False, patchdir=None, merge=None, all_files=None,
+               tobackup=None, check=False):
+        """returns (error, hash)
+
+        error = 1 for unable to read, 2 for patch failed, 3 for patch
+        fuzz. tobackup is None or a set of files to backup before they
+        are modified by a patch.
+        """
         # TODO unify with commands.py
         if not patchdir:
             patchdir = self.path
@@ -727,6 +764,14 @@
                 message = '\n'.join(message)
 
             if ph.haspatch:
+                if tobackup:
+                    touched = patchmod.changedfiles(self.ui, repo, pf)
+                    touched = set(touched) & tobackup
+                    if touched and check:
+                        raise AbortNoCleanup(
+                            _("local changes found, refresh first"))
+                    self.backup(repo, touched, copy=True)
+                    tobackup = tobackup - touched
                 (patcherr, files, fuzz) = self.patch(repo, pf)
                 if all_files is not None:
                     all_files.update(files)
@@ -838,7 +883,7 @@
     def finish(self, repo, revs):
         # Manually trigger phase computation to ensure phasedefaults is
         # executed before we remove the patches.
-        repo._phaserev
+        repo._phasecache
         patches = self._revpatches(repo, sorted(revs))
         qfinished = self._cleanup(patches, len(patches))
         if qfinished and repo.ui.configbool('mq', 'secret', False):
@@ -935,6 +980,10 @@
             else:
                 raise util.Abort(_('patch "%s" already exists') % name)
 
+    def checkforcecheck(self, check, force):
+        if force and check:
+            raise util.Abort(_('cannot use both --force and --check'))
+
     def new(self, repo, patchfn, *pats, **opts):
         """options:
            msg: a string or a no-argument function returning a string
@@ -1132,8 +1181,9 @@
                                 return self.series[i + off]
         raise util.Abort(_("patch %s not in series") % patch)
 
-    def push(self, repo, patch=None, force=False, list=False,
-             mergeq=None, all=False, move=False, exact=False):
+    def push(self, repo, patch=None, force=False, list=False, mergeq=None,
+             all=False, move=False, exact=False, nobackup=False, check=False):
+        self.checkforcecheck(check, force)
         diffopts = self.diffopts()
         wlock = repo.wlock()
         try:
@@ -1188,10 +1238,13 @@
             if start == len(self.series):
                 self.ui.warn(_('patch series already fully applied\n'))
                 return 1
-            if not force:
+            if not force and not check:
                 self.checklocalchanges(repo, refresh=self.applied)
 
             if exact:
+                if check:
+                    raise util.Abort(
+                        _("cannot use --exact and --check together"))
                 if move:
                     raise util.Abort(_("cannot use --exact and --move together"))
                 if self.applied:
@@ -1232,13 +1285,22 @@
             else:
                 end = self.series.index(patch, start) + 1
 
+            tobackup = set()
+            if (not nobackup and force) or check:
+                m, a, r, d = self.checklocalchanges(repo, force=True)
+                if check:
+                    tobackup.update(m + a + r + d)
+                else:
+                    tobackup.update(m + a)
+
             s = self.series[start:end]
             all_files = set()
             try:
                 if mergeq:
                     ret = self.mergepatch(repo, mergeq, s, diffopts)
                 else:
-                    ret = self.apply(repo, s, list, all_files=all_files)
+                    ret = self.apply(repo, s, list, all_files=all_files,
+                                     tobackup=tobackup, check=check)
             except:
                 self.ui.warn(_('cleaning up working directory...'))
                 node = repo.dirstate.p1()
@@ -1268,7 +1330,9 @@
         finally:
             wlock.release()
 
-    def pop(self, repo, patch=None, force=False, update=True, all=False):
+    def pop(self, repo, patch=None, force=False, update=True, all=False,
+            nobackup=False, check=False):
+        self.checkforcecheck(check, force)
         wlock = repo.wlock()
         try:
             if patch:
@@ -1313,8 +1377,14 @@
                         break
                 update = needupdate
 
-            if not force and update:
-                self.checklocalchanges(repo)
+            tobackup = set()
+            if update:
+                m, a, r, d = self.checklocalchanges(repo, force=force or check)
+                if force:
+                    if not nobackup:
+                        tobackup.update(m + a)
+                elif check:
+                    tobackup.update(m + a + r + d)
 
             self.applieddirty = True
             end = len(self.applied)
@@ -1344,6 +1414,12 @@
                 m, a, r, d = repo.status(qp, top)[:4]
                 if d:
                     raise util.Abort(_("deletions found between repo revs"))
+
+                tobackup = set(a + m + r) & tobackup
+                if check and tobackup:
+                    self.localchangesfound()
+                self.backup(repo, tobackup)
+
                 for f in a:
                     try:
                         util.unlinkpath(repo.wjoin(f))
@@ -1921,6 +1997,14 @@
         self.removeundo(repo)
         return imported
 
+def fixcheckopts(ui, opts):
+    if (not ui.configbool('mq', 'check') or opts.get('force')
+        or opts.get('exact')):
+        return opts
+    opts = dict(opts)
+    opts['check'] = True
+    return opts
+
 @command("qdelete|qremove|qrm",
          [('k', 'keep', None, _('keep patch file')),
           ('r', 'rev', [],
@@ -2460,18 +2544,25 @@
         wlock.release()
 
 @command("qgoto",
-         [('f', 'force', None, _('overwrite any local changes'))],
+         [('c', 'check', None, _('tolerate non-conflicting local changes')),
+          ('f', 'force', None, _('overwrite any local changes')),
+          ('', 'no-backup', None, _('do not save backup copies of files'))],
          _('hg qgoto [OPTION]... PATCH'))
 def goto(ui, repo, patch, **opts):
     '''push or pop patches until named patch is at top of stack
 
     Returns 0 on success.'''
+    opts = fixcheckopts(ui, opts)
     q = repo.mq
     patch = q.lookup(patch)
+    nobackup = opts.get('no_backup')
+    check = opts.get('check')
     if q.isapplied(patch):
-        ret = q.pop(repo, patch, force=opts.get('force'))
+        ret = q.pop(repo, patch, force=opts.get('force'), nobackup=nobackup,
+                    check=check)
     else:
-        ret = q.push(repo, patch, force=opts.get('force'))
+        ret = q.push(repo, patch, force=opts.get('force'), nobackup=nobackup,
+                     check=check)
     q.savedirty()
     return ret
 
@@ -2591,26 +2682,32 @@
     return newpath
 
 @command("^qpush",
-         [('f', 'force', None, _('apply on top of local changes')),
+         [('c', 'check', None, _('tolerate non-conflicting local changes')),
+          ('f', 'force', None, _('apply on top of local changes')),
           ('e', 'exact', None, _('apply the target patch to its recorded parent')),
           ('l', 'list', None, _('list patch name in commit text')),
           ('a', 'all', None, _('apply all patches')),
           ('m', 'merge', None, _('merge from another queue (DEPRECATED)')),
           ('n', 'name', '',
            _('merge queue name (DEPRECATED)'), _('NAME')),
-          ('', 'move', None, _('reorder patch series and apply only the patch'))],
+          ('', 'move', None,
+           _('reorder patch series and apply only the patch')),
+          ('', 'no-backup', None, _('do not save backup copies of files'))],
          _('hg qpush [-f] [-l] [-a] [--move] [PATCH | INDEX]'))
 def push(ui, repo, patch=None, **opts):
     """push the next patch onto the stack
 
-    When -f/--force is applied, all local changes in patched files
-    will be lost.
+    By default, abort if the working directory contains uncommitted
+    changes. With -c/--check, abort only if the uncommitted files
+    overlap with patched files. With -f/--force, backup and patch over
+    uncommitted changes.
 
     Return 0 on success.
     """
     q = repo.mq
     mergeq = None
 
+    opts = fixcheckopts(ui, opts)
     if opts.get('merge'):
         if opts.get('name'):
             newpath = repo.join(opts.get('name'))
@@ -2623,24 +2720,33 @@
         ui.warn(_("merging with queue at: %s\n") % mergeq.path)
     ret = q.push(repo, patch, force=opts.get('force'), list=opts.get('list'),
                  mergeq=mergeq, all=opts.get('all'), move=opts.get('move'),
-                 exact=opts.get('exact'))
+                 exact=opts.get('exact'), nobackup=opts.get('no_backup'),
+                 check=opts.get('check'))
     return ret
 
 @command("^qpop",
          [('a', 'all', None, _('pop all patches')),
           ('n', 'name', '',
            _('queue name to pop (DEPRECATED)'), _('NAME')),
-          ('f', 'force', None, _('forget any local changes to patched files'))],
+          ('c', 'check', None, _('tolerate non-conflicting local changes')),
+          ('f', 'force', None, _('forget any local changes to patched files')),
+          ('', 'no-backup', None, _('do not save backup copies of files'))],
          _('hg qpop [-a] [-f] [PATCH | INDEX]'))
 def pop(ui, repo, patch=None, **opts):
     """pop the current patch off the stack
 
-    By default, pops off the top of the patch stack. If given a patch
-    name, keeps popping off patches until the named patch is at the
-    top of the stack.
+    Without argument, pops off the top of the patch stack. If given a
+    patch name, keeps popping off patches until the named patch is at
+    the top of the stack.
+
+    By default, abort if the working directory contains uncommitted
+    changes. With -c/--check, abort only if the uncommitted files
+    overlap with patched files. With -f/--force, backup and discard
+    changes made to such files.
 
     Return 0 on success.
     """
+    opts = fixcheckopts(ui, opts)
     localupdate = True
     if opts.get('name'):
         q = queue(ui, repo.path, repo.join(opts.get('name')))
@@ -2649,7 +2755,8 @@
     else:
         q = repo.mq
     ret = q.pop(repo, patch, force=opts.get('force'), update=localupdate,
-                all=opts.get('all'))
+                all=opts.get('all'), nobackup=opts.get('no_backup'),
+                check=opts.get('check'))
     q.savedirty()
     return ret
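
The precedence rule described in the new mq help text is concentrated in fixcheckopts: the mq.check config supplies a default, and an explicit --force or --exact wins over it. A standalone sketch of that logic with a stub ui object (fakeui is invented for the demo):

    def fixcheckopts(ui, opts):
        # the config default only applies when no incompatible flag was given
        if (not ui.configbool('mq', 'check') or opts.get('force')
            or opts.get('exact')):
            return opts
        opts = dict(opts)          # copy rather than mutate the caller's dict
        opts['check'] = True
        return opts

    class fakeui(object):          # stub standing in for mercurial.ui.ui
        def __init__(self, check):
            self._check = check
        def configbool(self, section, name):
            return self._check

    assert fixcheckopts(fakeui(True), {}) == {'check': True}
    assert fixcheckopts(fakeui(True), {'force': True}) == {'force': True}
    assert fixcheckopts(fakeui(False), {}) == {}
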
 
--- a/hgext/pager.py	Wed Apr 25 01:35:39 2012 +0200
+++ b/hgext/pager.py	Sat May 12 13:20:26 2012 +0200
@@ -22,12 +22,6 @@
 If no pager is set, the pager extension uses the environment variable
 $PAGER. If neither pager.pager nor $PAGER is set, no pager is used.
 
-If you notice "BROKEN PIPE" error messages, you can disable them by
-setting::
-
-  [pager]
-  quiet = True
-
 You can disable the pager for certain commands by adding them to the
 pager.ignore list::
 
@@ -53,37 +47,27 @@
 normal behavior.
 '''
 
-import sys, os, signal, shlex, errno
+import atexit, sys, os, signal, subprocess
 from mercurial import commands, dispatch, util, extensions
 from mercurial.i18n import _
 
 def _runpager(p):
-    if not util.safehasattr(os, 'fork'):
-        sys.stdout = util.popen(p, 'wb')
-        if util.isatty(sys.stderr):
-            sys.stderr = sys.stdout
-        return
-    fdin, fdout = os.pipe()
-    pid = os.fork()
-    if pid == 0:
-        os.close(fdin)
-        os.dup2(fdout, sys.stdout.fileno())
-        if util.isatty(sys.stderr):
-            os.dup2(fdout, sys.stderr.fileno())
-        os.close(fdout)
-        return
-    os.dup2(fdin, sys.stdin.fileno())
-    os.close(fdin)
-    os.close(fdout)
-    try:
-        os.execvp('/bin/sh', ['/bin/sh', '-c', p])
-    except OSError, e:
-        if e.errno == errno.ENOENT:
-            # no /bin/sh, try executing the pager directly
-            args = shlex.split(p)
-            os.execvp(args[0], args)
-        else:
-            raise
+    pager = subprocess.Popen(p, shell=True, bufsize=-1,
+                             close_fds=util.closefds, stdin=subprocess.PIPE,
+                             stdout=sys.stdout, stderr=sys.stderr)
+
+    stdout = os.dup(sys.stdout.fileno())
+    stderr = os.dup(sys.stderr.fileno())
+    os.dup2(pager.stdin.fileno(), sys.stdout.fileno())
+    if util.isatty(sys.stderr):
+        os.dup2(pager.stdin.fileno(), sys.stderr.fileno())
+
+    @atexit.register
+    def killpager():
+        pager.stdin.close()
+        os.dup2(stdout, sys.stdout.fileno())
+        os.dup2(stderr, sys.stderr.fileno())
+        pager.wait()
 
 def uisetup(ui):
     if ui.plain() or '--debugger' in sys.argv or not util.isatty(sys.stdout):
@@ -101,9 +85,9 @@
                  (cmd not in ui.configlist('pager', 'ignore') and not attend))):
                 ui.setconfig('ui', 'formatted', ui.formatted())
                 ui.setconfig('ui', 'interactive', False)
+                if util.safehasattr(signal, "SIGPIPE"):
+                    signal.signal(signal.SIGPIPE, signal.SIG_DFL)
                 _runpager(p)
-                if ui.configbool('pager', 'quiet'):
-                    signal.signal(signal.SIGPIPE, signal.SIG_DFL)
         return orig(ui, options, cmd, cmdfunc)
 
     extensions.wrapfunction(dispatch, '_runcommand', pagecmd)
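
The rewrite above swaps fork/exec for subprocess.Popen plus an atexit hook. A self-contained sketch of the same technique using only the standard library (util.closefds and util.isatty from the diff are replaced with stdlib equivalents):

    import atexit, os, subprocess, sys

    def runpager(cmd):
        # Start the pager with a pipe for its stdin; it inherits our
        # real stdout/stderr for its own display.
        pager = subprocess.Popen(cmd, shell=True, bufsize=-1,
                                 close_fds=(os.name == 'posix'),
                                 stdin=subprocess.PIPE,
                                 stdout=sys.stdout, stderr=sys.stderr)

        # Save the original descriptors, then point fd 1 (and fd 2 when
        # it is a tty) at the pager's stdin.
        stdout = os.dup(sys.stdout.fileno())
        stderr = os.dup(sys.stderr.fileno())
        os.dup2(pager.stdin.fileno(), sys.stdout.fileno())
        if sys.stderr.isatty():
            os.dup2(pager.stdin.fileno(), sys.stderr.fileno())

        @atexit.register
        def killpager():
            # Flush anything still buffered in Python, close the write
            # side so the pager sees EOF, restore the saved descriptors,
            # then wait for the pager to exit.
            sys.stdout.flush()
            pager.stdin.close()
            os.dup2(stdout, sys.stdout.fileno())
            os.dup2(stderr, sys.stderr.fileno())
            pager.wait()

    if __name__ == '__main__':
        runpager(os.environ.get('PAGER', 'more'))
        for i in range(100):
            sys.stdout.write('line %d\n' % i)
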
--- a/hgext/transplant.py	Wed Apr 25 01:35:39 2012 +0200
+++ b/hgext/transplant.py	Sat May 12 13:20:26 2012 +0200
@@ -124,7 +124,7 @@
                     continue
 
                 parents = source.changelog.parents(node)
-                if not opts.get('filter'):
+                if not (opts.get('filter') or opts.get('log')):
                     # If the changeset parent is the same as the
                     # wdir's parent, just pull it.
                     if parents[0] == p1:
--- a/mercurial/cmdutil.py	Wed Apr 25 01:35:39 2012 +0200
+++ b/mercurial/cmdutil.py	Sat May 12 13:20:26 2012 +0200
@@ -1311,6 +1311,12 @@
         #          |
         # base     o - parent of amending changeset
 
+        # Update extra dict from amended commit (e.g. to preserve graft source)
+        extra.update(old.extra())
+
+        # Also update it from the intermediate commit or from the wctx
+        extra.update(ctx.extra())
+
         files = set(old.files())
 
         # Second, we use either the commit we just did, or if there were no
@@ -1322,7 +1328,6 @@
             user = ctx.user()
             date = ctx.date()
             message = ctx.description()
-            extra = ctx.extra()
             # Recompute copies (avoid recording a -> b -> a)
             copied = copies.pathcopies(base, ctx)
 
--- a/mercurial/commands.py	Wed Apr 25 01:35:39 2012 +0200
+++ b/mercurial/commands.py	Sat May 12 13:20:26 2012 +0200
@@ -196,6 +196,8 @@
     be identical) as its parameter. Detecting renamed files this way
     can be expensive. After using this option, :hg:`status -C` can be
     used to check which files were identified as moved or renamed.
+    If this option is not specified, only renames of identical files
+    are detected.
 
     Returns 0 if all files are successfully added.
     """
@@ -518,10 +520,12 @@
     revision as good or bad without checking it out first.
 
     If you supply a command, it will be used for automatic bisection.
-    Its exit status will be used to mark revisions as good or bad:
-    status 0 means good, 125 means to skip the revision, 127
-    (command not found) will abort the bisection, and any other
-    non-zero exit status means the revision is bad.
+    The environment variable HG_NODE will contain the ID of the
+    changeset being tested. The exit status of the command will be
+    used to mark revisions as good or bad: status 0 means good, 125
+    means to skip the revision, 127 (command not found) will abort the
+    bisection, and any other non-zero exit status means the revision
+    is bad.
 
     .. container:: verbose
 
@@ -561,6 +565,11 @@
 
           hg log -r "bisect(pruned)"
 
+      - see the changeset currently being bisected (especially useful
+        if running with -U/--noupdate)::
+
+          hg log -r "bisect(current)"
+
       - see all changesets that took part in the current bisection::
 
           hg log -r "bisect(range)"
@@ -645,10 +654,22 @@
     if command:
         changesets = 1
         try:
+            node = state['current'][0]
+        except LookupError:
+            if noupdate:
+                raise util.Abort(_('current bisect revision is unknown - '
+                                   'start a new bisect to fix'))
+            node, p2 = repo.dirstate.parents()
+            if p2 != nullid:
+                raise util.Abort(_('current bisect revision is a merge'))
+        try:
             while changesets:
                 # update state
+                state['current'] = [node]
                 hbisect.save_state(repo, state)
-                status = util.system(command, out=ui.fout)
+                status = util.system(command,
+                                     environ={'HG_NODE': hex(node)},
+                                     out=ui.fout)
                 if status == 125:
                     transition = "skip"
                 elif status == 0:
@@ -660,7 +681,7 @@
                     raise util.Abort(_("%s killed") % command)
                 else:
                     transition = "bad"
-                ctx = scmutil.revsingle(repo, rev)
+                ctx = scmutil.revsingle(repo, rev, node)
                 rev = None # clear for future iterations
                 state[transition].append(ctx.node())
                 ui.status(_('Changeset %d:%s: %s\n') % (ctx, ctx, transition))
@@ -668,9 +689,12 @@
                 # bisect
                 nodes, changesets, good = hbisect.bisect(repo.changelog, state)
                 # update to next check
-                cmdutil.bailifchanged(repo)
-                hg.clean(repo, nodes[0], show_stats=False)
+                node = nodes[0]
+                if not noupdate:
+                    cmdutil.bailifchanged(repo)
+                    hg.clean(repo, node, show_stats=False)
         finally:
+            state['current'] = [node]
             hbisect.save_state(repo, state)
         print_result(nodes, good)
         return
@@ -702,6 +726,8 @@
             if extendnode is not None:
                 ui.write(_("Extending search to changeset %d:%s\n"
                          % (extendnode.rev(), extendnode)))
+                state['current'] = [extendnode.node()]
+                hbisect.save_state(repo, state)
                 if noupdate:
                     return
                 cmdutil.bailifchanged(repo)
@@ -721,6 +747,8 @@
         ui.write(_("Testing changeset %d:%s "
                    "(%d changesets remaining, ~%d tests)\n")
                  % (rev, short(node), changesets, tests))
+        state['current'] = [node]
+        hbisect.save_state(repo, state)
         if not noupdate:
             cmdutil.bailifchanged(repo)
             return hg.clean(repo, node)
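
With HG_NODE exported, the bisection command can identify the changeset under test even when run with -U/--noupdate. A sketch of such a driver script (the make targets are placeholders; exit statuses follow the convention documented above: 0 good, 125 skip, anything else bad):

    #!/usr/bin/env python
    # Placeholder driver for 'hg bisect --command ./bisect-test.py'.
    import os, subprocess, sys

    node = os.environ.get('HG_NODE', 'unknown')
    sys.stderr.write('testing changeset %s\n' % node)

    if subprocess.call(['make', 'build']) != 0:
        sys.exit(125)       # revision does not build: tell bisect to skip it
    status = subprocess.call(['make', 'test'])
    sys.exit(1 if status else 0)   # clamp to 0/1 so a stray 125/127 from
                                   # make is not misread by bisect
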
@@ -2553,6 +2581,7 @@
     'graft',
     [('c', 'continue', False, _('resume interrupted graft')),
      ('e', 'edit', False, _('invoke editor on commit messages')),
+     ('', 'log', None, _('append graft info to log message')),
      ('D', 'currentdate', False,
       _('record the current date as commit date')),
      ('U', 'currentuser', False,
@@ -2571,6 +2600,11 @@
     Changesets that are ancestors of the current revision, that have
     already been grafted, or that are merges will be skipped.
 
+    If --log is specified, log messages will have a comment appended
+    of the form::
+
+      (grafted from CHANGESETHASH)
+
     If a graft merge results in conflicts, the graft process is
     interrupted so that the current merge can be manually resolved.
     Once all conflicts are addressed, the graft process can be
@@ -2720,7 +2754,10 @@
             date = ctx.date()
             if opts.get('date'):
                 date = opts['date']
-            node = repo.commit(text=ctx.description(), user=user,
+            message = ctx.description()
+            if opts.get('log'):
+                message += '\n(grafted from %s)' % ctx.hex()
+            node = repo.commit(text=message, user=user,
                         date=date, extra=extra, editor=editor)
             if node is None:
                 ui.status(_('graft for revision %s is empty\n') % ctx.rev())
@@ -4347,10 +4384,10 @@
         lock = repo.lock()
         try:
             # set phase
-            nodes = [ctx.node() for ctx in repo.set('%ld', revs)]
-            if not nodes:
-                raise util.Abort(_('empty revision set'))
-            olddata = repo._phaserev[:]
+            if not revs:
+                raise util.Abort(_('empty revision set'))
+            nodes = [repo[r].node() for r in revs]
+            olddata = repo._phasecache.getphaserevs(repo)[:]
             phases.advanceboundary(repo, targetphase, nodes)
             if opts['force']:
                 phases.retractboundary(repo, targetphase, nodes)
@@ -4358,7 +4395,7 @@
             lock.release()
         if olddata is not None:
             changes = 0
-            newdata = repo._phaserev
+            newdata = repo._phasecache.getphaserevs(repo)
             changes = sum(o != newdata[i] for i, o in enumerate(olddata))
             rejected = [n for n in nodes
                         if newdata[repo[n].rev()] < targetphase]
--- a/mercurial/context.py	Wed Apr 25 01:35:39 2012 +0200
+++ b/mercurial/context.py	Sat May 12 13:20:26 2012 +0200
@@ -191,12 +191,7 @@
     def bookmarks(self):
         return self._repo.nodebookmarks(self._node)
     def phase(self):
-        if self._rev == -1:
-            return phases.public
-        if self._rev >= len(self._repo._phaserev):
-            # outdated cache
-            del self._repo._phaserev
-        return self._repo._phaserev[self._rev]
+        return self._repo._phasecache.phase(self._repo, self._rev)
     def phasestr(self):
         return phases.phasenames[self.phase()]
     def mutable(self):
--- a/mercurial/diffhelpers.c	Wed Apr 25 01:35:39 2012 +0200
+++ b/mercurial/diffhelpers.c	Sat May 12 13:20:26 2012 +0200
@@ -135,7 +135,7 @@
 		return NULL;
 	alen = PyList_Size(a);
 	blen = PyList_Size(b);
-	if (alen > blen - bstart) {
+	if (alen > blen - bstart || bstart < 0) {
 		return Py_BuildValue("l", -1);
 	}
 	for (i = 0; i < alen; i++) {
--- a/mercurial/discovery.py	Wed Apr 25 01:35:39 2012 +0200
+++ b/mercurial/discovery.py	Sat May 12 13:20:26 2012 +0200
@@ -105,7 +105,7 @@
     og.commonheads, _any, _hds = commoninc
 
     # compute outgoing
-    if not repo._phaseroots[phases.secret]:
+    if not repo._phasecache.phaseroots[phases.secret]:
         og.missingheads = onlyheads or repo.heads()
     elif onlyheads is None:
         # use visible heads as it should be cached
--- a/mercurial/hbisect.py	Wed Apr 25 01:35:39 2012 +0200
+++ b/mercurial/hbisect.py	Sat May 12 13:20:26 2012 +0200
@@ -132,7 +132,7 @@
 
 
 def load_state(repo):
-    state = {'good': [], 'bad': [], 'skip': []}
+    state = {'current': [], 'good': [], 'bad': [], 'skip': []}
     if os.path.exists(repo.join("bisect.state")):
         for l in repo.opener("bisect.state"):
             kind, node = l[:-1].split()
@@ -164,10 +164,11 @@
     - ``pruned``             : csets that are goods, bads or skipped
     - ``untested``           : csets whose fate is yet unknown
     - ``ignored``            : csets ignored due to DAG topology
+    - ``current``            : the cset currently being bisected
     """
     state = load_state(repo)
-    if status in ('good', 'bad', 'skip'):
-        return [repo.changelog.rev(n) for n in state[status]]
+    if status in ('good', 'bad', 'skip', 'current'):
+        return map(repo.changelog.rev, state[status])
     else:
        # In the following sets, we do *not* call 'bisect()' with more
        # than one level of recursion, because that can be very, very
@@ -233,7 +234,7 @@
     if rev in get(repo, 'skip'):
         # i18n: bisect changeset status
         return _('skipped')
-    if rev in get(repo, 'untested'):
+    if rev in get(repo, 'untested') or rev in get(repo, 'current'):
         # i18n: bisect changeset status
         return _('untested')
     if rev in get(repo, 'ignored'):
--- a/mercurial/httpclient/__init__.py	Wed Apr 25 01:35:39 2012 +0200
+++ b/mercurial/httpclient/__init__.py	Sat May 12 13:20:26 2012 +0200
@@ -45,6 +45,7 @@
 import select
 import socket
 
+import _readers
 import socketutil
 
 logger = logging.getLogger(__name__)
@@ -54,8 +55,6 @@
 HTTP_VER_1_0 = 'HTTP/1.0'
 HTTP_VER_1_1 = 'HTTP/1.1'
 
-_LEN_CLOSE_IS_END = -1
-
 OUTGOING_BUFFER_SIZE = 1 << 15
 INCOMING_BUFFER_SIZE = 1 << 20
 
@@ -83,23 +82,19 @@
     The response will continue to load as available. If you need the
     complete response before continuing, check the .complete() method.
     """
-    def __init__(self, sock, timeout):
+    def __init__(self, sock, timeout, method):
         self.sock = sock
+        self.method = method
         self.raw_response = ''
-        self._body = None
         self._headers_len = 0
-        self._content_len = 0
         self.headers = None
         self.will_close = False
         self.status_line = ''
         self.status = None
+        self.continued = False
         self.http_version = None
         self.reason = None
-        self._chunked = False
-        self._chunked_done = False
-        self._chunked_until_next = 0
-        self._chunked_skip_bytes = 0
-        self._chunked_preloaded_block = None
+        self._reader = None
 
         self._read_location = 0
         self._eol = EOL
@@ -117,11 +112,12 @@
         socket is closed, this will nearly always return False, even
         in cases where all the data has actually been loaded.
         """
-        if self._chunked:
-            return self._chunked_done
-        if self._content_len == _LEN_CLOSE_IS_END:
-            return False
-        return self._body is not None and len(self._body) >= self._content_len
+        if self._reader:
+            return self._reader.done()
+
+    def _close(self):
+        if self._reader is not None:
+            self._reader._close()
 
     def readline(self):
         """Read a single line from the response body.
@@ -129,30 +125,34 @@
         This may block until either a line ending is found or the
         response is complete.
         """
-        eol = self._body.find('\n', self._read_location)
-        while eol == -1 and not self.complete():
+        # TODO: move this into the reader interface where it can be
+        # smarter (and probably avoid copies)
+        bytes = []
+        while not bytes:
+            try:
+                bytes = [self._reader.read(1)]
+            except _readers.ReadNotReady:
+                self._select()
+        while bytes[-1] != '\n' and not self.complete():
             self._select()
-            eol = self._body.find('\n', self._read_location)
-        if eol != -1:
-            eol += 1
-        else:
-            eol = len(self._body)
-        data = self._body[self._read_location:eol]
-        self._read_location = eol
-        return data
+            bytes.append(self._reader.read(1))
+        if bytes[-1] != '\n':
+            next = self._reader.read(1)
+            while next and next != '\n':
+                bytes.append(next)
+                next = self._reader.read(1)
+            bytes.append(next)
+        return ''.join(bytes)
 
     def read(self, length=None):
         # if length is None, unbounded read
         while (not self.complete()  # never select on a finished read
                and (not length  # unbounded, so we wait for complete()
-                    or (self._read_location + length) > len(self._body))):
+                    or length > self._reader.available_data)):
             self._select()
         if not length:
-            length = len(self._body) - self._read_location
-        elif len(self._body) < (self._read_location + length):
-            length = len(self._body) - self._read_location
-        r = self._body[self._read_location:self._read_location + length]
-        self._read_location += len(r)
+            length = self._reader.available_data
+        r = self._reader.read(length)
         if self.complete() and self.will_close:
             self.sock.close()
         return r
@@ -160,93 +160,35 @@
     def _select(self):
         r, _, _ = select.select([self.sock], [], [], self._timeout)
         if not r:
-            # socket was not readable. If the response is not complete
-            # and we're not a _LEN_CLOSE_IS_END response, raise a timeout.
-            # If we are a _LEN_CLOSE_IS_END response and we have no data,
-            # raise a timeout.
-            if not (self.complete() or
-                    (self._content_len == _LEN_CLOSE_IS_END and self._body)):
+            # socket was not readable. If the response is not
+            # complete, raise a timeout.
+            if not self.complete():
                 logger.info('timed out with timeout of %s', self._timeout)
                 raise HTTPTimeoutException('timeout reading data')
-            logger.info('cl: %r body: %r', self._content_len, self._body)
         try:
             data = self.sock.recv(INCOMING_BUFFER_SIZE)
-            # If the socket was readable and no data was read, that
-            # means the socket was closed. If this isn't a
-            # _CLOSE_IS_END socket, then something is wrong if we're
-            # here (we shouldn't enter _select() if the response is
-            # complete), so abort.
-            if not data and self._content_len != _LEN_CLOSE_IS_END:
-                raise HTTPRemoteClosedError(
-                    'server appears to have closed the socket mid-response')
         except socket.sslerror, e:
             if e.args[0] != socket.SSL_ERROR_WANT_READ:
                 raise
             logger.debug('SSL_WANT_READ in _select, should retry later')
             return True
         logger.debug('response read %d data during _select', len(data))
+        # If the socket was readable and no data was read, that means
+        # the socket was closed. Inform the reader (if any) so it can
+        # raise an exception if this is an invalid situation.
         if not data:
-            if self.headers and self._content_len == _LEN_CLOSE_IS_END:
-                self._content_len = len(self._body)
+            if self._reader:
+                self._reader._close()
             return False
         else:
             self._load_response(data)
             return True
 
-    def _chunked_parsedata(self, data):
-        if self._chunked_preloaded_block:
-            data = self._chunked_preloaded_block + data
-            self._chunked_preloaded_block = None
-        while data:
-            logger.debug('looping with %d data remaining', len(data))
-            # Slice out anything we should skip
-            if self._chunked_skip_bytes:
-                if len(data) <= self._chunked_skip_bytes:
-                    self._chunked_skip_bytes -= len(data)
-                    data = ''
-                    break
-                else:
-                    data = data[self._chunked_skip_bytes:]
-                    self._chunked_skip_bytes = 0
-
-            # determine how much is until the next chunk
-            if self._chunked_until_next:
-                amt = self._chunked_until_next
-                logger.debug('reading remaining %d of existing chunk', amt)
-                self._chunked_until_next = 0
-                body = data
-            else:
-                try:
-                    amt, body = data.split(self._eol, 1)
-                except ValueError:
-                    self._chunked_preloaded_block = data
-                    logger.debug('saving %r as a preloaded block for chunked',
-                                 self._chunked_preloaded_block)
-                    return
-                amt = int(amt, base=16)
-                logger.debug('reading chunk of length %d', amt)
-                if amt == 0:
-                    self._chunked_done = True
-
-            # read through end of what we have or the chunk
-            self._body += body[:amt]
-            if len(body) >= amt:
-                data = body[amt:]
-                self._chunked_skip_bytes = len(self._eol)
-            else:
-                self._chunked_until_next = amt - len(body)
-                self._chunked_skip_bytes = 0
-                data = ''
-
     def _load_response(self, data):
-        if self._chunked:
-            self._chunked_parsedata(data)
-            return
-        elif self._body is not None:
-            self._body += data
-            return
-
-        # We haven't seen end of headers yet
+        # Being here implies we're not at the end of the headers yet,
+        # since at the end of this method if headers were completely
+        # loaded we replace this method with the load() method of the
+        # reader we created.
         self.raw_response += data
         # This is a bogus server with bad line endings
         if self._eol not in self.raw_response:
@@ -270,6 +212,7 @@
         http_ver, status = hdrs.split(' ', 1)
         if status.startswith('100'):
             self.raw_response = body
+            self.continued = True
             logger.debug('continue seen, setting body to %r', body)
             return
 
@@ -289,23 +232,46 @@
         if self._eol != EOL:
             hdrs = hdrs.replace(self._eol, '\r\n')
         headers = rfc822.Message(cStringIO.StringIO(hdrs))
+        content_len = None
         if HDR_CONTENT_LENGTH in headers:
-            self._content_len = int(headers[HDR_CONTENT_LENGTH])
+            content_len = int(headers[HDR_CONTENT_LENGTH])
         if self.http_version == HTTP_VER_1_0:
             self.will_close = True
         elif HDR_CONNECTION_CTRL in headers:
             self.will_close = (
                 headers[HDR_CONNECTION_CTRL].lower() == CONNECTION_CLOSE)
-            if self._content_len == 0:
-                self._content_len = _LEN_CLOSE_IS_END
         if (HDR_XFER_ENCODING in headers
             and headers[HDR_XFER_ENCODING].lower() == XFER_ENCODING_CHUNKED):
-            self._body = ''
-            self._chunked_parsedata(body)
-            self._chunked = True
-        if self._body is None:
-            self._body = body
+            self._reader = _readers.ChunkedReader(self._eol)
+            logger.debug('using a chunked reader')
+        else:
+            # HEAD responses are forbidden from returning a body, and
+            # it's implausible for a CONNECT response to use
+            # close-is-end logic for an OK response.
+            if (self.method == 'HEAD' or
+                (self.method == 'CONNECT' and content_len is None)):
+                content_len = 0
+            if content_len is not None:
+                logger.debug('using a content-length reader with length %d',
+                             content_len)
+                self._reader = _readers.ContentLengthReader(content_len)
+            else:
+                # Response body had no length specified and is not
+                # chunked, so the end of the body will only be
+                # identifiable by the termination of the socket by the
+                # server. My interpretation of the spec means that we
+                # are correct in hitting this case if
+                # transfer-encoding, content-length, and
+                # connection-control were left unspecified.
+                self._reader = _readers.CloseIsEndReader()
+                logger.debug('using a close-is-end reader')
+                self.will_close = True
+
+        if body:
+            self._reader._load(body)
+        logger.debug('headers complete')
         self.headers = headers
+        self._load_response = self._reader._load
 
 
 class HTTPConnection(object):
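
The last line of the hunk above (self._load_response = self._reader._load) relies on instance attributes shadowing class methods: once the headers are parsed, every later chunk of data is handed straight to the reader with no re-dispatch. A minimal standalone sketch of the idiom:

    class response(object):
        def _load_response(self, data):
            # header-parsing phase; once done, rebind the entry point on
            # the instance so later data skips this method entirely
            self.headers = data
            self._load_response = self._loadbody

        def _loadbody(self, data):
            self.body = getattr(self, 'body', '') + data

    r = response()
    r._load_response('HTTP/1.1 200 OK')   # parsed once, then rebound
    r._load_response('hello ')            # dispatched straight to _loadbody
    r._load_response('world')
    assert r.body == 'hello world'
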
@@ -382,13 +348,14 @@
                                          {}, HTTP_VER_1_0)
                 sock.send(data)
                 sock.setblocking(0)
-                r = self.response_class(sock, self.timeout)
+                r = self.response_class(sock, self.timeout, 'CONNECT')
                 timeout_exc = HTTPTimeoutException(
                     'Timed out waiting for CONNECT response from proxy')
                 while not r.complete():
                     try:
                         if not r._select():
-                            raise timeout_exc
+                            if not r.complete():
+                                raise timeout_exc
                     except HTTPTimeoutException:
                         # This raise/except pattern looks goofy, but
                         # _select can raise the timeout as well as the
@@ -527,7 +494,7 @@
             out = outgoing_headers or body
             blocking_on_continue = False
             if expect_continue and not outgoing_headers and not (
-                response and response.headers):
+                response and (response.headers or response.continued)):
                 logger.info(
                     'waiting up to %s seconds for'
                     ' continue response from server',
@@ -550,11 +517,6 @@
                                 'server, optimistically sending request body')
                 else:
                     raise HTTPTimeoutException('timeout sending data')
-            # TODO exceptional conditions with select? (what are those be?)
-            # TODO if the response is loading, must we finish sending at all?
-            #
-            # Certainly not if it's going to close the connection and/or
-            # the response is already done...I think.
             was_first = first
 
             # incoming data
@@ -572,11 +534,11 @@
                         logger.info('socket appears closed in read')
                         self.sock = None
                         self._current_response = None
+                        if response is not None:
+                            response._close()
                         # This if/elif ladder is a bit subtle,
                         # comments in each branch should help.
-                        if response is not None and (
-                            response.complete() or
-                            response._content_len == _LEN_CLOSE_IS_END):
+                        if response is not None and response.complete():
                             # Server responded completely and then
                             # closed the socket. We should just shut
                             # things down and let the caller get their
@@ -605,7 +567,7 @@
                                 'response was missing or incomplete!')
                     logger.debug('read %d bytes in request()', len(data))
                     if response is None:
-                        response = self.response_class(r[0], self.timeout)
+                        response = self.response_class(r[0], self.timeout, method)
                     response._load_response(data)
                     # Jump to the next select() call so we load more
                     # data if the server is still sending us content.
@@ -613,10 +575,6 @@
                 except socket.error, e:
                     if e[0] != errno.EPIPE and not was_first:
                         raise
-                    if (response._content_len
-                        and response._content_len != _LEN_CLOSE_IS_END):
-                        outgoing_headers = sent_data + outgoing_headers
-                        reconnect('read')
 
             # outgoing data
             if w and out:
@@ -661,7 +619,7 @@
         # close if the server response said to or responded before eating
         # the whole request
         if response is None:
-            response = self.response_class(self.sock, self.timeout)
+            response = self.response_class(self.sock, self.timeout, method)
         complete = response.complete()
         data_left = bool(outgoing_headers or body)
         if data_left:
@@ -679,7 +637,8 @@
             raise httplib.ResponseNotReady()
         r = self._current_response
         while r.headers is None:
-            r._select()
+            if not r._select() and not r.complete():
+                raise _readers.HTTPRemoteClosedError()
         if r.will_close:
             self.sock = None
             self._current_response = None
@@ -705,7 +664,7 @@
 class HTTPStateError(httplib.HTTPException):
     """Invalid internal state encountered."""
 
-
-class HTTPRemoteClosedError(httplib.HTTPException):
-    """The server closed the remote socket in the middle of a response."""
+# Forward this exception type from _readers since it needs to be part
+# of the public API.
+HTTPRemoteClosedError = _readers.HTTPRemoteClosedError
 # no-check-code
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/httpclient/_readers.py	Sat May 12 13:20:26 2012 +0200
@@ -0,0 +1,195 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+"""Reader objects to abstract out different body response types.
+
+This module is package-private. It is not expected that these will
+have any clients outside of httpplus.
+"""
+
+import httplib
+import itertools
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class ReadNotReady(Exception):
+    """Raised when read() is attempted but not enough data is loaded."""
+
+
+class HTTPRemoteClosedError(httplib.HTTPException):
+    """The server closed the remote socket in the middle of a response."""
+
+
+class AbstractReader(object):
+    """Abstract base class for response readers.
+
+    Subclasses must implement _load, and should implement _close if
+    it's not an error for the server to close their socket without
+    some termination condition being detected during _load.
+    """
+    def __init__(self):
+        self._finished = False
+        self._done_chunks = []
+
+    @property
+    def available_data(self):
+        return sum(map(len, self._done_chunks))
+
+    def done(self):
+        return self._finished
+
+    def read(self, amt):
+        if self.available_data < amt and not self._finished:
+            raise ReadNotReady()
+        need = [amt]
+        def pred(s):
+            needed = need[0] > 0
+            need[0] -= len(s)
+            return needed
+        blocks = list(itertools.takewhile(pred, self._done_chunks))
+        self._done_chunks = self._done_chunks[len(blocks):]
+        over_read = sum(map(len, blocks)) - amt
+        if over_read > 0 and blocks:
+            logger.debug('need to reinsert %d data into done chunks', over_read)
+            last = blocks[-1]
+            blocks[-1], reinsert = last[:-over_read], last[-over_read:]
+            self._done_chunks.insert(0, reinsert)
+        result = ''.join(blocks)
+        assert len(result) == amt or (self._finished and len(result) < amt)
+        return result
+
+    def _load(self, data): # pragma: no cover
+        """Subclasses must implement this.
+
+        As data is available to be read out of this object, it should
+        be placed into the _done_chunks list. Subclasses should not
+        rely on data remaining in _done_chunks forever, as it may be
+        reaped if the client is parsing data as it comes in.
+        """
+        raise NotImplementedError
+
+    def _close(self):
+        """Default implementation of close.
+
+        The default implementation assumes that the reader will mark
+        the response as finished on the _finished attribute once the
+        entire response body has been read. In the event that this is
+        not true, the subclass should override the implementation of
+        close (for example, close-is-end responses have to set
+        self._finished in the close handler.)
+        """
+        if not self._finished:
+            raise HTTPRemoteClosedError(
+                'server appears to have closed the socket mid-response')
+
+
+class AbstractSimpleReader(AbstractReader):
+    """Abstract base class for simple readers that require no response decoding.
+
+    Examples of such responses are Connection: Close (close-is-end)
+    and responses that specify a content length.
+    """
+    def _load(self, data):
+        if data:
+            assert not self._finished, (
+                'tried to add data (%r) to a closed reader!' % data)
+        logger.debug('%s read an additional %d data', self.name, len(data))
+        self._done_chunks.append(data)
+
+
+class CloseIsEndReader(AbstractSimpleReader):
+    """Reader for responses that specify Connection: Close for length."""
+    name = 'close-is-end'
+
+    def _close(self):
+        logger.info('Marking close-is-end reader as closed.')
+        self._finished = True
+
+
+class ContentLengthReader(AbstractSimpleReader):
+    """Reader for responses that specify an exact content length."""
+    name = 'content-length'
+
+    def __init__(self, amount):
+        AbstractReader.__init__(self)
+        self._amount = amount
+        if amount == 0:
+            self._finished = True
+        self._amount_seen = 0
+
+    def _load(self, data):
+        AbstractSimpleReader._load(self, data)
+        self._amount_seen += len(data)
+        if self._amount_seen >= self._amount:
+            self._finished = True
+            logger.debug('content-length read complete')
+
+
+class ChunkedReader(AbstractReader):
+    """Reader for chunked transfer encoding responses."""
+    def __init__(self, eol):
+        AbstractReader.__init__(self)
+        self._eol = eol
+        self._leftover_skip_amt = 0
+        self._leftover_data = ''
+
+    def _load(self, data):
+        assert not self._finished, 'tried to add data to a closed reader!'
+        logger.debug('chunked read an additional %d data', len(data))
+        position = 0
+        if self._leftover_data:
+            logger.debug('chunked reader trying to finish block from leftover data')
+            # TODO: avoid this string concatenation if possible
+            data = self._leftover_data + data
+            position = self._leftover_skip_amt
+            self._leftover_data = ''
+            self._leftover_skip_amt = 0
+        datalen = len(data)
+        while position < datalen:
+            split = data.find(self._eol, position)
+            if split == -1:
+                self._leftover_data = data
+                self._leftover_skip_amt = position
+                return
+            amt = int(data[position:split], base=16)
+            block_start = split + len(self._eol)
+            # If the whole data chunk plus the eol trailer hasn't
+            # loaded, we'll wait for the next load.
+            if block_start + amt + len(self._eol) > len(data):
+                self._leftover_data = data
+                self._leftover_skip_amt = position
+                return
+            if amt == 0:
+                self._finished = True
+                logger.debug('closing chunked reader due to chunk of length 0')
+                return
+            self._done_chunks.append(data[block_start:block_start + amt])
+            position = block_start + amt + len(self._eol)
+# no-check-code
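
To make the reader contract concrete, here is a small exercise of ChunkedReader through its package-private interface (illustration only; real callers reach it via the response object):

    import _readers   # sibling module, as imported by httpclient/__init__.py

    reader = _readers.ChunkedReader('\r\n')
    # two chunks ("Wiki", "pedia") followed by the zero-length terminator
    reader._load('4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n')
    assert reader.done()
    assert reader.available_data == 9
    assert reader.read(9) == 'Wikipedia'
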
--- a/mercurial/httpclient/tests/simple_http_test.py	Wed Apr 25 01:35:39 2012 +0200
+++ b/mercurial/httpclient/tests/simple_http_test.py	Sat May 12 13:20:26 2012 +0200
@@ -29,7 +29,7 @@
 import socket
 import unittest
 
-import http
+import httpplus
 
 # relative import to ease embedding the library
 import util
@@ -38,7 +38,7 @@
 class SimpleHttpTest(util.HttpTestBase, unittest.TestCase):
 
     def _run_simple_test(self, host, server_data, expected_req, expected_data):
-        con = http.HTTPConnection(host)
+        con = httpplus.HTTPConnection(host)
         con._connect()
         con.sock.data = server_data
         con.request('GET', '/')
@@ -47,9 +47,9 @@
         self.assertEqual(expected_data, con.getresponse().read())
 
     def test_broken_data_obj(self):
-        con = http.HTTPConnection('1.2.3.4:80')
+        con = httpplus.HTTPConnection('1.2.3.4:80')
         con._connect()
-        self.assertRaises(http.BadRequestData,
+        self.assertRaises(httpplus.BadRequestData,
                           con.request, 'POST', '/', body=1)
 
     def test_no_keepalive_http_1_0(self):
@@ -74,7 +74,7 @@
 fncache
 dotencode
 """
-        con = http.HTTPConnection('localhost:9999')
+        con = httpplus.HTTPConnection('localhost:9999')
         con._connect()
         con.sock.data = [expected_response_headers, expected_response_body]
         con.request('GET', '/remote/.hg/requires',
@@ -95,7 +95,7 @@
         self.assert_(resp.sock.closed)
 
     def test_multiline_header(self):
-        con = http.HTTPConnection('1.2.3.4:80')
+        con = httpplus.HTTPConnection('1.2.3.4:80')
         con._connect()
         con.sock.data = ['HTTP/1.1 200 OK\r\n',
                          'Server: BogusServer 1.0\r\n',
@@ -122,7 +122,7 @@
         self.assertEqual(con.sock.closed, False)
 
     def testSimpleRequest(self):
-        con = http.HTTPConnection('1.2.3.4:80')
+        con = httpplus.HTTPConnection('1.2.3.4:80')
         con._connect()
         con.sock.data = ['HTTP/1.1 200 OK\r\n',
                          'Server: BogusServer 1.0\r\n',
@@ -149,12 +149,13 @@
                          resp.headers.getheaders('server'))
 
     def testHeaderlessResponse(self):
-        con = http.HTTPConnection('1.2.3.4', use_ssl=False)
+        con = httpplus.HTTPConnection('1.2.3.4', use_ssl=False)
         con._connect()
         con.sock.data = ['HTTP/1.1 200 OK\r\n',
                          '\r\n'
                          '1234567890'
                          ]
+        con.sock.close_on_empty = True
         con.request('GET', '/')
 
         expected_req = ('GET / HTTP/1.1\r\n'
@@ -169,7 +170,30 @@
         self.assertEqual(resp.status, 200)
 
     def testReadline(self):
-        con = http.HTTPConnection('1.2.3.4')
+        con = httpplus.HTTPConnection('1.2.3.4')
+        con._connect()
+        con.sock.data = ['HTTP/1.1 200 OK\r\n',
+                         'Server: BogusServer 1.0\r\n',
+                         'Connection: Close\r\n',
+                         '\r\n'
+                         '1\n2\nabcdefg\n4\n5']
+        con.sock.close_on_empty = True
+
+        expected_req = ('GET / HTTP/1.1\r\n'
+                        'Host: 1.2.3.4\r\n'
+                        'accept-encoding: identity\r\n\r\n')
+
+        con.request('GET', '/')
+        self.assertEqual(('1.2.3.4', 80), con.sock.sa)
+        self.assertEqual(expected_req, con.sock.sent)
+        r = con.getresponse()
+        for expected in ['1\n', '2\n', 'abcdefg\n', '4\n', '5']:
+            actual = r.readline()
+            self.assertEqual(expected, actual,
+                             'Expected %r, got %r' % (expected, actual))
+
+    def testReadlineTrickle(self):
+        con = httpplus.HTTPConnection('1.2.3.4')
         con._connect()
         # make sure it trickles in one byte at a time
         # so that we touch all the cases in readline
@@ -179,6 +203,7 @@
              'Connection: Close\r\n',
              '\r\n'
              '1\n2\nabcdefg\n4\n5']))
+        con.sock.close_on_empty = True
 
         expected_req = ('GET / HTTP/1.1\r\n'
                         'Host: 1.2.3.4\r\n'
@@ -193,6 +218,59 @@
             self.assertEqual(expected, actual,
                              'Expected %r, got %r' % (expected, actual))
 
+    def testVariousReads(self):
+        con = httpplus.HTTPConnection('1.2.3.4')
+        con._connect()
+        # make sure it trickles in one byte at a time
+        # so that we touch all the cases in readline
+        con.sock.data = list(''.join(
+            ['HTTP/1.1 200 OK\r\n',
+             'Server: BogusServer 1.0\r\n',
+             'Connection: Close\r\n',
+             '\r\n'
+             '1\n2',
+             '\na', 'bc',
+             'defg\n4\n5']))
+        con.sock.close_on_empty = True
+
+        expected_req = ('GET / HTTP/1.1\r\n'
+                        'Host: 1.2.3.4\r\n'
+                        'accept-encoding: identity\r\n\r\n')
+
+        con.request('GET', '/')
+        self.assertEqual(('1.2.3.4', 80), con.sock.sa)
+        self.assertEqual(expected_req, con.sock.sent)
+        r = con.getresponse()
+        for read_amt, expect in [(1, '1'), (1, '\n'),
+                                 (4, '2\nab'),
+                                 ('line', 'cdefg\n'),
+                                 (None, '4\n5')]:
+            if read_amt == 'line':
+                self.assertEqual(expect, r.readline())
+            else:
+                self.assertEqual(expect, r.read(read_amt))
+
+    def testZeroLengthBody(self):
+        con = httpplus.HTTPConnection('1.2.3.4')
+        con._connect()
+        # make sure it trickles in one byte at a time
+        # so that the zero-length body is handled incrementally
+        con.sock.data = list(''.join(
+            ['HTTP/1.1 200 OK\r\n',
+             'Server: BogusServer 1.0\r\n',
+             'Content-length: 0\r\n',
+             '\r\n']))
+
+        expected_req = ('GET / HTTP/1.1\r\n'
+                        'Host: 1.2.3.4\r\n'
+                        'accept-encoding: identity\r\n\r\n')
+
+        con.request('GET', '/')
+        self.assertEqual(('1.2.3.4', 80), con.sock.sa)
+        self.assertEqual(expected_req, con.sock.sent)
+        r = con.getresponse()
+        self.assertEqual('', r.read())
+
     def testIPv6(self):
         self._run_simple_test('[::1]:8221',
                         ['HTTP/1.1 200 OK\r\n',
@@ -226,7 +304,7 @@
                         '1234567890')
 
     def testEarlyContinueResponse(self):
-        con = http.HTTPConnection('1.2.3.4:80')
+        con = httpplus.HTTPConnection('1.2.3.4:80')
         con._connect()
         sock = con.sock
         sock.data = ['HTTP/1.1 403 Forbidden\r\n',
@@ -240,8 +318,23 @@
         self.assertEqual("You can't do that.", con.getresponse().read())
         self.assertEqual(sock.closed, True)
 
+    def testEarlyContinueResponseNoContentLength(self):
+        con = httpplus.HTTPConnection('1.2.3.4:80')
+        con._connect()
+        sock = con.sock
+        sock.data = ['HTTP/1.1 403 Forbidden\r\n',
+                         'Server: BogusServer 1.0\r\n',
+                         '\r\n'
+                         "You can't do that."]
+        sock.close_on_empty = True
+        expected_req = self.doPost(con, expect_body=False)
+        self.assertEqual(('1.2.3.4', 80), sock.sa)
+        self.assertStringEqual(expected_req, sock.sent)
+        self.assertEqual("You can't do that.", con.getresponse().read())
+        self.assertEqual(sock.closed, True)
+
     def testDeniedAfterContinueTimeoutExpires(self):
-        con = http.HTTPConnection('1.2.3.4:80')
+        con = httpplus.HTTPConnection('1.2.3.4:80')
         con._connect()
         sock = con.sock
         sock.data = ['HTTP/1.1 403 Forbidden\r\n',
@@ -269,7 +362,7 @@
         self.assertEqual(sock.closed, True)
 
     def testPostData(self):
-        con = http.HTTPConnection('1.2.3.4:80')
+        con = httpplus.HTTPConnection('1.2.3.4:80')
         con._connect()
         sock = con.sock
         sock.read_wait_sentinel = 'POST data'
@@ -286,7 +379,7 @@
         self.assertEqual(sock.closed, False)
 
     def testServerWithoutContinue(self):
-        con = http.HTTPConnection('1.2.3.4:80')
+        con = httpplus.HTTPConnection('1.2.3.4:80')
         con._connect()
         sock = con.sock
         sock.read_wait_sentinel = 'POST data'
@@ -302,7 +395,7 @@
         self.assertEqual(sock.closed, False)
 
     def testServerWithSlowContinue(self):
-        con = http.HTTPConnection('1.2.3.4:80')
+        con = httpplus.HTTPConnection('1.2.3.4:80')
         con._connect()
         sock = con.sock
         sock.read_wait_sentinel = 'POST data'
@@ -321,7 +414,7 @@
         self.assertEqual(sock.closed, False)
 
     def testSlowConnection(self):
-        con = http.HTTPConnection('1.2.3.4:80')
+        con = httpplus.HTTPConnection('1.2.3.4:80')
         con._connect()
         # simulate one byte arriving at a time, to check for various
         # corner cases
@@ -340,12 +433,26 @@
         self.assertEqual(expected_req, con.sock.sent)
         self.assertEqual('1234567890', con.getresponse().read())
 
+    def testCloseAfterNotAllOfHeaders(self):
+        con = httpplus.HTTPConnection('1.2.3.4:80')
+        con._connect()
+        con.sock.data = ['HTTP/1.1 200 OK\r\n',
+                         'Server: NO CARRIER']
+        con.sock.close_on_empty = True
+        con.request('GET', '/')
+        self.assertRaises(httpplus.HTTPRemoteClosedError,
+                          con.getresponse)
+
+        expected_req = ('GET / HTTP/1.1\r\n'
+                        'Host: 1.2.3.4\r\n'
+                        'accept-encoding: identity\r\n\r\n')
+
     def testTimeout(self):
-        con = http.HTTPConnection('1.2.3.4:80')
+        con = httpplus.HTTPConnection('1.2.3.4:80')
         con._connect()
         con.sock.data = []
         con.request('GET', '/')
-        self.assertRaises(http.HTTPTimeoutException,
+        self.assertRaises(httpplus.HTTPTimeoutException,
                           con.getresponse)
 
         expected_req = ('GET / HTTP/1.1\r\n'
@@ -370,7 +477,7 @@
             return s
 
         socket.socket = closingsocket
-        con = http.HTTPConnection('1.2.3.4:80')
+        con = httpplus.HTTPConnection('1.2.3.4:80')
         con._connect()
         con.request('GET', '/')
         r1 = con.getresponse()
@@ -381,7 +488,7 @@
         self.assertEqual(2, len(sockets))
 
     def test_server_closes_before_end_of_body(self):
-        con = http.HTTPConnection('1.2.3.4:80')
+        con = httpplus.HTTPConnection('1.2.3.4:80')
         con._connect()
         s = con.sock
         s.data = ['HTTP/1.1 200 OK\r\n',
@@ -393,9 +500,9 @@
         s.close_on_empty = True
         con.request('GET', '/')
         r1 = con.getresponse()
-        self.assertRaises(http.HTTPRemoteClosedError, r1.read)
+        self.assertRaises(httpplus.HTTPRemoteClosedError, r1.read)
 
     def test_no_response_raises_response_not_ready(self):
-        con = http.HTTPConnection('foo')
-        self.assertRaises(http.httplib.ResponseNotReady, con.getresponse)
+        con = httpplus.HTTPConnection('foo')
+        self.assertRaises(httpplus.httplib.ResponseNotReady, con.getresponse)
 # no-check-code
--- a/mercurial/httpclient/tests/test_bogus_responses.py	Wed Apr 25 01:35:39 2012 +0200
+++ b/mercurial/httpclient/tests/test_bogus_responses.py	Sat May 12 13:20:26 2012 +0200
@@ -34,7 +34,7 @@
 """
 import unittest
 
-import http
+import httpplus
 
 # relative import to ease embedding the library
 import util
@@ -43,7 +43,7 @@
 class SimpleHttpTest(util.HttpTestBase, unittest.TestCase):
 
     def bogusEOL(self, eol):
-        con = http.HTTPConnection('1.2.3.4:80')
+        con = httpplus.HTTPConnection('1.2.3.4:80')
         con._connect()
         con.sock.data = ['HTTP/1.1 200 OK%s' % eol,
                          'Server: BogusServer 1.0%s' % eol,
--- a/mercurial/httpclient/tests/test_chunked_transfer.py	Wed Apr 25 01:35:39 2012 +0200
+++ b/mercurial/httpclient/tests/test_chunked_transfer.py	Sat May 12 13:20:26 2012 +0200
@@ -29,7 +29,7 @@
 import cStringIO
 import unittest
 
-import http
+import httpplus
 
 # relative import to ease embedding the library
 import util
@@ -50,7 +50,7 @@
 
 class ChunkedTransferTest(util.HttpTestBase, unittest.TestCase):
     def testChunkedUpload(self):
-        con = http.HTTPConnection('1.2.3.4:80')
+        con = httpplus.HTTPConnection('1.2.3.4:80')
         con._connect()
         sock = con.sock
         sock.read_wait_sentinel = '0\r\n\r\n'
@@ -77,7 +77,7 @@
         self.assertEqual(sock.closed, False)
 
     def testChunkedDownload(self):
-        con = http.HTTPConnection('1.2.3.4:80')
+        con = httpplus.HTTPConnection('1.2.3.4:80')
         con._connect()
         sock = con.sock
         sock.data = ['HTTP/1.1 200 OK\r\n',
@@ -85,14 +85,31 @@
                      'transfer-encoding: chunked',
                      '\r\n\r\n',
                      chunkedblock('hi '),
-                     chunkedblock('there'),
+                     ] + list(chunkedblock('there')) + [
                      chunkedblock(''),
                      ]
         con.request('GET', '/')
         self.assertStringEqual('hi there', con.getresponse().read())
 
+    def testChunkedDownloadOddReadBoundaries(self):
+        con = httpplus.HTTPConnection('1.2.3.4:80')
+        con._connect()
+        sock = con.sock
+        sock.data = ['HTTP/1.1 200 OK\r\n',
+                     'Server: BogusServer 1.0\r\n',
+                     'transfer-encoding: chunked',
+                     '\r\n\r\n',
+                     chunkedblock('hi '),
+                     ] + list(chunkedblock('there')) + [
+                     chunkedblock(''),
+                     ]
+        con.request('GET', '/')
+        resp = con.getresponse()
+        for amt, expect in [(1, 'h'), (5, 'i the'), (100, 're')]:
+            self.assertEqual(expect, resp.read(amt))
+
     def testChunkedDownloadBadEOL(self):
-        con = http.HTTPConnection('1.2.3.4:80')
+        con = httpplus.HTTPConnection('1.2.3.4:80')
         con._connect()
         sock = con.sock
         sock.data = ['HTTP/1.1 200 OK\n',
@@ -107,7 +124,7 @@
         self.assertStringEqual('hi there', con.getresponse().read())
 
     def testChunkedDownloadPartialChunkBadEOL(self):
-        con = http.HTTPConnection('1.2.3.4:80')
+        con = httpplus.HTTPConnection('1.2.3.4:80')
         con._connect()
         sock = con.sock
         sock.data = ['HTTP/1.1 200 OK\n',
@@ -122,7 +139,7 @@
                                con.getresponse().read())
 
     def testChunkedDownloadPartialChunk(self):
-        con = http.HTTPConnection('1.2.3.4:80')
+        con = httpplus.HTTPConnection('1.2.3.4:80')
         con._connect()
         sock = con.sock
         sock.data = ['HTTP/1.1 200 OK\r\n',
@@ -136,7 +153,7 @@
                                con.getresponse().read())
 
     def testChunkedDownloadEarlyHangup(self):
-        con = http.HTTPConnection('1.2.3.4:80')
+        con = httpplus.HTTPConnection('1.2.3.4:80')
         con._connect()
         sock = con.sock
         broken = chunkedblock('hi'*20)[:-1]
@@ -149,5 +166,5 @@
         sock.close_on_empty = True
         con.request('GET', '/')
         resp = con.getresponse()
-        self.assertRaises(http.HTTPRemoteClosedError, resp.read)
+        self.assertRaises(httpplus.HTTPRemoteClosedError, resp.read)
 # no-check-code
--- a/mercurial/httpclient/tests/test_proxy_support.py	Wed Apr 25 01:35:39 2012 +0200
+++ b/mercurial/httpclient/tests/test_proxy_support.py	Sat May 12 13:20:26 2012 +0200
@@ -29,13 +29,13 @@
 import unittest
 import socket
 
-import http
+import httpplus
 
 # relative import to ease embedding the library
 import util
 
 
-def make_preloaded_socket(data):
+def make_preloaded_socket(data, close=False):
     """Make a socket pre-loaded with data so it can be read during connect.
 
     Useful for https proxy tests because we have to read from the
@@ -44,6 +44,7 @@
     def s(*args, **kwargs):
         sock = util.MockSocket(*args, **kwargs)
         sock.early_data = data[:]
+        sock.close_on_empty = close
         return sock
     return s
 
@@ -51,7 +52,7 @@
 class ProxyHttpTest(util.HttpTestBase, unittest.TestCase):
 
     def _run_simple_test(self, host, server_data, expected_req, expected_data):
-        con = http.HTTPConnection(host)
+        con = httpplus.HTTPConnection(host)
         con._connect()
         con.sock.data = server_data
         con.request('GET', '/')
@@ -60,7 +61,7 @@
         self.assertEqual(expected_data, con.getresponse().read())
 
     def testSimpleRequest(self):
-        con = http.HTTPConnection('1.2.3.4:80',
+        con = httpplus.HTTPConnection('1.2.3.4:80',
                                   proxy_hostport=('magicproxy', 4242))
         con._connect()
         con.sock.data = ['HTTP/1.1 200 OK\r\n',
@@ -88,7 +89,7 @@
                          resp.headers.getheaders('server'))
 
     def testSSLRequest(self):
-        con = http.HTTPConnection('1.2.3.4:443',
+        con = httpplus.HTTPConnection('1.2.3.4:443',
                                   proxy_hostport=('magicproxy', 4242))
         socket.socket = make_preloaded_socket(
             ['HTTP/1.1 200 OK\r\n',
@@ -124,12 +125,47 @@
         self.assertEqual(['BogusServer 1.0'],
                          resp.headers.getheaders('server'))
 
-    def testSSLProxyFailure(self):
-        con = http.HTTPConnection('1.2.3.4:443',
+    def testSSLRequestNoConnectBody(self):
+        con = httpplus.HTTPConnection('1.2.3.4:443',
                                   proxy_hostport=('magicproxy', 4242))
         socket.socket = make_preloaded_socket(
-            ['HTTP/1.1 407 Proxy Authentication Required\r\n\r\n'])
-        self.assertRaises(http.HTTPProxyConnectFailedException, con._connect)
-        self.assertRaises(http.HTTPProxyConnectFailedException,
+            ['HTTP/1.1 200 OK\r\n',
+             'Server: BogusServer 1.0\r\n',
+             '\r\n'])
+        con._connect()
+        con.sock.data = ['HTTP/1.1 200 OK\r\n',
+                         'Server: BogusServer 1.0\r\n',
+                         'Content-Length: 10\r\n',
+                         '\r\n'
+                         '1234567890'
+                         ]
+        connect_sent = con.sock.sent
+        con.sock.sent = ''
+        con.request('GET', '/')
+
+        expected_connect = ('CONNECT 1.2.3.4:443 HTTP/1.0\r\n'
+                            'Host: 1.2.3.4\r\n'
+                            'accept-encoding: identity\r\n'
+                            '\r\n')
+        expected_request = ('GET / HTTP/1.1\r\n'
+                            'Host: 1.2.3.4\r\n'
+                            'accept-encoding: identity\r\n\r\n')
+
+        self.assertEqual(('127.0.0.42', 4242), con.sock.sa)
+        self.assertStringEqual(expected_connect, connect_sent)
+        self.assertStringEqual(expected_request, con.sock.sent)
+        resp = con.getresponse()
+        self.assertEqual(resp.status, 200)
+        self.assertEqual('1234567890', resp.read())
+        self.assertEqual(['BogusServer 1.0'],
+                         resp.headers.getheaders('server'))
+
+    def testSSLProxyFailure(self):
+        con = httpplus.HTTPConnection('1.2.3.4:443',
+                                  proxy_hostport=('magicproxy', 4242))
+        socket.socket = make_preloaded_socket(
+            ['HTTP/1.1 407 Proxy Authentication Required\r\n\r\n'], close=True)
+        self.assertRaises(httpplus.HTTPProxyConnectFailedException, con._connect)
+        self.assertRaises(httpplus.HTTPProxyConnectFailedException,
                           con.request, 'GET', '/')
 # no-check-code
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/httpclient/tests/test_readers.py	Sat May 12 13:20:26 2012 +0200
@@ -0,0 +1,70 @@
+# Copyright 2010, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from httpplus import _readers
+
+def chunkedblock(x, eol='\r\n'):
+    r"""Make a chunked transfer-encoding block.
+
+    >>> chunkedblock('hi')
+    '2\r\nhi\r\n'
+    >>> chunkedblock('hi' * 10)
+    '14\r\nhihihihihihihihihihi\r\n'
+    >>> chunkedblock('hi', eol='\n')
+    '2\nhi\n'
+    """
+    return ''.join((hex(len(x))[2:], eol, x, eol))
+
+corpus = 'foo\r\nbar\r\nbaz\r\n'
+
+
+class ChunkedReaderTest(unittest.TestCase):
+    def test_many_block_boundaries(self):
+        for step in xrange(1, len(corpus)):
+            data = ''.join(chunkedblock(corpus[start:start+step]) for
+                           start in xrange(0, len(corpus), step))
+            for istep in xrange(1, len(data)):
+                rdr = _readers.ChunkedReader('\r\n')
+                print 'step', step, 'load', istep
+                for start in xrange(0, len(data), istep):
+                    rdr._load(data[start:start+istep])
+                rdr._load(chunkedblock(''))
+                self.assertEqual(corpus, rdr.read(len(corpus) + 1))
+
+    def test_small_chunk_blocks_large_wire_blocks(self):
+        data = ''.join(map(chunkedblock, corpus)) + chunkedblock('')
+        rdr = _readers.ChunkedReader('\r\n')
+        for start in xrange(0, len(data), 4):
+            d = data[start:start + 4]
+            if d:
+                rdr._load(d)
+        self.assertEqual(corpus, rdr.read(len(corpus)+100))
+# no-check-code
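Aside: a condensed, by-hand equivalent of what these tests drive (assuming
httpplus is importable, as in the import at the top of this test file):

    from httpplus import _readers

    rdr = _readers.ChunkedReader('\r\n')
    rdr._load('3\r\nfoo\r\n')   # one complete chunk
    rdr._load('0\r\n\r\n')     # zero-length chunk marks the end
    assert rdr.read(100) == 'foo'
    assert rdr._finished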
--- a/mercurial/httpclient/tests/test_ssl.py	Wed Apr 25 01:35:39 2012 +0200
+++ b/mercurial/httpclient/tests/test_ssl.py	Sat May 12 13:20:26 2012 +0200
@@ -28,7 +28,7 @@
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 import unittest
 
-import http
+import httpplus
 
 # relative import to ease embedding the library
 import util
@@ -37,7 +37,7 @@
 
 class HttpSslTest(util.HttpTestBase, unittest.TestCase):
     def testSslRereadRequired(self):
-        con = http.HTTPConnection('1.2.3.4:443')
+        con = httpplus.HTTPConnection('1.2.3.4:443')
         con._connect()
         # extend the list instead of assign because of how
         # MockSSLSocket works.
@@ -66,7 +66,7 @@
                          resp.headers.getheaders('server'))
 
     def testSslRereadInEarlyResponse(self):
-        con = http.HTTPConnection('1.2.3.4:443')
+        con = httpplus.HTTPConnection('1.2.3.4:443')
         con._connect()
         con.sock.early_data = ['HTTP/1.1 200 OK\r\n',
                                'Server: BogusServer 1.0\r\n',
--- a/mercurial/httpclient/tests/util.py	Wed Apr 25 01:35:39 2012 +0200
+++ b/mercurial/httpclient/tests/util.py	Sat May 12 13:20:26 2012 +0200
@@ -29,7 +29,7 @@
 import difflib
 import socket
 
-import http
+import httpplus
 
 
 class MockSocket(object):
@@ -57,7 +57,7 @@
         self.remote_closed = self.closed = False
         self.close_on_empty = False
         self.sent = ''
-        self.read_wait_sentinel = http._END_HEADERS
+        self.read_wait_sentinel = httpplus._END_HEADERS
 
     def close(self):
         self.closed = True
@@ -86,7 +86,7 @@
 
     @property
     def ready_for_read(self):
-        return ((self.early_data and http._END_HEADERS in self.sent)
+        return ((self.early_data and httpplus._END_HEADERS in self.sent)
                 or (self.read_wait_sentinel in self.sent and self.data)
                 or self.closed or self.remote_closed)
 
@@ -132,7 +132,7 @@
 
 
 def mocksslwrap(sock, keyfile=None, certfile=None,
-                server_side=False, cert_reqs=http.socketutil.CERT_NONE,
+                server_side=False, cert_reqs=httpplus.socketutil.CERT_NONE,
                 ssl_version=None, ca_certs=None,
                 do_handshake_on_connect=True,
                 suppress_ragged_eofs=True):
@@ -156,16 +156,16 @@
         self.orig_getaddrinfo = socket.getaddrinfo
         socket.getaddrinfo = mockgetaddrinfo
 
-        self.orig_select = http.select.select
-        http.select.select = mockselect
+        self.orig_select = httpplus.select.select
+        httpplus.select.select = mockselect
 
-        self.orig_sslwrap = http.socketutil.wrap_socket
-        http.socketutil.wrap_socket = mocksslwrap
+        self.orig_sslwrap = httpplus.socketutil.wrap_socket
+        httpplus.socketutil.wrap_socket = mocksslwrap
 
     def tearDown(self):
         socket.socket = self.orig_socket
-        http.select.select = self.orig_select
-        http.socketutil.wrap_socket = self.orig_sslwrap
+        httpplus.select.select = self.orig_select
+        httpplus.socketutil.wrap_socket = self.orig_sslwrap
         socket.getaddrinfo = self.orig_getaddrinfo
 
     def assertStringEqual(self, l, r):
--- a/mercurial/localrepo.py	Wed Apr 25 01:35:39 2012 +0200
+++ b/mercurial/localrepo.py	Sat May 12 13:20:26 2012 +0200
@@ -41,7 +41,6 @@
         self.wopener = scmutil.opener(self.root)
         self.baseui = baseui
         self.ui = baseui.copy()
-        self._dirtyphases = False
         # A list of callbacks to shape the phase if no data were found.
         # Callbacks are in the form: func(repo, roots) --> processed root.
         # This list is to be filled by extensions during repo setup
@@ -182,23 +181,8 @@
       bookmarks.write(self)
 
     @storecache('phaseroots')
-    def _phaseroots(self):
-        self._dirtyphases = False
-        phaseroots = phases.readroots(self)
-        phases.filterunknown(self, phaseroots)
-        return phaseroots
-
-    @propertycache
-    def _phaserev(self):
-        cache = [phases.public] * len(self)
-        for phase in phases.trackedphases:
-            roots = map(self.changelog.rev, self._phaseroots[phase])
-            if roots:
-                for rev in roots:
-                    cache[rev] = phase
-                for rev in self.changelog.descendants(*roots):
-                    cache[rev] = phase
-        return cache
+    def _phasecache(self):
+        return phases.phasecache(self, self._phasedefaults)
 
     @storecache('00changelog.i')
     def changelog(self):
@@ -505,7 +489,7 @@
             partial = self._branchcache
 
         self._branchtags(partial, lrev)
-        # this private cache holds all heads (not just tips)
+        # this private cache holds all heads (not just the branch tips)
         self._branchcache = partial
 
     def branchmap(self):
@@ -585,8 +569,8 @@
                 latest = newnodes.pop()
                 if latest not in bheads:
                     continue
-                minbhrev = self[bheads[0]].node()
-                reachable = self.changelog.reachable(latest, minbhrev)
+                minbhnode = self[bheads[0]].node()
+                reachable = self.changelog.reachable(latest, minbhnode)
                 reachable.remove(latest)
                 if reachable:
                     bheads = [b for b in bheads if b not in reachable]
@@ -605,10 +589,11 @@
 
     def known(self, nodes):
         nm = self.changelog.nodemap
+        pc = self._phasecache
         result = []
         for n in nodes:
             r = nm.get(n)
-            resp = not (r is None or self._phaserev[r] >= phases.secret)
+            resp = not (r is None or pc.phase(self, r) >= phases.secret)
             result.append(resp)
         return result
 
@@ -864,7 +849,6 @@
                 pass
 
         delcache('_tagscache')
-        delcache('_phaserev')
 
         self._branchcache = None # in UTF-8
         self._branchcachetip = None
@@ -932,9 +916,8 @@
 
         def unlock():
             self.store.write()
-            if self._dirtyphases:
-                phases.writeroots(self)
-                self._dirtyphases = False
+            if '_phasecache' in vars(self):
+                self._phasecache.write()
             for k, ce in self._filecache.items():
                 if k == 'dirstate':
                     continue
@@ -1334,6 +1317,8 @@
 
         def mfmatches(ctx):
             mf = ctx.manifest().copy()
+            if match.always():
+                return mf
             for fn in mf.keys():
                 if not match(fn):
                     del mf[fn]
@@ -1419,10 +1404,11 @@
                 mf2 = mfmatches(ctx2)
 
             modified, added, clean = [], [], []
+            withflags = mf1.withflags() | mf2.withflags()
             for fn in mf2:
                 if fn in mf1:
                     if (fn not in deleted and
-                        (mf1.flags(fn) != mf2.flags(fn) or
+                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                          (mf1[fn] != mf2[fn] and
                           (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                         modified.append(fn)
@@ -1694,7 +1680,7 @@
                     # * missingheads part of common (::commonheads)
                     common = set(outgoing.common)
                     cheads = [node for node in revs if node in common]
-                    # and 
+                    # and
                     # * commonheads parents on missing
                     revset = self.set('%ln and parents(roots(%ln))',
                                      outgoing.commonheads,
--- a/mercurial/manifest.py	Wed Apr 25 01:35:39 2012 +0200
+++ b/mercurial/manifest.py	Sat May 12 13:20:26 2012 +0200
@@ -19,6 +19,8 @@
         self._flags = flags
     def flags(self, f):
         return self._flags.get(f, "")
+    def withflags(self):
+        return set(self._flags.keys())
     def set(self, f, flags):
         self._flags[f] = flags
     def copy(self):
--- a/mercurial/match.py	Wed Apr 25 01:35:39 2012 +0200
+++ b/mercurial/match.py	Sat May 12 13:20:26 2012 +0200
@@ -118,6 +118,8 @@
         return self._files
     def anypats(self):
         return self._anypats
+    def always(self):
+        return False
 
 class exact(match):
     def __init__(self, root, cwd, files):
@@ -126,6 +128,8 @@
 class always(match):
     def __init__(self, root, cwd):
         match.__init__(self, root, cwd, [])
+    def always(self):
+        return True
 
 class narrowmatcher(match):
     """Adapt a matcher to work on a subdirectory only.
--- a/mercurial/parsers.c	Wed Apr 25 01:35:39 2012 +0200
+++ b/mercurial/parsers.c	Sat May 12 13:20:26 2012 +0200
@@ -387,7 +387,7 @@
 	Py_ssize_t length = index_length(self);
 	const char *data;
 
-	if (pos == length - 1)
+	if (pos == length - 1 || pos == INT_MAX)
 		return nullid;
 
 	if (pos >= length)
@@ -508,13 +508,13 @@
 		return NULL;
 
 #define istat(__n, __d) \
-	if (PyDict_SetItemString(obj, __d, PyInt_FromLong(self->__n)) == -1) \
+	if (PyDict_SetItemString(obj, __d, PyInt_FromSsize_t(self->__n)) == -1) \
 		goto bail;
 
 	if (self->added) {
 		Py_ssize_t len = PyList_GET_SIZE(self->added);
 		if (PyDict_SetItemString(obj, "index entries added",
-					 PyInt_FromLong(len)) == -1)
+					 PyInt_FromSsize_t(len)) == -1)
 			goto bail;
 	}
 
@@ -553,9 +553,11 @@
  *   -2: not found
  * rest: valid rev
  */
-static int nt_find(indexObject *self, const char *node, Py_ssize_t nodelen)
+static int nt_find(indexObject *self, const char *node, Py_ssize_t nodelen,
+		   int hex)
 {
-	int level, off;
+	int (*getnybble)(const char *, Py_ssize_t) = hex ? hexdigit : nt_level;
+	int level, maxlevel, off;
 
 	if (nodelen == 20 && node[0] == '\0' && memcmp(node, nullid, 20) == 0)
 		return -1;
@@ -563,19 +565,28 @@
 	if (self->nt == NULL)
 		return -2;
 
-	for (level = off = 0; level < nodelen; level++) {
-		int k = nt_level(node, level);
+	if (hex)
+		maxlevel = nodelen > 40 ? 40 : (int)nodelen;
+	else
+		maxlevel = nodelen > 20 ? 40 : ((int)nodelen * 2);
+
+	for (level = off = 0; level < maxlevel; level++) {
+		int k = getnybble(node, level);
 		nodetree *n = &self->nt[off];
 		int v = n->children[k];
 
 		if (v < 0) {
 			const char *n;
+			Py_ssize_t i;
+
 			v = -v - 1;
 			n = index_node(self, v);
 			if (n == NULL)
 				return -2;
-			return memcmp(node, n, nodelen > 20 ? 20 : nodelen)
-				? -2 : v;
+			for (i = level; i < maxlevel; i++)
+				if (getnybble(node, i) != nt_level(n, i))
+					return -2;
+			return v;
 		}
 		if (v == 0)
 			return -2;
@@ -606,7 +617,7 @@
 	int level = 0;
 	int off = 0;
 
-	while (level < 20) {
+	while (level < 40) {
 		int k = nt_level(node, level);
 		nodetree *n;
 		int v;
@@ -660,6 +671,8 @@
 		self->ntrev = (int)index_length(self) - 1;
 		self->ntlookups = 1;
 		self->ntmisses = 0;
+		if (nt_insert(self, nullid, INT_MAX) == -1)
+			return -1;
 	}
 	return 0;
 }
@@ -677,7 +690,7 @@
 	int rev;
 
 	self->ntlookups++;
-	rev = nt_find(self, node, nodelen);
+	rev = nt_find(self, node, nodelen, 0);
 	if (rev >= -1)
 		return rev;
 
@@ -782,6 +795,77 @@
 	return NULL;
 }
 
+static int nt_partialmatch(indexObject *self, const char *node,
+			   Py_ssize_t nodelen)
+{
+	int rev;
+
+	if (nt_init(self) == -1)
+		return -3;
+
+	if (self->ntrev > 0) {
+		/* ensure that the radix tree is fully populated */
+		for (rev = self->ntrev - 1; rev >= 0; rev--) {
+			const char *n = index_node(self, rev);
+			if (n == NULL)
+				return -2;
+			if (nt_insert(self, n, rev) == -1)
+				return -3;
+		}
+		self->ntrev = rev;
+	}
+
+	return nt_find(self, node, nodelen, 1);
+}
+
+static PyObject *index_partialmatch(indexObject *self, PyObject *args)
+{
+	const char *fullnode;
+	int nodelen;
+	char *node;
+	int rev, i;
+
+	if (!PyArg_ParseTuple(args, "s#", &node, &nodelen))
+		return NULL;
+
+	if (nodelen < 4) {
+		PyErr_SetString(PyExc_ValueError, "key too short");
+		return NULL;
+	}
+
+	if (nodelen > 40)
+		nodelen = 40;
+
+	for (i = 0; i < nodelen; i++)
+		hexdigit(node, i);
+	if (PyErr_Occurred()) {
+		/* input contains non-hex characters */
+		PyErr_Clear();
+		Py_RETURN_NONE;
+	}
+
+	rev = nt_partialmatch(self, node, nodelen);
+
+	switch (rev) {
+	case -4:
+		raise_revlog_error();
+	case -3:
+		return NULL;
+	case -2:
+		Py_RETURN_NONE;
+	case -1:
+		return PyString_FromStringAndSize(nullid, 20);
+	}
+
+	fullnode = index_node(self, rev);
+	if (fullnode == NULL) {
+		PyErr_Format(PyExc_IndexError,
+			     "could not access rev %d", rev);
+		return NULL;
+	}
+	return PyString_FromStringAndSize(fullnode, 20);
+}
+
 static PyObject *index_m_get(indexObject *self, PyObject *args)
 {
 	char *node;
@@ -1064,6 +1148,8 @@
 	 "get an index entry"},
 	{"insert", (PyCFunction)index_insert, METH_VARARGS,
 	 "insert an index entry"},
+	{"partialmatch", (PyCFunction)index_partialmatch, METH_VARARGS,
+	 "match a potentially ambiguous node ID"},
 	{"stats", (PyCFunction)index_stats, METH_NOARGS,
 	 "stats for the index"},
 	{NULL} /* Sentinel */
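Aside: a brute-force Python sketch of the semantics index.partialmatch()
implements with the radix tree above (made-up node IDs; the real method
returns a 20-byte binary node, not a hex string):

    class AmbiguousIdentifier(Exception):
        pass

    def partialmatch(allnodes, prefix):
        matches = [n for n in allnodes if n.startswith(prefix)]
        if not matches:
            return None                        # as Py_RETURN_NONE above
        if len(matches) > 1:
            raise AmbiguousIdentifier(prefix)  # as raise_revlog_error()
        return matches[0]

    nodes = ['a3d5c6fdf0d3', 'a3d9fea51289', '7874a09ea728']
    assert partialmatch(nodes, '787') == '7874a09ea728'
    assert partialmatch(nodes, 'ffff') is None
    try:
        partialmatch(nodes, 'a3d')             # ambiguous: two candidates
    except AmbiguousIdentifier:
        pass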
--- a/mercurial/patch.py	Wed Apr 25 01:35:39 2012 +0200
+++ b/mercurial/patch.py	Sat May 12 13:20:26 2012 +0200
@@ -1014,9 +1014,9 @@
         oldstart = self.starta + top
         newstart = self.startb + top
         # zero length hunk ranges already have their start decremented
-        if self.lena:
+        if self.lena and oldstart > 0:
             oldstart -= 1
-        if self.lenb:
+        if self.lenb and newstart > 0:
             newstart -= 1
         return old, oldstart, new, newstart
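Aside, worked through with the values from the test added to test-import.t
below: a header of @@ -0,1 +0,1 @@ parses to a start of 0 with a non-zero
length, and the old unconditional decrement produced -1, which later crashed;
the new guard keeps it at 0. The guarded arithmetic in isolation:

    def adjusted_start(start, length, top=0):
        s = start + top
        # zero-length hunk ranges already have their start decremented;
        # the new "and ... > 0" guard also protects start-line-zero hunks
        if length and s > 0:
            s -= 1
        return s

    assert adjusted_start(0, 1) == 0  # was -1 before this fix
    assert adjusted_start(1, 1) == 0  # normal case is unchanged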
 
--- a/mercurial/phases.py	Wed Apr 25 01:35:39 2012 +0200
+++ b/mercurial/phases.py	Sat May 12 13:20:26 2012 +0200
@@ -99,15 +99,44 @@
 """
 
 import errno
-from node import nullid, bin, hex, short
+from node import nullid, nullrev, bin, hex, short
 from i18n import _
+import util
 
 allphases = public, draft, secret = range(3)
 trackedphases = allphases[1:]
 phasenames = ['public', 'draft', 'secret']
 
-def readroots(repo):
-    """Read phase roots from disk"""
+def _filterunknown(ui, changelog, phaseroots):
+    """remove unknown nodes from the phase boundary
+
+    Nothing is lost as unknown nodes only hold data for their descendants
+    """
+    updated = False
+    nodemap = changelog.nodemap # to filter unknown nodes
+    for phase, nodes in enumerate(phaseroots):
+        missing = [node for node in nodes if node not in nodemap]
+        if missing:
+            for mnode in missing:
+                ui.debug(
+                    'removing unknown node %s from %i-phase boundary\n'
+                    % (short(mnode), phase))
+            nodes.symmetric_difference_update(missing)
+            updated = True
+    return updated
+
+def _readroots(repo, phasedefaults=None):
+    """Read phase roots from disk
+
+    phasedefaults is a list of fn(repo, roots) callables, which are
+    executed if the phase roots file does not exist. When phases are
+    being initialized on an existing repository, this could be used to
+    set selected changesets' phase to something other than public.
+
+    Return (roots, dirty) where dirty is true if roots differ from
+    what is being stored.
+    """
+    dirty = False
     roots = [set() for i in allphases]
     try:
         f = repo.sopener('phaseroots')
@@ -120,39 +149,115 @@
     except IOError, inst:
         if inst.errno != errno.ENOENT:
             raise
-        for f in repo._phasedefaults:
-            roots = f(repo, roots)
-        repo._dirtyphases = True
-    return roots
+        if phasedefaults:
+            for f in phasedefaults:
+                roots = f(repo, roots)
+        dirty = True
+    if _filterunknown(repo.ui, repo.changelog, roots):
+        dirty = True
+    return roots, dirty
+
+class phasecache(object):
+    def __init__(self, repo, phasedefaults, _load=True):
+        if _load:
+            # Cheap trick to allow shallow-copy without copy module
+            self.phaseroots, self.dirty = _readroots(repo, phasedefaults)
+            self.opener = repo.sopener
+            self._phaserevs = None
+
+    def copy(self):
+        # Shallow copy meant to ensure isolation in
+        # advance/retractboundary(), nothing more.
+        ph = phasecache(None, None, _load=False)
+        ph.phaseroots = self.phaseroots[:]
+        ph.dirty = self.dirty
+        ph.opener = self.opener
+        ph._phaserevs = self._phaserevs
+        return ph
 
-def writeroots(repo):
-    """Write phase roots from disk"""
-    f = repo.sopener('phaseroots', 'w', atomictemp=True)
-    try:
-        for phase, roots in enumerate(repo._phaseroots):
-            for h in roots:
-                f.write('%i %s\n' % (phase, hex(h)))
-        repo._dirtyphases = False
-    finally:
-        f.close()
+    def replace(self, phcache):
+        for a in 'phaseroots dirty opener _phaserevs'.split():
+            setattr(self, a, getattr(phcache, a))
+
+    def getphaserevs(self, repo, rebuild=False):
+        if rebuild or self._phaserevs is None:
+            revs = [public] * len(repo.changelog)
+            for phase in trackedphases:
+                roots = map(repo.changelog.rev, self.phaseroots[phase])
+                if roots:
+                    for rev in roots:
+                        revs[rev] = phase
+                    for rev in repo.changelog.descendants(*roots):
+                        revs[rev] = phase
+            self._phaserevs = revs
+        return self._phaserevs
+
+    def phase(self, repo, rev):
+        # We need a repo argument here to be able to build _phaserevs
+        # if necessary. The repository instance is not stored in
+        # phasecache to avoid reference cycles. The changelog instance
+        # is not stored because it is a filecache() property and can
+        # be replaced without us being notified.
+        if rev == nullrev:
+            return public
+        if self._phaserevs is None or rev >= len(self._phaserevs):
+            self._phaserevs = self.getphaserevs(repo, rebuild=True)
+        return self._phaserevs[rev]
 
-def filterunknown(repo, phaseroots=None):
-    """remove unknown nodes from the phase boundary
+    def write(self):
+        if not self.dirty:
+            return
+        f = self.opener('phaseroots', 'w', atomictemp=True)
+        try:
+            for phase, roots in enumerate(self.phaseroots):
+                for h in roots:
+                    f.write('%i %s\n' % (phase, hex(h)))
+        finally:
+            f.close()
+        self.dirty = False
+
+    def _updateroots(self, phase, newroots):
+        self.phaseroots[phase] = newroots
+        self._phaserevs = None
+        self.dirty = True
+
+    def advanceboundary(self, repo, targetphase, nodes):
+        # Be careful to preserve shallow-copied values: do not update
+        # phaseroots values, replace them.
 
-    no data is lost as unknown node only old data for their descentants
-    """
-    if phaseroots is None:
-        phaseroots = repo._phaseroots
-    nodemap = repo.changelog.nodemap # to filter unknown nodes
-    for phase, nodes in enumerate(phaseroots):
-        missing = [node for node in nodes if node not in nodemap]
-        if missing:
-            for mnode in missing:
-                repo.ui.debug(
-                    'removing unknown node %s from %i-phase boundary\n'
-                    % (short(mnode), phase))
-            nodes.symmetric_difference_update(missing)
-            repo._dirtyphases = True
+        delroots = [] # set of roots deleted by this path
+        for phase in xrange(targetphase + 1, len(allphases)):
+            # filter nodes that are not in a compatible phase already
+            nodes = [n for n in nodes
+                     if self.phase(repo, repo[n].rev()) >= phase]
+            if not nodes:
+                break # no roots to move anymore
+            olds = self.phaseroots[phase]
+            roots = set(ctx.node() for ctx in repo.set(
+                    'roots((%ln::) - (%ln::%ln))', olds, olds, nodes))
+            if olds != roots:
+                self._updateroots(phase, roots)
+                # some roots may need to be declared for lower phases
+                delroots.extend(olds - roots)
+            # declare deleted roots in the target phase
+            if targetphase != 0:
+                self.retractboundary(repo, targetphase, delroots)
+
+    def retractboundary(self, repo, targetphase, nodes):
+        # Be careful to preserve shallow-copied values: do not update
+        # phaseroots values, replace them.
+
+        currentroots = self.phaseroots[targetphase]
+        newroots = [n for n in nodes
+                    if self.phase(repo, repo[n].rev()) < targetphase]
+        if newroots:
+            if nullid in newroots:
+                raise util.Abort(_('cannot change null revision phase'))
+            currentroots = currentroots.copy()
+            currentroots.update(newroots)
+            ctxs = repo.set('roots(%ln::)', currentroots)
+            currentroots.intersection_update(ctx.node() for ctx in ctxs)
+            self._updateroots(targetphase, currentroots)
 
 def advanceboundary(repo, targetphase, nodes):
     """Add nodes to a phase changing other nodes phases if necessary.
@@ -161,30 +266,9 @@
     in the target phase or kept in a *lower* phase.
 
     Simplify boundary to contain phase roots only."""
-    delroots = [] # set of root deleted by this path
-    for phase in xrange(targetphase + 1, len(allphases)):
-        # filter nodes that are not in a compatible phase already
-        # XXX rev phase cache might have been invalidated by a previous loop
-        # XXX we need to be smarter here
-        nodes = [n for n in nodes if repo[n].phase() >= phase]
-        if not nodes:
-            break # no roots to move anymore
-        roots = repo._phaseroots[phase]
-        olds = roots.copy()
-        ctxs = list(repo.set('roots((%ln::) - (%ln::%ln))', olds, olds, nodes))
-        roots.clear()
-        roots.update(ctx.node() for ctx in ctxs)
-        if olds != roots:
-            # invalidate cache (we probably could be smarter here
-            if '_phaserev' in vars(repo):
-                del repo._phaserev
-            repo._dirtyphases = True
-            # some roots may need to be declared for lower phases
-            delroots.extend(olds - roots)
-        # declare deleted root in the target phase
-        if targetphase != 0:
-            retractboundary(repo, targetphase, delroots)
-
+    phcache = repo._phasecache.copy()
+    phcache.advanceboundary(repo, targetphase, nodes)
+    repo._phasecache.replace(phcache)
 
 def retractboundary(repo, targetphase, nodes):
     """Set nodes back to a phase changing other nodes phases if necessary.
@@ -193,22 +277,15 @@
     in the target phase or kept in a *higher* phase.
 
     Simplify boundary to contain phase roots only."""
-    currentroots = repo._phaseroots[targetphase]
-    newroots = [n for n in nodes if repo[n].phase() < targetphase]
-    if newroots:
-        currentroots.update(newroots)
-        ctxs = repo.set('roots(%ln::)', currentroots)
-        currentroots.intersection_update(ctx.node() for ctx in ctxs)
-        if '_phaserev' in vars(repo):
-            del repo._phaserev
-        repo._dirtyphases = True
-
+    phcache = repo._phasecache.copy()
+    phcache.retractboundary(repo, targetphase, nodes)
+    repo._phasecache.replace(phcache)
 
 def listphases(repo):
     """List phases root for serialisation over pushkey"""
     keys = {}
     value = '%i' % draft
-    for root in repo._phaseroots[draft]:
+    for root in repo._phasecache.phaseroots[draft]:
         keys[hex(root)] = value
 
     if repo.ui.configbool('phases', 'publish', True):
@@ -251,7 +328,7 @@
 def visibleheads(repo):
     """return the set of visible head of this repo"""
     # XXX we want a cache on this
-    sroots = repo._phaseroots[secret]
+    sroots = repo._phasecache.phaseroots[secret]
     if sroots:
         # XXX very slow revset. storing heads or secret "boundary" would help.
         revset = repo.set('heads(not (%ln::))', sroots)
@@ -267,7 +344,7 @@
     """return a branchmap for the visible set"""
     # XXX Recomputing this data on the fly is very slow.  We should build a
     # XXX cached version while computing the standard branchmap version.
-    sroots = repo._phaseroots[secret]
+    sroots = repo._phasecache.phaseroots[secret]
     if sroots:
         vbranchmap = {}
         for branch, nodes in  repo.branchmap().iteritems():
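Aside: the module-level advanceboundary()/retractboundary() wrappers above
follow a copy, mutate, replace discipline so that an exception mid-update
cannot leave repo._phasecache half-modified. The pattern reduced to a toy
object (illustrative only, not the real phasecache):

    class toycache(object):
        def __init__(self, roots):
            self.roots = roots
            self.dirty = False
        def copy(self):
            c = toycache(self.roots[:])  # shallow: copy list, share sets
            c.dirty = self.dirty
            return c
        def replace(self, other):
            for a in ('roots', 'dirty'):
                setattr(self, a, getattr(other, a))
        def updateroots(self, phase, newroots):
            self.roots[phase] = newroots  # replace the set, never mutate it
            self.dirty = True

    pc = toycache([set(), set(['r1']), set()])
    working = pc.copy()
    working.updateroots(1, set(['r2']))  # pc is untouched so far
    pc.replace(working)                  # atomic swap-in
    assert pc.roots[1] == set(['r2']) and pc.dirty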
--- a/mercurial/repair.py	Wed Apr 25 01:35:39 2012 +0200
+++ b/mercurial/repair.py	Sat May 12 13:20:26 2012 +0200
@@ -6,7 +6,7 @@
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
-from mercurial import changegroup, bookmarks, phases
+from mercurial import changegroup, bookmarks
 from mercurial.node import short
 from mercurial.i18n import _
 import os
@@ -38,14 +38,14 @@
     """return the changesets which will be broken by the truncation"""
     s = set()
     def collectone(revlog):
-        links = (revlog.linkrev(i) for i in revlog)
+        linkgen = (revlog.linkrev(i) for i in revlog)
         # find the truncation point of the revlog
-        for lrev in links:
+        for lrev in linkgen:
             if lrev >= striprev:
                 break
         # see if any revision after this point has a linkrev
         # less than striprev (those will be broken by strip)
-        for lrev in links:
+        for lrev in linkgen:
             if lrev < striprev:
                 s.add(lrev)
 
@@ -170,7 +170,3 @@
         raise
 
     repo.destroyed()
-
-    # remove potential unknown phase
-    # XXX using to_strip data would be faster
-    phases.filterunknown(repo)
--- a/mercurial/revlog.py	Wed Apr 25 01:35:39 2012 +0200
+++ b/mercurial/revlog.py	Sat May 12 13:20:26 2012 +0200
@@ -756,6 +756,15 @@
                 pass
 
     def _partialmatch(self, id):
+        try:
+            return self.index.partialmatch(id)
+        except RevlogError:
+            # parsers.c radix tree lookup gave multiple matches
+            raise LookupError(id, self.indexfile, _("ambiguous identifier"))
+        except (AttributeError, ValueError):
+            # we are pure python, or key was too short to search radix tree
+            pass
+
         if id in self._pcache:
             return self._pcache[id]
 
--- a/mercurial/revset.py	Wed Apr 25 01:35:39 2012 +0200
+++ b/mercurial/revset.py	Sat May 12 13:20:26 2012 +0200
@@ -289,6 +289,7 @@
     - ``pruned``             : csets that are goods, bads or skipped
     - ``untested``           : csets whose fate is yet unknown
     - ``ignored``            : csets ignored due to DAG topology
+    - ``current``            : the cset currently being bisected
     """
     status = getstring(x, _("bisect requires a string")).lower()
     state = set(hbisect.get(repo, status))
@@ -462,7 +463,26 @@
     """``draft()``
     Changeset in draft phase."""
     getargs(x, 0, 0, _("draft takes no arguments"))
-    return [r for r in subset if repo._phaserev[r] == phases.draft]
+    pc = repo._phasecache
+    return [r for r in subset if pc.phase(repo, r) == phases.draft]
+
+def extra(repo, subset, x):
+    """``extra(label, [value])``
+    Changesets with the given label in the extra metadata, with the given
+    optional value."""
+
+    l = getargs(x, 1, 2, _('extra takes at least 1 and at most 2 arguments'))
+    label = getstring(l[0], _('first argument to extra must be a string'))
+    value = None
+
+    if len(l) > 1:
+        value = getstring(l[1], _('second argument to extra must be a string'))
+
+    def _matchvalue(r):
+        extra = repo[r].extra()
+        return label in extra and (value is None or value == extra[label])
+
+    return [r for r in subset if _matchvalue(r)]
 
 def filelog(repo, subset, x):
     """``filelog(pattern)``
@@ -851,7 +871,8 @@
     """``public()``
     Changeset in public phase."""
     getargs(x, 0, 0, _("public takes no arguments"))
-    return [r for r in subset if repo._phaserev[r] == phases.public]
+    pc = repo._phasecache
+    return [r for r in subset if pc.phase(repo, r) == phases.public]
 
 def remote(repo, subset, x):
     """``remote([id [,path]])``
@@ -926,7 +947,7 @@
 
     Special fields are ``summary`` and ``metadata``:
     ``summary`` matches the first line of the description.
-    ``metatadata`` is equivalent to matching ``description user date``
+    ``metadata`` is equivalent to matching ``description user date``
     (i.e. it matches the main metadata fields).
 
     ``metadata`` is the default field which is used when no fields are
@@ -996,7 +1017,7 @@
     # is only one field to match)
     getinfo = lambda r: [f(r) for f in getfieldfuncs]
 
-    matches = []
+    matches = set()
     for rev in revs:
         target = getinfo(rev)
         for r in subset:
@@ -1006,10 +1027,8 @@
                     match = False
                     break
             if match:
-                matches.append(r)
-    if len(revs) > 1:
-        matches = sorted(set(matches))
-    return matches
+                matches.add(r)
+    return [r for r in subset if r in matches]
 
 def reverse(repo, subset, x):
     """``reverse(set)``
@@ -1032,7 +1051,8 @@
     """``secret()``
     Changeset in secret phase."""
     getargs(x, 0, 0, _("secret takes no arguments"))
-    return [r for r in subset if repo._phaserev[r] == phases.secret]
+    pc = repo._phasecache
+    return [r for r in subset if pc.phase(repo, r) == phases.secret]
 
 def sort(repo, subset, x):
     """``sort(set[, [-]key...])``
@@ -1145,6 +1165,7 @@
     "descendants": descendants,
     "_firstdescendants": _firstdescendants,
     "draft": draft,
+    "extra": extra,
     "file": hasfile,
     "filelog": filelog,
     "first": first,
--- a/mercurial/util.h	Wed Apr 25 01:35:39 2012 +0200
+++ b/mercurial/util.h	Sat May 12 13:20:26 2012 +0200
@@ -109,6 +109,7 @@
 typedef int Py_ssize_t;
 typedef Py_ssize_t (*lenfunc)(PyObject *);
 typedef PyObject *(*ssizeargfunc)(PyObject *, Py_ssize_t);
+#define PyInt_FromSsize_t PyInt_FromLong
 
 #if !defined(PY_SSIZE_T_MIN)
 #define PY_SSIZE_T_MAX INT_MAX
--- a/tests/test-bisect.t	Wed Apr 25 01:35:39 2012 +0200
+++ b/tests/test-bisect.t	Sat May 12 13:20:26 2012 +0200
@@ -224,6 +224,7 @@
   Testing changeset 12:1941b52820a5 (23 changesets remaining, ~4 tests)
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ cat .hg/bisect.state
+  current 1941b52820a544549596820a8ae006842b0e2c64
   skip 9d7d07bc967ca98ad0600c24953fd289ad5fa991
   skip ce8f0998e922c179e80819d5066fbe46e2998784
   skip e7fa0811edb063f6319531f0d0a865882138e180
@@ -396,6 +397,12 @@
   date:        Thu Jan 01 00:00:06 1970 +0000
   summary:     msg 6
   
+  $ hg log -r "bisect(current)"
+  changeset:   5:7874a09ea728
+  user:        test
+  date:        Thu Jan 01 00:00:05 1970 +0000
+  summary:     msg 5
+  
   $ hg log -r "bisect(skip)"
   changeset:   1:5cd978ea5149
   user:        test
@@ -466,3 +473,40 @@
   date:        Thu Jan 01 00:00:06 1970 +0000
   summary:     msg 6
   
+
+
+test bisecting via a command without updating the working dir, and
+ensure that the bisect state file is updated before running a test
+command
+
+  $ hg update null
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ cat > script.sh <<'EOF'
+  > #!/bin/sh
+  > test -n "$HG_NODE" || (echo HG_NODE missing; exit 127)
+  > current="`hg log -r \"bisect(current)\" --template {node}`"
+  > test "$current" = "$HG_NODE" || (echo current is bad: $current; exit 127)
+  > rev="`hg log -r $HG_NODE --template {rev}`"
+  > test "$rev" -ge 6
+  > EOF
+  $ chmod +x script.sh
+  $ hg bisect -r
+  $ hg bisect --good tip --noupdate
+  $ hg bisect --bad 0 --noupdate
+  Testing changeset 15:e7fa0811edb0 (31 changesets remaining, ~4 tests)
+  $ hg bisect --command "'`pwd`/script.sh' and some params" --noupdate
+  Changeset 15:e7fa0811edb0: good
+  Changeset 7:03750880c6b5: good
+  Changeset 3:b53bea5e2fcb: bad
+  Changeset 5:7874a09ea728: bad
+  Changeset 6:a3d5c6fdf0d3: good
+  The first good revision is:
+  changeset:   6:a3d5c6fdf0d3
+  user:        test
+  date:        Thu Jan 01 00:00:06 1970 +0000
+  summary:     msg 6
+  
+
+ensure that we still don't have a working dir
+
+  $ hg parents
--- a/tests/test-check-code-hg.t	Wed Apr 25 01:35:39 2012 +0200
+++ b/tests/test-check-code-hg.t	Sat May 12 13:20:26 2012 +0200
@@ -220,9 +220,6 @@
    >             raise util.Abort(_('qfold cannot fold already applied patch %s') % p)
    warning: line over 80 characters
   hgext/mq.py:0:
-   >           ('', 'move', None, _('reorder patch series and apply only the patch'))],
-   warning: line over 80 characters
-  hgext/mq.py:0:
    >           ('U', 'noupdate', None, _('do not update the new working directories')),
    warning: line over 80 characters
   hgext/mq.py:0:
--- a/tests/test-commit-amend.t	Wed Apr 25 01:35:39 2012 +0200
+++ b/tests/test-commit-amend.t	Sat May 12 13:20:26 2012 +0200
@@ -316,3 +316,37 @@
   $ hg rollback
   no rollback information available
   [1]
+
+Preserve extra dict (issue3430):
+
+  $ hg branch a
+  marked working directory as branch a
+  (branches are permanent and global, did you want a bookmark?)
+  $ echo a >> a
+  $ hg ci -ma
+  $ hg ci --amend -m "a'"
+  saved backup bundle to $TESTTMP/.hg/strip-backup/167f8e3031df-amend-backup.hg
+  $ hg log -r . --template "{branch}\n"
+  a
+  $ hg ci --amend -m "a''"
+  saved backup bundle to $TESTTMP/.hg/strip-backup/ceac1a44c806-amend-backup.hg
+  $ hg log -r . --template "{branch}\n"
+  a
+
+Also preserve other entries in the dict that are in the old commit,
+first graft something so there's an additional entry:
+
+  $ hg up 0 -q
+  $ echo z > z
+  $ hg ci -Am 'fork'
+  adding z
+  created new head
+  $ hg up 11
+  5 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ hg graft 12
+  grafting revision 12
+  $ hg ci --amend -m 'graft amend'
+  saved backup bundle to $TESTTMP/.hg/strip-backup/18a5124daf7a-amend-backup.hg
+  $ hg log -r . --debug | grep extra
+  extra:       branch=a
+  extra:       source=2647734878ef0236dda712fae9c1651cf694ea8a
--- a/tests/test-debugcomplete.t	Wed Apr 25 01:35:39 2012 +0200
+++ b/tests/test-debugcomplete.t	Sat May 12 13:20:26 2012 +0200
@@ -247,7 +247,7 @@
   debugsub: rev
   debugwalk: include, exclude
   debugwireargs: three, four, five, ssh, remotecmd, insecure
-  graft: continue, edit, currentdate, currentuser, date, user, tool, dry-run
+  graft: continue, edit, log, currentdate, currentuser, date, user, tool, dry-run
   grep: print0, all, text, follow, ignore-case, files-with-matches, line-number, rev, user, date, include, exclude
   heads: rev, topo, active, closed, style, template
   help: extension, command
--- a/tests/test-graft.t	Wed Apr 25 01:35:39 2012 +0200
+++ b/tests/test-graft.t	Sat May 12 13:20:26 2012 +0200
@@ -277,3 +277,13 @@
   $ hg graft tip
   skipping already grafted revision 13 (same origin 2)
   [255]
+
+Graft with --log
+
+  $ hg up -Cq 1
+  $ hg graft 3 --log -u foo
+  grafting revision 3
+  warning: can't find ancestor for 'c' copied from 'b'!
+  $ hg log --template '{rev} {parents} {desc}\n' -r tip
+  14 1:5d205f8b35b6  3
+  (grafted from 4c60f11aa304a54ae1c199feb94e7fc771e51ed8)
--- a/tests/test-import.t	Wed Apr 25 01:35:39 2012 +0200
+++ b/tests/test-import.t	Sat May 12 13:20:26 2012 +0200
@@ -997,6 +997,26 @@
   c3
   c4
 
+no segfault while importing a unified diff whose start line is zero but chunk
+size is non-zero
+
+  $ hg init startlinezero
+  $ cd startlinezero
+  $ echo foo > foo
+  $ hg commit -Amfoo
+  adding foo
+
+  $ hg import --no-commit - << EOF
+  > diff a/foo b/foo
+  > --- a/foo
+  > +++ b/foo
+  > @@ -0,1 +0,1 @@
+  >  foo
+  > EOF
+  applying patch from stdin
+
+  $ cd ..
+
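
(Aside, not part of the patch: a minimal Python sketch of the corner case
above; hypothetical, not Mercurial's actual parser. A header like
"@@ -0,1 +0,1 @@" declares a start line of 0, and a naive start-minus-one
index computation would go negative, so the index needs clamping:)

  def hunk_start_index(fromline):
      # unified diffs use 1-based line numbers; a start of 0 only
      # appears in degenerate hunks, so clamp instead of going to -1
      return max(fromline - 1, 0)

  assert hunk_start_index(0) == 0
  assert hunk_start_index(1) == 0
  assert hunk_start_index(5) == 4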
 Test corner case involving fuzz and skew
 
   $ hg init morecornercases
--- a/tests/test-keyword.t	Wed Apr 25 01:35:39 2012 +0200
+++ b/tests/test-keyword.t	Sat May 12 13:20:26 2012 +0200
@@ -558,6 +558,7 @@
   $ hg --debug commit -ma2c -d '1 0' -u 'User Name <user@example.com>'
   c
    c: copy a:0045e12f6c5791aac80ca6cbfd97709a88307292
+  removing unknown node 40a904bbbe4c from 1-phase boundary
   overwriting c expanding keywords
   committed changeset 2:25736cf2f5cbe41f6be4e6784ef6ecf9f3bbcc7d
   $ cat a c
@@ -722,6 +723,7 @@
 
   $ hg --debug commit -l log -d '2 0' -u 'User Name <user@example.com>'
   a
+  removing unknown node 40a904bbbe4c from 1-phase boundary
   overwriting a expanding keywords
   committed changeset 2:bb948857c743469b22bbf51f7ec8112279ca5d83
   $ rm log
--- a/tests/test-largefiles.t	Wed Apr 25 01:35:39 2012 +0200
+++ b/tests/test-largefiles.t	Sat May 12 13:20:26 2012 +0200
@@ -432,11 +432,21 @@
   large11
   $ cat sub/large2
   large22
+  $ cd ..
+
+Test cloning with the --all-largefiles flag
+
+  $ rm -Rf ${USERCACHE}/*
+  $ hg clone --all-largefiles a a-backup
+  updating to branch default
+  5 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  getting changed largefiles
+  3 largefiles updated, 0 removed
+  8 additional largefiles cached
 
 Rebasing between two repositories does not revert largefiles to old
 revisions (this was a very bad bug that took a lot of work to fix).
 
-  $ cd ..
   $ hg clone a d
   updating to branch default
   5 files updated, 0 files merged, 0 files removed, 0 files unresolved
--- a/tests/test-mq-qpush-fail.t	Wed Apr 25 01:35:39 2012 +0200
+++ b/tests/test-mq-qpush-fail.t	Sat May 12 13:20:26 2012 +0200
@@ -150,3 +150,275 @@
   abort: cannot push to a previous patch: a
   [255]
 
+test qpop --force and backup files
+
+  $ hg qpop -a
+  popping b
+  patch queue now empty
+  $ hg qq --create force
+  $ echo a > a
+  $ echo b > b
+  $ echo c > c
+  $ hg ci -Am add a b c
+  $ echo a >> a
+  $ hg rm b
+  $ hg rm c
+  $ hg qnew p1
+  $ echo a >> a
+  $ echo bb > b
+  $ hg add b
+  $ echo cc > c
+  $ hg add c
+  $ hg qpop --force --verbose
+  saving current version of a as a.orig
+  saving current version of b as b.orig
+  saving current version of c as c.orig
+  popping p1
+  patch queue now empty
+  $ hg st
+  ? a.orig
+  ? b.orig
+  ? c.orig
+  ? untracked-file
+  $ cat a.orig
+  a
+  a
+  a
+  $ cat b.orig
+  bb
+  $ cat c.orig
+  cc
+
+test qpop --force --no-backup
+
+  $ hg qpush
+  applying p1
+  now at: p1
+  $ rm a.orig
+  $ echo a >> a
+  $ hg qpop --force --no-backup --verbose
+  popping p1
+  patch queue now empty
+  $ test -f a.orig && echo 'error: backup with --no-backup'
+  [1]
+
+test qpop --check
+
+  $ hg qpush
+  applying p1
+  now at: p1
+  $ hg qpop --check --force
+  abort: cannot use both --force and --check
+  [255]
+  $ echo a >> a
+  $ hg qpop --check
+  abort: local changes found, refresh first
+  [255]
+  $ hg revert -qa a
+  $ rm a
+  $ hg qpop --check
+  abort: local changes found, refresh first
+  [255]
+  $ hg rm -A a
+  $ hg qpop --check
+  abort: local changes found, refresh first
+  [255]
+  $ hg revert -qa a
+  $ echo b > b
+  $ hg add b
+  $ hg qpop --check
+  abort: local changes found, refresh first
+  [255]
+  $ hg forget b
+  $ echo d > d
+  $ hg add d
+  $ hg qpop --check
+  popping p1
+  patch queue now empty
+  $ hg forget d
+  $ rm d
+
+test qpush --force and backup files
+
+  $ echo a >> a
+  $ hg qnew p2
+  $ echo b >> b
+  $ echo d > d
+  $ echo e > e
+  $ hg add d e
+  $ hg rm c
+  $ hg qnew p3
+  $ hg qpop -a
+  popping p3
+  popping p2
+  patch queue now empty
+  $ echo a >> a
+  $ echo b1 >> b
+  $ echo d1 > d
+  $ hg add d
+  $ echo e1 > e
+  $ hg qpush -a --force --verbose
+  applying p2
+  saving current version of a as a.orig
+  patching file a
+  a
+  applying p3
+  saving current version of b as b.orig
+  saving current version of d as d.orig
+  patching file b
+  patching file c
+  patching file d
+  file d already exists
+  1 out of 1 hunks FAILED -- saving rejects to file d.rej
+  patching file e
+  file e already exists
+  1 out of 1 hunks FAILED -- saving rejects to file e.rej
+  patch failed to apply
+  b
+  patch failed, rejects left in working dir
+  errors during apply, please fix and refresh p3
+  [2]
+  $ cat a.orig
+  a
+  a
+  $ cat b.orig
+  b
+  b1
+  $ cat d.orig
+  d1
+
+test qpush --force --no-backup
+
+  $ hg revert -qa
+  $ hg qpop -a
+  popping p3
+  popping p2
+  patch queue now empty
+  $ echo a >> a
+  $ rm a.orig
+  $ hg qpush --force --no-backup --verbose
+  applying p2
+  patching file a
+  a
+  now at: p2
+  $ test -f a.orig && echo 'error: backup with --no-backup'
+  [1]
+
+test qgoto --force --no-backup
+
+  $ hg qpop
+  popping p2
+  patch queue now empty
+  $ echo a >> a
+  $ hg qgoto --force --no-backup p2 --verbose
+  applying p2
+  patching file a
+  a
+  now at: p2
+  $ test -f a.orig && echo 'error: backup with --no-backup'
+  [1]
+
+test qpush --check
+
+  $ hg qpush --check --force
+  abort: cannot use both --force and --check
+  [255]
+  $ hg qpush --check --exact
+  abort: cannot use --exact and --check together
+  [255]
+  $ echo b >> b
+  $ hg qpush --check
+  applying p3
+  errors during apply, please fix and refresh p2
+  [2]
+  $ rm b
+  $ hg qpush --check
+  applying p3
+  errors during apply, please fix and refresh p2
+  [2]
+  $ hg rm -A b
+  $ hg qpush --check
+  applying p3
+  errors during apply, please fix and refresh p2
+  [2]
+  $ hg revert -aq b
+  $ echo d > d
+  $ hg add d
+  $ hg qpush --check
+  applying p3
+  errors during apply, please fix and refresh p2
+  [2]
+  $ hg forget d
+  $ rm d
+  $ hg qpop
+  popping p2
+  patch queue now empty
+  $ echo b >> b
+  $ hg qpush -a --check
+  applying p2
+  applying p3
+  errors during apply, please fix and refresh p2
+  [2]
+  $ hg qtop
+  p2
+  $ hg parents --template "{rev} {desc}\n"
+  2 imported patch p2
+  $ hg st b
+  M b
+  $ cat b
+  b
+  b
+
+test qgoto --check
+
+  $ hg revert -aq b
+  $ rm e
+  $ hg qgoto --check --force p3
+  abort: cannot use both --force and --check
+  [255]
+  $ echo a >> a
+  $ hg qgoto --check p3
+  applying p3
+  now at: p3
+  $ hg st a
+  M a
+  $ hg qgoto --check p2
+  popping p3
+  now at: p2
+  $ hg st a
+  M a
+
+test mq.check setting
+
+  $ hg --config mq.check=1 qpush
+  applying p3
+  now at: p3
+  $ hg st a
+  M a
+  $ hg --config mq.check=1 qpop
+  popping p3
+  now at: p2
+  $ hg st a
+  M a
+  $ hg --config mq.check=1 qgoto p3
+  applying p3
+  now at: p3
+  $ hg st a
+  M a
+  $ echo b >> b
+  $ hg --config mq.check=1 qpop --force
+  popping p3
+  now at: p2
+  $ hg st b
+  $ hg --config mq.check=1 qpush --exact
+  abort: local changes found, refresh first
+  [255]
+  $ hg revert -qa a
+  $ hg qpop
+  popping p2
+  patch queue now empty
+  $ echo a >> a
+  $ hg --config mq.check=1 qpush --force
+  applying p2
+  now at: p2
+  $ hg st a
--- a/tests/test-mq.t	Wed Apr 25 01:35:39 2012 +0200
+++ b/tests/test-mq.t	Sat May 12 13:20:26 2012 +0200
@@ -59,6 +59,15 @@
   You will by default be managing a patch queue named "patches". You can create
   other, independent patch queues with the "hg qqueue" command.
   
+  If the working directory contains uncommitted files, qpush, qpop and qgoto
+  abort immediately. If -f/--force is used, the changes are discarded. Setting:
+  
+    [mq] check = True
+  
+  makes them behave as if -c/--check were passed, and non-conflicting local
+  changes will be tolerated and preserved. If incompatible options such as
+  -f/--force or --exact are passed, this setting is ignored.
+  
   list of commands:
   
    qapplied      print the patches already applied
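
(Usage note, an assumption about typical setup rather than part of this
patch: the setting described in the help text above would normally go in
an hgrc file, e.g.:)

  [mq]
  # behave as if -c/--check were passed to qpush, qpop and qgoto
  check = True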
@@ -1356,11 +1365,15 @@
 
 apply force, should discard changes in hello, but not bye
 
-  $ hg qpush -f
+  $ hg qpush -f --verbose
   applying empty
+  saving current version of hello.txt as hello.txt.orig
+  patching file hello.txt
+  hello.txt
   now at: empty
   $ hg st
   M bye.txt
+  ? hello.txt.orig
   $ hg diff --config diff.nodates=True
   diff -r ba252371dbc1 bye.txt
   --- a/bye.txt
--- a/tests/test-phases.t	Wed Apr 25 01:35:39 2012 +0200
+++ b/tests/test-phases.t	Sat May 12 13:20:26 2012 +0200
@@ -9,6 +9,15 @@
 
   $ hg init initialrepo
   $ cd initialrepo
+
+Cannot change null revision phase
+
+  $ hg phase --force --secret null
+  abort: cannot change null revision phase
+  [255]
+  $ hg phase null
+  -1: public
+
   $ mkcommit A
 
 New commit are draft by default
--- a/tests/test-revset.t	Wed Apr 25 01:35:39 2012 +0200
+++ b/tests/test-revset.t	Sat May 12 13:20:26 2012 +0200
@@ -32,6 +32,13 @@
   (branches are permanent and global, did you want a bookmark?)
   $ hg ci -Aqm2 -u Bob
 
+  $ hg log -r "extra('branch', 'a-b-c-')" --template '{rev}\n'
+  2
+  $ hg log -r "extra('branch')" --template '{rev}\n'
+  0
+  1
+  2
+
   $ hg co 1
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ hg branch +a+b+c+
@@ -410,6 +417,10 @@
   0
   $ log '4::8 - 8'
   4
+  $ log 'matching(1 or 2 or 3) and (2 or 3 or 1)'
+  2
+  3
+  1
 
 issue2437
 
--- a/tests/test-transplant.t	Wed Apr 25 01:35:39 2012 +0200
+++ b/tests/test-transplant.t	Sat May 12 13:20:26 2012 +0200
@@ -120,7 +120,25 @@
   1  r2
   0  r1
 
+test same-parent transplant with --log
 
+  $ hg clone -r 1 ../t ../sameparent
+  adding changesets
+  adding manifests
+  adding file changes
+  added 2 changesets with 2 changes to 2 files
+  updating to branch default
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cd ../sameparent
+  $ hg transplant --log -s ../prune 5
+  searching for changes
+  applying e234d668f844
+  e234d668f844 transplanted to e07aea8ecf9c
+  $ hg log --template '{rev} {parents} {desc}\n'
+  2  b1
+  (transplanted from e234d668f844e1b1a765f01db83a32c0c7bfa170)
+  1  r2
+  0  r1
 remote transplant
 
   $ hg clone -r 1 ../t ../remote