# HG changeset patch
# User Matt Mackall <mpm@selenic.com>
# Date 1358637873 21600
# Node ID f5fbe15ca7449f2c9a3cf817c86d0ae68b307214
# Parent  7648b87e76dbf89c9cdfb7993c52479480ff43c7
# Parent  8bd338c7c4c9831850e1c4ddf24b747222a44131
merge default into stable for 2.5 code freeze

diff -r 7648b87e76db -r f5fbe15ca744 Makefile
--- a/Makefile	Mon Jan 14 23:14:45 2013 +0900
+++ b/Makefile	Sat Jan 19 17:24:33 2013 -0600
@@ -11,6 +11,9 @@
 PYFILES:=$(shell find mercurial hgext doc -name '*.py')
 DOCFILES=mercurial/help/*.txt
 
+# Set this to e.g. "mingw32" to use a non-default compiler.
+COMPILER=
+
 help:
 	@echo 'Commonly used make targets:'
 	@echo '  all          - build program and documentation'
@@ -33,11 +36,15 @@
 all: build doc
 
 local:
-	$(PYTHON) setup.py $(PURE) build_py -c -d . build_ext -i build_hgexe -i build_mo
-	$(PYTHON) hg version
+	$(PYTHON) setup.py $(PURE) \
+	  build_py -c -d . \
+	  build_ext $(COMPILER:%=-c %) -i \
+	  build_hgexe $(COMPILER:%=-c %) -i \
+	  build_mo
+	env HGRCPATH= $(PYTHON) hg version
 
 build:
-	$(PYTHON) setup.py $(PURE) build
+	$(PYTHON) setup.py $(PURE) build $(COMPILER:%=-c %)
 
 doc:
 	$(MAKE) -C doc

diff -r 7648b87e76db -r f5fbe15ca744 contrib/check-code.py
--- a/contrib/check-code.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/contrib/check-code.py	Sat Jan 19 17:24:33 2013 -0600
@@ -129,13 +129,14 @@
     (r'(?\s', '<> operator is not available in Python 3+, use !='),
     (r'^\s*\t', "don't use tabs"),
     (r'\S;\s*\n', "semicolon"),
     (r'[^_]_\("[^"]+"\s*%', "don't use % inside _()"),
    (r"[^_]_\('[^']+'\s*%", "don't use % inside _()"),
-    (r'\w,\w', "missing whitespace after ,"),
-    (r'\w[+/*\-<>]\w', "missing whitespace in expression"),
-    (r'^\s+\w+=\w+[^,)\n]$', "missing whitespace in assignment"),
+    (r'(\w|\)),\w', "missing whitespace after ,"),
+    (r'(\w|\))[+/*\-<>]\w', "missing whitespace in expression"),
+    (r'^\s+(\w|\.)+=\w[^,()\n]*$', "missing whitespace in assignment"),
     (r'(\s+)try:\n((?:\n|\1\s.*\n)+?)\1except.*?:\n'
      r'((?:\n|\1\s.*\n)+?)\1finally:', 'no try/except/finally in Python 2.4'),
     (r'(\s+)try:\n((?:\n|\1\s.*\n)*?)\1\s*yield\b.*?'
@@ -185,6 +186,8 @@
     (r'[^^+=*/!<>&| %-](\s=|=\s)[^= ]',
      "wrong whitespace around ="),
     (r'raise Exception', "don't raise generic exceptions"),
+    (r'raise [^,(]+, (\([^\)]+\)|[^,\(\)]+)$',
+     "don't use old-style two-argument raise, use Exception(message)"),
     (r' is\s+(not\s+)?["\'0-9-]', "object comparison with literal"),
     (r' [=!]=\s+(True|False|None)',
      "comparison with singleton, use 'is' or 'is not' instead"),
@@ -211,11 +214,11 @@
     (r'\.strip\(\)\.split\(\)', "no need to strip before splitting"),
     (r'^\s*except\s*:', "warning: naked except clause", r'#.*re-raises'),
     (r':\n(    )*( ){1,3}[^ ]', "must indent 4 spaces"),
+    (r'ui\.(status|progress|write|note|warn)\([\'\"]x',
+     "missing _() in ui message (use () to hide false-positives)"),
   ],
   # warnings
   [
-    (r'ui\.(status|progress|write|note|warn)\([\'\"]x',
-     "warning: unwrapped ui message"),
   ]
 ]

diff -r 7648b87e76db -r f5fbe15ca744 contrib/hgk
--- a/contrib/hgk	Mon Jan 14 23:14:45 2013 +0900
+++ b/contrib/hgk	Sat Jan 19 17:24:33 2013 -0600
@@ -15,8 +15,43 @@
 # The whole snipped is activated only under windows, mouse wheel
 # bindings working already under MacOSX and Linux.
 
+if {[catch {package require Ttk}]} {
+    # use a shim
+    namespace eval ttk {
+        proc style args {}
+
+        proc entry args {
+            eval [linsert $args 0 ::entry] -relief flat
+        }
+    }
+
+    interp alias {} ttk::button {} button
+    interp alias {} ttk::frame {} frame
+    interp alias {} ttk::label {} label
+    interp alias {} ttk::scrollbar {} scrollbar
+
+    interp alias {} ttk::optionMenu {} tk_optionMenu
+} else {
+    proc ::ttk::optionMenu {w varName firstValue args} {
+        upvar #0 $varName var
+
+        if {![info exists var]} {
+            set var $firstValue
+        }
+        ttk::menubutton $w -textvariable $varName -menu $w.menu \
+            -direction flush
+        menu $w.menu -tearoff 0
+        $w.menu add radiobutton -label $firstValue -variable $varName
+        foreach i $args {
+            $w.menu add radiobutton -label $i -variable $varName
+        }
+        return $w.menu
+    }
+}
+
 if {[tk windowingsystem] eq "win32"} {
 
+ttk::style theme use xpnative
+
 set mw_classes [list Text Listbox Table TreeCtrl]
     foreach class $mw_classes { bind $class <MouseWheel> {} }
@@ -72,6 +107,12 @@
 bind all <MouseWheel> [list ::tk::MouseWheel %W %X %Y %D 0]
 
 # end of win32 section
+} else {
+
+if {[ttk::style theme use] eq "default"} {
+    ttk::style theme use clam
+}
+
 }
@@ -480,7 +521,7 @@
     wm transient $w .
     message $w.m -text $msg -justify center -aspect 400
     pack $w.m -side top -fill x -padx 20 -pady 20
-    button $w.ok -text OK -command "destroy $w"
+    ttk::button $w.ok -text OK -command "destroy $w"
     pack $w.ok -side bottom -fill x
     bind $w <Visibility> "grab $w; focus $w"
     tkwait window $w
@@ -526,11 +567,11 @@
	set geometry(ctexth) [expr {($texth - 8) /
				    [font metrics $textfont -linespace]}]
    }

-    frame .ctop.top
-    frame .ctop.top.bar
+    ttk::frame .ctop.top
+    ttk::frame .ctop.top.bar
     pack .ctop.top.bar -side bottom -fill x
     set cscroll .ctop.top.csb
-    scrollbar $cscroll -command {allcanvs yview} -highlightthickness 0
+    ttk::scrollbar $cscroll -command {allcanvs yview}
     pack $cscroll -side right -fill y
     panedwindow .ctop.top.clist -orient horizontal -sashpad 0 -handlesize 4
     pack .ctop.top.clist -side top -fill both -expand 1
@@ -538,15 +579,15 @@
     set canv .ctop.top.clist.canv
     canvas $canv -height $geometry(canvh) -width $geometry(canv1) \
	-bg $bgcolor -bd 0 \
-	-yscrollincr $linespc -yscrollcommand "$cscroll set" -selectbackground grey
+	-yscrollincr $linespc -yscrollcommand "$cscroll set" -selectbackground "#c0c0c0"
     .ctop.top.clist add $canv
     set canv2 .ctop.top.clist.canv2
     canvas $canv2 -height $geometry(canvh) -width $geometry(canv2) \
-	-bg $bgcolor -bd 0 -yscrollincr $linespc -selectbackground grey
+	-bg $bgcolor -bd 0 -yscrollincr $linespc -selectbackground "#c0c0c0"
     .ctop.top.clist add $canv2
     set canv3 .ctop.top.clist.canv3
     canvas $canv3 -height $geometry(canvh) -width $geometry(canv3) \
-	-bg $bgcolor -bd 0 -yscrollincr $linespc -selectbackground grey
+	-bg $bgcolor -bd 0 -yscrollincr $linespc -selectbackground "#c0c0c0"
     .ctop.top.clist add $canv3
     bind .ctop.top.clist <Configure> {resizeclistpanes %W %w}
@@ -557,7 +598,7 @@
	-command gotocommit -width 8
     $sha1but conf -disabledforeground [$sha1but cget -foreground]
     pack .ctop.top.bar.sha1label -side left
-    entry $sha1entry -width 40 -font $textfont -textvariable sha1string
+    ttk::entry $sha1entry -width 40 -font $textfont -textvariable sha1string
     trace add variable sha1string write sha1change
     pack $sha1entry -side left -pady 2
@@ -577,25 +618,25 @@
	0x00, 0x38, 0xff, 0x7f, 0xff, 0x7f, 0xff, 0x7f, 0x00, 0x38, 0x00, 0x1c,
	0x00, 0x0e, 0x00, 0x07, 0x80, 0x03, 0xc0, 0x01};
     }
-    button .ctop.top.bar.leftbut -image bm-left -command goback \
+    ttk::button .ctop.top.bar.leftbut -image bm-left -command goback \
	-state disabled -width 26
     pack .ctop.top.bar.leftbut -side left -fill y
-    button .ctop.top.bar.rightbut -image bm-right -command goforw \
+    ttk::button .ctop.top.bar.rightbut -image bm-right -command goforw \
	-state disabled -width 26
     pack .ctop.top.bar.rightbut -side left -fill y
-    button .ctop.top.bar.findbut -text "Find" -command dofind
+    ttk::button .ctop.top.bar.findbut -text "Find" -command dofind
     pack .ctop.top.bar.findbut -side left
     set findstring {}
     set fstring .ctop.top.bar.findstring
     lappend entries $fstring
-    entry $fstring -width 30 -font $textfont -textvariable findstring
+    ttk::entry $fstring -width 30 -font $textfont -textvariable findstring
     pack $fstring -side left -expand 1 -fill x
     set findtype Exact
-    set findtypemenu [tk_optionMenu .ctop.top.bar.findtype \
+    set findtypemenu [ttk::optionMenu .ctop.top.bar.findtype \
			  findtype Exact IgnCase Regexp]
     set findloc "All fields"
-    tk_optionMenu .ctop.top.bar.findloc findloc "All fields" Headline \
+    ttk::optionMenu .ctop.top.bar.findloc findloc "All fields" Headline \
	Comments Author Committer Files Pickaxe
     pack .ctop.top.bar.findloc -side right
     pack .ctop.top.bar.findtype -side right
@@ -604,14 +645,14 @@
 
     panedwindow .ctop.cdet -orient horizontal
     .ctop add .ctop.cdet
-    frame .ctop.cdet.left
+    ttk::frame .ctop.cdet.left
     set ctext .ctop.cdet.left.ctext
     text $ctext -fg $fgcolor -bg $bgcolor -state disabled -font $textfont \
	-width $geometry(ctextw) -height $geometry(ctexth) \
	-yscrollcommand ".ctop.cdet.left.sb set" \
	-xscrollcommand ".ctop.cdet.left.hb set" -wrap none
-    scrollbar .ctop.cdet.left.sb -command "$ctext yview"
-    scrollbar .ctop.cdet.left.hb -orient horizontal -command "$ctext xview"
+    ttk::scrollbar .ctop.cdet.left.sb -command "$ctext yview"
+    ttk::scrollbar .ctop.cdet.left.hb -orient horizontal -command "$ctext xview"
     pack .ctop.cdet.left.sb -side right -fill y
     pack .ctop.cdet.left.hb -side bottom -fill x
     pack $ctext -side left -fill both -expand 1
@@ -643,12 +684,12 @@
	$ctext tag conf found -back yellow
     }
 
-    frame .ctop.cdet.right
+    ttk::frame .ctop.cdet.right
     set cflist .ctop.cdet.right.cfiles
     listbox $cflist -fg $fgcolor -bg $bgcolor \
	-selectmode extended -width $geometry(cflistw) \
	-yscrollcommand ".ctop.cdet.right.sb set"
-    scrollbar .ctop.cdet.right.sb -command "$cflist yview"
+    ttk::scrollbar .ctop.cdet.right.sb -command "$cflist yview"
     pack .ctop.cdet.right.sb -side right -fill y
     pack $cflist -side left -fill both -expand 1
     .ctop.cdet add .ctop.cdet.right
@@ -901,7 +942,7 @@
 Use and redistribute under the terms of the GNU General Public License} \
	    -justify center -aspect 400
     pack $w.m -side top -fill x -padx 20 -pady 20
-    button $w.ok -text Close -command "destroy $w"
+    ttk::button $w.ok -text Close -command "destroy $w"
     pack $w.ok -side bottom
 }
@@ -1219,7 +1260,7 @@
     } else {
	# draw a head or other ref
	if {[incr nheads -1] >= 0} {
-	    set col green
+	    set col "#00ff00"
	} else {
	    set col "#ddddff"
	}
@@ -2417,8 +2458,7 @@
     set currentid $id
     $sha1entry delete 0 end
     $sha1entry insert 0 $id
-    $sha1entry selection from 0
-    $sha1entry selection to end
+    $sha1entry selection range 0 end
 
     $ctext conf -state normal
     $ctext delete 0.0 end
@@ -3675,36 +3715,36 @@
     set patchtop $top
     catch {destroy $top}
     toplevel $top
-    label $top.title -text "Generate patch"
+    ttk::label $top.title -text "Generate patch"
     grid $top.title - -pady 10
-    label $top.from -text "From:"
-    entry $top.fromsha1 -width 40 -relief flat
+    ttk::label $top.from -text "From:"
+    ttk::entry $top.fromsha1 -width 40
     $top.fromsha1 insert 0 $oldid
     $top.fromsha1 conf -state readonly
     grid $top.from $top.fromsha1 -sticky w
-    entry $top.fromhead -width 60 -relief flat
+    ttk::entry $top.fromhead -width 60
     $top.fromhead insert 0 $oldhead
     $top.fromhead conf -state readonly
     grid x $top.fromhead -sticky w
-    label $top.to -text "To:"
-    entry $top.tosha1 -width 40 -relief flat
+    ttk::label $top.to -text "To:"
+    ttk::entry $top.tosha1 -width 40
     $top.tosha1 insert 0 $newid
     $top.tosha1 conf -state readonly
     grid $top.to $top.tosha1 -sticky w
-    entry $top.tohead -width 60 -relief flat
+    ttk::entry $top.tohead -width 60
     $top.tohead insert 0 $newhead
     $top.tohead conf -state readonly
     grid x $top.tohead -sticky w
-    button $top.rev -text "Reverse" -command mkpatchrev -padx 5
+    ttk::button $top.rev -text "Reverse" -command mkpatchrev
     grid $top.rev x -pady 10
-    label $top.flab -text "Output file:"
-    entry $top.fname -width 60
+    ttk::label $top.flab -text "Output file:"
+    ttk::entry $top.fname -width 60
     $top.fname insert 0 [file normalize "patch$patchnum.patch"]
     incr patchnum
     grid $top.flab $top.fname -sticky w
-    frame $top.buts
-    button $top.buts.gen -text "Generate" -command mkpatchgo
-    button $top.buts.can -text "Cancel" -command mkpatchcan
+    ttk::frame $top.buts
+    ttk::button $top.buts.gen -text "Generate" -command mkpatchgo
+    ttk::button $top.buts.can -text "Cancel" -command mkpatchcan
     grid $top.buts.gen $top.buts.can
     grid columnconfigure $top.buts 0 -weight 1 -uniform a
     grid columnconfigure $top.buts 1 -weight 1 -uniform a
@@ -3755,23 +3795,23 @@
     set mktagtop $top
     catch {destroy $top}
     toplevel $top
-    label $top.title -text "Create tag"
+    ttk::label $top.title -text "Create tag"
     grid $top.title - -pady 10
-    label $top.id -text "ID:"
-    entry $top.sha1 -width 40 -relief flat
+    ttk::label $top.id -text "ID:"
+    ttk::entry $top.sha1 -width 40
     $top.sha1 insert 0 $rowmenuid
     $top.sha1 conf -state readonly
     grid $top.id $top.sha1 -sticky w
-    entry $top.head -width 60 -relief flat
+    ttk::entry $top.head -width 60
     $top.head insert 0 [lindex $commitinfo($rowmenuid) 0]
     $top.head conf -state readonly
     grid x $top.head -sticky w
-    label $top.tlab -text "Tag name:"
-    entry $top.tag -width 60
+    ttk::label $top.tlab -text "Tag name:"
+    ttk::entry $top.tag -width 60
     grid $top.tlab $top.tag -sticky w
-    frame $top.buts
-    button $top.buts.gen -text "Create" -command mktaggo
-    button $top.buts.can -text "Cancel" -command mktagcan
+    ttk::frame $top.buts
+    ttk::button $top.buts.gen -text "Create" -command mktaggo
+    ttk::button $top.buts.can -text "Cancel" -command mktagcan
     grid $top.buts.gen $top.buts.can
     grid columnconfigure $top.buts 0 -weight 1 -uniform a
     grid columnconfigure $top.buts 1 -weight 1 -uniform a
@@ -3835,27 +3875,27 @@
     set wrcomtop $top
     catch {destroy $top}
     toplevel $top
-    label $top.title -text "Write commit to file"
+    ttk::label $top.title -text "Write commit to file"
     grid $top.title - -pady 10
-    label $top.id -text "ID:"
-    entry $top.sha1 -width 40 -relief flat
+    ttk::label $top.id -text "ID:"
+    ttk::entry $top.sha1 -width 40
     $top.sha1 insert 0 $rowmenuid
     $top.sha1 conf -state readonly
     grid $top.id $top.sha1 -sticky w
-    entry $top.head -width 60 -relief flat
+    ttk::entry $top.head -width 60
     $top.head insert 0 [lindex $commitinfo($rowmenuid) 0]
     $top.head conf -state readonly
     grid x $top.head -sticky w
-    label $top.clab -text "Command:"
-    entry $top.cmd -width 60 -textvariable wrcomcmd
+    ttk::label $top.clab -text "Command:"
+    ttk::entry $top.cmd -width 60 -textvariable wrcomcmd
     grid $top.clab $top.cmd -sticky w -pady 10
-    label $top.flab -text "Output file:"
-    entry $top.fname -width 60
+    ttk::label $top.flab -text "Output file:"
+    ttk::entry $top.fname -width 60
     $top.fname insert 0 [file normalize "commit-[string range $rowmenuid 0 6]"]
     grid $top.flab $top.fname -sticky w
-    frame $top.buts
-    button $top.buts.gen -text "Write" -command wrcomgo
-    button $top.buts.can -text "Cancel" -command wrcomcan
+    ttk::frame $top.buts
+    ttk::button $top.buts.gen -text "Write" -command wrcomgo
+    ttk::button $top.buts.can -text "Cancel" -command wrcomcan
     grid $top.buts.gen $top.buts.can
     grid columnconfigure $top.buts 0 -weight 1 -uniform a
     grid columnconfigure $top.buts 1 -weight 1 -uniform a

diff -r 7648b87e76db -r f5fbe15ca744 contrib/mergetools.hgrc
--- a/contrib/mergetools.hgrc	Mon Jan 14 23:14:45 2013 +0900
+++ b/contrib/mergetools.hgrc	Sat Jan 19 17:24:33 2013 -0600
@@ -19,7 +19,7 @@
 vimdiff.check=changed
 vimdiff.priority=-10
 
-merge.checkconflicts=True
+merge.check=conflicts
 merge.priority=-100
 
 gpyfm.gui=True
@@ -43,7 +43,7 @@
 diffmerge.regname=Location
 diffmerge.priority=-7
 diffmerge.args=-nosplash -merge -title1=local -title2=merged -title3=other $local $base $other -result=$output
-diffmerge.checkchanged=True
+diffmerge.check=changed
 diffmerge.gui=True
 diffmerge.diffargs=--nosplash --title1='$plabel1' --title2='$clabel' $parent $child
@@ -59,7 +59,7 @@
 tortoisemerge.args=/base:$base /mine:$local /theirs:$other /merged:$output
 tortoisemerge.regkey=Software\TortoiseSVN
 tortoisemerge.regkeyalt=Software\Wow6432Node\TortoiseSVN
-tortoisemerge.checkchanged=True
+tortoisemerge.check=changed
 tortoisemerge.gui=True
 tortoisemerge.priority=-8
 tortoisemerge.diffargs=/base:$parent /mine:$child /basename:'$plabel1' /minename:'$clabel'
@@ -93,7 +93,7 @@
 winmerge.regkey=Software\Thingamahoochie\WinMerge
 winmerge.regkeyalt=Software\Wow6432Node\Thingamahoochie\WinMerge\
 winmerge.regname=Executable
-winmerge.checkchanged=True
+winmerge.check=changed
 winmerge.gui=True
 winmerge.priority=-10
 winmerge.diffargs=/r /e /x /ub /wl /dl '$plabel1' /dr '$clabel' $parent $child
@@ -119,6 +119,5 @@
 UltraCompare.priority = -2
 UltraCompare.gui = True
 UltraCompare.binary = True
-UltraCompare.checkconflicts = True
-UltraCompare.checkchanged = True
+UltraCompare.check = conflicts,changed
 UltraCompare.diffargs=$child $parent -title1 $clabel -title2 $plabel1

diff -r 7648b87e76db -r f5fbe15ca744 contrib/perf.py
--- a/contrib/perf.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/contrib/perf.py	Sat Jan 19 17:24:33 2013 -0600
@@ -1,9 +1,13 @@
 # perf.py - performance test routines
 '''helper extension to measure performance'''
 
-from mercurial import cmdutil, scmutil, util, match, commands
+from mercurial import cmdutil, scmutil, util, match, commands, obsolete
+from mercurial import repoview, branchmap
 import time, os, sys
 
+cmdtable = {}
+command = cmdutil.command(cmdtable)
+
 def timer(func, title=None):
     results = []
     begin = time.time()
@@ -29,6 +33,7 @@
     sys.stderr.write("! wall %f comb %f user %f sys %f (best of %d)\n"
                      % (m[0], m[1] + m[2], m[1], m[2], count))
 
+@command('perfwalk')
 def perfwalk(ui, repo, *pats):
     try:
         m = scmutil.match(repo[None], pats, {})
@@ -40,11 +45,14 @@
     except Exception:
         timer(lambda: len(list(cmdutil.walk(repo, pats, {}))))
 
-def perfstatus(ui, repo, *pats):
+@command('perfstatus',
+         [('u', 'unknown', False,
+           'ask status to look for unknown files')])
+def perfstatus(ui, repo, **opts):
     #m = match.always(repo.root, repo.getcwd())
     #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
     #                                                False))))
-    timer(lambda: sum(map(len, repo.status())))
+    timer(lambda: sum(map(len, repo.status(**opts))))
 
 def clearcaches(cl):
     # behave somewhat consistently across internal API changes
@@ -55,6 +63,7 @@
         cl._nodecache = {nullid: nullrev}
         cl._nodepos = None
 
+@command('perfheads')
 def perfheads(ui, repo):
     cl = repo.changelog
     def d():
@@ -62,6 +71,7 @@
         clearcaches(cl)
     timer(d)
 
+@command('perftags')
 def perftags(ui, repo):
     import mercurial.changelog, mercurial.manifest
     def t():
@@ -71,6 +81,7 @@
         return len(repo.tags())
     timer(t)
 
+@command('perfancestors')
 def perfancestors(ui, repo):
     heads = repo.changelog.headrevs()
     def d():
@@ -78,6 +89,17 @@
             pass
     timer(d)
 
+@command('perfancestorset')
+def perfancestorset(ui, repo, revset):
+    revs = repo.revs(revset)
+    heads = repo.changelog.headrevs()
+    def d():
+        s = repo.changelog.ancestors(heads)
+        for rev in revs:
+            rev in s
+    timer(d)
+
+@command('perfdirstate')
 def perfdirstate(ui, repo):
     "a" in repo.dirstate
     def d():
@@ -85,6 +107,7 @@
         "a" in repo.dirstate
     timer(d)
 
+@command('perfdirstatedirs')
 def perfdirstatedirs(ui, repo):
     "a" in repo.dirstate
     def d():
@@ -92,6 +115,7 @@
         del repo.dirstate._dirs
     timer(d)
 
+@command('perfdirstatewrite')
 def perfdirstatewrite(ui, repo):
     ds = repo.dirstate
     "a" in ds
@@ -100,6 +124,7 @@
         ds.write()
     timer(d)
 
+@command('perfmanifest')
 def perfmanifest(ui, repo):
     def d():
         t = repo.manifest.tip()
@@ -108,6 +133,7 @@
         repo.manifest._cache = None
     timer(d)
 
+@command('perfchangeset')
 def perfchangeset(ui, repo, rev):
     n = repo[rev].node()
     def d():
@@ -115,6 +141,7 @@
         #repo.changelog._cache = None
     timer(d)
 
+@command('perfindex')
 def perfindex(ui, repo):
     import mercurial.revlog
     mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
@@ -124,12 +151,14 @@
         cl.rev(n)
     timer(d)
 
+@command('perfstartup')
 def perfstartup(ui, repo):
     cmd = sys.argv[0]
     def d():
         os.system("HGRCPATH= %s version -q > /dev/null" % cmd)
     timer(d)
 
+@command('perfparents')
 def perfparents(ui, repo):
     nl = [repo.changelog.node(i) for i in xrange(1000)]
     def d():
@@ -137,22 +166,16 @@
             repo.changelog.parents(n)
     timer(d)
 
+@command('perflookup')
 def perflookup(ui, repo, rev):
     timer(lambda: len(repo.lookup(rev)))
 
+@command('perfrevrange')
 def perfrevrange(ui, repo, *specs):
     revrange = scmutil.revrange
     timer(lambda: len(revrange(repo, specs)))
 
-def perfnodelookup(ui, repo, rev):
-    import mercurial.revlog
-    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
-    n = repo[rev].node()
-    def d():
-        cl = mercurial.revlog.revlog(repo.sopener, "00changelog.i")
-        cl.rev(n)
-    timer(d)
-
+@command('perfnodelookup')
 def perfnodelookup(ui, repo, rev):
     import mercurial.revlog
     mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
@@ -163,12 +186,15 @@
         clearcaches(cl)
     timer(d)
 
+@command('perflog',
+         [('', 'rename', False, 'ask log to follow renames')])
 def perflog(ui, repo, **opts):
     ui.pushbuffer()
     timer(lambda: commands.log(ui, repo, rev=[], date='', user='',
                                copies=opts.get('rename')))
     ui.popbuffer()
 
+@command('perftemplating')
 def perftemplating(ui, repo):
     ui.pushbuffer()
     timer(lambda: commands.log(ui, repo, rev=[], date='', user='',
@@ -176,15 +202,18 @@
                               ' {author|person}: {desc|firstline}\n'))
     ui.popbuffer()
 
+@command('perfcca')
 def perfcca(ui, repo):
     timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
 
+@command('perffncacheload')
 def perffncacheload(ui, repo):
     s = repo.store
     def d():
         s.fncache._load()
     timer(d)
 
+@command('perffncachewrite')
 def perffncachewrite(ui, repo):
     s = repo.store
     s.fncache._load()
@@ -193,6 +222,7 @@
         s.fncache.write()
     timer(d)
 
+@command('perffncacheencode')
 def perffncacheencode(ui, repo):
     s = repo.store
     s.fncache._load()
@@ -201,6 +231,7 @@
             s.encode(p)
     timer(d)
 
+@command('perfdiffwd')
 def perfdiffwd(ui, repo):
     """Profile diff of working directory changes"""
     options = {
@@ -218,6 +249,9 @@
         title = 'diffopts: %s' % (diffopt and ('-' + diffopt) or 'none')
         timer(d, title)
 
+@command('perfrevlog',
+         [('d', 'dist', 100, 'distance between the revisions')],
+         "[INDEXFILE]")
 def perfrevlog(ui, repo, file_, **opts):
     from mercurial import revlog
     dist = opts['dist']
@@ -228,32 +262,105 @@
     timer(d)
 
-cmdtable = {
-    'perfcca': (perfcca, []),
-    'perffncacheload': (perffncacheload, []),
-    'perffncachewrite': (perffncachewrite, []),
-    'perffncacheencode': (perffncacheencode, []),
-    'perflookup': (perflookup, []),
-    'perfrevrange': (perfrevrange, []),
-    'perfnodelookup': (perfnodelookup, []),
-    'perfparents': (perfparents, []),
-    'perfstartup': (perfstartup, []),
-    'perfstatus': (perfstatus, []),
-    'perfwalk': (perfwalk, []),
-    'perfmanifest': (perfmanifest, []),
-    'perfchangeset': (perfchangeset, []),
-    'perfindex': (perfindex, []),
-    'perfheads': (perfheads, []),
-    'perftags': (perftags, []),
-    'perfancestors': (perfancestors, []),
-    'perfdirstate': (perfdirstate, []),
-    'perfdirstatedirs': (perfdirstate, []),
-    'perfdirstatewrite': (perfdirstatewrite, []),
-    'perflog': (perflog,
-                [('', 'rename', False, 'ask log to follow renames')]),
-    'perftemplating': (perftemplating, []),
-    'perfdiffwd': (perfdiffwd, []),
-    'perfrevlog': (perfrevlog,
-                   [('d', 'dist', 100, 'distance between the revisions')],
-                   "[INDEXFILE]"),
-}
+@command('perfrevset',
+         [('C', 'clear', False, 'clear volatile cache between each call.')],
+         "REVSET")
+def perfrevset(ui, repo, expr, clear=False):
+    """benchmark the execution time of a revset
+
+    Use the --clear option if you need to evaluate the impact of building
+    the volatile revision set caches on revset execution. The volatile
+    caches hold filtered-view and obsolescence-related data."""
+    def d():
+        if clear:
+            repo.invalidatevolatilesets()
+        repo.revs(expr)
+    timer(d)
+
+@command('perfvolatilesets')
+def perfvolatilesets(ui, repo, *names):
+    """benchmark the computation of various volatile sets
+
+    Volatile sets compute elements related to filtering and obsolescence."""
+    repo = repo.unfiltered()
+
+    def getobs(name):
+        def d():
+            repo.invalidatevolatilesets()
+            obsolete.getrevs(repo, name)
+        return d
+
+    allobs = sorted(obsolete.cachefuncs)
+    if names:
+        allobs = [n for n in allobs if n in names]
+
+    for name in allobs:
+        timer(getobs(name), title=name)
+
+    def getfiltered(name):
+        def d():
+            repo.invalidatevolatilesets()
+            repoview.filteredrevs(repo, name)
+        return d
+
+    allfilter = sorted(repoview.filtertable)
+    if names:
+        allfilter = [n for n in allfilter if n in names]
+
+    for name in allfilter:
+        timer(getfiltered(name), title=name)
+
+@command('perfbranchmap',
+         [('f', 'full', False,
+           'Includes build time of subset'),
+         ])
+def perfbranchmap(ui, repo, full=False):
+    """benchmark the update of a branchmap
+
+    This benchmarks the full repo.branchmap() call with read and write disabled
+    """
+    def getbranchmap(filtername):
+        """generate a benchmark function for the filtername"""
+        if filtername is None:
+            view = repo
+        else:
+            view = repo.filtered(filtername)
+        def d():
+            if full:
+                view._branchcaches.clear()
+            else:
+                view._branchcaches.pop(filtername, None)
+            view.branchmap()
+        return d
+    # add filter in smaller subset to bigger subset
+    possiblefilters = set(repoview.filtertable)
+    allfilters = []
+    while possiblefilters:
+        for name in possiblefilters:
+            subset = repoview.subsettable.get(name)
+            if subset not in possiblefilters:
+                break
+        else:
+            assert False, 'subset cycle %s!' % possiblefilters
+        allfilters.append(name)
+        possiblefilters.remove(name)
+
+    # warm the cache
+    if not full:
+        for name in allfilters:
+            repo.filtered(name).branchmap()
+    # add unfiltered
+    allfilters.append(None)
+    oldread = branchmap.read
+    oldwrite = branchmap.branchcache.write
+    try:
+        branchmap.read = lambda repo: None
+        branchmap.write = lambda repo: None
+        for name in allfilters:
+            timer(getbranchmap(name), title=str(name))
+    finally:
+        branchmap.read = oldread
+        branchmap.branchcache.write = oldwrite
+
+
diff -r 7648b87e76db -r f5fbe15ca744 contrib/synthrepo.py
--- a/contrib/synthrepo.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/contrib/synthrepo.py	Sat Jan 19 17:24:33 2013 -0600
@@ -231,6 +231,8 @@
     fp.close()
 
 def cdf(l):
+    if not l:
+        return [], []
     vals, probs = zip(*sorted(l, key=lambda x: x[1], reverse=True))
     t = float(sum(probs, 0))
     s, cdfs = 0, []
diff -r 7648b87e76db -r f5fbe15ca744 contrib/vim/hgtest.vim
--- a/contrib/vim/hgtest.vim	Mon Jan 14 23:14:45 2013 +0900
+++ b/contrib/vim/hgtest.vim	Sat Jan 19 17:24:33 2013 -0600
@@ -2,7 +2,8 @@
 " Language:     Mercurial unified tests
 " Author:       Steve Losh (steve@stevelosh.com)
 "
-" Add the following line to your ~/.vimrc to enable:
+" Place this file in ~/.vim/syntax/ and add the following line to your
+" ~/.vimrc to enable:
 " au BufNewFile,BufRead *.t set filetype=hgtest
 "
 " If you want folding you'll need the following line as well:
diff -r 7648b87e76db -r f5fbe15ca744 contrib/zsh_completion
--- a/contrib/zsh_completion	Mon Jan 14 23:14:45 2013 +0900
+++ b/contrib/zsh_completion	Sat Jan 19 17:24:33 2013 -0600
@@ -174,11 +174,10 @@
   _hg_cmd tags | while read tag
   do
-    tags+=(${tag/ # [0-9]#:*})
+    tags+=(${tag/ #[0-9]#:*})
   done
-  (( $#tags )) && _describe -t tags 'tags' tags 
+  (( $#tags )) && _describe -t tags 'tags' tags
 }
-
 _hg_bookmarks() {
   typeset -a bookmark bookmarks
@@ -198,7 +197,7 @@
   _hg_cmd branches | while read branch
   do
-    branches+=(${branch/ # [0-9]#:*})
+    branches+=(${branch/ #[0-9]#:*})
   done
   (( $#branches )) && _describe -t branches 'branches' branches
 }
@@ -208,12 +207,19 @@
   typeset -a heads
   local myrev
 
-  heads=(${(f)"$(_hg_cmd heads --template '{rev}\\n')"})
+  heads=(${(f)"$(_hg_cmd heads --template '{rev}:{branch}\\n')"})
   # exclude own revision
-  myrev=$(_hg_cmd log -r . --template '{rev}\\n')
+  myrev=$(_hg_cmd log -r . --template '{rev}:{branch}\\n')
   heads=(${heads:#$myrev})
 
   (( $#heads )) && _describe -t heads 'heads' heads
+
+  branches=(${(f)"$(_hg_cmd heads --template '{branch}\\n')"})
+  # exclude own revision
+  myrev=$(_hg_cmd log -r . --template '{branch}\\n')
+  branches=(${branches:#$myrev})
+
+  (( $#branches )) && _describe -t branches 'branches' branches
 }
 
 _hg_files() {
diff -r 7648b87e76db -r f5fbe15ca744 doc/hgmanpage.py
--- a/doc/hgmanpage.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/doc/hgmanpage.py	Sat Jan 19 17:24:33 2013 -0600
@@ -146,7 +146,7 @@
                 text.extend(cell)
                 if not text[-1].endswith('\n'):
                     text[-1] += '\n'
-                if i < len(row)-1:
+                if i < len(row) - 1:
                     text.append('T}'+self._tab_char+'T{\n')
                 else:
                     text.append('T}\n')
@@ -258,7 +258,7 @@
         # ensure we get a ".TH" as viewers require it.
         self.head.append(self.header())
         # filter body
-        for i in xrange(len(self.body)-1, 0, -1):
+        for i in xrange(len(self.body) - 1, 0, -1):
             # remove superfluous vertical gaps.
             if self.body[i] == '.sp\n':
                 if self.body[i - 1][:4] in ('.BI ','.IP '):
@@ -880,7 +880,7 @@
             self.context[-3] = '.BI' # bold/italic alternate
             if node['delimiter'] != ' ':
                 self.body.append('\\fB%s ' % node['delimiter'])
-            elif self.body[len(self.body)-1].endswith('='):
+            elif self.body[len(self.body) - 1].endswith('='):
                 # a blank only means no blank in output, just changing font
                 self.body.append(' ')
             else:
diff -r 7648b87e76db -r f5fbe15ca744 hgext/churn.py
--- a/hgext/churn.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/hgext/churn.py	Sat Jan 19 17:24:33 2013 -0600
@@ -144,8 +144,10 @@
     if not rate:
         return
 
-    sortkey = ((not opts.get('sort')) and (lambda x: -sum(x[1])) or None)
-    rate.sort(key=sortkey)
+    if opts.get('sort'):
+        rate.sort()
+    else:
+        rate.sort(key=lambda x: (-sum(x[1]), x))
 
     # Be careful not to have a zero maxcount (issue833)
     maxcount = float(max(sum(v) for k, v in rate)) or 1.0
diff -r 7648b87e76db -r f5fbe15ca744 hgext/color.py
--- a/hgext/color.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/hgext/color.py	Sat Jan 19 17:24:33 2013 -0600
@@ -103,6 +103,7 @@
 import os
 
 from mercurial import commands, dispatch, extensions, ui as uimod, util
+from mercurial import templater
 from mercurial.i18n import _
 
 testedwith = 'internal'
@@ -354,6 +355,28 @@
                        for s in msg.split('\n')])
     return msg
 
+def templatelabel(context, mapping, args):
+    if len(args) != 2:
+        # i18n: "label" is a keyword
+        raise error.ParseError(_("label expects two arguments"))
+
+    thing = templater.stringify(args[1][0](context, mapping, args[1][1]))
+    thing = templater.runtemplate(context, mapping,
+                                  templater.compiletemplate(thing, context))
+
+    # apparently, repo could be a string that is the favicon?
+    repo = mapping.get('repo', '')
+    if isinstance(repo, str):
+        return thing
+
+    label = templater.stringify(args[0][0](context, mapping, args[0][1]))
+    label = templater.runtemplate(context, mapping,
+                                  templater.compiletemplate(label, context))
+
+    thing = templater.stringify(thing)
+    label = templater.stringify(label)
+
+    return repo.ui.label(thing, label)
+
 def uisetup(ui):
     global _terminfo_params
@@ -370,6 +393,7 @@
             configstyles(ui_)
         return orig(ui_, opts, cmd, cmdfunc)
     extensions.wrapfunction(dispatch, '_runcommand', colorcmd)
+    templater.funcs['label'] = templatelabel
 
 def extsetup(ui):
     commands.globalopts.append(
diff -r 7648b87e76db -r f5fbe15ca744 hgext/convert/__init__.py
--- a/hgext/convert/__init__.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/hgext/convert/__init__.py	Sat Jan 19 17:24:33 2013 -0600
@@ -191,6 +191,10 @@
         branch indicated in the regex as the second parent of the
         changeset. Default is ``{{mergefrombranch ([-\\w]+)}}``
 
+    :convert.localtimezone: use local time (as determined by the TZ
+        environment variable) for changeset date/times. The default
+        is False (use UTC).
+
     :hooks.cvslog: Specify a Python function to be called at the end of
         gathering the CVS log. The function is passed a list with the
         log entries, and can modify the entries in-place, or add or
@@ -231,6 +235,10 @@
     :convert.svn.trunk: specify the name of the trunk branch. The
         default is ``trunk``.
 
+    :convert.localtimezone: use local time (as determined by the TZ
+        environment variable) for changeset date/times. The default
+        is False (use UTC).
+
     Source history can be retrieved starting at a specific revision,
     instead of being integrally converted. Only single branch
     conversions are supported.
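The convert.localtimezone option documented above is implemented by the
makedatetimestamp() helper that the next hunk adds to convert/common.py.
As a rough standalone illustration (not part of the patch; the sample
timestamp and the assumption of a UTC-6 local zone are hypothetical), the
helper derives the local UTC offset in effect at time t by comparing naive
UTC and naive local datetimes, which makes it DST-aware for historical
timestamps:

    import datetime

    def makedatetimestamp(t):
        # naive-UTC minus naive-local yields the local UTC offset at time t
        delta = (datetime.datetime.utcfromtimestamp(t) -
                 datetime.datetime.fromtimestamp(t))
        tz = delta.days * 86400 + delta.seconds
        return t, tz

    # e.g. for a process running in US central time (UTC-6):
    print makedatetimestamp(1358637873)  # -> (1358637873, 21600)

The returned (time, tz) pair follows the util.makedate() convention used in
this patch's own header date, with tz counted in seconds west of UTC.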
diff -r 7648b87e76db -r f5fbe15ca744 hgext/convert/common.py
--- a/hgext/convert/common.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/hgext/convert/common.py	Sat Jan 19 17:24:33 2013 -0600
@@ -5,7 +5,7 @@
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
-import base64, errno, subprocess, os
+import base64, errno, subprocess, os, datetime
 import cPickle as pickle
 from mercurial import util
 from mercurial.i18n import _
@@ -446,3 +446,10 @@
             if e.errno != errno.ENOENT:
                 raise
     return m
+
+def makedatetimestamp(t):
+    """Like util.makedate() but for time t instead of current time"""
+    delta = (datetime.datetime.utcfromtimestamp(t) -
+             datetime.datetime.fromtimestamp(t))
+    tz = delta.days * 86400 + delta.seconds
+    return t, tz
diff -r 7648b87e76db -r f5fbe15ca744 hgext/convert/convcmd.py
--- a/hgext/convert/convcmd.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/hgext/convert/convcmd.py	Sat Jan 19 17:24:33 2013 -0600
@@ -147,7 +147,7 @@
         map contains valid revision identifiers and merge the new
         links in the source graph.
         """
-        for c in splicemap:
+        for c in sorted(splicemap):
             if c not in parents:
                 if not self.dest.hascommit(self.map.get(c, c)):
                     # Could be in source but not converted during this run
@@ -175,7 +175,7 @@
         revisions without parents. 'parents' must be a mapping of revision
         identifier to its parents ones.
         """
-        visit = parents.keys()
+        visit = sorted(parents)
         seen = set()
         children = {}
         roots = []
diff -r 7648b87e76db -r f5fbe15ca744 hgext/convert/cvs.py
--- a/hgext/convert/cvs.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/hgext/convert/cvs.py	Sat Jan 19 17:24:33 2013 -0600
@@ -11,6 +11,7 @@
 
 from mercurial.i18n import _
 from common import NoRepo, commit, converter_source, checktool
+from common import makedatetimestamp
 import cvsps
 
 class convert_cvs(converter_source):
@@ -70,6 +71,8 @@
                 cs.author = self.recode(cs.author)
                 self.lastbranch[cs.branch] = id
                 cs.comment = self.recode(cs.comment)
+                if self.ui.configbool('convert', 'localtimezone'):
+                    cs.date = makedatetimestamp(cs.date[0])
                 date = util.datestr(cs.date, '%Y-%m-%d %H:%M:%S %1%2')
                 self.tags.update(dict.fromkeys(cs.tags, id))
 
diff -r 7648b87e76db -r f5fbe15ca744 hgext/convert/cvsps.py
--- a/hgext/convert/cvsps.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/hgext/convert/cvsps.py	Sat Jan 19 17:24:33 2013 -0600
@@ -19,6 +19,7 @@
         .branch    - name of branch this revision is on
         .branches  - revision tuple of branches starting at this revision
         .comment   - commit message
+        .commitid  - CVS commitid or None
         .date      - the commit date as a (time, tz) tuple
         .dead      - true if file revision is dead
         .file      - Name of file
@@ -28,19 +29,17 @@
         .revision  - revision number as tuple
         .tags      - list of tags on the file
         .synthetic - is this a synthetic "file ... added on ..." revision?
-        .mergepoint- the branch that has been merged from
-                     (if present in rlog output)
-        .branchpoints- the branches that start at the current entry
+        .mergepoint - the branch that has been merged from (if present in
+                      rlog output) or None
+        .branchpoints - the branches that start at the current entry or empty
     '''
     def __init__(self, **entries):
         self.synthetic = False
         self.__dict__.update(entries)
 
     def __repr__(self):
-        return "<%s at 0x%x: %s %s>" % (self.__class__.__name__,
-                                        id(self),
-                                        self.file,
-                                        ".".join(map(str, self.revision)))
+        items = ("%s=%r"%(k, self.__dict__[k]) for k in sorted(self.__dict__))
+        return "%s(%s)"%(type(self).__name__, ", ".join(items))
 
 class logerror(Exception):
     pass
@@ -113,6 +112,7 @@
     re_50 = re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$')
     re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);'
                        r'(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?'
+                       r'(\s+commitid:\s+([^;]+);)?'
                        r'(.*mergepoint:\s+([^;]+);)?')
     re_70 = re.compile('branches: (.+);$')
 
@@ -171,6 +171,14 @@
         try:
             ui.note(_('reading cvs log cache %s\n') % cachefile)
             oldlog = pickle.load(open(cachefile))
+            for e in oldlog:
+                if not (util.safehasattr(e, 'branchpoints') and
+                        util.safehasattr(e, 'commitid') and
+                        util.safehasattr(e, 'mergepoint')):
+                    ui.status(_('ignoring old cache\n'))
+                    oldlog = []
+                    break
+
             ui.note(_('cache has %d log entries\n') % len(oldlog))
         except Exception, e:
             ui.note(_('error reading cache: %r\n') % e)
@@ -296,9 +304,16 @@
             # as this state is re-entered for subsequent revisions of a file.
             match = re_50.match(line)
             assert match, _('expected revision number')
-            e = logentry(rcs=scache(rcs), file=scache(filename),
-                         revision=tuple([int(x) for x in match.group(1).split('.')]),
-                         branches=[], parent=None)
+            e = logentry(rcs=scache(rcs),
+                         file=scache(filename),
+                         revision=tuple([int(x) for x in
+                                         match.group(1).split('.')]),
+                         branches=[],
+                         parent=None,
+                         commitid=None,
+                         mergepoint=None,
+                         branchpoints=set())
+
             state = 6
 
         elif state == 6:
@@ -329,8 +344,11 @@
             else:
                 e.lines = None
 
-            if match.group(7): # cvsnt mergepoint
-                myrev = match.group(8).split('.')
+            if match.group(7): # cvs 1.12 commitid
+                e.commitid = match.group(8)
+
+            if match.group(9): # cvsnt mergepoint
+                myrev = match.group(10).split('.')
                 if len(myrev) == 2: # head
                     e.mergepoint = 'HEAD'
                 else:
@@ -339,8 +357,7 @@
                     assert len(branches) == 1, ('unknown branch: %s'
                                                 % e.mergepoint)
                     e.mergepoint = branches[0]
-            else:
-                e.mergepoint = None
+
             e.comment = []
             state = 7
 
@@ -469,23 +486,22 @@
         .author    - author name as CVS knows it
         .branch    - name of branch this changeset is on, or None
         .comment   - commit message
+        .commitid  - CVS commitid or None
         .date      - the commit date as a (time,tz) tuple
         .entries   - list of logentry objects in this changeset
         .parents   - list of one or two parent changesets
         .tags      - list of tags on this changeset
         .synthetic - from synthetic revision "file ... added on branch ..."
-        .mergepoint- the branch that has been merged from
-                     (if present in rlog output)
-        .branchpoints- the branches that start at the current entry
+        .mergepoint- the branch that has been merged from or None
+        .branchpoints- the branches that start at the current entry or empty
     '''
     def __init__(self, **entries):
         self.synthetic = False
         self.__dict__.update(entries)
 
     def __repr__(self):
-        return "<%s at 0x%x: %s>" % (self.__class__.__name__,
-                                     id(self),
-                                     getattr(self, 'id', "(no id)"))
+        items = ("%s=%r"%(k, self.__dict__[k]) for k in sorted(self.__dict__))
+        return "%s(%s)"%(type(self).__name__, ", ".join(items))
 
 def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
     '''Convert log into changesets.'''
@@ -493,8 +509,8 @@
     ui.status(_('creating changesets\n'))
 
     # Merge changesets
-
-    log.sort(key=lambda x: (x.comment, x.author, x.branch, x.date))
+    log.sort(key=lambda x: (x.commitid, x.comment, x.author, x.branch, x.date,
+                            x.branchpoints))
 
     changesets = []
     files = set()
@@ -517,22 +533,24 @@
         # first changeset and bar the next and MYBRANCH and MYBRANCH2
        # should both start off of the bar changeset. No provisions are
         # made to ensure that this is, in fact, what happens.
-        if not (c and
-                e.comment == c.comment and
-                e.author == c.author and
-                e.branch == c.branch and
-                (not util.safehasattr(e, 'branchpoints') or
-                 not util.safehasattr (c, 'branchpoints') or
-                 e.branchpoints == c.branchpoints) and
-                ((c.date[0] + c.date[1]) <=
-                 (e.date[0] + e.date[1]) <=
-                 (c.date[0] + c.date[1]) + fuzz) and
-                e.file not in files):
+        if not (c and e.branchpoints == c.branchpoints and
+                (# cvs commitids
+                 (e.commitid is not None and e.commitid == c.commitid) or
+                 (# no commitids, use fuzzy commit detection
+                  (e.commitid is None or c.commitid is None) and
+                  e.comment == c.comment and
+                  e.author == c.author and
+                  e.branch == c.branch and
+                  ((c.date[0] + c.date[1]) <=
+                   (e.date[0] + e.date[1]) <=
+                   (c.date[0] + c.date[1]) + fuzz) and
+                  e.file not in files))):
             c = changeset(comment=e.comment, author=e.author,
-                          branch=e.branch, date=e.date, entries=[],
-                          mergepoint=getattr(e, 'mergepoint', None),
-                          branchpoints=getattr(e, 'branchpoints', set()))
+                          branch=e.branch, date=e.date,
+                          entries=[], mergepoint=e.mergepoint,
+                          branchpoints=e.branchpoints, commitid=e.commitid)
             changesets.append(c)
+
             files = set()
             if len(changesets) % 100 == 0:
                 t = '%d %s' % (len(changesets), repr(e.comment)[1:-1])
@@ -801,22 +819,22 @@
 
     # Note: trailing spaces on several lines here are needed to have
     #       bug-for-bug compatibility with cvsps.
     ui.write('---------------------\n')
-    ui.write('PatchSet %d \n' % cs.id)
-    ui.write('Date: %s\n' % util.datestr(cs.date,
-                                         '%Y/%m/%d %H:%M:%S %1%2'))
-    ui.write('Author: %s\n' % cs.author)
-    ui.write('Branch: %s\n' % (cs.branch or 'HEAD'))
-    ui.write('Tag%s: %s \n' % (['', 's'][len(cs.tags) > 1],
-                               ','.join(cs.tags) or '(none)'))
-    branchpoints = getattr(cs, 'branchpoints', None)
-    if branchpoints:
-        ui.write('Branchpoints: %s \n' % ', '.join(branchpoints))
+    ui.write(('PatchSet %d \n' % cs.id))
+    ui.write(('Date: %s\n' % util.datestr(cs.date,
+                                          '%Y/%m/%d %H:%M:%S %1%2')))
+    ui.write(('Author: %s\n' % cs.author))
+    ui.write(('Branch: %s\n' % (cs.branch or 'HEAD')))
+    ui.write(('Tag%s: %s \n' % (['', 's'][len(cs.tags) > 1],
+                                ','.join(cs.tags) or '(none)')))
+    if cs.branchpoints:
+        ui.write(('Branchpoints: %s \n') %
+                 ', '.join(sorted(cs.branchpoints)))
     if opts["parents"] and cs.parents:
         if len(cs.parents) > 1:
-            ui.write('Parents: %s\n' %
-                     (','.join([str(p.id) for p in cs.parents])))
+            ui.write(('Parents: %s\n' %
+                      (','.join([str(p.id) for p in cs.parents]))))
         else:
-            ui.write('Parent: %d\n' % cs.parents[0].id)
+            ui.write(('Parent: %d\n' % cs.parents[0].id))
 
     if opts["ancestors"]:
         b = cs.branch
@@ -825,11 +843,11 @@
             b, c = ancestors[b]
             r.append('%s:%d:%d' % (b or "HEAD", c, branches[b]))
         if r:
-            ui.write('Ancestors: %s\n' % (','.join(r)))
+            ui.write(('Ancestors: %s\n' % (','.join(r))))
 
-    ui.write('Log:\n')
+    ui.write(('Log:\n'))
     ui.write('%s\n\n' % cs.comment)
-    ui.write('Members: \n')
+    ui.write(('Members: \n'))
     for f in cs.entries:
         fn = f.file
         if fn.startswith(opts["prefix"]):
diff -r 7648b87e76db -r f5fbe15ca744 hgext/convert/git.py
--- a/hgext/convert/git.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/hgext/convert/git.py	Sat Jan 19 17:24:33 2013 -0600
@@ -6,12 +6,24 @@
 # GNU General Public License version 2 or any later version.
 
 import os
-from mercurial import util
+from mercurial import util, config
 from mercurial.node import hex, nullid
 from mercurial.i18n import _
 from common import NoRepo, commit, converter_source, checktool
 
+class submodule(object):
+    def __init__(self, path, node, url):
+        self.path = path
+        self.node = node
+        self.url = url
+
+    def hgsub(self):
+        return "%s = [git]%s" % (self.path, self.url)
+
+    def hgsubstate(self):
+        return "%s %s" % (self.node, self.path)
+
 class convert_git(converter_source):
     # Windows does not support GIT_DIR= construct while other systems
     # cannot remove environment variable. Just assume none have
@@ -55,6 +67,7 @@
             checktool('git', 'git')
 
         self.path = path
+        self.submodules = []
 
     def getheads(self):
         if not self.rev:
@@ -76,16 +89,57 @@
         return data
 
     def getfile(self, name, rev):
-        data = self.catfile(rev, "blob")
-        mode = self.modecache[(name, rev)]
+        if name == '.hgsub':
+            data = '\n'.join([m.hgsub() for m in self.submoditer()])
+            mode = ''
+        elif name == '.hgsubstate':
+            data = '\n'.join([m.hgsubstate() for m in self.submoditer()])
+            mode = ''
+        else:
+            data = self.catfile(rev, "blob")
+            mode = self.modecache[(name, rev)]
         return data, mode
 
+    def submoditer(self):
+        null = hex(nullid)
+        for m in sorted(self.submodules, key=lambda p: p.path):
+            if m.node != null:
+                yield m
+
+    def parsegitmodules(self, content):
+        """Parse the formatted .gitmodules file, example file format:
+        [submodule "sub"]\n
+        \tpath = sub\n
+        \turl = git://giturl\n
+        """
+        self.submodules = []
+        c = config.config()
+        # Each item in .gitmodules starts with \t that can't be parsed
+        c.parse('.gitmodules', content.replace('\t',''))
+        for sec in c.sections():
+            s = c[sec]
+            if 'url' in s and 'path' in s:
+                self.submodules.append(submodule(s['path'], '', s['url']))
+
+    def retrievegitmodules(self, version):
+        modules, ret = self.gitread("git show %s:%s" % (version, '.gitmodules'))
+        if ret:
+            raise util.Abort(_('cannot read submodules config file in %s') %
+                             version)
+        self.parsegitmodules(modules)
+        for m in self.submodules:
+            node, ret = self.gitread("git rev-parse %s:%s" % (version, m.path))
+            if ret:
+                continue
+            m.node = node.strip()
+
     def getchanges(self, version):
         self.modecache = {}
         fh = self.gitopen("git diff-tree -z --root -m -r %s" % version)
         changes = []
         seen = set()
         entry = None
+        subexists = False
         for l in fh.read().split('\x00'):
             if not entry:
                 if not l.startswith(':'):
@@ -97,15 +151,24 @@
             seen.add(f)
             entry = entry.split()
             h = entry[3]
-            if entry[1] == '160000':
-                raise util.Abort('git submodules are not supported!')
             p = (entry[1] == "100755")
             s = (entry[1] == "120000")
-            self.modecache[(f, h)] = (p and "x") or (s and "l") or ""
-            changes.append((f, h))
+
+            if f == '.gitmodules':
+                subexists = True
+                changes.append(('.hgsub', ''))
+            elif entry[1] == '160000' or entry[0] == ':160000':
+                subexists = True
+            else:
+                self.modecache[(f, h)] = (p and "x") or (s and "l") or ""
+                changes.append((f, h))
             entry = None
         if fh.close():
             raise util.Abort(_('cannot read changes in %s') % version)
+
+        if subexists:
+            self.retrievegitmodules(version)
+            changes.append(('.hgsubstate', ''))
         return (changes, {})
 
     def getcommit(self, version):
diff -r 7648b87e76db -r f5fbe15ca744 hgext/convert/hg.py
--- a/hgext/convert/hg.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/hgext/convert/hg.py	Sat Jan 19 17:24:33 2013 -0600
@@ -110,7 +110,7 @@
 
         if missings:
             self.after()
-            for pbranch, heads in missings.iteritems():
+            for pbranch, heads in sorted(missings.iteritems()):
                 pbranchpath = os.path.join(self.path, pbranch)
                 prepo = hg.peer(self.ui, {}, pbranchpath)
                 self.ui.note(_('pulling from %s into %s\n') % (pbranch, branch))
@@ -219,9 +219,10 @@
             return
 
         self.ui.status(_("updating bookmarks\n"))
+        destmarks = self.repo._bookmarks
         for bookmark in updatedbookmark:
-            self.repo._bookmarks[bookmark] = bin(updatedbookmark[bookmark])
-            bookmarks.write(self.repo)
+            destmarks[bookmark] = bin(updatedbookmark[bookmark])
+        destmarks.write()
 
     def hascommit(self, rev):
         if rev not in self.repo and self.clonebranches:
diff -r 7648b87e76db -r f5fbe15ca744 hgext/convert/subversion.py
--- a/hgext/convert/subversion.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/hgext/convert/subversion.py	Sat Jan 19 17:24:33 2013 -0600
@@ -18,6 +18,7 @@
 
 from common import NoRepo, MissingTool, commit, encodeargs, decodeargs
 from common import commandline, converter_source, converter_sink, mapfile
+from common import makedatetimestamp
 
 try:
     from svn.core import SubversionException, Pool
@@ -376,7 +377,7 @@
             rpath = self.url.strip('/')
             branchnames = svn.client.ls(rpath + '/' + quote(branches),
                                         rev, False, self.ctx)
-            for branch in branchnames.keys():
+            for branch in sorted(branchnames):
                 module = '%s/%s/%s' % (oldmodule, branches, branch)
                 if not isdir(module, self.last_changed):
                     continue
@@ -802,6 +803,8 @@
         # ISO-8601 conformant
        # '2007-01-04T17:35:00.902377Z'
         date = util.parsedate(date[:19] + " UTC", ["%Y-%m-%dT%H:%M:%S"])
+        if self.ui.configbool('convert', 'localtimezone'):
+            date = makedatetimestamp(date[0])
 
         log = message and self.recode(message) or ''
         author = author and self.recode(author) or ''
diff -r 7648b87e76db -r f5fbe15ca744 hgext/eol.py
--- a/hgext/eol.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/hgext/eol.py	Sat Jan 19 17:24:33 2013 -0600
@@ -307,7 +307,7 @@
             eolmtime = 0
 
         if eolmtime > cachemtime:
-            ui.debug("eol: detected change in .hgeol\n")
+            self.ui.debug("eol: detected change in .hgeol\n")
             wlock = None
             try:
                 wlock = self.wlock()
diff -r 7648b87e76db -r f5fbe15ca744 hgext/graphlog.py
--- a/hgext/graphlog.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/hgext/graphlog.py	Sat Jan 19 17:24:33 2013 -0600
@@ -39,7 +39,6 @@
      _('show changesets within the given named branch'), _('BRANCH')),
     ('P', 'prune', [],
      _('do not display revision or any of its ancestors'), _('REV')),
-    ('', 'hidden', False, _('show hidden changesets (DEPRECATED)')),
     ] + commands.logopts + commands.walkopts,
     _('[OPTION]... [FILE]'))
 def graphlog(ui, repo, *pats, **opts):
diff -r 7648b87e76db -r f5fbe15ca744 hgext/hgk.py
--- a/hgext/hgk.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/hgext/hgk.py	Sat Jan 19 17:24:33 2013 -0600
@@ -98,9 +98,9 @@
         if ctx is None:
             ctx = repo[n]
         # use ctx.node() instead ??
-    ui.write("tree %s\n" % short(ctx.changeset()[0]))
+    ui.write(("tree %s\n" % short(ctx.changeset()[0])))
     for p in ctx.parents():
-        ui.write("parent %s\n" % p)
+        ui.write(("parent %s\n" % p))
 
     date = ctx.date()
     description = ctx.description().replace("\0", "")
@@ -108,12 +108,13 @@
     if lines and lines[-1].startswith('committer:'):
         committer = lines[-1].split(': ')[1].rstrip()
     else:
-        committer = ctx.user()
+        committer = ""
 
-    ui.write("author %s %s %s\n" % (ctx.user(), int(date[0]), date[1]))
-    ui.write("committer %s %s %s\n" % (committer, int(date[0]), date[1]))
-    ui.write("revision %d\n" % ctx.rev())
-    ui.write("branch %s\n\n" % ctx.branch())
+    ui.write(("author %s %s %s\n" % (ctx.user(), int(date[0]), date[1])))
+    if committer != '':
+        ui.write(("committer %s %s %s\n" % (committer, int(date[0]), date[1])))
+    ui.write(("revision %d\n" % ctx.rev()))
+    ui.write(("branch %s\n\n" % ctx.branch()))
 
     if prefix != "":
         ui.write("%s%s\n" % (prefix,
@@ -302,7 +303,7 @@
 def config(ui, repo, **opts):
     """print extension options"""
     def writeopt(name, value):
-        ui.write('k=%s\nv=%s\n' % (name, value))
+        ui.write(('k=%s\nv=%s\n' % (name, value)))
 
     writeopt('vdiff', ui.config('hgk', 'vdiff', ''))
 
diff -r 7648b87e76db -r f5fbe15ca744 hgext/highlight/highlight.py
--- a/hgext/highlight/highlight.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/hgext/highlight/highlight.py	Sat Jan 19 17:24:33 2013 -0600
@@ -50,7 +50,7 @@
     colorized = highlight(text, lexer, formatter)
     # strip wrapping div
     colorized = colorized[:colorized.find('\n</pre>')]
-    colorized = colorized[colorized.find('<pre>')+5:]
+    colorized = colorized[colorized.find('<pre>') + 5:]
     coloriter = (s.encode(encoding.encoding, 'replace')
                  for s in colorized.splitlines())
 
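A note on the check-code change at the top of this patch: the
unwrapped-ui-message pattern moved from the warnings list to the errors
list, which is why so many hunks in cvsps.py and hgk.py above wrap write
arguments in an extra pair of parentheses. check-code masks the contents
of string literals with 'x' characters before matching, so the rule
r'ui\.(status|progress|write|note|warn)\([\'\"]x' only fires when a string
literal directly follows the opening parenthesis of the call. A hypothetical
sketch of the three forms, assuming a ui object and an integer p in scope:

    ui.write('Parent: %d\n' % p)       # flagged: untranslated ui message
    ui.write(('Parent: %d\n' % p))     # extra () hides a deliberate one
    ui.write(_('Parent: %d\n') % p)    # preferred: wrapped in _()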
diff -r 7648b87e76db -r f5fbe15ca744 hgext/histedit.py
--- a/hgext/histedit.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/hgext/histedit.py	Sat Jan 19 17:24:33 2013 -0600
@@ -144,7 +144,6 @@
     import pickle
 import os
 
-from mercurial import bookmarks
 from mercurial import cmdutil
 from mercurial import discovery
 from mercurial import error
@@ -177,6 +176,31 @@
 #
 """)
 
+def commitfuncfor(repo, src):
+    """Build a commit function for the replacement of <src>
+
+    This function ensures we apply the same treatment to all changesets.
+
+    - Add a 'histedit_source' entry in extra.
+
+    Note that fold has its own separate logic because its handling is a bit
+    different and not easily factored out of the fold method.
+    """
+    phasemin = src.phase()
+    def commitfunc(**kwargs):
+        phasebackup = repo.ui.backupconfig('phases', 'new-commit')
+        try:
+            repo.ui.setconfig('phases', 'new-commit', phasemin)
+            extra = kwargs.get('extra', {}).copy()
+            extra['histedit_source'] = src.hex()
+            kwargs['extra'] = extra
+            return repo.commit(**kwargs)
+        finally:
+            repo.ui.restoreconfig(phasebackup)
+    return commitfunc
+
+
+
 def applychanges(ui, repo, ctx, opts):
     """Merge changeset from ctx (only) in the current working directory"""
     wcpar = repo.dirstate.parents()[0]
@@ -255,7 +279,7 @@
         message = first.description()
     user = commitopts.get('user')
     date = commitopts.get('date')
-    extra = first.extra()
+    extra = commitopts.get('extra')
 
     parents = (first.p1().node(), first.p2().node())
     new = context.memctx(repo,
@@ -280,8 +304,9 @@
         raise util.Abort(_('Fix up the change and run '
                            'hg histedit --continue'))
     # drop the second merge parent
-    n = repo.commit(text=oldctx.description(), user=oldctx.user(),
-                    date=oldctx.date(), extra=oldctx.extra())
+    commit = commitfuncfor(repo, oldctx)
+    n = commit(text=oldctx.description(), user=oldctx.user(),
+               date=oldctx.date(), extra=oldctx.extra())
     if n is None:
         ui.warn(_('%s: empty changeset\n')
                      % node.hex(ha))
@@ -332,7 +357,19 @@
     commitopts['message'] = newmessage
     # date
     commitopts['date'] = max(ctx.date(), oldctx.date())
-    n = collapse(repo, ctx, repo[newnode], commitopts)
+    extra = ctx.extra().copy()
+    # histedit_source
+    # note: ctx is likely a temporary commit but that is the best we can do here
+    #       This is sufficient to solve issue3681 anyway
+    extra['histedit_source'] = '%s,%s' % (ctx.hex(), oldctx.hex())
+    commitopts['extra'] = extra
+    phasebackup = repo.ui.backupconfig('phases', 'new-commit')
+    try:
+        phasemin = max(ctx.phase(), oldctx.phase())
+        repo.ui.setconfig('phases', 'new-commit', phasemin)
+        n = collapse(repo, ctx, repo[newnode], commitopts)
+    finally:
+        repo.ui.restoreconfig(phasebackup)
     if n is None:
         return ctx, []
     hg.update(repo, n)
@@ -357,8 +394,9 @@
                            'hg histedit --continue'))
     message = oldctx.description() + '\n'
     message = ui.edit(message, ui.username())
-    new = repo.commit(text=message, user=oldctx.user(), date=oldctx.date(),
-                      extra=oldctx.extra())
+    commit = commitfuncfor(repo, oldctx)
+    new = commit(text=message, user=oldctx.user(), date=oldctx.date(),
+                 extra=oldctx.extra())
     newctx = repo[new]
     if oldctx.node() != newctx.node():
         return newctx, [(oldctx.node(), (new,))]
@@ -559,9 +597,10 @@
             editor = cmdutil.commitforceeditor
         else:
             editor = False
-        new = repo.commit(text=message, user=ctx.user(),
-                          date=ctx.date(), extra=ctx.extra(),
-                          editor=editor)
+        commit = commitfuncfor(repo, ctx)
+        new = commit(text=message, user=ctx.user(),
+                     date=ctx.date(), extra=ctx.extra(),
+                     editor=editor)
         if new is not None:
             newchildren.append(new)
 
@@ -594,7 +633,8 @@
     When keep is false, the specified set can't have children."""
     ctxs = list(repo.set('%n::%n', old, new))
     if ctxs and not keep:
-        if repo.revs('(%ld::) - (%ld + hidden())', ctxs, ctxs):
+        if (not obsolete._enabled and
+            repo.revs('(%ld::) - (%ld)', ctxs, ctxs)):
             raise util.Abort(_('cannot edit history that would orphan nodes'))
         root = ctxs[0] # list is already sorted by repo.set
         if not root.phase():
@@ -720,9 +760,9 @@
         # if nothing got rewritten there is not purpose for this function
         return
     moves = []
-    for bk, old in repo._bookmarks.iteritems():
+    for bk, old in sorted(repo._bookmarks.iteritems()):
         if old == oldtopmost:
-            # special case ensure bookmark stay on tip. 
+            # special case ensure bookmark stay on tip.
             #
             # This is arguably a feature and we may only want that for the
             # active bookmark. But the behavior is kept compatible with the old
@@ -740,12 +780,13 @@
             # nothing to move
         moves.append((bk, new[-1]))
     if moves:
+        marks = repo._bookmarks
         for mark, new in moves:
-            old = repo._bookmarks[mark]
+            old = marks[mark]
             ui.note(_('histedit: moving bookmarks %s from %s to %s\n')
                     % (mark, node.short(old), node.short(new)))
-            repo._bookmarks[mark] = new
-        bookmarks.write(repo)
+            marks[mark] = new
+        marks.write()
 
 def cleanupnode(ui, repo, name, nodes):
     """strip a group of nodes from the repository
diff -r 7648b87e76db -r f5fbe15ca744 hgext/inotify/linux/watcher.py
--- a/hgext/inotify/linux/watcher.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/hgext/inotify/linux/watcher.py	Sat Jan 19 17:24:33 2013 -0600
@@ -72,7 +72,7 @@
 
     def __repr__(self):
         r = repr(self.raw)
-        return 'event(path=' + repr(self.path) + ', ' + r[r.find('(')+1:]
+        return 'event(path=' + repr(self.path) + ', ' + r[r.find('(') + 1:]
 
 
 _event_props = {
diff -r 7648b87e76db -r f5fbe15ca744 hgext/inotify/linuxserver.py
--- a/hgext/inotify/linuxserver.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/hgext/inotify/linuxserver.py	Sat Jan 19 17:24:33 2013 -0600
@@ -405,14 +405,7 @@
 
     def shutdown(self):
         self.sock.close()
-        try:
-            os.unlink(self.sockpath)
-            if self.realsockpath:
-                os.unlink(self.realsockpath)
-                os.rmdir(os.path.dirname(self.realsockpath))
-        except OSError, err:
-            if err.errno != errno.ENOENT:
-                raise
+        self.sock.cleanup()
 
     def answer_stat_query(self, cs):
         if self.repowatcher.timeout:
diff -r 7648b87e76db -r f5fbe15ca744 hgext/inotify/server.py
--- a/hgext/inotify/server.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/hgext/inotify/server.py	Sat Jan 19 17:24:33 2013 -0600
@@ -6,7 +6,7 @@
 # GNU General Public License version 2 or any later version.
 
 from mercurial.i18n import _
-from mercurial import cmdutil, osutil, util
+from mercurial import cmdutil, posix, osutil, util
 import common
 
 import errno
@@ -15,7 +15,6 @@
 import stat
 import struct
 import sys
-import tempfile
 
 class AlreadyStartedException(Exception):
     pass
@@ -330,42 +329,15 @@
     def __init__(self, ui, root, repowatcher, timeout):
         self.ui = ui
         self.repowatcher = repowatcher
-        self.sock = socket.socket(socket.AF_UNIX)
-        self.sockpath = join(root, '.hg/inotify.sock')
-
-        self.realsockpath = self.sockpath
-        if os.path.islink(self.sockpath):
-            if os.path.exists(self.sockpath):
-                self.realsockpath = os.readlink(self.sockpath)
-            else:
-                raise util.Abort('inotify-server: cannot start: '
-                                '.hg/inotify.sock is a broken symlink')
         try:
-            self.sock.bind(self.realsockpath)
-        except socket.error, err:
+            self.sock = posix.unixdomainserver(
+                lambda p: os.path.join(root, '.hg', p),
+                'inotify')
+        except (OSError, socket.error), err:
             if err.args[0] == errno.EADDRINUSE:
-                raise AlreadyStartedException(_('cannot start: socket is '
-                                                'already bound'))
-            if err.args[0] == "AF_UNIX path too long":
-                tempdir = tempfile.mkdtemp(prefix="hg-inotify-")
-                self.realsockpath = os.path.join(tempdir, "inotify.sock")
-                try:
-                    self.sock.bind(self.realsockpath)
-                    os.symlink(self.realsockpath, self.sockpath)
-                except (OSError, socket.error), inst:
-                    try:
-                        os.unlink(self.realsockpath)
-                    except OSError:
-                        pass
-                    os.rmdir(tempdir)
-                    if inst.errno == errno.EEXIST:
-                        raise AlreadyStartedException(_('cannot start: tried '
-                            'linking .hg/inotify.sock to a temporary socket but'
-                            ' .hg/inotify.sock already exists'))
-                    raise
-            else:
-                raise
-        self.sock.listen(5)
+                raise AlreadyStartedException(_('cannot start: '
+                                                'socket is already bound'))
+            raise
         self.fileno = self.sock.fileno
 
     def answer_stat_query(self, cs):
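
The replacement consolidates Unix-domain socket setup: `posix.unixdomainserver` (new on default in this cycle) takes over the bind, the fallback through a temporary directory when the AF_UNIX path is too long, and the symlink bookkeeping, and the returned socket's `cleanup()` undoes all of it, as `shutdown()` above now relies on. A sketch of the call, inferred from this hunk rather than from the helper's own documentation:

    import os
    from mercurial import posix

    root = '/path/to/repo'   # hypothetical repository root
    # the first argument maps a bare socket file name into .hg; the
    # second is the subsystem name the helper derives the name from
    sock = posix.unixdomainserver(
        lambda p: os.path.join(root, '.hg', p), 'inotify')
    try:
        pass   # accept and serve connections on sock
    finally:
        sock.cleanup()   # removes the socket file and any fallback symlink
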
diff -r 7648b87e76db -r f5fbe15ca744 hgext/largefiles/basestore.py
--- a/hgext/largefiles/basestore.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/hgext/largefiles/basestore.py	Sat Jan 19 17:24:33 2013 -0600
@@ -26,14 +26,8 @@
         self.detail = detail
 
     def longmessage(self):
-        if self.url:
-            return ('%s: %s\n'
-                    '(failed URL: %s)\n'
-                    % (self.filename, self.detail, self.url))
-        else:
-            return ('%s: %s\n'
-                    '(no default or default-push path set in hgrc)\n'
-                    % (self.filename, self.detail))
+        return (_("error getting %s from %s for %s: %s\n") %
+                 (self.hash, self.url, self.filename, self.detail))
 
     def __str__(self):
         return "%s: %s" % (self.url, self.detail)
diff -r 7648b87e76db -r f5fbe15ca744 hgext/largefiles/lfcommands.py
--- a/hgext/largefiles/lfcommands.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/hgext/largefiles/lfcommands.py	Sat Jan 19 17:24:33 2013 -0600
@@ -383,6 +383,13 @@
     store = basestore._openstore(repo)
     return store.verify(revs, contents=contents)
 
+def debugdirstate(ui, repo):
+    '''Show basic information for the largefiles dirstate'''
+    lfdirstate = lfutil.openlfdirstate(ui, repo)
+    for file_, ent in sorted(lfdirstate._map.iteritems()):
+        mode = '%3o' % (ent[1] & 0777 & ~util.umask)
+        ui.write("%c %s %10d %s\n" % (ent[0], mode, ent[2], file_))
+
 def cachelfiles(ui, repo, node, filelist=None):
     '''cachelfiles ensures that all largefiles needed by the specified revision
     are present in the repository's largefile cache.
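
`debugdirstate` prints one line per file tracked by the lfdirstate. Each map entry is a standard dirstate tuple of (state, mode, size, mtime), the layout assumed here, and masking with `~util.umask` keeps the displayed permission bits consistent with what Mercurial would actually write. For a hypothetical entry:

    ent = ('n', 0644, 41, 1358637873)       # state, mode, size, mtime
    mode = '%3o' % (ent[1] & 0777)          # -> '644'
    print '%c %s %10d %s' % (ent[0], mode, ent[2], 'big.bin')
    # prints: n 644         41 big.bin
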
diff -r 7648b87e76db -r f5fbe15ca744 hgext/largefiles/lfutil.py
--- a/hgext/largefiles/lfutil.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/hgext/largefiles/lfutil.py	Sat Jan 19 17:24:33 2013 -0600
@@ -18,43 +18,10 @@
 from mercurial.i18n import _
 
 shortname = '.hglf'
+shortnameslash = shortname + '/'
 longname = 'largefiles'
 
 
-# -- Portability wrappers ----------------------------------------------
-
-def dirstatewalk(dirstate, matcher, unknown=False, ignored=False):
-    return dirstate.walk(matcher, [], unknown, ignored)
-
-def repoadd(repo, list):
-    add = repo[None].add
-    return add(list)
-
-def reporemove(repo, list, unlink=False):
-    def remove(list, unlink):
-        wlock = repo.wlock()
-        try:
-            if unlink:
-                for f in list:
-                    try:
-                        util.unlinkpath(repo.wjoin(f))
-                    except OSError, inst:
-                        if inst.errno != errno.ENOENT:
-                            raise
-            repo[None].forget(list)
-        finally:
-            wlock.release()
-    return remove(list, unlink=unlink)
-
-def repoforget(repo, list):
-    forget = repo[None].forget
-    return forget(list)
-
-def findoutgoing(repo, remote, force):
-    from mercurial import discovery
-    outgoing = discovery.findcommonoutgoing(repo, remote.peer(), force=force)
-    return outgoing.missing
-
 # -- Private worker functions ------------------------------------------
 
 def getminsize(ui, assumelfiles, opt, default=10):
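
The deleted block was labelled "Portability wrappers"; the remaining call sites, as the rest of this patch shows, use the current API directly. For reference, the modern equivalents (sketched, with `files` and `matcher` as placeholders):

    wctx = repo[None]                 # working directory context
    wctx.add(files)                   # was repoadd(repo, files)
    wctx.forget(files)                # was repoforget(repo, files)
    repo.dirstate.walk(matcher, [], False, False)  # was dirstatewalk(...)
    # was findoutgoing(repo, remote, force):
    discovery.findcommonoutgoing(repo, remote.peer(), force=force).missing
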
@@ -139,24 +106,26 @@
         return super(largefilesdirstate, self).forget(unixpath(f))
     def normallookup(self, f):
         return super(largefilesdirstate, self).normallookup(unixpath(f))
+    def _ignore(self):
+        return False
 
 def openlfdirstate(ui, repo, create=True):
     '''
     Return a dirstate object that tracks largefiles: i.e. its root is
     the repo root, but it is saved in .hg/largefiles/dirstate.
     '''
-    admin = repo.join(longname)
-    opener = scmutil.opener(admin)
+    lfstoredir = repo.join(longname)
+    opener = scmutil.opener(lfstoredir)
     lfdirstate = largefilesdirstate(opener, ui, repo.root,
                                      repo.dirstate._validate)
 
     # If the largefiles dirstate does not exist, populate and create
     # it. This ensures that we create it on the first meaningful
     # largefiles operation in a new clone.
-    if create and not os.path.exists(os.path.join(admin, 'dirstate')):
-        util.makedirs(admin)
+    if create and not os.path.exists(os.path.join(lfstoredir, 'dirstate')):
+        util.makedirs(lfstoredir)
         matcher = getstandinmatcher(repo)
-        for standin in dirstatewalk(repo.dirstate, matcher):
+        for standin in repo.dirstate.walk(matcher, [], False, False):
             lfile = splitstandin(standin)
             hash = readstandin(repo, lfile)
             lfdirstate.normallookup(lfile)
@@ -173,8 +142,11 @@
     s = lfdirstate.status(match, [], False, False, False)
     unsure, modified, added, removed, missing, unknown, ignored, clean = s
     for lfile in unsure:
-        if repo[rev][standin(lfile)].data().strip() != \
-                hashfile(repo.wjoin(lfile)):
+        try:
+            fctx = repo[rev][standin(lfile)]
+        except LookupError:
+            fctx = None
+        if not fctx or fctx.data().strip() != hashfile(repo.wjoin(lfile)):
             modified.append(lfile)
         else:
             clean.append(lfile)
@@ -250,7 +222,7 @@
 
 def getstandinmatcher(repo, pats=[], opts={}):
     '''Return a match object that applies pats to the standin directory'''
-    standindir = repo.pathto(shortname)
+    standindir = repo.wjoin(shortname)
     if pats:
         # patterns supplied: search standin directory relative to current dir
         cwd = repo.getcwd()
@@ -264,19 +236,11 @@
         pats = [standindir]
     else:
         # no patterns and no standin dir: return matcher that matches nothing
-        match = match_.match(repo.root, None, [], exact=True)
-        match.matchfn = lambda f: False
-        return match
-    return getmatcher(repo, pats, opts, showbad=False)
+        return match_.match(repo.root, None, [], exact=True)
 
-def getmatcher(repo, pats=[], opts={}, showbad=True):
-    '''Wrapper around scmutil.match() that adds showbad: if false,
-    neuter the match object's bad() method so it does not print any
-    warnings about missing files or directories.'''
+    # no warnings about missing files or directories
     match = scmutil.match(repo[None], pats, opts)
-
-    if not showbad:
-        match.bad = lambda f, msg: None
+    match.bad = lambda f, msg: None
     return match
 
 def composestandinmatcher(repo, rmatcher):
@@ -296,17 +260,17 @@
     file.'''
     # Notes:
     # 1) Some callers want an absolute path, but for instance addlargefiles
-    #    needs it repo-relative so it can be passed to repoadd().  So leave
-    #    it up to the caller to use repo.wjoin() to get an absolute path.
+    #    needs it repo-relative so it can be passed to repo[None].add().  So
+    #    leave it up to the caller to use repo.wjoin() to get an absolute path.
     # 2) Join with '/' because that's what dirstate always uses, even on
     #    Windows. Change existing separator to '/' first in case we are
     #    passed filenames from an external source (like the command line).
-    return shortname + '/' + util.pconvert(filename)
+    return shortnameslash + util.pconvert(filename)
 
 def isstandin(filename):
     '''Return true if filename is a big file standin. filename must be
     in Mercurial's internal form (slash-separated).'''
-    return filename.startswith(shortname + '/')
+    return filename.startswith(shortnameslash)
 
 def splitstandin(filename):
     # Split on / because that's what dirstate always uses, even on Windows.
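
Together these helpers keep standin paths in the slash-separated form dirstate expects, and hoisting `shortnameslash` avoids re-concatenating the prefix on every call. The intended round trip, with a hypothetical filename:

    standin('sub\\big.bin')            # -> '.hglf/sub/big.bin' (Windows input)
    standin('sub/big.bin')             # -> '.hglf/sub/big.bin'
    isstandin('.hglf/sub/big.bin')     # -> True
    splitstandin('.hglf/sub/big.bin')  # -> 'sub/big.bin'
    splitstandin('sub/big.bin')        # -> None (not under .hglf)
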
@@ -435,7 +399,7 @@
 
 def islfilesrepo(repo):
     if ('largefiles' in repo.requirements and
-            util.any(shortname + '/' in f[0] for f in repo.store.datafiles())):
+            util.any(shortnameslash in f[0] for f in repo.store.datafiles())):
         return True
 
     return util.any(openlfdirstate(repo.ui, repo, False))
@@ -455,9 +419,13 @@
 def getstandinsstate(repo):
     standins = []
     matcher = getstandinmatcher(repo)
-    for standin in dirstatewalk(repo.dirstate, matcher):
+    for standin in repo.dirstate.walk(matcher, [], False, False):
         lfile = splitstandin(standin)
-        standins.append((lfile, readstandin(repo, lfile)))
+        try:
+            hash = readstandin(repo, lfile)
+        except IOError:
+            hash = None
+        standins.append((lfile, hash))
     return standins
 
 def getlfilestoupdate(oldstandins, newstandins):
diff -r 7648b87e76db -r f5fbe15ca744 hgext/largefiles/localstore.py
--- a/hgext/largefiles/localstore.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/hgext/largefiles/localstore.py	Sat Jan 19 17:24:33 2013 -0600
@@ -22,9 +22,8 @@
     the user cache.'''
 
     def __init__(self, ui, repo, remote):
-        url = os.path.join(remote.local().path, '.hg', lfutil.longname)
-        super(localstore, self).__init__(ui, repo, util.expandpath(url))
         self.remote = remote.local()
+        super(localstore, self).__init__(ui, repo, self.remote.url())
 
     def put(self, source, hash):
         util.makedirs(os.path.dirname(lfutil.storepath(self.remote, hash)))
@@ -46,7 +45,7 @@
         elif lfutil.inusercache(self.ui, hash):
             path = lfutil.usercachepath(self.ui, hash)
         else:
-            raise basestore.StoreError(filename, hash, '',
+            raise basestore.StoreError(filename, hash, self.url,
                 _("can't get file locally"))
         fd = open(path, 'rb')
         try:
diff -r 7648b87e76db -r f5fbe15ca744 hgext/largefiles/overrides.py
--- a/hgext/largefiles/overrides.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/hgext/largefiles/overrides.py	Sat Jan 19 17:24:33 2013 -0600
@@ -12,7 +12,7 @@
 import copy
 
 from mercurial import hg, commands, util, cmdutil, scmutil, match as match_, \
-    node, archival, error, merge
+    node, archival, error, merge, discovery
 from mercurial.i18n import _
 from mercurial.node import hex
 from hgext import rebase
@@ -116,7 +116,7 @@
                     lfdirstate.add(f)
             lfdirstate.write()
             bad += [lfutil.splitstandin(f)
-                    for f in lfutil.repoadd(repo, standins)
+                    for f in repo[None].add(standins)
                     if f in m.files()]
     finally:
         wlock.release()
@@ -137,21 +137,23 @@
                                         if lfutil.standin(f) in manifest]
                                        for list in [s[0], s[1], s[3], s[6]]]
 
-    def warn(files, reason):
+    def warn(files, msg):
         for f in files:
-            ui.warn(_('not removing %s: %s (use forget to undo)\n')
-                    % (m.rel(f), reason))
+            ui.warn(msg % m.rel(f))
         return int(len(files) > 0)
 
     result = 0
 
     if after:
         remove, forget = deleted, []
-        result = warn(modified + added + clean, _('file still exists'))
+        result = warn(modified + added + clean,
+                      _('not removing %s: file still exists\n'))
     else:
         remove, forget = deleted + clean, []
-        result = warn(modified, _('file is modified'))
-        result = warn(added, _('file has been marked for add')) or result
+        result = warn(modified, _('not removing %s: file is modified (use -f'
+                                  ' to force removal)\n'))
+        result = warn(added, _('not removing %s: file has been marked for add'
+                               ' (use forget to undo)\n')) or result
 
     for f in sorted(remove + forget):
         if ui.verbose or not m.exact(f):
@@ -168,19 +170,18 @@
                 # are removing the file.
                 if getattr(repo, "_isaddremove", False):
                     ui.status(_('removing %s\n') % f)
-                if os.path.exists(repo.wjoin(f)):
-                    util.unlinkpath(repo.wjoin(f))
+                util.unlinkpath(repo.wjoin(f), ignoremissing=True)
             lfdirstate.remove(f)
         lfdirstate.write()
         forget = [lfutil.standin(f) for f in forget]
         remove = [lfutil.standin(f) for f in remove]
-        lfutil.repoforget(repo, forget)
+        repo[None].forget(forget)
         # If this is being called by addremove, let the original addremove
         # function handle this.
         if not getattr(repo, "_isaddremove", False):
-            lfutil.reporemove(repo, remove, unlink=True)
-        else:
-            lfutil.reporemove(repo, remove, unlink=False)
+            for f in remove:
+                util.unlinkpath(repo.wjoin(f), ignoremissing=True)
+        repo[None].forget(remove)
     finally:
         wlock.release()
 
@@ -238,11 +239,34 @@
         repo._repo.lfstatus = False
 
 def overridelog(orig, ui, repo, *pats, **opts):
+    def overridematch(ctx, pats=[], opts={}, globbed=False,
+            default='relpath'):
+        """Matcher that merges root directory with .hglf, suitable for log.
+        It is still possible to match .hglf directly.
+        For any listed files run log on the standin too.
+        matchfn tries both the given filename and with .hglf stripped.
+        """
+        match = oldmatch(ctx, pats, opts, globbed, default)
+        m = copy.copy(match)
+        standins = [lfutil.standin(f) for f in m._files]
+        m._files.extend(standins)
+        m._fmap = set(m._files)
+        origmatchfn = m.matchfn
+        def lfmatchfn(f):
+            lf = lfutil.splitstandin(f)
+            if lf is not None and origmatchfn(lf):
+                return True
+            r = origmatchfn(f)
+            return r
+        m.matchfn = lfmatchfn
+        return m
+    oldmatch = installmatchfn(overridematch)
     try:
         repo.lfstatus = True
         return orig(ui, repo, *pats, **opts)
     finally:
         repo.lfstatus = False
+        restorematchfn()
 
 def overrideverify(orig, ui, repo, *pats, **opts):
     large = opts.pop('large', False)
@@ -254,6 +278,13 @@
         result = result or lfcommands.verifylfiles(ui, repo, all, contents)
     return result
 
+def overridedebugstate(orig, ui, repo, *pats, **opts):
+    large = opts.pop('large', False)
+    if large:
+        lfcommands.debugdirstate(ui, repo)
+    else:
+        orig(ui, repo, *pats, **opts)
+
 # Override needs to refresh standins so that update's normal merge
 # will go through properly. Then the other update hook (overriding repo.update)
 # will get the new files. Filemerge is also overridden so that the merge
@@ -746,7 +777,7 @@
         # .hg/largefiles, and the standin matcher won't match anything anyway.)
         if 'largefiles' in repo.requirements:
             if opts.get('noupdate'):
-                util.makedirs(repo.pathto(lfutil.shortname))
+                util.makedirs(repo.wjoin(lfutil.shortname))
                 util.makedirs(repo.join(lfutil.longname))
 
         # Caching is implicitly limited to 'rev' option, since the dest repo was
@@ -839,7 +870,7 @@
         write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
 
     if subrepos:
-        for subpath in ctx.substate:
+        for subpath in sorted(ctx.substate):
             sub = ctx.sub(subpath)
             submatch = match_.narrowmatcher(subpath, matchfn)
             sub.archive(repo.ui, archiver, prefix, submatch)
@@ -886,7 +917,7 @@
 
         write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
 
-    for subpath in ctx.substate:
+    for subpath in sorted(ctx.substate):
         sub = ctx.sub(subpath)
         submatch = match_.narrowmatcher(subpath, match)
         sub.archive(ui, archiver, os.path.join(prefix, repo._path) + '/',
@@ -949,8 +980,10 @@
             else:
                 lfdirstate.remove(f)
         lfdirstate.write()
-        lfutil.reporemove(repo, [lfutil.standin(f) for f in forget],
-            unlink=True)
+        standins = [lfutil.standin(f) for f in forget]
+        for f in standins:
+            util.unlinkpath(repo.wjoin(f), ignoremissing=True)
+        repo[None].forget(standins)
     finally:
         wlock.release()
 
@@ -967,10 +1000,10 @@
         remote = hg.peer(repo, opts, dest)
     except error.RepoError:
         return None
-    o = lfutil.findoutgoing(repo, remote, False)
-    if not o:
-        return o
-    o = repo.changelog.nodesbetween(o, revs)[0]
+    outgoing = discovery.findcommonoutgoing(repo, remote.peer(), force=False)
+    if not outgoing.missing:
+        return outgoing.missing
+    o = repo.changelog.nodesbetween(outgoing.missing, revs)[0]
     if opts.get('newest_first'):
         o.reverse()
 
@@ -994,7 +1027,7 @@
                     files.add(f)
         toupload = toupload.union(
             set([f for f in files if lfutil.isstandin(f) and f in ctx]))
-    return toupload
+    return sorted(toupload)
 
 def overrideoutgoing(orig, ui, repo, dest=None, **opts):
     result = orig(ui, repo, dest, **opts)
@@ -1065,6 +1098,9 @@
 # Calling purge with --all will cause the largefiles to be deleted.
 # Override repo.status to prevent this from happening.
 def overridepurge(orig, ui, repo, *dirs, **opts):
+    # XXX large file status is buggy when used on repo proxy.
+    # XXX this needs to be investigated.
+    repo = repo.unfiltered()
     oldstatus = repo.status
     def overridestatus(node1='.', node2=None, match=None, ignored=False,
                         clean=False, unknown=False, listsubrepos=False):
diff -r 7648b87e76db -r f5fbe15ca744 hgext/largefiles/proto.py
--- a/hgext/largefiles/proto.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/hgext/largefiles/proto.py	Sat Jan 19 17:24:33 2013 -0600
@@ -140,19 +140,6 @@
 def capabilities(repo, proto):
     return capabilitiesorig(repo, proto) + ' largefiles=serve'
 
-# duplicate what Mercurial's new out-of-band errors mechanism does, because
-# clients old and new alike both handle it well
-def webprotorefuseclient(self, message):
-    self.req.header([('Content-Type', 'application/hg-error')])
-    return message
-
-def sshprotorefuseclient(self, message):
-    self.ui.write_err('%s\n-\n' % message)
-    self.fout.write('\n')
-    self.fout.flush()
-
-    return ''
-
 def heads(repo, proto):
     if lfutil.islfilesrepo(repo):
         return wireproto.ooberror(LARGEFILES_REQUIRED_MSG)
diff -r 7648b87e76db -r f5fbe15ca744 hgext/largefiles/reposetup.py
--- a/hgext/largefiles/reposetup.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/hgext/largefiles/reposetup.py	Sat Jan 19 17:24:33 2013 -0600
@@ -11,9 +11,11 @@
 import types
 import os
 
-from mercurial import context, error, manifest, match as match_, util
+from mercurial import context, error, manifest, match as match_, util, \
+    discovery
 from mercurial import node as node_
 from mercurial.i18n import _
+from mercurial import localrepo
 
 import lfcommands
 import proto
@@ -88,6 +90,9 @@
         # appropriate list in the result. Also removes standin files
         # from the listing. Revert to the original status if
         # self.lfstatus is False.
+        # XXX large file status is buggy when used on repo proxy.
+        # XXX this needs to be investigated.
+        @localrepo.unfilteredmethod
         def status(self, node1='.', node2=None, match=None, ignored=False,
                 clean=False, unknown=False, listsubrepos=False):
             listignored, listclean, listunknown = ignored, clean, unknown
@@ -153,78 +158,54 @@
                             newfiles.append(f)
                     return newfiles
 
-                # Create a function that we can use to override what is
-                # normally the ignore matcher.  We've already checked
-                # for ignored files on the first dirstate walk, and
-                # unnecessarily re-checking here causes a huge performance
-                # hit because lfdirstate only knows about largefiles
-                def _ignoreoverride(self):
-                    return False
-
                 m = copy.copy(match)
                 m._files = tostandins(m._files)
 
                 result = super(lfilesrepo, self).status(node1, node2, m,
                     ignored, clean, unknown, listsubrepos)
                 if working:
-                    try:
-                        # Any non-largefiles that were explicitly listed must be
-                        # taken out or lfdirstate.status will report an error.
-                        # The status of these files was already computed using
-                        # super's status.
-                        # Override lfdirstate's ignore matcher to not do
-                        # anything
-                        origignore = lfdirstate._ignore
-                        lfdirstate._ignore = _ignoreoverride
+
+                    def sfindirstate(f):
+                        sf = lfutil.standin(f)
+                        dirstate = self.dirstate
+                        return sf in dirstate or sf in dirstate.dirs()
 
-                        def sfindirstate(f):
-                            sf = lfutil.standin(f)
-                            dirstate = self.dirstate
-                            return sf in dirstate or sf in dirstate.dirs()
-                        match._files = [f for f in match._files
-                                        if sfindirstate(f)]
-                        # Don't waste time getting the ignored and unknown
-                        # files again; we already have them
-                        s = lfdirstate.status(match, [], False,
-                                listclean, False)
-                        (unsure, modified, added, removed, missing, unknown,
-                                ignored, clean) = s
-                        # Replace the list of ignored and unknown files with
-                        # the previously calculated lists, and strip out the
-                        # largefiles
-                        lfiles = set(lfdirstate._map)
-                        ignored = set(result[5]).difference(lfiles)
-                        unknown = set(result[4]).difference(lfiles)
-                        if parentworking:
-                            for lfile in unsure:
-                                standin = lfutil.standin(lfile)
-                                if standin not in ctx1:
-                                    # from second parent
-                                    modified.append(lfile)
-                                elif ctx1[standin].data().strip() \
-                                        != lfutil.hashfile(self.wjoin(lfile)):
+                    match._files = [f for f in match._files
+                                    if sfindirstate(f)]
+                    # Don't waste time getting the ignored and unknown
+                    # files from lfdirstate
+                    s = lfdirstate.status(match, [], False,
+                            listclean, False)
+                    (unsure, modified, added, removed, missing, _unknown,
+                            _ignored, clean) = s
+                    if parentworking:
+                        for lfile in unsure:
+                            standin = lfutil.standin(lfile)
+                            if standin not in ctx1:
+                                # from second parent
+                                modified.append(lfile)
+                            elif ctx1[standin].data().strip() \
+                                    != lfutil.hashfile(self.wjoin(lfile)):
+                                modified.append(lfile)
+                            else:
+                                clean.append(lfile)
+                                lfdirstate.normal(lfile)
+                    else:
+                        tocheck = unsure + modified + added + clean
+                        modified, added, clean = [], [], []
+
+                        for lfile in tocheck:
+                            standin = lfutil.standin(lfile)
+                            if inctx(standin, ctx1):
+                                if ctx1[standin].data().strip() != \
+                                        lfutil.hashfile(self.wjoin(lfile)):
                                     modified.append(lfile)
                                 else:
                                     clean.append(lfile)
-                                    lfdirstate.normal(lfile)
-                        else:
-                            tocheck = unsure + modified + added + clean
-                            modified, added, clean = [], [], []
+                            else:
+                                added.append(lfile)
 
-                            for lfile in tocheck:
-                                standin = lfutil.standin(lfile)
-                                if inctx(standin, ctx1):
-                                    if ctx1[standin].data().strip() != \
-                                            lfutil.hashfile(self.wjoin(lfile)):
-                                        modified.append(lfile)
-                                    else:
-                                        clean.append(lfile)
-                                else:
-                                    added.append(lfile)
-                    finally:
-                        # Replace the original ignore function
-                        lfdirstate._ignore = origignore
-
+                    # Standins no longer found in lfdirstate have been removed
                     for standin in ctx1.manifest():
                         if not lfutil.isstandin(standin):
                             continue
@@ -239,20 +220,17 @@
 
                     # Largefiles are not really removed when they're
                     # still in the normal dirstate. Likewise, normal
-                    # files are not really removed if it's still in
+                    # files are not really removed if they are still in
                     # lfdirstate. This happens in merges where files
                     # change type.
                     removed = [f for f in removed if f not in self.dirstate]
                     result[2] = [f for f in result[2] if f not in lfdirstate]
 
+                    lfiles = set(lfdirstate._map)
                     # Unknown files
-                    unknown = set(unknown).difference(ignored)
-                    result[4] = [f for f in unknown
-                                 if (self.dirstate[f] == '?' and
-                                     not lfutil.isstandin(f))]
-                    # Ignored files were calculated earlier by the dirstate,
-                    # and we already stripped out the largefiles from the list
-                    result[5] = ignored
+                    result[4] = set(result[4]).difference(lfiles)
+                    # Ignored files
+                    result[5] = set(result[5]).difference(lfiles)
                     # combine normal files and largefiles
                     normals = [[fn for fn in filelist
                                 if not lfutil.isstandin(fn)]
@@ -361,7 +339,7 @@
                 # Case 2: user calls commit with specified patterns: refresh
                 # any matching big files.
                 smatcher = lfutil.composestandinmatcher(self, match)
-                standins = lfutil.dirstatewalk(self.dirstate, smatcher)
+                standins = self.dirstate.walk(smatcher, [], False, False)
 
                 # No matching big files: get out of the way and pass control to
                 # the usual commit() method.
@@ -377,7 +355,7 @@
                 lfdirstate = lfutil.openlfdirstate(ui, self)
                 for standin in standins:
                     lfile = lfutil.splitstandin(standin)
-                    if lfdirstate[lfile] <> 'r':
+                    if lfdirstate[lfile] != 'r':
                         lfutil.updatestandin(self, standin)
                         lfdirstate.normal(lfile)
                     else:
@@ -427,10 +405,11 @@
                 wlock.release()
 
         def push(self, remote, force=False, revs=None, newbranch=False):
-            o = lfutil.findoutgoing(self, remote, force)
-            if o:
+            outgoing = discovery.findcommonoutgoing(repo, remote.peer(),
+                                                    force=force)
+            if outgoing.missing:
                 toupload = set()
-                o = self.changelog.nodesbetween(o, revs)[0]
+                o = self.changelog.nodesbetween(outgoing.missing, revs)[0]
                 for n in o:
                     parents = [p for p in self.changelog.parents(n)
                                if p != node_.nullid]
diff -r 7648b87e76db -r f5fbe15ca744 hgext/largefiles/uisetup.py
--- a/hgext/largefiles/uisetup.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/hgext/largefiles/uisetup.py	Sat Jan 19 17:24:33 2013 -0600
@@ -9,9 +9,9 @@
 '''setup for largefiles extension: uisetup'''
 
 from mercurial import archival, cmdutil, commands, extensions, filemerge, hg, \
-    httppeer, localrepo, merge, scmutil, sshpeer, sshserver, wireproto
+    httppeer, localrepo, merge, scmutil, sshpeer, wireproto
 from mercurial.i18n import _
-from mercurial.hgweb import hgweb_mod, protocol, webcommands
+from mercurial.hgweb import hgweb_mod, webcommands
 from mercurial.subrepo import hgsubrepo
 
 import overrides
@@ -59,6 +59,11 @@
                      _('verify largefile contents not just existence'))]
     entry[1].extend(verifyopt)
 
+    entry = extensions.wrapcommand(commands.table, 'debugstate',
+                                   overrides.overridedebugstate)
+    debugstateopt = [('', 'large', None, _('display largefiles dirstate'))]
+    entry[1].extend(debugstateopt)
+
     entry = extensions.wrapcommand(commands.table, 'outgoing',
         overrides.overrideoutgoing)
     outgoingopt = [('', 'large', None, _('display outgoing largefiles'))]
@@ -139,11 +144,6 @@
     proto.capabilitiesorig = wireproto.capabilities
     wireproto.capabilities = proto.capabilities
 
-    # these let us reject non-largefiles clients and make them display
-    # our error messages
-    protocol.webproto.refuseclient = proto.webprotorefuseclient
-    sshserver.sshserver.refuseclient = proto.sshprotorefuseclient
-
     # can't do this in reposetup because it needs to have happened before
     # wirerepo.__init__ is called
     proto.ssholdcallstream = sshpeer.sshpeer._callstream
diff -r 7648b87e76db -r f5fbe15ca744 hgext/mq.py
--- a/hgext/mq.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/hgext/mq.py	Sat Jan 19 17:24:33 2013 -0600
@@ -63,7 +63,7 @@
 from mercurial.node import bin, hex, short, nullid, nullrev
 from mercurial.lock import release
 from mercurial import commands, cmdutil, hg, scmutil, util, revset
-from mercurial import repair, extensions, error, phases, bookmarks
+from mercurial import repair, extensions, error, phases
 from mercurial import patch as patchmod
 import os, re, errno, shutil
 
@@ -275,6 +275,7 @@
     It should be used instead of repo.commit inside the mq source for
     operations creating new changesets.
     """
+    repo = repo.unfiltered()
     if phase is None:
         if repo.ui.configbool('mq', 'secret', False):
             phase = phases.secret
@@ -826,7 +827,11 @@
             if r:
                 r[None].forget(patches)
             for p in patches:
-                os.unlink(self.join(p))
+                try:
+                    os.unlink(self.join(p))
+                except OSError, inst:
+                    if inst.errno != errno.ENOENT:
+                        raise
 
         qfinished = []
         if numrevs:
@@ -924,11 +929,11 @@
         self._cleanup(realpatches, numrevs, opts.get('keep'))
 
     def checktoppatch(self, repo):
+        '''check that working directory is at qtip'''
         if self.applied:
             top = self.applied[-1].node
             patch = self.applied[-1].name
-            pp = repo.dirstate.parents()
-            if top not in pp:
+            if repo.dirstate.p1() != top:
                 raise util.Abort(_("working directory revision is not qtip"))
             return top, patch
         return None, None
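
The rewritten test is also stricter: `dirstate.parents()` returns both working-directory parents, so the old `top not in pp` check passed even when qtip was only the second parent of a merge, whereas `dirstate.p1()` must now match exactly. Schematically:

    pp = repo.dirstate.parents()   # (p1, p2); p2 is nullid outside a merge
    # old: aborted only if top not in pp  -- top == pp[1] slipped through
    # new: aborts unless repo.dirstate.p1() == top, i.e. top == pp[0]
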
@@ -942,7 +947,7 @@
             bctx = repo[baserev]
         else:
             bctx = wctx.parents()[0]
-        for s in wctx.substate:
+        for s in sorted(wctx.substate):
             if wctx.sub(s).dirty(True):
                 raise util.Abort(
                     _("uncommitted changes in subrepository %s") % s)
@@ -1146,7 +1151,7 @@
                 return matches[0]
             if self.series and self.applied:
                 if s == 'qtip':
-                    return self.series[self.seriesend(True)-1]
+                    return self.series[self.seriesend(True) - 1]
                 if s == 'qbase':
                     return self.series[0]
             return None
@@ -1324,11 +1329,7 @@
                 # created while patching
                 for f in all_files:
                     if f not in repo.dirstate:
-                        try:
-                            util.unlinkpath(repo.wjoin(f))
-                        except OSError, inst:
-                            if inst.errno != errno.ENOENT:
-                                raise
+                        util.unlinkpath(repo.wjoin(f), ignoremissing=True)
                 self.ui.warn(_('done\n'))
                 raise
 
@@ -1405,8 +1406,6 @@
             self.applieddirty = True
             end = len(self.applied)
             rev = self.applied[start].node
-            if update:
-                top = self.checktoppatch(repo)[0]
 
             try:
                 heads = repo.changelog.heads(rev)
@@ -1427,7 +1426,7 @@
             if update:
                 qp = self.qparents(repo, rev)
                 ctx = repo[qp]
-                m, a, r, d = repo.status(qp, top)[:4]
+                m, a, r, d = repo.status(qp, '.')[:4]
                 if d:
                     raise util.Abort(_("deletions found between repo revs"))
 
@@ -1437,11 +1436,7 @@
                 self.backup(repo, tobackup)
 
                 for f in a:
-                    try:
-                        util.unlinkpath(repo.wjoin(f))
-                    except OSError, e:
-                        if e.errno != errno.ENOENT:
-                            raise
+                    util.unlinkpath(repo.wjoin(f), ignoremissing=True)
                     repo.dirstate.drop(f)
                 for f in m + r:
                     fctx = ctx[f]
@@ -1625,7 +1620,7 @@
                 # if the patch excludes a modified file, mark that
                 # file with mtime=0 so status can see it.
                 mm = []
-                for i in xrange(len(m)-1, -1, -1):
+                for i in xrange(len(m) - 1, -1, -1):
                     if not matchfn(m[i]):
                         mm.append(m[i])
                         del m[i]
@@ -1675,9 +1670,10 @@
                     patchf.write(chunk)
                 patchf.close()
 
+                marks = repo._bookmarks
                 for bm in bmlist:
-                    repo._bookmarks[bm] = n
-                bookmarks.write(repo)
+                    marks[bm] = n
+                marks.write()
 
                 self.applied.append(statusentry(n, patchfn))
             except: # re-raises
@@ -2999,7 +2995,7 @@
             revs.update(set(rsrevs))
         if not revs:
             del marks[mark]
-            repo._writebookmarks(mark)
+            marks.write()
             ui.write(_("bookmark '%s' deleted\n") % mark)
 
     if not revs:
@@ -3036,7 +3032,7 @@
             del q.applied[start:end]
             q.savedirty()
 
-    revs = list(rootnodes)
+    revs = sorted(rootnodes)
     if update and opts.get('keep'):
         wlock = repo.wlock()
         try:
@@ -3049,7 +3045,7 @@
 
     if opts.get('bookmark'):
         del marks[mark]
-        repo._writebookmarks(marks)
+        marks.write()
         ui.write(_("bookmark '%s' deleted\n") % mark)
 
     repo.mq.strip(repo, revs, backup=backup, update=update,
@@ -3435,7 +3431,7 @@
                             outapplied.pop()
                 # looking for pushed and shared changeset
                 for node in outapplied:
-                    if repo[node].phase() < phases.secret:
+                    if self[node].phase() < phases.secret:
                         raise util.Abort(_('source has mq patches applied'))
                 # no non-secret patches pushed
             super(mqrepo, self).checkpush(force, revs)
@@ -3451,7 +3447,8 @@
             mqtags = [(patch.node, patch.name) for patch in q.applied]
 
             try:
-                self.changelog.rev(mqtags[-1][0])
+                # for now ignore filtering business
+                self.unfiltered().changelog.rev(mqtags[-1][0])
             except error.LookupError:
                 self.ui.warn(_('mq status file refers to unknown node %s\n')
                              % short(mqtags[-1][0]))
@@ -3470,41 +3467,6 @@
 
             return result
 
-        def _branchtags(self, partial, lrev):
-            q = self.mq
-            cl = self.changelog
-            qbase = None
-            if not q.applied:
-                if getattr(self, '_committingpatch', False):
-                    # Committing a new patch, must be tip
-                    qbase = len(cl) - 1
-            else:
-                qbasenode = q.applied[0].node
-                try:
-                    qbase = cl.rev(qbasenode)
-                except error.LookupError:
-                    self.ui.warn(_('mq status file refers to unknown node %s\n')
-                                 % short(qbasenode))
-            if qbase is None:
-                return super(mqrepo, self)._branchtags(partial, lrev)
-
-            start = lrev + 1
-            if start < qbase:
-                # update the cache (excluding the patches) and save it
-                ctxgen = (self[r] for r in xrange(lrev + 1, qbase))
-                self._updatebranchcache(partial, ctxgen)
-                self._writebranchcache(partial, cl.node(qbase - 1), qbase - 1)
-                start = qbase
-            # if start = qbase, the cache is as updated as it should be.
-            # if start > qbase, the cache includes (part of) the patches.
-            # we might as well use it, but we won't save it.
-
-            # update the cache up to the tip
-            ctxgen = (self[r] for r in xrange(start, len(cl)))
-            self._updatebranchcache(partial, ctxgen)
-
-            return partial
-
     if repo.local():
         repo.__class__ = mqrepo
 
diff -r 7648b87e76db -r f5fbe15ca744 hgext/patchbomb.py
--- a/hgext/patchbomb.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/hgext/patchbomb.py	Sat Jan 19 17:24:33 2013 -0600
@@ -474,11 +474,11 @@
 
     if opts.get('diffstat') or opts.get('confirm'):
         ui.write(_('\nFinal summary:\n\n'))
-        ui.write('From: %s\n' % sender)
+        ui.write(('From: %s\n' % sender))
         for addr in showaddrs:
             ui.write('%s\n' % addr)
         for m, subj, ds in msgs:
-            ui.write('Subject: %s\n' % subj)
+            ui.write(('Subject: %s\n' % subj))
             if ds:
                 ui.write(ds)
         ui.write('\n')
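
The doubled parentheses are deliberate: Mercurial's check-code script flags literal strings passed to `ui.write()` and friends as probably missing `_()`, and its warning message names the escape hatch, wrapping the argument in an extra pair of parentheses to mark the omission as intentional. Schematically:

    ui.write('From: %s\n' % sender)     # candidate for the unwrapped-message check
    ui.write(('From: %s\n' % sender))   # extra parens mark it as deliberate
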
diff -r 7648b87e76db -r f5fbe15ca744 hgext/rebase.py
--- a/hgext/rebase.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/hgext/rebase.py	Sat Jan 19 17:24:33 2013 -0600
@@ -23,6 +23,7 @@
 import os, errno
 
 nullmerge = -2
+revignored = -3
 
 cmdtable = {}
 command = cmdutil.command(cmdtable)
@@ -184,8 +185,6 @@
                 rebaseset = repo.revs(
                     '(children(ancestor(%ld, %d)) and ::(%ld))::',
                     base, dest, base)
-            # temporary top level filtering of extinct revisions
-            rebaseset = repo.revs('%ld - hidden()', rebaseset)
             if rebaseset:
                 root = min(rebaseset)
             else:
@@ -194,8 +193,9 @@
             if not rebaseset:
                 repo.ui.debug('base is ancestor of destination\n')
                 result = None
-            elif not keepf and repo.revs('first(children(%ld) - %ld)-hidden()',
-                                         rebaseset, rebaseset):
+            elif (not (keepf or obsolete._enabled)
+                  and repo.revs('first(children(%ld) - %ld)',
+                                rebaseset, rebaseset)):
                 raise util.Abort(
                     _("can't remove original changesets with"
                       " unrebased descendants"),
@@ -214,8 +214,8 @@
             else:
                 originalwd, target, state = result
                 if collapsef:
-                    targetancestors = set(repo.changelog.ancestors([target]))
-                    targetancestors.add(target)
+                    targetancestors = repo.changelog.ancestors([target],
+                                                               inclusive=True)
                     external = checkexternal(repo, state, targetancestors)
 
         if keepbranchesf:
@@ -233,8 +233,7 @@
 
         # Rebase
         if not targetancestors:
-            targetancestors = set(repo.changelog.ancestors([target]))
-            targetancestors.add(target)
+            targetancestors = repo.changelog.ancestors([target], inclusive=True)
 
         # Keep track of the current bookmarks in order to reset them later
         currentbookmarks = repo._bookmarks.copy()
@@ -294,7 +293,7 @@
             else:
                 commitmsg = 'Collapsed revision'
                 for rebased in state:
-                    if rebased not in skipped and state[rebased] != nullmerge:
+                    if rebased not in skipped and state[rebased] > nullmerge:
                         commitmsg += '\n* %s' % repo[rebased].description()
                 commitmsg = ui.edit(commitmsg, repo.ui.username())
             newrev = concludenode(repo, rev, p1, external, commitmsg=commitmsg,
@@ -307,22 +306,21 @@
             # Nodeids are needed to reset bookmarks
             nstate = {}
             for k, v in state.iteritems():
-                if v != nullmerge:
+                if v > nullmerge:
                     nstate[repo[k].node()] = repo[v].node()
 
         if not keepf:
             collapsedas = None
             if collapsef:
                 collapsedas = newrev
-            clearrebased(ui, repo, state, collapsedas)
+            clearrebased(ui, repo, state, skipped, collapsedas)
 
         if currentbookmarks:
             updatebookmarks(repo, nstate, currentbookmarks, **opts)
 
         clearstatus(repo)
         ui.note(_("rebase completed\n"))
-        if os.path.exists(repo.sjoin('undo')):
-            util.unlinkpath(repo.sjoin('undo'))
+        util.unlinkpath(repo.sjoin('undo'), ignoremissing=True)
         if skipped:
             ui.note(_("%d revisions have been skipped\n") % len(skipped))
 
@@ -395,6 +393,15 @@
     # have to allow merging with it.
     return merge.update(repo, rev, True, True, False, base, collapse)
 
+def nearestrebased(repo, rev, state):
+    """return the nearest ancestors of rev in the rebase result"""
+    rebased = [r for r in state if state[r] > nullmerge]
+    candidates = repo.revs('max(%ld and (::%d))', rebased, rev)
+    if candidates:
+        return state[candidates[0]]
+    else:
+        return None
+
 def defineparents(repo, rev, target, state, targetancestors):
     'Return the new parent relationship of the revision that will be rebased'
     parents = repo[rev].parents()
@@ -406,6 +413,10 @@
     elif P1n in state:
         if state[P1n] == nullmerge:
             p1 = target
+        elif state[P1n] == revignored:
+            p1 = nearestrebased(repo, P1n, state)
+            if p1 is None:
+                p1 = target
         else:
             p1 = state[P1n]
     else: # P1n external
@@ -418,6 +429,11 @@
         if P2n in state:
             if p1 == target: # P1n in targetancestors or external
                 p1 = state[P2n]
+            elif state[P2n] == revignored:
+                p2 = nearestrebased(repo, P2n, state)
+                if p2 is None:
+                    # no ancestors rebased yet, detach
+                    p2 = target
             else:
                 p2 = state[P2n]
         else: # P2n external
@@ -479,13 +495,14 @@
 
 def updatebookmarks(repo, nstate, originalbookmarks, **opts):
     'Move bookmarks to their correct changesets'
+    marks = repo._bookmarks
     for k, v in originalbookmarks.iteritems():
         if v in nstate:
-            if nstate[v] != nullmerge:
+            if nstate[v] > nullmerge:
                 # update the bookmarks for revs that have moved
-                repo._bookmarks[k] = nstate[v]
+                marks[k] = nstate[v]
 
-    bookmarks.write(repo)
+    marks.write()
 
 def storestatus(repo, originalwd, target, state, collapse, keep, keepbranches,
                                                                 external):
@@ -499,7 +516,7 @@
     f.write('%d\n' % int(keepbranches))
     for d, v in state.iteritems():
         oldrev = repo[d].hex()
-        if v != nullmerge:
+        if v > nullmerge:
             newrev = repo[v].hex()
         else:
             newrev = v
@@ -509,8 +526,7 @@
 
 def clearstatus(repo):
     'Remove the status files'
-    if os.path.exists(repo.join("rebasestate")):
-        util.unlinkpath(repo.join("rebasestate"))
+    util.unlinkpath(repo.join("rebasestate"), ignoremissing=True)
 
 def restorestatus(repo):
     'Restore a previously stored status'
@@ -535,10 +551,10 @@
                 keepbranches = bool(int(l))
             else:
                 oldrev, newrev = l.split(':')
-                if newrev != str(nullmerge):
+                if newrev in (str(nullmerge), str(revignored)):
+                    state[repo[oldrev].rev()] = int(newrev)
+                else:
                     state[repo[oldrev].rev()] = repo[newrev].rev()
-                else:
-                    state[repo[oldrev].rev()] = int(newrev)
         skipped = set()
         # recompute the set of skipped revs
         if not collapse:
@@ -577,9 +593,9 @@
         merge.update(repo, repo[originalwd].rev(), False, True, False)
         rebased = filter(lambda x: x > -1 and x != target, state.values())
         if rebased:
-            strippoint = min(rebased)
+            strippoints = [c.node() for c in repo.set('roots(%ld)', rebased)]
             # no backup of rebased cset versions needed
-            repair.strip(repo.ui, repo, repo[strippoint].node())
+            repair.strip(repo.ui, repo, strippoints)
         clearstatus(repo)
         repo.ui.warn(_('rebase aborted\n'))
         return 0
@@ -602,65 +618,77 @@
     roots = list(repo.set('roots(%ld)', rebaseset))
     if not roots:
         raise util.Abort(_('no matching revisions'))
-    if len(roots) > 1:
-        raise util.Abort(_("can't rebase multiple roots"))
-    root = roots[0]
-
-    commonbase = root.ancestor(dest)
-    if commonbase == root:
-        raise util.Abort(_('source is ancestor of destination'))
-    if commonbase == dest:
-        samebranch = root.branch() == dest.branch()
-        if not collapse and samebranch and root in dest.children():
-            repo.ui.debug('source is a child of destination\n')
-            return None
+    roots.sort()
+    state = {}
+    detachset = set()
+    for root in roots:
+        commonbase = root.ancestor(dest)
+        if commonbase == root:
+            raise util.Abort(_('source is ancestor of destination'))
+        if commonbase == dest:
+            samebranch = root.branch() == dest.branch()
+            if not collapse and samebranch and root in dest.children():
+                repo.ui.debug('source is a child of destination\n')
+                return None
 
-    repo.ui.debug('rebase onto %d starting from %d\n' % (dest, root))
-    state = dict.fromkeys(rebaseset, nullrev)
-    # Rebase tries to turn <dest> into a parent of <root> while
-    # preserving the number of parents of rebased changesets:
-    #
-    # - A changeset with a single parent will always be rebased as a
-    #   changeset with a single parent.
-    #
-    # - A merge will be rebased as merge unless its parents are both
-    #   ancestors of <dest> or are themselves in the rebased set and
-    #   pruned while rebased.
-    #
-    # If one parent of <root> is an ancestor of <dest>, the rebased
-    # version of this parent will be <dest>. This is always true with
-    # --base option.
-    #
-    # Otherwise, we need to *replace* the original parents with
-    # <dest>. This "detaches" the rebased set from its former location
-    # and rebases it onto <dest>. Changes introduced by ancestors of
-    # <root> not common with <dest> (the detachset, marked as
-    # nullmerge) are "removed" from the rebased changesets.
-    #
-    # - If <root> has a single parent, set it to <dest>.
-    #
-    # - If <root> is a merge, we cannot decide which parent to
-    #   replace, the rebase operation is not clearly defined.
-    #
-    # The table below sums up this behavior:
-    #
-    # +--------------------+----------------------+-------------------------+
-    # |                    |     one parent       |  merge                  |
-    # +--------------------+----------------------+-------------------------+
-    # | parent in ::<dest> | new parent is <dest> | parents in ::<dest> are |
-    # |                    |                      | remapped to <dest>      |
-    # +--------------------+----------------------+-------------------------+
-    # | unrelated source   | new parent is <dest> | ambiguous, abort        |
-    # +--------------------+----------------------+-------------------------+
-    #
-    # The actual abort is handled by `defineparents`
-    if len(root.parents()) <= 1:
-        # (strict) ancestors of <root> not ancestors of <dest>
-        detachset = repo.revs('::%d - ::%d - %d', root, commonbase, root)
-        state.update(dict.fromkeys(detachset, nullmerge))
+        repo.ui.debug('rebase onto %d starting from %s\n' % (dest, roots))
+        state.update(dict.fromkeys(rebaseset, nullrev))
+        # Rebase tries to turn <dest> into a parent of <root> while
+        # preserving the number of parents of rebased changesets:
+        #
+        # - A changeset with a single parent will always be rebased as a
+        #   changeset with a single parent.
+        #
+        # - A merge will be rebased as merge unless its parents are both
+        #   ancestors of <dest> or are themselves in the rebased set and
+        #   pruned while rebased.
+        #
+        # If one parent of <root> is an ancestor of <dest>, the rebased
+        # version of this parent will be <dest>. This is always true with
+        # --base option.
+        #
+        # Otherwise, we need to *replace* the original parents with
+        # <dest>. This "detaches" the rebased set from its former location
+        # and rebases it onto <dest>. Changes introduced by ancestors of
+        # <root> not common with <dest> (the detachset, marked as
+        # nullmerge) are "removed" from the rebased changesets.
+        #
+        # - If <root> has a single parent, set it to <dest>.
+        #
+        # - If <root> is a merge, we cannot decide which parent to
+        #   replace, the rebase operation is not clearly defined.
+        #
+        # The table below sums up this behavior:
+        #
+        # +------------------+----------------------+-------------------------+
+        # |                  |     one parent       |  merge                  |
+        # +------------------+----------------------+-------------------------+
+        # | parent in        | new parent is <dest> | parents in ::<dest> are |
+        # | ::<dest>         |                      | remapped to <dest>      |
+        # +------------------+----------------------+-------------------------+
+        # | unrelated source | new parent is <dest> | ambiguous, abort        |
+        # +------------------+----------------------+-------------------------+
+        #
+        # The actual abort is handled by `defineparents`
+        if len(root.parents()) <= 1:
+            # ancestors of <root> not ancestors of <dest>
+            detachset.update(repo.changelog.findmissingrevs([commonbase.rev()],
+                                                            [root.rev()]))
+    for r in detachset:
+        if r not in state:
+            state[r] = nullmerge
+    if len(roots) > 1:
+        # If we have multiple roots, we may have "holes" in the rebase set.
+        # Rebase roots that descend from those "holes" should not be detached
+        # as other roots are. We use the special `revignored` to inform rebase
+        # that the revision should be ignored but that `defineparents` should
+        # search for a rebase destination that fits the rebase set topology.
+        rebasedomain = set(repo.revs('%ld::%ld', rebaseset, rebaseset))
+        for ignored in set(rebasedomain) - set(rebaseset):
+            state[ignored] = revignored
     return repo['.'].rev(), dest.rev(), state
 
-def clearrebased(ui, repo, state, collapsedas=None):
+def clearrebased(ui, repo, state, skipped, collapsedas=None):
     """dispose of rebased revision at the end of the rebase
 
     If `collapsedas` is not None, the rebase was a collapse whose result if the
@@ -669,20 +697,28 @@
         markers = []
         for rev, newrev in sorted(state.items()):
             if newrev >= 0:
-                if collapsedas is not None:
-                    newrev = collapsedas
-                markers.append((repo[rev], (repo[newrev],)))
+                if rev in skipped:
+                    succs = ()
+                elif collapsedas is not None:
+                    succs = (repo[collapsedas],)
+                else:
+                    succs = (repo[newrev],)
+                markers.append((repo[rev], succs))
         if markers:
             obsolete.createmarkers(repo, markers)
     else:
-        rebased = [rev for rev in state if state[rev] != nullmerge]
+        rebased = [rev for rev in state if state[rev] > nullmerge]
         if rebased:
-            if set(repo.changelog.descendants([min(rebased)])) - set(state):
-                ui.warn(_("warning: new changesets detected "
-                          "on source branch, not stripping\n"))
-            else:
+            stripped = []
+            for root in repo.set('roots(%ld)', rebased):
+                if set(repo.changelog.descendants([root.rev()])) - set(state):
+                    ui.warn(_("warning: new changesets detected "
+                              "on source branch, not stripping\n"))
+                else:
+                    stripped.append(root.node())
+            if stripped:
                 # backup the old csets by default
-                repair.strip(ui, repo, repo[min(rebased)].node(), "all")
+                repair.strip(ui, repo, stripped, "all")
 
 
 def pullrebase(orig, ui, repo, *args, **opts):
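
Across these rebase hunks, `state` maps every revision in play either to its rebased result or to a sentinel: `nullrev` (-1) for a member of the rebase set not yet processed, `nullmerge` (-2) for a detached ancestor whose changes are folded away, and the new `revignored` (-3) for a "hole" between multiple roots. Because the sentinels are ordered, the recurring `state[rev] > nullmerge` test selects exactly the rebase set members, pending or done. A toy mapping with hypothetical revision numbers:

    nullrev, nullmerge, revignored = -1, -2, -3
    state = {
        8: nullmerge,    # detached ancestor: contents dropped
        9: revignored,   # hole between roots: remapped by nearestrebased()
        10: nullrev,     # in the rebase set, not rebased yet
        11: 42,          # already rebased; its new revision is 42
    }
    sorted(r for r in state if state[r] > nullmerge)   # -> [10, 11]
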
diff -r 7648b87e76db -r f5fbe15ca744 hgext/record.py
--- a/hgext/record.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/hgext/record.py	Sat Jan 19 17:24:33 2013 -0600
@@ -8,7 +8,7 @@
 '''commands to interactively select changes for commit/qrefresh'''
 
 from mercurial.i18n import gettext, _
-from mercurial import cmdutil, commands, extensions, hg, mdiff, patch
+from mercurial import cmdutil, commands, extensions, hg, patch
 from mercurial import util
 import copy, cStringIO, errno, os, re, shutil, tempfile
 
@@ -520,11 +520,11 @@
                                '(use "hg commit" instead)'))
 
         changes = repo.status(match=match)[:3]
-        diffopts = mdiff.diffopts(
+        diffopts = patch.diffopts(ui, opts=dict(
             git=True, nodates=True,
             ignorews=opts.get('ignore_all_space'),
             ignorewsamount=opts.get('ignore_space_change'),
-            ignoreblanklines=opts.get('ignore_blank_lines'))
+            ignoreblanklines=opts.get('ignore_blank_lines')))
         chunks = patch.diff(repo, changes=changes, opts=diffopts)
         fp = cStringIO.StringIO()
         fp.write(''.join(chunks))
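
The switch from mdiff.diffopts to patch.diffopts is not cosmetic:
patch.diffopts(ui, opts=...) folds the user's [diff] configuration into the
resulting options, while mdiff.diffopts only knows the keyword arguments it is
given. A hedged sketch of the call shape (ui construction is simplified, and
only the options visible in the hunk are shown)::

    from mercurial import ui as uimod, patch

    u = uimod.ui()
    diffopts = patch.diffopts(u, opts={
        'git': True, 'nodates': True,     # forced by record itself
        'ignorews': False,                # normally filled in from the
        'ignorewsamount': False,          # --ignore-* command-line flags
        'ignoreblanklines': False,
    })
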
diff -r 7648b87e76db -r f5fbe15ca744 hgext/transplant.py
--- a/hgext/transplant.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/hgext/transplant.py	Sat Jan 19 17:24:33 2013 -0600
@@ -94,7 +94,8 @@
             parentrev = repo.changelog.rev(parent)
         if hasnode(repo, node):
             rev = repo.changelog.rev(node)
-            reachable = repo.changelog.incancestors([parentrev], rev)
+            reachable = repo.changelog.ancestors([parentrev], rev,
+                                                 inclusive=True)
             if rev in reachable:
                 return True
         for t in self.transplants.get(node):
@@ -103,7 +104,8 @@
                 self.transplants.remove(t)
                 return False
             lnoderev = repo.changelog.rev(t.lnode)
-            if lnoderev in repo.changelog.incancestors([parentrev], lnoderev):
+            if lnoderev in repo.changelog.ancestors([parentrev], lnoderev,
+                                                    inclusive=True):
                 return True
         return False
 
diff -r 7648b87e76db -r f5fbe15ca744 hgext/win32text.py
--- a/hgext/win32text.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/hgext/win32text.py	Sat Jan 19 17:24:33 2013 -0600
@@ -121,7 +121,7 @@
     # changegroup that contains an unacceptable commit followed later
     # by a commit that fixes the problem.
     tip = repo['tip']
-    for rev in xrange(len(repo)-1, repo[node].rev()-1, -1):
+    for rev in xrange(len(repo) - 1, repo[node].rev() - 1, -1):
         c = repo[rev]
         for f in c.files():
             if f in seen or f not in tip or f not in c:
diff -r 7648b87e76db -r f5fbe15ca744 mercurial/ancestor.py
--- a/mercurial/ancestor.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/mercurial/ancestor.py	Sat Jan 19 17:24:33 2013 -0600
@@ -5,7 +5,8 @@
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
-import heapq
+import heapq, util
+from node import nullrev
 
 def ancestor(a, b, pfunc):
     """
@@ -89,3 +90,175 @@
                 gx = x.next()
     except StopIteration:
         return None
+
+def missingancestors(revs, bases, pfunc):
+    """Return all the ancestors of revs that are not ancestors of bases.
+
+    This may include elements from revs.
+
+    Equivalent to the revset (::revs - ::bases). Revs are returned in
+    revision number order, which is a topological order.
+
+    revs and bases should both be iterables. pfunc must return a list of
+    parent revs for a given rev.
+    """
+
+    revsvisit = set(revs)
+    basesvisit = set(bases)
+    if not revsvisit:
+        return []
+    if not basesvisit:
+        basesvisit.add(nullrev)
+    start = max(max(revsvisit), max(basesvisit))
+    bothvisit = revsvisit.intersection(basesvisit)
+    revsvisit.difference_update(bothvisit)
+    basesvisit.difference_update(bothvisit)
+    # At this point, we hold the invariants that:
+    # - revsvisit is the set of nodes we know are an ancestor of at least one
+    #   of the nodes in revs
+    # - basesvisit is the same for bases
+    # - bothvisit is the set of nodes we know are ancestors of at least one of
+    #   the nodes in revs and one of the nodes in bases
+    # - a node may be in none or one, but not more, of revsvisit, basesvisit
+    #   and bothvisit at any given time
+    # Now we walk down in reverse topo order, adding parents of nodes already
+    # visited to the sets while maintaining the invariants. When a node is
+    # found in both revsvisit and basesvisit, it is removed from them and
+    # added to bothvisit instead. When revsvisit becomes empty, there are no
+    # more ancestors of revs that aren't also ancestors of bases, so exit.
+
+    missing = []
+    for curr in xrange(start, nullrev, -1):
+        if not revsvisit:
+            break
+
+        if curr in bothvisit:
+            bothvisit.remove(curr)
+            # curr's parents might have made it into revsvisit or basesvisit
+            # through another path
+            for p in pfunc(curr):
+                revsvisit.discard(p)
+                basesvisit.discard(p)
+                bothvisit.add(p)
+            continue
+
+        # curr will never be in both revsvisit and basesvisit, since if it
+        # were it'd have been pushed to bothvisit
+        if curr in revsvisit:
+            missing.append(curr)
+            thisvisit = revsvisit
+            othervisit = basesvisit
+        elif curr in basesvisit:
+            thisvisit = basesvisit
+            othervisit = revsvisit
+        else:
+            # not an ancestor of revs or bases: ignore
+            continue
+
+        thisvisit.remove(curr)
+        for p in pfunc(curr):
+            if p == nullrev:
+                pass
+            elif p in othervisit or p in bothvisit:
+                # p is implicitly in thisvisit. This means p is or should be
+                # in bothvisit
+                revsvisit.discard(p)
+                basesvisit.discard(p)
+                bothvisit.add(p)
+            else:
+                # visit later
+                thisvisit.add(p)
+
+    missing.reverse()
+    return missing
+
+class lazyancestors(object):
+    def __init__(self, cl, revs, stoprev=0, inclusive=False):
+        """Create a new object generating ancestors for the given revs. Does
+        not generate revs lower than stoprev.
+
+        This is computed lazily starting from revs. The object supports
+        iteration and membership.
+
+        cl should be a changelog and revs should be an iterable. inclusive is
+        a boolean that indicates whether revs should be included. Revs lower
+        than stoprev will not be generated.
+
+        Result does not include the null revision."""
+        self._parentrevs = cl.parentrevs
+        self._initrevs = revs
+        self._stoprev = stoprev
+        self._inclusive = inclusive
+
+        # Initialize data structures for __contains__.
+        # For __contains__, we use a heap rather than a deque because
+        # (a) it minimizes the number of parentrevs calls made
+        # (b) it makes the loop termination condition obvious
+        # Python's heap is a min-heap. Multiply all values by -1 to convert it
+        # into a max-heap.
+        self._containsvisit = [-rev for rev in revs]
+        heapq.heapify(self._containsvisit)
+        if inclusive:
+            self._containsseen = set(revs)
+        else:
+            self._containsseen = set()
+
+    def __iter__(self):
+        """Generate the ancestors of _initrevs in reverse topological order.
+
+        If inclusive is False, yield a sequence of revision numbers starting
+        with the parents of each revision in revs, i.e., each revision is *not*
+        considered an ancestor of itself.  Results are in breadth-first order:
+        parents of each rev in revs, then parents of those, etc.
+
+        If inclusive is True, yield all the revs first (ignoring stoprev),
+        then yield all the ancestors of revs as when inclusive is False.
+        If an element in revs is an ancestor of a different rev it is not
+        yielded again."""
+        seen = set()
+        revs = self._initrevs
+        if self._inclusive:
+            for rev in revs:
+                yield rev
+            seen.update(revs)
+
+        parentrevs = self._parentrevs
+        stoprev = self._stoprev
+        visit = util.deque(revs)
+
+        while visit:
+            for parent in parentrevs(visit.popleft()):
+                if parent >= stoprev and parent not in seen:
+                    visit.append(parent)
+                    seen.add(parent)
+                    yield parent
+
+    def __contains__(self, target):
+        """Test whether target is an ancestor of self._initrevs."""
+        # Trying to do both __iter__ and __contains__ using the same visit
+        # heap and seen set is complex enough that it slows down both. Keep
+        # them separate.
+        seen = self._containsseen
+        if target in seen:
+            return True
+
+        parentrevs = self._parentrevs
+        visit = self._containsvisit
+        stoprev = self._stoprev
+        heappop = heapq.heappop
+        heappush = heapq.heappush
+
+        targetseen = False
+
+        while visit and -visit[0] > target and not targetseen:
+            for parent in parentrevs(-heappop(visit)):
+                if parent < stoprev or parent in seen:
+                    continue
+                # We need to make sure we push all parents into the heap so
+                # that we leave it in a consistent state for future calls.
+                heappush(visit, -parent)
+                seen.add(parent)
+                if parent == target:
+                    targetseen = True
+
+        return targetseen
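
Both additions above are easiest to sanity-check on a small DAG. A minimal
sketch, assuming this version of mercurial.ancestor is importable; the toy
parent map and changelog stand-in are mine::

    from mercurial.ancestor import missingancestors, lazyancestors

    # Toy DAG: rev -> parents; -1 is nullrev
    parents = {0: [-1], 1: [0], 2: [1], 3: [1], 4: [2, 3]}

    # (::4 - ::2): ancestors of 4 (inclusive) that are not ancestors of 2
    assert missingancestors([4], [2], lambda rev: parents[rev]) == [3, 4]

    class toycl(object):                 # minimal changelog stand-in
        def parentrevs(self, rev):
            return parents[rev]

    a = lazyancestors(toycl(), [4], inclusive=False)
    assert list(a) == [2, 3, 1, 0]       # breadth-first from 4's parents
    assert 1 in a and 4 not in a         # __contains__ walks the max-heap
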
diff -r 7648b87e76db -r f5fbe15ca744 mercurial/archival.py
--- a/mercurial/archival.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/mercurial/archival.py	Sat Jan 19 17:24:33 2013 -0600
@@ -74,8 +74,11 @@
         def _write_gzip_header(self):
             self.fileobj.write('\037\213')             # magic header
             self.fileobj.write('\010')                 # compression method
-            # Python 2.6 deprecates self.filename
-            fname = getattr(self, 'name', None) or self.filename
+            # Python 2.6 introduced self.name and deprecated self.filename
+            try:
+                fname = self.name
+            except AttributeError:
+                fname = self.filename
             if fname and fname.endswith('.gz'):
                 fname = fname[:-3]
             flags = 0
@@ -103,7 +106,6 @@
                 self.fileobj = gzfileobj
                 return tarfile.TarFile.taropen(name, mode, gzfileobj)
             else:
-                self.fileobj = fileobj
                 return tarfile.open(name, mode + kind, fileobj)
 
         if isinstance(dest, str):
@@ -191,7 +193,7 @@
                                0x5455,     # block type: "extended-timestamp"
                                1 + 4,      # size of this block
                                1,          # "modification time is present"
-                               self.mtime) # time of last modification (UTC)
+                               int(self.mtime)) # last modification (UTC)
         self.z.writestr(i, data)
 
     def done(self):
@@ -297,7 +299,7 @@
     repo.ui.progress(_('archiving'), None)
 
     if subrepos:
-        for subpath in ctx.substate:
+        for subpath in sorted(ctx.substate):
             sub = ctx.sub(subpath)
             submatch = matchmod.narrowmatcher(subpath, matchfn)
             sub.archive(repo.ui, archiver, prefix, submatch)
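
The int(self.mtime) coercion matters because changeset dates can arrive as
floats, and the zip extra-field record must be packed from integers. A
standalone sketch of the "extended-timestamp" block; the '<hhBl' pack format
is my assumption, since the hunk only shows the argument tuple::

    import struct

    mtime = 1358637873.0                  # date stamps may be floats
    extra = struct.pack('<hhBl',
                        0x5455,           # block type: "extended-timestamp"
                        1 + 4,            # size of this block
                        1,                # "modification time is present"
                        int(mtime))       # struct needs an int here
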
diff -r 7648b87e76db -r f5fbe15ca744 mercurial/bookmarks.py
--- a/mercurial/bookmarks.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/mercurial/bookmarks.py	Sat Jan 19 17:24:33 2013 -0600
@@ -7,40 +7,80 @@
 
 from mercurial.i18n import _
 from mercurial.node import hex
-from mercurial import encoding, error, util, obsolete, phases
+from mercurial import encoding, error, util, obsolete
 import errno, os
 
-def read(repo):
-    '''Parse .hg/bookmarks file and return a dictionary
+class bmstore(dict):
+    """Storage for bookmarks.
+
+    This object should do all bookmark reads and writes, so that it's
+    fairly simple to replace the storage underlying bookmarks without
+    having to clone the logic surrounding bookmarks.
+
+    This particular bmstore implementation stores bookmarks as
+    {hash}\s{name}\n (the same format as localtags) in
+    .hg/bookmarks. The mapping is stored as {name: nodeid}.
+
+    This class does NOT handle the "current" bookmark state at this
+    time.
+    """
 
-    Bookmarks are stored as {HASH}\\s{NAME}\\n (localtags format) values
-    in the .hg/bookmarks file.
-    Read the file and return a (name=>nodeid) dictionary
-    '''
-    bookmarks = {}
-    try:
-        for line in repo.opener('bookmarks'):
-            line = line.strip()
-            if not line:
-                continue
-            if ' ' not in line:
-                repo.ui.warn(_('malformed line in .hg/bookmarks: %r\n') % line)
-                continue
-            sha, refspec = line.split(' ', 1)
-            refspec = encoding.tolocal(refspec)
+    def __init__(self, repo):
+        dict.__init__(self)
+        self._repo = repo
+        try:
+            for line in repo.vfs('bookmarks'):
+                line = line.strip()
+                if not line:
+                    continue
+                if ' ' not in line:
+                    repo.ui.warn(_('malformed line in .hg/bookmarks: %r\n')
+                                 % line)
+                    continue
+                sha, refspec = line.split(' ', 1)
+                refspec = encoding.tolocal(refspec)
+                try:
+                    self[refspec] = repo.changelog.lookup(sha)
+                except LookupError:
+                    pass
+        except IOError, inst:
+            if inst.errno != errno.ENOENT:
+                raise
+
+    def write(self):
+        '''Write bookmarks
+
+        Write the given bookmark => hash dictionary to the .hg/bookmarks file
+        in a format equal to those of localtags.
+
+        We also store a backup of the previous state in undo.bookmarks that
+        can be copied back on rollback.
+        '''
+        repo = self._repo
+        if repo._bookmarkcurrent not in self:
+            setcurrent(repo, None)
+
+        wlock = repo.wlock()
+        try:
+
+            file = repo.vfs('bookmarks', 'w', atomictemp=True)
+            for name, node in self.iteritems():
+                file.write("%s %s\n" % (hex(node), encoding.fromlocal(name)))
+            file.close()
+
+            # touch 00changelog.i so hgweb reloads bookmarks (no lock needed)
             try:
-                bookmarks[refspec] = repo.changelog.lookup(sha)
-            except LookupError:
+                os.utime(repo.sjoin('00changelog.i'), None)
+            except OSError:
                 pass
-    except IOError, inst:
-        if inst.errno != errno.ENOENT:
-            raise
-    return bookmarks
+
+        finally:
+            wlock.release()
 
 def readcurrent(repo):
     '''Get the current bookmark
 
-    If we use gittishsh branches we have a current bookmark that
+    If we use gittish branches we have a current bookmark that
     we are on. This function returns the name of the bookmark. It
     is stored in .hg/bookmarks.current
     '''
@@ -60,37 +100,6 @@
         file.close()
     return mark
 
-def write(repo):
-    '''Write bookmarks
-
-    Write the given bookmark => hash dictionary to the .hg/bookmarks file
-    in a format equal to those of localtags.
-
-    We also store a backup of the previous state in undo.bookmarks that
-    can be copied back on rollback.
-    '''
-    refs = repo._bookmarks
-
-    if repo._bookmarkcurrent not in refs:
-        setcurrent(repo, None)
-
-    wlock = repo.wlock()
-    try:
-
-        file = repo.opener('bookmarks', 'w', atomictemp=True)
-        for refspec, node in refs.iteritems():
-            file.write("%s %s\n" % (hex(node), encoding.fromlocal(refspec)))
-        file.close()
-
-        # touch 00changelog.i so hgweb reloads bookmarks (no lock needed)
-        try:
-            os.utime(repo.sjoin('00changelog.i'), None)
-        except OSError:
-            pass
-
-    finally:
-        wlock.release()
-
 def setcurrent(repo, mark):
     '''Set the name of the bookmark that we are currently on
 
@@ -152,7 +161,7 @@
             if mark != cur:
                 del marks[mark]
     if update:
-        repo._writebookmarks(marks)
+        marks.write()
     return update
 
 def listbookmarks(repo):
@@ -179,7 +188,7 @@
             if new not in repo:
                 return False
             marks[key] = repo[new].node()
-        write(repo)
+        marks.write()
         return True
     finally:
         w.release()
@@ -188,16 +197,17 @@
     ui.debug("checking for updated bookmarks\n")
     rb = remote.listkeys('bookmarks')
     changed = False
-    for k in rb.keys():
-        if k in repo._bookmarks:
-            nr, nl = rb[k], repo._bookmarks[k]
+    localmarks = repo._bookmarks
+    for k in sorted(rb):
+        if k in localmarks:
+            nr, nl = rb[k], localmarks[k]
             if nr in repo:
                 cr = repo[nr]
                 cl = repo[nl]
                 if cl.rev() >= cr.rev():
                     continue
                 if validdest(repo, cl, cr):
-                    repo._bookmarks[k] = cr.node()
+                    localmarks[k] = cr.node()
                     changed = True
                     ui.status(_("updating bookmark %s\n") % k)
                 else:
@@ -208,7 +218,7 @@
                     # find a unique @ suffix
                     for x in range(1, 100):
                         n = '%s@%d' % (kd, x)
-                        if n not in repo._bookmarks:
+                        if n not in localmarks:
                             break
                     # try to use an @pathalias suffix
                     # if an @pathalias already exists, we overwrite (update) it
@@ -216,17 +226,17 @@
                         if path == u:
                             n = '%s@%s' % (kd, p)
 
-                    repo._bookmarks[n] = cr.node()
+                    localmarks[n] = cr.node()
                     changed = True
                     ui.warn(_("divergent bookmark %s stored as %s\n") % (k, n))
         elif rb[k] in repo:
             # add remote bookmarks for changes we already have
-            repo._bookmarks[k] = repo[rb[k]].node()
+            localmarks[k] = repo[rb[k]].node()
             changed = True
             ui.status(_("adding remote bookmark %s\n") % k)
 
     if changed:
-        write(repo)
+        localmarks.write()
 
 def diff(ui, dst, src):
     ui.status(_("searching for changed bookmarks\n"))
@@ -246,6 +256,7 @@
 
 def validdest(repo, old, new):
     """Is the new bookmark destination a valid update from the old one"""
+    repo = repo.unfiltered()
     if old == new:
         # Old == new -> nothing to update.
         return False
@@ -263,14 +274,10 @@
         while len(validdests) != plen:
             plen = len(validdests)
             succs = set(c.node() for c in validdests)
-            for c in validdests:
-                if c.phase() > phases.public:
-                    # obsolescence marker does not apply to public changeset
-                    succs.update(obsolete.allsuccessors(repo.obsstore,
-                                                        [c.node()]))
+            mutable = [c.node() for c in validdests if c.mutable()]
+            succs.update(obsolete.allsuccessors(repo.obsstore, mutable))
             known = (n for n in succs if n in nm)
             validdests = set(repo.set('%ln::', known))
-        validdests.remove(old)
         return new in validdests
     else:
         return old.descendant(new)
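
The bmstore read/write pair above round-trips a very small on-disk format:
one '{hash} {name}' pair per line. A toy parser/serializer pair, with plain
strings standing in for changelog.lookup() and the encoding helpers::

    def parse(data):
        marks = {}
        for line in data.splitlines():
            if not line or ' ' not in line:
                continue                  # empty or malformed line: skip
            sha, refspec = line.split(' ', 1)
            marks[refspec] = sha
        return marks

    def serialize(marks):
        return ''.join('%s %s\n' % (sha, name)
                       for name, sha in sorted(marks.items()))

    data = '0123abcd feature-work\n'
    assert serialize(parse(data)) == data
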
diff -r 7648b87e76db -r f5fbe15ca744 mercurial/branchmap.py
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/branchmap.py	Sat Jan 19 17:24:33 2013 -0600
@@ -0,0 +1,223 @@
+# branchmap.py - logic to compute, maintain and store the branchmap for a local repo
+#
+# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from node import bin, hex, nullid, nullrev
+import encoding
+import util, repoview
+
+def _filename(repo):
+    """name of a branchcache file for a given repo or repoview"""
+    filename = "cache/branchheads"
+    if repo.filtername:
+        filename = '%s-%s' % (filename, repo.filtername)
+    return filename
+
+def read(repo):
+    try:
+        f = repo.opener(_filename(repo))
+        lines = f.read().split('\n')
+        f.close()
+    except (IOError, OSError):
+        return None
+
+    try:
+        cachekey = lines.pop(0).split(" ", 2)
+        last, lrev = cachekey[:2]
+        last, lrev = bin(last), int(lrev)
+        filteredhash = None
+        if len(cachekey) > 2:
+            filteredhash = bin(cachekey[2])
+        partial = branchcache(tipnode=last, tiprev=lrev,
+                              filteredhash=filteredhash)
+        if not partial.validfor(repo):
+            # invalidate the cache
+            raise ValueError('tip differs')
+        for l in lines:
+            if not l:
+                continue
+            node, label = l.split(" ", 1)
+            label = encoding.tolocal(label.strip())
+            if not node in repo:
+                raise ValueError('node %s does not exist' % node)
+            partial.setdefault(label, []).append(bin(node))
+    except KeyboardInterrupt:
+        raise
+    except Exception, inst:
+        if repo.ui.debugflag:
+            msg = 'invalid branchheads cache'
+            if repo.filtername is not None:
+                msg += ' (%s)' % repo.filtername
+            msg += ': %s\n'
+            repo.ui.warn(msg % inst)
+        partial = None
+    return partial
+
+
+
+def updatecache(repo):
+    cl = repo.changelog
+    filtername = repo.filtername
+    partial = repo._branchcaches.get(filtername)
+
+    revs = []
+    if partial is None or not partial.validfor(repo):
+        partial = read(repo)
+        if partial is None:
+            subsetname = repoview.subsettable.get(filtername)
+            if subsetname is None:
+                partial = branchcache()
+            else:
+                subset = repo.filtered(subsetname)
+                partial = subset.branchmap().copy()
+                extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
+                revs.extend(r for r in extrarevs if r <= partial.tiprev)
+    revs.extend(cl.revs(start=partial.tiprev + 1))
+    if revs:
+        partial.update(repo, revs)
+        partial.write(repo)
+    assert partial.validfor(repo), filtername
+    repo._branchcaches[repo.filtername] = partial
+
+class branchcache(dict):
+    """A dict like object that hold branches heads cache"""
+
+    def __init__(self, entries=(), tipnode=nullid, tiprev=nullrev,
+                 filteredhash=None):
+        super(branchcache, self).__init__(entries)
+        self.tipnode = tipnode
+        self.tiprev = tiprev
+        self.filteredhash = filteredhash
+
+    def _hashfiltered(self, repo):
+        """build hash of revision filtered in the current cache
+
+        Tracking tipnode and tiprev is not enough to ensure validaty of the
+        cache as they do not help to distinct cache that ignored various
+        revision bellow tiprev.
+
+        To detect such difference, we build a cache of all ignored revisions.
+        """
+        cl = repo.changelog
+        if not cl.filteredrevs:
+            return None
+        key = None
+        revs = sorted(r for r in cl.filteredrevs if r <= self.tiprev)
+        if revs:
+            s = util.sha1()
+            for rev in revs:
+                s.update('%s;' % rev)
+            key = s.digest()
+        return key
+
+    def validfor(self, repo):
+        """Is the cache content valide regarding a repo
+
+        - False when cached tipnode are unknown or if we detect a strip.
+        - True when cache is up to date or a subset of current repo."""
+        try:
+            return ((self.tipnode == repo.changelog.node(self.tiprev))
+                    and (self.filteredhash == self._hashfiltered(repo)))
+        except IndexError:
+            return False
+
+    def copy(self):
+        """return an deep copy of the branchcache object"""
+        return branchcache(self, self.tipnode, self.tiprev, self.filteredhash)
+
+    def write(self, repo):
+        try:
+            f = repo.opener(_filename(repo), "w", atomictemp=True)
+            cachekey = [hex(self.tipnode), str(self.tiprev)]
+            if self.filteredhash is not None:
+                cachekey.append(hex(self.filteredhash))
+            f.write(" ".join(cachekey) + '\n')
+            for label, nodes in sorted(self.iteritems()):
+                for node in nodes:
+                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
+            f.close()
+        except (IOError, OSError, util.Abort):
+            # Abort may be raised by a read-only opener
+            pass
+
+    def update(self, repo, revgen):
+        """Given a branchhead cache, self, that may have extra nodes or be
+        missing heads, and a generator of nodes that are at least a superset
+        of the missing heads, this function updates self to be correct.
+        """
+        cl = repo.changelog
+        # collect new branch entries
+        newbranches = {}
+        getbranch = cl.branch
+        for r in revgen:
+            newbranches.setdefault(getbranch(r), []).append(cl.node(r))
+        # if older branchheads are reachable from new ones, they aren't
+        # really branchheads. Note checking parents is insufficient:
+        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
+        for branch, newnodes in newbranches.iteritems():
+            bheads = self.setdefault(branch, [])
+            # Remove candidate heads that no longer are in the repo (e.g., as
+            # the result of a strip that just happened).  Avoid using 'node in
+            # self' here because that dives down into branchcache code somewhat
+            # recursively.
+            bheadrevs = [cl.rev(node) for node in bheads
+                         if cl.hasnode(node)]
+            newheadrevs = [cl.rev(node) for node in newnodes
+                           if cl.hasnode(node)]
+            ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
+            # Remove duplicates - nodes that are in newheadrevs and are already
+            # in bheadrevs.  This can happen if you strip a node whose parent
+            # was already a head (because they're on different branches).
+            bheadrevs = sorted(set(bheadrevs).union(newheadrevs))
+
+            # Starting from tip means fewer passes over reachable.  If we know
+            # the new candidates are not ancestors of existing heads, we don't
+            # have to examine ancestors of existing heads
+            if ctxisnew:
+                iterrevs = sorted(newheadrevs)
+            else:
+                iterrevs = list(bheadrevs)
+
+            # This loop prunes out two kinds of heads - heads that are
+            # superseded by a head in newheadrevs, and newheadrevs that are not
+            # heads because an existing head is their descendant.
+            while iterrevs:
+                latest = iterrevs.pop()
+                if latest not in bheadrevs:
+                    continue
+                ancestors = set(cl.ancestors([latest],
+                                             bheadrevs[0]))
+                if ancestors:
+                    bheadrevs = [b for b in bheadrevs if b not in ancestors]
+            self[branch] = [cl.node(rev) for rev in bheadrevs]
+            tiprev = max(bheadrevs)
+            if tiprev > self.tiprev:
+                self.tipnode = cl.node(tiprev)
+                self.tiprev = tiprev
+
+        # There may be branches that cease to exist when the last commit in the
+        # branch was stripped.  This code filters them out.  Note that the
+        # branch that ceased to exist may not be in newbranches because
+        # newbranches is the set of candidate heads, which when you strip the
+        # last commit in a branch will be the parent branch.
+        droppednodes = []
+        for branch in self.keys():
+            nodes = [head for head in self[branch]
+                     if cl.hasnode(head)]
+            if not nodes:
+                droppednodes.extend(self[branch])
+                del self[branch]
+        if ((not self.validfor(repo)) or (self.tipnode in droppednodes)):
+            # cache keys are not valid anymore
+            self.tipnode = nullid
+            self.tiprev = nullrev
+            for heads in self.values():
+                tiprev = max(cl.rev(node) for node in heads)
+                if tiprev > self.tiprev:
+                    self.tipnode = cl.node(tiprev)
+                    self.tiprev = tiprev
+        self.filteredhash = self._hashfiltered(repo)
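
validfor() accepts a cache only if both the tip pair and the filtered-revision
hash match, so two repoviews with the same tip but different filters cannot
share a cache entry. A sketch of the key computation mirroring _hashfiltered,
with hashlib.sha1 standing in for util.sha1::

    import hashlib

    def filteredhash(filteredrevs, tiprev):
        # hash only the filtered revs at or below tiprev
        revs = sorted(r for r in filteredrevs if r <= tiprev)
        if not revs:
            return None
        s = hashlib.sha1()
        for rev in revs:
            s.update(('%s;' % rev).encode('ascii'))  # same separator
        return s.digest()

    # filtered revs above tiprev do not affect the key
    assert filteredhash(set([2, 5, 9]), tiprev=4) == filteredhash(set([2]), 4)
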
diff -r 7648b87e76db -r f5fbe15ca744 mercurial/bundlerepo.py
--- a/mercurial/bundlerepo.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/mercurial/bundlerepo.py	Sat Jan 19 17:24:33 2013 -0600
@@ -14,25 +14,28 @@
 from node import nullid
 from i18n import _
 import os, tempfile, shutil
-import changegroup, util, mdiff, discovery, cmdutil
+import changegroup, util, mdiff, discovery, cmdutil, scmutil
 import localrepo, changelog, manifest, filelog, revlog, error
 
 class bundlerevlog(revlog.revlog):
     def __init__(self, opener, indexfile, bundle, linkmapper):
         # How it works:
-        # to retrieve a revision, we need to know the offset of
-        # the revision in the bundle (an unbundle object).
+        # To retrieve a revision, we need to know the offset of the revision in
+        # the bundle (an unbundle object). We store this offset in the index
+        # (start).
         #
-        # We store this offset in the index (start), to differentiate a
-        # rev in the bundle and from a rev in the revlog, we check
-        # len(index[r]). If the tuple is bigger than 7, it is a bundle
-        # (it is bigger since we store the node to which the delta is)
+        # basemap is indexed with revisions coming from the bundle, and it
+        # maps to the revision that is the base of the corresponding delta.
         #
+        # To differentiate a rev in the bundle from a rev in the revlog, we
+        # check revision against basemap.
+        opener = scmutil.readonlyvfs(opener)
         revlog.revlog.__init__(self, opener, indexfile)
         self.bundle = bundle
-        self.basemap = {}
+        self.basemap = {} # mapping rev to delta base rev
         n = len(self)
         chain = None
+        self.bundlerevs = set() # used by 'bundle()' revset expression
         while True:
             chunkdata = bundle.deltachunk(chain)
             if not chunkdata:
@@ -51,49 +54,50 @@
             if node in self.nodemap:
                 # this can happen if two branches make the same change
                 chain = node
+                self.bundlerevs.add(self.nodemap[node])
                 continue
 
             for p in (p1, p2):
                 if p not in self.nodemap:
                     raise error.LookupError(p, self.indexfile,
                                             _("unknown parent"))
+
+            if deltabase not in self.nodemap:
+                raise LookupError(deltabase, self.indexfile,
+                                  _('unknown delta base'))
+
+            baserev = self.rev(deltabase)
             # start, size, full unc. size, base (unused), link, p1, p2, node
             e = (revlog.offset_type(start, 0), size, -1, -1, link,
                  self.rev(p1), self.rev(p2), node)
-            self.basemap[n] = deltabase
+            self.basemap[n] = baserev
             self.index.insert(-1, e)
             self.nodemap[node] = n
+            self.bundlerevs.add(n)
             chain = node
             n += 1
 
-    def inbundle(self, rev):
-        """is rev from the bundle"""
-        if rev < 0:
-            return False
-        return rev in self.basemap
-    def bundlebase(self, rev):
-        return self.basemap[rev]
     def _chunk(self, rev):
-        # Warning: in case of bundle, the diff is against bundlebase,
+        # Warning: in case of bundle, the diff is against self.basemap,
         # not against rev - 1
         # XXX: could use some caching
-        if not self.inbundle(rev):
+        if rev not in self.basemap:
             return revlog.revlog._chunk(self, rev)
         self.bundle.seek(self.start(rev))
         return self.bundle.read(self.length(rev))
 
     def revdiff(self, rev1, rev2):
         """return or calculate a delta between two revisions"""
-        if self.inbundle(rev1) and self.inbundle(rev2):
+        if rev1 in self.basemap and rev2 in self.basemap:
             # hot path for bundle
-            revb = self.rev(self.bundlebase(rev2))
+            revb = self.basemap[rev2]
             if revb == rev1:
                 return self._chunk(rev2)
-        elif not self.inbundle(rev1) and not self.inbundle(rev2):
+        elif rev1 not in self.basemap and rev2 not in self.basemap:
             return revlog.revlog.revdiff(self, rev1, rev2)
 
         return mdiff.textdiff(self.revision(self.node(rev1)),
-                         self.revision(self.node(rev2)))
+                              self.revision(self.node(rev2)))
 
     def revision(self, nodeorrev):
         """return an uncompressed revision of a given node or revision
@@ -111,28 +115,23 @@
 
         text = None
         chain = []
-        iter_node = node
+        iterrev = rev
         # reconstruct the revision if it is from a changegroup
-        while self.inbundle(rev):
-            if self._cache and self._cache[0] == iter_node:
+        while iterrev in self.basemap:
+            if self._cache and self._cache[1] == iterrev:
                 text = self._cache[2]
                 break
-            chain.append(rev)
-            iter_node = self.bundlebase(rev)
-            rev = self.rev(iter_node)
+            chain.append(iterrev)
+            iterrev = self.basemap[iterrev]
         if text is None:
-            text = revlog.revlog.revision(self, iter_node)
+            text = revlog.revlog.revision(self, iterrev)
 
         while chain:
             delta = self._chunk(chain.pop())
             text = mdiff.patches(text, [delta])
 
-        p1, p2 = self.parents(node)
-        if node != revlog.hash(text, p1, p2):
-            raise error.RevlogError(_("integrity check failed on %s:%d")
-                                     % (self.datafile, self.rev(node)))
-
-        self._cache = (node, self.rev(node), text)
+        self._checkhash(text, node, rev)
+        self._cache = (node, rev, text)
         return text
 
     def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
@@ -212,7 +211,7 @@
         # dict with the mapping 'filename' -> position in the bundle
         self.bundlefilespos = {}
 
-    @util.propertycache
+    @localrepo.unfilteredpropertycache
     def changelog(self):
         # consume the header if it exists
         self.bundle.changelogheader()
@@ -220,7 +219,7 @@
         self.manstart = self.bundle.tell()
         return c
 
-    @util.propertycache
+    @localrepo.unfilteredpropertycache
     def manifest(self):
         self.bundle.seek(self.manstart)
         # consume the header if it exists
@@ -229,12 +228,12 @@
         self.filestart = self.bundle.tell()
         return m
 
-    @util.propertycache
+    @localrepo.unfilteredpropertycache
     def manstart(self):
         self.changelog
         return self.manstart
 
-    @util.propertycache
+    @localrepo.unfilteredpropertycache
     def filestart(self):
         self.manifest
         return self.filestart
@@ -256,8 +255,6 @@
                     if not c:
                         break
 
-        if f[0] == '/':
-            f = f[1:]
         if f in self.bundlefilespos:
             self.bundle.seek(self.bundlefilespos[f])
             return bundlefilelog(self.sopener, f, self.bundle,
@@ -282,9 +279,6 @@
     def getcwd(self):
         return os.getcwd() # always outside the repo
 
-    def _writebranchcache(self, branches, tip, tiprev):
-        # don't overwrite the disk cache with bundle-augmented data
-        pass
 
 def instance(ui, path, create):
     if create:
@@ -384,4 +378,3 @@
         other.close()
 
     return (localrepo, csets, cleanup)
-
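
bundlerevlog.revision() now walks basemap until it falls off the bundle into
the underlying revlog, then replays the collected deltas. A toy model of that
walk, with string concatenation standing in for mdiff.patches::

    basemap = {3: 2, 4: 3}               # bundle rev -> delta base rev
    fulltext = {2: 'base'}               # what the on-disk revlog can produce
    deltas = {3: '+a', 4: '+b'}

    def revision(rev):
        chain, iterrev = [], rev
        while iterrev in basemap:        # collect bundle deltas to apply
            chain.append(iterrev)
            iterrev = basemap[iterrev]
        text = fulltext[iterrev]         # revlog.revision() in the real code
        while chain:                     # apply deltas base-first
            text += deltas[chain.pop()]
        return text

    assert revision(4) == 'base+a+b'
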
diff -r 7648b87e76db -r f5fbe15ca744 mercurial/changelog.py
--- a/mercurial/changelog.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/mercurial/changelog.py	Sat Jan 19 17:24:33 2013 -0600
@@ -27,10 +27,13 @@
 
 def decodeextra(text):
     """
-    >>> decodeextra(encodeextra({'foo': 'bar', 'baz': chr(0) + '2'}))
-    {'foo': 'bar', 'baz': '\\x002', 'branch': 'default'}
-    >>> decodeextra(encodeextra({'foo': 'bar', 'baz': chr(92) + chr(0) + '2'}))
-    {'foo': 'bar', 'baz': '\\\\\\x002', 'branch': 'default'}
+    >>> sorted(decodeextra(encodeextra({'foo': 'bar', 'baz': chr(0) + '2'})
+    ...                    ).iteritems())
+    [('baz', '\\x002'), ('branch', 'default'), ('foo', 'bar')]
+    >>> sorted(decodeextra(encodeextra({'foo': 'bar',
+    ...                                 'baz': chr(92) + chr(0) + '2'})
+    ...                    ).iteritems())
+    [('baz', '\\\\\\x002'), ('branch', 'default'), ('foo', 'bar')]
     """
     extra = _defaultextra.copy()
     for l in text.split('\0'):
@@ -124,7 +127,7 @@
         self._realopener = opener
         self._delayed = False
         self._divert = False
-        self.filteredrevs = ()
+        self.filteredrevs = frozenset()
 
     def tip(self):
         """filtered version of revlog.tip"""
@@ -337,3 +340,10 @@
         l = [hex(manifest), user, parseddate] + sorted(files) + ["", desc]
         text = "\n".join(l)
         return self.addrevision(text, transaction, len(self), p1, p2)
+
+    def branch(self, rev):
+        """return the branch of a revision
+
+        This function exists because creating a changectx object
+        just to access this is costly."""
+        return encoding.tolocal(self.read(rev)[5].get("branch"))
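
The doctest above was rewritten because dict iteration order is unspecified,
so printing the decoded dict directly is not stable across runs. A simplified
model of the decode side (the real decodeextra also unescapes NUL, newline and
backslash, which is elided here)::

    _defaultextra = {'branch': 'default'}

    def decodeextra(text):
        # NUL-separated key:value pairs merged over the defaults
        extra = _defaultextra.copy()
        for l in text.split('\0'):
            if l:
                k, v = l.split(':', 1)
                extra[k] = v
        return extra

    assert sorted(decodeextra('foo:bar').items()) == \
        [('branch', 'default'), ('foo', 'bar')]
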
diff -r 7648b87e76db -r f5fbe15ca744 mercurial/cmdutil.py
--- a/mercurial/cmdutil.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/mercurial/cmdutil.py	Sat Jan 19 17:24:33 2013 -0600
@@ -10,7 +10,7 @@
 import os, sys, errno, re, tempfile
 import util, scmutil, templater, patch, error, templatekw, revlog, copies
 import match as matchmod
-import subrepo, context, repair, bookmarks, graphmod, revset, phases, obsolete
+import subrepo, context, repair, graphmod, revset, phases, obsolete
 import changelog
 import lock as lockmod
 
@@ -85,7 +85,7 @@
     if modified or added or removed or deleted:
         raise util.Abort(_("outstanding uncommitted changes"))
     ctx = repo[None]
-    for s in ctx.substate:
+    for s in sorted(ctx.substate):
         if ctx.sub(s).dirty():
             raise util.Abort(_("uncommitted changes in subrepo %s") % s)
 
@@ -1137,8 +1137,8 @@
             for path in match.files():
                 if path == '.' or path in repo.store:
                     break
-                else:
-                    return []
+            else:
+                return []
 
     if slowpath:
         # We have to read the changelog to match filenames against
@@ -1399,39 +1399,18 @@
     callable taking a revision number and returning a match object
     filtering the files to be detailed when displaying the revision.
     """
-    def increasingrevs(repo, revs, matcher):
-        # The sorted input rev sequence is chopped in sub-sequences
-        # which are sorted in ascending order and passed to the
-        # matcher. The filtered revs are sorted again as they were in
-        # the original sub-sequence. This achieve several things:
-        #
-        # - getlogrevs() now returns a generator which behaviour is
-        #   adapted to log need. First results come fast, last ones
-        #   are batched for performances.
-        #
-        # - revset matchers often operate faster on revision in
-        #   changelog order, because most filters deal with the
-        #   changelog.
-        #
-        # - revset matchers can reorder revisions. "A or B" typically
-        #   returns returns the revision matching A then the revision
-        #   matching B. We want to hide this internal implementation
-        #   detail from the caller, and sorting the filtered revision
-        #   again achieves this.
-        for i, window in increasingwindows(0, len(revs), windowsize=1):
-            orevs = revs[i:i + window]
-            nrevs = set(matcher(repo, sorted(orevs)))
-            for rev in orevs:
-                if rev in nrevs:
-                    yield rev
-
     if not len(repo):
-        return iter([]), None, None
+        return [], None, None
+    limit = loglimit(opts)
     # Default --rev value depends on --follow but --follow behaviour
     # depends on revisions resolved from --rev...
     follow = opts.get('follow') or opts.get('follow_first')
+    possiblyunsorted = False # whether revs might need sorting
     if opts.get('rev'):
         revs = scmutil.revrange(repo, opts['rev'])
+        # Don't sort here because _makegraphlogrevset might depend on the
+        # order of revs
+        possiblyunsorted = True
     else:
         if follow and len(repo) > 0:
             revs = repo.revs('reverse(:.)')
@@ -1439,17 +1418,23 @@
             revs = list(repo.changelog)
             revs.reverse()
     if not revs:
-        return iter([]), None, None
+        return [], None, None
     expr, filematcher = _makegraphlogrevset(repo, pats, opts, revs)
+    if possiblyunsorted:
+        revs.sort(reverse=True)
     if expr:
+        # Revset matchers often operate faster on revisions in changelog
+        # order, because most filters deal with the changelog.
+        revs.reverse()
         matcher = revset.match(repo.ui, expr)
-        revs = increasingrevs(repo, revs, matcher)
-    if not opts.get('hidden'):
-        # --hidden is still experimental and not worth a dedicated revset
-        # yet. Fortunately, filtering revision number is fast.
-        revs = (r for r in revs if r not in repo.hiddenrevs)
-    else:
-        revs = iter(revs)
+        # Revset matches can reorder revisions. "A or B" typically returns
+        # the revision matching A then the revision matching B. Sort
+        # again to fix that.
+        revs = matcher(repo, revs)
+        revs.sort(reverse=True)
+    if limit is not None:
+        revs = revs[:limit]
+
     return revs, expr, filematcher
 
 def displaygraph(ui, dag, displayer, showparents, edgefn, getrenamed=None,
@@ -1484,10 +1469,6 @@
 def graphlog(ui, repo, *pats, **opts):
     # Parameters are identical to log command ones
     revs, expr, filematcher = getgraphlogrevs(repo, pats, opts)
-    revs = sorted(revs, reverse=1)
-    limit = loglimit(opts)
-    if limit is not None:
-        revs = revs[:limit]
     revdag = graphmod.dagwalker(repo, revs)
 
     getrenamed = None
@@ -1534,7 +1515,7 @@
             if ui.verbose or not exact:
                 ui.status(_('adding %s\n') % match.rel(join(f)))
 
-    for subpath in wctx.substate:
+    for subpath in sorted(wctx.substate):
         sub = wctx.sub(subpath)
         try:
             submatch = matchmod.narrowmatcher(subpath, match)
@@ -1565,7 +1546,7 @@
     if explicitonly:
         forget = [f for f in forget if match.exact(f)]
 
-    for subpath in wctx.substate:
+    for subpath in sorted(wctx.substate):
         sub = wctx.sub(subpath)
         try:
             submatch = matchmod.narrowmatcher(subpath, match)
@@ -1762,9 +1743,10 @@
                 # Move bookmarks from old parent to amend commit
                 bms = repo.nodebookmarks(old.node())
                 if bms:
+                    marks = repo._bookmarks
                     for bm in bms:
-                        repo._bookmarks[bm] = newid
-                    bookmarks.write(repo)
+                        marks[bm] = newid
+                    marks.write()
             #commit the whole amend process
             if obsolete._enabled and newid != old.node():
                 # mark the new changeset as successor of the rewritten one
@@ -1875,7 +1857,7 @@
                 names[abs] = m.rel(abs), m.exact(abs)
 
         # get the list of subrepos that must be reverted
-        targetsubs = [s for s in ctx.substate if m(s)]
+        targetsubs = sorted(s for s in ctx.substate if m(s))
         m = scmutil.matchfiles(repo, names)
         changes = repo.status(match=m)[:4]
         modified, added, removed, deleted = map(set, changes)
@@ -2015,12 +1997,12 @@
     '''returns a function object bound to table which can be used as
     a decorator for populating table as a command table'''
 
-    def cmd(name, options, synopsis=None):
+    def cmd(name, options=(), synopsis=None):
         def decorator(func):
             if synopsis:
-                table[name] = func, options[:], synopsis
+                table[name] = func, list(options), synopsis
             else:
-                table[name] = func, options[:]
+                table[name] = func, list(options)
             return func
         return decorator
 
diff -r 7648b87e76db -r f5fbe15ca744 mercurial/commands.py
--- a/mercurial/commands.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/mercurial/commands.py	Sat Jan 19 17:24:33 2013 -0600
@@ -49,6 +49,7 @@
     ('', 'profile', None, _('print command execution profile')),
     ('', 'version', None, _('output version information and exit')),
     ('h', 'help', None, _('display help and exit')),
+    ('', 'hidden', False, _('consider hidden changesets')),
 ]
 
 dryrunopts = [('n', 'dry-run', None,
@@ -549,6 +550,10 @@
           hg bisect --skip
           hg bisect --skip 23
 
+      - skip all revisions that do not touch directories ``foo`` or ``bar``::
+
+          hg bisect --skip '!( file("path:foo") & file("path:bar") )'
+
       - forget the current bisection::
 
           hg bisect --reset
@@ -754,7 +759,7 @@
             cmdutil.bailifchanged(repo)
             return hg.clean(repo, node)
 
-@command('bookmarks',
+@command('bookmarks|bookmark',
     [('f', 'force', False, _('force')),
     ('r', 'rev', '', _('revision'), _('REV')),
     ('d', 'delete', False, _('delete a given bookmark')),
@@ -821,7 +826,7 @@
         if mark == repo._bookmarkcurrent:
             bookmarks.setcurrent(repo, None)
         del marks[mark]
-        bookmarks.write(repo)
+        marks.write()
 
     elif rename:
         if mark is None:
@@ -834,7 +839,7 @@
         if repo._bookmarkcurrent == rename and not inactive:
             bookmarks.setcurrent(repo, mark)
         del marks[rename]
-        bookmarks.write(repo)
+        marks.write()
 
     elif mark is not None:
         mark = checkformat(mark)
@@ -848,7 +853,7 @@
             marks[mark] = cur
         if not inactive and cur == marks[mark]:
             bookmarks.setcurrent(repo, mark)
-        bookmarks.write(repo)
+        marks.write()
 
     # Same message whether trying to deactivate the current bookmark (-i
     # with no NAME) or listing bookmarks
@@ -924,7 +929,7 @@
                                        ' exists'),
                                      # i18n: "it" refers to an existing branch
                                      hint=_("use 'hg update' to switch to it"))
-            scmutil.checknewlabel(None, label, 'branch')
+            scmutil.checknewlabel(repo, label, 'branch')
             repo.dirstate.setbranch(label)
             ui.status(_('marked working directory as branch %s\n') % label)
             ui.status(_('(branches are permanent and global, '
@@ -1292,7 +1297,7 @@
             raise util.Abort(_('cannot amend merge changesets'))
         if len(repo[None].parents()) > 1:
             raise util.Abort(_('cannot amend while merging'))
-        if old.children():
+        if (not obsolete._enabled) and old.children():
             raise util.Abort(_('cannot amend changeset with children'))
 
         e = cmdutil.commiteditor
@@ -1322,11 +1327,12 @@
         elif marks:
             ui.debug('moving bookmarks %r from %s to %s\n' %
                      (marks, old.hex(), hex(node)))
+            newmarks = repo._bookmarks
             for bm in marks:
-                repo._bookmarks[bm] = node
+                newmarks[bm] = node
                 if bm == current:
                     bookmarks.setcurrent(repo, bm)
-            bookmarks.write(repo)
+            newmarks.write()
     else:
         e = cmdutil.commiteditor
         if opts.get('force_editor'):
@@ -1513,7 +1519,7 @@
         ui.progress(_('building'), id, unit=_('revisions'), total=total)
         for type, data in dagparser.parsedag(text):
             if type == 'n':
-                ui.note('node %s\n' % str(data))
+                ui.note(('node %s\n' % str(data)))
                 id, ps = data
 
                 files = []
@@ -1526,7 +1532,8 @@
                     if len(ps) > 1:
                         p2 = repo[ps[1]]
                         pa = p1.ancestor(p2)
-                        base, local, other = [x[fn].data() for x in pa, p1, p2]
+                        base, local, other = [x[fn].data() for x in (pa, p1,
+                                                                     p2)]
                         m3 = simplemerge.Merge3Text(base, local, other)
                         ml = [l.strip() for l in m3.merge_lines()]
                         ml.append("")
@@ -1574,10 +1581,10 @@
                 at = id
             elif type == 'l':
                 id, name = data
-                ui.note('tag %s\n' % name)
+                ui.note(('tag %s\n' % name))
                 tags.append("%s %s\n" % (hex(repo.changelog.node(id)), name))
             elif type == 'a':
-                ui.note('branch %s\n' % data)
+                ui.note(('branch %s\n' % data))
                 atbranch = data
             ui.progress(_('building'), id, unit=_('revisions'), total=total)
         tr.close()
@@ -1595,7 +1602,7 @@
     try:
         gen = changegroup.readbundle(f, bundlepath)
         if all:
-            ui.write("format: id, p1, p2, cset, delta base, len(delta)\n")
+            ui.write(("format: id, p1, p2, cset, delta base, len(delta)\n"))
 
             def showchunks(named):
                 ui.write("\n%s\n" % named)
@@ -1787,11 +1794,11 @@
         d = util.parsedate(date, util.extendeddateformats)
     else:
         d = util.parsedate(date)
-    ui.write("internal: %s %s\n" % d)
-    ui.write("standard: %s\n" % util.datestr(d))
+    ui.write(("internal: %s %s\n") % d)
+    ui.write(("standard: %s\n") % util.datestr(d))
     if range:
         m = util.matchdate(range)
-        ui.write("match: %s\n" % m(d[0]))
+        ui.write(("match: %s\n") % m(d[0]))
 
 @command('debugdiscovery',
     [('', 'old', None, _('use old-style discovery')),
@@ -1821,8 +1828,8 @@
                                                                 force=True)
             common = set(common)
             if not opts.get('nonheads'):
-                ui.write("unpruned common: %s\n" % " ".join([short(n)
-                                                            for n in common]))
+                ui.write(("unpruned common: %s\n") %
+                         " ".join(sorted(short(n) for n in common)))
                 dag = dagutil.revlogdag(repo.changelog)
                 all = dag.ancestorset(dag.internalizeall(common))
                 common = dag.externalizeall(dag.headsetofconnecteds(all))
@@ -1831,11 +1838,12 @@
         common = set(common)
         rheads = set(hds)
         lheads = set(repo.heads())
-        ui.write("common heads: %s\n" % " ".join([short(n) for n in common]))
+        ui.write(("common heads: %s\n") %
+                 " ".join(sorted(short(n) for n in common)))
         if lheads <= common:
-            ui.write("local is subset\n")
+            ui.write(("local is subset\n"))
         elif rheads <= common:
-            ui.write("remote is subset\n")
+            ui.write(("remote is subset\n"))
 
     serverlogs = opts.get('serverlog')
     if serverlogs:
@@ -1879,9 +1887,9 @@
 def debugfsinfo(ui, path = "."):
     """show information detected about current filesystem"""
     util.writefile('.debugfsinfo', '')
-    ui.write('exec: %s\n' % (util.checkexec(path) and 'yes' or 'no'))
-    ui.write('symlink: %s\n' % (util.checklink(path) and 'yes' or 'no'))
-    ui.write('case-sensitive: %s\n' % (util.checkcase('.debugfsinfo')
+    ui.write(('exec: %s\n') % (util.checkexec(path) and 'yes' or 'no'))
+    ui.write(('symlink: %s\n') % (util.checklink(path) and 'yes' or 'no'))
+    ui.write(('case-sensitive: %s\n') % (util.checkcase('.debugfsinfo')
                                 and 'yes' or 'no'))
     os.unlink('.debugfsinfo')
 
@@ -1979,7 +1987,7 @@
             r = filelog
     if not r:
         r = revlog.revlog(scmutil.opener(os.getcwd(), audit=False), file_)
-    ui.write("digraph G {\n")
+    ui.write(("digraph G {\n"))
     for i in r:
         node = r.node(i)
         pp = r.parents(node)
@@ -2128,7 +2136,8 @@
                 ui.write(' ')
                 ui.write(hex(repl))
             ui.write(' %X ' % m._data[2])
-            ui.write(m.metadata())
+            ui.write('{%s}' % (', '.join('%r: %r' % t for t in
+                                         sorted(m.metadata().items()))))
             ui.write('\n')
 
 @command('debugpushkey', [], _('REPO NAMESPACE [KEY OLD NEW]'))
@@ -2148,7 +2157,7 @@
         ui.status(str(r) + '\n')
         return not r
     else:
-        for k, v in target.listkeys(namespace).iteritems():
+        for k, v in sorted(target.listkeys(namespace).iteritems()):
             ui.write("%s\t%s\n" % (k.encode('string-escape'),
                                    v.encode('string-escape')))
 
@@ -2325,52 +2334,54 @@
     def pcfmt(value, total):
         return (value, 100 * float(value) / total)
 
-    ui.write('format : %d\n' % format)
-    ui.write('flags  : %s\n' % ', '.join(flags))
+    ui.write(('format : %d\n') % format)
+    ui.write(('flags  : %s\n') % ', '.join(flags))
 
     ui.write('\n')
     fmt = pcfmtstr(totalsize)
     fmt2 = dfmtstr(totalsize)
-    ui.write('revisions     : ' + fmt2 % numrevs)
-    ui.write('    merges    : ' + fmt % pcfmt(nummerges, numrevs))
-    ui.write('    normal    : ' + fmt % pcfmt(numrevs - nummerges, numrevs))
-    ui.write('revisions     : ' + fmt2 % numrevs)
-    ui.write('    full      : ' + fmt % pcfmt(numfull, numrevs))
-    ui.write('    deltas    : ' + fmt % pcfmt(numdeltas, numrevs))
-    ui.write('revision size : ' + fmt2 % totalsize)
-    ui.write('    full      : ' + fmt % pcfmt(fulltotal, totalsize))
-    ui.write('    deltas    : ' + fmt % pcfmt(deltatotal, totalsize))
+    ui.write(('revisions     : ') + fmt2 % numrevs)
+    ui.write(('    merges    : ') + fmt % pcfmt(nummerges, numrevs))
+    ui.write(('    normal    : ') + fmt % pcfmt(numrevs - nummerges, numrevs))
+    ui.write(('revisions     : ') + fmt2 % numrevs)
+    ui.write(('    full      : ') + fmt % pcfmt(numfull, numrevs))
+    ui.write(('    deltas    : ') + fmt % pcfmt(numdeltas, numrevs))
+    ui.write(('revision size : ') + fmt2 % totalsize)
+    ui.write(('    full      : ') + fmt % pcfmt(fulltotal, totalsize))
+    ui.write(('    deltas    : ') + fmt % pcfmt(deltatotal, totalsize))
 
     ui.write('\n')
     fmt = dfmtstr(max(avgchainlen, compratio))
-    ui.write('avg chain length  : ' + fmt % avgchainlen)
-    ui.write('compression ratio : ' + fmt % compratio)
+    ui.write(('avg chain length  : ') + fmt % avgchainlen)
+    ui.write(('compression ratio : ') + fmt % compratio)
 
     if format > 0:
         ui.write('\n')
-        ui.write('uncompressed data size (min/max/avg) : %d / %d / %d\n'
+        ui.write(('uncompressed data size (min/max/avg) : %d / %d / %d\n')
                  % tuple(datasize))
-    ui.write('full revision size (min/max/avg)     : %d / %d / %d\n'
+    ui.write(('full revision size (min/max/avg)     : %d / %d / %d\n')
              % tuple(fullsize))
-    ui.write('delta size (min/max/avg)             : %d / %d / %d\n'
+    ui.write(('delta size (min/max/avg)             : %d / %d / %d\n')
              % tuple(deltasize))
 
     if numdeltas > 0:
         ui.write('\n')
         fmt = pcfmtstr(numdeltas)
         fmt2 = pcfmtstr(numdeltas, 4)
-        ui.write('deltas against prev  : ' + fmt % pcfmt(numprev, numdeltas))
+        ui.write(('deltas against prev  : ') + fmt % pcfmt(numprev, numdeltas))
         if numprev > 0:
-            ui.write('    where prev = p1  : ' + fmt2 % pcfmt(nump1prev,
+            ui.write(('    where prev = p1  : ') + fmt2 % pcfmt(nump1prev,
                                                               numprev))
-            ui.write('    where prev = p2  : ' + fmt2 % pcfmt(nump2prev,
+            ui.write(('    where prev = p2  : ') + fmt2 % pcfmt(nump2prev,
                                                               numprev))
-            ui.write('    other            : ' + fmt2 % pcfmt(numoprev,
+            ui.write(('    other            : ') + fmt2 % pcfmt(numoprev,
                                                               numprev))
         if gdelta:
-            ui.write('deltas against p1    : ' + fmt % pcfmt(nump1, numdeltas))
-            ui.write('deltas against p2    : ' + fmt % pcfmt(nump2, numdeltas))
-            ui.write('deltas against other : ' + fmt % pcfmt(numother,
+            ui.write(('deltas against p1    : ')
+                     + fmt % pcfmt(nump1, numdeltas))
+            ui.write(('deltas against p2    : ')
+                     + fmt % pcfmt(nump2, numdeltas))
+            ui.write(('deltas against other : ') + fmt % pcfmt(numother,
                                                              numdeltas))
 
 @command('debugrevspec', [], ('REVSPEC'))
@@ -2448,9 +2459,63 @@
 def debugsub(ui, repo, rev=None):
     ctx = scmutil.revsingle(repo, rev, None)
     for k, v in sorted(ctx.substate.items()):
-        ui.write('path %s\n' % k)
-        ui.write(' source   %s\n' % v[0])
-        ui.write(' revision %s\n' % v[1])
+        ui.write(('path %s\n') % k)
+        ui.write((' source   %s\n') % v[0])
+        ui.write((' revision %s\n') % v[1])
+
+@command('debugsuccessorssets',
+    [],
+    _('[REV]'))
+def debugsuccessorssets(ui, repo, *revs):
+    """show set of successors for revision
+
+    A successors set of changeset A is a consistent group of revisions that
+    succeed A. It contains non-obsolete changesets only.
+
+    In most cases a changeset A has a single successors set containing a
+    single successor (changeset A replaced by A').
+
+    A changeset that is made obsolete with no successors is called "pruned".
+    Such changesets have no successors sets at all.
+
+    A changeset that has been "split" will have a successors set containing
+    more than one successor.
+
+    A changeset that has been rewritten in multiple different ways is called
+    "divergent". Such changesets have multiple successor sets (each of which
+    may also be split, i.e. have multiple successors).
+
+    Results are displayed as follows::
+
+        <rev1>
+            <successors-1A>
+        <rev2>
+            <successors-2A>
+            <successors-2B1> <successors-2B2> <successors-2B3>
+
+    Here rev2 has two possible (i.e. divergent) successors sets. The first
+    holds one element, whereas the second holds three (i.e. the changeset has
+    been split).
+    """
+    # passed to successorssets caching computation from one call to another
+    cache = {}
+    ctx2str = str
+    node2str = short
+    if ui.debug():
+        def ctx2str(ctx):
+            return ctx.hex()
+        node2str = hex
+    for rev in scmutil.revrange(repo, revs):
+        ctx = repo[rev]
+        ui.write('%s\n' % ctx2str(ctx))
+        for succsset in obsolete.successorssets(repo, ctx.node(), cache):
+            if succsset:
+                ui.write('    ')
+                ui.write(node2str(succsset[0]))
+                for node in succsset[1:]:
+                    ui.write(' ')
+                    ui.write(node2str(node))
+            ui.write('\n')
 
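A sketch of how the cases in the docstring map onto obsolete.successorssets() results (hypothetical helper, not part of the patch):

    def classify(repo, node):
        # pruned: no successors sets; divergent: more than one set;
        # split: a single set with several members
        ss = obsolete.successorssets(repo, node)
        if not ss:
            return 'pruned'
        if len(ss) > 1:
            return 'divergent'
        return len(ss[0]) > 1 and 'split' or 'replaced'
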
 @command('debugwalk', walkopts, _('[OPTION]... [FILE]...'))
 def debugwalk(ui, repo, *pats, **opts):
@@ -2823,13 +2888,27 @@
 
     wlock = repo.wlock()
     try:
+        current = repo['.']
         for pos, ctx in enumerate(repo.set("%ld", revs)):
-            current = repo['.']
 
             ui.status(_('grafting revision %s\n') % ctx.rev())
             if opts.get('dry_run'):
                 continue
 
+            source = ctx.extra().get('source')
+            if not source:
+                source = ctx.hex()
+            extra = {'source': source}
+            user = ctx.user()
+            if opts.get('user'):
+                user = opts['user']
+            date = ctx.date()
+            if opts.get('date'):
+                date = opts['date']
+            message = ctx.description()
+            if opts.get('log'):
+                message += '\n(grafted from %s)' % ctx.hex()
+
             # we don't merge the first commit when continuing
             if not cont:
                 # perform the graft merge with p1(rev) as 'ancestor'
@@ -2858,29 +2937,18 @@
             cmdutil.duplicatecopies(repo, ctx.rev(), ctx.p1().rev())
 
             # commit
-            source = ctx.extra().get('source')
-            if not source:
-                source = ctx.hex()
-            extra = {'source': source}
-            user = ctx.user()
-            if opts.get('user'):
-                user = opts['user']
-            date = ctx.date()
-            if opts.get('date'):
-                date = opts['date']
-            message = ctx.description()
-            if opts.get('log'):
-                message += '\n(grafted from %s)' % ctx.hex()
             node = repo.commit(text=message, user=user,
                         date=date, extra=extra, editor=editor)
             if node is None:
                 ui.status(_('graft for revision %s is empty\n') % ctx.rev())
+            else:
+                current = repo[node]
     finally:
         wlock.release()
 
     # remove state when we complete successfully
-    if not opts.get('dry_run') and os.path.exists(repo.join('graftstate')):
-        util.unlinkpath(repo.join('graftstate'))
+    if not opts.get('dry_run'):
+        util.unlinkpath(repo.join('graftstate'), ignoremissing=True)
 
     return 0
 
@@ -3564,7 +3632,7 @@
                 bms = [bm for bm, bmr in peer.listkeys('bookmarks').iteritems()
                        if bmr == hexremoterev]
 
-            return bms
+            return sorted(bms)
 
         if bookmarks:
             output.extend(getbms())
@@ -4024,7 +4092,6 @@
      _('show changesets within the given named branch'), _('BRANCH')),
     ('P', 'prune', [],
      _('do not display revision or any of its ancestors'), _('REV')),
-    ('', 'hidden', False, _('show hidden changesets (DEPRECATED)')),
     ] + logopts + walkopts,
     _('[OPTION]... [FILE]'))
 def log(ui, repo, *pats, **opts):
@@ -4140,8 +4207,6 @@
             return
         if opts.get('branch') and ctx.branch() not in opts['branch']:
             return
-        if not opts.get('hidden') and ctx.hidden():
-            return
         if df and not df(ctx.date()[0]):
             return
 
@@ -4207,6 +4272,9 @@
 
     Returns 0 on success.
     """
+
+    fm = ui.formatter('manifest', opts)
+
     if opts.get('all'):
         if rev or node:
             raise util.Abort(_("can't specify a revision with --all"))
@@ -4224,7 +4292,9 @@
         finally:
             lock.release()
         for f in res:
-            ui.write("%s\n" % f)
+            fm.startitem()
+            fm.write("path", '%s\n', f)
+        fm.end()
         return
 
     if rev and node:
@@ -4233,14 +4303,17 @@
     if not node:
         node = rev
 
-    decor = {'l':'644 @ ', 'x':'755 * ', '':'644   '}
+    char = {'l': '@', 'x': '*', '': ''}
+    mode = {'l': '644', 'x': '755', '': '644'}
     ctx = scmutil.revsingle(repo, node)
+    mf = ctx.manifest()
     for f in ctx:
-        if ui.debugflag:
-            ui.write("%40s " % hex(ctx.manifest()[f]))
-        if ui.verbose:
-            ui.write(decor[ctx.flags(f)])
-        ui.write("%s\n" % f)
+        fm.startitem()
+        fl = ctx[f].flags()
+        fm.condwrite(ui.debugflag, 'hash', '%s ', hex(mf[f]))
+        fm.condwrite(ui.verbose, 'mode type', '%s %1s ', mode[fl], char[fl])
+        fm.write('path', '%s\n', f)
+    fm.end()
 
 @command('^merge',
     [('f', 'force', None, _('force a merge with outstanding changes')),
@@ -4556,10 +4629,14 @@
                 phases.retractboundary(repo, targetphase, nodes)
         finally:
             lock.release()
-        newdata = repo._phasecache.getphaserevs(repo)
+        # moving revisions from public to draft may hide them;
+        # we have to check the result on an unfiltered repository
+        unfi = repo.unfiltered()
+        newdata = repo._phasecache.getphaserevs(unfi)
         changes = sum(o != newdata[i] for i, o in enumerate(olddata))
+        cl = unfi.changelog
         rejected = [n for n in nodes
-                    if newdata[repo[n].rev()] < targetphase]
+                    if newdata[cl.rev(n)] < targetphase]
         if rejected:
             ui.warn(_('cannot move %i changesets to a more permissive '
                       'phase, use --force\n') % len(rejected))
@@ -4666,11 +4743,12 @@
 
     # update specified bookmarks
     if opts.get('bookmark'):
+        marks = repo._bookmarks
         for b in opts['bookmark']:
             # explicit pull overrides local bookmark if any
             ui.status(_("importing bookmark %s\n") % b)
-            repo._bookmarks[b] = repo[rb[b]].node()
-        bookmarks.write(repo)
+            marks[b] = repo[rb[b]].node()
+        marks.write()
 
     return ret
 
@@ -4861,8 +4939,7 @@
     elif after:
         list = deleted
         for f in modified + added + clean:
-            ui.warn(_('not removing %s: file still exists (use -f'
-                      ' to force removal)\n') % m.rel(f))
+            ui.warn(_('not removing %s: file still exists\n') % m.rel(f))
             ret = 1
     else:
         list = deleted + clean
@@ -4885,11 +4962,7 @@
             for f in list:
                 if f in added:
                     continue # we never unlink added files on remove
-                try:
-                    util.unlinkpath(repo.wjoin(f))
-                except OSError, inst:
-                    if inst.errno != errno.ENOENT:
-                        raise
+                util.unlinkpath(repo.wjoin(f), ignoremissing=True)
         repo[None].forget(list)
     finally:
         wlock.release()
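This hunk and the graft hunk above both lean on the new ignoremissing argument to util.unlinkpath. A simplified sketch of its semantics (the real helper also prunes now-empty parent directories):

    import errno, os

    def unlinkpath(f, ignoremissing=False):
        # unlink f; with ignoremissing, an already-absent file is not an error
        try:
            os.unlink(f)
        except OSError, inst:
            if not (ignoremissing and inst.errno == errno.ENOENT):
                raise
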
@@ -5427,17 +5500,16 @@
         copy = copies.pathcopies(repo[node1], repo[node2])
 
     fm = ui.formatter('status', opts)
-    format = '%s %s' + end
-    if opts.get('no_status'):
-        format = '%.0s%s' + end
+    fmt = '%s' + end
+    showchar = not opts.get('no_status')
 
     for state, char, files in changestates:
         if state in show:
             label = 'status.' + state
             for f in files:
                 fm.startitem()
-                fm.write("status path", format, char,
-                         repo.pathto(f, cwd), label=label)
+                fm.condwrite(showchar, 'status', '%s ', char, label=label)
+                fm.write('path', fmt, repo.pathto(f, cwd), label=label)
                 if f in copy:
                     fm.write("copy", '  %s' + end, repo.pathto(copy[f], cwd),
                              label='status.copied')
@@ -5743,7 +5815,7 @@
         release(lock, wlock)
 
 @command('tags', [], '')
-def tags(ui, repo):
+def tags(ui, repo, **opts):
     """list repository tags
 
     This lists both regular and local tags. When the -v/--verbose
@@ -5752,27 +5824,27 @@
     Returns 0 on success.
     """
 
+    fm = ui.formatter('tags', opts)
     hexfunc = ui.debugflag and hex or short
     tagtype = ""
 
     for t, n in reversed(repo.tagslist()):
-        if ui.quiet:
-            ui.write("%s\n" % t, label='tags.normal')
-            continue
-
         hn = hexfunc(n)
-        r = "%5d:%s" % (repo.changelog.rev(n), hn)
-        rev = ui.label(r, 'log.changeset changeset.%s' % repo[n].phasestr())
-        spaces = " " * (30 - encoding.colwidth(t))
-
-        tag = ui.label(t, 'tags.normal')
-        if ui.verbose:
-            if repo.tagtype(t) == 'local':
-                tagtype = " local"
-                tag = ui.label(t, 'tags.local')
-            else:
-                tagtype = ""
-        ui.write("%s%s %s%s\n" % (tag, spaces, rev, tagtype))
+        label = 'tags.normal'
+        tagtype = ''
+        if repo.tagtype(t) == 'local':
+            label = 'tags.local'
+            tagtype = 'local'
+
+        fm.startitem()
+        fm.write('tag', '%s', t, label=label)
+        fmt = " " * (30 - encoding.colwidth(t)) + ' %5d:%s'
+        fm.condwrite(not ui.quiet, 'rev id', fmt,
+                     repo.changelog.rev(n), hn, label=label)
+        fm.condwrite(ui.verbose and tagtype, 'type', ' %s',
+                     tagtype, label=label)
+        fm.plain('\n')
+    fm.end()
 
 @command('tip',
     [('p', 'patch', None, _('show patch')),
diff -r 7648b87e76db -r f5fbe15ca744 mercurial/commandserver.py
--- a/mercurial/commandserver.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/mercurial/commandserver.py	Sat Jan 19 17:24:33 2013 -0600
@@ -42,7 +42,7 @@
 
     def __getattr__(self, attr):
         if attr in ('isatty', 'fileno'):
-            raise AttributeError, attr
+            raise AttributeError(attr)
         return getattr(self.in_, attr)
 
 class channeledinput(object):
@@ -122,7 +122,7 @@
 
     def __getattr__(self, attr):
         if attr in ('isatty', 'fileno'):
-            raise AttributeError, attr
+            raise AttributeError(attr)
         return getattr(self.in_, attr)
 
 class server(object):
@@ -220,7 +220,7 @@
                     'getencoding' : getencoding}
 
     def serve(self):
-        hellomsg = 'capabilities: ' + ' '.join(self.capabilities.keys())
+        hellomsg = 'capabilities: ' + ' '.join(sorted(self.capabilities))
         hellomsg += '\n'
         hellomsg += 'encoding: ' + encoding.encoding
 
diff -r 7648b87e76db -r f5fbe15ca744 mercurial/context.py
--- a/mercurial/context.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/mercurial/context.py	Sat Jan 19 17:24:33 2013 -0600
@@ -12,6 +12,7 @@
 import match as matchmod
 import os, errno, stat
 import obsolete as obsmod
+import repoview
 
 propertycache = util.propertycache
 
@@ -25,8 +26,12 @@
         self._repo = repo
 
         if isinstance(changeid, int):
+            try:
+                self._node = repo.changelog.node(changeid)
+            except IndexError:
+                raise error.RepoLookupError(
+                    _("unknown revision '%s'") % changeid)
             self._rev = changeid
-            self._node = repo.changelog.node(changeid)
             return
         if isinstance(changeid, long):
             changeid = str(changeid)
@@ -62,7 +67,7 @@
             self._rev = r
             self._node = repo.changelog.node(r)
             return
-        except (ValueError, OverflowError):
+        except (ValueError, OverflowError, IndexError):
             pass
 
         if len(changeid) == 40:
@@ -95,7 +100,10 @@
 
         # lookup failed
         # check if it might have come from damaged dirstate
-        if changeid in repo.dirstate.parents():
+        #
+        # XXX we could avoid the unfiltered if we had a recognizable exception
+        # for filtered changeset access
+        if changeid in repo.unfiltered().dirstate.parents():
             raise error.Abort(_("working directory has unknown parent '%s'!")
                               % short(changeid))
         try:
@@ -204,7 +212,7 @@
     def mutable(self):
         return self.phase() > phases.public
     def hidden(self):
-        return self._rev in self._repo.hiddenrevs
+        return self._rev in repoview.filterrevs(self._repo, 'visible')
 
     def parents(self):
         """return contexts for each parent changeset"""
@@ -250,6 +258,34 @@
         """
         return self.rev() in obsmod.getrevs(self._repo, 'bumped')
 
+    def divergent(self):
+        """Is a successors of a changeset with multiple possible successors set
+
+        Only non-public and non-obsolete changesets may be divergent.
+        """
+        return self.rev() in obsmod.getrevs(self._repo, 'divergent')
+
+    def troubled(self):
+        """True if the changeset is either unstable, bumped or divergent"""
+        return self.unstable() or self.bumped() or self.divergent()
+
+    def troubles(self):
+        """return the list of troubles affecting this changesets.
+
+        Troubles are returned as strings. possible values are:
+        - unstable,
+        - bumped,
+        - divergent.
+        """
+        troubles = []
+        if self.unstable():
+            troubles.append('unstable')
+        if self.bumped():
+            troubles.append('bumped')
+        if self.divergent():
+            troubles.append('divergent')
+        return troubles
+
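As an illustration of how these predicates compose, a hypothetical caller (not part of the patch) could report every troubled draft changeset like so:

    for rev in repo.revs('draft()'):
        ctx = repo[rev]
        if ctx.troubled():
            ui.warn('%s: %s\n' % (ctx, ', '.join(ctx.troubles())))
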
     def _fileinfo(self, path):
         if '_manifest' in self.__dict__:
             try:
@@ -352,6 +388,9 @@
     def dirs(self):
         return self._dirs
 
+    def dirty(self):
+        return False
+
 class filectx(object):
     """A filecontext object makes access to data related to a particular
        filerevision convenient."""
@@ -380,7 +419,26 @@
 
     @propertycache
     def _changectx(self):
-        return changectx(self._repo, self._changeid)
+        try:
+            return changectx(self._repo, self._changeid)
+        except error.RepoLookupError:
+            # Linkrev may point to any revision in the repository. When the
+            # repository is filtered this may lead to `filectx` trying to
+            # build `changectx` for a filtered revision. In that case we fall
+            # back to creating `changectx` on the unfiltered version of the
+            # repository. This fallback should not be an issue because
+            # `changectx` objects obtained from `filectx` are not used in
+            # complex operations that care about filtering.
+            #
+            # This fallback is a cheap and dirty fix that prevents several
+            # crashes. It does not ensure the behavior is correct. However the
+            # behavior was not correct before filtering either, and "incorrect
+            # behavior" is seen as better than "crash".
+            #
+            # Linkrevs have several serious problems with filtering that are
+            # complicated to solve. Proper handling of the issue here should
+            # be considered when solving the linkrev issues is on the table.
+            return changectx(self._repo.unfiltered(), self._changeid)
 
     @propertycache
     def _filelog(self):
@@ -977,13 +1035,13 @@
         return self._parents[0].ancestor(c2) # punt on two parents for now
 
     def walk(self, match):
-        return sorted(self._repo.dirstate.walk(match, self.substate.keys(),
+        return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
                                                True, False))
 
     def dirty(self, missing=False, merge=True, branch=True):
         "check whether a working directory is modified"
         # check subrepos first
-        for s in self.substate:
+        for s in sorted(self.substate):
             if self.sub(s).dirty():
                 return True
         # check current working dir
diff -r 7648b87e76db -r f5fbe15ca744 mercurial/copies.py
--- a/mercurial/copies.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/mercurial/copies.py	Sat Jan 19 17:24:33 2013 -0600
@@ -145,12 +145,16 @@
 
     return cm
 
-def _backwardcopies(a, b):
-    # because the forward mapping is 1:n, we can lose renames here
-    # in particular, we find renames better than copies
+def _backwardrenames(a, b):
+    # Even though we're not taking copies into account, 1:n rename situations
+    # can still exist (e.g. hg cp a b; hg mv a c). In those cases we
+    # arbitrarily pick one of the renames.
     f = _forwardcopies(b, a)
     r = {}
-    for k, v in f.iteritems():
+    for k, v in sorted(f.iteritems()):
+        # remove copies
+        if v in a:
+            continue
         r[v] = k
     return r
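Concretely, for the scenario in the comment: after hg cp a b; hg mv a c, the forward map relates both b and c back to a. Re-running the loop above with plain dicts (toy values):

    # forward copies between the two contexts, destination -> source
    forward = {'b': 'a', 'c': 'a'}
    newctx = set(['b', 'c'])        # files present on the newer side

    backward = {}
    for dst, src in sorted(forward.iteritems()):
        if src in newctx:           # source survived: a copy, skip it
            continue
        backward[src] = dst         # later keys win: arbitrary but stable
    # backward == {'a': 'c'}
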
 
@@ -162,19 +166,25 @@
     if a == x:
         return _forwardcopies(x, y)
     if a == y:
-        return _backwardcopies(x, y)
-    return _chain(x, y, _backwardcopies(x, a), _forwardcopies(a, y))
+        return _backwardrenames(x, y)
+    return _chain(x, y, _backwardrenames(x, a), _forwardcopies(a, y))
 
 def mergecopies(repo, c1, c2, ca):
     """
     Find moves and copies between context c1 and c2 that are relevant
     for merging.
 
-    Returns two dicts, "copy" and "diverge".
+    Returns four dicts: "copy", "movewithdir", "diverge", and
+    "renamedelete".
 
     "copy" is a mapping from destination name -> source name,
     where source is in c1 and destination is in c2 or vice-versa.
 
+    "movewithdir" is a mapping from source name -> destination name,
+    where the file at source present in one context but not the other
+    needs to be moved to destination by the merge process, because the
+    other context moved the directory it is in.
+
     "diverge" is a mapping of source name -> list of destination names
     for divergent renames.
 
@@ -183,16 +193,16 @@
     """
     # avoid silly behavior for update from empty dir
     if not c1 or not c2 or c1 == c2:
-        return {}, {}, {}
+        return {}, {}, {}, {}
 
     # avoid silly behavior for parent -> working dir
     if c2.node() is None and c1.node() == repo.dirstate.p1():
-        return repo.dirstate.copies(), {}, {}
+        return repo.dirstate.copies(), {}, {}, {}
 
     limit = _findlimit(repo, c1.rev(), c2.rev())
     if limit is None:
         # no common ancestor, no copies
-        return {}, {}, {}
+        return {}, {}, {}, {}
     m1 = c1.manifest()
     m2 = c2.manifest()
     ma = ca.manifest()
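Call sites now unpack four dictionaries; a hypothetical caller, using the names from the docstring:

    copy, movewithdir, diverge, renamedelete = copies.mergecopies(repo, c1, c2, ca)
    for src, dst in sorted(movewithdir.iteritems()):
        # files to relocate because the other side moved their directory
        repo.ui.debug('move %s -> %s with its directory\n' % (src, dst))
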
@@ -206,6 +216,7 @@
 
     ctx = util.lrucachefunc(makectx)
     copy = {}
+    movewithdir = {}
     fullcopy = {}
     diverge = {}
 
@@ -303,7 +314,7 @@
     if fullcopy:
         repo.ui.debug("  all copies found (* = to merge, ! = divergent, "
                       "% = renamed and deleted):\n")
-        for f in fullcopy:
+        for f in sorted(fullcopy):
             note = ""
             if f in copy:
                 note += "*"
@@ -311,11 +322,12 @@
                 note += "!"
             if f in renamedelete2:
                 note += "%"
-            repo.ui.debug("   %s -> %s %s\n" % (f, fullcopy[f], note))
+            repo.ui.debug("   src: '%s' -> dst: '%s' %s\n" % (fullcopy[f], f,
+                                                              note))
     del diverge2
 
     if not fullcopy:
-        return copy, diverge, renamedelete
+        return copy, movewithdir, diverge, renamedelete
 
     repo.ui.debug("  checking for directory renames\n")
 
@@ -352,10 +364,11 @@
     del d1, d2, invalid
 
     if not dirmove:
-        return copy, diverge, renamedelete
+        return copy, movewithdir, diverge, renamedelete
 
     for d in dirmove:
-        repo.ui.debug("  dir %s -> %s\n" % (d, dirmove[d]))
+        repo.ui.debug("   discovered dir src: '%s' -> dst: '%s'\n" %
+                      (d, dirmove[d]))
 
     # check unaccounted nonoverlapping files against directory moves
     for f in u1 + u2:
@@ -365,8 +378,9 @@
                     # new file added in a directory that was moved, move it
                     df = dirmove[d] + f[len(d):]
                     if df not in copy:
-                        copy[f] = df
-                        repo.ui.debug("  file %s -> %s\n" % (f, copy[f]))
+                        movewithdir[f] = df
+                        repo.ui.debug(("   pending file src: '%s' -> "
+                                       "dst: '%s'\n") % (f, df))
                     break
 
-    return copy, diverge, renamedelete
+    return copy, movewithdir, diverge, renamedelete
diff -r 7648b87e76db -r f5fbe15ca744 mercurial/dirstate.py
--- a/mercurial/dirstate.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/mercurial/dirstate.py	Sat Jan 19 17:24:33 2013 -0600
@@ -265,6 +265,12 @@
         try:
             f.write(self._branch + '\n')
             f.close()
+
+            # make sure filecache has the correct stat info for _branch after
+            # replacing the underlying file
+            ce = self._filecache['_branch']
+            if ce:
+                ce.refresh()
         except: # re-raises
             f.discard()
             raise
@@ -607,7 +613,7 @@
             normalize = self._normalize
             skipstep3 = False
         else:
-            normalize = lambda x, y, z: x
+            normalize = None
 
         files = sorted(match.files())
         subrepos.sort()
@@ -628,7 +634,10 @@
 
         # step 1: find all explicit files
         for ff in files:
-            nf = normalize(normpath(ff), False, True)
+            if normalize:
+                nf = normalize(normpath(ff), False, True)
+            else:
+                nf = normpath(ff)
             if nf in results:
                 continue
 
@@ -678,7 +687,10 @@
                     continue
                 raise
             for f, kind, st in entries:
-                nf = normalize(nd and (nd + "/" + f) or f, True, True)
+                if normalize:
+                    nf = normalize(nd and (nd + "/" + f) or f, True, True)
+                else:
+                    nf = nd and (nd + "/" + f) or f
                 if nf not in results:
                     if kind == dirkind:
                         if not ignore(nf):
@@ -698,11 +710,9 @@
         # step 3: report unseen items in the dmap hash
         if not skipstep3 and not exact:
             visit = sorted([f for f in dmap if f not in results and matchfn(f)])
-            for nf, st in zip(visit, util.statfiles([join(i) for i in visit])):
-                if (not st is None and
-                    getkind(st.st_mode) not in (regkind, lnkkind)):
-                    st = None
-                results[nf] = st
+            # pair each stat result with its filename via the shared iterator
+            nf = iter(visit).next
+            for st in util.statfiles([join(i) for i in visit]):
+                results[nf()] = st
         for s in subrepos:
             del results[s]
         del results['.hg']
@@ -748,13 +758,19 @@
         radd = removed.append
         dadd = deleted.append
         cadd = clean.append
+        mexact = match.exact
+        dirignore = self._dirignore
+        checkexec = self._checkexec
+        checklink = self._checklink
+        copymap = self._copymap
+        lastnormaltime = self._lastnormaltime
 
         lnkkind = stat.S_IFLNK
 
         for fn, st in self.walk(match, subrepos, listunknown,
                                 listignored).iteritems():
             if fn not in dmap:
-                if (listignored or match.exact(fn)) and self._dirignore(fn):
+                if (listignored or mexact(fn)) and dirignore(fn):
                     if listignored:
                         iadd(fn)
                 elif listunknown:
@@ -773,15 +789,15 @@
                 mtime = int(st.st_mtime)
                 if (size >= 0 and
                     ((size != st.st_size and size != st.st_size & _rangemask)
-                     or ((mode ^ st.st_mode) & 0100 and self._checkexec))
-                    and (mode & lnkkind != lnkkind or self._checklink)
+                     or ((mode ^ st.st_mode) & 0100 and checkexec))
+                    and (mode & lnkkind != lnkkind or checklink)
                     or size == -2 # other parent
-                    or fn in self._copymap):
+                    or fn in copymap):
                     madd(fn)
                 elif ((time != mtime and time != mtime & _rangemask)
-                      and (mode & lnkkind != lnkkind or self._checklink)):
+                      and (mode & lnkkind != lnkkind or checklink)):
                     ladd(fn)
-                elif mtime == self._lastnormaltime:
+                elif mtime == lastnormaltime:
                     # fn may have been changed in the same timeslot without
                     # changing its size. This can happen if we quickly do
                     # multiple commits in a single transaction.
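The block of local aliases added at the top of this function is a standard CPython hot-loop optimization: binding bound methods and attributes to locals once replaces a LOAD_ATTR lookup on every iteration. A self-contained toy example of the same pattern:

    data = range(1000000)
    out = []
    oadd = out.append               # bound method hoisted out of the loop
    for n in data:
        oadd(n * 2)                 # no out.append attribute lookup per item
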
diff -r 7648b87e76db -r f5fbe15ca744 mercurial/discovery.py
--- a/mercurial/discovery.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/mercurial/discovery.py	Sat Jan 19 17:24:33 2013 -0600
@@ -8,6 +8,7 @@
 from node import nullid, short
 from i18n import _
 import util, setdiscovery, treediscovery, phases, obsolete, bookmarks
+import branchmap
 
 def findcommonincoming(repo, remote, heads=None, force=False):
     """Return a tuple (common, anyincoming, heads) used to identify the common
@@ -114,7 +115,7 @@
         og.missingheads = onlyheads or repo.heads()
     elif onlyheads is None:
         # use visible heads as it should be cached
-        og.missingheads = visibleheads(repo)
+        og.missingheads = repo.filtered("served").heads()
         og.excluded = [ctx.node() for ctx in repo.set('secret() or extinct()')]
     else:
         # compute common, missing and exclude secret stuff
@@ -192,9 +193,10 @@
 
     # D. Update newmap with outgoing changes.
     # This will possibly add new heads and remove existing ones.
-    newmap = dict((branch, heads[1]) for branch, heads in headssum.iteritems()
-                  if heads[0] is not None)
-    repo._updatebranchcache(newmap, missingctx)
+    newmap = branchmap.branchcache((branch, heads[1])
+                                 for branch, heads in headssum.iteritems()
+                                 if heads[0] is not None)
+    newmap.update(repo, (ctx.rev() for ctx in missingctx))
     for branch, newheads in newmap.iteritems():
         headssum[branch][1][:] = newheads
     return headssum
@@ -205,7 +207,7 @@
     cl = repo.changelog
     # 1-4b. old servers: Check for new topological heads.
     # Construct {old,new}map with branch = None (topological branch).
-    # (code based on _updatebranchcache)
+    # (code based on update)
     oldheads = set(h for h in remoteheads if h in cl.nodemap)
     # all nodes in outgoing.missing are children of either:
     # - an element of oldheads
@@ -266,7 +268,7 @@
     allmissing = set(outgoing.missing)
     allfuturecommon = set(c.node() for c in repo.set('%ld', outgoing.common))
     allfuturecommon.update(allmissing)
-    for branch, heads in headssum.iteritems():
+    for branch, heads in sorted(headssum.iteritems()):
         if heads[0] is None:
             # Maybe we should abort if we push more that one head
             # for new branches ?
@@ -310,7 +312,7 @@
             unsynced = True
         if len(newhs) > len(oldhs):
             # strip updates to existing remote heads from the new heads list
-            dhs = list(newhs - bookmarkedheads - oldhs)
+            dhs = sorted(newhs - bookmarkedheads - oldhs)
         if dhs:
             if error is None:
                 if branch not in ('default', None):
@@ -335,43 +337,3 @@
     # 6. Check for unsynced changes on involved branches.
     if unsynced:
         repo.ui.warn(_("note: unsynced remote changes!\n"))
-
-def visibleheads(repo):
-    """return the set of visible head of this repo"""
-    # XXX we want a cache on this
-    sroots = repo._phasecache.phaseroots[phases.secret]
-    if sroots or repo.obsstore:
-        # XXX very slow revset. storing heads or secret "boundary"
-        # would help.
-        revset = repo.set('heads(not (%ln:: + extinct()))', sroots)
-
-        vheads = [ctx.node() for ctx in revset]
-        if not vheads:
-            vheads.append(nullid)
-    else:
-        vheads = repo.heads()
-    return vheads
-
-
-def visiblebranchmap(repo):
-    """return a branchmap for the visible set"""
-    # XXX Recomputing this data on the fly is very slow.  We should build a
-    # XXX cached version while computing the standard branchmap version.
-    sroots = repo._phasecache.phaseroots[phases.secret]
-    if sroots or repo.obsstore:
-        vbranchmap = {}
-        for branch, nodes in  repo.branchmap().iteritems():
-            # search for secret heads.
-            for n in nodes:
-                if repo[n].phase() >= phases.secret:
-                    nodes = None
-                    break
-            # if secret heads were found we must compute them again
-            if nodes is None:
-                s = repo.set('heads(branch(%s) - secret() - extinct())',
-                             branch)
-                nodes = [c.node() for c in s]
-            vbranchmap[branch] = nodes
-    else:
-        vbranchmap = repo.branchmap()
-    return vbranchmap
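The two removed helpers are subsumed by the new repoview machinery: filtering the repository once yields a view whose ordinary methods already exclude secret and extinct changesets. Roughly:

    served = repo.filtered('served')   # view without secret/hidden changesets
    vheads = served.heads()            # replaces visibleheads(repo)
    vbranchmap = served.branchmap()    # replaces visiblebranchmap(repo)
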
diff -r 7648b87e76db -r f5fbe15ca744 mercurial/dispatch.py
--- a/mercurial/dispatch.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/mercurial/dispatch.py	Sat Jan 19 17:24:33 2013 -0600
@@ -183,8 +183,8 @@
         else:
             raise
     except OSError, inst:
-        if getattr(inst, "filename", None):
-            ui.warn(_("abort: %s: %s\n") % (inst.strerror, inst.filename))
+        if getattr(inst, "filename", None) is not None:
+            ui.warn(_("abort: %s: '%s'\n") % (inst.strerror, inst.filename))
         else:
             ui.warn(_("abort: %s\n") % inst.strerror)
     except KeyboardInterrupt:
@@ -710,6 +710,8 @@
                 repo = hg.repository(ui, path=path)
                 if not repo.local():
                     raise util.Abort(_("repository '%s' is not local") % path)
+                if options['hidden']:
+                    repo = repo.unfiltered()
                 repo.ui.setconfig("bundle", "mainreporoot", repo.root)
             except error.RequirementError:
                 raise
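With the per-command --hidden flag removed from log (see the earlier commands.py hunk), hiding is now undone globally here in dispatch: passing --hidden to any command swaps in the unfiltered repository, e.g.:

    $ hg log --hidden    # include changesets hidden by obsolescence
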
diff -r 7648b87e76db -r f5fbe15ca744 mercurial/encoding.py
--- a/mercurial/encoding.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/mercurial/encoding.py	Sat Jan 19 17:24:33 2013 -0600
@@ -80,8 +80,8 @@
     'foo: \\xc3\\xa4'
     >>> u2 = 'foo: \\xc3\\xa1'
     >>> d = { l: 1, tolocal(u2): 2 }
-    >>> d # no collision
-    {'foo: ?': 1, 'foo: ?': 2}
+    >>> len(d) # no collision
+    2
     >>> 'foo: ?' in d
     False
     >>> l1 = 'foo: \\xe4' # historical latin1 fallback
diff -r 7648b87e76db -r f5fbe15ca744 mercurial/filemerge.py
--- a/mercurial/filemerge.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/mercurial/filemerge.py	Sat Jan 19 17:24:33 2013 -0600
@@ -171,13 +171,15 @@
 
 def _premerge(repo, toolconf, files):
     tool, toolpath, binary, symlink = toolconf
+    if symlink:
+        return 1
     a, b, c, back = files
 
     ui = repo.ui
 
     # do we attempt to simplemerge first?
     try:
-        premerge = _toolbool(ui, tool, "premerge", not (binary or symlink))
+        premerge = _toolbool(ui, tool, "premerge", not binary)
     except error.ConfigError:
         premerge = _toolstr(ui, tool, "premerge").lower()
         valid = 'keep'.split()
@@ -204,6 +206,12 @@
     Uses the internal non-interactive simple merge algorithm for merging
     files. It will fail if there are any conflicts and leave markers in
     the partially merged file."""
+    tool, toolpath, binary, symlink = toolconf
+    if symlink:
+        repo.ui.warn(_('warning: internal:merge cannot merge symlinks '
+                       'for %s\n') % fcd.path())
+        return False, 1
+
     r = _premerge(repo, toolconf, files)
     if r:
         a, b, c, back = files
diff -r 7648b87e76db -r f5fbe15ca744 mercurial/fileset.py
--- a/mercurial/fileset.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/mercurial/fileset.py	Sat Jan 19 17:24:33 2013 -0600
@@ -373,7 +373,7 @@
     # i18n: "subrepo" is a keyword
     getargs(x, 0, 1, _("subrepo takes at most one argument"))
     ctx = mctx.ctx
-    sstate = ctx.substate
+    sstate = sorted(ctx.substate)
     if x:
         pat = getstring(x, _("subrepo requires a pattern or no arguments"))
 
diff -r 7648b87e76db -r f5fbe15ca744 mercurial/formatter.py
--- a/mercurial/formatter.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/mercurial/formatter.py	Sat Jan 19 17:24:33 2013 -0600
@@ -31,6 +31,10 @@
         '''do default text output while assigning data to item'''
         for k, v in zip(fields.split(), fielddata):
             self._item[k] = v
+    def condwrite(self, cond, fields, deftext, *fielddata, **opts):
+        '''do conditional write (primarily for plain formatter)'''
+        for k, v in zip(fields.split(), fielddata):
+            self._item[k] = v
     def plain(self, text, **opts):
         '''show raw text for non-templated mode'''
         pass
@@ -51,6 +55,10 @@
         pass
     def write(self, fields, deftext, *fielddata, **opts):
         self._ui.write(deftext % fielddata, **opts)
+    def condwrite(self, cond, fields, deftext, *fielddata, **opts):
+        '''do conditional write'''
+        if cond:
+            self._ui.write(deftext % fielddata, **opts)
     def plain(self, text, **opts):
         self._ui.write(text, **opts)
     def end(self):
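A sketch of how the new condwrite() is meant to be called, mirroring the tags call site earlier in this patch (fm, ui, and repo as in that command):

    fm = ui.formatter('tags', opts)
    for t, n in repo.tagslist():
        fm.startitem()
        fm.write('tag', '%s', t)                     # always recorded/printed
        fm.condwrite(not ui.quiet, 'rev id', ' %5d:%s',
                     repo.changelog.rev(n), hex(n))  # plain mode: only if cond holds
        fm.plain('\n')
    fm.end()
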
diff -r 7648b87e76db -r f5fbe15ca744 mercurial/hbisect.py
--- a/mercurial/hbisect.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/mercurial/hbisect.py	Sat Jan 19 17:24:33 2013 -0600
@@ -147,7 +147,7 @@
     f = repo.opener("bisect.state", "w", atomictemp=True)
     wlock = repo.wlock()
     try:
-        for kind in state:
+        for kind in sorted(state):
             for node in state[kind]:
                 f.write("%s %s\n" % (kind, hex(node)))
         f.close()
diff -r 7648b87e76db -r f5fbe15ca744 mercurial/help/config.txt
--- a/mercurial/help/config.txt	Mon Jan 14 23:14:45 2013 +0900
+++ b/mercurial/help/config.txt	Sat Jan 19 17:24:33 2013 -0600
@@ -850,14 +850,6 @@
   ``prompt``
     Always prompt for merge success, regardless of success reported by tool.
 
-``checkchanged``
-  True is equivalent to ``check = changed``.
-  Default: False
-
-``checkconflicts``
-  True is equivalent to ``check = conflicts``.
-  Default: False
-
 ``fixeol``
   Attempt to fix up EOL changes caused by the merge tool.
   Default: False
@@ -1295,6 +1287,10 @@
     (DEPRECATED) Whether to allow .zip downloading of repository
     revisions. Default is False. This feature creates temporary files.
 
+``archivesubrepos``
+    Whether to recurse into subrepositories when archiving. Default is
+    False.
+
 ``baseurl``
     Base URL to use when publishing URLs in other locations, so
     third-party tools like email notification hooks can construct
diff -r 7648b87e76db -r f5fbe15ca744 mercurial/hg.py
--- a/mercurial/hg.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/mercurial/hg.py	Sat Jan 19 17:24:33 2013 -0600
@@ -113,7 +113,7 @@
     if not repo:
         raise util.Abort(_("repository '%s' is not local") %
                          (path or peer.url()))
-    return repo
+    return repo.filtered('visible')
 
 def peer(uiorrepo, opts, path, create=False):
     '''return a repository peer for the specified path'''
@@ -171,11 +171,14 @@
     r = repository(ui, root)
 
     default = srcrepo.ui.config('paths', 'default')
-    if default:
-        fp = r.opener("hgrc", "w", text=True)
-        fp.write("[paths]\n")
-        fp.write("default = %s\n" % default)
-        fp.close()
+    if not default:
+        # set default to source for being able to clone subrepos
+        default = os.path.abspath(util.urllocalpath(origsource))
+    fp = r.opener("hgrc", "w", text=True)
+    fp.write("[paths]\n")
+    fp.write("default = %s\n" % default)
+    fp.close()
+    r.ui.setconfig('paths', 'default', default)
 
     if update:
         r.ui.status(_("updating working directory\n"))
@@ -288,17 +291,7 @@
         elif os.listdir(dest):
             raise util.Abort(_("destination '%s' is not empty") % dest)
 
-    class DirCleanup(object):
-        def __init__(self, dir_):
-            self.rmtree = shutil.rmtree
-            self.dir_ = dir_
-        def close(self):
-            self.dir_ = None
-        def cleanup(self):
-            if self.dir_:
-                self.rmtree(self.dir_, True)
-
-    srclock = destlock = dircleanup = None
+    srclock = destlock = cleandir = None
     srcrepo = srcpeer.local()
     try:
         abspath = origsource
@@ -306,7 +299,7 @@
             abspath = os.path.abspath(util.urllocalpath(origsource))
 
         if islocal(dest):
-            dircleanup = DirCleanup(dest)
+            cleandir = dest
 
         copy = False
         if (srcrepo and srcrepo.cancopy() and islocal(dest)
@@ -330,13 +323,13 @@
                 os.mkdir(dest)
             else:
                 # only clean up directories we create ourselves
-                dircleanup.dir_ = hgdir
+                cleandir = hgdir
             try:
                 destpath = hgdir
                 util.makedir(destpath, notindexed=True)
             except OSError, inst:
                 if inst.errno == errno.EEXIST:
-                    dircleanup.close()
+                    cleandir = None
                     raise util.Abort(_("destination '%s' already exists")
                                      % dest)
                 raise
@@ -364,7 +357,7 @@
                                 # only pass ui when no srcrepo
             except OSError, inst:
                 if inst.errno == errno.EEXIST:
-                    dircleanup.close()
+                    cleandir = None
                     raise util.Abort(_("destination '%s' already exists")
                                      % dest)
                 raise
@@ -384,21 +377,21 @@
             else:
                 raise util.Abort(_("clone from remote to remote not supported"))
 
-        if dircleanup:
-            dircleanup.close()
+        cleandir = None
 
         # clone all bookmarks except divergent ones
         destrepo = destpeer.local()
         if destrepo and srcpeer.capable("pushkey"):
             rb = srcpeer.listkeys('bookmarks')
+            marks = destrepo._bookmarks
             for k, n in rb.iteritems():
                 try:
                     m = destrepo.lookup(n)
-                    destrepo._bookmarks[k] = m
+                    marks[k] = m
                 except error.RepoLookupError:
                     pass
             if rb:
-                bookmarks.write(destrepo)
+                marks.write()
         elif srcrepo and destpeer.capable("pushkey"):
             for k, n in srcrepo._bookmarks.iteritems():
                 destpeer.pushkey('bookmarks', k, '', hex(n))
@@ -450,8 +443,8 @@
         return srcpeer, destpeer
     finally:
         release(srclock, destlock)
-        if dircleanup is not None:
-            dircleanup.cleanup()
+        if cleandir is not None:
+            shutil.rmtree(cleandir, True)
         if srcpeer is not None:
             srcpeer.close()
 
diff -r 7648b87e76db -r f5fbe15ca744 mercurial/hgweb/common.py
--- a/mercurial/hgweb/common.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/mercurial/hgweb/common.py	Sat Jan 19 17:24:33 2013 -0600
@@ -140,11 +140,11 @@
     try:
         os.stat(path)
         ct = mimetypes.guess_type(path)[0] or "text/plain"
-        req.respond(HTTP_OK, ct, length = os.path.getsize(path))
         fp = open(path, 'rb')
         data = fp.read()
         fp.close()
-        return data
+        req.respond(HTTP_OK, ct, body=data)
+        return ""
     except TypeError:
         raise ErrorResponse(HTTP_SERVER_ERROR, 'illegal filename')
     except OSError, err:
diff -r 7648b87e76db -r f5fbe15ca744 mercurial/hgweb/hgweb_mod.py
--- a/mercurial/hgweb/hgweb_mod.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/mercurial/hgweb/hgweb_mod.py	Sat Jan 19 17:24:33 2013 -0600
@@ -24,6 +24,30 @@
     'pushkey': 'push',
 }
 
+def makebreadcrumb(url):
+    '''Return a 'URL breadcrumb' list
+
+    A 'URL breadcrumb' is a list of URL-name pairs,
+    corresponding to each of the path items on a URL.
+    This can be used to create path navigation entries.
+    '''
+    if url.endswith('/'):
+        url = url[:-1]
+    relpath = url
+    if relpath.startswith('/'):
+        relpath = relpath[1:]
+
+    breadcrumb = []
+    urlel = url
+    pathitems = [''] + relpath.split('/')
+    for pathel in reversed(pathitems):
+        if not pathel or not urlel:
+            break
+        breadcrumb.append({'url': urlel, 'name': pathel})
+        urlel = os.path.dirname(urlel)
+    return reversed(breadcrumb)
+
+
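A quick trace of the helper above (the path is made up for illustration):

    # list(makebreadcrumb('/repos/hg/mercurial')) yields, in order:
    #   {'url': '/repos',              'name': 'repos'}
    #   {'url': '/repos/hg',           'name': 'hg'}
    #   {'url': '/repos/hg/mercurial', 'name': 'mercurial'}
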
 class hgweb(object):
     def __init__(self, repo, name=None, baseui=None):
         if isinstance(repo, str):
@@ -35,6 +59,7 @@
         else:
             self.repo = repo
 
+        self.repo = self.repo.filtered('served')
         self.repo.ui.setconfig('ui', 'report_untrusted', 'off')
         self.repo.ui.setconfig('ui', 'nontty', 'true')
         hook.redirect(True)
@@ -71,6 +96,7 @@
             self.mtime = st.st_mtime
             self.size = st.st_size
             self.repo = hg.repository(self.repo.ui, self.repo.root)
+            self.repo = self.repo.filtered('served')
             self.maxchanges = int(self.config("web", "maxchanges", 10))
             self.stripecount = int(self.config("web", "stripes", 1))
             self.maxshortchanges = int(self.config("web", "maxshortchanges",
@@ -134,8 +160,9 @@
                                  '').lower() != '100-continue') or
                     req.env.get('X-HgHttp2', '')):
                     req.drain()
-                req.respond(inst, protocol.HGTYPE)
-                return '0\n%s\n' % inst.message
+                req.respond(inst, protocol.HGTYPE,
+                            body='0\n%s\n' % inst.message)
+                return ''
 
         # translate user-visible url structure to internal structure
 
@@ -285,7 +312,8 @@
                                              "header": header,
                                              "footer": footer,
                                              "motd": motd,
-                                             "sessionvars": sessionvars
+                                             "sessionvars": sessionvars,
+                                             "pathdef": makebreadcrumb(req.url),
                                             })
         return tmpl
 
diff -r 7648b87e76db -r f5fbe15ca744 mercurial/hgweb/hgwebdir_mod.py
--- a/mercurial/hgweb/hgwebdir_mod.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/mercurial/hgweb/hgwebdir_mod.py	Sat Jan 19 17:24:33 2013 -0600
@@ -12,7 +12,7 @@
 from mercurial import error, encoding
 from common import ErrorResponse, get_mtime, staticfile, paritygen, \
                    get_contact, HTTP_OK, HTTP_NOT_FOUND, HTTP_SERVER_ERROR
-from hgweb_mod import hgweb
+from hgweb_mod import hgweb, makebreadcrumb
 from request import wsgirequest
 import webutil
 
@@ -310,7 +310,8 @@
                                description_sort="",
                                lastchange=d,
                                lastchange_sort=d[1]-d[0],
-                               archives=[])
+                               archives=[],
+                               isdirectory=True)
 
                     seendirs.add(name)
                     yield row
@@ -394,6 +395,7 @@
         self.updatereqenv(req.env)
 
         return tmpl("index", entries=entries, subdir=subdir,
+                    pathdef=makebreadcrumb('/' + subdir),
                     sortcolumn=sortcolumn, descending=descending,
                     **dict(sort))
 
diff -r 7648b87e76db -r f5fbe15ca744 mercurial/hgweb/protocol.py
--- a/mercurial/hgweb/protocol.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/mercurial/hgweb/protocol.py	Sat Jan 19 17:24:33 2013 -0600
@@ -75,23 +75,24 @@
     p = webproto(req, repo.ui)
     rsp = wireproto.dispatch(repo, p, cmd)
     if isinstance(rsp, str):
-        req.respond(HTTP_OK, HGTYPE, length=len(rsp))
-        return [rsp]
+        req.respond(HTTP_OK, HGTYPE, body=rsp)
+        return []
     elif isinstance(rsp, wireproto.streamres):
         req.respond(HTTP_OK, HGTYPE)
         return rsp.gen
     elif isinstance(rsp, wireproto.pushres):
         val = p.restore()
-        req.respond(HTTP_OK, HGTYPE)
-        return ['%d\n%s' % (rsp.res, val)]
+        rsp = '%d\n%s' % (rsp.res, val)
+        req.respond(HTTP_OK, HGTYPE, body=rsp)
+        return []
     elif isinstance(rsp, wireproto.pusherr):
         # drain the incoming bundle
         req.drain()
         p.restore()
         rsp = '0\n%s\n' % rsp.res
-        req.respond(HTTP_OK, HGTYPE, length=len(rsp))
-        return [rsp]
+        req.respond(HTTP_OK, HGTYPE, body=rsp)
+        return []
     elif isinstance(rsp, wireproto.ooberror):
         rsp = rsp.message
-        req.respond(HTTP_OK, HGERRTYPE, length=len(rsp))
-        return [rsp]
+        req.respond(HTTP_OK, HGERRTYPE, body=rsp)
+        return []
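Every branch now follows the same convention: hand the body to respond(), which sets Content-Length itself (see the request.py hunk below) and writes the data, then return an empty iterable so the WSGI layer sends nothing further. Schematically:

    rsp = '0\n%s\n' % msg                    # msg produced by the handler
    req.respond(HTTP_OK, HGTYPE, body=rsp)   # sets Content-Length, writes rsp
    return []                                # body already sent
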
diff -r 7648b87e76db -r f5fbe15ca744 mercurial/hgweb/request.py
--- a/mercurial/hgweb/request.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/mercurial/hgweb/request.py	Sat Jan 19 17:24:33 2013 -0600
@@ -70,19 +70,23 @@
         for s in util.filechunkiter(self.inp, limit=length):
             pass
 
-    def respond(self, status, type=None, filename=None, length=0):
+    def respond(self, status, type, filename=None, body=None):
         if self._start_response is not None:
-
-            self.httphdr(type, filename, length)
-            if not self.headers:
-                raise RuntimeError("request.write called before headers sent")
+            self.headers.append(('Content-Type', type))
+            if filename:
+                filename = (filename.split('/')[-1]
+                            .replace('\\', '\\\\').replace('"', '\\"'))
+                self.headers.append(('Content-Disposition',
+                                     'inline; filename="%s"' % filename))
+            if body is not None:
+                self.headers.append(('Content-Length', str(len(body))))
 
             for k, v in self.headers:
                 if not isinstance(v, str):
-                    raise TypeError('header value must be string: %r' % v)
+                    raise TypeError('header value must be string: %r' % (v,))
 
             if isinstance(status, ErrorResponse):
-                self.header(status.headers)
+                self.headers.extend(status.headers)
                 if status.code == HTTP_NOT_MODIFIED:
                     # RFC 2616 Section 10.3.5: 304 Not Modified has cases where
                     # it MUST NOT include any headers other than these and no
@@ -99,13 +103,12 @@
             self.server_write = self._start_response(status, self.headers)
             self._start_response = None
             self.headers = []
+        if body is not None:
+            self.write(body)
+            self.server_write = None
 
     def write(self, thing):
-        if util.safehasattr(thing, "__iter__"):
-            for part in thing:
-                self.write(part)
-        else:
-            thing = str(thing)
+        if thing:
             try:
                 self.server_write(thing)
             except socket.error, inst:
@@ -122,22 +125,6 @@
     def close(self):
         return None
 
-    def header(self, headers=[('Content-Type','text/html')]):
-        self.headers.extend(headers)
-
-    def httphdr(self, type=None, filename=None, length=0, headers={}):
-        headers = headers.items()
-        if type is not None:
-            headers.append(('Content-Type', type))
-        if filename:
-            filename = (filename.split('/')[-1]
-                        .replace('\\', '\\\\').replace('"', '\\"'))
-            headers.append(('Content-Disposition',
-                            'inline; filename="%s"' % filename))
-        if length:
-            headers.append(('Content-Length', str(length)))
-        self.header(headers)
-
 def wsgiapplication(app_maker):
     '''For compatibility with old CGI scripts. A plain hgweb() or hgwebdir()
     can and should now be used as a WSGI application.'''
diff -r 7648b87e76db -r f5fbe15ca744 mercurial/hgweb/server.py
--- a/mercurial/hgweb/server.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/mercurial/hgweb/server.py	Sat Jan 19 17:24:33 2013 -0600
@@ -129,13 +129,16 @@
                                               SocketServer.ForkingMixIn)
         env['wsgi.run_once'] = 0
 
-        self.close_connection = True
         self.saved_status = None
         self.saved_headers = []
         self.sent_headers = False
         self.length = None
+        self._chunked = None
         for chunk in self.server.application(env, self._start_response):
             self._write(chunk)
+        if not self.sent_headers:
+            self.send_headers()
+        self._done()
 
     def send_headers(self):
         if not self.saved_status:
@@ -144,20 +147,20 @@
         saved_status = self.saved_status.split(None, 1)
         saved_status[0] = int(saved_status[0])
         self.send_response(*saved_status)
-        should_close = True
+        self.length = None
+        self._chunked = False
         for h in self.saved_headers:
             self.send_header(*h)
             if h[0].lower() == 'content-length':
-                should_close = False
                 self.length = int(h[1])
-        # The value of the Connection header is a list of case-insensitive
-        # tokens separated by commas and optional whitespace.
-        if 'close' in [token.strip().lower() for token in
-                       self.headers.get('connection', '').split(',')]:
-            should_close = True
-        if should_close:
-            self.send_header('Connection', 'close')
-        self.close_connection = should_close
+        if (self.length is None and
+            saved_status[0] != common.HTTP_NOT_MODIFIED):
+            self._chunked = (not self.close_connection and
+                             self.request_version == "HTTP/1.1")
+            if self._chunked:
+                self.send_header('Transfer-Encoding', 'chunked')
+            else:
+                self.send_header('Connection', 'close')
         self.end_headers()
         self.sent_headers = True
 
@@ -180,9 +183,16 @@
                 raise AssertionError("Content-length header sent, but more "
                                      "bytes than specified are being written.")
             self.length = self.length - len(data)
+        elif self._chunked and data:
+            data = '%x\r\n%s\r\n' % (len(data), data)
         self.wfile.write(data)
         self.wfile.flush()
 
+    def _done(self):
+        if self._chunked:
+            self.wfile.write('0\r\n\r\n')
+            self.wfile.flush()
+
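The chunked path frames every write as required by RFC 2616: a hexadecimal length line, CRLF, the payload, CRLF, with a zero-length chunk terminating the body. A small illustration (not from the patch):

    def chunkframe(data):
        # one HTTP/1.1 chunk: hex payload length, CRLF, payload, CRLF
        return '%x\r\n%s\r\n' % (len(data), data)

    # two writes followed by the terminator emitted by _done()
    body = chunkframe('hello ') + chunkframe('world') + '0\r\n\r\n'
    # -> '6\r\nhello \r\n5\r\nworld\r\n0\r\n\r\n'
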
 class _httprequesthandleropenssl(_httprequesthandler):
     """HTTPS handler based on pyOpenSSL"""
 
diff -r 7648b87e76db -r f5fbe15ca744 mercurial/hgweb/webcommands.py
--- a/mercurial/hgweb/webcommands.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/mercurial/hgweb/webcommands.py	Sat Jan 19 17:24:33 2013 -0600
@@ -14,6 +14,7 @@
 from common import HTTP_OK, HTTP_FORBIDDEN, HTTP_NOT_FOUND
 from mercurial import graphmod, patch
 from mercurial import help as helpmod
+from mercurial import scmutil
 from mercurial.i18n import _
 
 # __all__ is populated with the allowed commands. Be sure to add to it if
@@ -60,8 +61,8 @@
     if mt.startswith('text/'):
         mt += '; charset="%s"' % encoding.encoding
 
-    req.respond(HTTP_OK, mt, path, len(text))
-    return [text]
+    req.respond(HTTP_OK, mt, path, body=text)
+    return []
 
 def _filerevision(web, tmpl, fctx):
     f = fctx.path()
@@ -193,34 +194,37 @@
         except error.RepoError:
             return _search(web, req, tmpl) # XXX redirect to 404 page?
 
-    def changelist(limit=0, **map):
+    def changelist(latestonly, **map):
         l = [] # build a list in forward order for efficiency
-        for i in xrange(start, end):
+        revs = []
+        if start < end:
+            revs = web.repo.changelog.revs(start, end - 1)
+        if latestonly:
+            # drain the iterator; r is left bound to the last (newest) rev
+            for r in revs:
+                pass
+            revs = (r,)
+        for i in revs:
             ctx = web.repo[i]
             n = ctx.node()
             showtags = webutil.showtag(web.repo, tmpl, 'changelogtag', n)
             files = webutil.listfilediffs(tmpl, ctx.files(), n, web.maxfiles)
 
-            l.insert(0, {"parity": parity.next(),
-                         "author": ctx.user(),
-                         "parent": webutil.parents(ctx, i - 1),
-                         "child": webutil.children(ctx, i + 1),
-                         "changelogtag": showtags,
-                         "desc": ctx.description(),
-                         "date": ctx.date(),
-                         "files": files,
-                         "rev": i,
-                         "node": hex(n),
-                         "tags": webutil.nodetagsdict(web.repo, n),
-                         "bookmarks": webutil.nodebookmarksdict(web.repo, n),
-                         "inbranch": webutil.nodeinbranch(web.repo, ctx),
-                         "branches": webutil.nodebranchdict(web.repo, ctx)
-                        })
-
-        if limit > 0:
-            l = l[:limit]
-
-        for e in l:
+            l.append({"parity": parity.next(),
+                      "author": ctx.user(),
+                      "parent": webutil.parents(ctx, i - 1),
+                      "child": webutil.children(ctx, i + 1),
+                      "changelogtag": showtags,
+                      "desc": ctx.description(),
+                      "date": ctx.date(),
+                      "files": files,
+                      "rev": i,
+                      "node": hex(n),
+                      "tags": webutil.nodetagsdict(web.repo, n),
+                      "bookmarks": webutil.nodebookmarksdict(web.repo, n),
+                      "inbranch": webutil.nodeinbranch(web.repo, ctx),
+                      "branches": webutil.nodebranchdict(web.repo, ctx)
+                     })
+        for e in reversed(l):
             yield e
 
     revcount = shortlog and web.maxshortchanges or web.maxchanges
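
changelist() above drains the revs iterator with a bare loop so that, when
latestonly is set, the loop variable is left holding the newest revision;
changelog.revs() returns a generator, so it cannot be indexed. A standalone
sketch of the idiom (hypothetical helper; note the loop variable stays unbound
if the iterator is empty, which the hunk guards against with its start < end
check):

    def lastitem(iterable):
        # exhaust the iterator; the loop variable keeps the final value
        last = None
        for last in iterable:
            pass
        return last

    assert lastitem(iter(range(5))) == 4
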
@@ -241,12 +245,12 @@
     pos = end - 1
     parity = paritygen(web.stripecount, offset=start - end)
 
-    changenav = webutil.revnavgen(pos, revcount, count, web.repo.changectx)
+    changenav = webutil.revnav(web.repo).gen(pos, revcount, count)
 
     return tmpl(shortlog and 'shortlog' or 'changelog', changenav=changenav,
                 node=ctx.hex(), rev=pos, changesets=count,
-                entries=lambda **x: changelist(limit=0,**x),
-                latestentry=lambda **x: changelist(limit=1,**x),
+                entries=lambda **x: changelist(latestonly=False, **x),
+                latestentry=lambda **x: changelist(latestonly=True, **x),
                 archives=web.archivelist("tip"), revcount=revcount,
                 morevars=morevars, lessvars=lessvars)
 
@@ -255,6 +259,9 @@
 
 def changeset(web, req, tmpl):
     ctx = webutil.changectx(web.repo, req)
+    basectx = webutil.basechangectx(web.repo, req)
+    if basectx is None:
+        basectx = ctx.p1()
     showtags = webutil.showtag(web.repo, tmpl, 'changesettag', ctx.node())
     showbookmarks = webutil.showbookmark(web.repo, tmpl, 'changesetbookmark',
                                          ctx.node())
@@ -273,10 +280,10 @@
         style = req.form['style'][0]
 
     parity = paritygen(web.stripecount)
-    diffs = webutil.diffs(web.repo, tmpl, ctx, None, parity, style)
+    diffs = webutil.diffs(web.repo, tmpl, ctx, basectx, None, parity, style)
 
     parity = paritygen(web.stripecount)
-    diffstatgen = webutil.diffstatgen(ctx)
+    diffstatgen = webutil.diffstatgen(ctx, basectx)
     diffstat = webutil.diffstat(tmpl, ctx, diffstatgen, parity)
 
     return tmpl('changeset',
@@ -285,6 +292,7 @@
                 node=ctx.hex(),
                 parent=webutil.parents(ctx),
                 child=webutil.children(ctx),
+                currentbaseline=basectx.hex(),
                 changesettag=showtags,
                 changesetbookmark=showbookmarks,
                 changesetbranch=showbranch,
@@ -397,14 +405,13 @@
     i = list(reversed(web.repo.tagslist()))
     parity = paritygen(web.stripecount)
 
-    def entries(notip=False, limit=0, **map):
-        count = 0
-        for k, n in i:
-            if notip and k == "tip":
-                continue
-            if limit > 0 and count >= limit:
-                continue
-            count = count + 1
+    def entries(notip, latestonly, **map):
+        t = i
+        if notip:
+            t = [(k, n) for k, n in i if k != "tip"]
+        if latestonly:
+            t = t[:1]
+        for k, n in t:
             yield {"parity": parity.next(),
                    "tag": k,
                    "date": web.repo[n].date(),
@@ -412,20 +419,20 @@
 
     return tmpl("tags",
                 node=hex(web.repo.changelog.tip()),
-                entries=lambda **x: entries(False, 0, **x),
-                entriesnotip=lambda **x: entries(True, 0, **x),
-                latestentry=lambda **x: entries(True, 1, **x))
+                entries=lambda **x: entries(False, False, **x),
+                entriesnotip=lambda **x: entries(True, False, **x),
+                latestentry=lambda **x: entries(True, True, **x))
 
 def bookmarks(web, req, tmpl):
     i = web.repo._bookmarks.items()
     parity = paritygen(web.stripecount)
 
-    def entries(limit=0, **map):
-        count = 0
-        for k, n in sorted(i):
-            if limit > 0 and count >= limit:
-                continue
-            count = count + 1
+    def entries(latestonly, **map):
+        if latestonly:
+            t = [min(i)]
+        else:
+            t = sorted(i)
+        for k, n in t:
             yield {"parity": parity.next(),
                    "bookmark": k,
                    "date": web.repo[n].date(),
@@ -433,8 +440,8 @@
 
     return tmpl("bookmarks",
                 node=hex(web.repo.changelog.tip()),
-                entries=lambda **x: entries(0, **x),
-                latestentry=lambda **x: entries(1, **x))
+                entries=lambda **x: entries(latestonly=False, **x),
+                latestentry=lambda **x: entries(latestonly=True, **x))
 
 def branches(web, req, tmpl):
     tips = []
@@ -515,7 +522,7 @@
             n = ctx.node()
             hn = hex(n)
 
-            l.insert(0, tmpl(
+            l.append(tmpl(
                'shortlogentry',
                 parity=parity.next(),
                 author=ctx.user(),
@@ -528,6 +535,7 @@
                 inbranch=webutil.nodeinbranch(web.repo, ctx),
                 branches=webutil.nodebranchdict(web.repo, ctx)))
 
+        l.reverse()
         yield l
 
     tip = web.repo['tip']
@@ -569,7 +577,7 @@
     if 'style' in req.form:
         style = req.form['style'][0]
 
-    diffs = webutil.diffs(web.repo, tmpl, ctx, [path], parity, style)
+    diffs = webutil.diffs(web.repo, tmpl, ctx, None, [path], parity, style)
     rename = fctx and webutil.renamelink(fctx) or []
     ctx = fctx and fctx or ctx
     return tmpl("filediff",
@@ -736,41 +744,42 @@
     end = min(count, start + revcount) # last rev on this page
     parity = paritygen(web.stripecount, offset=start - end)
 
-    def entries(limit=0, **map):
+    def entries(latestonly, **map):
         l = []
 
         repo = web.repo
-        for i in xrange(start, end):
+        revs = repo.changelog.revs(start, end - 1)
+        if latestonly:
+            for r in revs:
+                pass
+            revs = (r,)
+        for i in revs:
             iterfctx = fctx.filectx(i)
 
-            l.insert(0, {"parity": parity.next(),
-                         "filerev": i,
-                         "file": f,
-                         "node": iterfctx.hex(),
-                         "author": iterfctx.user(),
-                         "date": iterfctx.date(),
-                         "rename": webutil.renamelink(iterfctx),
-                         "parent": webutil.parents(iterfctx),
-                         "child": webutil.children(iterfctx),
-                         "desc": iterfctx.description(),
-                         "tags": webutil.nodetagsdict(repo, iterfctx.node()),
-                         "bookmarks": webutil.nodebookmarksdict(
-                             repo, iterfctx.node()),
-                         "branch": webutil.nodebranchnodefault(iterfctx),
-                         "inbranch": webutil.nodeinbranch(repo, iterfctx),
-                         "branches": webutil.nodebranchdict(repo, iterfctx)})
-
-        if limit > 0:
-            l = l[:limit]
-
-        for e in l:
+            l.append({"parity": parity.next(),
+                      "filerev": i,
+                      "file": f,
+                      "node": iterfctx.hex(),
+                      "author": iterfctx.user(),
+                      "date": iterfctx.date(),
+                      "rename": webutil.renamelink(iterfctx),
+                      "parent": webutil.parents(iterfctx),
+                      "child": webutil.children(iterfctx),
+                      "desc": iterfctx.description(),
+                      "tags": webutil.nodetagsdict(repo, iterfctx.node()),
+                      "bookmarks": webutil.nodebookmarksdict(
+                          repo, iterfctx.node()),
+                      "branch": webutil.nodebranchnodefault(iterfctx),
+                      "inbranch": webutil.nodeinbranch(repo, iterfctx),
+                      "branches": webutil.nodebranchdict(repo, iterfctx)})
+        for e in reversed(l):
             yield e
 
-    nodefunc = lambda x: fctx.filectx(fileid=x)
-    nav = webutil.revnavgen(end - 1, revcount, count, nodefunc)
+    revnav = webutil.filerevnav(web.repo, fctx.path())
+    nav = revnav.gen(end - 1, revcount, count)
     return tmpl("filelog", file=f, node=fctx.hex(), nav=nav,
-                entries=lambda **x: entries(limit=0, **x),
-                latestentry=lambda **x: entries(limit=1, **x),
+                entries=lambda **x: entries(latestonly=False, **x),
+                latestentry=lambda **x: entries(latestonly=True, **x),
                 revcount=revcount, morevars=morevars, lessvars=lessvars)
 
 def archive(web, req, tmpl):
@@ -795,14 +804,17 @@
     name = "%s-%s" % (reponame, arch_version)
     mimetype, artype, extension, encoding = web.archive_specs[type_]
     headers = [
-        ('Content-Type', mimetype),
         ('Content-Disposition', 'attachment; filename=%s%s' % (name, extension))
-    ]
+        ]
     if encoding:
         headers.append(('Content-Encoding', encoding))
-    req.header(headers)
-    req.respond(HTTP_OK)
-    archival.archive(web.repo, req, cnode, artype, prefix=name)
+    req.headers.extend(headers)
+    req.respond(HTTP_OK, mimetype)
+
+    ctx = webutil.changectx(web.repo, req)
+    archival.archive(web.repo, req, cnode, artype, prefix=name,
+                     matchfn=scmutil.match(ctx, []),
+                     subrepos=web.configbool("web", "archivesubrepos"))
     return []
 
 
@@ -843,10 +855,13 @@
 
     uprev = min(max(0, count - 1), rev + revcount)
     downrev = max(0, rev - revcount)
-    changenav = webutil.revnavgen(pos, revcount, count, web.repo.changectx)
+    changenav = webutil.revnav(web.repo).gen(pos, revcount, count)
 
-    dag = graphmod.dagwalker(web.repo, range(start, end)[::-1])
-    tree = list(graphmod.colored(dag, web.repo))
+    tree = []
+    if start < end:
+        revs = list(web.repo.changelog.revs(end - 1, start))
+        dag = graphmod.dagwalker(web.repo, revs)
+        tree = list(graphmod.colored(dag, web.repo))
 
     def getcolumns(tree):
         cols = 0
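
Several hunks above replace l.insert(0, ...) with l.append(...) plus a single
reversal; inserting at the head of a Python list shifts every element, so
building front-to-back and reversing once replaces quadratic work with one
linear pass. A small illustration of the two shapes (toy code, not from the
patch):

    def build_slow(items):
        l = []
        for x in items:
            l.insert(0, x)      # O(n) shift on every insert
        return l

    def build_fast(items):
        l = []
        for x in items:
            l.append(x)         # amortized O(1)
        l.reverse()             # single O(n) pass
        return l

    assert build_slow(range(5)) == build_fast(range(5))
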
diff -r 7648b87e76db -r f5fbe15ca744 mercurial/hgweb/webutil.py
--- a/mercurial/hgweb/webutil.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/mercurial/hgweb/webutil.py	Sat Jan 19 17:24:33 2013 -0600
@@ -24,46 +24,100 @@
         return "/"
     return up + "/"
 
-def revnavgen(pos, pagelen, limit, nodefunc):
-    def seq(factor, limit=None):
-        if limit:
-            yield limit
-            if limit >= 20 and limit <= 40:
-                yield 50
-        else:
-            yield 1 * factor
-            yield 3 * factor
-        for f in seq(factor * 10):
-            yield f
+def _navseq(step, firststep=None):
+    if firststep:
+        yield firststep
+        if firststep >= 20 and firststep <= 40:
+            firststep = 50
+            yield firststep
+        assert step > 0
+        assert firststep > 0
+        while step <= firststep:
+            step *= 10
+    while True:
+        yield 1 * step
+        yield 3 * step
+        step *= 10
+
+class revnav(object):
+
+    def __init__(self, repo):
+        """Navigation generation object
 
-    navbefore = []
-    navafter = []
+        :repo: repo object we generate nav for
+        """
+        # used for hex generation
+        self._revlog = repo.changelog
+
+    def __nonzero__(self):
+        """return True if any revision to navigate over"""
+        try:
+            self._revlog.node(0)
+            return True
+        except error.RepoError:
+            return False
+
+    def hex(self, rev):
+        return hex(self._revlog.node(rev))
+
+    def gen(self, pos, pagelen, limit):
+        """computes label and revision id for navigation link
+
+        :pos: is the revision relative to which we generate navigation.
+        :pagelen: the size of each navigation page
+        :limit: how far shall we link
 
-    last = 0
-    for f in seq(1, pagelen):
-        if f < pagelen or f <= last:
-            continue
-        if f > limit:
-            break
-        last = f
-        if pos + f < limit:
-            navafter.append(("+%d" % f, hex(nodefunc(pos + f).node())))
-        if pos - f >= 0:
-            navbefore.insert(0, ("-%d" % f, hex(nodefunc(pos - f).node())))
+        The return value is:
+            - a single element tuple
+            - containing a dictionary with a `before` and `after` key
+            - values are generators taking an arbitrary number of kwargs
+            - yielded items are dictionaries with `label` and `node` keys
+        """
+        if not self:
+            # empty repo
+            return ({'before': (), 'after': ()},)
+
+        targets = []
+        for f in _navseq(1, pagelen):
+            if f > limit:
+                break
+            targets.append(pos + f)
+            targets.append(pos - f)
+        targets.sort()
 
-    navafter.append(("tip", "tip"))
-    try:
-        navbefore.insert(0, ("(0)", hex(nodefunc('0').node())))
-    except error.RepoError:
-        pass
+        navbefore = [("(0)", self.hex(0))]
+        navafter = []
+        for rev in targets:
+            if rev not in self._revlog:
+                continue
+            if pos < rev < limit:
+                navafter.append(("+%d" % (rev - pos), self.hex(rev)))
+            if 0 < rev < pos:
+                navbefore.append(("-%d" % (pos - rev), self.hex(rev)))
+
+        navafter.append(("tip", "tip"))
+
+        data = lambda i: {"label": i[0], "node": i[1]}
+        return ({'before': lambda **map: (data(i) for i in navbefore),
+                 'after':  lambda **map: (data(i) for i in navafter)},)
 
-    def gen(l):
-        def f(**map):
-            for label, node in l:
-                yield {"label": label, "node": node}
-        return f
+class filerevnav(revnav):
+
+    def __init__(self, repo, path):
+        """Navigation generation object
 
-    return (dict(before=gen(navbefore), after=gen(navafter)),)
+        :repo: repo object we generate nav for
+        :path: path of the file we generate nav for
+        """
+        # used for iteration
+        self._changelog = repo.unfiltered().changelog
+        # used for hex generation
+        self._revlog = repo.file(path)
+
+    def hex(self, rev):
+        return hex(self._changelog.node(self._revlog.linkrev(rev)))
+
 
 def _siblings(siblings=[], hiderev=None):
     siblings = [s for s in siblings if s.node() != nullid]
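
For reference, the step ladder _navseq produces drives the "+1 +3 +10 ..."
navigation labels: an optional page-sized first step (bumped to 50 inside the
20-40 range), then 1x and 3x a growing power of ten. A self-contained copy of
the generator with the values it yields (sketch for illustration):

    import itertools

    def _navseq(step, firststep=None):
        # copy of the generator above, for illustration
        if firststep:
            yield firststep
            if firststep >= 20 and firststep <= 40:
                firststep = 50
                yield firststep
            while step <= firststep:
                step *= 10
        while True:
            yield 1 * step
            yield 3 * step
            step *= 10

    assert list(itertools.islice(_navseq(1), 6)) == [1, 3, 10, 30, 100, 300]
    assert (list(itertools.islice(_navseq(1, 30), 6)) ==
            [30, 50, 100, 300, 1000, 3000])
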
@@ -140,13 +194,7 @@
     path = path.lstrip('/')
     return scmutil.canonpath(repo.root, '', path)
 
-def changectx(repo, req):
-    changeid = "tip"
-    if 'node' in req.form:
-        changeid = req.form['node'][0]
-    elif 'manifest' in req.form:
-        changeid = req.form['manifest'][0]
-
+def changeidctx(repo, changeid):
     try:
         ctx = repo[changeid]
     except error.RepoError:
@@ -155,6 +203,28 @@
 
     return ctx
 
+def changectx(repo, req):
+    changeid = "tip"
+    if 'node' in req.form:
+        changeid = req.form['node'][0]
+        ipos = changeid.find(':')
+        if ipos != -1:
+            changeid = changeid[(ipos + 1):]
+    elif 'manifest' in req.form:
+        changeid = req.form['manifest'][0]
+
+    return changeidctx(repo, changeid)
+
+def basechangectx(repo, req):
+    if 'node' in req.form:
+        changeid = req.form['node'][0]
+        ipos = changeid.find(':')
+        if ipos != -1:
+            changeid = changeid[:ipos]
+            return changeidctx(repo, changeid)
+
+    return None
+
 def filectx(repo, req):
     if 'file' not in req.form:
         raise ErrorResponse(HTTP_NOT_FOUND, 'file not given')
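
changectx() and basechangectx() above split the node form value on ':' so a
URL such as ?node=<base>:<tip> supplies both endpoints of a comparison;
basechangectx() returns None when no base is given, and changeset() then falls
back to ctx.p1(). A minimal sketch of the same split (hypothetical helper):

    def splitnode(value):
        # return (base, node); base is None when no ':' separator is given
        ipos = value.find(':')
        if ipos == -1:
            return None, value
        return value[:ipos], value[ipos + 1:]

    assert splitnode('tip') == (None, 'tip')
    assert splitnode('1a2b:3c4d') == ('1a2b', '3c4d')
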
@@ -178,7 +248,7 @@
     if len(files) > max:
         yield tmpl('fileellipses')
 
-def diffs(repo, tmpl, ctx, files, parity, style):
+def diffs(repo, tmpl, ctx, basectx, files, parity, style):
 
     def countgen():
         start = 1
@@ -209,8 +279,11 @@
         m = match.always(repo.root, repo.getcwd())
 
     diffopts = patch.diffopts(repo.ui, untrusted=True)
-    parents = ctx.parents()
-    node1 = parents and parents[0].node() or nullid
+    if basectx is None:
+        parents = ctx.parents()
+        node1 = parents and parents[0].node() or nullid
+    else:
+        node1 = basectx.node()
     node2 = ctx.node()
 
     block = []
@@ -274,10 +347,10 @@
         for oc in s.get_grouped_opcodes(n=context):
             yield tmpl('comparisonblock', lines=getblock(oc))
 
-def diffstatgen(ctx):
+def diffstatgen(ctx, basectx):
     '''Generator function that provides the diffstat data.'''
 
-    stats = patch.diffstatdata(util.iterlines(ctx.diff()))
+    stats = patch.diffstatdata(util.iterlines(ctx.diff(basectx)))
     maxname, maxtotal, addtotal, removetotal, binary = patch.diffstatsum(stats)
     while True:
         yield stats, maxname, maxtotal, addtotal, removetotal, binary
@@ -321,7 +394,7 @@
         return sessionvars(copy.copy(self.vars), self.start)
     def __iter__(self):
         separator = self.start
-        for key, value in self.vars.iteritems():
+        for key, value in sorted(self.vars.iteritems()):
             yield {'name': key, 'value': str(value), 'separator': separator}
             separator = '&'
 
diff -r 7648b87e76db -r f5fbe15ca744 mercurial/hook.py
--- a/mercurial/hook.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/mercurial/hook.py	Sat Jan 19 17:24:33 2013 -0600
@@ -7,7 +7,7 @@
 
 from i18n import _
 import os, sys
-import extensions, util
+import extensions, util, demandimport
 
 def _pythonhook(ui, repo, name, hname, funcname, args, throw):
     '''call python hook. hook is callable object, looked up as
@@ -35,13 +35,17 @@
                 sys.path = sys.path[:] + [modpath]
                 modname = modfile
         try:
+            demandimport.disable()
             obj = __import__(modname)
+            demandimport.enable()
         except ImportError:
             e1 = sys.exc_type, sys.exc_value, sys.exc_traceback
             try:
                 # extensions are loaded with hgext_ prefix
                 obj = __import__("hgext_%s" % modname)
+                demandimport.enable()
             except ImportError:
+                demandimport.enable()
                 e2 = sys.exc_type, sys.exc_value, sys.exc_traceback
                 if ui.tracebackflag:
                     ui.warn(_('exception from first failed import attempt:\n'))
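
The hook.py hunk above wraps __import__ in demandimport.disable()/enable() so
a hook module's ImportError surfaces immediately rather than being deferred by
the lazy importer; note enable() is re-invoked on every exit path of the
nested try blocks. A try/finally variant of the same guard, sketched as a
context manager (assumes mercurial's demandimport module; not the control flow
the patch itself uses):

    import contextlib

    @contextlib.contextmanager
    def eagerimports(demandimport):
        # temporarily turn lazy importing off so ImportError raises eagerly
        demandimport.disable()
        try:
            yield
        finally:
            demandimport.enable()

    # usage sketch:
    #   with eagerimports(demandimport):
    #       obj = __import__(modname)
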
diff -r 7648b87e76db -r f5fbe15ca744 mercurial/httpclient/socketutil.py
--- a/mercurial/httpclient/socketutil.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/mercurial/httpclient/socketutil.py	Sat Jan 19 17:24:33 2013 -0600
@@ -70,7 +70,7 @@
                 continue
             break
         if not sock:
-            raise socket.error, msg
+            raise socket.error(msg)
         return sock
 
 if ssl:
diff -r 7648b87e76db -r f5fbe15ca744 mercurial/ignore.py
--- a/mercurial/ignore.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/mercurial/ignore.py	Sat Jan 19 17:24:33 2013 -0600
@@ -46,12 +46,32 @@
                 pat = line
                 break
             elif line.startswith(s+':'):
-                pat = rels + line[len(s)+1:]
+                pat = rels + line[len(s) + 1:]
                 break
         patterns.append(pat)
 
     return patterns, warnings
 
+def readpats(root, files, warn):
+    '''return a list of (ignore-file-name, list-of-patterns) pairs'''
+
+    pats = {}
+    for f in files:
+        if f in pats:
+            continue
+        try:
+            pats[f] = []
+            fp = open(f)
+            pats[f], warnings = ignorepats(fp)
+            fp.close()
+            for warning in warnings:
+                warn("%s: %s\n" % (f, warning))
+        except IOError, inst:
+            if f != files[0]:
+                warn(_("skipping unreadable ignore file '%s': %s\n") %
+                     (f, inst.strerror))
+    return [(f, pats[f]) for f in files if f in pats]
+
 def ignore(root, files, warn):
     '''return matcher covering patterns in 'files'.
 
@@ -72,22 +92,10 @@
     glob:pattern   # non-rooted glob
     pattern        # pattern of the current default type'''
 
-    pats = {}
-    for f in files:
-        try:
-            pats[f] = []
-            fp = open(f)
-            pats[f], warnings = ignorepats(fp)
-            fp.close()
-            for warning in warnings:
-                warn("%s: %s\n" % (f, warning))
-        except IOError, inst:
-            if f != files[0]:
-                warn(_("skipping unreadable ignore file '%s': %s\n") %
-                     (f, inst.strerror))
+    pats = readpats(root, files, warn)
 
     allpats = []
-    for patlist in pats.values():
+    for f, patlist in pats:
         allpats.extend(patlist)
     if not allpats:
         return util.never
@@ -96,7 +104,7 @@
         ignorefunc = match.match(root, '', [], allpats)
     except util.Abort:
         # Re-raise an exception where the src is the right file
-        for f, patlist in pats.iteritems():
+        for f, patlist in pats:
             try:
                 match.match(root, '', [], patlist)
             except util.Abort, inst:
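
readpats() above hands back an ordered list of (filename, patterns) pairs
instead of a dict, so ignore() can flatten every pattern and, when
match.match() aborts, re-run per file to attribute the bad pattern to the
right ignore file. A sketch of consuming that shape (hypothetical data):

    pats = [('.hgignore', ['glob:*.pyc']),
            ('/etc/hgignore', ['re:.*~$'])]

    allpats = []
    for f, patlist in pats:
        allpats.extend(patlist)

    assert allpats == ['glob:*.pyc', 're:.*~$']
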
diff -r 7648b87e76db -r f5fbe15ca744 mercurial/localrepo.py
--- a/mercurial/localrepo.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/mercurial/localrepo.py	Sat Jan 19 17:24:33 2013 -0600
@@ -4,9 +4,9 @@
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
-from node import bin, hex, nullid, nullrev, short
+from node import hex, nullid, short
 from i18n import _
-import peer, changegroup, subrepo, discovery, pushkey, obsolete
+import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview
 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
 import lock, transaction, store, encoding, base85
 import scmutil, util, extensions, hook, error, revset
@@ -15,14 +15,49 @@
 import tags as tagsmod
 from lock import release
 import weakref, errno, os, time, inspect
+import branchmap
 propertycache = util.propertycache
 filecache = scmutil.filecache
 
-class storecache(filecache):
+class repofilecache(filecache):
+    """All filecache usage on repo are done for logic that should be unfiltered
+    """
+
+    def __get__(self, repo, type=None):
+        return super(repofilecache, self).__get__(repo.unfiltered(), type)
+    def __set__(self, repo, value):
+        return super(repofilecache, self).__set__(repo.unfiltered(), value)
+    def __delete__(self, repo):
+        return super(repofilecache, self).__delete__(repo.unfiltered())
+
+class storecache(repofilecache):
     """filecache for files in the store"""
     def join(self, obj, fname):
         return obj.sjoin(fname)
 
+class unfilteredpropertycache(propertycache):
+    """propertycache that apply to unfiltered repo only"""
+
+    def __get__(self, repo, type=None):
+        return super(unfilteredpropertycache, self).__get__(repo.unfiltered())
+
+class filteredpropertycache(propertycache):
+    """propertycache that must take filtering in account"""
+
+    def cachevalue(self, obj, value):
+        object.__setattr__(obj, self.name, value)
+
+
+def hasunfilteredcache(repo, name):
+    """check if an repo and a unfilteredproperty cached value for """
+    return name in vars(repo.unfiltered())
+
+def unfilteredmethod(orig):
+    """decorate method that always need to be run on unfiltered version"""
+    def wrapper(repo, *args, **kwargs):
+        return orig(repo.unfiltered(), *args, **kwargs)
+    return wrapper
+
 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
 
@@ -31,7 +66,7 @@
 
     def __init__(self, repo, caps=MODERNCAPS):
         peer.peerrepository.__init__(self)
-        self._repo = repo
+        self._repo = repo.filtered('served')
         self.ui = repo.ui
         self._caps = repo._restrictcapabilities(caps)
         self.requirements = repo.requirements
@@ -56,10 +91,10 @@
         return self._repo.lookup(key)
 
     def branchmap(self):
-        return discovery.visiblebranchmap(self._repo)
+        return self._repo.branchmap()
 
     def heads(self):
-        return discovery.visibleheads(self._repo)
+        return self._repo.heads()
 
     def known(self, nodes):
         return self._repo.known(nodes)
@@ -112,6 +147,7 @@
                                         'dotencode'))
     openerreqs = set(('revlogv1', 'generaldelta'))
     requirements = ['revlogv1']
+    filtername = None
 
     def _baserequirements(self, create):
         return self.requirements[:]
@@ -193,8 +229,7 @@
             self._writerequirements()
 
 
-        self._branchcache = None
-        self._branchcachetip = None
+        self._branchcaches = {}
         self.filterpats = {}
         self._datafilters = {}
         self._transref = self._lockref = self._wlockref = None
@@ -205,6 +240,15 @@
         # Maps a property name to its util.filecacheentry
         self._filecache = {}
 
+        # hold sets of revision to be filtered
+        # should be cleared when something might have changed the filter value:
+        # - new changesets,
+        # - phase change,
+        # - new obsolescence marker,
+        # - working directory parent change,
+        # - bookmark changes
+        self.filteredrevcache = {}
+
     def close(self):
         pass
 
@@ -218,7 +262,7 @@
 
     def _writerequirements(self):
         reqfile = self.opener("requires", "w")
-        for r in self.requirements:
+        for r in sorted(self.requirements):
             reqfile.write("%s\n" % r)
         reqfile.close()
 
@@ -263,17 +307,28 @@
     def peer(self):
         return localpeer(self) # not cached to avoid reference cycle
 
-    @filecache('bookmarks')
+    def unfiltered(self):
+        """Return unfiltered version of the repository
+
+        Intended to be ovewritten by filtered repo."""
+        return self
+
+    def filtered(self, name):
+        """Return a filtered version of a repository"""
+        # build a new class with the mixin and the current class
+        # (possibly a subclass of the repo)
+        class proxycls(repoview.repoview, self.unfiltered().__class__):
+            pass
+        return proxycls(self, name)
+
+    @repofilecache('bookmarks')
     def _bookmarks(self):
-        return bookmarks.read(self)
+        return bookmarks.bmstore(self)
 
-    @filecache('bookmarks.current')
+    @repofilecache('bookmarks.current')
     def _bookmarkcurrent(self):
         return bookmarks.readcurrent(self)
 
-    def _writebookmarks(self, marks):
-        bookmarks.write(self)
-
     def bookmarkheads(self, bookmark):
         name = bookmark.split('@', 1)[0]
         heads = []
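
filtered() above manufactures the view class at call time, mixing
repoview.repoview in front of the repo's own class so that subclasses added by
extensions keep their methods on filtered views. A toy model of that
composition (stand-in classes, not the real repoview):

    class view(object):
        # stand-in for repoview.repoview: wraps another object
        def __init__(self, base, name):
            self._base = base
            self.filtername = name

    class repo(object):
        filtername = None
        def unfiltered(self):
            return self
        def filtered(self, name):
            # mix the view class in front of the concrete repo class
            class proxycls(view, self.unfiltered().__class__):
                pass
            return proxycls(self, name)

    v = repo().filtered('served')
    assert isinstance(v, repo) and v.filtername == 'served'
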
@@ -295,27 +350,6 @@
             self.ui.warn(msg % len(list(store)))
         return store
 
-    @propertycache
-    def hiddenrevs(self):
-        """hiddenrevs: revs that should be hidden by command and tools
-
-        This set is carried on the repo to ease initialization and lazy
-        loading; it'll probably move back to changelog for efficiency and
-        consistency reasons.
-
-        Note that the hiddenrevs will needs invalidations when
-        - a new changesets is added (possible unstable above extinct)
-        - a new obsolete marker is added (possible new extinct changeset)
-
-        hidden changesets cannot have non-hidden descendants
-        """
-        hidden = set()
-        if self.obsstore:
-            ### hide extinct changeset that are not accessible by any mean
-            hiddenquery = 'extinct() - ::(. + bookmark())'
-            hidden.update(self.revs(hiddenquery))
-        return hidden
-
     @storecache('00changelog.i')
     def changelog(self):
         c = changelog.changelog(self.sopener)
@@ -329,7 +363,7 @@
     def manifest(self):
         return manifest.manifest(self.sopener)
 
-    @filecache('dirstate')
+    @repofilecache('dirstate')
     def dirstate(self):
         warned = [0]
         def validate(node):
@@ -385,6 +419,7 @@
     def hook(self, name, throw=False, **args):
         return hook.hook(self.ui, self, name, throw, **args)
 
+    @unfilteredmethod
     def _tag(self, names, node, message, local, user, date, extra={}):
         if isinstance(names, str):
             names = (names,)
@@ -482,7 +517,7 @@
         self.tags() # instantiate the cache
         self._tag(names, node, message, local, user, date)
 
-    @propertycache
+    @filteredpropertycache
     def _tagscache(self):
         '''Returns a tagscache object that contains various tags related
         caches.'''
@@ -594,43 +629,10 @@
                 marks.append(bookmark)
         return sorted(marks)
 
-    def _branchtags(self, partial, lrev):
-        # TODO: rename this function?
-        tiprev = len(self) - 1
-        if lrev != tiprev:
-            ctxgen = (self[r] for r in self.changelog.revs(lrev + 1, tiprev))
-            self._updatebranchcache(partial, ctxgen)
-            self._writebranchcache(partial, self.changelog.tip(), tiprev)
-
-        return partial
-
-    def updatebranchcache(self):
-        tip = self.changelog.tip()
-        if self._branchcache is not None and self._branchcachetip == tip:
-            return
-
-        oldtip = self._branchcachetip
-        self._branchcachetip = tip
-        if oldtip is None or oldtip not in self.changelog.nodemap:
-            partial, last, lrev = self._readbranchcache()
-        else:
-            lrev = self.changelog.rev(oldtip)
-            partial = self._branchcache
-
-        self._branchtags(partial, lrev)
-        # this private cache holds all heads (not just the branch tips)
-        self._branchcache = partial
-
     def branchmap(self):
         '''returns a dictionary {branch: [branchheads]}'''
-        if self.changelog.filteredrevs:
-            # some changeset are excluded we can't use the cache
-            branchmap = {}
-            self._updatebranchcache(branchmap, (self[r] for r in self))
-            return branchmap
-        else:
-            self.updatebranchcache()
-            return self._branchcache
+        branchmap.updatecache(self)
+        return self._branchcaches[self.filtername]
 
 
     def _branchtip(self, heads):
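
branchmap() above now delegates to branchmap.updatecache() and reads the
result out of self._branchcaches keyed by filtername, so each filtered view
('served', 'visible', ...) carries its own branch-head cache instead of one
cache guarded by a tip check. A toy sketch of the keying scheme only (the real
work happens in the new branchmap module):

    class toyrepo(object):
        def __init__(self):
            self._branchcaches = {}
            self.filtername = None

        def branchmap(self):
            # compute-and-store per filter name; the real code calls
            # branchmap.updatecache(self) here instead
            if self.filtername not in self._branchcaches:
                self._branchcaches[self.filtername] = {'default': ['tipnode']}
            return self._branchcaches[self.filtername]

    r = toyrepo()
    assert r.branchmap() == {'default': ['tipnode']}
    assert None in r._branchcaches
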
@@ -656,109 +658,6 @@
             bt[bn] = self._branchtip(heads)
         return bt
 
-    def _readbranchcache(self):
-        partial = {}
-        try:
-            f = self.opener("cache/branchheads")
-            lines = f.read().split('\n')
-            f.close()
-        except (IOError, OSError):
-            return {}, nullid, nullrev
-
-        try:
-            last, lrev = lines.pop(0).split(" ", 1)
-            last, lrev = bin(last), int(lrev)
-            if lrev >= len(self) or self[lrev].node() != last:
-                # invalidate the cache
-                raise ValueError('invalidating branch cache (tip differs)')
-            for l in lines:
-                if not l:
-                    continue
-                node, label = l.split(" ", 1)
-                label = encoding.tolocal(label.strip())
-                if not node in self:
-                    raise ValueError('invalidating branch cache because node '+
-                                     '%s does not exist' % node)
-                partial.setdefault(label, []).append(bin(node))
-        except KeyboardInterrupt:
-            raise
-        except Exception, inst:
-            if self.ui.debugflag:
-                self.ui.warn(str(inst), '\n')
-            partial, last, lrev = {}, nullid, nullrev
-        return partial, last, lrev
-
-    def _writebranchcache(self, branches, tip, tiprev):
-        try:
-            f = self.opener("cache/branchheads", "w", atomictemp=True)
-            f.write("%s %s\n" % (hex(tip), tiprev))
-            for label, nodes in branches.iteritems():
-                for node in nodes:
-                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
-            f.close()
-        except (IOError, OSError):
-            pass
-
-    def _updatebranchcache(self, partial, ctxgen):
-        """Given a branchhead cache, partial, that may have extra nodes or be
-        missing heads, and a generator of nodes that are at least a superset of
-        heads missing, this function updates partial to be correct.
-        """
-        # collect new branch entries
-        newbranches = {}
-        for c in ctxgen:
-            newbranches.setdefault(c.branch(), []).append(c.node())
-        # if older branchheads are reachable from new ones, they aren't
-        # really branchheads. Note checking parents is insufficient:
-        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
-        for branch, newnodes in newbranches.iteritems():
-            bheads = partial.setdefault(branch, [])
-            # Remove candidate heads that no longer are in the repo (e.g., as
-            # the result of a strip that just happened).  Avoid using 'node in
-            # self' here because that dives down into branchcache code somewhat
-            # recursively.
-            bheadrevs = [self.changelog.rev(node) for node in bheads
-                         if self.changelog.hasnode(node)]
-            newheadrevs = [self.changelog.rev(node) for node in newnodes
-                           if self.changelog.hasnode(node)]
-            ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
-            # Remove duplicates - nodes that are in newheadrevs and are already
-            # in bheadrevs.  This can happen if you strip a node whose parent
-            # was already a head (because they're on different branches).
-            bheadrevs = sorted(set(bheadrevs).union(newheadrevs))
-
-            # Starting from tip means fewer passes over reachable.  If we know
-            # the new candidates are not ancestors of existing heads, we don't
-            # have to examine ancestors of existing heads
-            if ctxisnew:
-                iterrevs = sorted(newheadrevs)
-            else:
-                iterrevs = list(bheadrevs)
-
-            # This loop prunes out two kinds of heads - heads that are
-            # superseded by a head in newheadrevs, and newheadrevs that are not
-            # heads because an existing head is their descendant.
-            while iterrevs:
-                latest = iterrevs.pop()
-                if latest not in bheadrevs:
-                    continue
-                ancestors = set(self.changelog.ancestors([latest],
-                                                         bheadrevs[0]))
-                if ancestors:
-                    bheadrevs = [b for b in bheadrevs if b not in ancestors]
-            partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]
-
-        # There may be branches that cease to exist when the last commit in the
-        # branch was stripped.  This code filters them out.  Note that the
-        # branch that ceased to exist may not be in newbranches because
-        # newbranches is the set of candidate heads, which when you strip the
-        # last commit in a branch will be the parent branch.
-        for branch in partial.keys():
-            nodes = [head for head in partial[branch]
-                     if self.changelog.hasnode(head)]
-            if not nodes:
-                del partial[branch]
-
     def lookup(self, key):
         return self[key].node()
 
@@ -865,11 +764,11 @@
 
         return data
 
-    @propertycache
+    @unfilteredpropertycache
     def _encodefilterpats(self):
         return self._loadfilter('encode')
 
-    @propertycache
+    @unfilteredpropertycache
     def _decodefilterpats(self):
         return self._loadfilter('decode')
 
@@ -964,6 +863,7 @@
         finally:
             release(lock, wlock)
 
+    @unfilteredmethod # Until we get smarter cache management
     def _rollback(self, dryrun, force):
         ui = self.ui
         try:
@@ -995,6 +895,7 @@
             return 0
 
         parents = self.dirstate.parents()
+        self.destroying()
         transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
         if os.path.exists(self.join('undo.bookmarks')):
             util.rename(self.join('undo.bookmarks'),
@@ -1004,9 +905,6 @@
                         self.sjoin('phaseroots'))
         self.invalidate()
 
-        # Discard all cache entries to force reloading everything.
-        self._filecache.clear()
-
         parentgone = (parents[0] not in self.changelog.nodemap or
                       parents[1] not in self.changelog.nodemap)
         if parentgone:
@@ -1034,16 +932,16 @@
         return 0
 
     def invalidatecaches(self):
-        def delcache(name):
-            try:
-                delattr(self, name)
-            except AttributeError:
-                pass
+
+        if '_tagscache' in vars(self):
+            # can't use delattr on proxy
+            del self.__dict__['_tagscache']
 
-        delcache('_tagscache')
+        self.unfiltered()._branchcaches.clear()
+        self.invalidatevolatilesets()
 
-        self._branchcache = None # in UTF-8
-        self._branchcachetip = None
+    def invalidatevolatilesets(self):
+        self.filteredrevcache.clear()
         obsolete.clearobscaches(self)
 
     def invalidatedirstate(self):
@@ -1055,22 +953,23 @@
         rereads the dirstate. Use dirstate.invalidate() if you want to
         explicitly read the dirstate again (i.e. restoring it to a previous
         known good state).'''
-        if 'dirstate' in self.__dict__:
+        if hasunfilteredcache(self, 'dirstate'):
             for k in self.dirstate._filecache:
                 try:
                     delattr(self.dirstate, k)
                 except AttributeError:
                     pass
-            delattr(self, 'dirstate')
+            delattr(self.unfiltered(), 'dirstate')
 
     def invalidate(self):
+        unfiltered = self.unfiltered() # all filecaches are stored on unfiltered
         for k in self._filecache:
             # dirstate is invalidated separately in invalidatedirstate()
             if k == 'dirstate':
                 continue
 
             try:
-                delattr(self, k)
+                delattr(unfiltered, k)
             except AttributeError:
                 pass
         self.invalidatecaches()
@@ -1111,10 +1010,10 @@
 
         def unlock():
             self.store.write()
-            if '_phasecache' in vars(self):
+            if hasunfilteredcache(self, '_phasecache'):
                 self._phasecache.write()
             for k, ce in self._filecache.items():
-                if k == 'dirstate':
+                if k == 'dirstate' or k not in self.__dict__:
                     continue
                 ce.refresh()
 
@@ -1134,9 +1033,7 @@
 
         def unlock():
             self.dirstate.write()
-            ce = self._filecache.get('dirstate')
-            if ce:
-                ce.refresh()
+            self._filecache['dirstate'].refresh()
 
         l = self._lock(self.join("wlock"), wait, unlock,
                        self.invalidatedirstate, _('working directory of %s') %
@@ -1224,6 +1121,7 @@
 
         return fparent1
 
+    @unfilteredmethod
     def commit(self, text="", user=None, date=None, match=None, force=False,
                editor=False, extra={}):
         """Add a new revision to current repository.
@@ -1394,6 +1292,7 @@
         self._afterlock(commithook)
         return ret
 
+    @unfilteredmethod
     def commitctx(self, ctx, error=False):
         """Add a new revision to current repository.
         Revision information is passed via the context argument.
@@ -1468,14 +1367,33 @@
                 # if minimal phase was 0 we don't need to retract anything
                 phases.retractboundary(self, targetphase, [n])
             tr.close()
-            self.updatebranchcache()
+            branchmap.updatecache(self.filtered('served'))
             return n
         finally:
             if tr:
                 tr.release()
             lock.release()
 
-    def destroyed(self, newheadnodes=None):
+    @unfilteredmethod
+    def destroying(self):
+        '''Inform the repository that nodes are about to be destroyed.
+        Intended for use by strip and rollback, so there's a common
+        place for anything that has to be done before destroying history.
+
+        This is mostly useful for saving state that is in memory and waiting
+        to be flushed when the current lock is released. Because a call to
+        destroyed is imminent, the repo will be invalidated, causing those
+        changes to either stay in memory (waiting for the next unlock) or
+        vanish completely.
+        '''
+        # When using the same lock to commit and strip, the phasecache is left
+        # dirty after committing. Then when we strip, the repo is invalidated,
+        # causing those changes to disappear.
+        if '_phasecache' in vars(self):
+            self._phasecache.write()
+
+    @unfilteredmethod
+    def destroyed(self):
         '''Inform the repository that nodes have been destroyed.
         Intended for use by strip and rollback, so there's a common
         place for anything that has to be done after destroying history.
@@ -1486,16 +1404,22 @@
         code to update the branchheads cache, rather than having future code
         decide it's invalid and regenerating it from scratch.
         '''
-        # If we have info, newheadnodes, on how to update the branch cache, do
-        # it, Otherwise, since nodes were destroyed, the cache is stale and this
-        # will be caught the next time it is read.
-        if newheadnodes:
-            tiprev = len(self) - 1
-            ctxgen = (self[node] for node in newheadnodes
-                      if self.changelog.hasnode(node))
-            self._updatebranchcache(self._branchcache, ctxgen)
-            self._writebranchcache(self._branchcache, self.changelog.tip(),
-                                   tiprev)
+        # When one tries to:
+        # 1) destroy nodes thus calling this method (e.g. strip)
+        # 2) use phasecache somewhere (e.g. commit)
+        #
+        # then 2) will fail because the phasecache contains nodes that were
+        # removed. We can either remove phasecache from the filecache,
+        # causing it to reload next time it is accessed, or simply filter
+        # the removed nodes now and write the updated cache.
+        if '_phasecache' in self._filecache:
+            self._phasecache.filterunknown(self)
+            self._phasecache.write()
+
+        # update the 'served' branch cache to help read-only server processes
+        # Thanks to branchcache collaboration this is done from the nearest
+        # filtered subset and it is expected to be fast.
+        branchmap.updatecache(self.filtered('served'))
 
         # Ensure the persistent tag cache is updated.  Doing it now
         # means that the tag cache only has to worry about destroyed
@@ -1507,10 +1431,7 @@
         # head, refresh the tag cache, then immediately add a new head.
         # But I think doing it this way is necessary for the "instant
         # tag cache retrieval" case to work.
-        self.invalidatecaches()
-
-        # Discard all cache entries to force reloading everything.
-        self._filecache.clear()
+        self.invalidate()
 
     def walk(self, match, node=None):
         '''
@@ -1568,7 +1489,7 @@
         if working: # we need to scan the working dir
             subrepos = []
             if '.hgsub' in self.dirstate:
-                subrepos = ctx2.substate.keys()
+                subrepos = sorted(ctx2.substate)
             s = self.dirstate.status(match, subrepos, listignored,
                                      listclean, listunknown)
             cmp, modified, added, removed, deleted, unknown, ignored, clean = s
@@ -1806,6 +1727,7 @@
                         if key.startswith('dump'):
                             data = base85.b85decode(remoteobs[key])
                             self.obsstore.mergemarkers(tr, data)
+                    self.invalidatevolatilesets()
             if tr is not None:
                 tr.close()
         finally:
@@ -1841,6 +1763,7 @@
 
         if not remote.canpush():
             raise util.Abort(_("destination does not support push"))
+        unfi = self.unfiltered()
         # get local lock as we might write phase data
         locallock = self.lock()
         try:
@@ -1852,40 +1775,43 @@
             try:
                 # discovery
                 fci = discovery.findcommonincoming
-                commoninc = fci(self, remote, force=force)
+                commoninc = fci(unfi, remote, force=force)
                 common, inc, remoteheads = commoninc
                 fco = discovery.findcommonoutgoing
-                outgoing = fco(self, remote, onlyheads=revs,
+                outgoing = fco(unfi, remote, onlyheads=revs,
                                commoninc=commoninc, force=force)
 
 
                 if not outgoing.missing:
                     # nothing to push
-                    scmutil.nochangesfound(self.ui, self, outgoing.excluded)
+                    scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
                     ret = None
                 else:
                     # something to push
                     if not force:
                         # if self.obsstore == False --> no obsolete
                         # then, save the iteration
-                        if self.obsstore:
+                        if unfi.obsstore:
+                            # this message is here for 80-char-limit reasons
                             mso = _("push includes obsolete changeset: %s!")
-                            msu = _("push includes unstable changeset: %s!")
-                            msb = _("push includes bumped changeset: %s!")
+                            mst = "push includes %s changeset: %s!"
+                            # plain versions for i18n tool to detect them
+                            _("push includes unstable changeset: %s!")
+                            _("push includes bumped changeset: %s!")
+                            _("push includes divergent changeset: %s!")
                             # If we are to push if there is at least one
                             # obsolete or unstable changeset in missing, at
                             # least one of the missinghead will be obsolete or
                             # unstable. So checking heads only is ok
                             for node in outgoing.missingheads:
-                                ctx = self[node]
+                                ctx = unfi[node]
                                 if ctx.obsolete():
                                     raise util.Abort(mso % ctx)
-                                elif ctx.unstable():
-                                    raise util.Abort(msu % ctx)
-                                elif ctx.bumped():
-                                    raise util.Abort(msb % ctx)
-                        discovery.checkheads(self, remote, outgoing,
+                                elif ctx.troubled():
+                                    raise util.Abort(_(mst)
+                                                     % (ctx.troubles()[0],
+                                                        ctx))
+                        discovery.checkheads(unfi, remote, outgoing,
                                              remoteheads, newbranch,
                                              bool(inc))
 
@@ -1938,7 +1864,7 @@
                     cheads = [node for node in revs if node in common]
                     # and
                     # * commonheads parents on missing
-                    revset = self.set('%ln and parents(roots(%ln))',
+                    revset = unfi.set('%ln and parents(roots(%ln))',
                                      outgoing.commonheads,
                                      outgoing.missing)
                     cheads.extend(c.node() for c in revset)
@@ -1961,7 +1887,7 @@
                     # Get the list of all revs draft on remote by public here.
                     # XXX Beware that revset break if droots is not strictly
                     # XXX root we may want to ensure it is but it is costly
-                    outdated =  self.set('heads((%ln::%ln) and public())',
+                    outdated =  unfi.set('heads((%ln::%ln) and public())',
                                          droots, cheads)
                     for newremotehead in outdated:
                         r = remote.pushkey('phases',
@@ -1992,12 +1918,12 @@
         self.ui.debug("checking for updated bookmarks\n")
         rb = remote.listkeys('bookmarks')
         for k in rb.keys():
-            if k in self._bookmarks:
+            if k in unfi._bookmarks:
                 nr, nl = rb[k], hex(self._bookmarks[k])
-                if nr in self:
-                    cr = self[nr]
-                    cl = self[nl]
-                    if bookmarks.validdest(self, cr, cl):
+                if nr in unfi:
+                    cr = unfi[nr]
+                    cl = unfi[nl]
+                    if bookmarks.validdest(unfi, cr, cl):
                         r = remote.pushkey('bookmarks', k, nr, nl)
                         if r:
                             self.ui.status(_("updating bookmark %s\n") % k)
@@ -2033,7 +1959,7 @@
             bases = [nullid]
         csets, bases, heads = cl.nodesbetween(bases, heads)
         # We assume that all ancestors of bases are known
-        common = set(cl.ancestors([cl.rev(n) for n in bases]))
+        common = cl.ancestors([cl.rev(n) for n in bases])
         return self._changegroupsubset(common, csets, heads, source)
 
     def getlocalbundle(self, source, outgoing):
@@ -2059,8 +1985,8 @@
         """
         cl = self.changelog
         if common:
-            nm = cl.nodemap
-            common = [n for n in common if n in nm]
+            hasnode = cl.hasnode
+            common = [n for n in common if hasnode(n)]
         else:
             common = [nullid]
         if not heads:
@@ -2068,6 +1994,7 @@
         return self.getlocalbundle(source,
                                    discovery.outgoing(cl, common, heads))
 
+    @unfilteredmethod
     def _changegroupsubset(self, commonrevs, csets, heads, source):
 
         cl = self.changelog
@@ -2179,6 +2106,7 @@
         # to avoid a race we use changegroupsubset() (issue1320)
         return self.changegroupsubset(basenodes, self.heads(), source)
 
+    @unfilteredmethod
     def _changegroup(self, nodes, source):
         """Compute the changegroup of all nodes that we have that a recipient
         doesn't.  Return a chunkbuffer object whose read() method will return
@@ -2272,6 +2200,7 @@
 
         return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
 
+    @unfilteredmethod
     def addchangegroup(self, source, srctype, url, emptyok=False):
         """Add the changegroup returned by source.read() to this repo.
         srctype is a string like 'push', 'pull', or 'unbundle'.  url is
@@ -2382,6 +2311,9 @@
                         n = fl.node(new)
                         if n in needs:
                             needs.remove(n)
+                        else:
+                            raise util.Abort(
+                                _("received spurious file revlog entry"))
                     if not needs:
                         del needfiles[f]
             self.ui.progress(_('files'), None)
@@ -2410,7 +2342,7 @@
             self.ui.status(_("added %d changesets"
                              " with %d changes to %d files%s\n")
                              % (changesets, revisions, files, htext))
-            obsolete.clearobscaches(self)
+            self.invalidatevolatilesets()
 
             if changesets > 0:
                 p = lambda: cl.writepending() and self.root or ""
@@ -2444,7 +2376,11 @@
             tr.close()
 
             if changesets > 0:
-                self.updatebranchcache()
+                if srctype != 'strip':
+                    # During strip, the branchcache is invalid but the
+                    # coming call to `destroyed` will repair it.
+                    # In other cases we can safely update the cache on disk.
+                    branchmap.updatecache(self.filtered('served'))
                 def runhooks():
                     # forcefully update the on-disk branch cache
                     self.ui.debug("updating the branch cache\n")
@@ -2538,12 +2474,20 @@
                 for bheads in rbranchmap.itervalues():
                     rbheads.extend(bheads)
 
-                self.branchcache = rbranchmap
                 if rbheads:
                     rtiprev = max((int(self.changelog.rev(node))
                             for node in rbheads))
-                    self._writebranchcache(self.branchcache,
-                            self[rtiprev].node(), rtiprev)
+                    cache = branchmap.branchcache(rbranchmap,
+                                                  self[rtiprev].node(),
+                                                  rtiprev)
+                    # Try to stick it as low as possible
+                    # filters above 'served' are unlikely to be fetched
+                    # from a clone
+                    for candidate in ('base', 'immutable', 'served'):
+                        rview = self.filtered(candidate)
+                        if cache.validfor(rview):
+                            self._branchcaches[candidate] = cache
+                            cache.write(rview)
+                            break
             self.invalidate()
             return len(self.heads()) + 1
         finally:
@@ -2607,7 +2551,7 @@
             fp.write(text)
         finally:
             fp.close()
-        return self.pathto(fp.name[len(self.root)+1:])
+        return self.pathto(fp.name[len(self.root) + 1:])
 
 # used to avoid circular references so destructors work
 def aftertrans(files):
diff -r 7648b87e76db -r f5fbe15ca744 mercurial/manifest.py
--- a/mercurial/manifest.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/mercurial/manifest.py	Sat Jan 19 17:24:33 2013 -0600
@@ -117,15 +117,23 @@
         # apply the changes collected during the bisect loop to our addlist
         # return a delta suitable for addrevision
         def addlistdelta(addlist, x):
-            # start from the bottom up
-            # so changes to the offsets don't mess things up.
-            for start, end, content in reversed(x):
+            # for large addlist arrays, building a new array is cheaper
+            # than repeatedly modifying the existing one
+            currentposition = 0
+            newaddlist = array.array('c')
+
+            for start, end, content in x:
+                newaddlist += addlist[currentposition:start]
                 if content:
-                    addlist[start:end] = array.array('c', content)
-                else:
-                    del addlist[start:end]
-            return "".join(struct.pack(">lll", start, end, len(content))
+                    newaddlist += array.array('c', content)
+
+                currentposition = end
+
+            newaddlist += addlist[currentposition:]
+
+            deltatext = "".join(struct.pack(">lll", start, end, len(content))
                            + content for start, end, content in x)
+            return deltatext, newaddlist
 
         def checkforbidden(l):
             for f in l:
@@ -194,7 +202,8 @@
             if dstart is not None:
                 delta.append([dstart, dend, "".join(dline)])
             # apply the delta to the addlist, and get a delta for addrevision
-            cachedelta = (self.rev(p1), addlistdelta(addlist, delta))
+            deltatext, addlist = addlistdelta(addlist, delta)
+            cachedelta = (self.rev(p1), deltatext)
             arraytext = addlist
             text = util.buffer(arraytext)
 
diff -r 7648b87e76db -r f5fbe15ca744 mercurial/mdiff.py
--- a/mercurial/mdiff.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/mercurial/mdiff.py	Sat Jan 19 17:24:33 2013 -0600
@@ -7,7 +7,7 @@
 
 from i18n import _
 import bdiff, mpatch, util
-import re, struct
+import re, struct, base85, zlib
 
 def splitnewlines(text):
     '''like str.splitlines, but only split on newlines.'''
@@ -142,20 +142,7 @@
             yield s, type
         yield s1, '='
 
-def diffline(revs, a, b, opts):
-    parts = ['diff']
-    if opts.git:
-        parts.append('--git')
-    if revs and not opts.git:
-        parts.append(' '.join(["-r %s" % rev for rev in revs]))
-    if opts.git:
-        parts.append('a/%s' % a)
-        parts.append('b/%s' % b)
-    else:
-        parts.append(a)
-    return ' '.join(parts) + '\n'
-
-def unidiff(a, ad, b, bd, fn1, fn2, r=None, opts=defaultopts):
+def unidiff(a, ad, b, bd, fn1, fn2, opts=defaultopts):
     def datetag(date, fn=None):
         if not opts.git and not opts.nodates:
             return '\t%s\n' % date
@@ -206,9 +193,6 @@
         if l[ln][-1] != '\n':
             l[ln] += "\n\ No newline at end of file\n"
 
-    if r:
-        l.insert(0, diffline(r, fn1, fn2, opts))
-
     return "".join(l)
 
 # creates a headerless unified diff
@@ -314,6 +298,41 @@
         for x in yieldhunk(hunk):
             yield x
 
+def b85diff(to, tn):
+    '''return a base85-encoded binary diff'''
+    def fmtline(line):
+        l = len(line)
+        if l <= 26:
+            l = chr(ord('A') + l - 1)
+        else:
+            l = chr(l - 26 + ord('a') - 1)
+        return '%c%s\n' % (l, base85.b85encode(line, True))
+
+    def chunk(text, csize=52):
+        l = len(text)
+        i = 0
+        while i < l:
+            yield text[i:i + csize]
+            i += csize
+
+    if to is None:
+        to = ''
+    if tn is None:
+        tn = ''
+
+    if to == tn:
+        return ''
+
+    # TODO: deltas
+    ret = []
+    ret.append('GIT binary patch\n')
+    ret.append('literal %s\n' % len(tn))
+    for l in chunk(zlib.compress(tn)):
+        ret.append(fmtline(l))
+    ret.append('\n')
+
+    return ''.join(ret)
+
 def patchtext(bin):
     pos = 0
     t = []
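
Each payload line emitted by b85diff starts with a single length character,
as in the git binary patch format: 'A'..'Z' encode decoded lengths 1..26 and
'a'..'z' encode 27..52. A small sketch of the inverse of fmtline's length
byte (an illustrative helper, not part of the patch):

    def linelen(c):
        # invert the length character written by fmtline
        if 'A' <= c <= 'Z':
            return ord(c) - ord('A') + 1
        return ord(c) - ord('a') + 27

    # linelen('A') == 1, linelen('Z') == 26, linelen('a') == 27
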
diff -r 7648b87e76db -r f5fbe15ca744 mercurial/merge.py
--- a/mercurial/merge.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/mercurial/merge.py	Sat Jan 19 17:24:33 2013 -0600
@@ -7,7 +7,7 @@
 
 from node import nullid, nullrev, hex, bin
 from i18n import _
-import error, scmutil, util, filemerge, copies, subrepo
+import error, util, filemerge, copies, subrepo
 import errno, os, shutil
 
 class mergestate(object):
@@ -45,11 +45,11 @@
                 f.write("\0".join([d] + v) + "\n")
             f.close()
             self._dirty = False
-    def add(self, fcl, fco, fca, fd, flags):
+    def add(self, fcl, fco, fca, fd):
         hash = util.sha1(fcl.path()).hexdigest()
         self._repo.opener.write("merge/" + hash, fcl.data())
         self._state[fd] = ['u', hash, fcl.path(), fca.path(),
-                           hex(fca.filenode()), fco.path(), flags]
+                           hex(fca.filenode()), fco.path(), fcl.flags()]
         self._dirty = True
     def __contains__(self, dfile):
         return dfile in self._state
@@ -67,12 +67,22 @@
         if self[dfile] == 'r':
             return 0
         state, hash, lfile, afile, anode, ofile, flags = self._state[dfile]
+        fcd = wctx[dfile]
+        fco = octx[ofile]
+        fca = self._repo.filectx(afile, fileid=anode)
+        # "premerge" x flags
+        flo = fco.flags()
+        fla = fca.flags()
+        if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
+            if fca.node() == nullid:
+                self._repo.ui.warn(_('warning: cannot merge flags for %s\n') %
+                                   afile)
+            elif flags == fla:
+                flags = flo
+        # restore local
         f = self._repo.opener("merge/" + hash)
         self._repo.wwrite(dfile, f.read(), flags)
         f.close()
-        fcd = wctx[dfile]
-        fco = octx[ofile]
-        fca = self._repo.filectx(afile, fileid=anode)
         r = filemerge.filemerge(self._repo, self._local, lfile, fcd, fco, fca)
         if r is None:
             # no real conflict
@@ -162,18 +172,18 @@
     as removed.
     """
 
-    action = []
+    actions = []
     state = branchmerge and 'r' or 'f'
     for f in wctx.deleted():
         if f not in mctx:
-            action.append((f, state))
+            actions.append((f, state))
 
     if not branchmerge:
         for f in wctx.removed():
             if f not in mctx:
-                action.append((f, "f"))
+                actions.append((f, "f"))
 
-    return action
+    return actions
 
 def manifestmerge(repo, p1, p2, pa, overwrite, partial):
     """
@@ -183,44 +193,19 @@
     partial = function to filter file lists
     """
 
-    def fmerge(f, f2, fa):
-        """merge flags"""
-        a, m, n = ma.flags(fa), m1.flags(f), m2.flags(f2)
-        if m == n: # flags agree
-            return m # unchanged
-        if m and n and not a: # flags set, don't agree, differ from parent
-            r = repo.ui.promptchoice(
-                _(" conflicting flags for %s\n"
-                  "(n)one, e(x)ec or sym(l)ink?") % f,
-                (_("&None"), _("E&xec"), _("Sym&link")), 0)
-            if r == 1:
-                return "x" # Exec
-            if r == 2:
-                return "l" # Symlink
-            return ""
-        if m and m != a: # changed from a to m
-            return m
-        if n and n != a: # changed from a to n
-            if (n == 'l' or a == 'l') and m1.get(f) != ma.get(f):
-                # can't automatically merge symlink flag when there
-                # are file-level conflicts here, let filemerge take
-                # care of it
-                return m
-            return n
-        return '' # flag was cleared
-
     def act(msg, m, f, *args):
         repo.ui.debug(" %s: %s -> %s\n" % (f, msg, m))
-        action.append((f, m) + args)
+        actions.append((f, m) + args)
 
-    action, copy = [], {}
+    actions, copy, movewithdir = [], {}, {}
 
     if overwrite:
         pa = p1
     elif pa == p2: # backwards
         pa = p1.p1()
     elif pa and repo.ui.configbool("merge", "followcopies", True):
-        copy, diverge, renamedelete = copies.mergecopies(repo, p1, p2, pa)
+        ret = copies.mergecopies(repo, p1, p2, pa)
+        copy, movewithdir, diverge, renamedelete = ret
         for of, fl in diverge.iteritems():
             act("divergent renames", "dr", of, fl)
         for of, fl in renamedelete.iteritems():
@@ -233,40 +218,48 @@
 
     m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
     copied = set(copy.values())
+    copied.update(movewithdir.values())
 
     if '.hgsubstate' in m1:
         # check whether sub state is modified
-        for s in p1.substate:
+        for s in sorted(p1.substate):
             if p1.sub(s).dirty():
                 m1['.hgsubstate'] += "+"
                 break
 
     # Compare manifests
-    for f, n in m1.iteritems():
+    for f, n in sorted(m1.iteritems()):
         if partial and not partial(f):
             continue
         if f in m2:
-            rflags = fmerge(f, f, f)
+            n2 = m2[f]
+            fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
+            nol = 'l' not in fl1 + fl2 + fla
             a = ma.get(f, nullid)
-            if n == m2[f] or m2[f] == a: # same or local newer
-                # is file locally modified or flags need changing?
-                # dirstate flags may need to be made current
-                if m1.flags(f) != rflags or n[20:]:
-                    act("update permissions", "e", f, rflags)
-            elif n == a: # remote newer
-                act("remote is newer", "g", f, rflags)
-            else: # both changed
-                act("versions differ", "m", f, f, f, rflags, False)
+            if n == n2 and fl1 == fl2:
+                pass # same - keep local
+            elif n2 == a and fl2 == fla:
+                pass # remote unchanged - keep local
+            elif n == a and fl1 == fla: # local unchanged - use remote
+                if n == n2: # optimization: keep local content
+                    act("update permissions", "e", f, fl2)
+                else:
+                    act("remote is newer", "g", f, fl2)
+            elif nol and n2 == a: # remote only changed 'x'
+                act("update permissions", "e", f, fl2)
+            elif nol and n == a: # local only changed 'x'
+                act("remote is newer", "g", f, fl)
+            else: # both changed something
+                act("versions differ", "m", f, f, f, False)
         elif f in copied: # files we'll deal with on m2 side
             pass
+        elif f in movewithdir: # directory rename
+            f2 = movewithdir[f]
+            act("remote renamed directory to " + f2, "d", f, None, f2,
+                m1.flags(f))
         elif f in copy:
             f2 = copy[f]
-            if f2 not in m2: # directory rename
-                act("remote renamed directory to " + f2, "d",
-                    f, None, f2, m1.flags(f))
-            else: # case 2 A,B/B/B or case 4,21 A/B/B
-                act("local copied/moved to " + f2, "m",
-                    f, f2, f, fmerge(f, f2, f2), False)
+            act("local copied/moved to " + f2, "m", f, f2, f, False)
         elif f in ma: # clean, a different, no remote
             if n != ma[f]:
                 if repo.ui.promptchoice(
@@ -281,28 +274,28 @@
             else:
                 act("other deleted", "r", f)
 
-    for f, n in m2.iteritems():
+    for f, n in sorted(m2.iteritems()):
         if partial and not partial(f):
             continue
         if f in m1 or f in copied: # files already visited
             continue
-        if f in copy:
+        if f in movewithdir:
+            f2 = movewithdir[f]
+            act("local renamed directory to " + f2, "d", None, f, f2,
+                m2.flags(f))
+        elif f in copy:
             f2 = copy[f]
-            if f2 not in m1: # directory rename
-                act("local renamed directory to " + f2, "d",
-                    None, f, f2, m2.flags(f))
-            elif f2 in m2: # rename case 1, A/A,B/A
+            if f2 in m2:
                 act("remote copied to " + f, "m",
-                    f2, f, f, fmerge(f2, f, f2), False)
-            else: # case 3,20 A/B/A
+                    f2, f, f, False)
+            else:
                 act("remote moved to " + f, "m",
-                    f2, f, f, fmerge(f2, f, f2), True)
+                    f2, f, f, True)
         elif f not in ma:
             if (not overwrite
                 and _checkunknownfile(repo, p1, p2, f)):
-                rflags = fmerge(f, f, f)
                 act("remote differs from untracked local",
-                    "m", f, f, f, rflags, False)
+                    "m", f, f, f, False)
             else:
                 act("remote created", "g", f, m2.flags(f))
         elif n != ma[f]:
@@ -312,12 +305,12 @@
                 (_("&Changed"), _("&Deleted")), 0) == 0:
                 act("prompt recreating", "g", f, m2.flags(f))
 
-    return action
+    return actions
 
 def actionkey(a):
-    return a[1] == 'r' and -1 or 0, a
+    return a[1] == "r" and -1 or 0, a
 
-def applyupdates(repo, action, wctx, mctx, actx, overwrite):
+def applyupdates(repo, actions, wctx, mctx, actx, overwrite):
     """apply the merge action list to the working directory
 
     wctx is the working copy context
@@ -332,14 +325,14 @@
     ms = mergestate(repo)
     ms.reset(wctx.p1().node())
     moves = []
-    action.sort(key=actionkey)
+    actions.sort(key=actionkey)
 
     # prescan for merges
-    for a in action:
+    for a in actions:
         f, m = a[:2]
-        if m == 'm': # merge
-            f2, fd, flags, move = a[2:]
-            if f == '.hgsubstate': # merged internally
+        if m == "m": # merge
+            f2, fd, move = a[2:]
+            if fd == '.hgsubstate': # merged internally
                 continue
             repo.ui.debug("preserving %s for resolve of %s\n" % (f, fd))
             fcl = wctx[f]
@@ -353,45 +346,42 @@
                 fca = fcl.ancestor(fco, actx)
             if not fca:
                 fca = repo.filectx(f, fileid=nullrev)
-            ms.add(fcl, fco, fca, fd, flags)
+            ms.add(fcl, fco, fca, fd)
             if f != fd and move:
                 moves.append(f)
 
-    audit = scmutil.pathauditor(repo.root)
+    audit = repo.wopener.audit
 
     # remove renamed files after safely stored
     for f in moves:
         if os.path.lexists(repo.wjoin(f)):
             repo.ui.debug("removing %s\n" % f)
             audit(f)
-            os.unlink(repo.wjoin(f))
+            util.unlinkpath(repo.wjoin(f))
 
-    numupdates = len(action)
-    for i, a in enumerate(action):
+    numupdates = len(actions)
+    for i, a in enumerate(actions):
         f, m = a[:2]
         repo.ui.progress(_('updating'), i + 1, item=f, total=numupdates,
                          unit=_('files'))
-        if f and f[0] == "/":
-            continue
         if m == "r": # remove
             repo.ui.note(_("removing %s\n") % f)
             audit(f)
             if f == '.hgsubstate': # subrepo states need updating
                 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
             try:
-                util.unlinkpath(repo.wjoin(f))
+                util.unlinkpath(repo.wjoin(f), ignoremissing=True)
             except OSError, inst:
-                if inst.errno != errno.ENOENT:
-                    repo.ui.warn(_("update failed to remove %s: %s!\n") %
-                                 (f, inst.strerror))
+                repo.ui.warn(_("update failed to remove %s: %s!\n") %
+                             (f, inst.strerror))
             removed += 1
         elif m == "m": # merge
-            if f == '.hgsubstate': # subrepo states need updating
+            f2, fd, move = a[2:]
+            if fd == '.hgsubstate': # subrepo states need updating
                 subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
                                  overwrite)
                 continue
-            f2, fd, flags, move = a[2:]
-            repo.wopener.audit(fd)
+            audit(fd)
             r = ms.resolve(fd, wctx, mctx)
             if r is not None and r > 0:
                 unresolved += 1
@@ -400,17 +390,10 @@
                     updated += 1
                 else:
                     merged += 1
-            if (move and repo.dirstate.normalize(fd) != f
-                and os.path.lexists(repo.wjoin(f))):
-                repo.ui.debug("removing %s\n" % f)
-                audit(f)
-                os.unlink(repo.wjoin(f))
         elif m == "g": # get
             flags = a[2]
             repo.ui.note(_("getting %s\n") % f)
-            t = mctx.filectx(f).data()
-            repo.wwrite(f, t, flags)
-            t = None
+            repo.wwrite(f, mctx.filectx(f).data(), flags)
             updated += 1
             if f == '.hgsubstate': # subrepo states need updating
                 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
@@ -419,13 +402,11 @@
             if f:
                 repo.ui.note(_("moving %s to %s\n") % (f, fd))
                 audit(f)
-                t = wctx.filectx(f).data()
-                repo.wwrite(fd, t, flags)
+                repo.wwrite(fd, wctx.filectx(f).data(), flags)
                 util.unlinkpath(repo.wjoin(f))
             if f2:
                 repo.ui.note(_("getting %s to %s\n") % (f2, fd))
-                t = mctx.filectx(f2).data()
-                repo.wwrite(fd, t, flags)
+                repo.wwrite(fd, mctx.filectx(f2).data(), flags)
             updated += 1
         elif m == "dr": # divergent renames
             fl = a[2]
@@ -441,17 +422,39 @@
                 repo.ui.warn(" %s\n" % nf)
         elif m == "e": # exec
             flags = a[2]
-            repo.wopener.audit(f)
+            audit(f)
             util.setflags(repo.wjoin(f), 'l' in flags, 'x' in flags)
+            updated += 1
     ms.commit()
     repo.ui.progress(_('updating'), None, total=numupdates, unit=_('files'))
 
     return updated, merged, removed, unresolved
 
-def recordupdates(repo, action, branchmerge):
+def calculateupdates(repo, tctx, mctx, ancestor, branchmerge, force, partial):
+    "Calculate the actions needed to merge mctx into tctx"
+    actions = []
+    folding = not util.checkcase(repo.path)
+    if folding:
+        # collision check is not needed for clean update
+        if (not branchmerge and
+            (force or not tctx.dirty(missing=True, branch=False))):
+            _checkcollision(mctx, None)
+        else:
+            _checkcollision(mctx, (tctx, ancestor))
+    if not force:
+        _checkunknown(repo, tctx, mctx)
+    if tctx.rev() is None:
+        actions += _forgetremoved(tctx, mctx, branchmerge)
+    actions += manifestmerge(repo, tctx, mctx,
+                             ancestor,
+                             force and not branchmerge,
+                             partial)
+    return actions
+
+def recordupdates(repo, actions, branchmerge):
     "record merge actions to the dirstate"
 
-    for a in action:
+    for a in actions:
         f, m = a[:2]
         if m == "r": # remove
             if branchmerge:
@@ -471,7 +474,7 @@
             else:
                 repo.dirstate.normal(f)
         elif m == "m": # merge
-            f2, fd, flag, move = a[2:]
+            f2, fd, move = a[2:]
             if branchmerge:
                 # We've done a branch merge, mark this file as merged
                 # so that we properly record the merger later
@@ -590,7 +593,7 @@
             if not force and (wc.files() or wc.deleted()):
                 raise util.Abort(_("outstanding uncommitted changes"),
                                  hint=_("use 'hg status' to list changes"))
-            for s in wc.substate:
+            for s in sorted(wc.substate):
                 if wc.sub(s).dirty():
                     raise util.Abort(_("outstanding uncommitted changes in "
                                        "subrepository '%s'") % s)
@@ -609,19 +612,8 @@
                 pa = p1
 
         ### calculate phase
-        action = []
-        folding = not util.checkcase(repo.path)
-        if folding:
-            # collision check is not needed for clean update
-            if (not branchmerge and
-                (force or not wc.dirty(missing=True, branch=False))):
-                _checkcollision(p2, None)
-            else:
-                _checkcollision(p2, (wc, pa))
-        if not force:
-            _checkunknown(repo, wc, p2)
-        action += _forgetremoved(wc, p2, branchmerge)
-        action += manifestmerge(repo, wc, p2, pa, overwrite, partial)
+        actions = calculateupdates(repo, wc, p2, pa,
+                                   branchmerge, force, partial)
 
         ### apply phase
         if not branchmerge: # just jump to the new rev
@@ -629,11 +621,11 @@
         if not partial:
             repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
 
-        stats = applyupdates(repo, action, wc, p2, pa, overwrite)
+        stats = applyupdates(repo, actions, wc, p2, pa, overwrite)
 
         if not partial:
             repo.setparents(fp1, fp2)
-            recordupdates(repo, action, branchmerge)
+            recordupdates(repo, actions, branchmerge)
             if not branchmerge:
                 repo.dirstate.setbranch(p2.branch())
     finally:
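
After this change a merge action is a plain tuple (file, type, ...), e.g.
(f, "m", f2, fd, move) for merges, and actionkey sorts removes ahead of
everything else. A small illustration (file names invented):

    def actionkey(a):
        return a[1] == "r" and -1 or 0, a

    actions = [("foo", "g", ""),
               ("bar", "r"),
               ("baz", "m", "baz", "baz", False)]
    actions.sort(key=actionkey)
    # removes first, then plain tuple order:
    # [('bar', 'r'), ('baz', 'm', 'baz', 'baz', False), ('foo', 'g', '')]
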
diff -r 7648b87e76db -r f5fbe15ca744 mercurial/obsolete.py
--- a/mercurial/obsolete.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/mercurial/obsolete.py	Sat Jan 19 17:24:33 2013 -0600
@@ -402,6 +402,200 @@
                     seen.add(suc)
                     remaining.add(suc)
 
+def successorssets(repo, initialnode, cache=None):
+    """Return all set of successors of initial nodes
+
+    Successors set of changeset A are a group of revision that succeed A. It
+    succeed A as a consistent whole, each revision being only partial
+    replacement.  Successors set contains non-obsolete changeset only.
+
+    In most cases a changeset A have zero (changeset pruned) or a single
+    successors set that contains a single successor (changeset A replaced by
+    A')
+
+    When changeset is split, it results successors set containing more than
+    a single element. Divergent rewriting will result in multiple successors
+    sets.
+
+    They are returned as a list of tuples containing all valid successors sets.
+
+    Final successors unknown locally are considered plain prune (obsoleted
+    without successors).
+
+    The optional `cache` parameter is a dictionary that may contains
+    precomputed successors sets. It is meant to reuse the computation of
+    previous call to `successorssets` when multiple calls are made at the same
+    time. The cache dictionary is updated in place. The caller is responsible
+    for its live spawn. Code that makes multiple calls to `successorssets`
+    *must* use this cache mechanism or suffer terrible performances."""
+
+    succmarkers = repo.obsstore.successors
+
+    # Stack of nodes we search successors sets for
+    toproceed = [initialnode]
+    # set version of the above list for fast loop detection
+    # elements added to "toproceed" must also be added here
+    stackedset = set(toproceed)
+    if cache is None:
+        cache = {}
+
+    # This while loop is the flattened version of a recursive search for
+    # successors sets
+    #
+    # def successorssets(x):
+    #    successors = directsuccessors(x)
+    #    ss = [[]]
+    #    for succ in directsuccessors(x):
+    #        # product as in itertools cartesian product
+    #        ss = product(ss, successorssets(succ))
+    #    return ss
+    #
+    # But we can not use plain recursive calls here:
+    # - that would blow the python call stack
+    # - obsolescence markers may have cycles, we need to handle them.
+    #
+    # The `toproceed` list acts as our call stack. Every node we search
+    # successors sets for is stacked there.
+    #
+    # The `stackedset` is the set version of this stack, used to check if a
+    # node is already stacked. This check is used to detect cycles and
+    # prevent infinite loops.
+    #
+    # The successors sets of all nodes are stored in the `cache` dictionary.
+    #
+    # After this while loop ends, we use the cache to return the successors
+    # sets for the node requested by the caller.
+    while toproceed:
+        # Every iteration tries to compute the successors sets of the topmost
+        # node of the stack: CURRENT.
+        #
+        # There are four possible outcomes:
+        #
+        # 1) We already know the successors sets of CURRENT:
+        #    -> mission accomplished, pop it from the stack.
+        # 2) The node is not obsolete:
+        #    -> the node is its own successors set. Add it to the cache.
+        # 3) We do not know the successors sets of some direct successors
+        #    of CURRENT: -> we add those successors to the stack.
+        # 4) We know the successors sets of all direct successors of CURRENT:
+        #    -> We can compute CURRENT successors set and add it to the
+        #       cache.
+        #
+        current = toproceed[-1]
+        if current in cache:
+            # case (1): We already know the successors sets
+            stackedset.remove(toproceed.pop())
+        elif current not in succmarkers:
+            # case (2): The node is not obsolete.
+            if current in repo:
+                # We have a valid last successor.
+                cache[current] = [(current,)]
+            else:
+                # Final obsolete version is unknown locally.
+                # Do not count that as a valid successor.
+                cache[current] = []
+        else:
+            # cases (3) and (4)
+            #
+            # We proceed in two phases. Phase 1 aims to distinguish case (3)
+            # from case (4):
+            #
+            #     For each direct successor of CURRENT, we check whether its
+            #     successors sets are known. If they are not, we stack the
+            #     unknown node and proceed to the next iteration of the while
+            #     loop. (case 3)
+            #
+            #     During this step, we may detect obsolescence cycles: a node
+            #     with unknown successors sets but already in the call stack.
+            #     In such a situation, we arbitrarily set the successors sets
+            #     the node to nothing (node pruned) to break the cycle.
+            #
+            #     If no break was encountered, we proceed to phase 2.
+            #
+            # Phase 2 computes successors sets of CURRENT (case 4); see details
+            # in phase 2 itself.
+            #
+            # Note the two levels of iteration in each phase.
+            # - The first one handles obsolescence markers using CURRENT as
+            #   precursor (successors markers of CURRENT).
+            #
+            #   Having multiple entries here means divergence.
+            #
+            # - The second one handles successors defined in each marker.
+            #
+            #   Having none means a pruned node, multiple successors mean a
+            #   split, and a single successor is a standard replacement.
+            #
+            for mark in sorted(succmarkers[current]):
+                for suc in mark[1]:
+                    if suc not in cache:
+                        if suc in stackedset:
+                            # cycle breaking
+                            cache[suc] = []
+                        else:
+                            # case (3): if we have not computed the
+                            # successors sets of one of those successors, we
+                            # add it to the `toproceed` stack and stop all
+                            # work for this iteration.
+                            toproceed.append(suc)
+                            stackedset.add(suc)
+                            break
+                else:
+                    continue
+                break
+            else:
+                # case (4): we know all successors sets of all direct
+                # successors
+                #
+                # The successors set contributed by each marker depends on
+                # the successors sets of all its "successors" nodes.
+                #
+                # Each different marker is a divergence in the obsolescence
+                # history. It contributes successors sets distinct from those
+                # of other markers.
+                #
+                # Within a marker, a successor may have divergent successors
+                # sets. In such a case, the marker will contribute multiple
+                # divergent successors sets. If multiple successors have
+                # divergent successors sets, a cartesian product is used.
+                #
+                # At the end we post-process successors sets to remove
+                # duplicated entries and successors sets that are strict
+                # subsets of another one.
+                succssets = []
+                for mark in sorted(succmarkers[current]):
+                    # successors sets contributed by this marker
+                    markss = [[]]
+                    for suc in mark[1]:
+                        # cartesian product with previous successors
+                        productresult = []
+                        for prefix in markss:
+                            for suffix in cache[suc]:
+                                newss = list(prefix)
+                                for part in suffix:
+                                    # do not duplicate entries in a successors
+                                    # set; the first entry wins.
+                                    if part not in newss:
+                                        newss.append(part)
+                                productresult.append(newss)
+                        markss = productresult
+                    succssets.extend(markss)
+                # remove duplicates and subsets
+                seen = []
+                final = []
+                candidate = sorted(((set(s), s) for s in succssets if s),
+                                   key=lambda x: len(x[1]), reverse=True)
+                for setversion, listversion in candidate:
+                    for seenset in seen:
+                        if setversion.issubset(seenset):
+                            break
+                    else:
+                        final.append(listversion)
+                        seen.append(setversion)
+                final.reverse() # put small successors sets first
+                cache[current] = final
+    return cache[initialnode]
+
 def _knownrevs(repo, nodes):
     """yield revision numbers of known nodes passed in parameters
 
@@ -426,6 +620,7 @@
     """Return the set of revision that belong to the  set
 
     Such access may compute the set and cache it for future use"""
+    repo = repo.unfiltered()
     if not repo.obsstore:
         return ()
     if name not in repo.obsstore.caches:
@@ -454,27 +649,35 @@
 def _computeobsoleteset(repo):
     """the set of obsolete revisions"""
     obs = set()
-    nm = repo.changelog.nodemap
+    getrev = repo.changelog.nodemap.get
+    getphase = repo._phasecache.phase
     for node in repo.obsstore.successors:
-        rev = nm.get(node)
-        if rev is not None:
+        rev = getrev(node)
+        if rev is not None and getphase(repo, rev):
             obs.add(rev)
-    return set(repo.revs('%ld - public()', obs))
+    return obs
 
 @cachefor('unstable')
 def _computeunstableset(repo):
     """the set of non obsolete revisions with obsolete parents"""
-    return set(repo.revs('(obsolete()::) - obsolete()'))
+    # revset is not efficient enough here
+    # we do (obsolete()::) - obsolete() by hand
+    obs = getrevs(repo, 'obsolete')
+    if not obs:
+        return set()
+    cl = repo.changelog
+    return set(r for r in cl.descendants(obs) if r not in obs)
 
 @cachefor('suspended')
 def _computesuspendedset(repo):
     """the set of obsolete parents with non obsolete descendants"""
-    return set(repo.revs('obsolete() and obsolete()::unstable()'))
+    suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
+    return set(r for r in getrevs(repo, 'obsolete') if r in suspended)
 
 @cachefor('extinct')
 def _computeextinctset(repo):
     """the set of obsolete parents without non obsolete descendants"""
-    return set(repo.revs('obsolete() - obsolete()::unstable()'))
+    return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')
 
 
 @cachefor('bumped')
@@ -489,6 +692,28 @@
     query = '%ld - obsolete() - public()'
     return set(repo.revs(query, _knownrevs(repo, successors)))
 
+@cachefor('divergent')
+def _computedivergentset(repo):
+    """the set of rev that compete to be the final successors of some revision.
+    """
+    divergent = set()
+    obsstore = repo.obsstore
+    newermap = {}
+    for ctx in repo.set('(not public()) - obsolete()'):
+        mark = obsstore.precursors.get(ctx.node(), ())
+        toprocess = set(mark)
+        while toprocess:
+            prec = toprocess.pop()[0]
+            if prec not in newermap:
+                successorssets(repo, prec, newermap)
+            newer = [n for n in newermap[prec] if n]
+            if len(newer) > 1:
+                divergent.add(ctx.rev())
+                break
+            toprocess.update(obsstore.precursors.get(prec, ()))
+    return divergent
+
+
 def createmarkers(repo, relations, flag=0, metadata=None):
     """Add obsolete markers between changesets in a repo
 
@@ -521,6 +746,7 @@
             if nprec in nsucs:
                 raise util.Abort("changeset %s cannot obsolete itself" % prec)
             repo.obsstore.create(tr, nprec, nsucs, flag, metadata)
+            repo.filteredrevcache.clear()
         tr.close()
     finally:
         tr.release()
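
The phase 2 step above combines, for each marker, the successors sets of
its successors by cartesian product. A standalone sketch of just that
combination (assumed input shapes, not the patch's exact code):

    def combine(persuccessor):
        # persuccessor: for each successor in one marker, the list of its
        # successors sets; returns the marker's contributed sets
        markss = [[]]
        for sets in persuccessor:
            markss = [prefix + [p for p in suffix if p not in prefix]
                      for prefix in markss for suffix in sets]
        return markss

    # combine([[('B',)], [('C',), ('D', 'E')]])
    # -> [['B', 'C'], ['B', 'D', 'E']]

As the docstring insists, callers that query many nodes should share one
cache dictionary across successorssets() calls.
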
diff -r 7648b87e76db -r f5fbe15ca744 mercurial/osutil.c
--- a/mercurial/osutil.c	Mon Jan 14 23:14:45 2013 +0900
+++ b/mercurial/osutil.c	Sat Jan 19 17:24:33 2013 -0600
@@ -276,6 +276,16 @@
 	return -1;
 }
 
+static PyObject *makestat(const struct stat *st)
+{
+	PyObject *stat;
+
+	stat = PyObject_CallObject((PyObject *)&listdir_stat_type, NULL);
+	if (stat)
+		memcpy(&((struct listdir_stat *)stat)->st, st, sizeof(*st));
+	return stat;
+}
+
 static PyObject *_listdir(char *path, int pathlen, int keepstat, char *skip)
 {
 	PyObject *list, *elem, *stat, *ret = NULL;
@@ -351,10 +361,9 @@
 		}
 
 		if (keepstat) {
-			stat = PyObject_CallObject((PyObject *)&listdir_stat_type, NULL);
+			stat = makestat(&st);
 			if (!stat)
 				goto error;
-			memcpy(&((struct listdir_stat *)stat)->st, &st, sizeof(st));
 			elem = Py_BuildValue("siN", ent->d_name, kind, stat);
 		} else
 			elem = Py_BuildValue("si", ent->d_name, kind);
@@ -380,6 +389,55 @@
 	return ret;
 }
 
+static PyObject *statfiles(PyObject *self, PyObject *args)
+{
+	PyObject *names, *stats;
+	Py_ssize_t i, count;
+
+	if (!PyArg_ParseTuple(args, "O:statfiles", &names))
+		return NULL;
+
+	count = PySequence_Length(names);
+	if (count == -1) {
+		PyErr_SetString(PyExc_TypeError, "not a sequence");
+		return NULL;
+	}
+
+	stats = PyList_New(count);
+	if (stats == NULL)
+		return NULL;
+
+	for (i = 0; i < count; i++) {
+		PyObject *stat, *pypath;
+		struct stat st;
+		int ret, kind;
+		char *path;
+
+		/* PySequence_GetItem returns a new reference; drop it once
+		   we are done with the buffer it owns. */
+		pypath = PySequence_GetItem(names, i);
+		if (pypath == NULL)
+			goto bail;
+		path = PyString_AsString(pypath);
+		if (path == NULL) {
+			Py_DECREF(pypath);
+			PyErr_SetString(PyExc_TypeError, "not a string");
+			goto bail;
+		}
+		ret = lstat(path, &st);
+		Py_DECREF(pypath);
+		kind = st.st_mode & S_IFMT;
+		if (ret != -1 && (kind == S_IFREG || kind == S_IFLNK)) {
+			stat = makestat(&st);
+			if (stat == NULL)
+				goto bail;
+			PyList_SET_ITEM(stats, i, stat);
+		} else {
+			Py_INCREF(Py_None);
+			PyList_SET_ITEM(stats, i, Py_None);
+		}
+	}
+
+	return stats;
+
+bail:
+	Py_DECREF(stats);
+	return NULL;
+}
+
 #endif /* ndef _WIN32 */
 
 static PyObject *listdir(PyObject *self, PyObject *args, PyObject *kwargs)
@@ -544,6 +602,10 @@
 	{"posixfile", (PyCFunction)posixfile, METH_VARARGS | METH_KEYWORDS,
 	 "Open a file with POSIX-like semantics.\n"
 "On error, this function may raise either a WindowsError or an IOError."},
+#else
+	{"statfiles", (PyCFunction)statfiles, METH_VARARGS | METH_KEYWORDS,
+	 "stat a series of files or symlinks\n"
+"Returns None for non-existent entries and entries of other types.\n"},
 #endif
 #ifdef __APPLE__
 	{
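
From Python the new entry point behaves like the pure posix.statfiles in
this same patch. A hypothetical usage sketch (the paths list is invented):

    from mercurial import osutil

    paths = ['a', 'dir/b']
    for path, st in zip(paths, osutil.statfiles(paths)):
        if st is None:
            print path, '-> missing, or not a regular file or symlink'
        else:
            print path, '->', st.st_size, st.st_mtime
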
diff -r 7648b87e76db -r f5fbe15ca744 mercurial/parsers.c
--- a/mercurial/parsers.c	Mon Jan 14 23:14:45 2013 +0900
+++ b/mercurial/parsers.c	Sat Jan 19 17:24:33 2013 -0600
@@ -1508,6 +1508,7 @@
 
 PyObject *encodedir(PyObject *self, PyObject *args);
 PyObject *pathencode(PyObject *self, PyObject *args);
+PyObject *lowerencode(PyObject *self, PyObject *args);
 
 static PyMethodDef methods[] = {
 	{"pack_dirstate", pack_dirstate, METH_VARARGS, "pack a dirstate\n"},
@@ -1516,6 +1517,7 @@
 	{"parse_index2", parse_index2, METH_VARARGS, "parse a revlog index\n"},
 	{"encodedir", encodedir, METH_VARARGS, "encodedir a path\n"},
 	{"pathencode", pathencode, METH_VARARGS, "fncache-encode a path\n"},
+	{"lowerencode", lowerencode, METH_VARARGS, "lower-encode a path\n"},
 	{NULL, NULL}
 };
 
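
The C _lowerencode mirrors store.py's pure-Python lowerencode. An
approximate Python rendering of the byte classes packed into its bitmap
tables (a sketch, not guaranteed byte-for-byte identical):

    def lowerencode(s):
        out = []
        for c in s:
            if 'A' <= c <= 'Z':
                out.append(c.lower())          # fold ASCII uppercase
            elif c in '~\\:*?"<>|' or not 32 <= ord(c) < 127:
                out.append('~%02x' % ord(c))   # escape reserved bytes
            else:
                out.append(c)                  # pass through
        return ''.join(out)
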
diff -r 7648b87e76db -r f5fbe15ca744 mercurial/patch.py
--- a/mercurial/patch.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/mercurial/patch.py	Sat Jan 19 17:24:33 2013 -0600
@@ -6,7 +6,7 @@
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
-import cStringIO, email.Parser, os, errno, re
+import cStringIO, email.Parser, os, errno, re, posixpath
 import tempfile, zlib, shutil
 
 from i18n import _
@@ -439,11 +439,7 @@
                 util.setflags(self._join(fname), False, True)
 
     def unlink(self, fname):
-        try:
-            util.unlinkpath(self._join(fname))
-        except OSError, inst:
-            if inst.errno != errno.ENOENT:
-                raise
+        util.unlinkpath(self._join(fname), ignoremissing=True)
 
     def writerej(self, fname, failed, total, lines):
         fname = fname + ".rej"
@@ -1007,7 +1003,7 @@
 
             bot = min(fuzz, bot)
             top = min(fuzz, top)
-            return old[top:len(old)-bot], new[top:len(new)-bot], top
+            return old[top:len(old) - bot], new[top:len(new) - bot], top
         return old, new, 0
 
     def fuzzit(self, fuzz, toponly):
@@ -1514,44 +1510,6 @@
     finally:
         fp.close()
 
-def b85diff(to, tn):
-    '''print base85-encoded binary diff'''
-    def gitindex(text):
-        if not text:
-            return hex(nullid)
-        l = len(text)
-        s = util.sha1('blob %d\0' % l)
-        s.update(text)
-        return s.hexdigest()
-
-    def fmtline(line):
-        l = len(line)
-        if l <= 26:
-            l = chr(ord('A') + l - 1)
-        else:
-            l = chr(l - 26 + ord('a') - 1)
-        return '%c%s\n' % (l, base85.b85encode(line, True))
-
-    def chunk(text, csize=52):
-        l = len(text)
-        i = 0
-        while i < l:
-            yield text[i:i + csize]
-            i += csize
-
-    tohash = gitindex(to)
-    tnhash = gitindex(tn)
-    if tohash == tnhash:
-        return ""
-
-    # TODO: deltas
-    ret = ['index %s..%s\nGIT binary patch\nliteral %s\n' %
-           (tohash, tnhash, len(tn))]
-    for l in chunk(zlib.compress(tn)):
-        ret.append(fmtline(l))
-    ret.append('\n')
-    return ''.join(ret)
-
 class GitDiffRequired(Exception):
     pass
 
@@ -1622,9 +1580,8 @@
         return []
 
     revs = None
-    if not repo.ui.quiet:
-        hexfunc = repo.ui.debugflag and hex or short
-        revs = [hexfunc(node) for node in [node1, node2] if node]
+    hexfunc = repo.ui.debugflag and hex or short
+    revs = [hexfunc(node) for node in [node1, node2] if node]
 
     copy = {}
     if opts.git or opts.upgrade:
@@ -1690,17 +1647,45 @@
     '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
     return difflabel(diff, *args, **kw)
 
-
-def _addmodehdr(header, omode, nmode):
-    if omode != nmode:
-        header.append('old mode %s\n' % omode)
-        header.append('new mode %s\n' % nmode)
-
 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
             copy, getfilectx, opts, losedatafn, prefix):
 
     def join(f):
-        return os.path.join(prefix, f)
+        return posixpath.join(prefix, f)
+
+    def addmodehdr(header, omode, nmode):
+        if omode != nmode:
+            header.append('old mode %s\n' % omode)
+            header.append('new mode %s\n' % nmode)
+
+    def addindexmeta(meta, revs):
+        if opts.git:
+            i = len(revs)
+            if i == 2:
+                meta.append('index %s..%s\n' % tuple(revs))
+            elif i == 3:
+                meta.append('index %s,%s..%s\n' % tuple(revs))
+
+    def gitindex(text):
+        if not text:
+            return hex(nullid)
+        l = len(text)
+        s = util.sha1('blob %d\0' % l)
+        s.update(text)
+        return s.hexdigest()
+
+    def diffline(a, b, revs):
+        if opts.git:
+            line = 'diff --git a/%s b/%s\n' % (a, b)
+        elif not repo.ui.quiet:
+            if revs:
+                revinfo = ' '.join(["-r %s" % rev for rev in revs])
+                line = 'diff %s %s\n' % (revinfo, a)
+            else:
+                line = 'diff %s\n' % a
+        else:
+            line = ''
+        return line
 
     date1 = util.datestr(ctx1.date())
     man1 = ctx1.manifest()
@@ -1733,7 +1718,7 @@
                         else:
                             a = copyto[f]
                         omode = gitmode[man1.flags(a)]
-                        _addmodehdr(header, omode, mode)
+                        addmodehdr(header, omode, mode)
                         if a in removed and a not in gone:
                             op = 'rename'
                             gone.add(a)
@@ -1779,22 +1764,24 @@
                 nflag = ctx2.flags(f)
                 binary = util.binary(to) or util.binary(tn)
                 if opts.git:
-                    _addmodehdr(header, gitmode[oflag], gitmode[nflag])
+                    addmodehdr(header, gitmode[oflag], gitmode[nflag])
                     if binary:
                         dodiff = 'binary'
                 elif binary or nflag != oflag:
                     losedatafn(f)
-            if opts.git:
-                header.insert(0, mdiff.diffline(revs, join(a), join(b), opts))
 
         if dodiff:
+            if opts.git or revs:
+                header.insert(0, diffline(join(a), join(b), revs))
             if dodiff == 'binary':
-                text = b85diff(to, tn)
+                text = mdiff.b85diff(to, tn)
+                if text:
+                    addindexmeta(header, [gitindex(to), gitindex(tn)])
             else:
                 text = mdiff.unidiff(to, date1,
                                     # ctx2 date may be dynamic
                                     tn, util.datestr(ctx2.date()),
-                                    join(a), join(b), revs, opts=opts)
+                                    join(a), join(b), opts=opts)
             if header and (text or len(header) > 1):
                 yield ''.join(header)
             if text:
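
For a binary change under --git, the pieces above assemble a header of the
following shape (hashes abbreviated and invented; real output carries the
full 40-hex gitindex values):

    diff --git a/img.png b/img.png
    index 5f3a1c0..9e21bb4
    GIT binary patch
    literal 1234
    <base85 data lines produced by mdiff.b85diff>
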
diff -r 7648b87e76db -r f5fbe15ca744 mercurial/pathencode.c
--- a/mercurial/pathencode.c	Mon Jan 14 23:14:45 2013 +0900
+++ b/mercurial/pathencode.c	Sat Jan 19 17:24:33 2013 -0600
@@ -15,6 +15,7 @@
  * required.
  */
 
+#define PY_SSIZE_T_CLEAN
 #include <Python.h>
 #include <assert.h>
 #include <ctype.h>
@@ -481,12 +482,244 @@
 
 static const Py_ssize_t maxstorepathlen = 120;
 
+static Py_ssize_t _lowerencode(char *dest, size_t destsize,
+			       const char *src, Py_ssize_t len)
+{
+	static const uint32_t onebyte[8] = {
+		1, 0x2bfffbfb, 0xe8000001, 0x2fffffff
+	};
+
+	static const uint32_t lower[8] = { 0, 0, 0x7fffffe };
+
+	Py_ssize_t i, destlen = 0;
+
+	for (i = 0; i < len; i++) {
+		if (inset(onebyte, src[i]))
+			charcopy(dest, &destlen, destsize, src[i]);
+		else if (inset(lower, src[i]))
+			charcopy(dest, &destlen, destsize, src[i] + 32);
+		else
+			escape3(dest, &destlen, destsize, src[i]);
+	}
+
+	return destlen;
+}
+
+PyObject *lowerencode(PyObject *self, PyObject *args)
+{
+	char *path;
+	Py_ssize_t len, newlen;
+	PyObject *ret;
+
+	if (!PyArg_ParseTuple(args, "s#:lowerencode", &path, &len))
+		return NULL;
+
+	newlen = _lowerencode(NULL, 0, path, len);
+	ret = PyString_FromStringAndSize(NULL, newlen);
+	if (ret)
+		newlen = _lowerencode(PyString_AS_STRING(ret), newlen,
+				      path, len);
+
+	return ret;
+}
+
+/* See store.py:_auxencode for a description. */
+static Py_ssize_t auxencode(char *dest, size_t destsize,
+			    const char *src, Py_ssize_t len)
+{
+	static const uint32_t twobytes[8];
+
+	static const uint32_t onebyte[8] = {
+		~0, 0xffff3ffe, ~0, ~0, ~0, ~0, ~0, ~0,
+	};
+
+	return _encode(twobytes, onebyte, dest, 0, destsize, src, len, 0);
+}
+
+static PyObject *hashmangle(const char *src, Py_ssize_t len, const char sha[20])
+{
+	static const Py_ssize_t dirprefixlen = 8;
+	static const Py_ssize_t maxshortdirslen = 68;
+	char *dest;
+	PyObject *ret;
+
+	Py_ssize_t i, d, p, lastslash = len - 1, lastdot = -1;
+	Py_ssize_t destsize, destlen = 0, slop, used;
+
+	while (lastslash >= 0 && src[lastslash] != '/') {
+		if (src[lastslash] == '.' && lastdot == -1)
+			lastdot = lastslash;
+		lastslash--;
+	}
+
+#if 0
+	/* All paths should end in a suffix of ".i" or ".d".
+           Unfortunately, the file names in test-hybridencode.py
+           violate this rule.  */
+	if (lastdot != len - 3) {
+		PyErr_SetString(PyExc_ValueError,
+				"suffix missing or wrong length");
+		return NULL;
+	}
+#endif
+
+	/* If src contains a suffix, we will append it to the end of
+	   the new string, so make room. */
+	destsize = 120;
+	if (lastdot >= 0)
+		destsize += len - lastdot - 1;
+
+	ret = PyString_FromStringAndSize(NULL, destsize);
+	if (ret == NULL)
+		return NULL;
+
+	dest = PyString_AS_STRING(ret);
+	memcopy(dest, &destlen, destsize, "dh/", 3);
+
+	/* Copy up to dirprefixlen bytes of each path component, up to
+	   a limit of maxshortdirslen bytes. */
+	for (i = d = p = 0; i < lastslash; i++, p++) {
+		if (src[i] == '/') {
+			char d = dest[destlen - 1];
+			/* After truncation, a directory name may end
+			   in a space or dot, which are unportable. */
+			if (d == '.' || d == ' ')
+				dest[destlen - 1] = '_';
+			if (destlen > maxshortdirslen)
+				break;
+			charcopy(dest, &destlen, destsize, src[i]);
+			p = -1;
+		}
+		else if (p < dirprefixlen)
+			charcopy(dest, &destlen, destsize, src[i]);
+	}
+
+	/* Rewind to just before the last slash copied. */
+	if (destlen > maxshortdirslen + 3)
+		do {
+			destlen--;
+		} while (destlen > 0 && dest[destlen] != '/');
+
+	if (destlen > 3) {
+		if (lastslash > 0) {
+			char d = dest[destlen - 1];
+			/* The last directory component may be
+			   truncated, so make it safe. */
+			if (d == '.' || d == ' ')
+				dest[destlen - 1] = '_';
+		}
+
+		charcopy(dest, &destlen, destsize, '/');
+	}
+
+	/* Add a prefix of the original file's name. Its length
+	   depends on the number of bytes left after accounting for
+	   hash and suffix. */
+	used = destlen + 40;
+	if (lastdot >= 0)
+		used += len - lastdot - 1;
+	slop = maxstorepathlen - used;
+	if (slop > 0) {
+		Py_ssize_t basenamelen =
+			lastslash >= 0 ? len - lastslash - 2 : len - 1;
+
+		if (basenamelen > slop)
+			basenamelen = slop;
+		if (basenamelen > 0)
+			memcopy(dest, &destlen, destsize, &src[lastslash + 1],
+				basenamelen);
+	}
+
+	/* Add hash and suffix. */
+	for (i = 0; i < 20; i++)
+		hexencode(dest, &destlen, destsize, sha[i]);
+
+	if (lastdot >= 0)
+		memcopy(dest, &destlen, destsize, &src[lastdot],
+			len - lastdot - 1);
+
+	PyString_GET_SIZE(ret) = destlen;
+
+	return ret;
+}
+
 /*
- * We currently implement only basic encoding.
- *
- * If a name is too long to encode due to Windows path name limits,
- * this function returns None.
+ * Avoiding a trip through Python would improve performance by 50%,
+ * but we don't encounter enough long names to be worth the code.
  */
+static int sha1hash(char hash[20], const char *str, Py_ssize_t len)
+{
+	static PyObject *shafunc;
+	PyObject *shaobj, *hashobj;
+
+	if (shafunc == NULL) {
+		PyObject *util, *name = PyString_FromString("mercurial.util");
+
+		if (name == NULL)
+			return -1;
+
+		util = PyImport_Import(name);
+		Py_DECREF(name);
+
+		if (util == NULL) {
+			PyErr_SetString(PyExc_ImportError, "mercurial.util");
+			return -1;
+		}
+		shafunc = PyObject_GetAttrString(util, "sha1");
+		Py_DECREF(util);
+
+		if (shafunc == NULL) {
+			PyErr_SetString(PyExc_AttributeError,
+					"module 'mercurial.util' has no "
+					"attribute 'sha1'");
+			return -1;
+		}
+	}
+
+	shaobj = PyObject_CallFunction(shafunc, "s#", str, len);
+
+	if (shaobj == NULL)
+		return -1;
+
+	hashobj = PyObject_CallMethod(shaobj, "digest", "");
+	Py_DECREF(shaobj);
+
+	if (!PyString_Check(hashobj) || PyString_GET_SIZE(hashobj) != 20) {
+		PyErr_SetString(PyExc_TypeError,
+				"result of digest is not a 20-byte hash");
+		Py_DECREF(hashobj);
+		return -1;
+	}
+
+	memcpy(hash, PyString_AS_STRING(hashobj), 20);
+	Py_DECREF(hashobj);
+	return 0;
+}
+
+#define MAXENCODE (4096 * 3)
+
+static PyObject *hashencode(const char *src, Py_ssize_t len)
+{
+	char dired[MAXENCODE];
+	char lowered[MAXENCODE];
+	char auxed[MAXENCODE];
+	Py_ssize_t dirlen, lowerlen, auxlen, baselen;
+	char sha[20];
+
+	baselen = (len - 5) * 3;
+	if (baselen >= MAXENCODE) {
+		PyErr_SetString(PyExc_ValueError, "string too long");
+		return NULL;
+	}
+
+	dirlen = _encodedir(dired, baselen, src, len);
+	if (sha1hash(sha, dired, dirlen - 1) == -1)
+		return NULL;
+	lowerlen = _lowerencode(lowered, baselen, dired + 5, dirlen - 5);
+	auxlen = auxencode(auxed, baselen, lowered, lowerlen);
+	return hashmangle(auxed, auxlen, sha);
+}
+
 PyObject *pathencode(PyObject *self, PyObject *args)
 {
 	Py_ssize_t len, newlen;
@@ -501,13 +734,10 @@
 		return NULL;
 	}
 
-	if (len > maxstorepathlen) {
-		newobj = Py_None;
-		Py_INCREF(newobj);
-		return newobj;
-	}
-
-	newlen = len ? basicencode(NULL, 0, path, len + 1) : 1;
+	if (len > maxstorepathlen)
+		newlen = maxstorepathlen + 2;
+	else
+		newlen = len ? basicencode(NULL, 0, path, len + 1) : 1;
 
 	if (newlen <= maxstorepathlen + 1) {
 		if (newlen == len + 1) {
@@ -522,10 +752,9 @@
 			basicencode(PyString_AS_STRING(newobj), newlen, path,
 				    len + 1);
 		}
-	} else {
-		newobj = Py_None;
-		Py_INCREF(newobj);
 	}
+	else
+		newobj = hashencode(path, len + 1);
 
 	return newobj;
 }
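
hashencode implements the fncache store's hashed-path fallback for names
longer than maxstorepathlen, previously handled only in Python. Roughly (an
illustrative shape with fake hash digits, not exact output):

    data/averylongdirectoryname/anotherlongone/somefilename.i
      -> dh/averylon/anotherl/somefilenam<40 hex sha1 digits>.i

Each directory component is cut to dirprefixlen (8) bytes, the directory
part is capped near maxshortdirslen (68) bytes, a prefix of the base name
fills whatever room remains, and the result with hash and ".i"/".d" suffix
is kept within maxstorepathlen (120) bytes.
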
diff -r 7648b87e76db -r f5fbe15ca744 mercurial/phases.py
--- a/mercurial/phases.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/mercurial/phases.py	Sat Jan 19 17:24:33 2013 -0600
@@ -104,30 +104,11 @@
 from node import nullid, nullrev, bin, hex, short
 from i18n import _
 import util, error
-import obsolete
 
 allphases = public, draft, secret = range(3)
 trackedphases = allphases[1:]
 phasenames = ['public', 'draft', 'secret']
 
-def _filterunknown(ui, changelog, phaseroots):
-    """remove unknown nodes from the phase boundary
-
-    Nothing is lost as unknown nodes only hold data for their descendants.
-    """
-    updated = False
-    nodemap = changelog.nodemap # to filter unknown nodes
-    for phase, nodes in enumerate(phaseroots):
-        missing = [node for node in nodes if node not in nodemap]
-        if missing:
-            for mnode in missing:
-                ui.debug(
-                    'removing unknown node %s from %i-phase boundary\n'
-                    % (short(mnode), phase))
-            nodes.symmetric_difference_update(missing)
-            updated = True
-    return updated
-
 def _readroots(repo, phasedefaults=None):
     """Read phase roots from disk
 
@@ -139,6 +120,7 @@
     Return (roots, dirty) where dirty is true if roots differ from
     what is being stored.
     """
+    repo = repo.unfiltered()
     dirty = False
     roots = [set() for i in allphases]
     try:
@@ -156,8 +138,6 @@
             for f in phasedefaults:
                 roots = f(repo, roots)
         dirty = True
-    if _filterunknown(repo.ui, repo.changelog, roots):
-        dirty = True
     return roots, dirty
 
 class phasecache(object):
@@ -165,8 +145,9 @@
         if _load:
             # Cheap trick to allow shallow-copy without copy module
             self.phaseroots, self.dirty = _readroots(repo, phasedefaults)
+            self._phaserevs = None
+            self.filterunknown(repo)
             self.opener = repo.sopener
-            self._phaserevs = None
 
     def copy(self):
         # Shallow copy meant to ensure isolation in
@@ -184,6 +165,7 @@
 
     def getphaserevs(self, repo, rebuild=False):
         if rebuild or self._phaserevs is None:
+            repo = repo.unfiltered()
             revs = [public] * len(repo.changelog)
             for phase in trackedphases:
                 roots = map(repo.changelog.rev, self.phaseroots[phase])
@@ -228,6 +210,7 @@
         # Be careful to preserve shallow-copied values: do not update
         # phaseroots values, replace them.
 
+        repo = repo.unfiltered()
         delroots = [] # set of root deleted by this path
         for phase in xrange(targetphase + 1, len(allphases)):
             # filter nodes that are not in a compatible phase already
@@ -245,12 +228,13 @@
             # declare deleted root in the target phase
             if targetphase != 0:
                 self.retractboundary(repo, targetphase, delroots)
-        obsolete.clearobscaches(repo)
+        repo.invalidatevolatilesets()
 
     def retractboundary(self, repo, targetphase, nodes):
         # Be careful to preserve shallow-copied values: do not update
         # phaseroots values, replace them.
 
+        repo = repo.unfiltered()
         currentroots = self.phaseroots[targetphase]
         newroots = [n for n in nodes
                     if self.phase(repo, repo[n].rev()) < targetphase]
@@ -262,7 +246,27 @@
             ctxs = repo.set('roots(%ln::)', currentroots)
             currentroots.intersection_update(ctx.node() for ctx in ctxs)
             self._updateroots(targetphase, currentroots)
-        obsolete.clearobscaches(repo)
+        repo.invalidatevolatilesets()
+
+    def filterunknown(self, repo):
+        """remove unknown nodes from the phase boundary
+
+        Nothing is lost as unknown nodes only hold data for their descendants.
+        """
+        filtered = False
+        nodemap = repo.changelog.nodemap # to filter unknown nodes
+        for phase, nodes in enumerate(self.phaseroots):
+            missing = [node for node in nodes if node not in nodemap]
+            if missing:
+                for mnode in missing:
+                    repo.ui.debug(
+                        'removing unknown node %s from %i-phase boundary\n'
+                        % (short(mnode), phase))
+                nodes.symmetric_difference_update(missing)
+                filtered = True
+        if filtered:
+            self.dirty = True
+            self._phaserevs = None
 
 def advanceboundary(repo, targetphase, nodes):
     """Add nodes to a phase changing other nodes phases if necessary.
@@ -316,6 +320,7 @@
 
 def pushphase(repo, nhex, oldphasestr, newphasestr):
     """List phases root for serialization over pushkey"""
+    repo = repo.unfiltered()
     lock = repo.lock()
     try:
         currentphase = repo[nhex].phase()
@@ -340,6 +345,7 @@
 
     Accept unknown element input
     """
+    repo = repo.unfiltered()
     # build list from dictionary
     draftroots = []
     nodemap = repo.changelog.nodemap # to filter unknown nodes
@@ -367,6 +373,7 @@
 
     * `heads`: define the first subset
     * `roots`: define the second we subtract from the first"""
+    repo = repo.unfiltered()
     revset = repo.set('heads((%ln + parents(%ln)) - (%ln::%ln))',
                       heads, roots, roots, heads)
     return [c.node() for c in revset]
diff -r 7648b87e76db -r f5fbe15ca744 mercurial/posix.py
--- a/mercurial/posix.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/mercurial/posix.py	Sat Jan 19 17:24:33 2013 -0600
@@ -7,7 +7,7 @@
 
 from i18n import _
 import encoding
-import os, sys, errno, stat, getpass, pwd, grp, tempfile, unicodedata
+import os, sys, errno, stat, getpass, pwd, grp, socket, tempfile, unicodedata
 
 posixfile = open
 normpath = os.path.normpath
@@ -21,14 +21,26 @@
 os.umask(umask)
 
 def split(p):
-    '''Same as os.path.split, but faster'''
+    '''Same as posixpath.split, but faster
+
+    >>> import posixpath
+    >>> for f in ['/absolute/path/to/file',
+    ...           'relative/path/to/file',
+    ...           'file_alone',
+    ...           'path/to/directory/',
+    ...           '/multiple/path//separators',
+    ...           '/file_at_root',
+    ...           '///multiple_leading_separators_at_root',
+    ...           '']:
+    ...     assert split(f) == posixpath.split(f), f
+    '''
     ht = p.rsplit('/', 1)
     if len(ht) == 1:
         return '', p
     nh = ht[0].rstrip('/')
     if nh:
         return nh, ht[1]
-    return ht
+    return ht[0] + '/', ht[1]
 
 def openhardlinks():
     '''return true if it is safe to hold open file handles to hardlinks'''
@@ -352,12 +364,18 @@
 def setsignalhandler():
     pass
 
+_wantedkinds = set([stat.S_IFREG, stat.S_IFLNK])
+
 def statfiles(files):
-    'Stat each file in files and yield stat or None if file does not exist.'
+    '''Stat each file in files. Yield each stat, or None if a file does not
+    exist or has a type we don't care about.'''
     lstat = os.lstat
+    getkind = stat.S_IFMT
     for nf in files:
         try:
             st = lstat(nf)
+            if getkind(st.st_mode) not in _wantedkinds:
+                st = None
         except OSError, err:
             if err.errno not in (errno.ENOENT, errno.ENOTDIR):
                 raise
@@ -437,9 +455,13 @@
 def makedir(path, notindexed):
     os.mkdir(path)
 
-def unlinkpath(f):
+def unlinkpath(f, ignoremissing=False):
     """unlink and remove the directory if it is empty"""
-    os.unlink(f)
+    try:
+        os.unlink(f)
+    except OSError, e:
+        if not (ignoremissing and e.errno == errno.ENOENT):
+            raise
     # try removing directories that might now be empty
     try:
         os.removedirs(os.path.dirname(f))
@@ -468,7 +490,20 @@
 
     def __eq__(self, other):
         try:
-            return self.stat == other.stat
+            # Only dev, ino, size, mtime and atime are likely to change. Out
+            # of these, we shouldn't compare atime but should compare the
+            # rest. However, any of the other fields changing indicates
+            # something fishy going on, so return False if anything but atime
+            # changes.
+            return (self.stat.st_mode == other.stat.st_mode and
+                    self.stat.st_ino == other.stat.st_ino and
+                    self.stat.st_dev == other.stat.st_dev and
+                    self.stat.st_nlink == other.stat.st_nlink and
+                    self.stat.st_uid == other.stat.st_uid and
+                    self.stat.st_gid == other.stat.st_gid and
+                    self.stat.st_size == other.stat.st_size and
+                    self.stat.st_mtime == other.stat.st_mtime and
+                    self.stat.st_ctime == other.stat.st_ctime)
         except AttributeError:
             return False
 
@@ -477,3 +512,43 @@
 
 def executablepath():
     return None # available on Windows only
+
+class unixdomainserver(socket.socket):
+    def __init__(self, join, subsystem):
+        '''Create a unix domain socket with the given prefix.'''
+        super(unixdomainserver, self).__init__(socket.AF_UNIX)
+        sockname = subsystem + '.sock'
+        self.realpath = self.path = join(sockname)
+        if os.path.islink(self.path):
+            if os.path.exists(self.path):
+                self.realpath = os.readlink(self.path)
+            else:
+                os.unlink(self.path)
+        try:
+            self.bind(self.realpath)
+        except socket.error, err:
+            if err.args[0] == 'AF_UNIX path too long':
+                tmpdir = tempfile.mkdtemp(prefix='hg-%s-' % subsystem)
+                self.realpath = os.path.join(tmpdir, sockname)
+                try:
+                    self.bind(self.realpath)
+                    os.symlink(self.realpath, self.path)
+                except (OSError, socket.error):
+                    self.cleanup()
+                    raise
+            else:
+                raise
+        self.listen(5)
+
+    def cleanup(self):
+        def okayifmissing(f, path):
+            try:
+                f(path)
+            except OSError, err:
+                if err.errno != errno.ENOENT:
+                    raise
+
+        okayifmissing(os.unlink, self.path)
+        if self.realpath != self.path:
+            okayifmissing(os.unlink, self.realpath)
+            okayifmissing(os.rmdir, os.path.dirname(self.realpath))
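
A hedged usage sketch of the class above; the `join` helper and the 'cmdserver' subsystem name are illustrative, not part of this patch:

    import os

    def join(name):
        # stand-in for the repo-relative join callers would pass
        return os.path.join('/tmp/some-repo/.hg', name)

    # server = unixdomainserver(join, 'cmdserver')
    # try:
    #     conn, _addr = server.accept()
    # finally:
    #     server.cleanup()  # removes the socket, plus symlink/tmpdir fallback
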
diff -r 7648b87e76db -r f5fbe15ca744 mercurial/repair.py
--- a/mercurial/repair.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/mercurial/repair.py	Sat Jan 19 17:24:33 2013 -0600
@@ -6,7 +6,7 @@
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
-from mercurial import changegroup, bookmarks
+from mercurial import changegroup
 from mercurial.node import short
 from mercurial.i18n import _
 import os
@@ -56,10 +56,8 @@
     return s
 
 def strip(ui, repo, nodelist, backup="all", topic='backup'):
-    # It simplifies the logic around updating the branchheads cache if we only
-    # have to consider the effect of the stripped revisions and not revisions
-    # missing because the cache is out-of-date.
-    repo.updatebranchcache()
+    repo = repo.unfiltered()
+    repo.destroying()
 
     cl = repo.changelog
     # TODO handle undo of merge sets
@@ -68,17 +66,6 @@
     striplist = [cl.rev(node) for node in nodelist]
     striprev = min(striplist)
 
-    # Generate set of branches who will have nodes stripped.
-    striprevs = repo.revs("%ld::", striplist)
-    stripbranches = set([repo[rev].branch() for rev in striprevs])
-
-    # Set of potential new heads resulting from the strip.  The parents of any
-    # node removed could be a new head because the node to be removed could have
-    # been the only child of the parent.
-    newheadrevs = repo.revs("parents(%ld::) - %ld::", striprevs, striprevs)
-    newheadnodes = set([cl.node(rev) for rev in newheadrevs])
-    newheadbranches = set([repo[rev].branch() for rev in newheadrevs])
-
     keeppartialbundle = backup == 'strip'
 
     # Some revisions with rev > striprev may not be descendants of striprev.
@@ -111,8 +98,10 @@
         saverevs.difference_update(descendants)
     savebases = [cl.node(r) for r in saverevs]
     stripbases = [cl.node(r) for r in tostrip]
-    newbmtarget = repo.revs('sort(heads((::%ld) - (%ld)), -rev)',
-                            tostrip, tostrip)
+
+    # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
+    # is much faster
+    newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
     if newbmtarget:
         newbmtarget = repo[newbmtarget[0]].node()
     else:
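
The identity the comment relies on, checked on a toy linear history with plain dicts rather than the revset API:

    # revs 0..4, each rev's parent is rev - 1; strip s = {3, 4}
    parents = {0: [], 1: [0], 2: [1], 3: [2], 4: [3]}
    s = set([3, 4])
    outside = set(p for r in s for p in parents[r]) - s
    # max(parents(s) - s) == max(heads(::s - s)) == 2, the bookmark target
    assert max(outside) == 2
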
@@ -181,7 +170,7 @@
 
         for m in updatebm:
             bm[m] = repo[newbmtarget].node()
-        bookmarks.write(repo)
+        bm.write()
     except: # re-raises
         if backupfile:
             ui.warn(_("strip failed, full bundle stored in '%s'\n")
@@ -191,10 +180,4 @@
                     % chgrpfile)
         raise
 
-    if len(stripbranches) == 1 and len(newheadbranches) == 1 \
-            and stripbranches == newheadbranches:
-        repo.destroyed(newheadnodes)
-    else:
-        # Multiple branches involved in strip. Will allow branchcache to become
-        # invalid and later on rebuilt from scratch
-        repo.destroyed()
+    repo.destroyed()
diff -r 7648b87e76db -r f5fbe15ca744 mercurial/repoview.py
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/repoview.py	Sat Jan 19 17:24:33 2013 -0600
@@ -0,0 +1,219 @@
+# repoview.py - Filtered view of a localrepo object
+#
+# Copyright 2012 Pierre-Yves David 
+#                Logilab SA        
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+import copy
+import phases
+import util
+import obsolete, bookmarks, revset
+
+
+def hideablerevs(repo):
+    """Revisions candidates to be hidden
+
+    This is a standalone function to help extensions to wrap it."""
+    return obsolete.getrevs(repo, 'obsolete')
+
+def computehidden(repo):
+    """compute the set of hidden revision to filter
+
+    During most operation hidden should be filtered."""
+    assert not repo.changelog.filteredrevs
+    hideable = hideablerevs(repo)
+    if hideable:
+        cl = repo.changelog
+        firsthideable = min(hideable)
+        revs = cl.revs(start=firsthideable)
+        blockers = [r for r in revset._children(repo, revs, hideable)
+                      if r not in hideable]
+        for par in repo[None].parents():
+            blockers.append(par.rev())
+        for bm in bookmarks.listbookmarks(repo).values():
+            blockers.append(repo[bm].rev())
+        blocked = cl.ancestors(blockers, inclusive=True)
+        return frozenset(r for r in hideable if r not in blocked)
+    return frozenset()
+
+def computeunserved(repo):
+    """compute the set of revision that should be filtered when used a server
+
+    Secret and hidden changeset should not pretend to be here."""
+    assert not repo.changelog.filteredrevs
+    # fast path in simple case to avoid impact of non optimised code
+    hiddens = filterrevs(repo, 'visible')
+    if phases.hassecret(repo):
+        cl = repo.changelog
+        secret = phases.secret
+        getphase = repo._phasecache.phase
+        first = min(cl.rev(n) for n in repo._phasecache.phaseroots[secret])
+        revs = cl.revs(start=first)
+        secrets = set(r for r in revs if getphase(repo, r) >= secret)
+        return frozenset(hiddens | secrets)
+    else:
+        return hiddens
+
+def computemutable(repo):
+    """compute the set of revision that should be filtered when used a server
+
+    Secret and hidden changeset should not pretend to be here."""
+    assert not repo.changelog.filteredrevs
+    # fast check to avoid revset call on huge repo
+    if util.any(repo._phasecache.phaseroots[1:]):
+        getphase = repo._phasecache.phase
+        maymutable = filterrevs(repo, 'base')
+        return frozenset(r for r in maymutable if getphase(repo, r))
+    return frozenset()
+
+def computeimpactable(repo):
+    """Everything impactable by mutable revision
+
+    The mutable filter still have some chance to get invalidated. This will
+    happen when:
+
+    - you garbage collect hidden changeset,
+    - public phase is moved backward,
+    - something is changed in the filtering (this could be fixed)
+
+    This filter out any mutable changeset and any public changeset that may be
+    impacted by something happening to a mutable revision.
+
+    This is achieved by filtered everything with a revision number egal or
+    higher than the first mutable changeset is filtered."""
+    assert not repo.changelog.filteredrevs
+    cl = repo.changelog
+    firstmutable = len(cl)
+    for roots in repo._phasecache.phaseroots[1:]:
+        if roots:
+            firstmutable = min(firstmutable, min(cl.rev(r) for r in roots))
+    # protect from nullrev root
+    firstmutable = max(0, firstmutable)
+    return frozenset(xrange(firstmutable, len(cl)))
+
+# functions to compute filtered sets
+filtertable = {'visible': computehidden,
+               'served': computeunserved,
+               'immutable':  computemutable,
+               'base':  computeimpactable}
+### Nearest subset relation
+# The nearest subset of filter X is a filter Y so that:
+# * Y is included in X,
+# * X - Y is as small as possible.
+# This creates an ordering used for branchmap purposes.
+# The ordering may be partial.
+subsettable = {None: 'visible',
+               'visible': 'served',
+               'served': 'immutable',
+               'immutable': 'base'}
+
+def filterrevs(repo, filtername):
+    """returns set of filtered revision for this filter name"""
+    if filtername not in repo.filteredrevcache:
+        func = filtertable[filtername]
+        repo.filteredrevcache[filtername] = func(repo.unfiltered())
+    return repo.filteredrevcache[filtername]
+
+class repoview(object):
+    """Provide a read/write view of a repo through a filtered changelog
+
+    This object is used to access a filtered version of a repository without
+    altering the original repository object itself. We cannot alter the
+    original object for two main reasons:
+    - it would prevent the use of a repo with multiple filters at the same
+      time, in particular when multiple threads are involved;
+    - it would make the scope of the filtering harder to control.
+
+    This object behaves very much like the original repository. All attribute
+    operations are done on the original repository:
+    - An access to `repoview.someattr` actually returns `repo.someattr`,
+    - A write to `repoview.someattr` actually sets value of `repo.someattr`,
+    - A deletion of `repoview.someattr` actually drops `someattr`
+      from `repo.__dict__`.
+
+    The only exception is the `changelog` property. It is overridden to return
+    a (shallow) copy of `repo.changelog` with some revisions filtered. The
+    `filtername` attribute of the view controls the revisions that need to be
+    filtered. (The fact that the changelog is copied is an implementation
+    detail.)
+
+    Unlike attributes, this object intercepts all method calls. This means
+    that all methods are run on the `repoview` object with the filtered
+    `changelog` property. For this purpose the simple `repoview` class must be
+    mixed with the actual class of the repository. This ensures that the
+    resulting `repoview` object has the very same methods as the repo object.
+    This leads to the property below.
+
+        repoview.method() --> repo.__class__.method(repoview)
+
+    The inheritance has to be done dynamically because `repo` can be any
+    subclass of `localrepo`, e.g. `bundlerepo` or `httprepo`.
+    """
+
+    def __init__(self, repo, filtername):
+        object.__setattr__(self, '_unfilteredrepo', repo)
+        object.__setattr__(self, 'filtername', filtername)
+        object.__setattr__(self, '_clcachekey', None)
+        object.__setattr__(self, '_clcache', None)
+
+    # not a propertycache on purpose; we shall implement a proper cache later
+    @property
+    def changelog(self):
+        """return a filtered version of the changeset
+
+        this changelog must not be used for writing"""
+        # some cache may be implemented later
+        unfi = self._unfilteredrepo
+        unfichangelog = unfi.changelog
+        revs = filterrevs(unfi, self.filtername)
+        cl = self._clcache
+        newkey = (len(unfichangelog), unfichangelog.tip(), hash(revs))
+        if cl is not None:
+            # We need to check curkey too, for some obscure reason: an MQ
+            # test shows corruption of the underlying repo (in _clcache)
+            # without any change in the cache key.
+            oldfilter = cl.filteredrevs
+            try:
+                cl.filteredrevs = ()  # disable filtering for tip
+                curkey = (len(cl), cl.tip(), hash(oldfilter))
+            finally:
+                cl.filteredrevs = oldfilter
+            if newkey != self._clcachekey or newkey != curkey:
+                cl = None
+        # could have been made None by the previous if
+        if cl is None:
+            cl = copy.copy(unfichangelog)
+            cl.filteredrevs = revs
+            object.__setattr__(self, '_clcache', cl)
+            object.__setattr__(self, '_clcachekey', newkey)
+        return cl
+
+    def unfiltered(self):
+        """Return an unfiltered version of a repo"""
+        return self._unfilteredrepo
+
+    def filtered(self, name):
+        """Return a filtered version of a repository"""
+        if name == self.filtername:
+            return self
+        return self.unfiltered().filtered(name)
+
+    # all attribute accesses are forwarded to the proxied repo
+    def __getattr__(self, attr):
+        return getattr(self._unfilteredrepo, attr)
+
+    def __setattr__(self, attr, value):
+        return setattr(self._unfilteredrepo, attr, value)
+
+    def __delattr__(self, attr):
+        return delattr(self._unfilteredrepo, attr)
+
+    # The `requirements` attribute is initialized during __init__. But
+    # __getattr__ won't be called, as it also exists on the class. We need
+    # explicit forwarding to the main repo here.
+    @property
+    def requirements(self):
+        return self._unfilteredrepo.requirements
+
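
A sketch of walking the subset chain defined above, assuming the module is importable as mercurial.repoview:

    from mercurial import repoview

    def filterchain():
        '''yield filter names from the broadest view down to 'base' '''
        name = None
        while name in repoview.subsettable:
            name = repoview.subsettable[name]
            yield name

    assert list(filterchain()) == ['visible', 'served', 'immutable', 'base']
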
diff -r 7648b87e76db -r f5fbe15ca744 mercurial/revlog.py
--- a/mercurial/revlog.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/mercurial/revlog.py	Sat Jan 19 17:24:33 2013 -0600
@@ -257,11 +257,14 @@
         return iter(xrange(len(self)))
     def revs(self, start=0, stop=None):
         """iterate over all rev in this revlog (from start to stop)"""
-        if stop is None:
-            stop = len(self)
+        step = 1
+        if stop is not None:
+            if start > stop:
+                step = -1
+            stop += step
         else:
-            stop += 1
-        return xrange(start, stop)
+            stop = len(self)
+        return xrange(start, stop, step)
 
     @util.propertycache
     def nodemap(self):
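
The inclusive, possibly descending range the reworked revs() produces, sketched standalone:

    def revrange(start, stop=None, length=10):
        # mirrors revlog.revs(): stop is inclusive and may be below start
        step = 1
        if stop is not None:
            if start > stop:
                step = -1
            stop += step
        else:
            stop = length
        return xrange(start, stop, step)

    assert list(revrange(2, 5)) == [2, 3, 4, 5]
    assert list(revrange(5, 2)) == [5, 4, 3, 2]
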
@@ -338,33 +341,14 @@
         return len(t)
     size = rawsize
 
-    def ancestors(self, revs, stoprev=0):
+    def ancestors(self, revs, stoprev=0, inclusive=False):
         """Generate the ancestors of 'revs' in reverse topological order.
         Does not generate revs lower than stoprev.
 
-        Yield a sequence of revision numbers starting with the parents
-        of each revision in revs, i.e., each revision is *not* considered
-        an ancestor of itself.  Results are in breadth-first order:
-        parents of each rev in revs, then parents of those, etc.  Result
-        does not include the null revision."""
-        visit = util.deque(revs)
-        seen = set([nullrev])
-        while visit:
-            for parent in self.parentrevs(visit.popleft()):
-                if parent < stoprev:
-                    continue
-                if parent not in seen:
-                    visit.append(parent)
-                    seen.add(parent)
-                    yield parent
+        See the documentation for ancestor.lazyancestors for more details."""
 
-    def incancestors(self, revs, stoprev=0):
-        """Identical to ancestors() except it also generates the
-        revisions, 'revs'"""
-        for rev in revs:
-            yield rev
-        for rev in self.ancestors(revs, stoprev):
-            yield rev
+        return ancestor.lazyancestors(self, revs, stoprev=stoprev,
+                                      inclusive=inclusive)
 
     def descendants(self, revs):
         """Generate the descendants of 'revs' in revision order.
@@ -429,6 +413,29 @@
         missing.sort()
         return has, [self.node(r) for r in missing]
 
+    def findmissingrevs(self, common=None, heads=None):
+        """Return the revision numbers of the ancestors of heads that
+        are not ancestors of common.
+
+        More specifically, return a list of revision numbers corresponding to
+        nodes N such that every N satisfies the following constraints:
+
+          1. N is an ancestor of some node in 'heads'
+          2. N is not an ancestor of any node in 'common'
+
+        The list is sorted by revision number, meaning it is
+        topologically sorted.
+
+        'heads' and 'common' are both lists of revision numbers.  If heads is
+        not supplied, uses all of the revlog's heads.  If common is not
+        supplied, uses nullid."""
+        if common is None:
+            common = [nullrev]
+        if heads is None:
+            heads = self.headrevs()
+
+        return ancestor.missingancestors(heads, common, self.parentrevs)
+
     def findmissing(self, common=None, heads=None):
         """Return the ancestors of heads that are not ancestors of common.
 
@@ -444,8 +451,16 @@
         'heads' and 'common' are both lists of node IDs.  If heads is
         not supplied, uses all of the revlog's heads.  If common is not
         supplied, uses nullid."""
-        _common, missing = self.findcommonmissing(common, heads)
-        return missing
+        if common is None:
+            common = [nullid]
+        if heads is None:
+            heads = self.heads()
+
+        common = [self.rev(n) for n in common]
+        heads = [self.rev(n) for n in heads]
+
+        return [self.node(r) for r in
+                ancestor.missingancestors(heads, common, self.parentrevs)]
 
     def nodesbetween(self, roots=None, heads=None):
         """Return a topological path from 'roots' to 'heads'.
diff -r 7648b87e76db -r f5fbe15ca744 mercurial/revset.py
--- a/mercurial/revset.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/mercurial/revset.py	Sat Jan 19 17:24:33 2013 -0600
@@ -13,6 +13,7 @@
 from i18n import _
 import encoding
 import obsolete as obsmod
+import repoview
 
 def _revancestors(repo, revs, followfirst):
     """Like revlog.ancestors(), but supports followfirst."""
@@ -442,6 +443,18 @@
     bumped = obsmod.getrevs(repo, 'bumped')
     return [r for r in subset if r in bumped]
 
+def bundle(repo, subset, x):
+    """``bundle()``
+    Changesets in the bundle.
+
+    Bundle must be specified by the -R option."""
+
+    try:
+        bundlerevs = repo.changelog.bundlerevs
+    except AttributeError:
+        raise util.Abort(_("no bundle provided - specify with -R"))
+    return [r for r in subset if r in bundlerevs]
+
 def checkstatus(repo, subset, pat, field):
     m = None
     s = []
@@ -475,8 +488,13 @@
 
 def _children(repo, narrow, parentset):
     cs = set()
+    if not parentset:
+        return cs
     pr = repo.changelog.parentrevs
+    minrev = min(parentset)
     for r in narrow:
+        if r <= minrev:
+            continue
         for p in pr(r):
             if p in parentset:
                 cs.add(r)
@@ -628,6 +646,15 @@
 
     return [r for r in subset if r in dests]
 
+def divergent(repo, subset, x):
+    """``divergent()``
+    Final successors of changesets with an alternative set of final successors.
+    """
+    # i18n: "divergent" is a keyword
+    getargs(x, 0, 0, _("divergent takes no arguments"))
+    divergent = obsmod.getrevs(repo, 'divergent')
+    return [r for r in subset if r in divergent]
+
 def draft(repo, subset, x):
     """``draft()``
     Changeset in draft phase."""
@@ -865,7 +892,8 @@
     """
     # i18n: "hidden" is a keyword
     getargs(x, 0, 0, _("hidden takes no arguments"))
-    return [r for r in subset if r in repo.hiddenrevs]
+    hiddenrevs = repoview.filterrevs(repo, 'visible')
+    return [r for r in subset if r in hiddenrevs]
 
 def keyword(repo, subset, x):
     """``keyword(string)``
@@ -1513,6 +1541,7 @@
     "branch": branch,
     "branchpoint": branchpoint,
     "bumped": bumped,
+    "bundle": bundle,
     "children": children,
     "closed": closed,
     "contains": contains,
@@ -1522,6 +1551,7 @@
     "descendants": descendants,
     "_firstdescendants": _firstdescendants,
     "destination": destination,
+    "divergent": divergent,
     "draft": draft,
     "extinct": extinct,
     "extra": extra,
diff -r 7648b87e76db -r f5fbe15ca744 mercurial/scmutil.py
--- a/mercurial/scmutil.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/mercurial/scmutil.py	Sat Jan 19 17:24:33 2013 -0600
@@ -252,9 +252,9 @@
     def _setmustaudit(self, onoff):
         self._audit = onoff
         if onoff:
-            self.auditor = pathauditor(self.base)
+            self.audit = pathauditor(self.base)
         else:
-            self.auditor = util.always
+            self.audit = util.always
 
     mustaudit = property(_getmustaudit, _setmustaudit)
 
@@ -276,51 +276,52 @@
             r = util.checkosfilename(path)
             if r:
                 raise util.Abort("%s: %r" % (r, path))
-        self.auditor(path)
+        self.audit(path)
         f = self.join(path)
 
         if not text and "b" not in mode:
             mode += "b" # for that other OS
 
         nlink = -1
-        dirname, basename = util.split(f)
-        # If basename is empty, then the path is malformed because it points
-        # to a directory. Let the posixfile() call below raise IOError.
-        if basename and mode not in ('r', 'rb'):
-            if atomictemp:
-                if not os.path.isdir(dirname):
-                    util.makedirs(dirname, self.createmode)
-                return util.atomictempfile(f, mode, self.createmode)
-            try:
-                if 'w' in mode:
-                    util.unlink(f)
+        if mode not in ('r', 'rb'):
+            dirname, basename = util.split(f)
+            # If basename is empty, then the path is malformed because it points
+            # to a directory. Let the posixfile() call below raise IOError.
+            if basename:
+                if atomictemp:
+                    if not os.path.isdir(dirname):
+                        util.makedirs(dirname, self.createmode)
+                    return util.atomictempfile(f, mode, self.createmode)
+                try:
+                    if 'w' in mode:
+                        util.unlink(f)
+                        nlink = 0
+                    else:
+                        # nlinks() may behave differently for files on Windows
+                        # shares if the file is open.
+                        fd = util.posixfile(f)
+                        nlink = util.nlinks(f)
+                        if nlink < 1:
+                            nlink = 2 # force mktempcopy (issue1922)
+                        fd.close()
+                except (OSError, IOError), e:
+                    if e.errno != errno.ENOENT:
+                        raise
                     nlink = 0
-                else:
-                    # nlinks() may behave differently for files on Windows
-                    # shares if the file is open.
-                    fd = util.posixfile(f)
-                    nlink = util.nlinks(f)
-                    if nlink < 1:
-                        nlink = 2 # force mktempcopy (issue1922)
-                    fd.close()
-            except (OSError, IOError), e:
-                if e.errno != errno.ENOENT:
-                    raise
-                nlink = 0
-                if not os.path.isdir(dirname):
-                    util.makedirs(dirname, self.createmode)
-            if nlink > 0:
-                if self._trustnlink is None:
-                    self._trustnlink = nlink > 1 or util.checknlink(f)
-                if nlink > 1 or not self._trustnlink:
-                    util.rename(util.mktempcopy(f), f)
+                    if not os.path.isdir(dirname):
+                        util.makedirs(dirname, self.createmode)
+                if nlink > 0:
+                    if self._trustnlink is None:
+                        self._trustnlink = nlink > 1 or util.checknlink(f)
+                    if nlink > 1 or not self._trustnlink:
+                        util.rename(util.mktempcopy(f), f)
         fp = util.posixfile(f, mode)
         if nlink == 0:
             self._fixfilemode(f)
         return fp
 
     def symlink(self, src, dst):
-        self.auditor(dst)
+        self.audit(dst)
         linkname = self.join(dst)
         try:
             os.unlink(linkname)
@@ -340,9 +341,6 @@
         else:
             self.write(dst, src)
 
-    def audit(self, path):
-        self.auditor(path)
-
     def join(self, path):
         if path:
             return os.path.join(self.base, path)
@@ -381,6 +379,18 @@
 
 filteropener = filtervfs
 
+class readonlyvfs(abstractvfs, auditvfs):
+    '''Wrapper vfs preventing any writing.'''
+
+    def __init__(self, vfs):
+        auditvfs.__init__(self, vfs)
+
+    def __call__(self, path, mode='r', *args, **kw):
+        if mode not in ('r', 'rb'):
+            raise util.Abort('this vfs is read only')
+        return self.vfs(path, mode, *args, **kw)
+
+
 def canonpath(root, cwd, myname, auditor=None):
     '''return the canonical path of myname, given cwd and root'''
     if util.endswithsep(root):
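
A self-contained analogue of the read-only guard above, using plain open() instead of a vfs and IOError instead of util.Abort:

    class readonlyopener(object):
        '''refuse any mode other than 'r' or 'rb', like readonlyvfs'''
        def __init__(self, opener):
            self.opener = opener
        def __call__(self, path, mode='r', *args, **kw):
            if mode not in ('r', 'rb'):
                raise IOError('this opener is read only')
            return self.opener(path, mode, *args, **kw)

    ro = readonlyopener(open)
    # ro('some-file') is allowed; ro('some-file', 'w') raises IOError
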
@@ -425,7 +435,7 @@
                 break
             name = dirname
 
-        raise util.Abort('%s not under root' % myname)
+        raise util.Abort(_("%s not under root '%s'") % (myname, root))
 
 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
     '''yield every hg repository under path, always recursively.
@@ -637,13 +647,13 @@
                 start, end = spec.split(_revrangesep, 1)
                 start = revfix(repo, start, 0)
                 end = revfix(repo, end, len(repo) - 1)
-                step = start > end and -1 or 1
+                rangeiter = repo.changelog.revs(start, end)
                 if not seen and not l:
                     # by far the most common case: revs = ["-1:0"]
-                    l = range(start, end + step, step)
+                    l = list(rangeiter)
                     # defer syncing seen until next iteration
                     continue
-                newrevs = set(xrange(start, end + step, step))
+                newrevs = set(rangeiter)
                 if seen:
                     newrevs.difference_update(seen)
                     seen.update(newrevs)
@@ -850,15 +860,19 @@
     return requirements
 
 class filecacheentry(object):
-    def __init__(self, path):
+    def __init__(self, path, stat=True):
         self.path = path
-        self.cachestat = filecacheentry.stat(self.path)
+        self.cachestat = None
+        self._cacheable = None
 
-        if self.cachestat:
-            self._cacheable = self.cachestat.cacheable()
-        else:
-            # None means we don't know yet
-            self._cacheable = None
+        if stat:
+            self.cachestat = filecacheentry.stat(self.path)
+
+            if self.cachestat:
+                self._cacheable = self.cachestat.cacheable()
+            else:
+                # None means we don't know yet
+                self._cacheable = None
 
     def refresh(self):
         if self.cacheable():
@@ -933,6 +947,7 @@
     def __get__(self, obj, type=None):
         # do we need to check if the file changed?
         if self.name in obj.__dict__:
+            assert self.name in obj._filecache, self.name
             return obj.__dict__[self.name]
 
         entry = obj._filecache.get(self.name)
@@ -954,12 +969,19 @@
         return entry.obj
 
     def __set__(self, obj, value):
-        if self.name in obj._filecache:
-            obj._filecache[self.name].obj = value # update cached copy
+        if self.name not in obj._filecache:
+            # we add an entry for the missing value because 'X in __dict__'
+            # must imply 'X in _filecache'
+            ce = filecacheentry(self.join(obj, self.path), False)
+            obj._filecache[self.name] = ce
+        else:
+            ce = obj._filecache[self.name]
+
+        ce.obj = value # update cached copy
         obj.__dict__[self.name] = value # update copy returned by obj.x
 
     def __delete__(self, obj):
         try:
             del obj.__dict__[self.name]
         except KeyError:
-            raise AttributeError, self.name
+            raise AttributeError(self.name)
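
A sketch of the invariant the new __set__ branch maintains; the dicts stand in for obj.__dict__ and obj._filecache:

    store = {}    # stands in for obj.__dict__
    entries = {}  # stands in for obj._filecache

    def setcached(name, value):
        # mirror __set__: register a stat-less entry on first assignment so
        # that 'name in store' always implies 'name in entries'
        if name not in entries:
            entries[name] = object()  # filecacheentry(path, stat=False)
        store[name] = value

    setcached('bookmarks', ['mark'])
    assert 'bookmarks' in store and 'bookmarks' in entries
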
diff -r 7648b87e76db -r f5fbe15ca744 mercurial/statichttprepo.py
--- a/mercurial/statichttprepo.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/mercurial/statichttprepo.py	Sat Jan 19 17:24:33 2013 -0600
@@ -134,8 +134,7 @@
         self.changelog = changelog.changelog(self.sopener)
         self._tags = None
         self.nodetagscache = None
-        self._branchcache = None
-        self._branchcachetip = None
+        self._branchcaches = {}
         self.encodepats = None
         self.decodepats = None
 
diff -r 7648b87e76db -r f5fbe15ca744 mercurial/store.py
--- a/mercurial/store.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/mercurial/store.py	Sat Jan 19 17:24:33 2013 -0600
@@ -76,7 +76,7 @@
     cmap = dict([(chr(x), chr(x)) for x in xrange(127)])
     for x in (range(32) + range(126, 256) + winreserved):
         cmap[chr(x)] = "~%02x" % x
-    for x in range(ord("A"), ord("Z")+1) + [ord(e)]:
+    for x in range(ord("A"), ord("Z") + 1) + [ord(e)]:
         cmap[chr(x)] = e + chr(x).lower()
     dmap = {}
     for k, v in cmap.iteritems():
@@ -128,11 +128,11 @@
     cmap = dict([(chr(x), chr(x)) for x in xrange(127)])
     for x in (range(32) + range(126, 256) + winreserved):
         cmap[chr(x)] = "~%02x" % x
-    for x in range(ord("A"), ord("Z")+1):
+    for x in range(ord("A"), ord("Z") + 1):
         cmap[chr(x)] = chr(x).lower()
     return lambda s: "".join([cmap[c] for c in s])
 
-lowerencode = _buildlowerencodefun()
+lowerencode = getattr(parsers, 'lowerencode', None) or _buildlowerencodefun()
 
 # Windows reserved names: con, prn, aux, nul, com1..com9, lpt1..lpt9
 _winres3 = ('aux', 'con', 'prn', 'nul') # length 3
@@ -255,22 +255,17 @@
     return res
 
 def _pathencode(path):
+    de = encodedir(path)
     if len(path) > _maxstorepathlen:
-        return None
-    ef = _encodefname(encodedir(path)).split('/')
+        return _hashencode(de, True)
+    ef = _encodefname(de).split('/')
     res = '/'.join(_auxencode(ef, True))
     if len(res) > _maxstorepathlen:
-        return None
+        return _hashencode(de, True)
     return res
 
 _pathencode = getattr(parsers, 'pathencode', _pathencode)
 
-def _dothybridencode(f):
-    ef = _pathencode(f)
-    if ef is None:
-        return _hashencode(encodedir(f), True)
-    return ef
-
 def _plainhybridencode(f):
     return _hybridencode(f, False)
 
@@ -456,7 +451,7 @@
 class fncachestore(basicstore):
     def __init__(self, path, vfstype, dotencode):
         if dotencode:
-            encode = _dothybridencode
+            encode = _pathencode
         else:
             encode = _plainhybridencode
         self.encode = encode
diff -r 7648b87e76db -r f5fbe15ca744 mercurial/subrepo.py
--- a/mercurial/subrepo.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/mercurial/subrepo.py	Sat Jan 19 17:24:33 2013 -0600
@@ -14,6 +14,27 @@
 
 nullstate = ('', '', 'empty')
 
+class SubrepoAbort(error.Abort):
+    """Exception class used to avoid handling a subrepo error more than once"""
+    def __init__(self, *args, **kw):
+        error.Abort.__init__(self, *args, **kw)
+        self.subrepo = kw.get('subrepo')
+
+def annotatesubrepoerror(func):
+    def decoratedmethod(self, *args, **kargs):
+        try:
+            res = func(self, *args, **kargs)
+        except SubrepoAbort, ex:
+            # This exception has already been handled
+            raise ex
+        except error.Abort, ex:
+            subrepo = subrelpath(self)
+            errormsg = str(ex) + ' ' + _('(in subrepo %s)') % subrepo
+            # re-raise as SubrepoAbort so outer layers don't handle it again
+            raise SubrepoAbort(errormsg, hint=ex.hint, subrepo=subrepo)
+        return res
+    return decoratedmethod
+
 def state(ctx, ui):
     """return a state dict, mapping subrepo paths configured in .hgsub
     to tuple: (source from .hgsub, revision from .hgsubstate, kind
@@ -126,7 +147,7 @@
             r = "%s:%s:%s" % r
         repo.ui.debug("  subrepo %s: %s %s\n" % (s, msg, r))
 
-    for s, l in s1.items():
+    for s, l in sorted(s1.iteritems()):
         a = sa.get(s, nullstate)
         ld = l # local state with possible dirty flag for compares
         if wctx.sub(s).dirty():
@@ -244,8 +265,7 @@
         if repo.ui.config('paths', 'default'):
             return repo.ui.config('paths', 'default')
     if abort:
-        raise util.Abort(_("default path for subrepository %s not found") %
-            reporelpath(repo))
+        raise util.Abort(_("default path for subrepository not found"))
 
 def itersubrepos(ctx1, ctx2):
     """find subrepos in ctx1 or ctx2"""
@@ -402,6 +422,7 @@
                 self._repo.ui.setconfig(s, k, v)
         self._initrepo(r, state[0], create)
 
+    @annotatesubrepoerror
     def _initrepo(self, parentrepo, source, create):
         self._repo._subparent = parentrepo
         self._repo._subsource = source
@@ -422,10 +443,12 @@
                 addpathconfig('default-push', defpushpath)
             fp.close()
 
+    @annotatesubrepoerror
     def add(self, ui, match, dryrun, listsubrepos, prefix, explicitonly):
         return cmdutil.add(ui, self._repo, match, dryrun, listsubrepos,
                            os.path.join(prefix, self._path), explicitonly)
 
+    @annotatesubrepoerror
     def status(self, rev2, **opts):
         try:
             rev1 = self._state[1]
@@ -437,6 +460,7 @@
                                % (inst, subrelpath(self)))
             return [], [], [], [], [], [], []
 
+    @annotatesubrepoerror
     def diff(self, ui, diffopts, node2, match, prefix, **opts):
         try:
             node1 = node.bin(self._state[1])
@@ -446,12 +470,13 @@
                 node2 = node.bin(node2)
             cmdutil.diffordiffstat(ui, self._repo, diffopts,
                                    node1, node2, match,
-                                   prefix=os.path.join(prefix, self._path),
+                                   prefix=posixpath.join(prefix, self._path),
                                    listsubrepos=True, **opts)
         except error.RepoLookupError, inst:
             self._repo.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
                                % (inst, subrelpath(self)))
 
+    @annotatesubrepoerror
     def archive(self, ui, archiver, prefix, match=None):
         self._get(self._state + ('hg',))
         abstractsubrepo.archive(self, ui, archiver, prefix, match)
@@ -463,6 +488,7 @@
             submatch = matchmod.narrowmatcher(subpath, match)
             s.archive(ui, archiver, os.path.join(prefix, self._path), submatch)
 
+    @annotatesubrepoerror
     def dirty(self, ignoreupdate=False):
         r = self._state[1]
         if r == '' and not ignoreupdate: # no state recorded
@@ -479,6 +505,7 @@
     def checknested(self, path):
         return self._repo._checknested(self._repo.wjoin(path))
 
+    @annotatesubrepoerror
     def commit(self, text, user, date):
         # don't bother committing in the subrepo if it's only been
         # updated
@@ -490,6 +517,7 @@
             return self._repo['.'].hex() # different version checked out
         return node.hex(n)
 
+    @annotatesubrepoerror
     def remove(self):
         # we can't fully delete the repository as it may contain
         # local-only history
@@ -519,12 +547,14 @@
                 bookmarks.updatefromremote(self._repo.ui, self._repo, other,
                                            srcurl)
 
+    @annotatesubrepoerror
     def get(self, state, overwrite=False):
         self._get(state)
         source, revision, kind = state
         self._repo.ui.debug("getting subrepo %s\n" % self._path)
         hg.updaterepo(self._repo, revision, overwrite)
 
+    @annotatesubrepoerror
     def merge(self, state):
         self._get(state)
         cur = self._repo['.']
@@ -551,6 +581,7 @@
         else:
             mergefunc()
 
+    @annotatesubrepoerror
     def push(self, opts):
         force = opts.get('force')
         newbranch = opts.get('new_branch')
@@ -569,12 +600,15 @@
         other = hg.peer(self._repo, {'ssh': ssh}, dsturl)
         return self._repo.push(other, force, newbranch=newbranch)
 
+    @annotatesubrepoerror
     def outgoing(self, ui, dest, opts):
         return hg.outgoing(ui, self._repo, _abssource(self._repo, True), opts)
 
+    @annotatesubrepoerror
     def incoming(self, ui, source, opts):
         return hg.incoming(ui, self._repo, _abssource(self._repo, False), opts)
 
+    @annotatesubrepoerror
     def files(self):
         rev = self._state[1]
         ctx = self._repo[rev]
@@ -593,10 +627,12 @@
         ctx = self._repo[None]
         return ctx.walk(match)
 
+    @annotatesubrepoerror
     def forget(self, ui, match, prefix):
         return cmdutil.forget(ui, self._repo, match,
                               os.path.join(prefix, self._path), True)
 
+    @annotatesubrepoerror
     def revert(self, ui, substate, *pats, **opts):
         # reverting a subrepo is a 2 step process:
         # 1. if the no_backup is not set, revert all modified
@@ -751,6 +787,7 @@
                 pass
         return rev
 
+    @annotatesubrepoerror
     def commit(self, text, user, date):
         # user and date are out of our hands since svn is centralized
         changed, extchanged, missing = self._wcchanged()
@@ -778,6 +815,7 @@
         self._ui.status(self._svncommand(['update', '-r', newrev])[0])
         return newrev
 
+    @annotatesubrepoerror
     def remove(self):
         if self.dirty():
             self._ui.warn(_('not removing repo %s because '
@@ -802,6 +840,7 @@
         except OSError:
             pass
 
+    @annotatesubrepoerror
     def get(self, state, overwrite=False):
         if overwrite:
             self._svncommand(['revert', '--recursive'])
@@ -822,6 +861,7 @@
             raise util.Abort((status or err).splitlines()[-1])
         self._ui.status(status)
 
+    @annotatesubrepoerror
     def merge(self, state):
         old = self._state[1]
         new = state[1]
@@ -835,6 +875,7 @@
         # push is a no-op for SVN
         return True
 
+    @annotatesubrepoerror
     def files(self):
         output = self._svncommand(['list', '--recursive', '--xml'])[0]
         doc = xml.dom.minidom.parseString(output)
@@ -1021,6 +1062,7 @@
             raise util.Abort(_("revision %s does not exist in subrepo %s\n") %
                                (revision, self._relpath))
 
+    @annotatesubrepoerror
     def dirty(self, ignoreupdate=False):
         if self._gitmissing():
             return self._state[1] != ''
@@ -1037,6 +1079,7 @@
     def basestate(self):
         return self._gitstate()
 
+    @annotatesubrepoerror
     def get(self, state, overwrite=False):
         source, revision, kind = state
         if not revision:
@@ -1120,6 +1163,7 @@
             # a real merge would be required, just checkout the revision
             rawcheckout()
 
+    @annotatesubrepoerror
     def commit(self, text, user, date):
         if self._gitmissing():
             raise util.Abort(_("subrepo %s is missing") % self._relpath)
@@ -1137,6 +1181,7 @@
         # circumstances
         return self._gitstate()
 
+    @annotatesubrepoerror
     def merge(self, state):
         source, revision, kind = state
         self._fetch(source, revision)
@@ -1159,6 +1204,7 @@
         else:
             mergefunc()
 
+    @annotatesubrepoerror
     def push(self, opts):
         force = opts.get('force')
 
@@ -1198,6 +1244,7 @@
                           (self._relpath, self._state[1]))
             return False
 
+    @annotatesubrepoerror
     def remove(self):
         if self._gitmissing():
             return
@@ -1247,6 +1294,7 @@
         ui.progress(_('archiving (%s)') % relpath, None)
 
 
+    @annotatesubrepoerror
     def status(self, rev2, **opts):
         rev1 = self._state[1]
         if self._gitmissing() or not rev1:
diff -r 7648b87e76db -r f5fbe15ca744 mercurial/templater.py
--- a/mercurial/templater.py	Mon Jan 14 23:14:45 2013 +0900
+++ b/mercurial/templater.py	Sat Jan 19 17:24:33 2013 -0600
@@ -8,6 +8,7 @@
 from i18n import _
 import sys, os, re
 import util, config, templatefilters, parser, error
+import types
 
 # template parsing
 
@@ -140,6 +141,10 @@
         v = context._defaults.get(key, '')
     if util.safehasattr(v, '__call__'):
         return v(**mapping)
+    if isinstance(v, types.GeneratorType):
+        v = list(v)
+        mapping[key] = v
+        return v
     return v
 
 def buildfilter(exp, context):
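
Why the generator is materialized: a generator can be consumed only once, while a template may reference the same symbol several times. A standalone sketch:

    import types

    def letters():
        yield 'a'
        yield 'b'

    v = letters()
    if isinstance(v, types.GeneratorType):
        v = list(v)  # cache the values so later lookups see them again
    assert v == ['a', 'b'] and v == ['a', 'b']  # safe to read repeatedly
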
@@ -179,6 +184,7 @@
     for i in d:
         if isinstance(i, dict):
             lm.update(i)
+            lm['originalnode'] = mapping.get('node')
             yield runtemplate(context, lm, ctmpl)
         else:
             # v is not an iterable of dicts, this happens when 'key'
@@ -259,6 +265,15 @@
         t = stringify(args[3][0](context, mapping, args[3][1]))
         yield runtemplate(context, mapping, compiletemplate(t, context))
 
+def label(context, mapping, args):
+    if len(args) != 2:
+        # i18n: "label" is a keyword
+        raise error.ParseError(_("label expects two arguments"))
+
+    # ignore args[0] (the label string) since this is supposed to be a no-op
+    t = stringify(args[1][0](context, mapping, args[1][1]))
+    yield runtemplate(context, mapping, compiletemplate(t, context))
+
 methods = {
     "string": lambda e, c: (runstring, e[1]),
     "symbol": lambda e, c: (runsymbol, e[1]),
@@ -274,6 +289,7 @@
     "ifeq": ifeq,
     "join": join,
     "sub": sub,
+    "label": label,
 }
 
 # template engine
diff -r 7648b87e76db -r f5fbe15ca744 mercurial/templates/atom/branchentry.tmpl
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/templates/atom/branchentry.tmpl	Sat Jan 19 17:24:33 2013 -0600
@@ -0,0 +1,8 @@
+ <entry>
+  <title>{branch|escape}</title>
+  <link rel="alternate" href="{urlbase}{url}rev/{node|short}"/>
+  <id>{urlbase}{url}#branch-{node}</id>
+  <updated>{date|rfc3339date}</updated>
+  <published>{date|rfc3339date}</published>
+  <content type="text">{branch|strip|escape}</content>
+ </entry>
diff -r 7648b87e76db -r f5fbe15ca744 mercurial/templates/atom/branches.tmpl
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/templates/atom/branches.tmpl	Sat Jan 19 17:24:33 2013 -0600
@@ -0,0 +1,11 @@
+{header}
+ <id>{urlbase}{url}</id>
+ <link rel="self" href="{urlbase}{url}atom-branches"/>
+ <link rel="alternate" href="{urlbase}{url}branches"/>
+ <title>{repo|escape}: branches</title>
+ <subtitle>{repo|escape} branch history</subtitle>
+ <author><name>Mercurial SCM</name></author>
+ {latestentry%feedupdated}
+
+ {entries%branchentry}
+</feed>
diff -r 7648b87e76db -r f5fbe15ca744 mercurial/templates/atom/map
--- a/mercurial/templates/atom/map	Mon Jan 14 23:14:45 2013 +0900
+++ b/mercurial/templates/atom/map	Sat Jan 19 17:24:33 2013 -0600
@@ -10,4 +10,6 @@
 tagentry = tagentry.tmpl
 bookmarks = bookmarks.tmpl
 bookmarkentry = bookmarkentry.tmpl
+branches = branches.tmpl
+branchentry = branchentry.tmpl
 error = error.tmpl
diff -r 7648b87e76db -r f5fbe15ca744 mercurial/templates/coal/map
--- a/mercurial/templates/coal/map	Mon Jan 14 23:14:45 2013 +0900
+++ b/mercurial/templates/coal/map	Sat Jan 19 17:24:33 2013 -0600
@@ -223,3 +223,4 @@
 error = ../paper/error.tmpl
 urlparameter = '{separator}{name}={value|urlescape}'
 hiddenformentry = '<input type="hidden" name="{name}" value="{value|escape}" />'
+breadcrumb = '&gt; <a href="{url}">{name}</a> '
diff -r 7648b87e76db -r f5fbe15ca744 mercurial/templates/gitweb/bookmarks.tmpl
--- a/mercurial/templates/gitweb/bookmarks.tmpl	Mon Jan 14 23:14:45 2013 +0900
+++ b/mercurial/templates/gitweb/bookmarks.tmpl	Sat Jan 19 17:24:33 2013 -0600
@@ -8,7 +8,8 @@