# HG changeset patch # User Matt Mackall # Date 1318707050 18000 # Node ID 384082750f2c51dc917d85a7145748330fa6ef4d # Parent fccd350acf799f756e3e09166d02b22f1360336c # Parent 2889d4574726546fefd0c22a5cfd0c23bd311a6c merge default into stable for 2.0 code freeze diff -r fccd350acf79 -r 384082750f2c contrib/check-code.py --- a/contrib/check-code.py Sun Oct 02 16:41:07 2011 -0500 +++ b/contrib/check-code.py Sat Oct 15 14:30:50 2011 -0500 @@ -148,7 +148,7 @@ (r'(? 'capabilities: getencoding runcommand\nencoding: UTF-8' + +import sys, struct + +if len(sys.argv) != 2: + print 'usage: debugcmdserver.py FILE' + sys.exit(1) + +outputfmt = '>cI' +outputfmtsize = struct.calcsize(outputfmt) + +if sys.argv[1] == '-': + log = sys.stderr +else: + log = open(sys.argv[1], 'a') + +def read(size): + data = sys.stdin.read(size) + if not data: + raise EOFError() + sys.stdout.write(data) + sys.stdout.flush() + return data + +try: + while True: + header = read(outputfmtsize) + channel, length = struct.unpack(outputfmt, header) + log.write('%s, %-4d' % (channel, length)) + if channel in 'IL': + log.write(' -> waiting for input\n') + else: + data = read(length) + log.write(' -> %r\n' % data) + log.flush() +except EOFError: + pass +finally: + if log != sys.stderr: + log.close() diff -r fccd350acf79 -r 384082750f2c contrib/setup3k.py --- a/contrib/setup3k.py Sun Oct 02 16:41:07 2011 -0500 +++ b/contrib/setup3k.py Sat Oct 15 14:30:50 2011 -0500 @@ -8,7 +8,7 @@ from lib2to3.refactor import get_fixers_from_package as getfixers import sys -if not hasattr(sys, 'version_info') or sys.version_info < (2, 4, 0, 'final'): +if getattr(sys, 'version_info', (0, 0, 0)) < (2, 4, 0, 'final'): raise SystemExit("Mercurial requires Python 2.4 or later.") if sys.version_info[0] >= 3: @@ -236,7 +236,7 @@ try: build_ext.build_extension(self, ext) except CCompilerError: - if not hasattr(ext, 'optional') or not ext.optional: + if not getattr(ext, 'optional', False): raise log.warn("Failed to build optional extension '%s' (skipping)", ext.name) diff -r fccd350acf79 -r 384082750f2c contrib/win32/hgwebdir_wsgi.py --- a/contrib/win32/hgwebdir_wsgi.py Sun Oct 02 16:41:07 2011 -0500 +++ b/contrib/win32/hgwebdir_wsgi.py Sat Oct 15 14:30:50 2011 -0500 @@ -50,7 +50,7 @@ #sys.path.insert(0, r'c:\path\to\python\lib') # Enable tracing.
Run 'python -m win32traceutil' to debug -if hasattr(sys, 'isapidllhandle'): +if getattr(sys, 'isapidllhandle', None) is not None: import win32traceutil # To serve pages in local charset instead of UTF-8, remove the two lines below diff -r fccd350acf79 -r 384082750f2c contrib/zsh_completion --- a/contrib/zsh_completion Sun Oct 02 16:41:07 2011 -0500 +++ b/contrib/zsh_completion Sat Oct 15 14:30:50 2011 -0500 @@ -165,6 +165,7 @@ _hg_labels() { _hg_tags "$@" _hg_bookmarks "$@" + _hg_branches "$@" } _hg_tags() { @@ -191,6 +192,17 @@ (( $#bookmarks )) && _describe -t bookmarks 'bookmarks' bookmarks } +_hg_branches() { + typeset -a branches + local branch + + _hg_cmd branches | while read branch + do + branches+=(${branch/ # [0-9]#:*}) + done + (( $#branches )) && _describe -t branches 'branches' branches +} + # likely merge candidates _hg_mergerevs() { typeset -a heads @@ -617,6 +629,7 @@ '(--only-merges -m)'{-m,--only-merges}'[show only merges]' \ '(--patch -p)'{-p,--patch}'[show patch]' \ '(--prune -P)'{-P+,--prune}'[do not display revision or any of its ancestors]:revision:_hg_labels' \ + '(--branch -b)'{-b+,--branch}'[show changesets within the given named branch]:branch:_hg_branches' \ '*:files:_hg_files' } diff -r fccd350acf79 -r 384082750f2c doc/gendoc.py --- a/doc/gendoc.py Sun Oct 02 16:41:07 2011 -0500 +++ b/doc/gendoc.py Sat Oct 15 14:30:50 2011 -0500 @@ -9,6 +9,7 @@ from mercurial.i18n import _ from mercurial.help import helptable from mercurial import extensions +from mercurial import util def get_desc(docstr): if not docstr: @@ -95,7 +96,7 @@ ui.write(".. _%s:\n" % name) ui.write("\n") section(ui, sec) - if hasattr(doc, '__call__'): + if util.safehasattr(doc, '__call__'): doc = doc() ui.write(doc) ui.write("\n") diff -r fccd350acf79 -r 384082750f2c hgext/acl.py --- a/hgext/acl.py Sun Oct 02 16:41:07 2011 -0500 +++ b/hgext/acl.py Sat Oct 15 14:30:50 2011 -0500 @@ -216,6 +216,8 @@ if user is None: user = getpass.getuser() + ui.debug('acl: checking access for user "%s"\n' % user) + cfg = ui.config('acl', 'config') if cfg: ui.readconfig(cfg, sections = ['acl.groups', 'acl.allow.branches', @@ -242,9 +244,9 @@ for f in ctx.files(): if deny and deny(f): - ui.debug('acl: user %s denied on %s\n' % (user, f)) - raise util.Abort(_('acl: access denied for changeset %s') % ctx) + raise util.Abort(_('acl: user "%s" denied on "%s"' + ' (changeset "%s")') % (user, f, ctx)) if allow and not allow(f): - ui.debug('acl: user %s not allowed on %s\n' % (user, f)) - raise util.Abort(_('acl: access denied for changeset %s') % ctx) - ui.debug('acl: allowing changeset %s\n' % ctx) + raise util.Abort(_('acl: user "%s" not allowed on "%s"' + ' (changeset "%s")') % (user, f, ctx)) + ui.debug('acl: path access granted: "%s"\n' % ctx) diff -r fccd350acf79 -r 384082750f2c hgext/color.py --- a/hgext/color.py Sun Oct 02 16:41:07 2011 -0500 +++ b/hgext/color.py Sat Oct 15 14:30:50 2011 -0500 @@ -68,6 +68,9 @@ branches.current = green branches.inactive = none + tags.normal = green + tags.local = black bold + The available effects in terminfo mode are 'blink', 'bold', 'dim', 'inverse', 'invisible', 'italic', 'standout', and 'underline'; in ECMA-48 mode, the options are 'bold', 'inverse', 'italic', and @@ -257,7 +260,9 @@ 'status.ignored': 'black bold', 'status.modified': 'blue bold', 'status.removed': 'red bold', - 'status.unknown': 'magenta bold underline'} + 'status.unknown': 'magenta bold underline', + 'tags.normal': 'green', + 'tags.local': 'black bold'} def _effect_str(effect): diff -r fccd350acf79 -r 
384082750f2c hgext/convert/cvsps.py --- a/hgext/convert/cvsps.py Sun Oct 02 16:41:07 2011 -0500 +++ b/hgext/convert/cvsps.py Sat Oct 15 14:30:50 2011 -0500 @@ -11,6 +11,7 @@ from mercurial import util from mercurial.i18n import _ from mercurial import hook +from mercurial import util class logentry(object): '''Class logentry has the following attributes: @@ -362,8 +363,14 @@ elif state == 8: # store commit log message if re_31.match(line): - state = 5 - store = True + cpeek = peek + if cpeek.endswith('\n'): + cpeek = cpeek[:-1] + if re_50.match(cpeek): + state = 5 + store = True + else: + e.comment.append(line) elif re_32.match(line): state = 0 store = True @@ -513,8 +520,8 @@ e.comment == c.comment and e.author == c.author and e.branch == c.branch and - (not hasattr(e, 'branchpoints') or - not hasattr (c, 'branchpoints') or + (not util.safehasattr(e, 'branchpoints') or + not util.safehasattr (c, 'branchpoints') or e.branchpoints == c.branchpoints) and ((c.date[0] + c.date[1]) <= (e.date[0] + e.date[1]) <= diff -r fccd350acf79 -r 384082750f2c hgext/convert/filemap.py --- a/hgext/convert/filemap.py Sun Oct 02 16:41:07 2011 -0500 +++ b/hgext/convert/filemap.py Sat Oct 15 14:30:50 2011 -0500 @@ -375,3 +375,6 @@ def lookuprev(self, rev): return self.base.lookuprev(rev) + + def getbookmarks(self): + return self.base.getbookmarks() diff -r fccd350acf79 -r 384082750f2c hgext/convert/git.py --- a/hgext/convert/git.py Sun Oct 02 16:41:07 2011 -0500 +++ b/hgext/convert/git.py Sat Oct 15 14:30:50 2011 -0500 @@ -16,7 +16,7 @@ # Windows does not support GIT_DIR= construct while other systems # cannot remove environment variable. Just assume none have # both issues. - if hasattr(os, 'unsetenv'): + if util.safehasattr(os, 'unsetenv'): def gitopen(self, s, noerr=False): prevgitdir = os.environ.get('GIT_DIR') os.environ['GIT_DIR'] = self.path diff -r fccd350acf79 -r 384082750f2c hgext/convert/hg.py --- a/hgext/convert/hg.py Sun Oct 02 16:41:07 2011 -0500 +++ b/hgext/convert/hg.py Sat Oct 15 14:30:50 2011 -0500 @@ -70,10 +70,10 @@ self.wlock.release() def revmapfile(self): - return os.path.join(self.path, ".hg", "shamap") + return self.repo.join("shamap") def authorfile(self): - return os.path.join(self.path, ".hg", "authormap") + return self.repo.join("authormap") def getheads(self): h = self.repo.changelog.heads() @@ -178,7 +178,7 @@ closed = 'close' in commit.extra if not closed and not man.cmp(m1node, man.revision(mnode)): self.ui.status(_("filtering out empty revision\n")) - self.repo.rollback() + self.repo.rollback(force=True) return parent return p2 @@ -364,8 +364,7 @@ def converted(self, rev, destrev): if self.convertfp is None: - self.convertfp = open(os.path.join(self.path, '.hg', 'shamap'), - 'a') + self.convertfp = open(self.repo.join('shamap'), 'a') self.convertfp.write('%s %s\n' % (destrev, rev)) self.convertfp.flush() diff -r fccd350acf79 -r 384082750f2c hgext/convert/subversion.py --- a/hgext/convert/subversion.py Sun Oct 02 16:41:07 2011 -0500 +++ b/hgext/convert/subversion.py Sat Oct 15 14:30:50 2011 -0500 @@ -501,11 +501,11 @@ and not p[2].startswith(badroot + '/')] # Tell tag renamings from tag creations - remainings = [] + renamings = [] for source, sourcerev, dest in pendings: tagname = dest.split('/')[-1] if source.startswith(srctagspath): - remainings.append([source, sourcerev, tagname]) + renamings.append([source, sourcerev, tagname]) continue if tagname in tags: # Keep the latest tag value @@ -521,7 +521,7 @@ # but were really created in the tag # directory. 
pass - pendings = remainings + pendings = renamings tagspath = srctagspath finally: stream.close() diff -r fccd350acf79 -r 384082750f2c hgext/convert/transport.py --- a/hgext/convert/transport.py Sun Oct 02 16:41:07 2011 -0500 +++ b/hgext/convert/transport.py Sat Oct 15 14:30:50 2011 -0500 @@ -54,7 +54,7 @@ if p: providers.append(p) else: - if hasattr(svn.client, 'get_windows_simple_provider'): + if util.safehasattr(svn.client, 'get_windows_simple_provider'): providers.append(svn.client.get_windows_simple_provider(pool)) return svn.core.svn_auth_open(providers, pool) @@ -73,7 +73,7 @@ self.password = '' # Only Subversion 1.4 has reparent() - if ra is None or not hasattr(svn.ra, 'reparent'): + if ra is None or not util.safehasattr(svn.ra, 'reparent'): self.client = svn.client.create_context(self.pool) ab = _create_auth_baton(self.pool) if False: diff -r fccd350acf79 -r 384082750f2c hgext/eol.py --- a/hgext/eol.py Sun Oct 02 16:41:07 2011 -0500 +++ b/hgext/eol.py Sat Oct 15 14:30:50 2011 -0500 @@ -52,9 +52,10 @@ The rules will first apply when files are touched in the working copy, e.g. by updating to null and back to tip to touch all files. -The extension uses an optional ``[eol]`` section in your hgrc file -(not the ``.hgeol`` file) for settings that control the overall -behavior. There are two settings: +The extension uses an optional ``[eol]`` section read from both the +normal Mercurial configuration files and the ``.hgeol`` file, with the +latter overriding the former. You can use that section to control the +overall behavior. There are three settings: - ``eol.native`` (default ``os.linesep``) can be set to ``LF`` or ``CRLF`` to override the default interpretation of ``native`` for @@ -67,6 +68,10 @@ Such files are normally not touched under the assumption that they have mixed EOLs on purpose. +- ``eol.fix-trailing-newline`` (default False) can be set to True to + ensure that converted files end with a EOL character (either ``\\n`` + or ``\\r\\n`` as per the configured patterns). + The extension provides ``cleverencode:`` and ``cleverdecode:`` filters like the deprecated win32text extension does. This means that you can disable win32text and enable eol and your filters will still work. You @@ -106,6 +111,8 @@ return s if ui.configbool('eol', 'only-consistent', True) and inconsistenteol(s): return s + if ui.configbool('eol', 'fix-trailing-newline', False) and s and s[-1] != '\n': + s = s + '\n' return eolre.sub('\n', s) def tocrlf(s, params, ui, **kwargs): @@ -114,6 +121,8 @@ return s if ui.configbool('eol', 'only-consistent', True) and inconsistenteol(s): return s + if ui.configbool('eol', 'fix-trailing-newline', False) and s and s[-1] != '\n': + s = s + '\n' return eolre.sub('\r\n', s) def isbinary(s, params): @@ -158,7 +167,7 @@ # about inconsistent newlines. 
self.match = match.match(root, '', [], include, exclude) - def setfilters(self, ui): + def copytoui(self, ui): for pattern, style in self.cfg.items('patterns'): key = style.upper() try: @@ -167,6 +176,9 @@ except KeyError: ui.warn(_("ignoring unknown EOL style '%s' from %s\n") % (style, self.cfg.source('patterns', pattern))) + # eol.only-consistent can be specified in ~/.hgrc or .hgeol + for k, v in self.cfg.items('eol'): + ui.setconfig('eol', k, v) def checkrev(self, repo, ctx, files): failed = [] @@ -273,7 +285,7 @@ eol = parseeol(self.ui, self, nodes) if eol is None: return None - eol.setfilters(self.ui) + eol.copytoui(self.ui) return eol.match def _hgcleardirstate(self): diff -r fccd350acf79 -r 384082750f2c hgext/inotify/__init__.py --- a/hgext/inotify/__init__.py Sun Oct 02 16:41:07 2011 -0500 +++ b/hgext/inotify/__init__.py Sat Oct 15 14:30:50 2011 -0500 @@ -11,6 +11,7 @@ # todo: socket permissions from mercurial.i18n import _ +from mercurial import util import server from client import client, QueryFailed @@ -31,7 +32,7 @@ ui.write((' %s/\n') % path) def reposetup(ui, repo): - if not hasattr(repo, 'dirstate'): + if not util.safehasattr(repo, 'dirstate'): return class inotifydirstate(repo.dirstate.__class__): diff -r fccd350acf79 -r 384082750f2c hgext/keyword.py --- a/hgext/keyword.py Sun Oct 02 16:41:07 2011 -0500 +++ b/hgext/keyword.py Sat Oct 15 14:30:50 2011 -0500 @@ -249,10 +249,14 @@ kwcmd = self.restrict and lookup # kwexpand/kwshrink if self.restrict or expand and lookup: mf = ctx.manifest() - lctx = ctx - re_kw = (self.restrict or rekw) and self.rekw or self.rekwexp - msg = (expand and _('overwriting %s expanding keywords\n') - or _('overwriting %s shrinking keywords\n')) + if self.restrict or rekw: + re_kw = self.rekw + else: + re_kw = self.rekwexp + if expand: + msg = _('overwriting %s expanding keywords\n') + else: + msg = _('overwriting %s shrinking keywords\n') for f in candidates: if self.restrict: data = self.repo.file(f).read(mf[f]) @@ -262,18 +266,17 @@ continue if expand: if lookup: - lctx = self.linkctx(f, mf[f]) - data, found = self.substitute(data, f, lctx, re_kw.subn) + ctx = self.linkctx(f, mf[f]) + data, found = self.substitute(data, f, ctx, re_kw.subn) elif self.restrict: found = re_kw.search(data) else: data, found = _shrinktext(data, re_kw.subn) if found: self.ui.note(msg % f) - fpath = self.repo.wjoin(f) - mode = os.lstat(fpath).st_mode - self.repo.wwrite(f, data, ctx.flags(f)) - os.chmod(fpath, mode) + fp = self.repo.wopener(f, "wb", atomictemp=True) + fp.write(data) + fp.close() if kwcmd: self.repo.dirstate.normal(f) elif self.record: @@ -296,7 +299,9 @@ def wread(self, fname, data): '''If in restricted mode returns data read from wdir with keyword substitutions removed.''' - return self.restrict and self.shrink(fname, data) or data + if self.restrict: + return self.shrink(fname, data) + return data class kwfilelog(filelog.filelog): ''' @@ -325,11 +330,11 @@ text = self.kwt.shrink(self.path, text) return super(kwfilelog, self).cmp(node, text) -def _status(ui, repo, kwt, *pats, **opts): +def _status(ui, repo, wctx, kwt, *pats, **opts): '''Bails out if [keyword] configuration is not active. 
Returns status of working directory.''' if kwt: - return repo.status(match=scmutil.match(repo[None], pats, opts), clean=True, + return repo.status(match=scmutil.match(wctx, pats, opts), clean=True, unknown=opts.get('unknown') or opts.get('all')) if ui.configitems('keyword'): raise util.Abort(_('[keyword] patterns cannot match')) @@ -343,7 +348,7 @@ kwt = kwtools['templater'] wlock = repo.wlock() try: - status = _status(ui, repo, kwt, *pats, **opts) + status = _status(ui, repo, wctx, kwt, *pats, **opts) modified, added, removed, deleted, unknown, ignored, clean = status if modified or added or removed or deleted: raise util.Abort(_('outstanding uncommitted changes')) @@ -415,7 +420,10 @@ ui.setconfig('keywordmaps', k, v) else: ui.status(_('\n\tconfiguration using current keyword template maps\n')) - kwmaps = dict(uikwmaps) or _defaultkwmaps(ui) + if uikwmaps: + kwmaps = dict(uikwmaps) + else: + kwmaps = _defaultkwmaps(ui) uisetup(ui) reposetup(ui, repo) @@ -478,13 +486,13 @@ i = ignored (not tracked) ''' kwt = kwtools['templater'] - status = _status(ui, repo, kwt, *pats, **opts) + wctx = repo[None] + status = _status(ui, repo, wctx, kwt, *pats, **opts) cwd = pats and repo.getcwd() or '' modified, added, removed, deleted, unknown, ignored, clean = status files = [] if not opts.get('unknown') or opts.get('all'): files = sorted(modified + added + clean) - wctx = repo[None] kwfiles = kwt.iskwfile(files, wctx) kwdeleted = kwt.iskwfile(deleted, wctx) kwunknown = kwt.iskwfile(unknown, wctx) @@ -582,12 +590,12 @@ kwt.restrict = restrict return n - def rollback(self, dryrun=False): + def rollback(self, dryrun=False, force=False): wlock = self.wlock() try: if not dryrun: changed = self['.'].files() - ret = super(kwrepo, self).rollback(dryrun) + ret = super(kwrepo, self).rollback(dryrun, force) if not dryrun: ctx = self['.'] modified, added = _preselect(self[None].status(), changed) diff -r fccd350acf79 -r 384082750f2c hgext/largefiles/CONTRIBUTORS --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hgext/largefiles/CONTRIBUTORS Sat Oct 15 14:30:50 2011 -0500 @@ -0,0 +1,4 @@ +Greg Ward, author of the original bfiles extension +Na'Tosha Bard of Unity Technologies +Fog Creek Software +Special thanks to the University of Toronto and the UCOSP program diff -r fccd350acf79 -r 384082750f2c hgext/largefiles/__init__.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hgext/largefiles/__init__.py Sat Oct 15 14:30:50 2011 -0500 @@ -0,0 +1,94 @@ +# Copyright 2009-2010 Gregory P. Ward +# Copyright 2009-2010 Intelerad Medical Systems Incorporated +# Copyright 2010-2011 Fog Creek Software +# Copyright 2010-2011 Unity Technologies +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +'''track large binary files + +Large binary files tend to be not very compressible, not very +diffable, and not at all mergeable. Such files are not handled +efficiently by Mercurial's storage format (revlog), which is based on +compressed binary deltas; storing large binary files as regular +Mercurial files wastes bandwidth and disk space and increases +Mercurial's memory usage. The largefiles extension addresses these +problems by adding a centralized client-server layer on top of +Mercurial: largefiles live in a *central store* out on the network +somewhere, and you only fetch the revisions that you need when you +need them. + +largefiles works by maintaining a "standin file" in .hglf/ for each +largefile. 
The standins are small (41 bytes: an SHA-1 hash plus +newline) and are tracked by Mercurial. Largefile revisions are +identified by the SHA-1 hash of their contents, which is written to +the standin. largefiles uses that revision ID to get/put largefile +revisions from/to the central store. This saves both disk space and +bandwidth, since you don't need to retrieve all historical revisions +of large files when you clone or pull. + +To start a new repository or add new large binary files, just add +--large to your ``hg add`` command. For example:: + + $ dd if=/dev/urandom of=randomdata count=2000 + $ hg add --large randomdata + $ hg commit -m 'add randomdata as a largefile' + +When you push a changeset that adds/modifies largefiles to a remote +repository, its largefile revisions will be uploaded along with it. +Note that the remote Mercurial must also have the largefiles extension +enabled for this to work. + +When you pull a changeset that affects largefiles from a remote +repository, Mercurial behaves as normal. However, when you update to +such a revision, any largefiles needed by that revision are downloaded +and cached (if they have never been downloaded before). This means +that network access may be required to update to changesets you have +not previously updated to. + +If you already have large files tracked by Mercurial without the +largefiles extension, you will need to convert your repository in +order to benefit from largefiles. This is done with the 'hg lfconvert' +command:: + + $ hg lfconvert --size 10 oldrepo newrepo + +In repositories that already have largefiles in them, any new file +over 10MB will automatically be added as a largefile. To change this +threshold, set ``largefiles.size`` in your Mercurial config file to +the minimum size in megabytes to track as a largefile, or use the +--lfsize option to the add command (also in megabytes):: + + [largefiles] + size = 2 XXX wouldn't minsize be a better name? + + $ hg add --lfsize 2 + +The ``largefiles.patterns`` config option allows you to specify a list +of filename patterns (see ``hg help patterns``) that should always be +tracked as largefiles:: + + [largefiles] + patterns = + *.jpg + re:.*\.(png|bmp)$ + library.zip + content/audio/* + +Files that match one of these patterns will be added as largefiles +regardless of their size. +''' + +from mercurial import commands + +import lfcommands +import reposetup +import uisetup + +reposetup = reposetup.reposetup +uisetup = uisetup.uisetup + +commands.norepo += " lfconvert" + +cmdtable = lfcommands.cmdtable diff -r fccd350acf79 -r 384082750f2c hgext/largefiles/basestore.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hgext/largefiles/basestore.py Sat Oct 15 14:30:50 2011 -0500 @@ -0,0 +1,202 @@ +# Copyright 2009-2010 Gregory P. Ward +# Copyright 2009-2010 Intelerad Medical Systems Incorporated +# Copyright 2010-2011 Fog Creek Software +# Copyright 2010-2011 Unity Technologies +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version.
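The help text above describes the standin convention in prose; here is a minimal Python sketch of the same convention (helper names are illustrative only -- the extension's real implementation is lfutil.standin() and lfutil.writestandin()):

    import hashlib

    def standinpath(filename):
        # standins live under .hglf/, joined with '/' as dirstate does
        return '.hglf/' + filename.replace('\\', '/')

    def standincontents(largefiledata):
        # 40 hex digits of SHA-1 plus a newline: the 41 bytes that
        # Mercurial tracks in place of the largefile itself
        return hashlib.sha1(largefiledata).hexdigest() + '\n'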
+ +'''base class for store implementations and store-related utility code''' + +import os +import tempfile +import binascii +import re + +from mercurial import util, node, hg +from mercurial.i18n import _ + +import lfutil + +class StoreError(Exception): + '''Raised when there is a problem getting files from or putting + files to a central store.''' + def __init__(self, filename, hash, url, detail): + self.filename = filename + self.hash = hash + self.url = url + self.detail = detail + + def longmessage(self): + if self.url: + return ('%s: %s\n' + '(failed URL: %s)\n' + % (self.filename, self.detail, self.url)) + else: + return ('%s: %s\n' + '(no default or default-push path set in hgrc)\n' + % (self.filename, self.detail)) + + def __str__(self): + return "%s: %s" % (self.url, self.detail) + +class basestore(object): + def __init__(self, ui, repo, url): + self.ui = ui + self.repo = repo + self.url = url + + def put(self, source, hash): + '''Put source file into the store under <filename>/<hash>.''' + raise NotImplementedError('abstract method') + + def exists(self, hash): + '''Check to see if the store contains the given hash.''' + raise NotImplementedError('abstract method') + + def get(self, files): + '''Get the specified largefiles from the store and write to local + files under repo.root. files is a list of (filename, hash) + tuples. Return (success, missing), lists of files successfully + downloaded and those not found in the store. success is a list + of (filename, hash) tuples; missing is a list of filenames that + we could not get. (The detailed error message will already have + been presented to the user, so missing is just supplied as a + summary.)''' + success = [] + missing = [] + ui = self.ui + + at = 0 + for filename, hash in files: + ui.progress(_('getting largefiles'), at, unit='lfile', + total=len(files)) + at += 1 + ui.note(_('getting %s:%s\n') % (filename, hash)) + + cachefilename = lfutil.cachepath(self.repo, hash) + cachedir = os.path.dirname(cachefilename) + + # No need to pass mode='wb' to fdopen(), since mkstemp() already + # opened the file in binary mode. + (tmpfd, tmpfilename) = tempfile.mkstemp( + dir=cachedir, prefix=os.path.basename(filename)) + tmpfile = os.fdopen(tmpfd, 'w') + + try: + hhash = binascii.hexlify(self._getfile(tmpfile, filename, hash)) + except StoreError, err: + ui.warn(err.longmessage()) + hhash = "" + + if hhash != hash: + if hhash != "": + ui.warn(_('%s: data corruption (expected %s, got %s)\n') + % (filename, hash, hhash)) + tmpfile.close() # no-op if it's already closed + os.remove(tmpfilename) + missing.append(filename) + continue + + if os.path.exists(cachefilename): # Windows + os.remove(cachefilename) + os.rename(tmpfilename, cachefilename) + lfutil.linktosystemcache(self.repo, hash) + success.append((filename, hhash)) + + ui.progress(_('getting largefiles'), None) + return (success, missing) + + def verify(self, revs, contents=False): + '''Verify the existence (and, optionally, contents) of every big + file revision referenced by every changeset in revs.
+ Return 0 if all is well, non-zero on any errors.''' + write = self.ui.write + failed = False + + write(_('searching %d changesets for largefiles\n') % len(revs)) + verified = set() # set of (filename, filenode) tuples + + for rev in revs: + cctx = self.repo[rev] + cset = "%d:%s" % (cctx.rev(), node.short(cctx.node())) + + failed = lfutil.any_(self._verifyfile( + cctx, cset, contents, standin, verified) + for standin in cctx) or failed + + num_revs = len(verified) + num_lfiles = len(set([fname for (fname, fnode) in verified])) + if contents: + write(_('verified contents of %d revisions of %d largefiles\n') + % (num_revs, num_lfiles)) + else: + write(_('verified existence of %d revisions of %d largefiles\n') + % (num_revs, num_lfiles)) + + return int(failed) + + def _getfile(self, tmpfile, filename, hash): + '''Fetch one revision of one file from the store and write it + to tmpfile. Compute the hash of the file on-the-fly as it + downloads and return the binary hash. Close tmpfile. Raise + StoreError if unable to download the file (e.g. it does not + exist in the store).''' + raise NotImplementedError('abstract method') + + def _verifyfile(self, cctx, cset, contents, standin, verified): + '''Perform the actual verification of a file in the store. + ''' + raise NotImplementedError('abstract method') + +import localstore, wirestore + +_storeprovider = { + 'file': [localstore.localstore], + 'http': [wirestore.wirestore], + 'https': [wirestore.wirestore], + 'ssh': [wirestore.wirestore], + } + +_scheme_re = re.compile(r'^([a-zA-Z0-9+-.]+)://') + +# During clone this function is passed the src's ui object +# but it needs the dest's ui object so it can read out of +# the config file. Use repo.ui instead. +def _openstore(repo, remote=None, put=False): + ui = repo.ui + + if not remote: + path = (getattr(repo, 'lfpullsource', None) or + ui.expandpath('default-push', 'default')) + + # ui.expandpath() leaves 'default-push' and 'default' alone if + # they cannot be expanded: fallback to the empty string, + # meaning the current directory. + if path == 'default-push' or path == 'default': + path = '' + remote = repo + else: + remote = hg.peer(repo, {}, path) + + # The path could be a scheme so use Mercurial's normal functionality + # to resolve the scheme to a repository and use its path + path = util.safehasattr(remote, 'url') and remote.url() or remote.path + + match = _scheme_re.match(path) + if not match: # regular filesystem path + scheme = 'file' + else: + scheme = match.group(1) + + try: + storeproviders = _storeprovider[scheme] + except KeyError: + raise util.Abort(_('unsupported URL scheme %r') % scheme) + + for class_obj in storeproviders: + try: + return class_obj(ui, repo, remote) + except lfutil.storeprotonotcapable: + pass + + raise util.Abort(_('%s does not appear to be a largefile store') % path) diff -r fccd350acf79 -r 384082750f2c hgext/largefiles/design.txt --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hgext/largefiles/design.txt Sat Oct 15 14:30:50 2011 -0500 @@ -0,0 +1,49 @@ += largefiles - manage large binary files = +This extension is based on Greg Ward's bfiles extension which can be found +at http://mercurial.selenic.com/wiki/BfilesExtension. + +== The largefile store == + +largefile stores are, in the typical use case, centralized servers that have +every past revision of a given binary file. Each largefile is identified by +its sha1 hash, and all interactions with the store take one of the following +forms.
+ +-Download a bfile with this hash +-Upload a bfile with this hash +-Check if the store has a bfile with this hash + +largefiles stores can take one of two forms: + +-Directories on a network file share +-Mercurial wireproto servers, either via ssh or http (hgweb) + +== The Local Repository == + +The local repository has a largefile cache in .hg/largefiles which holds a +subset of the largefiles needed. On a clone only the largefiles at tip are +downloaded. When largefiles are downloaded from the central store, a copy is +saved in this store. + +== The Global Cache == + +largefiles in a local repository cache are hardlinked to files in the global +cache. Before a file is downloaded we check if it is in the global cache. + +== Implementation Details == + +Each largefile has a standin which is in .hglf. The standin is tracked by +Mercurial. The standin contains the SHA1 hash of the largefile. When a +largefile is added/removed/copied/renamed/etc the same operation is applied to +the standin. Thus the history of the standin is the history of the largefile. + +For performance reasons, the contents of a standin are only updated before a +commit. Standins are added/removed/copied/renamed from add/remove/copy/rename +Mercurial commands but their contents will not be updated. The contents of a +standin will always be the hash of the largefile as of the last commit. To +support some commands (revert) some standins are temporarily updated but will +be changed back after the command is finished. + +A Mercurial dirstate object tracks the state of the largefiles. The dirstate +uses the last modified time and current size to detect if a file has changed +(without reading the entire contents of the file). diff -r fccd350acf79 -r 384082750f2c hgext/largefiles/lfcommands.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hgext/largefiles/lfcommands.py Sat Oct 15 14:30:50 2011 -0500 @@ -0,0 +1,481 @@ +# Copyright 2009-2010 Gregory P. Ward +# Copyright 2009-2010 Intelerad Medical Systems Incorporated +# Copyright 2010-2011 Fog Creek Software +# Copyright 2010-2011 Unity Technologies +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +'''High-level command function for lfconvert, plus the cmdtable.''' + +import os +import shutil + +from mercurial import util, match as match_, hg, node, context, error +from mercurial.i18n import _ + +import lfutil +import basestore + +# -- Commands ---------------------------------------------------------- + +def lfconvert(ui, src, dest, *pats, **opts): + '''convert a normal repository to a largefiles repository + + Convert repository SOURCE to a new repository DEST, identical to + SOURCE except that certain files will be converted as largefiles: + specifically, any file that matches any PATTERN *or* whose size is + above the minimum size threshold is converted as a largefile. The + size used to determine whether or not to track a file as a + largefile is the size of the first version of the file. The + minimum size can be specified either with --size or in + configuration as ``largefiles.size``. + + After running this command you will need to make sure that + largefiles is enabled anywhere you intend to push the new + repository. 
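For example, assuming only the option names declared in the cmdtable at the end of this file (repository paths are illustrative), a round trip looks like::

    $ hg lfconvert --size 10 oldrepo newrepo      # normal -> largefiles
    $ hg lfconvert --tonormal newrepo plainrepo   # largefiles -> normal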
+ + Use --tonormal to convert largefiles back to normal files; after + this, the DEST repository can be used without largefiles at all.''' + + if opts['tonormal']: + tolfile = False + else: + tolfile = True + size = lfutil.getminsize(ui, True, opts.get('size'), default=None) + try: + rsrc = hg.repository(ui, src) + if not rsrc.local(): + raise util.Abort(_('%s is not a local Mercurial repo') % src) + except error.RepoError, err: + ui.traceback() + raise util.Abort(err.args[0]) + if os.path.exists(dest): + if not os.path.isdir(dest): + raise util.Abort(_('destination %s already exists') % dest) + elif os.listdir(dest): + raise util.Abort(_('destination %s is not empty') % dest) + try: + ui.status(_('initializing destination %s\n') % dest) + rdst = hg.repository(ui, dest, create=True) + if not rdst.local(): + raise util.Abort(_('%s is not a local Mercurial repo') % dest) + except error.RepoError: + ui.traceback() + raise util.Abort(_('%s is not a repo') % dest) + + success = False + try: + # Lock destination to prevent modification while it is converted to. + # Don't need to lock src because we are just reading from its history + # which can't change. + dst_lock = rdst.lock() + + # Get a list of all changesets in the source. The easy way to do this + # is to simply walk the changelog, using changelog.nodesbetween(). + # Take a look at mercurial/revlog.py:639 for more details. + # Use a generator instead of a list to decrease memory usage + ctxs = (rsrc[ctx] for ctx in rsrc.changelog.nodesbetween(None, + rsrc.heads())[0]) + revmap = {node.nullid: node.nullid} + if tolfile: + lfiles = set() + normalfiles = set() + if not pats: + pats = ui.config(lfutil.longname, 'patterns', default=()) + if pats: + pats = pats.split(' ') + if pats: + matcher = match_.match(rsrc.root, '', list(pats)) + else: + matcher = None + + lfiletohash = {} + for ctx in ctxs: + ui.progress(_('converting revisions'), ctx.rev(), + unit=_('revision'), total=rsrc['tip'].rev()) + _lfconvert_addchangeset(rsrc, rdst, ctx, revmap, + lfiles, normalfiles, matcher, size, lfiletohash) + ui.progress(_('converting revisions'), None) + + if os.path.exists(rdst.wjoin(lfutil.shortname)): + shutil.rmtree(rdst.wjoin(lfutil.shortname)) + + for f in lfiletohash.keys(): + if os.path.isfile(rdst.wjoin(f)): + os.unlink(rdst.wjoin(f)) + try: + os.removedirs(os.path.dirname(rdst.wjoin(f))) + except OSError: + pass + + else: + for ctx in ctxs: + ui.progress(_('converting revisions'), ctx.rev(), + unit=_('revision'), total=rsrc['tip'].rev()) + _addchangeset(ui, rsrc, rdst, ctx, revmap) + + ui.progress(_('converting revisions'), None) + success = True + finally: + if not success: + # we failed, remove the new directory + shutil.rmtree(rdst.root) + dst_lock.release() + +def _addchangeset(ui, rsrc, rdst, ctx, revmap): + # Convert src parents to dst parents + parents = [] + for p in ctx.parents(): + parents.append(revmap[p.node()]) + while len(parents) < 2: + parents.append(node.nullid) + + # Generate list of changed files + files = set(ctx.files()) + if node.nullid not in parents: + mc = ctx.manifest() + mp1 = ctx.parents()[0].manifest() + mp2 = ctx.parents()[1].manifest() + files |= (set(mp1) | set(mp2)) - set(mc) + for f in mc: + if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None): + files.add(f) + + def getfilectx(repo, memctx, f): + if lfutil.standin(f) in files: + # if the file isn't in the manifest then it was removed + # or renamed, raise IOError to indicate this + try: + fctx = ctx.filectx(lfutil.standin(f)) + except error.LookupError: +
raise IOError() + renamed = fctx.renamed() + if renamed: + renamed = lfutil.splitstandin(renamed[0]) + + hash = fctx.data().strip() + path = lfutil.findfile(rsrc, hash) + ### TODO: What if the file is not cached? + data = '' + fd = None + try: + fd = open(path, 'rb') + data = fd.read() + finally: + if fd: + fd.close() + return context.memfilectx(f, data, 'l' in fctx.flags(), + 'x' in fctx.flags(), renamed) + else: + try: + fctx = ctx.filectx(f) + except error.LookupError: + raise IOError() + renamed = fctx.renamed() + if renamed: + renamed = renamed[0] + data = fctx.data() + if f == '.hgtags': + newdata = [] + for line in data.splitlines(): + id, name = line.split(' ', 1) + newdata.append('%s %s\n' % (node.hex(revmap[node.bin(id)]), + name)) + data = ''.join(newdata) + return context.memfilectx(f, data, 'l' in fctx.flags(), + 'x' in fctx.flags(), renamed) + + dstfiles = [] + for file in files: + if lfutil.isstandin(file): + dstfiles.append(lfutil.splitstandin(file)) + else: + dstfiles.append(file) + # Commit + mctx = context.memctx(rdst, parents, ctx.description(), dstfiles, + getfilectx, ctx.user(), ctx.date(), ctx.extra()) + ret = rdst.commitctx(mctx) + rdst.dirstate.setparents(ret) + revmap[ctx.node()] = rdst.changelog.tip() + +def _lfconvert_addchangeset(rsrc, rdst, ctx, revmap, lfiles, normalfiles, + matcher, size, lfiletohash): + # Convert src parents to dst parents + parents = [] + for p in ctx.parents(): + parents.append(revmap[p.node()]) + while len(parents) < 2: + parents.append(node.nullid) + + # Generate list of changed files + files = set(ctx.files()) + if node.nullid not in parents: + mc = ctx.manifest() + mp1 = ctx.parents()[0].manifest() + mp2 = ctx.parents()[1].manifest() + files |= (set(mp1) | set(mp2)) - set(mc) + for f in mc: + if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None): + files.add(f) + + dstfiles = [] + for f in files: + if f not in lfiles and f not in normalfiles: + islfile = _islfile(f, ctx, matcher, size) + # If this file was renamed or copied then copy + # the lfileness of its predecessor + if f in ctx.manifest(): + fctx = ctx.filectx(f) + renamed = fctx.renamed() + renamedlfile = renamed and renamed[0] in lfiles + islfile |= renamedlfile + if 'l' in fctx.flags(): + if renamedlfile: + raise util.Abort( + _('Renamed/copied largefile %s becomes symlink') + % f) + islfile = False + if islfile: + lfiles.add(f) + else: + normalfiles.add(f) + + if f in lfiles: + dstfiles.append(lfutil.standin(f)) + # largefile in manifest if it has not been removed/renamed + if f in ctx.manifest(): + if 'l' in ctx.filectx(f).flags(): + if renamed and renamed[0] in lfiles: + raise util.Abort(_('largefile %s becomes symlink') % f) + + # largefile was modified, update standins + fullpath = rdst.wjoin(f) + lfutil.createdir(os.path.dirname(fullpath)) + m = util.sha1('') + m.update(ctx[f].data()) + hash = m.hexdigest() + if f not in lfiletohash or lfiletohash[f] != hash: + fd = None + try: + fd = open(fullpath, 'wb') + fd.write(ctx[f].data()) + finally: + if fd: + fd.close() + executable = 'x' in ctx[f].flags() + os.chmod(fullpath, lfutil.getmode(executable)) + lfutil.writestandin(rdst, lfutil.standin(f), hash, + executable) + lfiletohash[f] = hash + else: + # normal file + dstfiles.append(f) + + def getfilectx(repo, memctx, f): + if lfutil.isstandin(f): + # if the file isn't in the manifest then it was removed + # or renamed, raise IOError to indicate this + srcfname = lfutil.splitstandin(f) + try: + fctx = ctx.filectx(srcfname) + except error.LookupError: + raise IOError() + renamed =
fctx.renamed() + if renamed: + # standin is always a largefile because largefile-ness + # doesn't change after rename or copy + renamed = lfutil.standin(renamed[0]) + + return context.memfilectx(f, lfiletohash[srcfname], 'l' in + fctx.flags(), 'x' in fctx.flags(), renamed) + else: + try: + fctx = ctx.filectx(f) + except error.LookupError: + raise IOError() + renamed = fctx.renamed() + if renamed: + renamed = renamed[0] + + data = fctx.data() + if f == '.hgtags': + newdata = [] + for line in data.splitlines(): + id, name = line.split(' ', 1) + newdata.append('%s %s\n' % (node.hex(revmap[node.bin(id)]), + name)) + data = ''.join(newdata) + return context.memfilectx(f, data, 'l' in fctx.flags(), + 'x' in fctx.flags(), renamed) + + # Commit + mctx = context.memctx(rdst, parents, ctx.description(), dstfiles, + getfilectx, ctx.user(), ctx.date(), ctx.extra()) + ret = rdst.commitctx(mctx) + rdst.dirstate.setparents(ret) + revmap[ctx.node()] = rdst.changelog.tip() + +def _islfile(file, ctx, matcher, size): + '''Return true if file should be considered a largefile, i.e. + matcher matches it or it is larger than size.''' + # never store special .hg* files as largefiles + if file == '.hgtags' or file == '.hgignore' or file == '.hgsigs': + return False + if matcher and matcher(file): + return True + try: + return ctx.filectx(file).size() >= size * 1024 * 1024 + except error.LookupError: + return False + +def uploadlfiles(ui, rsrc, rdst, files): + '''upload largefiles to the central store''' + + # Don't upload locally. All largefiles are in the system wide cache + # so the other repo can just get them from there. + if not files or rdst.local(): + return + + store = basestore._openstore(rsrc, rdst, put=True) + + at = 0 + files = filter(lambda h: not store.exists(h), files) + for hash in files: + ui.progress(_('uploading largefiles'), at, unit='largefile', + total=len(files)) + source = lfutil.findfile(rsrc, hash) + if not source: + raise util.Abort(_('largefile %s missing from store' + ' (needs to be uploaded)') % hash) + # XXX check for errors here + store.put(source, hash) + at += 1 + ui.progress(_('uploading largefiles'), None) + +def verifylfiles(ui, repo, all=False, contents=False): + '''Verify that every big file revision in the current changeset + exists in the central store. With --contents, also verify that + the contents of each big file revision are correct (SHA-1 hash + matches the revision ID). With --all, check every changeset in + this repository.''' + if all: + # Pass a list to the function rather than an iterator because we know a + # list will work. + revs = range(len(repo)) + else: + revs = ['.'] + + store = basestore._openstore(repo) + return store.verify(revs, contents=contents) + +def cachelfiles(ui, repo, node): + '''cachelfiles ensures that all largefiles needed by the specified revision + are present in the repository's largefile cache. + + returns a tuple (cached, missing). cached is the list of files downloaded + by this operation; missing is the list of files that were needed but could + not be found.''' + lfiles = lfutil.listlfiles(repo, node) + toget = [] + + for lfile in lfiles: + expectedhash = repo[node][lfutil.standin(lfile)].data().strip() + # if it exists and its hash matches, it might have been locally + # modified before updating and the user chose 'local'. in this case, + # it will not be in any store, so don't look for it. 
+ if ((not os.path.exists(repo.wjoin(lfile)) or + expectedhash != lfutil.hashfile(repo.wjoin(lfile))) and + not lfutil.findfile(repo, expectedhash)): + toget.append((lfile, expectedhash)) + + if toget: + store = basestore._openstore(repo) + ret = store.get(toget) + return ret + + return ([], []) + +def updatelfiles(ui, repo, filelist=None, printmessage=True): + wlock = repo.wlock() + try: + lfdirstate = lfutil.openlfdirstate(ui, repo) + lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate) + + if filelist is not None: + lfiles = [f for f in lfiles if f in filelist] + + printed = False + if printmessage and lfiles: + ui.status(_('getting changed largefiles\n')) + printed = True + cachelfiles(ui, repo, '.') + + updated, removed = 0, 0 + for i in map(lambda f: _updatelfile(repo, lfdirstate, f), lfiles): + # increment the appropriate counter according to _updatelfile's + # return value + updated += i > 0 and i or 0 + removed -= i < 0 and i or 0 + if printmessage and (removed or updated) and not printed: + ui.status(_('getting changed largefiles\n')) + printed = True + + lfdirstate.write() + if printed and printmessage: + ui.status(_('%d largefiles updated, %d removed\n') % (updated, + removed)) + finally: + wlock.release() + +def _updatelfile(repo, lfdirstate, lfile): + '''updates a single largefile and copies the state of its standin from + the repository's dirstate to its state in the lfdirstate. + + returns 1 if the file was modified, -1 if the file was removed, 0 if the + file was unchanged, and None if the needed largefile was missing from the + cache.''' + ret = 0 + abslfile = repo.wjoin(lfile) + absstandin = repo.wjoin(lfutil.standin(lfile)) + if os.path.exists(absstandin): + if os.path.exists(absstandin+'.orig'): + shutil.copyfile(abslfile, abslfile+'.orig') + expecthash = lfutil.readstandin(repo, lfile) + if (expecthash != '' and + (not os.path.exists(abslfile) or + expecthash != lfutil.hashfile(abslfile))): + if not lfutil.copyfromcache(repo, expecthash, lfile): + return None # don't try to set the mode or update the dirstate + ret = 1 + mode = os.stat(absstandin).st_mode + if mode != os.stat(abslfile).st_mode: + os.chmod(abslfile, mode) + ret = 1 + else: + if os.path.exists(abslfile): + os.unlink(abslfile) + ret = -1 + state = repo.dirstate[lfutil.standin(lfile)] + if state == 'n': + lfdirstate.normal(lfile) + elif state == 'r': + lfdirstate.remove(lfile) + elif state == 'a': + lfdirstate.add(lfile) + elif state == '?': + lfdirstate.drop(lfile) + return ret + +# -- hg commands declarations ------------------------------------------------ + +cmdtable = { + 'lfconvert': (lfconvert, + [('s', 'size', '', + _('minimum size (MB) for files to be converted ' + 'as largefiles'), + 'SIZE'), + ('', 'tonormal', False, + _('convert from a largefiles repo to a normal repo')), + ], + _('hg lfconvert SOURCE DEST [FILE ...]')), + } diff -r fccd350acf79 -r 384082750f2c hgext/largefiles/lfutil.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hgext/largefiles/lfutil.py Sat Oct 15 14:30:50 2011 -0500 @@ -0,0 +1,448 @@ +# Copyright 2009-2010 Gregory P. Ward +# Copyright 2009-2010 Intelerad Medical Systems Incorporated +# Copyright 2010-2011 Fog Creek Software +# Copyright 2010-2011 Unity Technologies +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. 
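cachelfiles() and _updatelfile() above share one freshness test: a working-copy largefile must be fetched when it is absent or when its SHA-1 no longer matches the hash recorded in its standin. A standalone sketch of that test (hypothetical helper, reusing hashfile()'s 128 KB block size):

    import os
    import hashlib

    def needsfetch(wcpath, expectedhash):
        '''Return True if the working-copy largefile at wcpath is
        absent or stale relative to the standin hash.'''
        if not os.path.exists(wcpath):
            return True
        hasher = hashlib.sha1()
        fd = open(wcpath, 'rb')
        try:
            while True:
                data = fd.read(128 * 1024)
                if not data:
                    break
                hasher.update(data)
        finally:
            fd.close()
        return hasher.hexdigest() != expectedhash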
+ +'''largefiles utility code: must not import other modules in this package.''' + +import os +import errno +import shutil +import stat +import hashlib + +from mercurial import dirstate, httpconnection, match as match_, util, scmutil +from mercurial.i18n import _ + +shortname = '.hglf' +longname = 'largefiles' + + +# -- Portability wrappers ---------------------------------------------- + +def dirstate_walk(dirstate, matcher, unknown=False, ignored=False): + return dirstate.walk(matcher, [], unknown, ignored) + +def repo_add(repo, list): + add = repo[None].add + return add(list) + +def repo_remove(repo, list, unlink=False): + def remove(list, unlink): + wlock = repo.wlock() + try: + if unlink: + for f in list: + try: + util.unlinkpath(repo.wjoin(f)) + except OSError, inst: + if inst.errno != errno.ENOENT: + raise + repo[None].forget(list) + finally: + wlock.release() + return remove(list, unlink=unlink) + +def repo_forget(repo, list): + forget = repo[None].forget + return forget(list) + +def findoutgoing(repo, remote, force): + from mercurial import discovery + common, _anyinc, _heads = discovery.findcommonincoming(repo, + remote, force=force) + return repo.changelog.findmissing(common) + +# -- Private worker functions ------------------------------------------ + +def getminsize(ui, assumelfiles, opt, default=10): + lfsize = opt + if not lfsize and assumelfiles: + lfsize = ui.config(longname, 'size', default=default) + if lfsize: + try: + lfsize = float(lfsize) + except ValueError: + raise util.Abort(_('largefiles: size must be a number (not %s)\n') + % lfsize) + if lfsize is None: + raise util.Abort(_('minimum size for largefiles must be specified')) + return lfsize + +def link(src, dest): + try: + util.oslink(src, dest) + except OSError: + # if hardlinks fail, fallback on copy + shutil.copyfile(src, dest) + os.chmod(dest, os.stat(src).st_mode) + +def systemcachepath(ui, hash): + path = ui.config(longname, 'systemcache', None) + if path: + path = os.path.join(path, hash) + else: + if os.name == 'nt': + appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA')) + path = os.path.join(appdata, longname, hash) + elif os.name == 'posix': + path = os.path.join(os.getenv('HOME'), '.' + longname, hash) + else: + raise util.Abort(_('unknown operating system: %s\n') % os.name) + return path + +def insystemcache(ui, hash): + return os.path.exists(systemcachepath(ui, hash)) + +def findfile(repo, hash): + if incache(repo, hash): + repo.ui.note(_('Found %s in cache\n') % hash) + return cachepath(repo, hash) + if insystemcache(repo.ui, hash): + repo.ui.note(_('Found %s in system cache\n') % hash) + return systemcachepath(repo.ui, hash) + return None + +class largefiles_dirstate(dirstate.dirstate): + def __getitem__(self, key): + return super(largefiles_dirstate, self).__getitem__(unixpath(key)) + def normal(self, f): + return super(largefiles_dirstate, self).normal(unixpath(f)) + def remove(self, f): + return super(largefiles_dirstate, self).remove(unixpath(f)) + def add(self, f): + return super(largefiles_dirstate, self).add(unixpath(f)) + def drop(self, f): + return super(largefiles_dirstate, self).drop(unixpath(f)) + def forget(self, f): + return super(largefiles_dirstate, self).forget(unixpath(f)) + +def openlfdirstate(ui, repo): + ''' + Return a dirstate object that tracks largefiles: i.e. its root is + the repo root, but it is saved in .hg/largefiles/dirstate.
+ ''' + admin = repo.join(longname) + opener = scmutil.opener(admin) + if util.safehasattr(repo.dirstate, '_validate'): + lfdirstate = largefiles_dirstate(opener, ui, repo.root, + repo.dirstate._validate) + else: + lfdirstate = largefiles_dirstate(opener, ui, repo.root) + + # If the largefiles dirstate does not exist, populate and create + # it. This ensures that we create it on the first meaningful + # largefiles operation in a new clone. It also gives us an easy + # way to forcibly rebuild largefiles state: + # rm .hg/largefiles/dirstate && hg status + # Or even, if things are really messed up: + # rm -rf .hg/largefiles && hg status + if not os.path.exists(os.path.join(admin, 'dirstate')): + util.makedirs(admin) + matcher = getstandinmatcher(repo) + for standin in dirstate_walk(repo.dirstate, matcher): + lfile = splitstandin(standin) + hash = readstandin(repo, lfile) + lfdirstate.normallookup(lfile) + try: + if hash == hashfile(lfile): + lfdirstate.normal(lfile) + except IOError, err: + if err.errno != errno.ENOENT: + raise + + lfdirstate.write() + + return lfdirstate + +def lfdirstate_status(lfdirstate, repo, rev): + wlock = repo.wlock() + try: + match = match_.always(repo.root, repo.getcwd()) + s = lfdirstate.status(match, [], False, False, False) + unsure, modified, added, removed, missing, unknown, ignored, clean = s + for lfile in unsure: + if repo[rev][standin(lfile)].data().strip() != \ + hashfile(repo.wjoin(lfile)): + modified.append(lfile) + else: + clean.append(lfile) + lfdirstate.normal(lfile) + lfdirstate.write() + finally: + wlock.release() + return (modified, added, removed, missing, unknown, ignored, clean) + +def listlfiles(repo, rev=None, matcher=None): + '''return a list of largefiles in the working copy or the + specified changeset''' + + if matcher is None: + matcher = getstandinmatcher(repo) + + # ignore unknown files in working directory + return [splitstandin(f) + for f in repo[rev].walk(matcher) + if rev is not None or repo.dirstate[f] != '?'] + +def incache(repo, hash): + return os.path.exists(cachepath(repo, hash)) + +def createdir(dir): + if not os.path.exists(dir): + os.makedirs(dir) + +def cachepath(repo, hash): + return repo.join(os.path.join(longname, hash)) + +def copyfromcache(repo, hash, filename): + '''Copy the specified largefile from the repo or system cache to + filename in the repository. 
Return true on success or false if the + file was not found in either cache (which should not happen: + this is meant to be called only after ensuring that the needed + largefile exists in the cache).''' + path = findfile(repo, hash) + if path is None: + return False + util.makedirs(os.path.dirname(repo.wjoin(filename))) + shutil.copy(path, repo.wjoin(filename)) + return True + +def copytocache(repo, rev, file, uploaded=False): + hash = readstandin(repo, file) + if incache(repo, hash): + return + copytocacheabsolute(repo, repo.wjoin(file), hash) + +def copytocacheabsolute(repo, file, hash): + createdir(os.path.dirname(cachepath(repo, hash))) + if insystemcache(repo.ui, hash): + link(systemcachepath(repo.ui, hash), cachepath(repo, hash)) + else: + shutil.copyfile(file, cachepath(repo, hash)) + os.chmod(cachepath(repo, hash), os.stat(file).st_mode) + linktosystemcache(repo, hash) + +def linktosystemcache(repo, hash): + createdir(os.path.dirname(systemcachepath(repo.ui, hash))) + link(cachepath(repo, hash), systemcachepath(repo.ui, hash)) + +def getstandinmatcher(repo, pats=[], opts={}): + '''Return a match object that applies pats to the standin directory''' + standindir = repo.pathto(shortname) + if pats: + # patterns supplied: search standin directory relative to current dir + cwd = repo.getcwd() + if os.path.isabs(cwd): + # cwd is an absolute path for hg -R + # work relative to the repository root in this case + cwd = '' + pats = [os.path.join(standindir, cwd, pat) for pat in pats] + elif os.path.isdir(standindir): + # no patterns: relative to repo root + pats = [standindir] + else: + # no patterns and no standin dir: return matcher that matches nothing + match = match_.match(repo.root, None, [], exact=True) + match.matchfn = lambda f: False + return match + return getmatcher(repo, pats, opts, showbad=False) + +def getmatcher(repo, pats=[], opts={}, showbad=True): + '''Wrapper around scmutil.match() that adds showbad: if false, + neuter the match object's bad() method so it does not print any + warnings about missing files or directories.''' + match = scmutil.match(repo[None], pats, opts) + + if not showbad: + match.bad = lambda f, msg: None + return match + +def composestandinmatcher(repo, rmatcher): + '''Return a matcher that accepts standins corresponding to the + files accepted by rmatcher. Pass the list of files in the matcher + as the paths specified by the user.''' + smatcher = getstandinmatcher(repo, rmatcher.files()) + isstandin = smatcher.matchfn + def composed_matchfn(f): + return isstandin(f) and rmatcher.matchfn(splitstandin(f)) + smatcher.matchfn = composed_matchfn + + return smatcher + +def standin(filename): + '''Return the repo-relative path to the standin for the specified big + file.''' + # Notes: + # 1) Most callers want an absolute path, but _create_standin() needs + # it repo-relative so lfadd() can pass it to repo_add(). So leave + # it up to the caller to use repo.wjoin() to get an absolute path. + # 2) Join with '/' because that's what dirstate always uses, even on + # Windows. Change existing separator to '/' first in case we are + # passed filenames from an external source (like the command line). + return shortname + '/' + filename.replace(os.sep, '/') + +def isstandin(filename): + '''Return true if filename is a big file standin. filename must be + in Mercurial's internal form (slash-separated).''' + return filename.startswith(shortname + '/') + +def splitstandin(filename): + # Split on / because that's what dirstate always uses, even on Windows.
+ # Change local separator to / first just in case we are passed filenames + # from an external source (like the command line). + bits = filename.replace(os.sep, '/').split('/', 1) + if len(bits) == 2 and bits[0] == shortname: + return bits[1] + else: + return None + +def updatestandin(repo, standin): + file = repo.wjoin(splitstandin(standin)) + if os.path.exists(file): + hash = hashfile(file) + executable = getexecutable(file) + writestandin(repo, standin, hash, executable) + +def readstandin(repo, filename, node=None): + '''read hex hash from standin for filename at given node, or working + directory if no node is given''' + return repo[node][standin(filename)].data().strip() + +def writestandin(repo, standin, hash, executable): + '''write hash to <repo.root>/<standin>''' + writehash(hash, repo.wjoin(standin), executable) + +def copyandhash(instream, outfile): + '''Read bytes from instream (iterable) and write them to outfile, + computing the SHA-1 hash of the data along the way. Close outfile + when done and return the binary hash.''' + hasher = util.sha1('') + for data in instream: + hasher.update(data) + outfile.write(data) + + # Blecch: closing a file that somebody else opened is rude and + # wrong. But it's so darn convenient and practical! After all, + # outfile was opened just to copy and hash. + outfile.close() + + return hasher.digest() + +def hashrepofile(repo, file): + return hashfile(repo.wjoin(file)) + +def hashfile(file): + if not os.path.exists(file): + return '' + hasher = util.sha1('') + fd = open(file, 'rb') + for data in blockstream(fd): + hasher.update(data) + fd.close() + return hasher.hexdigest() + +class limitreader(object): + def __init__(self, f, limit): + self.f = f + self.limit = limit + + def read(self, length): + if self.limit == 0: + return '' + length = length > self.limit and self.limit or length + self.limit -= length + return self.f.read(length) + + def close(self): + pass + +def blockstream(infile, blocksize=128 * 1024): + """Generator that yields blocks of data from infile and closes infile.""" + while True: + data = infile.read(blocksize) + if not data: + break + yield data + # same blecch as copyandhash() above + infile.close() + +def readhash(filename): + rfile = open(filename, 'rb') + hash = rfile.read(40) + rfile.close() + if len(hash) < 40: + raise util.Abort(_('bad hash in \'%s\' (only %d bytes long)') + % (filename, len(hash))) + return hash + +def writehash(hash, filename, executable): + util.makedirs(os.path.dirname(filename)) + if os.path.exists(filename): + os.unlink(filename) + wfile = open(filename, 'wb') + + try: + wfile.write(hash) + wfile.write('\n') + finally: + wfile.close() + if os.path.exists(filename): + os.chmod(filename, getmode(executable)) + +def getexecutable(filename): + mode = os.stat(filename).st_mode + return ((mode & stat.S_IXUSR) and + (mode & stat.S_IXGRP) and + (mode & stat.S_IXOTH)) + +def getmode(executable): + if executable: + return 0755 + else: + return 0644 + +def urljoin(first, second, *arg): + def join(left, right): + if not left.endswith('/'): + left += '/' + if right.startswith('/'): + right = right[1:] + return left + right + + url = join(first, second) + for a in arg: + url = join(url, a) + return url + +def hexsha1(data): + """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like + object data""" + h = hashlib.sha1() + for chunk in util.filechunkiter(data): + h.update(chunk) + return h.hexdigest() + +def httpsendfile(ui, filename): + return httpconnection.httpsendfile(ui, filename, 'rb') + +def
unixpath(path): + '''Return a version of path normalized for use with the lfdirstate.''' + return os.path.normpath(path).replace(os.sep, '/') + +def islfilesrepo(repo): + return ('largefiles' in repo.requirements and + any_(shortname + '/' in f[0] for f in repo.store.datafiles())) + +def any_(gen): + for x in gen: + if x: + return True + return False + +class storeprotonotcapable(BaseException): + def __init__(self, storetypes): + self.storetypes = storetypes diff -r fccd350acf79 -r 384082750f2c hgext/largefiles/localstore.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hgext/largefiles/localstore.py Sat Oct 15 14:30:50 2011 -0500 @@ -0,0 +1,71 @@ +# Copyright 2009-2010 Gregory P. Ward +# Copyright 2009-2010 Intelerad Medical Systems Incorporated +# Copyright 2010-2011 Fog Creek Software +# Copyright 2010-2011 Unity Technologies +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +'''store class for local filesystem''' + +import os + +from mercurial import util +from mercurial.i18n import _ + +import lfutil +import basestore + +class localstore(basestore.basestore): + '''Because there is a system-wide cache, the local store always + uses that cache. Since the cache is updated elsewhere, we can + just read from it here as if it were the store.''' + + def __init__(self, ui, repo, remote): + url = os.path.join(remote.path, '.hg', lfutil.longname) + super(localstore, self).__init__(ui, repo, util.expandpath(url)) + + def put(self, source, filename, hash): + '''Any file that is put must already be in the system-wide + cache so do nothing.''' + return + + def exists(self, hash): + return lfutil.insystemcache(self.repo.ui, hash) + + def _getfile(self, tmpfile, filename, hash): + if lfutil.insystemcache(self.ui, hash): + return lfutil.systemcachepath(self.ui, hash) + raise basestore.StoreError(filename, hash, '', + _("Can't get file locally")) + + def _verifyfile(self, cctx, cset, contents, standin, verified): + filename = lfutil.splitstandin(standin) + if not filename: + return False + fctx = cctx[standin] + key = (filename, fctx.filenode()) + if key in verified: + return False + + expecthash = fctx.data()[0:40] + verified.add(key) + if not lfutil.insystemcache(self.ui, expecthash): + self.ui.warn( + _('changeset %s: %s missing\n' + ' (looked for hash %s)\n') + % (cset, filename, expecthash)) + return True # failed + + if contents: + storepath = lfutil.systemcachepath(self.ui, expecthash) + actualhash = lfutil.hashfile(storepath) + if actualhash != expecthash: + self.ui.warn( + _('changeset %s: %s: contents differ\n' + ' (%s:\n' + ' expected hash %s,\n' + ' but got %s)\n') + % (cset, filename, storepath, expecthash, actualhash)) + return True # failed + return False diff -r fccd350acf79 -r 384082750f2c hgext/largefiles/overrides.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hgext/largefiles/overrides.py Sat Oct 15 14:30:50 2011 -0500 @@ -0,0 +1,830 @@ +# Copyright 2009-2010 Gregory P. Ward +# Copyright 2009-2010 Intelerad Medical Systems Incorporated +# Copyright 2010-2011 Fog Creek Software +# Copyright 2010-2011 Unity Technologies +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. 
+ +'''Overridden Mercurial commands and functions for the largefiles extension''' + +import os +import copy + +from mercurial import hg, commands, util, cmdutil, match as match_, node, \ + archival, error, merge +from mercurial.i18n import _ +from mercurial.node import hex +from hgext import rebase +import lfutil + +try: + from mercurial import scmutil +except ImportError: + pass + +import lfutil +import lfcommands + +def installnormalfilesmatchfn(manifest): + '''overrides scmutil.match so that the matcher it returns will ignore all + largefiles''' + oldmatch = None # for the closure + def override_match(repo, pats=[], opts={}, globbed=False, + default='relpath'): + match = oldmatch(repo, pats, opts, globbed, default) + m = copy.copy(match) + notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in + manifest) + m._files = filter(notlfile, m._files) + m._fmap = set(m._files) + orig_matchfn = m.matchfn + m.matchfn = lambda f: notlfile(f) and orig_matchfn(f) or None + return m + oldmatch = installmatchfn(override_match) + +def installmatchfn(f): + oldmatch = scmutil.match + setattr(f, 'oldmatch', oldmatch) + scmutil.match = f + return oldmatch + +def restorematchfn(): + '''restores scmutil.match to what it was before installnormalfilesmatchfn + was called. no-op if scmutil.match is its original function. + + Note that n calls to installnormalfilesmatchfn will require n calls to + restore matchfn to reverse''' + scmutil.match = getattr(scmutil.match, 'oldmatch', scmutil.match) + +# -- Wrappers: modify existing commands -------------------------------- + +# Add works by going through the files that the user wanted to add and +# checking if they should be added as largefiles. Then it makes a new +# matcher which matches only the normal files and runs the original +# version of add. +def override_add(orig, ui, repo, *pats, **opts): + large = opts.pop('large', None) + lfsize = lfutil.getminsize( + ui, lfutil.islfilesrepo(repo), opts.pop('lfsize', None)) + + lfmatcher = None + if os.path.exists(repo.wjoin(lfutil.shortname)): + lfpats = ui.configlist(lfutil.longname, 'patterns', default=[]) + if lfpats: + lfmatcher = match_.match(repo.root, '', list(lfpats)) + + lfnames = [] + m = scmutil.match(repo[None], pats, opts) + m.bad = lambda x, y: None + wctx = repo[None] + for f in repo.walk(m): + exact = m.exact(f) + lfile = lfutil.standin(f) in wctx + nfile = f in wctx + exists = lfile or nfile + + # Don't warn the user when they attempt to add a normal tracked file. + # The normal add code will do that for us. + if exact and exists: + if lfile: + ui.warn(_('%s already a largefile\n') % f) + continue + + if exact or not exists: + abovemin = (lfsize and + os.path.getsize(repo.wjoin(f)) >= lfsize * 1024 * 1024) + if large or abovemin or (lfmatcher and lfmatcher(f)): + lfnames.append(f) + if ui.verbose or not exact: + ui.status(_('adding %s as a largefile\n') % m.rel(f)) + + bad = [] + standins = [] + + # Need to lock, otherwise there could be a race condition between + # when standins are created and added to the repo. 
+    wlock = repo.wlock()
+    try:
+        if not opts.get('dry_run'):
+            lfdirstate = lfutil.openlfdirstate(ui, repo)
+            for f in lfnames:
+                standinname = lfutil.standin(f)
+                lfutil.writestandin(repo, standinname, hash='',
+                    executable=lfutil.getexecutable(repo.wjoin(f)))
+                standins.append(standinname)
+                if lfdirstate[f] == 'r':
+                    lfdirstate.normallookup(f)
+                else:
+                    lfdirstate.add(f)
+            lfdirstate.write()
+            bad += [lfutil.splitstandin(f)
+                    for f in lfutil.repo_add(repo, standins)
+                    if f in m.files()]
+    finally:
+        wlock.release()
+
+    installnormalfilesmatchfn(repo[None].manifest())
+    result = orig(ui, repo, *pats, **opts)
+    restorematchfn()
+
+    return (result == 1 or bad) and 1 or 0
+
+def override_remove(orig, ui, repo, *pats, **opts):
+    manifest = repo[None].manifest()
+    installnormalfilesmatchfn(manifest)
+    orig(ui, repo, *pats, **opts)
+    restorematchfn()
+
+    after, force = opts.get('after'), opts.get('force')
+    if not pats and not after:
+        raise util.Abort(_('no files specified'))
+    m = scmutil.match(repo[None], pats, opts)
+    try:
+        repo.lfstatus = True
+        s = repo.status(match=m, clean=True)
+    finally:
+        repo.lfstatus = False
+    modified, added, deleted, clean = [[f for f in list
+                                        if lfutil.standin(f) in manifest]
+                                       for list in [s[0], s[1], s[3], s[6]]]
+
+    def warn(files, reason):
+        for f in files:
+            ui.warn(_('not removing %s: file %s (use -f to force removal)\n')
+                    % (m.rel(f), reason))
+
+    if force:
+        remove, forget = modified + deleted + clean, added
+    elif after:
+        remove, forget = deleted, []
+        warn(modified + added + clean, _('still exists'))
+    else:
+        remove, forget = deleted + clean, []
+        warn(modified, _('is modified'))
+        warn(added, _('has been marked for add'))
+
+    for f in sorted(remove + forget):
+        if ui.verbose or not m.exact(f):
+            ui.status(_('removing %s\n') % m.rel(f))
+
+    # Need to lock because standin files are deleted then removed from the
+    # repository and we could race in between.
+    wlock = repo.wlock()
+    try:
+        lfdirstate = lfutil.openlfdirstate(ui, repo)
+        for f in remove:
+            if not after:
+                os.unlink(repo.wjoin(f))
+                currentdir = os.path.split(f)[0]
+                while currentdir and not os.listdir(repo.wjoin(currentdir)):
+                    os.rmdir(repo.wjoin(currentdir))
+                    currentdir = os.path.split(currentdir)[0]
+            lfdirstate.remove(f)
+        lfdirstate.write()
+
+        forget = [lfutil.standin(f) for f in forget]
+        remove = [lfutil.standin(f) for f in remove]
+        lfutil.repo_forget(repo, forget)
+        lfutil.repo_remove(repo, remove, unlink=True)
+    finally:
+        wlock.release()
+
+def override_status(orig, ui, repo, *pats, **opts):
+    try:
+        repo.lfstatus = True
+        return orig(ui, repo, *pats, **opts)
+    finally:
+        repo.lfstatus = False
+
+def override_log(orig, ui, repo, *pats, **opts):
+    try:
+        repo.lfstatus = True
+        orig(ui, repo, *pats, **opts)
+    finally:
+        repo.lfstatus = False
+
+def override_verify(orig, ui, repo, *pats, **opts):
+    large = opts.pop('large', False)
+    all = opts.pop('lfa', False)
+    contents = opts.pop('lfc', False)
+
+    result = orig(ui, repo, *pats, **opts)
+    if large:
+        result = result or lfcommands.verifylfiles(ui, repo, all, contents)
+    return result
+
+# Override needs to refresh standins so that update's normal merge
+# will go through properly. Then the other update hook (overriding repo.update)
+# will get the new files. Filemerge is also overridden so that the merge
+# will merge standins correctly.
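+#
+# A rough sketch of the intended flow, using a hypothetical file name
+# (standin() and hashfile() are the lfutil helpers defined earlier;
+# hg_update is the wrapper defined further down in this file):
+#
+#   1. the user runs 'hg update' while foo.bin is locally modified;
+#   2. override_update() rewrites the standin .hglf/foo.bin to hold
+#      hashfile('foo.bin'), so the merge sees the true working state;
+#   3. the normal merge machinery runs on the standins only;
+#   4. hg_update() finally calls lfcommands.updatelfiles() to fetch
+#      and write out whatever the merged standins now point to.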
+def override_update(orig, ui, repo, *pats, **opts): + lfdirstate = lfutil.openlfdirstate(ui, repo) + s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False, + False, False) + (unsure, modified, added, removed, missing, unknown, ignored, clean) = s + + # Need to lock between the standins getting updated and their + # largefiles getting updated + wlock = repo.wlock() + try: + if opts['check']: + mod = len(modified) > 0 + for lfile in unsure: + standin = lfutil.standin(lfile) + if repo['.'][standin].data().strip() != \ + lfutil.hashfile(repo.wjoin(lfile)): + mod = True + else: + lfdirstate.normal(lfile) + lfdirstate.write() + if mod: + raise util.Abort(_('uncommitted local changes')) + # XXX handle removed differently + if not opts['clean']: + for lfile in unsure + modified + added: + lfutil.updatestandin(repo, lfutil.standin(lfile)) + finally: + wlock.release() + return orig(ui, repo, *pats, **opts) + +# Override filemerge to prompt the user about how they wish to merge +# largefiles. This will handle identical edits, and copy/rename + +# edit without prompting the user. +def override_filemerge(origfn, repo, mynode, orig, fcd, fco, fca): + # Use better variable names here. Because this is a wrapper we cannot + # change the variable names in the function declaration. + fcdest, fcother, fcancestor = fcd, fco, fca + if not lfutil.isstandin(orig): + return origfn(repo, mynode, orig, fcdest, fcother, fcancestor) + else: + if not fcother.cmp(fcdest): # files identical? + return None + + # backwards, use working dir parent as ancestor + if fcancestor == fcother: + fcancestor = fcdest.parents()[0] + + if orig != fcother.path(): + repo.ui.status(_('merging %s and %s to %s\n') + % (lfutil.splitstandin(orig), + lfutil.splitstandin(fcother.path()), + lfutil.splitstandin(fcdest.path()))) + else: + repo.ui.status(_('merging %s\n') + % lfutil.splitstandin(fcdest.path())) + + if fcancestor.path() != fcother.path() and fcother.data() == \ + fcancestor.data(): + return 0 + if fcancestor.path() != fcdest.path() and fcdest.data() == \ + fcancestor.data(): + repo.wwrite(fcdest.path(), fcother.data(), fcother.flags()) + return 0 + + if repo.ui.promptchoice(_('largefile %s has a merge conflict\n' + 'keep (l)ocal or take (o)ther?') % + lfutil.splitstandin(orig), + (_('&Local'), _('&Other')), 0) == 0: + return 0 + else: + repo.wwrite(fcdest.path(), fcother.data(), fcother.flags()) + return 0 + +# Copy first changes the matchers to match standins instead of +# largefiles. Then it overrides util.copyfile in that function it +# checks if the destination largefile already exists. It also keeps a +# list of copied files so that the largefiles can be copied and the +# dirstate updated. +def override_copy(orig, ui, repo, pats, opts, rename=False): + # doesn't remove largefile on rename + if len(pats) < 2: + # this isn't legal, let the original function deal with it + return orig(ui, repo, pats, opts, rename) + + def makestandin(relpath): + path = scmutil.canonpath(repo.root, repo.getcwd(), relpath) + return os.path.join(os.path.relpath('.', repo.getcwd()), + lfutil.standin(path)) + + fullpats = scmutil.expandpats(pats) + dest = fullpats[-1] + + if os.path.isdir(dest): + if not os.path.isdir(makestandin(dest)): + os.makedirs(makestandin(dest)) + # This could copy both lfiles and normal files in one command, + # but we don't want to do that. First replace their matcher to + # only match normal files and run it, then replace it to just + # match largefiles and run it again. 
+ nonormalfiles = False + nolfiles = False + try: + installnormalfilesmatchfn(repo[None].manifest()) + result = orig(ui, repo, pats, opts, rename) + except util.Abort, e: + if str(e) != 'no files to copy': + raise e + else: + nonormalfiles = True + result = 0 + finally: + restorematchfn() + + # The first rename can cause our current working directory to be removed. + # In that case there is nothing left to copy/rename so just quit. + try: + repo.getcwd() + except OSError: + return result + + try: + # When we call orig below it creates the standins but we don't add them + # to the dir state until later so lock during that time. + wlock = repo.wlock() + + manifest = repo[None].manifest() + oldmatch = None # for the closure + def override_match(repo, pats=[], opts={}, globbed=False, + default='relpath'): + newpats = [] + # The patterns were previously mangled to add the standin + # directory; we need to remove that now + for pat in pats: + if match_.patkind(pat) is None and lfutil.shortname in pat: + newpats.append(pat.replace(lfutil.shortname, '')) + else: + newpats.append(pat) + match = oldmatch(repo, newpats, opts, globbed, default) + m = copy.copy(match) + lfile = lambda f: lfutil.standin(f) in manifest + m._files = [lfutil.standin(f) for f in m._files if lfile(f)] + m._fmap = set(m._files) + orig_matchfn = m.matchfn + m.matchfn = lambda f: (lfutil.isstandin(f) and + lfile(lfutil.splitstandin(f)) and + orig_matchfn(lfutil.splitstandin(f)) or + None) + return m + oldmatch = installmatchfn(override_match) + listpats = [] + for pat in pats: + if match_.patkind(pat) is not None: + listpats.append(pat) + else: + listpats.append(makestandin(pat)) + + try: + origcopyfile = util.copyfile + copiedfiles = [] + def override_copyfile(src, dest): + if lfutil.shortname in src and lfutil.shortname in dest: + destlfile = dest.replace(lfutil.shortname, '') + if not opts['force'] and os.path.exists(destlfile): + raise IOError('', + _('destination largefile already exists')) + copiedfiles.append((src, dest)) + origcopyfile(src, dest) + + util.copyfile = override_copyfile + result += orig(ui, repo, listpats, opts, rename) + finally: + util.copyfile = origcopyfile + + lfdirstate = lfutil.openlfdirstate(ui, repo) + for (src, dest) in copiedfiles: + if lfutil.shortname in src and lfutil.shortname in dest: + srclfile = src.replace(lfutil.shortname, '') + destlfile = dest.replace(lfutil.shortname, '') + destlfiledir = os.path.dirname(destlfile) or '.' + if not os.path.isdir(destlfiledir): + os.makedirs(destlfiledir) + if rename: + os.rename(srclfile, destlfile) + lfdirstate.remove(os.path.relpath(srclfile, + repo.root)) + else: + util.copyfile(srclfile, destlfile) + lfdirstate.add(os.path.relpath(destlfile, + repo.root)) + lfdirstate.write() + except util.Abort, e: + if str(e) != 'no files to copy': + raise e + else: + nolfiles = True + finally: + restorematchfn() + wlock.release() + + if nolfiles and nonormalfiles: + raise util.Abort(_('no files to copy')) + + return result + +# When the user calls revert, we have to be careful to not revert any +# changes to other largefiles accidentally. This means we have to keep +# track of the largefiles that are being reverted so we only pull down +# the necessary largefiles. +# +# Standins are only updated (to match the hash of largefiles) before +# commits. Update the standins then run the original revert, changing +# the matcher to hit standins instead of largefiles. Based on the +# resulting standins update the largefiles. 
Then return the standins
+# to their proper state
+def override_revert(orig, ui, repo, *pats, **opts):
+    # Because we put the standins in a bad state (by updating them)
+    # and then return them to a correct state we need to lock to
+    # prevent others from changing them in their incorrect state.
+    wlock = repo.wlock()
+    try:
+        lfdirstate = lfutil.openlfdirstate(ui, repo)
+        (modified, added, removed, missing, unknown, ignored, clean) = \
+            lfutil.lfdirstate_status(lfdirstate, repo, repo['.'].rev())
+        for lfile in modified:
+            lfutil.updatestandin(repo, lfutil.standin(lfile))
+
+        try:
+            ctx = repo[opts.get('rev')]
+            oldmatch = None # for the closure
+            def override_match(ctxorrepo, pats=[], opts={}, globbed=False,
+                    default='relpath'):
+                if util.safehasattr(ctxorrepo, 'match'):
+                    ctx0 = ctxorrepo
+                else:
+                    ctx0 = ctxorrepo[None]
+                match = oldmatch(ctxorrepo, pats, opts, globbed, default)
+                m = copy.copy(match)
+                def tostandin(f):
+                    if lfutil.standin(f) in ctx0 or lfutil.standin(f) in ctx:
+                        return lfutil.standin(f)
+                    elif lfutil.standin(f) in repo[None]:
+                        return None
+                    return f
+                m._files = [tostandin(f) for f in m._files]
+                m._files = [f for f in m._files if f is not None]
+                m._fmap = set(m._files)
+                orig_matchfn = m.matchfn
+                def matchfn(f):
+                    if lfutil.isstandin(f):
+                        # We need to keep track of what largefiles are being
+                        # matched so we know which ones to update later --
+                        # otherwise we accidentally revert changes to other
+                        # largefiles. This is repo-specific, so duckpunch the
+                        # repo object to keep the list of largefiles for us
+                        # later.
+                        if orig_matchfn(lfutil.splitstandin(f)) and \
+                                (f in repo[None] or f in ctx):
+                            lfileslist = getattr(repo, '_lfilestoupdate', [])
+                            lfileslist.append(lfutil.splitstandin(f))
+                            repo._lfilestoupdate = lfileslist
+                            return True
+                        else:
+                            return False
+                    return orig_matchfn(f)
+                m.matchfn = matchfn
+                return m
+            oldmatch = installmatchfn(override_match)
+            matches = override_match(repo[None], pats, opts)
+            orig(ui, repo, *pats, **opts)
+        finally:
+            restorematchfn()
+        lfileslist = getattr(repo, '_lfilestoupdate', [])
+        lfcommands.updatelfiles(ui, repo, filelist=lfileslist,
+                                printmessage=False)
+
+        # empty out the largefiles list so we start fresh next time
+        repo._lfilestoupdate = []
+        for lfile in modified:
+            if lfile in lfileslist:
+                if os.path.exists(repo.wjoin(lfutil.standin(lfile))) and lfile\
+                        in repo['.']:
+                    lfutil.writestandin(repo, lfutil.standin(lfile),
+                        repo['.'][lfile].data().strip(),
+                        'x' in repo['.'][lfile].flags())
+        lfdirstate = lfutil.openlfdirstate(ui, repo)
+        for lfile in added:
+            standin = lfutil.standin(lfile)
+            if standin not in ctx and (standin in matches or opts.get('all')):
+                if lfile in lfdirstate:
+                    lfdirstate.drop(lfile)
+                util.unlinkpath(repo.wjoin(standin))
+        lfdirstate.write()
+    finally:
+        wlock.release()
+
+def hg_update(orig, repo, node):
+    result = orig(repo, node)
+    # XXX check if it worked first
+    lfcommands.updatelfiles(repo.ui, repo)
+    return result
+
+def hg_clean(orig, repo, node, show_stats=True):
+    result = orig(repo, node, show_stats)
+    lfcommands.updatelfiles(repo.ui, repo)
+    return result
+
+def hg_merge(orig, repo, node, force=None, remind=True):
+    result = orig(repo, node, force, remind)
+    lfcommands.updatelfiles(repo.ui, repo)
+    return result
+
+# When we rebase a repository with remotely changed largefiles, we need to
+# take some extra care so that the largefiles are correctly updated in the
+# working copy
+def override_pull(orig, ui, repo, source=None, **opts):
+    if opts.get('rebase',
False): + repo._isrebasing = True + try: + if opts.get('update'): + del opts['update'] + ui.debug('--update and --rebase are not compatible, ignoring ' + 'the update flag\n') + del opts['rebase'] + cmdutil.bailifchanged(repo) + revsprepull = len(repo) + origpostincoming = commands.postincoming + def _dummy(*args, **kwargs): + pass + commands.postincoming = _dummy + repo.lfpullsource = source + if not source: + source = 'default' + try: + result = commands.pull(ui, repo, source, **opts) + finally: + commands.postincoming = origpostincoming + revspostpull = len(repo) + if revspostpull > revsprepull: + result = result or rebase.rebase(ui, repo) + finally: + repo._isrebasing = False + else: + repo.lfpullsource = source + if not source: + source = 'default' + result = orig(ui, repo, source, **opts) + return result + +def override_rebase(orig, ui, repo, **opts): + repo._isrebasing = True + try: + orig(ui, repo, **opts) + finally: + repo._isrebasing = False + +def override_archive(orig, repo, dest, node, kind, decode=True, matchfn=None, + prefix=None, mtime=None, subrepos=None): + # No need to lock because we are only reading history and + # largefile caches, neither of which are modified. + lfcommands.cachelfiles(repo.ui, repo, node) + + if kind not in archival.archivers: + raise util.Abort(_("unknown archive type '%s'") % kind) + + ctx = repo[node] + + if kind == 'files': + if prefix: + raise util.Abort( + _('cannot give prefix when archiving to files')) + else: + prefix = archival.tidyprefix(dest, kind, prefix) + + def write(name, mode, islink, getdata): + if matchfn and not matchfn(name): + return + data = getdata() + if decode: + data = repo.wwritedata(name, data) + archiver.addfile(prefix + name, mode, islink, data) + + archiver = archival.archivers[kind](dest, mtime or ctx.date()[0]) + + if repo.ui.configbool("ui", "archivemeta", True): + def metadata(): + base = 'repo: %s\nnode: %s\nbranch: %s\n' % ( + hex(repo.changelog.node(0)), hex(node), ctx.branch()) + + tags = ''.join('tag: %s\n' % t for t in ctx.tags() + if repo.tagtype(t) == 'global') + if not tags: + repo.ui.pushbuffer() + opts = {'template': '{latesttag}\n{latesttagdistance}', + 'style': '', 'patch': None, 'git': None} + cmdutil.show_changeset(repo.ui, repo, opts).show(ctx) + ltags, dist = repo.ui.popbuffer().split('\n') + tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':')) + tags += 'latesttagdistance: %s\n' % dist + + return base + tags + + write('.hg_archival.txt', 0644, False, metadata) + + for f in ctx: + ff = ctx.flags(f) + getdata = ctx[f].data + if lfutil.isstandin(f): + path = lfutil.findfile(repo, getdata().strip()) + f = lfutil.splitstandin(f) + + def getdatafn(): + try: + fd = open(path, 'rb') + return fd.read() + finally: + fd.close() + + getdata = getdatafn + write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata) + + if subrepos: + for subpath in ctx.substate: + sub = ctx.sub(subpath) + try: + sub.archive(repo.ui, archiver, prefix) + except TypeError: + sub.archive(archiver, prefix) + + archiver.done() + +# If a largefile is modified, the change is not reflected in its +# standin until a commit. cmdutil.bailifchanged() raises an exception +# if the repo has uncommitted changes. Wrap it to also check if +# largefiles were changed. This is used by bisect and backout. 
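+#
+# For example (hypothetical file name): if foo.bin has been edited but
+# not yet committed, its standin .hglf/foo.bin is still clean, so the
+# original bailifchanged() sees nothing wrong; the extra status check
+# below runs with repo.lfstatus set and reports foo.bin as modified.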
+def override_bailifchanged(orig, repo): + orig(repo) + repo.lfstatus = True + modified, added, removed, deleted = repo.status()[:4] + repo.lfstatus = False + if modified or added or removed or deleted: + raise util.Abort(_('outstanding uncommitted changes')) + +# Fetch doesn't use cmdutil.bail_if_changed so override it to add the check +def override_fetch(orig, ui, repo, *pats, **opts): + repo.lfstatus = True + modified, added, removed, deleted = repo.status()[:4] + repo.lfstatus = False + if modified or added or removed or deleted: + raise util.Abort(_('outstanding uncommitted changes')) + return orig(ui, repo, *pats, **opts) + +def override_forget(orig, ui, repo, *pats, **opts): + installnormalfilesmatchfn(repo[None].manifest()) + orig(ui, repo, *pats, **opts) + restorematchfn() + m = scmutil.match(repo[None], pats, opts) + + try: + repo.lfstatus = True + s = repo.status(match=m, clean=True) + finally: + repo.lfstatus = False + forget = sorted(s[0] + s[1] + s[3] + s[6]) + forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()] + + for f in forget: + if lfutil.standin(f) not in repo.dirstate and not \ + os.path.isdir(m.rel(lfutil.standin(f))): + ui.warn(_('not removing %s: file is already untracked\n') + % m.rel(f)) + + for f in forget: + if ui.verbose or not m.exact(f): + ui.status(_('removing %s\n') % m.rel(f)) + + # Need to lock because standin files are deleted then removed from the + # repository and we could race inbetween. + wlock = repo.wlock() + try: + lfdirstate = lfutil.openlfdirstate(ui, repo) + for f in forget: + if lfdirstate[f] == 'a': + lfdirstate.drop(f) + else: + lfdirstate.remove(f) + lfdirstate.write() + lfutil.repo_remove(repo, [lfutil.standin(f) for f in forget], + unlink=True) + finally: + wlock.release() + +def getoutgoinglfiles(ui, repo, dest=None, **opts): + dest = ui.expandpath(dest or 'default-push', dest or 'default') + dest, branches = hg.parseurl(dest, opts.get('branch')) + revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev')) + if revs: + revs = [repo.lookup(rev) for rev in revs] + + remoteui = hg.remoteui + + try: + remote = hg.repository(remoteui(repo, opts), dest) + except error.RepoError: + return None + o = lfutil.findoutgoing(repo, remote, False) + if not o: + return None + o = repo.changelog.nodesbetween(o, revs)[0] + if opts.get('newest_first'): + o.reverse() + + toupload = set() + for n in o: + parents = [p for p in repo.changelog.parents(n) if p != node.nullid] + ctx = repo[n] + files = set(ctx.files()) + if len(parents) == 2: + mc = ctx.manifest() + mp1 = ctx.parents()[0].manifest() + mp2 = ctx.parents()[1].manifest() + for f in mp1: + if f not in mc: + files.add(f) + for f in mp2: + if f not in mc: + files.add(f) + for f in mc: + if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None): + files.add(f) + toupload = toupload.union( + set([f for f in files if lfutil.isstandin(f) and f in ctx])) + return toupload + +def override_outgoing(orig, ui, repo, dest=None, **opts): + orig(ui, repo, dest, **opts) + + if opts.pop('large', None): + toupload = getoutgoinglfiles(ui, repo, dest, **opts) + if toupload is None: + ui.status(_('largefiles: No remote repo\n')) + else: + ui.status(_('largefiles to upload:\n')) + for file in toupload: + ui.status(lfutil.splitstandin(file) + '\n') + ui.status('\n') + +def override_summary(orig, ui, repo, *pats, **opts): + orig(ui, repo, *pats, **opts) + + if opts.pop('large', None): + toupload = getoutgoinglfiles(ui, repo, None, **opts) + if toupload is None: + 
ui.status(_('largefiles: No remote repo\n')) + else: + ui.status(_('largefiles: %d to upload\n') % len(toupload)) + +def override_addremove(orig, ui, repo, *pats, **opts): + # Check if the parent or child has largefiles; if so, disallow + # addremove. If there is a symlink in the manifest then getting + # the manifest throws an exception: catch it and let addremove + # deal with it. + try: + manifesttip = set(repo['tip'].manifest()) + except util.Abort: + manifesttip = set() + try: + manifestworking = set(repo[None].manifest()) + except util.Abort: + manifestworking = set() + + # Manifests are only iterable so turn them into sets then union + for file in manifesttip.union(manifestworking): + if file.startswith(lfutil.shortname): + raise util.Abort( + _('addremove cannot be run on a repo with largefiles')) + + return orig(ui, repo, *pats, **opts) + +# Calling purge with --all will cause the largefiles to be deleted. +# Override repo.status to prevent this from happening. +def override_purge(orig, ui, repo, *dirs, **opts): + oldstatus = repo.status + def override_status(node1='.', node2=None, match=None, ignored=False, + clean=False, unknown=False, listsubrepos=False): + r = oldstatus(node1, node2, match, ignored, clean, unknown, + listsubrepos) + lfdirstate = lfutil.openlfdirstate(ui, repo) + modified, added, removed, deleted, unknown, ignored, clean = r + unknown = [f for f in unknown if lfdirstate[f] == '?'] + ignored = [f for f in ignored if lfdirstate[f] == '?'] + return modified, added, removed, deleted, unknown, ignored, clean + repo.status = override_status + orig(ui, repo, *dirs, **opts) + repo.status = oldstatus + +def override_rollback(orig, ui, repo, **opts): + result = orig(ui, repo, **opts) + merge.update(repo, node=None, branchmerge=False, force=True, + partial=lfutil.isstandin) + lfdirstate = lfutil.openlfdirstate(ui, repo) + lfiles = lfutil.listlfiles(repo) + oldlfiles = lfutil.listlfiles(repo, repo[None].parents()[0].rev()) + for file in lfiles: + if file in oldlfiles: + lfdirstate.normallookup(file) + else: + lfdirstate.add(file) + lfdirstate.write() + return result diff -r fccd350acf79 -r 384082750f2c hgext/largefiles/proto.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hgext/largefiles/proto.py Sat Oct 15 14:30:50 2011 -0500 @@ -0,0 +1,160 @@ +# Copyright 2011 Fog Creek Software +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +import os +import tempfile +import urllib2 + +from mercurial import error, httprepo, util, wireproto +from mercurial.i18n import _ + +import lfutil + +LARGEFILES_REQUIRED_MSG = ('\nThis repository uses the largefiles extension.' 
+                           '\n\nPlease enable it in your Mercurial config '
+                           'file.\n')
+
+def putlfile(repo, proto, sha):
+    '''Put a largefile into a repository's local cache and into the
+    system cache.'''
+    f = None
+    proto.redirect()
+    try:
+        try:
+            f = tempfile.NamedTemporaryFile(mode='wb+', prefix='hg-putlfile-')
+            proto.getfile(f)
+            f.seek(0)
+            if sha != lfutil.hexsha1(f):
+                return wireproto.pushres(1)
+            lfutil.copytocacheabsolute(repo, f.name, sha)
+        except IOError:
+            repo.ui.warn(
+                _('error: could not put received data into largefile store'))
+            return wireproto.pushres(1)
+    finally:
+        if f:
+            f.close()
+
+    return wireproto.pushres(0)
+
+def getlfile(repo, proto, sha):
+    '''Retrieve a largefile from the repository-local cache or system
+    cache.'''
+    filename = lfutil.findfile(repo, sha)
+    if not filename:
+        raise util.Abort(_('requested largefile %s not present in cache') % sha)
+    f = open(filename, 'rb')
+    length = os.fstat(f.fileno())[6]
+
+    # Since we can't set an HTTP content-length header here, and
+    # Mercurial core provides no way to give the length of a streamres
+    # (and reading the entire file into RAM would be ill-advised), we
+    # just send the length on the first line of the response, like the
+    # ssh proto does for string responses.
+    def generator():
+        yield '%d\n' % length
+        for chunk in f:
+            yield chunk
+    return wireproto.streamres(generator())
+
+def statlfile(repo, proto, sha):
+    '''Return '2\n' if the largefile is missing, '1\n' if it has a
+    mismatched checksum, or '0\n' if it is in good condition'''
+    filename = lfutil.findfile(repo, sha)
+    if not filename:
+        return '2\n'
+    fd = None
+    try:
+        fd = open(filename, 'rb')
+        return lfutil.hexsha1(fd) == sha and '0\n' or '1\n'
+    finally:
+        if fd:
+            fd.close()
+
+def wirereposetup(ui, repo):
+    class lfileswirerepository(repo.__class__):
+        def putlfile(self, sha, fd):
+            # unfortunately, httprepository._callpush tries to convert its
+            # input file-like into a bundle before sending it, so we can't use
+            # it ...
+            if issubclass(self.__class__, httprepo.httprepository):
+                try:
+                    return int(self._call('putlfile', data=fd, sha=sha,
+                        headers={'content-type':'application/mercurial-0.1'}))
+                except (ValueError, urllib2.HTTPError):
+                    return 1
+            # ... but we can't use sshrepository._call because the data=
+            # argument won't get sent, and _callpush does exactly what we want
+            # in this case: send the data straight through
+            else:
+                try:
+                    ret, output = self._callpush("putlfile", fd, sha=sha)
+                    if ret == "":
+                        raise error.ResponseError(_('putlfile failed:'),
+                                                  output)
+                    return int(ret)
+                except IOError:
+                    return 1
+                except ValueError:
+                    raise error.ResponseError(
+                        _('putlfile failed (unexpected response):'), ret)
+
+        def getlfile(self, sha):
+            stream = self._callstream("getlfile", sha=sha)
+            length = stream.readline()
+            try:
+                length = int(length)
+            except ValueError:
+                self._abort(error.ResponseError(_("unexpected response:"),
+                                                length))
+            return (length, stream)
+
+        def statlfile(self, sha):
+            try:
+                return int(self._call("statlfile", sha=sha))
+            except (ValueError, urllib2.HTTPError):
+                # If the server returns anything but an integer followed by a
+                # newline, it's not speaking our language; if we get an HTTP
+                # error, we can't be sure the largefile is present; either
+                # way, consider it missing.
+ return 2 + + repo.__class__ = lfileswirerepository + +# advertise the largefiles=serve capability +def capabilities(repo, proto): + return capabilities_orig(repo, proto) + ' largefiles=serve' + +# duplicate what Mercurial's new out-of-band errors mechanism does, because +# clients old and new alike both handle it well +def webproto_refuseclient(self, message): + self.req.header([('Content-Type', 'application/hg-error')]) + return message + +def sshproto_refuseclient(self, message): + self.ui.write_err('%s\n-\n' % message) + self.fout.write('\n') + self.fout.flush() + + return '' + +def heads(repo, proto): + if lfutil.islfilesrepo(repo): + return wireproto.ooberror(LARGEFILES_REQUIRED_MSG) + return wireproto.heads(repo, proto) + +def sshrepo_callstream(self, cmd, **args): + if cmd == 'heads' and self.capable('largefiles'): + cmd = 'lheads' + if cmd == 'batch' and self.capable('largefiles'): + args['cmds'] = args['cmds'].replace('heads ', 'lheads ') + return ssh_oldcallstream(self, cmd, **args) + +def httprepo_callstream(self, cmd, **args): + if cmd == 'heads' and self.capable('largefiles'): + cmd = 'lheads' + if cmd == 'batch' and self.capable('largefiles'): + args['cmds'] = args['cmds'].replace('heads ', 'lheads ') + return http_oldcallstream(self, cmd, **args) diff -r fccd350acf79 -r 384082750f2c hgext/largefiles/remotestore.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hgext/largefiles/remotestore.py Sat Oct 15 14:30:50 2011 -0500 @@ -0,0 +1,106 @@ +# Copyright 2010-2011 Fog Creek Software +# Copyright 2010-2011 Unity Technologies +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +'''remote largefile store; the base class for servestore''' + +import urllib2 + +from mercurial import util +from mercurial.i18n import _ + +import lfutil +import basestore + +class remotestore(basestore.basestore): + '''a largefile store accessed over a network''' + def __init__(self, ui, repo, url): + super(remotestore, self).__init__(ui, repo, url) + + def put(self, source, hash): + if self._verify(hash): + return + if self.sendfile(source, hash): + raise util.Abort( + _('remotestore: could not put %s to remote store %s') + % (source, self.url)) + self.ui.debug( + _('remotestore: put %s to remote store %s') % (source, self.url)) + + def exists(self, hash): + return self._verify(hash) + + def sendfile(self, filename, hash): + self.ui.debug('remotestore: sendfile(%s, %s)\n' % (filename, hash)) + fd = None + try: + try: + fd = lfutil.httpsendfile(self.ui, filename) + except IOError, e: + raise util.Abort( + _('remotestore: could not open file %s: %s') + % (filename, str(e))) + return self._put(hash, fd) + finally: + if fd: + fd.close() + + def _getfile(self, tmpfile, filename, hash): + # quit if the largefile isn't there + stat = self._stat(hash) + if stat == 1: + raise util.Abort(_('remotestore: largefile %s is invalid') % hash) + elif stat == 2: + raise util.Abort(_('remotestore: largefile %s is missing') % hash) + + try: + length, infile = self._get(hash) + except urllib2.HTTPError, e: + # 401s get converted to util.Aborts; everything else is fine being + # turned into a StoreError + raise basestore.StoreError(filename, hash, self.url, str(e)) + except urllib2.URLError, e: + # This usually indicates a connection problem, so don't + # keep trying with the other files... they will probably + # all fail too. 
+            raise util.Abort('%s: %s' % (self.url, e.reason))
+        except IOError, e:
+            raise basestore.StoreError(filename, hash, self.url, str(e))
+
+        # Mercurial does not close its SSH connections after writing a stream
+        if length is not None:
+            infile = lfutil.limitreader(infile, length)
+        return lfutil.copyandhash(lfutil.blockstream(infile), tmpfile)
+
+    def _verify(self, hash):
+        return not self._stat(hash)
+
+    def _verifyfile(self, cctx, cset, contents, standin, verified):
+        filename = lfutil.splitstandin(standin)
+        if not filename:
+            return False
+        fctx = cctx[standin]
+        key = (filename, fctx.filenode())
+        if key in verified:
+            return False
+
+        verified.add(key)
+
+        expecthash = fctx.data()[0:40]
+        stat = self._stat(expecthash)
+        if not stat:
+            return False
+        elif stat == 1:
+            self.ui.warn(
+                _('changeset %s: %s: contents differ\n')
+                % (cset, filename))
+            return True # failed
+        elif stat == 2:
+            self.ui.warn(
+                _('changeset %s: %s missing\n')
+                % (cset, filename))
+            return True # failed
+        else:
+            raise RuntimeError('verify failed: unexpected response from '
+                               'statlfile (%r)' % stat)
diff -r fccd350acf79 -r 384082750f2c hgext/largefiles/reposetup.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/largefiles/reposetup.py Sat Oct 15 14:30:50 2011 -0500
@@ -0,0 +1,416 @@
+# Copyright 2009-2010 Gregory P. Ward
+# Copyright 2009-2010 Intelerad Medical Systems Incorporated
+# Copyright 2010-2011 Fog Creek Software
+# Copyright 2010-2011 Unity Technologies
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+'''setup for largefiles repositories: reposetup'''
+import copy
+import types
+import os
+import re
+
+from mercurial import context, error, manifest, match as match_, \
+    node, util
+from mercurial.i18n import _
+
+import lfcommands
+import proto
+import lfutil
+
+def reposetup(ui, repo):
+    # wire repositories should be given new wireproto functions but not the
+    # other largefiles modifications
+    if not repo.local():
+        return proto.wirereposetup(ui, repo)
+
+    for name in ('status', 'commitctx', 'commit', 'push'):
+        method = getattr(repo, name)
+        #if not (isinstance(method, types.MethodType) and
+        #        method.im_func is repo.__class__.commitctx.im_func):
+        if (isinstance(method, types.FunctionType) and
+            method.func_name == 'wrap'):
+            ui.warn(_('largefiles: repo method %r appears to have already been'
+                    ' wrapped by another extension: '
+                    'largefiles may behave incorrectly\n')
+                    % name)
+
+    class lfiles_repo(repo.__class__):
+        lfstatus = False
+        def status_nolfiles(self, *args, **kwargs):
+            return super(lfiles_repo, self).status(*args, **kwargs)
+
+        # When lfstatus is set, return a context that gives the names
+        # of largefiles instead of their corresponding standins and
+        # identifies the largefiles as always binary, regardless of
+        # their actual contents.
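+        # Illustrative example (hypothetical path): with lfstatus set,
+        # repo['tip'].files() reports 'big.dat' rather than the standin
+        # '.hglf/big.dat', and filectx('big.dat') transparently falls
+        # back to the standin, appending '\0' to the data so Mercurial
+        # treats the file as binary.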
+ def __getitem__(self, changeid): + ctx = super(lfiles_repo, self).__getitem__(changeid) + if self.lfstatus: + class lfiles_manifestdict(manifest.manifestdict): + def __contains__(self, filename): + if super(lfiles_manifestdict, + self).__contains__(filename): + return True + return super(lfiles_manifestdict, + self).__contains__(lfutil.shortname+'/' + filename) + class lfiles_ctx(ctx.__class__): + def files(self): + filenames = super(lfiles_ctx, self).files() + return [re.sub('^\\'+lfutil.shortname+'/', '', + filename) for filename in filenames] + def manifest(self): + man1 = super(lfiles_ctx, self).manifest() + man1.__class__ = lfiles_manifestdict + return man1 + def filectx(self, path, fileid=None, filelog=None): + try: + result = super(lfiles_ctx, self).filectx(path, + fileid, filelog) + except error.LookupError: + # Adding a null character will cause Mercurial to + # identify this as a binary file. + result = super(lfiles_ctx, self).filectx( + lfutil.shortname + '/' + path, fileid, + filelog) + olddata = result.data + result.data = lambda: olddata() + '\0' + return result + ctx.__class__ = lfiles_ctx + return ctx + + # Figure out the status of big files and insert them into the + # appropriate list in the result. Also removes standin files + # from the listing. Revert to the original status if + # self.lfstatus is False. + def status(self, node1='.', node2=None, match=None, ignored=False, + clean=False, unknown=False, listsubrepos=False): + listignored, listclean, listunknown = ignored, clean, unknown + if not self.lfstatus: + try: + return super(lfiles_repo, self).status(node1, node2, match, + listignored, listclean, listunknown, listsubrepos) + except TypeError: + return super(lfiles_repo, self).status(node1, node2, match, + listignored, listclean, listunknown) + else: + # some calls in this function rely on the old version of status + self.lfstatus = False + if isinstance(node1, context.changectx): + ctx1 = node1 + else: + ctx1 = repo[node1] + if isinstance(node2, context.changectx): + ctx2 = node2 + else: + ctx2 = repo[node2] + working = ctx2.rev() is None + parentworking = working and ctx1 == self['.'] + + def inctx(file, ctx): + try: + if ctx.rev() is None: + return file in ctx.manifest() + ctx[file] + return True + except KeyError: + return False + + if match is None: + match = match_.always(self.root, self.getcwd()) + + # Create a copy of match that matches standins instead + # of largefiles. + def tostandin(file): + if inctx(lfutil.standin(file), ctx2): + return lfutil.standin(file) + return file + + m = copy.copy(match) + m._files = [tostandin(f) for f in m._files] + + # get ignored, clean, and unknown but remove them + # later if they were not asked for + try: + result = super(lfiles_repo, self).status(node1, node2, m, + True, True, True, listsubrepos) + except TypeError: + result = super(lfiles_repo, self).status(node1, node2, m, + True, True, True) + if working: + # hold the wlock while we read largefiles and + # update the lfdirstate + wlock = repo.wlock() + try: + # Any non-largefiles that were explicitly listed must be + # taken out or lfdirstate.status will report an error. + # The status of these files was already computed using + # super's status. 
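+                        # e.g. (hypothetical names): for a working-copy
+                        # 'hg status big.bin normal.txt', only 'big.bin'
+                        # survives this filter; 'normal.txt' is not in
+                        # lfdirstate and was already reported by the
+                        # super call above.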
+ lfdirstate = lfutil.openlfdirstate(ui, self) + match._files = [f for f in match._files if f in + lfdirstate] + s = lfdirstate.status(match, [], listignored, + listclean, listunknown) + (unsure, modified, added, removed, missing, unknown, + ignored, clean) = s + if parentworking: + for lfile in unsure: + if ctx1[lfutil.standin(lfile)].data().strip() \ + != lfutil.hashfile(self.wjoin(lfile)): + modified.append(lfile) + else: + clean.append(lfile) + lfdirstate.normal(lfile) + lfdirstate.write() + else: + tocheck = unsure + modified + added + clean + modified, added, clean = [], [], [] + + for lfile in tocheck: + standin = lfutil.standin(lfile) + if inctx(standin, ctx1): + if ctx1[standin].data().strip() != \ + lfutil.hashfile(self.wjoin(lfile)): + modified.append(lfile) + else: + clean.append(lfile) + else: + added.append(lfile) + finally: + wlock.release() + + for standin in ctx1.manifest(): + if not lfutil.isstandin(standin): + continue + lfile = lfutil.splitstandin(standin) + if not match(lfile): + continue + if lfile not in lfdirstate: + removed.append(lfile) + # Handle unknown and ignored differently + lfiles = (modified, added, removed, missing, [], [], clean) + result = list(result) + # Unknown files + result[4] = [f for f in unknown + if (repo.dirstate[f] == '?' and + not lfutil.isstandin(f))] + # Ignored files must be ignored by both the dirstate and + # lfdirstate + result[5] = set(ignored).intersection(set(result[5])) + # combine normal files and largefiles + normals = [[fn for fn in filelist + if not lfutil.isstandin(fn)] + for filelist in result] + result = [sorted(list1 + list2) + for (list1, list2) in zip(normals, lfiles)] + else: + def toname(f): + if lfutil.isstandin(f): + return lfutil.splitstandin(f) + return f + result = [[toname(f) for f in items] for items in result] + + if not listunknown: + result[4] = [] + if not listignored: + result[5] = [] + if not listclean: + result[6] = [] + self.lfstatus = True + return result + + # As part of committing, copy all of the largefiles into the + # cache. + def commitctx(self, *args, **kwargs): + node = super(lfiles_repo, self).commitctx(*args, **kwargs) + ctx = self[node] + for filename in ctx.files(): + if lfutil.isstandin(filename) and filename in ctx.manifest(): + realfile = lfutil.splitstandin(filename) + lfutil.copytocache(self, ctx.node(), realfile) + + return node + + # Before commit, largefile standins have not had their + # contents updated to reflect the hash of their largefile. + # Do that here. + def commit(self, text="", user=None, date=None, match=None, + force=False, editor=False, extra={}): + orig = super(lfiles_repo, self).commit + + wlock = repo.wlock() + try: + if getattr(repo, "_isrebasing", False): + # We have to take the time to pull down the new + # largefiles now. Otherwise if we are rebasing, + # any largefiles that were modified in the + # destination changesets get overwritten, either + # by the rebase or in the first commit after the + # rebase. + lfcommands.updatelfiles(repo.ui, repo) + # Case 1: user calls commit with no specific files or + # include/exclude patterns: refresh and commit all files that + # are "dirty". + if ((match is None) or + (not match.anypats() and not match.files())): + # Spend a bit of time here to get a list of files we know + # are modified so we can compare only against those. + # It can cost a lot of time (several seconds) + # otherwise to update all standins if the largefiles are + # large. 
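+                    # (Without this shortcut, even a commit touching a
+                    # single file would re-hash every clean largefile in
+                    # the working copy just to refresh its standin.)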
+ lfdirstate = lfutil.openlfdirstate(ui, self) + dirtymatch = match_.always(repo.root, repo.getcwd()) + s = lfdirstate.status(dirtymatch, [], False, False, False) + modifiedfiles = [] + for i in s: + modifiedfiles.extend(i) + lfiles = lfutil.listlfiles(self) + # this only loops through largefiles that exist (not + # removed/renamed) + for lfile in lfiles: + if lfile in modifiedfiles: + if os.path.exists(self.wjoin(lfutil.standin(lfile))): + # this handles the case where a rebase is being + # performed and the working copy is not updated + # yet. + if os.path.exists(self.wjoin(lfile)): + lfutil.updatestandin(self, + lfutil.standin(lfile)) + lfdirstate.normal(lfile) + for lfile in lfdirstate: + if lfile in modifiedfiles: + if not os.path.exists( + repo.wjoin(lfutil.standin(lfile))): + lfdirstate.drop(lfile) + lfdirstate.write() + + return orig(text=text, user=user, date=date, match=match, + force=force, editor=editor, extra=extra) + + for f in match.files(): + if lfutil.isstandin(f): + raise util.Abort( + _('file "%s" is a largefile standin') % f, + hint=('commit the largefile itself instead')) + + # Case 2: user calls commit with specified patterns: refresh + # any matching big files. + smatcher = lfutil.composestandinmatcher(self, match) + standins = lfutil.dirstate_walk(self.dirstate, smatcher) + + # No matching big files: get out of the way and pass control to + # the usual commit() method. + if not standins: + return orig(text=text, user=user, date=date, match=match, + force=force, editor=editor, extra=extra) + + # Refresh all matching big files. It's possible that the + # commit will end up failing, in which case the big files will + # stay refreshed. No harm done: the user modified them and + # asked to commit them, so sooner or later we're going to + # refresh the standins. Might as well leave them refreshed. + lfdirstate = lfutil.openlfdirstate(ui, self) + for standin in standins: + lfile = lfutil.splitstandin(standin) + if lfdirstate[lfile] <> 'r': + lfutil.updatestandin(self, standin) + lfdirstate.normal(lfile) + else: + lfdirstate.drop(lfile) + lfdirstate.write() + + # Cook up a new matcher that only matches regular files or + # standins corresponding to the big files requested by the + # user. Have to modify _files to prevent commit() from + # complaining "not tracked" for big files. 
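+                # Illustrative example (hypothetical names): for
+                # 'hg commit big.bin normal.txt' the matchfn built below
+                # accepts 'normal.txt' and the standin '.hglf/big.bin',
+                # but rejects 'big.bin' itself, since only the standin is
+                # tracked by Mercurial proper.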
+ lfiles = lfutil.listlfiles(repo) + match = copy.copy(match) + orig_matchfn = match.matchfn + + # Check both the list of largefiles and the list of + # standins because if a largefile was removed, it + # won't be in the list of largefiles at this point + match._files += sorted(standins) + + actualfiles = [] + for f in match._files: + fstandin = lfutil.standin(f) + + # ignore known largefiles and standins + if f in lfiles or fstandin in standins: + continue + + # append directory separator to avoid collisions + if not fstandin.endswith(os.sep): + fstandin += os.sep + + # prevalidate matching standin directories + if lfutil.any_(st for st in match._files + if st.startswith(fstandin)): + continue + actualfiles.append(f) + match._files = actualfiles + + def matchfn(f): + if orig_matchfn(f): + return f not in lfiles + else: + return f in standins + + match.matchfn = matchfn + return orig(text=text, user=user, date=date, match=match, + force=force, editor=editor, extra=extra) + finally: + wlock.release() + + def push(self, remote, force=False, revs=None, newbranch=False): + o = lfutil.findoutgoing(repo, remote, force) + if o: + toupload = set() + o = repo.changelog.nodesbetween(o, revs)[0] + for n in o: + parents = [p for p in repo.changelog.parents(n) + if p != node.nullid] + ctx = repo[n] + files = set(ctx.files()) + if len(parents) == 2: + mc = ctx.manifest() + mp1 = ctx.parents()[0].manifest() + mp2 = ctx.parents()[1].manifest() + for f in mp1: + if f not in mc: + files.add(f) + for f in mp2: + if f not in mc: + files.add(f) + for f in mc: + if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, + None): + files.add(f) + + toupload = toupload.union( + set([ctx[f].data().strip() + for f in files + if lfutil.isstandin(f) and f in ctx])) + lfcommands.uploadlfiles(ui, self, remote, toupload) + return super(lfiles_repo, self).push(remote, force, revs, + newbranch) + + repo.__class__ = lfiles_repo + + def checkrequireslfiles(ui, repo, **kwargs): + if 'largefiles' not in repo.requirements and lfutil.any_( + lfutil.shortname+'/' in f[0] for f in repo.store.datafiles()): + # workaround bug in Mercurial 1.9 whereby requirements is + # a list on newly-cloned repos + repo.requirements = set(repo.requirements) + + repo.requirements |= set(['largefiles']) + repo._writerequirements() + + checkrequireslfiles(ui, repo) + + ui.setconfig('hooks', 'changegroup.lfiles', checkrequireslfiles) + ui.setconfig('hooks', 'commit.lfiles', checkrequireslfiles) diff -r fccd350acf79 -r 384082750f2c hgext/largefiles/uisetup.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hgext/largefiles/uisetup.py Sat Oct 15 14:30:50 2011 -0500 @@ -0,0 +1,138 @@ +# Copyright 2009-2010 Gregory P. Ward +# Copyright 2009-2010 Intelerad Medical Systems Incorporated +# Copyright 2010-2011 Fog Creek Software +# Copyright 2010-2011 Unity Technologies +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. 
+
+'''setup for largefiles extension: uisetup'''
+
+from mercurial import archival, cmdutil, commands, extensions, filemerge, hg, \
+    httprepo, localrepo, sshrepo, sshserver, util, wireproto
+from mercurial.i18n import _
+from mercurial.hgweb import hgweb_mod, protocol
+
+import overrides
+import proto
+
+def uisetup(ui):
+    # Disable auto-status for some commands which assume that all
+    # files in the result are under Mercurial's control
+
+    entry = extensions.wrapcommand(commands.table, 'add',
+                                   overrides.override_add)
+    addopt = [('', 'large', None, _('add as largefile')),
+            ('', 'lfsize', '', _('add all files above this size (in megabytes) '
+                                 'as largefiles (default: 10)'))]
+    entry[1].extend(addopt)
+
+    entry = extensions.wrapcommand(commands.table, 'addremove',
+                                   overrides.override_addremove)
+    entry = extensions.wrapcommand(commands.table, 'remove',
+                                   overrides.override_remove)
+    entry = extensions.wrapcommand(commands.table, 'forget',
+                                   overrides.override_forget)
+    entry = extensions.wrapcommand(commands.table, 'status',
+                                   overrides.override_status)
+    entry = extensions.wrapcommand(commands.table, 'log',
+                                   overrides.override_log)
+    entry = extensions.wrapcommand(commands.table, 'rollback',
+                                   overrides.override_rollback)
+    entry = extensions.wrapcommand(commands.table, 'verify',
+                                   overrides.override_verify)
+
+    verifyopt = [('', 'large', None, _('verify largefiles')),
+                 ('', 'lfa', None,
+                     _('verify all revisions of largefiles not just current')),
+                 ('', 'lfc', None,
+                     _('verify largefile contents not just existence'))]
+    entry[1].extend(verifyopt)
+
+    entry = extensions.wrapcommand(commands.table, 'outgoing',
+                                   overrides.override_outgoing)
+    outgoingopt = [('', 'large', None, _('display outgoing largefiles'))]
+    entry[1].extend(outgoingopt)
+    entry = extensions.wrapcommand(commands.table, 'summary',
+                                   overrides.override_summary)
+    summaryopt = [('', 'large', None, _('display outgoing largefiles'))]
+    entry[1].extend(summaryopt)
+
+    entry = extensions.wrapcommand(commands.table, 'update',
+                                   overrides.override_update)
+    entry = extensions.wrapcommand(commands.table, 'pull',
+                                   overrides.override_pull)
+    entry = extensions.wrapfunction(filemerge, 'filemerge',
+                                    overrides.override_filemerge)
+    entry = extensions.wrapfunction(cmdutil, 'copy',
+                                    overrides.override_copy)
+
+    # Backout calls revert so we need to override both the command and the
+    # function
+    entry = extensions.wrapcommand(commands.table, 'revert',
+                                   overrides.override_revert)
+    entry = extensions.wrapfunction(commands, 'revert',
+                                    overrides.override_revert)
+
+    # clone uses hg._update instead of hg.update even though they are the
+    # same function... so wrap both of them
+    extensions.wrapfunction(hg, 'update', overrides.hg_update)
+    extensions.wrapfunction(hg, '_update', overrides.hg_update)
+    extensions.wrapfunction(hg, 'clean', overrides.hg_clean)
+    extensions.wrapfunction(hg, 'merge', overrides.hg_merge)
+
+    extensions.wrapfunction(archival, 'archive', overrides.override_archive)
+    if util.safehasattr(cmdutil, 'bailifchanged'):
+        extensions.wrapfunction(cmdutil, 'bailifchanged',
+                                overrides.override_bailifchanged)
+    else:
+        extensions.wrapfunction(cmdutil, 'bail_if_changed',
+                                overrides.override_bailifchanged)
+
+    # create the new wireproto commands ...
+    wireproto.commands['putlfile'] = (proto.putlfile, 'sha')
+    wireproto.commands['getlfile'] = (proto.getlfile, 'sha')
+    wireproto.commands['statlfile'] = (proto.statlfile, 'sha')
+
+    # ... and wrap some existing ones
+    wireproto.commands['capabilities'] = (proto.capabilities, '')
+    wireproto.commands['heads'] = (proto.heads, '')
+    wireproto.commands['lheads'] = (wireproto.heads, '')
+
+    # make putlfile behave the same as push and {get,stat}lfile behave
+    # the same as pull w.r.t. permissions checks
+    hgweb_mod.perms['putlfile'] = 'push'
+    hgweb_mod.perms['getlfile'] = 'pull'
+    hgweb_mod.perms['statlfile'] = 'pull'
+
+    # the hello wireproto command uses wireproto.capabilities, so it won't see
+    # our largefiles capability unless we replace the actual function as well.
+    proto.capabilities_orig = wireproto.capabilities
+    wireproto.capabilities = proto.capabilities
+
+    # these let us reject non-largefiles clients and make them display
+    # our error messages
+    protocol.webproto.refuseclient = proto.webproto_refuseclient
+    sshserver.sshserver.refuseclient = proto.sshproto_refuseclient
+
+    # can't do this in reposetup because it needs to have happened before
+    # wirerepo.__init__ is called
+    proto.ssh_oldcallstream = sshrepo.sshrepository._callstream
+    proto.http_oldcallstream = httprepo.httprepository._callstream
+    sshrepo.sshrepository._callstream = proto.sshrepo_callstream
+    httprepo.httprepository._callstream = proto.httprepo_callstream
+
+    # don't die on seeing a repo with the largefiles requirement
+    localrepo.localrepository.supported |= set(['largefiles'])
+
+    # override some extensions' stuff as well
+    for name, module in extensions.extensions():
+        if name == 'fetch':
+            extensions.wrapcommand(getattr(module, 'cmdtable'), 'fetch',
+                overrides.override_fetch)
+        if name == 'purge':
+            extensions.wrapcommand(getattr(module, 'cmdtable'), 'purge',
+                overrides.override_purge)
+        if name == 'rebase':
+            extensions.wrapcommand(getattr(module, 'cmdtable'), 'rebase',
+                overrides.override_rebase)
diff -r fccd350acf79 -r 384082750f2c hgext/largefiles/usage.txt
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/largefiles/usage.txt Sat Oct 15 14:30:50 2011 -0500
@@ -0,0 +1,51 @@
+Largefiles allows for tracking large, incompressible binary files in Mercurial
+without requiring excessive bandwidth for clones and pulls. Files added as
+largefiles are not tracked directly by Mercurial; rather, their revisions are
+identified by a checksum, and Mercurial tracks these checksums. This way, when
+you clone a repository or pull in changesets, the large files in older
+revisions of the repository are not needed, and only the ones needed to update
+to the current version are downloaded. This saves both disk space and
+bandwidth.
+
+If you are starting a new repository or adding new large binary files, using
+largefiles for them is as easy as adding '--large' to your hg add command. For
+example:
+
+$ dd if=/dev/urandom of=thisfileislarge count=2000
+$ hg add --large thisfileislarge
+$ hg commit -m 'add thisfileislarge, which is large, as a largefile'
+
+When you push a changeset that affects largefiles to a remote repository, its
+largefile revisions will be uploaded along with it. Note that the remote
+Mercurial must also have the largefiles extension enabled for this to work.
+
+When you pull a changeset that affects largefiles from a remote repository,
+nothing different from Mercurial's normal behavior happens. However, when you
+update to such a revision, any largefiles needed by that revision are
+downloaded and cached if they have never been downloaded before. This means
+that network access is required to update to a revision you have not yet
+updated to.
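+
+For example, the following sequence downloads only the largefiles referred to
+by the revisions actually checked out (URL and revision are illustrative):
+
+$ hg clone http://example.com/bigrepo bigrepo
+$ cd bigrepo
+$ hg update -r 1.0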
+ +If you already have large files tracked by Mercurial without the largefiles +extension, you will need to convert your repository in order to benefit from +largefiles. This is done with the 'hg lfconvert' command: + +$ hg lfconvert --size 10 oldrepo newrepo + +By default, in repositories that already have largefiles in them, any new file +over 10MB will automatically be added as a largefile. To change this +threshold, set [largefiles].size in your Mercurial config file to the minimum +size in megabytes to track as a largefile, or use the --lfsize option to the +add command (also in megabytes): + +[largefiles] +size = 2 + +$ hg add --lfsize 2 + +The [largefiles].patterns config option allows you to specify +space-separated filename patterns (in shell glob syntax) that should always be +tracked as largefiles: + +[largefiles] +patterns = *.jpg *.{png,bmp} library.zip content/audio/* diff -r fccd350acf79 -r 384082750f2c hgext/largefiles/wirestore.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hgext/largefiles/wirestore.py Sat Oct 15 14:30:50 2011 -0500 @@ -0,0 +1,29 @@ +# Copyright 2010-2011 Fog Creek Software +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +'''largefile store working over Mercurial's wire protocol''' + +import lfutil +import remotestore + +class wirestore(remotestore.remotestore): + def __init__(self, ui, repo, remote): + cap = remote.capable('largefiles') + if not cap: + raise lfutil.storeprotonotcapable([]) + storetypes = cap.split(',') + if 'serve' not in storetypes: + raise lfutil.storeprotonotcapable(storetypes) + self.remote = remote + super(wirestore, self).__init__(ui, repo, remote.url()) + + def _put(self, hash, fd): + return self.remote.putlfile(hash, fd) + + def _get(self, hash): + return self.remote.getlfile(hash) + + def _stat(self, hash): + return self.remote.statlfile(hash) diff -r fccd350acf79 -r 384082750f2c hgext/mq.py --- a/hgext/mq.py Sun Oct 02 16:41:07 2011 -0500 +++ b/hgext/mq.py Sat Oct 15 14:30:50 2011 -0500 @@ -287,25 +287,31 @@ @util.propertycache def applied(self): - if os.path.exists(self.join(self.statuspath)): - def parselines(lines): - for l in lines: - entry = l.split(':', 1) - if len(entry) > 1: - n, name = entry - yield statusentry(bin(n), name) - elif l.strip(): - self.ui.warn(_('malformated mq status line: %s\n') % entry) - # else we ignore empty lines + def parselines(lines): + for l in lines: + entry = l.split(':', 1) + if len(entry) > 1: + n, name = entry + yield statusentry(bin(n), name) + elif l.strip(): + self.ui.warn(_('malformed mq status line: %s\n') % entry) + # else we ignore empty lines + try: lines = self.opener.read(self.statuspath).splitlines() return list(parselines(lines)) - return [] + except IOError, e: + if e.errno == errno.ENOENT: + return [] + raise @util.propertycache def fullseries(self): - if os.path.exists(self.join(self.seriespath)): - return self.opener.read(self.seriespath).splitlines() - return [] + try: + return self.opener.read(self.seriespath).splitlines() + except IOError, e: + if e.errno == errno.ENOENT: + return [] + raise @util.propertycache def series(self): @@ -626,6 +632,7 @@ self.ui.note(str(inst) + '\n') if not self.ui.verbose: self.ui.warn(_("patch failed, unable to continue (try -v)\n")) + self.ui.traceback() return (False, list(files), False) def apply(self, repo, series, list=False, update_status=True, @@ -938,7 +945,7 @@ p.write("# User " + user + "\n") if date: p.write("# Date %s
%s\n\n" % date) - if hasattr(msg, '__call__'): + if util.safehasattr(msg, '__call__'): msg = msg() commitmsg = msg and msg or ("[mq]: %s" % patchfn) n = repo.commit(commitmsg, user, date, match=match, force=True) @@ -1010,12 +1017,10 @@ # if the exact patch name does not exist, we try a few # variations. If strict is passed, we try only #1 # - # 1) a number to indicate an offset in the series file + # 1) a number (as string) to indicate an offset in the series file # 2) a unique substring of the patch name was given # 3) patchname[-+]num to indicate an offset in the series file def lookup(self, patch, strict=False): - patch = patch and str(patch) - def partialname(s): if s in self.series: return s @@ -1034,8 +1039,6 @@ return self.series[0] return None - if patch is None: - return None if patch in self.series: return patch @@ -1095,12 +1098,12 @@ self.ui.warn(_('no patches in series\n')) return 0 - patch = self.lookup(patch) # Suppose our series file is: A B C and the current 'top' # patch is B. qpush C should be performed (moving forward) # qpush B is a NOP (no change) qpush A is an error (can't # go backwards with qpush) if patch: + patch = self.lookup(patch) info = self.isapplied(patch) if info and info[0] >= len(self.applied) - 1: self.ui.warn( @@ -1492,7 +1495,7 @@ n = repo.commit(message, user, ph.date, match=match, force=True) # only write patch after a successful commit - patchf.rename() + patchf.close() self.applied.append(statusentry(n, patchfn)) except: ctx = repo[cparents[0]] @@ -2675,7 +2678,11 @@ return 0 @command("strip", - [('f', 'force', None, _('force removal of changesets, discard ' + [ + ('r', 'rev', [], _('strip specified revision (optional, ' + 'can specify revisions without this ' + 'option)'), _('REV')), + ('f', 'force', None, _('force removal of changesets, discard ' 'uncommitted changes (no backup)')), ('b', 'backup', None, _('bundle only changesets with local revision' ' number greater than REV which are not' @@ -2716,6 +2723,7 @@ backup = 'none' cl = repo.changelog + revs = list(revs) + opts.get('rev') revs = set(scmutil.revrange(repo, revs)) if not revs: raise util.Abort(_('empty revision set')) @@ -2867,7 +2875,7 @@ if i == 0: q.pop(repo, all=True) else: - q.pop(repo, i - 1) + q.pop(repo, str(i - 1)) break if popped: try: @@ -2915,6 +2923,7 @@ @command("qqueue", [('l', 'list', False, _('list all available queues')), + ('', 'active', False, _('print name of active queue')), ('c', 'create', False, _('create new queue')), ('', 'rename', False, _('rename active queue')), ('', 'delete', False, _('delete reference to queue')), @@ -2929,7 +2938,8 @@ Omitting a queue name or specifying -l/--list will show you the registered queues - by default the "normal" patches queue is registered. The currently - active queue will be marked with "(active)". + active queue will be marked with "(active)". Specifying --active will print + only the name of the active queue. To create a new queue, use -c/--create. 
The queue is automatically made active, except in the case where there are applied patches from the @@ -3022,8 +3032,11 @@ fh.close() util.rename(repo.join('patches.queues.new'), repo.join(_allqueues)) - if not name or opts.get('list'): + if not name or opts.get('list') or opts.get('active'): current = _getcurrent() + if opts.get('active'): + ui.write('%s\n' % (current,)) + return for queue in _getqueues(): ui.write('%s' % (queue,)) if queue == current and not ui.quiet: diff -r fccd350acf79 -r 384082750f2c hgext/notify.py --- a/hgext/notify.py Sun Oct 02 16:41:07 2011 -0500 +++ b/hgext/notify.py Sat Oct 15 14:30:50 2011 -0500 @@ -5,71 +5,115 @@ # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. -'''hooks for sending email notifications at commit/push time +'''hooks for sending email push notifications -Subscriptions can be managed through a hgrc file. Default mode is to -print messages to stdout, for testing and configuring. +This extension lets you run hooks sending email notifications when +changesets are being pushed, from the sending or receiving side. -To use, configure the notify extension and enable it in hgrc like -this:: - - [extensions] - notify = +First, enable the extension as explained in :hg:`help extensions`, and +register the hook you want to run. ``incoming`` and ``changegroup`` hooks +are run by the changesets receiver while the ``outgoing`` one is for +the sender:: [hooks] # one email for each incoming changeset incoming.notify = python:hgext.notify.hook - # batch emails when many changesets incoming at one time + # one email for all incoming changesets changegroup.notify = python:hgext.notify.hook - # batch emails when many changesets outgoing at one time (client side) + + # one email for all outgoing changesets outgoing.notify = python:hgext.notify.hook - [notify] - # config items go here - -Required configuration items:: - - config = /path/to/file # file containing subscriptions - -Optional configuration items:: - - test = True # print messages to stdout for testing - strip = 3 # number of slashes to strip for url paths - domain = example.com # domain to use if committer missing domain - style = ... # style file to use when formatting email - template = ... # template to use when formatting email - incoming = ... # template to use when run as incoming hook - outgoing = ... # template to use when run as outgoing hook - changegroup = ... # template to use when run as changegroup hook - maxdiff = 300 # max lines of diffs to include (0=none, -1=all) - maxsubject = 67 # truncate subject line longer than this - diffstat = True # add a diffstat before the diff content - sources = serve # notify if source of incoming changes in this list - # (serve == ssh or http, push, pull, bundle) - merge = False # send notification for merges (default True) - [email] - from = user@host.com # email address to send as if none given - [web] - baseurl = http://hgserver/... # root of hg web site for browsing commits - -The notify config file has same format as a regular hgrc file. It has -two sections so you can express subscriptions in whatever way is -handier for you. - -:: +Now that the hooks are running, subscribers must be assigned to +repositories.
Use the ``[usersubs]`` section to map repositories to a +given email or the ``[reposubs]`` section to map emails to a single +repository:: [usersubs] - # key is subscriber email, value is ","-separated list of glob patterns + # key is subscriber email, value is a comma-separated list of glob + # patterns user@host = pattern [reposubs] - # key is glob pattern, value is ","-separated list of subscriber emails + # key is glob pattern, value is a comma-separated list of subscriber + # emails pattern = user@host -Glob patterns are matched against path to repository root. +Glob patterns are matched against the absolute path to the repository +root. The subscriptions can be defined in their own file and +referenced with:: + + [notify] + config = /path/to/subscriptionsfile + +Alternatively, they can be added to Mercurial configuration files by +setting the previous entry to an empty value. + +At this point, notifications should be generated but will not be sent until you +set the ``notify.test`` entry to ``False``. + +Notification content can be tweaked with the following configuration entries: + +notify.test + If ``True``, print messages to stdout instead of sending them. Default: True. + +notify.sources + Space-separated list of change sources. Notifications are sent only + if it includes the incoming or outgoing changes source. Incoming + sources can be ``serve`` for changes coming from http or ssh, + ``pull`` for pulled changes, ``unbundle`` for changes added by + :hg:`unbundle` or ``push`` for changes being pushed + locally. Outgoing sources are the same except for ``unbundle`` which + is replaced by ``bundle``. Default: serve. + +notify.strip + Number of leading slashes to strip from url paths. By default, notifications + reference repositories by their absolute path. ``notify.strip`` lets you + turn them into relative paths. For example, ``notify.strip=3`` will change + ``/long/path/repository`` into ``repository``. Default: 0. + +notify.domain + If subscribers' emails or the from email have no domain set, complete them + with this value. -If you like, you can put notify config file in repository that users -can push changes to, they can manage their own subscriptions. +notify.style + Style file to use when formatting emails. + +notify.template + Template to use when formatting emails. + +notify.incoming + Template to use when run as incoming hook, overriding ``notify.template``. + +notify.outgoing + Template to use when run as outgoing hook, overriding ``notify.template``. + +notify.changegroup + Template to use when running as changegroup hook, overriding + ``notify.template``. + +notify.maxdiff + Maximum number of diff lines to include in notification email. Set to 0 + to disable the diff, -1 to include all of it. Default: 300. + +notify.maxsubject + Maximum number of characters in the email subject line. Default: 67. + +notify.diffstat + Set to True to include a diffstat before diff content. Default: True. + +notify.merge + If True, send notifications for merge changesets. Default: True. + +If set, the following entries will also be used to customize the notifications: + +email.from + Email ``From`` address to use if none can be found in generated email content. + +web.baseurl + Root repository browsing URL to combine with repository paths when making + references. See also ``notify.strip``.
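Pulling the entries above together, a minimal working setup on the receiving repository could look like this (an illustrative sketch, not part of this patch; the path is a placeholder)::

  [extensions]
  notify =

  [hooks]
  changegroup.notify = python:hgext.notify.hook

  [notify]
  test = False
  config = /path/to/subscriptionsfile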
+ ''' from mercurial.i18n import _ @@ -167,9 +211,6 @@ return [mail.addressencode(self.ui, s, self.charsets, self.test) for s in sorted(subs)] - def url(self, path=None): - return self.ui.config('web', 'baseurl') + (path or self.root) - def node(self, ctx, **props): '''format one changeset, unless it is a suppressed merge.''' if not self.merge and len(ctx.parents()) > 1: diff -r fccd350acf79 -r 384082750f2c hgext/pager.py --- a/hgext/pager.py Sun Oct 02 16:41:07 2011 -0500 +++ b/hgext/pager.py Sat Oct 15 14:30:50 2011 -0500 @@ -58,7 +58,7 @@ from mercurial.i18n import _ def _runpager(p): - if not hasattr(os, 'fork'): + if not util.safehasattr(os, 'fork'): sys.stdout = util.popen(p, 'wb') if util.isatty(sys.stderr): sys.stderr = sys.stdout diff -r fccd350acf79 -r 384082750f2c hgext/patchbomb.py --- a/hgext/patchbomb.py Sun Oct 02 16:41:07 2011 -0500 +++ b/hgext/patchbomb.py Sat Oct 15 14:30:50 2011 -0500 @@ -57,24 +57,15 @@ command = cmdutil.command(cmdtable) def prompt(ui, prompt, default=None, rest=':'): - if not ui.interactive() and default is None: - raise util.Abort(_("%s Please enter a valid value" % (prompt + rest))) if default: prompt += ' [%s]' % default - prompt += rest - while True: - r = ui.prompt(prompt, default=default) - if r: - return r - if default is not None: - return default - ui.warn(_('Please enter a valid value.\n')) + return ui.prompt(prompt + rest, default) -def introneeded(opts, number): - '''is an introductory message required?''' +def introwanted(opts, number): + '''is an introductory message apparently wanted?''' return number > 1 or opts.get('intro') or opts.get('desc') -def makepatch(ui, repo, patchlines, opts, _charsets, idx, total, +def makepatch(ui, repo, patchlines, opts, _charsets, idx, total, numbered, patchname=None): desc = [] @@ -141,7 +132,7 @@ flag = ' ' + flag subj = desc[0].strip().rstrip('. ') - if not introneeded(opts, total): + if not numbered: subj = '[PATCH%s] %s' % (flag, opts.get('subject') or subj) else: tlen = len(str(total)) @@ -352,51 +343,66 @@ ui.write(_('\nWrite the introductory message for the ' 'patch series.\n\n')) body = ui.edit(body, sender) - # Save serie description in case sendmail fails + # Save series description in case sendmail fails msgfile = repo.opener('last-email.txt', 'wb') msgfile.write(body) msgfile.close() return body def getpatchmsgs(patches, patchnames=None): - jumbo = [] msgs = [] ui.write(_('This patch series consists of %d patches.\n\n') % len(patches)) + # build the intro message, or skip it if the user declines + if introwanted(opts, len(patches)): + msg = makeintro(patches) + if msg: + msgs.append(msg) + + # are we going to send more than one message? 
+ numbered = len(msgs) + len(patches) > 1 + + # now generate the actual patch messages name = None for i, p in enumerate(patches): - jumbo.extend(p) if patchnames: name = patchnames[i] msg = makepatch(ui, repo, p, opts, _charsets, i + 1, - len(patches), name) + len(patches), numbered, name) msgs.append(msg) - if introneeded(opts, len(patches)): - tlen = len(str(len(patches))) + return msgs + + def makeintro(patches): + tlen = len(str(len(patches))) - flag = ' '.join(opts.get('flag')) - if flag: - subj = '[PATCH %0*d of %d %s]' % (tlen, 0, len(patches), flag) - else: - subj = '[PATCH %0*d of %d]' % (tlen, 0, len(patches)) - subj += ' ' + (opts.get('subject') or - prompt(ui, 'Subject: ', rest=subj)) + flag = opts.get('flag') or '' + if flag: + flag = ' ' + ' '.join(flag) + prefix = '[PATCH %0*d of %d%s]' % (tlen, 0, len(patches), flag) + + subj = (opts.get('subject') or + prompt(ui, 'Subject: ', rest=prefix, default='')) + if not subj: + return None # skip intro if the user doesn't bother - body = '' - ds = patch.diffstat(jumbo) - if ds and opts.get('diffstat'): - body = '\n' + ds + subj = prefix + ' ' + subj - body = getdescription(body, sender) - msg = mail.mimeencode(ui, body, _charsets, opts.get('test')) - msg['Subject'] = mail.headencode(ui, subj, _charsets, - opts.get('test')) + body = '' + if opts.get('diffstat'): + # generate a cumulative diffstat of the whole patch series + diffstat = patch.diffstat(sum(patches, [])) + body = '\n' + diffstat + else: + diffstat = None - msgs.insert(0, (msg, subj, ds)) - return msgs + body = getdescription(body, sender) + msg = mail.mimeencode(ui, body, _charsets, opts.get('test')) + msg['Subject'] = mail.headencode(ui, subj, _charsets, + opts.get('test')) + return (msg, subj, diffstat) def getbundlemsgs(bundle): subj = (opts.get('subject') @@ -429,29 +435,33 @@ showaddrs = [] - def getaddrs(opt, prpt=None, default=None): - addrs = opts.get(opt.replace('-', '_')) - if opt != 'reply-to': - showaddr = '%s:' % opt.capitalize() - else: - showaddr = 'Reply-To:' - + def getaddrs(header, ask=False, default=None): + configkey = header.lower() + opt = header.replace('-', '_').lower() + addrs = opts.get(opt) if addrs: - showaddrs.append('%s %s' % (showaddr, ', '.join(addrs))) + showaddrs.append('%s: %s' % (header, ', '.join(addrs))) return mail.addrlistencode(ui, addrs, _charsets, opts.get('test')) - addrs = ui.config('email', opt) or ui.config('patchbomb', opt) or '' - if not addrs and prpt: - addrs = prompt(ui, prpt, default) + # not on the command line: fallback to config and then maybe ask + addr = (ui.config('email', configkey) or + ui.config('patchbomb', configkey) or + '') + if not addr and ask: + addr = prompt(ui, header, default=default) + if addr: + showaddrs.append('%s: %s' % (header, addr)) + return mail.addrlistencode(ui, [addr], _charsets, opts.get('test')) + else: + return default - if addrs: - showaddrs.append('%s %s' % (showaddr, addrs)) - return mail.addrlistencode(ui, [addrs], _charsets, opts.get('test')) - - to = getaddrs('to', 'To') - cc = getaddrs('cc', 'Cc', '') - bcc = getaddrs('bcc') - replyto = getaddrs('reply-to') + to = getaddrs('To', ask=True) + if not to: + # we can get here in non-interactive mode + raise util.Abort(_('no recipient addresses provided')) + cc = getaddrs('Cc', ask=True, default='') or [] + bcc = getaddrs('Bcc') or [] + replyto = getaddrs('Reply-To') if opts.get('diffstat') or opts.get('confirm'): ui.write(_('\nFinal summary:\n\n')) diff -r fccd350acf79 -r 384082750f2c hgext/progress.py --- a/hgext/progress.py Sun 
Oct 02 16:41:07 2011 -0500 +++ b/hgext/progress.py Sat Oct 15 14:30:50 2011 -0500 @@ -27,6 +27,9 @@ [progress] delay = 3 # number of seconds (float) before showing the progress bar + changedelay = 1 # changedelay: minimum delay before showing a new topic. + # If set to less than 3 * refresh, that value will + # be used instead. refresh = 0.1 # time in seconds between refreshes of the progress bar format = topic bar number estimate # format of the progress bar width = # if set, the maximum width of the progress information @@ -53,7 +56,7 @@ return ' '.join(s for s in args if s) def shouldprint(ui): - return (util.isatty(sys.stderr) or ui.configbool('progress', 'assume-tty')) + return util.isatty(sys.stderr) or ui.configbool('progress', 'assume-tty') def fmtremaining(seconds): if seconds < 60: @@ -105,9 +108,13 @@ self.printed = False self.lastprint = time.time() + float(self.ui.config( 'progress', 'delay', default=3)) + self.lasttopic = None self.indetcount = 0 self.refresh = float(self.ui.config( 'progress', 'refresh', default=0.1)) + self.changedelay = max(3 * self.refresh, + float(self.ui.config( + 'progress', 'changedelay', default=1))) self.order = self.ui.configlist( 'progress', 'format', default=['topic', 'bar', 'number', 'estimate']) @@ -184,6 +191,7 @@ else: out = spacejoin(head, tail) sys.stderr.write('\r' + out[:termwidth]) + self.lasttopic = topic sys.stderr.flush() def clear(self): @@ -248,10 +256,18 @@ self.topics.append(topic) self.topicstates[topic] = pos, item, unit, total if now - self.lastprint >= self.refresh and self.topics: - self.lastprint = now - self.show(now, topic, *self.topicstates[topic]) + if (self.lasttopic is None # first time we printed + # not a topic change + or topic == self.lasttopic + # it's been long enough we should print anyway + or now - self.lastprint >= self.changedelay): + self.lastprint = now + self.show(now, topic, *self.topicstates[topic]) + +_singleton = None def uisetup(ui): + global _singleton class progressui(ui.__class__): _progbar = None @@ -278,7 +294,9 @@ # we instantiate one globally shared progress bar to avoid # competing progress bars when multiple UI objects get created if not progressui._progbar: - progressui._progbar = progbar(ui) + if _singleton is None: + _singleton = progbar(ui) + progressui._progbar = _singleton def reposetup(ui, repo): uisetup(repo.ui) diff -r fccd350acf79 -r 384082750f2c hgext/rebase.py --- a/hgext/rebase.py Sun Oct 02 16:41:07 2011 -0500 +++ b/hgext/rebase.py Sat Oct 15 14:30:50 2011 -0500 @@ -15,7 +15,7 @@ ''' from mercurial import hg, util, repair, merge, cmdutil, commands, bookmarks -from mercurial import extensions, copies, patch +from mercurial import extensions, patch from mercurial.commands import templateopts from mercurial.node import nullrev from mercurial.lock import release @@ -34,11 +34,15 @@ _('rebase from the base of the specified changeset ' '(up to greatest common ancestor of base and dest)'), _('REV')), + ('r', 'rev', [], + _('rebase these revisions'), + _('REV')), ('d', 'dest', '', _('rebase onto the specified changeset'), _('REV')), ('', 'collapse', False, _('collapse the rebased changesets')), ('m', 'message', '', _('use text as collapse commit message'), _('TEXT')), + ('e', 'edit', False, _('invoke editor on commit messages')), ('l', 'logfile', '', _('read collapse commit message from file'), _('FILE')), ('', 'keep', False, _('keep original changesets')), @@ -105,6 +109,10 @@ skipped = set() targetancestors = set() + editor = None + if opts.get('edit'): + editor = 
cmdutil.commitforceeditor + lock = wlock = None try: lock = repo.lock() @@ -114,6 +122,7 @@ destf = opts.get('dest', None) srcf = opts.get('source', None) basef = opts.get('base', None) + revf = opts.get('rev', []) contf = opts.get('continue') abortf = opts.get('abort') collapsef = opts.get('collapse', False) @@ -151,7 +160,13 @@ else: if srcf and basef: raise util.Abort(_('cannot specify both a ' + 'source and a base')) + if revf and basef: + raise util.Abort(_('cannot specify both a ' 'revision and a base')) + if revf and srcf: + raise util.Abort(_('cannot specify both a ' + 'revision and a source')) if detachf: if not srcf: raise util.Abort( @@ -160,7 +175,38 @@ raise util.Abort(_('cannot specify a base with detach')) cmdutil.bailifchanged(repo) + + if not destf: + # Destination defaults to the latest revision in the + # current branch + branch = repo[None].branch() + dest = repo[branch] + else: + dest = repo[destf] + + if revf: + revgen = repo.set('%lr', revf) + elif srcf: + revgen = repo.set('(%r)::', srcf) + else: + base = basef or '.' + revgen = repo.set('(children(ancestor(%r, %d)) and ::(%r))::', + base, dest, base) + + rebaseset = [c.rev() for c in revgen] + + if not rebaseset: + repo.ui.debug('base is ancestor of destination\n') + result = None + elif not keepf and list(repo.set('first(children(%ld) - %ld)', + rebaseset, rebaseset)): + raise util.Abort( + _("can't remove original changesets with" + " unrebased descendants"), + hint=_('use --keep to keep original changesets')) + else: + result = buildstate(repo, dest, rebaseset, detachf) + if not result: # Empty state built, nothing to rebase ui.status(_('nothing to rebase\n')) @@ -215,9 +261,10 @@ 'resolve, then hg rebase --continue)')) finally: ui.setconfig('ui', 'forcemerge', '') - updatedirstate(repo, rev, target, p2) + cmdutil.duplicatecopies(repo, rev, target, p2) if not collapsef: - newrev = concludenode(repo, rev, p1, p2, extrafn=extrafn) + newrev = concludenode(repo, rev, p1, p2, extrafn=extrafn, + editor=editor) else: # Skip commit if we are collapsing repo.dirstate.setparents(repo[p1].node()) @@ -247,7 +294,7 @@ commitmsg += '\n* %s' % repo[rebased].description() commitmsg = ui.edit(commitmsg, repo.ui.username()) newrev = concludenode(repo, rev, p1, external, commitmsg=commitmsg, - extrafn=extrafn) + extrafn=extrafn, editor=editor) if 'qtip' in repo.tags(): updatemq(repo, state, skipped, **opts) @@ -301,21 +348,7 @@ external = p.rev() return external -def updatedirstate(repo, rev, p1, p2): - """Keep track of renamed files in the revision that is going to be rebased - """ - # Here we simulate the copies and renames in the source changeset - cop, diver = copies.copies(repo, repo[rev], repo[p1], repo[p2], True) - m1 = repo[rev].manifest() - m2 = repo[p1].manifest() - for k, v in cop.iteritems(): - if k in m1: - if v in m1 or v in m2: - repo.dirstate.copy(v, k) - if v in m2 and v not in m1 and k in m2: - repo.dirstate.remove(v) - -def concludenode(repo, rev, p1, p2, commitmsg=None, extrafn=None): +def concludenode(repo, rev, p1, p2, commitmsg=None, editor=None, extrafn=None): 'Commit the changes and store useful information in extra' try: repo.dirstate.setparents(repo[p1].node(), repo[p2].node()) @@ -327,7 +360,7 @@ extrafn(ctx, extra) # Commit might fail if unresolved files exist newrev = repo.commit(text=commitmsg, user=ctx.user(), - date=ctx.date(), extra=extra) + date=ctx.date(), extra=extra, editor=editor) repo.dirstate.setbranch(repo[newrev].branch()) return newrev
except util.Abort: @@ -515,68 +548,47 @@ repo.ui.warn(_('rebase aborted\n')) return 0 -def buildstate(repo, dest, src, base, detach): - 'Define which revisions are going to be rebased and where' - targetancestors = set() - detachset = set() +def buildstate(repo, dest, rebaseset, detach): + '''Define which revisions are going to be rebased and where - if not dest: - # Destination defaults to the latest revision in the current branch - branch = repo[None].branch() - dest = repo[branch].rev() - else: - dest = repo[dest].rev() + repo: repo + dest: context + rebaseset: set of rev + detach: boolean''' # This check isn't strictly necessary, since mq detects commits over an # applied patch. But it prevents messing up the working directory when # a partially completed rebase is blocked by mq. - if 'qtip' in repo.tags() and (repo[dest].node() in + if 'qtip' in repo.tags() and (dest.node() in [s.node for s in repo.mq.applied]): raise util.Abort(_('cannot rebase onto an applied mq patch')) - if src: - commonbase = repo[src].ancestor(repo[dest]) - samebranch = repo[src].branch() == repo[dest].branch() - if commonbase == repo[src]: - raise util.Abort(_('source is ancestor of destination')) - if samebranch and commonbase == repo[dest]: - raise util.Abort(_('source is descendant of destination')) - source = repo[src].rev() - if detach: - # We need to keep track of source's ancestors up to the common base - srcancestors = set(repo.changelog.ancestors(source)) - baseancestors = set(repo.changelog.ancestors(commonbase.rev())) - detachset = srcancestors - baseancestors - detachset.discard(commonbase.rev()) - else: - if base: - cwd = repo[base].rev() - else: - cwd = repo['.'].rev() + detachset = set() + roots = list(repo.set('roots(%ld)', rebaseset)) + if not roots: + raise util.Abort(_('no matching revisions')) + if len(roots) > 1: + raise util.Abort(_("can't rebase multiple roots")) + root = roots[0] - if cwd == dest: - repo.ui.debug('source and destination are the same\n') - return None - - targetancestors = set(repo.changelog.ancestors(dest)) - if cwd in targetancestors: - repo.ui.debug('source is ancestor of destination\n') - return None + commonbase = root.ancestor(dest) + if commonbase == root: + raise util.Abort(_('source is ancestor of destination')) + if commonbase == dest: + samebranch = root.branch() == dest.branch() + if samebranch and root in dest.children(): + repo.ui.debug('source is a child of destination\n') + return None + # rebase on ancestor, force detach + detach = True + if detach: + detachset = [c.rev() for c in repo.set('::%d - ::%d - %d', + root, commonbase, root)] - cwdancestors = set(repo.changelog.ancestors(cwd)) - if dest in cwdancestors: - repo.ui.debug('source is descendant of destination\n') - return None - - cwdancestors.add(cwd) - rebasingbranch = cwdancestors - targetancestors - source = min(rebasingbranch) - - repo.ui.debug('rebase onto %d starting from %d\n' % (dest, source)) - state = dict.fromkeys(repo.changelog.descendants(source), nullrev) + repo.ui.debug('rebase onto %d starting from %d\n' % (dest, root)) + state = dict.fromkeys(rebaseset, nullrev) state.update(dict.fromkeys(detachset, nullmerge)) - state[source] = nullrev + return repo['.'].rev(), dest.rev(), state def pullrebase(orig, ui, repo, *args, **opts): 'Call rebase after pull if the latter has been invoked with --rebase' diff -r fccd350acf79 -r 384082750f2c hgext/relink.py --- a/hgext/relink.py Sun Oct 02 16:41:07 2011 -0500 +++ b/hgext/relink.py Sat Oct 15
14:30:50 2011 -0500 @@ -36,7 +36,8 @@ command is running. (Both repositories will be locked against writes.) """ - if not hasattr(util, 'samefile') or not hasattr(util, 'samedevice'): + if (not util.safehasattr(util, 'samefile') or + not util.safehasattr(util, 'samedevice')): raise util.Abort(_('hardlinks are not supported on this system')) src = hg.repository(ui, ui.expandpath(origin or 'default-relink', origin or 'default')) diff -r fccd350acf79 -r 384082750f2c hgext/share.py --- a/hgext/share.py Sun Oct 02 16:41:07 2011 -0500 +++ b/hgext/share.py Sat Oct 15 14:30:50 2011 -0500 @@ -6,7 +6,7 @@ '''share a common history between several working directories''' from mercurial.i18n import _ -from mercurial import hg, commands +from mercurial import hg, commands, util def share(ui, source, dest=None, noupdate=False): """create a new shared repository @@ -28,11 +28,46 @@ return hg.share(ui, source, dest, not noupdate) +def unshare(ui, repo): + """convert a shared repository to a normal one + + Copy the store data to the repo and remove the sharedpath data. + """ + + if repo.sharedpath == repo.path: + raise util.Abort(_("this is not a shared repo")) + + destlock = lock = None + lock = repo.lock() + try: + # we use locks here because if we race with commit, we + # can end up with extra data in the cloned revlogs that's + # not pointed to by changesets, thus causing verify to + # fail + + destlock = hg.copystore(ui, repo, repo.path) + + sharefile = repo.join('sharedpath') + util.rename(sharefile, sharefile + '.old') + + repo.requirements.discard('sharedpath') + repo._writerequirements() + finally: + destlock and destlock.release() + lock and lock.release() + + # update store, spath, sopener and sjoin of repo + repo.__init__(ui, repo.root) + cmdtable = { "share": (share, [('U', 'noupdate', None, _('do not create a working copy'))], _('[-U] SOURCE [DEST]')), + "unshare": + (unshare, + [], + ''), } commands.norepo += " share" diff -r fccd350acf79 -r 384082750f2c hgext/transplant.py --- a/hgext/transplant.py Sun Oct 02 16:41:07 2011 -0500 +++ b/hgext/transplant.py Sat Oct 15 14:30:50 2011 -0500 @@ -81,6 +81,7 @@ self.opener = scmutil.opener(self.path) self.transplants = transplants(self.path, 'transplants', opener=self.opener) + self.editor = None def applied(self, repo, node, parent): '''returns True if a node is already an ancestor of parent @@ -105,10 +106,11 @@ diffopts = patch.diffopts(self.ui, opts) diffopts.git = True - lock = wlock = None + lock = wlock = tr = None try: wlock = repo.wlock() lock = repo.lock() + tr = repo.transaction('transplant') for rev in revs: node = revmap[rev] revstr = '%s:%s' % (rev, short(node)) @@ -172,12 +174,15 @@ finally: if patchfile: os.unlink(patchfile) + tr.close() if pulls: repo.pull(source, heads=pulls) merge.update(repo, pulls[-1], False, False, None) finally: self.saveseries(revmap, merges) self.transplants.write() + if tr: + tr.release() lock.release() wlock.release() @@ -253,7 +258,8 @@ else: m = match.exact(repo.root, '', files) - n = repo.commit(message, user, date, extra=extra, match=m) + n = repo.commit(message, user, date, extra=extra, match=m, + editor=self.editor) if not n: # Crash here to prevent an unclear crash later, in # transplants.write(). 
This can happen if patch.patch() @@ -304,7 +310,8 @@ revlog.hex(parents[0])) if merge: repo.dirstate.setparents(p1, parents[1]) - n = repo.commit(message, user, date, extra=extra) + n = repo.commit(message, user, date, extra=extra, + editor=self.editor) if not n: raise util.Abort(_('commit failed')) if not merge: @@ -461,6 +468,7 @@ ('a', 'all', None, _('pull all changesets up to BRANCH')), ('p', 'prune', [], _('skip over REV'), _('REV')), ('m', 'merge', [], _('merge at REV'), _('REV')), + ('e', 'edit', False, _('invoke editor on commit messages')), ('', 'log', None, _('append transplant info to log message')), ('c', 'continue', None, _('continue last transplant session ' 'after repair')), @@ -549,6 +557,8 @@ opts['filter'] = ui.config('transplant', 'filter') tp = transplanter(ui, repo) + if opts.get('edit'): + tp.editor = cmdutil.commitforceeditor p1, p2 = repo.dirstate.parents() if len(repo) > 0 and p1 == revlog.nullid: diff -r fccd350acf79 -r 384082750f2c mercurial/archival.py --- a/mercurial/archival.py Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/archival.py Sat Oct 15 14:30:50 2011 -0500 @@ -195,7 +195,7 @@ return f = self.opener(name, "w", atomictemp=True) f.write(data) - f.rename() + f.close() destfile = os.path.join(self.basedir, name) os.chmod(destfile, mode) diff -r fccd350acf79 -r 384082750f2c mercurial/bdiff.c --- a/mercurial/bdiff.c Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/bdiff.c Sat Oct 15 14:30:50 2011 -0500 @@ -366,11 +366,11 @@ static PyObject *bdiff(PyObject *self, PyObject *args) { - char *sa, *sb; + char *sa, *sb, *rb; PyObject *result = NULL; struct line *al, *bl; struct hunk l, *h; - char encode[12], *rb; + uint32_t encode[3]; int an, bn, len = 0, la, lb, count; if (!PyArg_ParseTuple(args, "s#s#:bdiff", &sa, &la, &sb, &lb)) @@ -407,9 +407,9 @@ for (h = l.next; h; h = h->next) { if (h->a1 != la || h->b1 != lb) { len = bl[h->b1].l - bl[lb].l; - *(uint32_t *)(encode) = htonl(al[la].l - al->l); - *(uint32_t *)(encode + 4) = htonl(al[h->a1].l - al->l); - *(uint32_t *)(encode + 8) = htonl(len); + encode[0] = htonl(al[la].l - al->l); + encode[1] = htonl(al[h->a1].l - al->l); + encode[2] = htonl(len); memcpy(rb, encode, 12); memcpy(rb + 12, bl[lb].l, len); rb += 12 + len; diff -r fccd350acf79 -r 384082750f2c mercurial/bookmarks.py --- a/mercurial/bookmarks.py Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/bookmarks.py Sat Oct 15 14:30:50 2011 -0500 @@ -26,7 +26,13 @@ bookmarks = {} try: for line in repo.opener('bookmarks'): - sha, refspec = line.strip().split(' ', 1) + line = line.strip() + if not line: + continue + if ' ' not in line: + repo.ui.warn(_('malformed line in .hg/bookmarks: %r\n') % line) + continue + sha, refspec = line.split(' ', 1) refspec = encoding.tolocal(refspec) try: bookmarks[refspec] = repo.changelog.lookup(sha) @@ -84,7 +90,7 @@ file = repo.opener('bookmarks', 'w', atomictemp=True) for refspec, node in refs.iteritems(): file.write("%s %s\n" % (hex(node), encoding.fromlocal(refspec))) - file.rename() + file.close() # touch 00changelog.i so hgweb reloads bookmarks (no lock needed) try: @@ -115,7 +121,7 @@ try: file = repo.opener('bookmarks.current', 'w', atomictemp=True) file.write(encoding.fromlocal(mark)) - file.rename() + file.close() finally: wlock.release() repo._bookmarkcurrent = mark @@ -140,16 +146,15 @@ marks[mark] = new.node() update = True if update: - write(repo) + repo._writebookmarks(marks) def listbookmarks(repo): # We may try to list bookmarks on a repo type that does not # support it (e.g., statichttprepository). 
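A pattern repeated throughout this patch is replacing bare hasattr() checks with getattr() plus a default (or util.safehasattr()), as in the listbookmarks hunk just below. A minimal sketch of the motivation, in plain Python 2 with a hypothetical class (not code from this patch): hasattr() swallows any exception raised while computing an attribute, while getattr() with a default only masks AttributeError:

class repolike(object):
    @property
    def _bookmarks(self):
        # simulate an attribute that fails for a reason other than absence
        raise IOError('bookmark store unreadable')

r = repolike()
print hasattr(r, '_bookmarks')      # False -- the IOError is silently eaten
print getattr(r, '_bookmarks', {})  # raises IOError -- real errors propagate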
- if not hasattr(repo, '_bookmarks'): - return {} + marks = getattr(repo, '_bookmarks', {}) d = {} - for k, v in repo._bookmarks.iteritems(): + for k, v in marks.iteritems(): d[k] = hex(v) return d diff -r fccd350acf79 -r 384082750f2c mercurial/byterange.py --- a/mercurial/byterange.py Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/byterange.py Sat Oct 15 14:30:50 2011 -0500 @@ -103,9 +103,7 @@ """This effectively allows us to wrap at the instance level. Any attribute not found in _this_ object will be searched for in self.fo. This includes methods.""" - if hasattr(self.fo, name): - return getattr(self.fo, name) - raise AttributeError(name) + return getattr(self.fo, name) def tell(self): """Return the position within the range. @@ -170,10 +168,8 @@ offset is relative to the current position (self.realpos). """ assert offset >= 0 - if not hasattr(self.fo, 'seek'): - self._poor_mans_seek(offset) - else: - self.fo.seek(self.realpos + offset) + seek = getattr(self.fo, 'seek', self._poor_mans_seek) + seek(self.realpos + offset) self.realpos += offset def _poor_mans_seek(self, offset): diff -r fccd350acf79 -r 384082750f2c mercurial/cmdutil.py --- a/mercurial/cmdutil.py Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/cmdutil.py Sat Oct 15 14:30:50 2011 -0500 @@ -8,7 +8,7 @@ from node import hex, nullid, nullrev, short from i18n import _ import os, sys, errno, re, tempfile -import util, scmutil, templater, patch, error, templatekw, revlog +import util, scmutil, templater, patch, error, templatekw, revlog, copies import match as matchmod import subrepo @@ -75,6 +75,10 @@ modified, added, removed, deleted = repo.status()[:4] if modified or added or removed or deleted: raise util.Abort(_("outstanding uncommitted changes")) + ctx = repo[None] + for s in ctx.substate: + if ctx.sub(s).dirty(): + raise util.Abort(_("uncommitted changes in subrepo %s") % s) def logmessage(ui, opts): """ get the log message according to -m and -l option """ @@ -109,12 +113,13 @@ limit = None return limit -def makefilename(repo, pat, node, +def makefilename(repo, pat, node, desc=None, total=None, seqno=None, revwidth=None, pathname=None): node_expander = { 'H': lambda: hex(node), 'R': lambda: str(repo.changelog.rev(node)), 'h': lambda: short(node), + 'm': lambda: re.sub('[^\w]', '_', str(desc)) } expander = { '%': lambda: '%', @@ -154,14 +159,14 @@ raise util.Abort(_("invalid format spec '%%%s' in output filename") % inst.args[0]) -def makefileobj(repo, pat, node=None, total=None, +def makefileobj(repo, pat, node=None, desc=None, total=None, seqno=None, revwidth=None, mode='wb', pathname=None): writable = mode not in ('r', 'rb') if not pat or pat == '-': fp = writable and repo.ui.fout or repo.ui.fin - if hasattr(fp, 'fileno'): + if util.safehasattr(fp, 'fileno'): return os.fdopen(os.dup(fp.fileno()), mode) else: # if this fp can't be duped properly, return @@ -177,11 +182,11 @@ return getattr(self.f, attr) return wrappedfileobj(fp) - if hasattr(pat, 'write') and writable: + if util.safehasattr(pat, 'write') and writable: return pat - if hasattr(pat, 'read') and 'r' in mode: + if util.safehasattr(pat, 'read') and 'r' in mode: return pat - return open(makefilename(repo, pat, node, total, seqno, revwidth, + return open(makefilename(repo, pat, node, desc, total, seqno, revwidth, pathname), mode) @@ -516,11 +521,13 @@ shouldclose = False if not fp: - fp = makefileobj(repo, template, node, total=total, seqno=seqno, - revwidth=revwidth, mode='ab') + desc_lines = ctx.description().rstrip().split('\n') + desc = desc_lines[0] #Commit 
always has a first line. + fp = makefileobj(repo, template, node, desc=desc, total=total, + seqno=seqno, revwidth=revwidth, mode='ab') if fp != template: shouldclose = True - if fp != sys.stdout and hasattr(fp, 'name'): + if fp != sys.stdout and util.safehasattr(fp, 'name'): repo.ui.note("%s\n" % fp.name) fp.write("# HG changeset patch\n") @@ -1173,6 +1180,19 @@ bad.extend(f for f in rejected if f in match.files()) return bad +def duplicatecopies(repo, rev, p1, p2): + "Reproduce copies found in the source revision in the dirstate for grafts" + # Here we simulate the copies and renames in the source changeset + cop, diver = copies.copies(repo, repo[rev], repo[p1], repo[p2], True) + m1 = repo[rev].manifest() + m2 = repo[p1].manifest() + for k, v in cop.iteritems(): + if k in m1: + if v in m1 or v in m2: + repo.dirstate.copy(v, k) + if v in m2 and v not in m1 and k in m2: + repo.dirstate.remove(v) + def commit(ui, repo, commitfunc, pats, opts): '''commit the specified files or all outstanding changes''' date = opts.get('date') diff -r fccd350acf79 -r 384082750f2c mercurial/commands.py --- a/mercurial/commands.py Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/commands.py Sat Oct 15 14:30:50 2011 -0500 @@ -119,6 +119,10 @@ ('', 'stat', None, _('output diffstat-style summary of changes')), ] +mergetoolopts = [ + ('t', 'tool', '', _('specify merge tool')), +] + similarityopts = [ ('s', 'similarity', '', _('guess renamed files by similarity (0<=s<=100)'), _('SIMILARITY')) @@ -303,6 +307,18 @@ The archive type is automatically detected based on file extension (or override using -t/--type). + .. container:: verbose + + Examples: + + - create a zip file containing the 1.0 release:: + + hg archive -r 1.0 project-1.0.zip + + - create a tarball excluding .hg files:: + + hg archive project.tar.gz -X ".hg*" + Valid types are: :``files``: a directory full of files (default) @@ -348,10 +364,10 @@ @command('backout', [('', 'merge', None, _('merge with old dirstate parent after backout')), - ('', 'parent', '', _('parent to choose when backing out merge'), _('REV')), - ('t', 'tool', '', _('specify merge tool')), + ('', 'parent', '', + _('parent to choose when backing out merge (DEPRECATED)'), _('REV')), ('r', 'rev', '', _('revision to backout'), _('REV')), - ] + walkopts + commitopts + commitopts2, + ] + mergetoolopts + walkopts + commitopts + commitopts2, _('[OPTION]... [-r] REV')) def backout(ui, repo, node=None, rev=None, **opts): '''reverse effect of earlier changeset @@ -363,15 +379,21 @@ is committed automatically. Otherwise, hg needs to merge the changes and the merged result is left uncommitted. - By default, the pending changeset will have one parent, - maintaining a linear history. With --merge, the pending changeset - will instead have two parents: the old parent of the working - directory and a new child of REV that simply undoes REV. - - Before version 1.7, the behavior without --merge was equivalent to - specifying --merge followed by :hg:`update --clean .` to cancel - the merge and leave the child of REV as a head to be merged - separately. + .. note:: + backout cannot be used to fix either an unwanted or + incorrect merge. + + .. container:: verbose + + By default, the pending changeset will have one parent, + maintaining a linear history. With --merge, the pending + changeset will instead have two parents: the old parent of the + working directory and a new child of REV that simply undoes REV. 
+ + Before version 1.7, the behavior without --merge was equivalent + to specifying --merge followed by :hg:`update --clean .` to + cancel the merge and leave the child of REV as a head to be + merged separately. See :hg:`help dates` for a list of formats valid for -d/--date. @@ -403,8 +425,7 @@ raise util.Abort(_('cannot backout a change with no parents')) if p2 != nullid: if not opts.get('parent'): - raise util.Abort(_('cannot backout a merge changeset without ' - '--parent')) + raise util.Abort(_('cannot backout a merge changeset')) p = repo.lookup(opts['parent']) if p not in (p1, p2): raise util.Abort(_('%s is not a parent of %s') % @@ -486,6 +507,54 @@ (command not found) will abort the bisection, and any other non-zero exit status means the revision is bad. + .. container:: verbose + + Some examples: + + - start a bisection with known bad revision 12, and good revision 34:: + + hg bisect --bad 34 + hg bisect --good 12 + + - advance the current bisection by marking current revision as good or + bad:: + + hg bisect --good + hg bisect --bad + + - mark the current revision, or a known revision, to be skipped (e.g. if + that revision is not usable because of another issue):: + + hg bisect --skip + hg bisect --skip 23 + + - forget the current bisection:: + + hg bisect --reset + + - use 'make && make tests' to automatically find the first broken + revision:: + + hg bisect --reset + hg bisect --bad 34 + hg bisect --good 12 + hg bisect --command 'make && make tests' + + - see all changesets whose states are already known in the current + bisection:: + + hg log -r "bisect(pruned)" + + - see all changesets that took part in the current bisection:: + + hg log -r "bisect(range)" + + - with the graphlog extension, you can even get a nice graph:: + + hg log --graph -r "bisect(range)" + + See :hg:`help revsets` for more about the `bisect()` keyword. + Returns 0 on success. """ def extendbisectrange(nodes, good): @@ -767,7 +836,6 @@ :hg:`commit --close-branch` to mark this branch as closed. .. note:: - Branch names are permanent. Use :hg:`bookmark` to create a light-weight bookmark instead. See :hg:`help glossary` for more information about named branches and bookmarks. @@ -977,56 +1045,84 @@ The location of the source is added to the new repository's ``.hg/hgrc`` file, as the default to be used for future pulls. - See :hg:`help urls` for valid source format details. - - It is possible to specify an ``ssh://`` URL as the destination, but no - ``.hg/hgrc`` and working directory will be created on the remote side. - Please see :hg:`help urls` for important details about ``ssh://`` URLs. - - A set of changesets (tags, or branch names) to pull may be specified - by listing each changeset (tag, or branch name) with -r/--rev. - If -r/--rev is used, the cloned repository will contain only a subset - of the changesets of the source repository. Only the set of changesets - defined by all -r/--rev options (including all their ancestors) - will be pulled into the destination repository. - No subsequent changesets (including subsequent tags) will be present - in the destination. - - Using -r/--rev (or 'clone src#rev dest') implies --pull, even for - local source repositories. - - For efficiency, hardlinks are used for cloning whenever the source - and destination are on the same filesystem (note this applies only - to the repository data, not to the working directory). Some - filesystems, such as AFS, implement hardlinking incorrectly, but - do not report errors.
In these cases, use the --pull option to - avoid hardlinking. - - In some cases, you can clone repositories and the working directory - using full hardlinks with :: - - $ cp -al REPO REPOCLONE - - This is the fastest way to clone, but it is not always safe. The - operation is not atomic (making sure REPO is not modified during - the operation is up to you) and you have to make sure your editor - breaks hardlinks (Emacs and most Linux Kernel tools do so). Also, - this is not compatible with certain extensions that place their - metadata under the .hg directory, such as mq. - - Mercurial will update the working directory to the first applicable - revision from this list: - - a) null if -U or the source repository has no changesets - b) if -u . and the source repository is local, the first parent of - the source repository's working directory - c) the changeset specified with -u (if a branch name, this means the - latest head of that branch) - d) the changeset specified with -r - e) the tipmost head specified with -b - f) the tipmost head specified with the url#branch source syntax - g) the tipmost head of the default branch - h) tip + Only local paths and ``ssh://`` URLs are supported as + destinations. For ``ssh://`` destinations, no working directory or + ``.hg/hgrc`` will be created on the remote side. + + To pull only a subset of changesets, specify one or more revision + identifiers with -r/--rev or branches with -b/--branch. The + resulting clone will contain only the specified changesets and + their ancestors. These options (or 'clone src#rev dest') imply + --pull, even for local source repositories. Note that specifying a + tag will include the tagged changeset but not the changeset + containing the tag. + + To check out a particular version, use -u/--update, or + -U/--noupdate to create a clone with no working directory. + + .. container:: verbose + + For efficiency, hardlinks are used for cloning whenever the + source and destination are on the same filesystem (note this + applies only to the repository data, not to the working + directory). Some filesystems, such as AFS, implement hardlinking + incorrectly, but do not report errors. In these cases, use the + --pull option to avoid hardlinking. + + In some cases, you can clone repositories and the working + directory using full hardlinks with :: + + $ cp -al REPO REPOCLONE + + This is the fastest way to clone, but it is not always safe. The + operation is not atomic (making sure REPO is not modified during + the operation is up to you) and you have to make sure your + editor breaks hardlinks (Emacs and most Linux Kernel tools do + so). Also, this is not compatible with certain extensions that + place their metadata under the .hg directory, such as mq. + + Mercurial will update the working directory to the first applicable + revision from this list: + + a) null if -U or the source repository has no changesets + b) if -u .
and the source repository is local, the first parent of + the source repository's working directory + c) the changeset specified with -u (if a branch name, this means the + latest head of that branch) + d) the changeset specified with -r + e) the tipmost head specified with -b + f) the tipmost head specified with the url#branch source syntax + g) the tipmost head of the default branch + h) tip + + Examples: + + - clone a remote repository to a new directory named hg/:: + + hg clone http://selenic.com/hg + + - create a lightweight local clone:: + + hg clone project/ project-feature/ + + - clone from an absolute path on an ssh server (note double-slash):: + + hg clone ssh://user@server//home/projects/alpha/ + + - do a high-speed clone over a LAN while checking out a + specified version:: + + hg clone --uncompressed http://server/repo -u 1.5 + + - create a repository without changesets after a particular revision:: + + hg clone -r 04e544 experimental/ good/ + + - clone (and track) a particular named branch:: + + hg clone http://selenic.com/hg#stable + + See :hg:`help urls` for details on specifying URLs. Returns 0 on success. """ @@ -1102,8 +1198,8 @@ ctx = repo[node] parents = ctx.parents() - if bheads and not [x for x in parents - if x.node() in bheads and x.branch() == branch]: + if (bheads and node not in bheads and not + [x for x in parents if x.node() in bheads and x.branch() == branch]): ui.status(_('created new head\n')) # The message is not printed for initial roots. For the other # changesets, it is printed in the following situations: @@ -1656,8 +1752,9 @@ def debugignore(ui, repo, *values, **opts): """display the combined ignore pattern""" ignore = repo.dirstate._ignore - if hasattr(ignore, 'includepat'): - ui.write("%s\n" % ignore.includepat) + includepat = getattr(ignore, 'includepat', None) + if includepat is not None: + ui.write("%s\n" % includepat) else: raise util.Abort(_("no ignore patterns found")) @@ -1755,6 +1852,7 @@ % os.path.dirname(__file__)) try: import bdiff, mpatch, base85, osutil + dir(bdiff), dir(mpatch), dir(base85), dir(osutil) # quiet pyflakes except Exception, inst: ui.write(" %s\n" % inst) ui.write(_(" One or more extensions could not be found")) @@ -1762,9 +1860,10 @@ problems += 1 # templates - ui.status(_("Checking templates...\n")) + import templater + p = templater.templatepath() + ui.status(_("Checking templates (%s)...\n") % ' '.join(p)) try: - import templater templater.templater(templater.templatepath("map-cmdline.default")) except Exception, inst: ui.write(" %s\n" % inst) @@ -2170,6 +2269,32 @@ Use the -g/--git option to generate diffs in the git extended diff format. For more information, read :hg:`help diffs`. + .. container:: verbose + + Examples: + + - compare a file in the current working directory to its parent:: + + hg diff foo.c + + - compare two historical versions of a directory, with rename info:: + + hg diff --git -r 1.0:1.2 lib/ + + - get change stats relative to the last change on some date:: + + hg diff --stat -r "date('may 2')" + + - diff all newly-added files that contain a keyword:: + + hg diff "set:added() and grep(GNU)" + + - compare a revision and its parents:: + + hg diff -c 9353 # compare against first parent + hg diff -r 9353^:9353 # same using revset syntax + hg diff -r 9353^2:9353 # compare against the second parent + Returns 0 on success. 
""" @@ -2225,6 +2350,7 @@ :``%R``: changeset revision number :``%b``: basename of the exporting repository :``%h``: short-form changeset hash (12 hexadecimal digits) + :``%m``: first line of the commit message (only alphanumeric characters) :``%n``: zero-padded sequence number, starting at 1 :``%r``: zero-padded changeset revision number @@ -2238,6 +2364,25 @@ With the --switch-parent option, the diff will be against the second parent. It can be useful to review a merge. + .. container:: verbose + + Examples: + + - use export and import to transplant a bugfix to the current + branch:: + + hg export -r 9353 | hg import - + + - export all the changesets between two revisions to a file with + rename information:: + + hg export --git -r 123:150 > changes.txt + + - split outgoing changes into a series of patches with + descriptive names:: + + hg export -r "outgoing()" -o "%n-%m.patch" + Returns 0 on success. """ changesets += tuple(opts.get('rev', [])) @@ -2265,6 +2410,18 @@ To undo a forget before the next commit, see :hg:`add`. + .. container:: verbose + + Examples: + + - forget newly-added binary files:: + + hg forget "set:added() and binary()" + + - forget files that would be excluded by .hgignore:: + + hg forget "set:hgignore()" + Returns 0 on success. """ @@ -2290,6 +2447,160 @@ repo[None].forget(forget) return errs +@command( + 'graft', + [('c', 'continue', False, _('resume interrupted graft')), + ('e', 'edit', False, _('invoke editor on commit messages')), + ('D', 'currentdate', False, + _('record the current date as commit date')), + ('U', 'currentuser', False, + _('record the current user as committer'), _('DATE'))] + + commitopts2 + mergetoolopts, + _('[OPTION]... REVISION...')) +def graft(ui, repo, *revs, **opts): + '''copy changes from other branches onto the current branch + + This command uses Mercurial's merge logic to copy individual + changes from other branches without merging branches in the + history graph. This is sometimes known as 'backporting' or + 'cherry-picking'. By default, graft will copy user, date, and + description from the source changesets. + + Changesets that are ancestors of the current revision, that have + already been grafted, or that are merges will be skipped. + + If a graft merge results in conflicts, the graft process is + aborted so that the current merge can be manually resolved. Once + all conflicts are addressed, the graft process can be continued + with the -c/--continue option. + + .. note:: + The -c/--continue option does not reapply earlier options. + + .. container:: verbose + + Examples: + + - copy a single change to the stable branch and edit its description:: + + hg update stable + hg graft --edit 9393 + + - graft a range of changesets with one exception, updating dates:: + + hg graft -D "2085::2093 and not 2091" + + - continue a graft after resolving conflicts:: + + hg graft -c + + - show the source of a grafted changeset:: + + hg log --debug -r tip + + Returns 0 on successful completion. 
+ ''' + + if not opts.get('user') and opts.get('currentuser'): + opts['user'] = ui.username() + if not opts.get('date') and opts.get('currentdate'): + opts['date'] = "%d %d" % util.makedate() + + editor = None + if opts.get('edit'): + editor = cmdutil.commitforceeditor + + cont = False + if opts['continue']: + cont = True + if revs: + raise util.Abort(_("can't specify --continue and revisions")) + # read in unfinished revisions + try: + nodes = repo.opener.read('graftstate').splitlines() + revs = [repo[node].rev() for node in nodes] + except IOError, inst: + if inst.errno != errno.ENOENT: + raise + raise util.Abort(_("no graft state found, can't continue")) + else: + cmdutil.bailifchanged(repo) + if not revs: + raise util.Abort(_('no revisions specified')) + revs = scmutil.revrange(repo, revs) + + # check for merges + for ctx in repo.set('%ld and merge()', revs): + ui.warn(_('skipping ungraftable merge revision %s\n') % ctx.rev()) + revs.remove(ctx.rev()) + if not revs: + return -1 + + # check for ancestors of dest branch + for ctx in repo.set('::. and %ld', revs): + ui.warn(_('skipping ancestor revision %s\n') % ctx.rev()) + revs.remove(ctx.rev()) + if not revs: + return -1 + + # check ancestors for earlier grafts + ui.debug('scanning for existing transplants\n') + for ctx in repo.set("::. - ::%ld", revs): + n = ctx.extra().get('source') + if n and n in repo: + r = repo[n].rev() + ui.warn(_('skipping already grafted revision %s\n') % r) + revs.remove(r) + if not revs: + return -1 + + for pos, ctx in enumerate(repo.set("%ld", revs)): + current = repo['.'] + ui.status(_('grafting revision %s\n') % ctx.rev()) + + # we don't merge the first commit when continuing + if not cont: + # perform the graft merge with p1(rev) as 'ancestor' + try: + # ui.forcemerge is an internal variable, do not document + repo.ui.setconfig('ui', 'forcemerge', opts.get('tool', '')) + stats = mergemod.update(repo, ctx.node(), True, True, False, + ctx.p1().node()) + finally: + ui.setconfig('ui', 'forcemerge', '') + # drop the second merge parent + repo.dirstate.setparents(current.node(), nullid) + repo.dirstate.write() + # fix up dirstate for copies and renames + cmdutil.duplicatecopies(repo, ctx.rev(), current.node(), nullid) + # report any conflicts + if stats and stats[3] > 0: + # write out state for --continue + nodelines = [repo[rev].hex() + "\n" for rev in revs[pos:]] + repo.opener.write('graftstate', ''.join(nodelines)) + raise util.Abort( + _("unresolved conflicts, can't continue"), + hint=_('use hg resolve and hg graft --continue')) + else: + cont = False + + # commit + extra = {'source': ctx.hex()} + user = ctx.user() + if opts.get('user'): + user = opts['user'] + date = ctx.date() + if opts.get('date'): + date = opts['date'] + repo.commit(text=ctx.description(), user=user, + date=date, extra=extra, editor=editor) + + # remove state when we complete successfully + if os.path.exists(repo.join('graftstate')): + util.unlinkpath(repo.join('graftstate')) + + return 0 + @command('grep', [('0', 'print0', None, _('end fields with NUL')), ('', 'all', None, _('print all revisions that match')), ('a', 'text', None, _('treat all files as text')), ('f', 'follow', None, _('follow changeset history,' ' or file history across copies and renames')), ('i', 'ignore-case', None, _('ignore case when matching')), ('l', 'files-with-matches', None, _('print only filenames and revisions that match')), ('n', 'line-number', None, _('print matching line numbers')), ('r', 'rev', [], _('only search files changed within revision range'), _('REV')), ('u', 'user', None, _('list the author (long with -v)')), ('d', 'date', None, _('list the date (short with -q)')), ] + walkopts, _('[OPTION]... PATTERN [FILE]...')) def grep(ui, repo, pattern, *pats, **opts): """search for a pattern in specified files and revisions Search revisions of files for a regular expression. This command behaves differently than Unix grep. It only accepts Python/Perl regexps. It searches repository history, not the working directory. It always prints the revision number in which a match appears. By default, grep only prints output for the first revision of a file in which it finds a match. To get it to print every revision that contains a change in match status ("-" for a match that becomes a non-match, or "+" for a non-match that becomes a match), use the --all flag. Returns 0 if a match is found, 1 otherwise. """ reflags = 0 if opts.get('ignore_case'): reflags |= re.I try: regexp = re.compile(pattern, reflags) except re.error, inst: ui.warn(_("grep: invalid match pattern: %s\n") % inst) return 1 sep, eol = ':', '\n' if opts.get('print0'): sep = eol = '\0' getfile = util.lrucachefunc(repo.file) def matchlines(body): begin = 0 linenum = 0 while True: match = regexp.search(body, begin) if not match: break mstart, mend = match.span() linenum += body.count('\n', begin, mstart) + 1 lstart = body.rfind('\n', begin, mstart) + 1 or begin lend = body.find('\n', mend) yield linenum, mstart - lstart, mend - lstart, body[lstart:lend] begin = lend + 1 class linestate(object): def __init__(self, line, linenum, colstart, colend): self.line = line self.linenum = linenum self.colstart = colstart self.colend = colend def __hash__(self): return hash((self.linenum, self.line)) def __eq__(self, other): return self.line == other.line @@ -2576,7 +2887,7 @@ [('e', 'extension', None, _('show only help for extensions')), ('c', 'command', None, _('show only help for commands'))], _('[-ec] [TOPIC]')) -def help_(ui, name=None, with_version=False, unknowncmd=False, full=True, **opts): +def help_(ui, name=None, unknowncmd=False, full=True, **opts): """show help for a given topic or a help overview With no arguments, print a list of commands with short help messages.
@@ -2586,14 +2897,67 @@ Returns 0 if successful. """ - option_lists = [] + textwidth = min(ui.termwidth(), 80) - 2 - def addglobalopts(aliases): + def optrst(options): + data = [] + multioccur = False + for option in options: + if len(option) == 5: + shortopt, longopt, default, desc, optlabel = option + else: + shortopt, longopt, default, desc = option + optlabel = _("VALUE") # default label + + if _("DEPRECATED") in desc and not ui.verbose: + continue + + so = '' + if shortopt: + so = '-' + shortopt + lo = '--' + longopt + if default: + desc += _(" (default: %s)") % default + + if isinstance(default, list): + lo += " %s [+]" % optlabel + multioccur = True + elif (default is not None) and not isinstance(default, bool): + lo += " %s" % optlabel + + data.append((so, lo, desc)) + + rst = minirst.maketable(data, 1) + + if multioccur: + rst += _("\n[+] marked option can be specified multiple times\n") + + return rst + + # list all option lists + def opttext(optlist, width): + rst = '' + if not optlist: + return '' + + for title, options in optlist: + rst += '\n%s\n' % title + if options: + rst += "\n" + rst += optrst(options) + rst += '\n' + + return '\n' + minirst.format(rst, width) + + def addglobalopts(optlist, aliases): + if ui.quiet: + return [] + if ui.verbose: - option_lists.append((_("global options:"), globalopts)) + optlist.append((_("global options:"), globalopts)) if name == 'shortlist': - option_lists.append((_('use "hg help" for the full list ' + optlist.append((_('use "hg help" for the full list ' 'of commands'), ())) else: if name == 'shortlist': @@ -2605,14 +2969,10 @@ msg = _('use "hg -v help%s" to show builtin aliases and ' 'global options') % (name and " " + name or "") else: - msg = _('use "hg -v help %s" to show global options') % name - option_lists.append((msg, ())) + msg = _('use "hg -v help %s" to show more info') % name + optlist.append((msg, ())) def helpcmd(name): - if with_version: - version_(ui) - ui.write('\n') - try: aliases, entry = cmdutil.findcmd(name, table, strict=unknowncmd) except error.AmbiguousCommand, inst: @@ -2620,7 +2980,7 @@ # except block, nor can be used inside a lambda. 
python issue4617 prefix = inst.args[0] select = lambda c: c.lstrip('^').startswith(prefix) - helplist(_('list of commands:\n\n'), select) + helplist(select) return # check if it's an invalid alias and display its error if it is @@ -2629,42 +2989,33 @@ entry[0](ui) return + rst = "" + # synopsis if len(entry) > 2: if entry[2].startswith('hg'): - ui.write("%s\n" % entry[2]) + rst += "%s\n" % entry[2] else: - ui.write('hg %s %s\n' % (aliases[0], entry[2])) + rst += 'hg %s %s\n' % (aliases[0], entry[2]) else: - ui.write('hg %s\n' % aliases[0]) + rst += 'hg %s\n' % aliases[0] # aliases if full and not ui.quiet and len(aliases) > 1: - ui.write(_("\naliases: %s\n") % ', '.join(aliases[1:])) + rst += _("\naliases: %s\n") % ', '.join(aliases[1:]) # description doc = gettext(entry[0].__doc__) if not doc: doc = _("(no help text available)") - if hasattr(entry[0], 'definition'): # aliased command + if util.safehasattr(entry[0], 'definition'): # aliased command if entry[0].definition.startswith('!'): # shell alias doc = _('shell alias for::\n\n %s') % entry[0].definition[1:] else: doc = _('alias for: hg %s\n\n%s') % (entry[0].definition, doc) if ui.quiet or not full: doc = doc.splitlines()[0] - keep = ui.verbose and ['verbose'] or [] - formatted, pruned = minirst.format(doc, textwidth, keep=keep) - ui.write("\n%s\n" % formatted) - if pruned: - ui.write(_('\nuse "hg -v help %s" to show verbose help\n') % name) - - if not ui.quiet: - # options - if entry[1]: - option_lists.append((_("options:\n"), entry[1])) - - addglobalopts(False) + rst += "\n" + doc + "\n" # check if this command shadows a non-trivial (multi-line) # extension help text @@ -2674,11 +3025,38 @@ if '\n' in doc.strip(): msg = _('use "hg help -e %s" to show help for ' 'the %s extension') % (name, name) - ui.write('\n%s\n' % msg) + rst += '\n%s\n' % msg except KeyError: pass - def helplist(header, select=None): + # options + if not ui.quiet and entry[1]: + rst += '\noptions:\n\n' + rst += optrst(entry[1]) + + if ui.verbose: + rst += '\nglobal options:\n\n' + rst += optrst(globalopts) + + keep = ui.verbose and ['verbose'] or [] + formatted, pruned = minirst.format(rst, textwidth, keep=keep) + ui.write(formatted) + + if not ui.verbose: + if not full: + ui.write(_('\nuse "hg help %s" to show the full help text\n') + % name) + elif not ui.quiet: + ui.write(_('\nuse "hg -v help %s" to show more info\n') % name) + + + def helplist(select=None): + # list of commands + if name == "shortlist": + header = _('basic commands:\n\n') + else: + header = _('list of commands:\n\n') + h = {} cmds = {} for c, e in table.iteritems(): @@ -2718,8 +3096,22 @@ initindent=' %-*s ' % (m, f), hangindent=' ' * (m + 4)))) - if not ui.quiet: - addglobalopts(True) + if not name: + text = help.listexts(_('enabled extensions:'), extensions.enabled()) + if text: + ui.write("\n%s" % minirst.format(text, textwidth)) + + ui.write(_("\nadditional help topics:\n\n")) + topics = [] + for names, header, doc in help.helptable: + topics.append((sorted(names, key=len, reverse=True)[0], header)) + topics_len = max([len(s[0]) for s in topics]) + for t, desc in topics: + ui.write(" %-*s %s\n" % (topics_len, t, desc)) + + optlist = [] + addglobalopts(optlist, True) + ui.write(opttext(optlist, textwidth)) def helptopic(name): for names, header, doc in help.helptable: @@ -2731,11 +3123,11 @@ # description if not doc: doc = _("(no help text available)") - if hasattr(doc, '__call__'): + if util.safehasattr(doc, '__call__'): doc = doc() ui.write("%s\n\n" % header) - ui.write("%s\n" % 
minirst.format(doc, textwidth, indent=4)) + ui.write("%s" % minirst.format(doc, textwidth, indent=4)) try: cmdutil.findcmd(name, table) ui.write(_('\nuse "hg help -c %s" to see help for ' @@ -2760,7 +3152,7 @@ ui.write(_('%s extension - %s\n\n') % (name.split('.')[-1], head)) if tail: ui.write(minirst.format(tail, textwidth)) - ui.status('\n\n') + ui.status('\n') if mod: try: @@ -2768,7 +3160,7 @@ except AttributeError: ct = {} modcmds = set([c.split('|', 1)[0] for c in ct]) - helplist(_('list of commands:\n\n'), modcmds.__contains__) + helplist(modcmds.__contains__) else: ui.write(_('use "hg help extensions" for information on enabling ' 'extensions\n')) @@ -2780,7 +3172,7 @@ msg = help.listexts(_("'%s' is provided by the following " "extension:") % cmd, {ext: doc}, indent=4) ui.write(minirst.format(msg, textwidth)) - ui.write('\n\n') + ui.write('\n') ui.write(_('use "hg help extensions" for information on enabling ' 'extensions\n')) @@ -2803,87 +3195,12 @@ i = inst if i: raise i - else: # program name - if ui.verbose or with_version: - version_(ui) - else: - ui.status(_("Mercurial Distributed SCM\n")) + ui.status(_("Mercurial Distributed SCM\n")) ui.status('\n') - - # list of commands - if name == "shortlist": - header = _('basic commands:\n\n') - else: - header = _('list of commands:\n\n') - - helplist(header) - if name != 'shortlist': - text = help.listexts(_('enabled extensions:'), extensions.enabled()) - if text: - ui.write("\n%s\n" % minirst.format(text, textwidth)) - - # list all option lists - opt_output = [] - multioccur = False - for title, options in option_lists: - opt_output.append(("\n%s" % title, None)) - for option in options: - if len(option) == 5: - shortopt, longopt, default, desc, optlabel = option - else: - shortopt, longopt, default, desc = option - optlabel = _("VALUE") # default label - - if _("DEPRECATED") in desc and not ui.verbose: - continue - if isinstance(default, list): - numqualifier = " %s [+]" % optlabel - multioccur = True - elif (default is not None) and not isinstance(default, bool): - numqualifier = " %s" % optlabel - else: - numqualifier = "" - opt_output.append(("%2s%s" % - (shortopt and "-%s" % shortopt, - longopt and " --%s%s" % - (longopt, numqualifier)), - "%s%s" % (desc, - default - and _(" (default: %s)") % default - or ""))) - if multioccur: - msg = _("\n[+] marked option can be specified multiple times") - if ui.verbose and name != 'shortlist': - opt_output.append((msg, None)) - else: - opt_output.insert(-1, (msg, None)) - - if not name: - ui.write(_("\nadditional help topics:\n\n")) - topics = [] - for names, header, doc in help.helptable: - topics.append((sorted(names, key=len, reverse=True)[0], header)) - topics_len = max([len(s[0]) for s in topics]) - for t, desc in topics: - ui.write(" %-*s %s\n" % (topics_len, t, desc)) - - if opt_output: - colwidth = encoding.colwidth - # normalize: (opt or message, desc or None, width of opt) - entries = [desc and (opt, desc, colwidth(opt)) or (opt, None, 0) - for opt, desc in opt_output] - hanging = max([e[2] for e in entries]) - for opt, desc, width in entries: - if desc: - initindent = ' %s%s ' % (opt, ' ' * (hanging - width)) - hangindent = ' ' * (hanging + 3) - ui.write('%s\n' % (util.wrap(desc, textwidth, - initindent=initindent, - hangindent=hangindent))) - else: - ui.write("%s\n" % opt) + helplist() + @command('identify|id', [('r', 'rev', '', @@ -2909,6 +3226,22 @@ Specifying a path to a repository root or Mercurial bundle will cause lookup to operate on that repository/bundle. + .. 
container:: verbose + + Examples: + + - generate a build identifier for the working directory:: + + hg id --id > build-id.dat + + - find the revision corresponding to a tag:: + + hg id -n -r 1.3 + + - check the most recent revision of a remote repository:: + + hg id -r tip http://selenic.com/hg/ + Returns 0 if successful. """ @@ -3007,6 +3340,7 @@ _('directory strip option for patch. This has the same ' 'meaning as the corresponding patch option'), _('NUM')), ('b', 'base', '', _('base path (DEPRECATED)'), _('PATH')), + ('e', 'edit', False, _('invoke editor on commit messages')), ('f', 'force', None, _('skip check for outstanding uncommitted changes')), ('', 'no-commit', None, _("don't commit, just update the working directory")), @@ -3057,6 +3391,27 @@ a URL is specified, the patch will be downloaded from it. See :hg:`help dates` for a list of formats valid for -d/--date. + .. container:: verbose + + Examples: + + - import a traditional patch from a website and detect renames:: + + hg import -s 80 http://example.com/bugfix.patch + + - import a changeset from an hgweb server:: + + hg import http://www.selenic.com/hg/rev/5ca8c111e9aa + + - import all the patches in a Unix-style mbox:: + + hg import incoming-patches.mbox + + - attempt to exactly restore an exported changeset (not always + possible):: + + hg import --exact proposed-fix.patch + Returns 0 on success. """ patches = (patch1,) + patches @@ -3065,6 +3420,10 @@ if date: opts['date'] = util.parsedate(date) + editor = cmdutil.commiteditor + if opts.get('edit'): + editor = cmdutil.commitforceeditor + update = not opts.get('bypass') if not update and opts.get('no_commit'): raise util.Abort(_('cannot use --no-commit with --bypass')) @@ -3080,9 +3439,9 @@ if (opts.get('exact') or not opts.get('force')) and update: cmdutil.bailifchanged(repo) - d = opts["base"] + base = opts["base"] strip = opts["strip"] - wlock = lock = None + wlock = lock = tr = None msgs = [] def checkexact(repo, n, nodeid): @@ -3095,8 +3454,8 @@ patch.extract(ui, hunk) if not tmpname: - return None + return (None, None) - commitid = _('to working directory') + msg = _('applied to working directory') try: cmdline_message = cmdutil.logmessage(ui, opts) @@ -3151,11 +3510,8 @@ m = scmutil.matchfiles(repo, files or []) n = repo.commit(message, opts.get('user') or user, opts.get('date') or date, match=m, - editor=cmdutil.commiteditor) + editor=editor) checkexact(repo, n, nodeid) - # Force a dirstate write so that the next transaction - # backups an up-to-date file.
- repo.dirstate.write() else: if opts.get('exact') or opts.get('import_branch'): branch = branch or 'default' @@ -3181,45 +3537,52 @@ finally: store.close() if n: - commitid = short(n) - return commitid + msg = _('created %s') % short(n) + return (msg, n) finally: os.unlink(tmpname) try: wlock = repo.wlock() lock = repo.lock() + tr = repo.transaction('import') parents = repo.parents() - lastcommit = None - for p in patches: - pf = os.path.join(d, p) - - if pf == '-': - ui.status(_("applying patch from stdin\n")) - pf = ui.fin + for patchurl in patches: + if patchurl == '-': + ui.status(_('applying patch from stdin\n')) + patchfile = ui.fin + patchurl = 'stdin' # for error message else: - ui.status(_("applying %s\n") % p) - pf = url.open(ui, pf) + patchurl = os.path.join(base, patchurl) + ui.status(_('applying %s\n') % patchurl) + patchfile = url.open(ui, patchurl) haspatch = False - for hunk in patch.split(pf): - commitid = tryone(ui, hunk, parents) - if commitid: + for hunk in patch.split(patchfile): + (msg, node) = tryone(ui, hunk, parents) + if msg: haspatch = True - if lastcommit: - ui.status(_('applied %s\n') % lastcommit) - lastcommit = commitid + ui.note(msg + '\n') if update or opts.get('exact'): parents = repo.parents() else: - parents = [repo[commitid]] + parents = [repo[node]] if not haspatch: - raise util.Abort(_('no diffs found')) - + raise util.Abort(_('%s: no diffs found') % patchurl) + + tr.close() if msgs: repo.savecommitmessage('\n* * *\n'.join(msgs)) + except: + # wlock.release() indirectly calls dirstate.write(): since + # we're crashing, we do not want to change the working dir + # parent after all, so make sure it writes nothing + repo.dirstate.invalidate() + raise finally: + if tr: + tr.release() release(lock, wlock) @command('incoming|in', @@ -3356,18 +3719,14 @@ Print the revision history of the specified files or the entire project. + If no revision range is specified, the default is ``tip:0`` unless + --follow is set, in which case the working directory parent is + used as the starting revision. + File history is shown without following rename or copy history of files. Use -f/--follow with a filename to follow history across renames and copies. --follow without a filename will only show - ancestors or descendants of the starting revision. --follow-first - only follows the first parent of merge revisions. - - If no revision range is specified, the default is ``tip:0`` unless - --follow is set, in which case the working directory parent is - used as the starting revision. You can specify a revision set for - log, see :hg:`help revsets` for more information. - - See :hg:`help dates` for a list of formats valid for -d/--date. + ancestors or descendants of the starting revision. By default this command prints revision number and changeset id, tags, non-trivial parents, user, date and time, and a summary for @@ -3380,6 +3739,57 @@ its first parent. Also, only files different from BOTH parents will appear in files:. + .. note:: + for performance reasons, log FILE may omit duplicate changes + made on branches and will not show deletions. To see all + changes including duplicates and deletions, use the --removed + switch. + + .. container:: verbose + + Some examples: + + - changesets with full descriptions and file lists:: + + hg log -v + + - changesets ancestral to the working directory:: + + hg log -f + + - last 10 commits on the current branch:: + + hg log -l 10 -b . 
+ + - changesets showing all modifications of a file, including removals:: + + hg log --removed file.c + + - all changesets that touch a directory, with diffs, excluding merges:: + + hg log -Mp lib/ + + - all revision numbers that match a keyword:: + + hg log -k bug --template "{rev}\\n" + + - check if a given changeset is included in a tagged release:: + + hg log -r "a21ccf and ancestor(1.9)" + + - find all changesets by some user in a date range:: + + hg log -k alice -d "may 2008 to jul 2008" + + - summary of all changesets after the last tag:: + + hg log -r "last(tagged())::" --template "{desc|firstline}\\n" + + See :hg:`help dates` for a list of formats valid for -d/--date. + + See :hg:`help revisions` and :hg:`help revsets` for more about + specifying revisions. + Returns 0 on success. """ @@ -3507,10 +3917,10 @@ @command('^merge', [('f', 'force', None, _('force a merge with outstanding changes')), - ('t', 'tool', '', _('specify merge tool')), ('r', 'rev', '', _('revision to merge'), _('REV')), ('P', 'preview', None, - _('review revisions to merge (no merge is performed)'))], + _('review revisions to merge (no merge is performed)')) + ] + mergetoolopts, _('[-P] [-f] [[-r] REV]')) def merge(ui, repo, node=None, **opts): """merge working directory with another revision @@ -3589,7 +3999,7 @@ try: # ui.forcemerge is an internal variable, do not document - ui.setconfig('ui', 'forcemerge', opts.get('tool', '')) + repo.ui.setconfig('ui', 'forcemerge', opts.get('tool', '')) return hg.merge(repo, node, force=opts.get('force')) finally: ui.setconfig('ui', 'forcemerge', '') @@ -3935,31 +4345,36 @@ def remove(ui, repo, *pats, **opts): """remove the specified files on the next commit - Schedule the indicated files for removal from the repository. - - This only removes files from the current branch, not from the - entire project history. -A/--after can be used to remove only - files that have already been deleted, -f/--force can be used to - force deletion, and -Af can be used to remove files from the next - revision without deleting them from the working directory. - - The following table details the behavior of remove for different - file states (columns) and option combinations (rows). The file - states are Added [A], Clean [C], Modified [M] and Missing [!] (as - reported by :hg:`status`). The actions are Warn, Remove (from - branch) and Delete (from disk):: - - A C M ! - none W RD W R - -f R RD RD R - -A W W W R - -Af R R R R - - Note that remove never deletes files in Added [A] state from the - working directory, not even if option --force is specified. + Schedule the indicated files for removal from the current branch. This command schedules the files to be removed at the next commit. - To undo a remove before that, see :hg:`revert`. + To undo a remove before that, see :hg:`revert`. To undo added + files, see :hg:`forget`. + + .. container:: verbose + + -A/--after can be used to remove only files that have already + been deleted, -f/--force can be used to force deletion, and -Af + can be used to remove files from the next revision without + deleting them from the working directory. + + The following table details the behavior of remove for different + file states (columns) and option combinations (rows). The file + states are Added [A], Clean [C], Modified [M] and Missing [!] + (as reported by :hg:`status`). The actions are Warn, Remove + (from branch) and Delete (from disk): + + ======= == == == == + A C M !
+ ======= == == == == + none W RD W R + -f R RD RD R + -A W W W R + -Af R R R R + ======= == == == == + + Note that remove never deletes files in Added [A] state from the + working directory, not even if option --force is specified. Returns 0 on success, 1 if any warnings encountered. """ @@ -3994,8 +4409,8 @@ ' to force removal)\n') % m.rel(f)) ret = 1 for f in added: - ui.warn(_('not removing %s: file has been marked for add (use -f' - ' to force removal)\n') % m.rel(f)) + ui.warn(_('not removing %s: file has been marked for add' + ' (use forget to undo)\n') % m.rel(f)) ret = 1 for f in sorted(list): @@ -4051,9 +4466,8 @@ ('l', 'list', None, _('list state of files needing merge')), ('m', 'mark', None, _('mark files as resolved')), ('u', 'unmark', None, _('mark files as unresolved')), - ('t', 'tool', '', _('specify merge tool')), ('n', 'no-status', None, _('hide status prefix'))] - + walkopts, + + mergetoolopts + walkopts, _('[OPTION]... [FILE]...')) def resolve(ui, repo, *pats, **opts): """redo merges or set/view the merge status of files @@ -4072,7 +4486,8 @@ performed for files already marked as resolved. Use ``--all/-a`` to select all unresolved files. ``--tool`` can be used to specify the merge tool used for the given files. It overrides the HGMERGE - environment variable and your configuration files. + environment variable and your configuration files. Previous file + contents are saved with a ``.orig`` suffix. - :hg:`resolve -m [FILE]`: mark a file as having been resolved (e.g. after having manually fixed-up the files). The default is @@ -4145,7 +4560,7 @@ [('a', 'all', None, _('revert all changes when no arguments given')), ('d', 'date', '', _('tipmost revision matching date'), _('DATE')), ('r', 'rev', '', _('revert to the specified revision'), _('REV')), - ('', 'no-backup', None, _('do not save backup copies of files')), + ('C', 'no-backup', None, _('do not save backup copies of files')), ] + walkopts + dryrunopts, _('[OPTION]... [-r REV] [NAME]...')) def revert(ui, repo, *pats, **opts): @@ -4237,6 +4652,10 @@ def badfn(path, msg): if path in names: return + if path in repo[node].substate: + ui.warn("%s: %s\n" % (m.rel(path), + 'reverting subrepos is unsupported')) + return path_ = path + '/' for f in names: if f.startswith(path_): @@ -4381,7 +4800,8 @@ finally: wlock.release() -@command('rollback', dryrunopts) +@command('rollback', dryrunopts + + [('f', 'force', False, _('ignore safety measures'))]) def rollback(ui, repo, **opts): """roll back the last transaction (dangerous) @@ -4402,6 +4822,12 @@ - push (with this repository as the destination) - unbundle + It's possible to lose data with rollback: commit, update back to + an older changeset, and then rollback. The update removes the + changes you committed from the working directory, and rollback + removes them from history. To avoid data loss, you must pass + --force in this case. + This command is not intended for use on public repositories. Once changes are visible for pull by other users, rolling a transaction back locally is ineffective (someone else may already have pulled @@ -4411,7 +4837,8 @@ Returns 0 on success, 1 if no rollback data is available. """ - return repo.rollback(opts.get('dry_run')) + return repo.rollback(dryrun=opts.get('dry_run'), + force=opts.get('force')) @command('root', []) def root(ui, repo): @@ -4653,6 +5080,22 @@ I = ignored = origin of the previous file listed as A (added) + .. 
container:: verbose + + Examples: + + - show changes in the working directory relative to a changeset:: + + hg status --rev 9353 + + - show all changes including copies in an existing changeset:: + + hg status --copies --change 9353 + + - get a NUL separated list of added files, suitable for xargs:: + + hg status -an0 + Returns 0 on success. """ @@ -4727,6 +5170,7 @@ ctx = repo[None] parents = ctx.parents() pnode = parents[0].node() + marks = [] for p in parents: # label with log.changeset (instead of log.parent) since this @@ -4735,7 +5179,7 @@ label='log.changeset') ui.write(' '.join(p.tags()), label='log.tag') if p.bookmarks(): - ui.write(' ' + ' '.join(p.bookmarks()), label='log.bookmark') + marks.extend(p.bookmarks()) if p.rev() == -1: if not len(repo): ui.write(_(' (empty repository)')) @@ -4754,6 +5198,20 @@ else: ui.status(m, label='log.branch') + if marks: + current = repo._bookmarkcurrent + ui.write(_('bookmarks:'), label='log.bookmark') + if current is not None: + try: + marks.remove(current) + ui.write(' *' + current, label='bookmarks.current') + except ValueError: + # current bookmark not in parent ctx marks + pass + for m in marks: + ui.write(' ' + m, label='log.bookmark') + ui.write('\n', label='log.bookmark') + st = list(repo.status(unknown=True))[:6] c = repo.dirstate.copies() @@ -4988,19 +5446,22 @@ for t, n in reversed(repo.tagslist()): if ui.quiet: - ui.write("%s\n" % t) + ui.write("%s\n" % t, label='tags.normal') continue hn = hexfunc(n) r = "%5d:%s" % (repo.changelog.rev(n), hn) + rev = ui.label(r, 'log.changeset') spaces = " " * (30 - encoding.colwidth(t)) + tag = ui.label(t, 'tags.normal') if ui.verbose: if repo.tagtype(t) == 'local': tagtype = " local" + tag = ui.label(t, 'tags.local') else: tagtype = "" - ui.write("%s%s %s%s\n" % (t, spaces, r, tagtype)) + ui.write("%s%s %s%s\n" % (tag, spaces, rev, tagtype)) @command('tip', [('p', 'patch', None, _('show patch')), diff -r fccd350acf79 -r 384082750f2c mercurial/commandserver.py --- a/mercurial/commandserver.py Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/commandserver.py Sat Oct 15 14:30:50 2011 -0500 @@ -185,6 +185,7 @@ copiedui = self.ui.copy() self.repo.baseui = copiedui self.repo.ui = self.repo.dirstate._ui = self.repoui.copy() + self.repo.invalidate() req = dispatch.request(args[:], copiedui, self.repo, self.cin, self.cout, self.cerr) diff -r fccd350acf79 -r 384082750f2c mercurial/demandimport.py --- a/mercurial/demandimport.py Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/demandimport.py Sat Oct 15 14:30:50 2011 -0500 @@ -27,6 +27,17 @@ import __builtin__ _origimport = __import__ +nothing = object() + +try: + _origimport(__builtin__.__name__, {}, {}, None, -1) +except TypeError: # no level argument + def _import(name, globals, locals, fromlist, level): + "call _origimport with no level argument" + return _origimport(name, globals, locals, fromlist) +else: + _import = _origimport + class _demandmod(object): """module demand-loader and proxy""" def __init__(self, name, globals, locals): @@ -50,7 +61,7 @@ h, t = p, None if '.'
in p: h, t = p.split('.', 1) - if not hasattr(mod, h): + if getattr(mod, h, nothing) is nothing: setattr(mod, h, _demandmod(p, mod.__dict__, mod.__dict__)) elif t: subload(getattr(mod, h), t) @@ -81,20 +92,14 @@ def _demandimport(name, globals=None, locals=None, fromlist=None, level=-1): if not locals or name in ignore or fromlist == ('*',): # these cases we can't really delay - if level == -1: - return _origimport(name, globals, locals, fromlist) - else: - return _origimport(name, globals, locals, fromlist, level) + return _import(name, globals, locals, fromlist, level) elif not fromlist: # import a [as b] if '.' in name: # a.b base, rest = name.split('.', 1) # email.__init__ loading email.mime if globals and globals.get('__name__', None) == base: - if level != -1: - return _origimport(name, globals, locals, fromlist, level) - else: - return _origimport(name, globals, locals, fromlist) + return _import(name, globals, locals, fromlist, level) # if a is already demand-loaded, add b to its submodule list if base in locals: if isinstance(locals[base], _demandmod): @@ -109,12 +114,12 @@ mod = _origimport(name, globals, locals) # recurse down the module chain for comp in name.split('.')[1:]: - if not hasattr(mod, comp): + if getattr(mod, comp, nothing) is nothing: setattr(mod, comp, _demandmod(comp, mod.__dict__, mod.__dict__)) mod = getattr(mod, comp) for x in fromlist: # set requested submodules for demand load - if not hasattr(mod, x): + if getattr(mod, x, nothing) is nothing: setattr(mod, x, _demandmod(x, mod.__dict__, locals)) return mod @@ -137,6 +142,8 @@ # raise ImportError if x not defined '__main__', '_ssl', # conditional imports in the stdlib, issue1964 + 'rfc822', + 'mimetools', ] def enable(): @@ -146,4 +153,3 @@ def disable(): "disable global demand-loading of modules" __builtin__.__import__ = _origimport - diff -r fccd350acf79 -r 384082750f2c mercurial/dirstate.py --- a/mercurial/dirstate.py Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/dirstate.py Sat Oct 15 14:30:50 2011 -0500 @@ -453,7 +453,7 @@ write(e) write(f) st.write(cs.getvalue()) - st.rename() + st.close() self._lastnormaltime = None self._dirty = self._dirtypl = False diff -r fccd350acf79 -r 384082750f2c mercurial/dispatch.py --- a/mercurial/dispatch.py Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/dispatch.py Sat Oct 15 14:30:50 2011 -0500 @@ -123,6 +123,9 @@ else: ui.warn(_("hg: %s\n") % inst.args[1]) commands.help_(ui, 'shortlist') + except error.OutOfBandError, inst: + ui.warn("abort: remote error:\n") + ui.warn(''.join(inst.args)) except error.RepoError, inst: ui.warn(_("abort: %s!\n") % inst) if inst.hint: @@ -159,16 +162,16 @@ elif m in "zlib".split(): ui.warn(_("(is your Python install correct?)\n")) except IOError, inst: - if hasattr(inst, "code"): + if util.safehasattr(inst, "code"): ui.warn(_("abort: %s\n") % inst) - elif hasattr(inst, "reason"): + elif util.safehasattr(inst, "reason"): try: # usually it is in the form (errno, strerror) reason = inst.reason.args[1] except (AttributeError, IndexError): # it might be anything, for example a string reason = inst.reason ui.warn(_("abort: error: %s\n") % reason) - elif hasattr(inst, "args") and inst.args[0] == errno.EPIPE: + elif util.safehasattr(inst, "args") and inst.args[0] == errno.EPIPE: if ui.debugflag: ui.warn(_("broken pipe\n")) elif getattr(inst, "strerror", None): @@ -338,7 +341,7 @@ ui.debug("alias '%s' shadows command '%s'\n" % (self.name, self.cmdname)) - if hasattr(self, 'shell'): + if util.safehasattr(self, 'shell'): return self.fn(ui, *args, 
**opts) else: try: @@ -363,7 +366,7 @@ # definition might not exist or it might not be a cmdalias pass - cmdtable[aliasdef.cmd] = (aliasdef, aliasdef.opts, aliasdef.help) + cmdtable[aliasdef.name] = (aliasdef, aliasdef.opts, aliasdef.help) if aliasdef.norepo: commands.norepo += ' %s' % alias @@ -483,15 +486,14 @@ lui = ui.copy() lui.readconfig(os.path.join(path, ".hg", "hgrc"), path) - if rpath: + if rpath and rpath[-1]: path = lui.expandpath(rpath[-1]) lui = ui.copy() lui.readconfig(os.path.join(path, ".hg", "hgrc"), path) return path, lui -def _checkshellalias(ui, args): - cwd = os.getcwd() +def _checkshellalias(lui, ui, args): norepo = commands.norepo options = {} @@ -503,12 +505,6 @@ if not args: return - _parseconfig(ui, options['config']) - if options['cwd']: - os.chdir(options['cwd']) - - path, lui = _getlocal(ui, [options['repository']]) - cmdtable = commands.table.copy() addaliases(lui, cmdtable) @@ -517,28 +513,22 @@ aliases, entry = cmdutil.findcmd(cmd, cmdtable, lui.config("ui", "strict")) except (error.AmbiguousCommand, error.UnknownCommand): commands.norepo = norepo - os.chdir(cwd) return cmd = aliases[0] fn = entry[0] - if cmd and hasattr(fn, 'shell'): + if cmd and util.safehasattr(fn, 'shell'): d = lambda: fn(ui, *args[1:]) return lambda: runcommand(lui, None, cmd, args[:1], ui, options, d, [], {}) commands.norepo = norepo - os.chdir(cwd) _loaded = set() def _dispatch(req): args = req.args ui = req.ui - shellaliasfn = _checkshellalias(ui, args) - if shellaliasfn: - return shellaliasfn() - # read --config before doing anything else # (e.g. to change trust settings for reading .hg/hgrc) cfgs = _parseconfig(ui, _earlygetopt(['--config'], args)) @@ -551,6 +541,12 @@ rpath = _earlygetopt(["-R", "--repository", "--repo"], args) path, lui = _getlocal(ui, rpath) + # Now that we're operating in the right directory/repository with + # the right config settings, check for shell aliases + shellaliasfn = _checkshellalias(lui, ui, args) + if shellaliasfn: + return shellaliasfn() + # Configure extensions in phases: uisetup, extsetup, cmdtable, and # reposetup. Programs like TortoiseHg will call _dispatch several # times so we keep track of configured extensions in _loaded. 
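The reordering above matters because options like -R/--repository are fished out of argv before the full option parse, so shell aliases can now be resolved against the repo-local hgrc instead of only the global configuration. A simplified, non-mutating sketch of such an early scan (the real dispatch._earlygetopt also removes the matched options from args and understands the attached '-Rfoo' form)::

    def earlygetopt(aliases, args):
        # collect values for the given option names, accepting both the
        # '--opt value' and '--opt=value' spellings; everything else is
        # left for the real option parser to handle later
        values = []
        pos = 0
        while pos < len(args):
            arg = args[pos]
            if arg in aliases and pos + 1 < len(args):
                values.append(args[pos + 1])
                pos += 1
            else:
                for alias in aliases:
                    if arg.startswith(alias + '='):
                        values.append(arg[len(alias) + 1:])
            pos += 1
        return values

    # earlygetopt(['-R', '--repository', '--repo'], ['-R', 'foo', 'log'])
    # -> ['foo'], discovered before aliases or extensions are loaded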
@@ -635,10 +631,10 @@ for ui_ in uis: ui_.setconfig('web', 'cacerts', '') + if options['version']: + return commands.version_(ui) if options['help']: - return commands.help_(ui, cmd, options['version']) - elif options['version']: - return commands.version_(ui) + return commands.help_(ui, cmd) elif not cmd: return commands.help_(ui, 'shortlist') diff -r fccd350acf79 -r 384082750f2c mercurial/encoding.py --- a/mercurial/encoding.py Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/encoding.py Sat Oct 15 14:30:50 2011 -0500 @@ -139,7 +139,7 @@ and "WFA" or "WF") def colwidth(s): - "Find the column width of a UTF-8 string for display" + "Find the column width of a string for display in the local encoding" return ucolwidth(s.decode(encoding, 'replace')) def ucolwidth(d): @@ -149,6 +149,14 @@ return sum([eaw(c) in wide and 2 or 1 for c in d]) return len(d) +def getcols(s, start, c): + '''Use colwidth to find a c-column substring of s starting at byte + index start''' + for x in xrange(start + c, len(s)): + t = s[start:x] + if colwidth(t) == c: + return t + def lower(s): "best-effort encoding-aware case-folding of local string s" try: diff -r fccd350acf79 -r 384082750f2c mercurial/error.py --- a/mercurial/error.py Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/error.py Sat Oct 15 14:30:50 2011 -0500 @@ -39,6 +39,9 @@ class ConfigError(Abort): 'Exception raised when parsing config files' +class OutOfBandError(Exception): + 'Exception raised when a remote repo reports failure' + class ParseError(Exception): 'Exception raised when parsing config files (msg[, pos])' diff -r fccd350acf79 -r 384082750f2c mercurial/extensions.py --- a/mercurial/extensions.py Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/extensions.py Sat Oct 15 14:30:50 2011 -0500 @@ -69,7 +69,9 @@ return mod try: mod = importh("hgext.%s" % name) - except ImportError: + except ImportError, err: + ui.debug('could not import hgext.%s (%s): trying %s\n' + % (name, err, name)) mod = importh(name) _extensions[shortname] = mod _order.append(shortname) @@ -124,7 +126,7 @@ where orig is the original (wrapped) function, and *args, **kwargs are the arguments passed to it. ''' - assert hasattr(wrapper, '__call__') + assert util.safehasattr(wrapper, '__call__') aliases, entry = cmdutil.findcmd(command, table) for alias, e in table.iteritems(): if e is entry: @@ -177,12 +179,12 @@ your end users, you should play nicely with others by using the subclass trick. 
''' - assert hasattr(wrapper, '__call__') + assert util.safehasattr(wrapper, '__call__') def wrap(*args, **kwargs): return wrapper(origfn, *args, **kwargs) origfn = getattr(container, funcname) - assert hasattr(origfn, '__call__') + assert util.safehasattr(origfn, '__call__') setattr(container, funcname, wrap) return origfn diff -r fccd350acf79 -r 384082750f2c mercurial/fancyopts.py --- a/mercurial/fancyopts.py Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/fancyopts.py Sat Oct 15 14:30:50 2011 -0500 @@ -75,7 +75,7 @@ # copy defaults to state if isinstance(default, list): state[name] = default[:] - elif hasattr(default, '__call__'): + elif getattr(default, '__call__', False): state[name] = None else: state[name] = default diff -r fccd350acf79 -r 384082750f2c mercurial/filemerge.py --- a/mercurial/filemerge.py Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/filemerge.py Sat Oct 15 14:30:50 2011 -0500 @@ -34,7 +34,8 @@ p = util.findexe(p + _toolstr(ui, tool, "regappend")) if p: return p - return util.findexe(_toolstr(ui, tool, "executable", tool)) + exe = _toolstr(ui, tool, "executable", tool) + return util.findexe(util.expandpath(exe)) def _picktool(repo, ui, path, binary, symlink): def check(tool, pat, symlink, binary): diff -r fccd350acf79 -r 384082750f2c mercurial/hbisect.py --- a/mercurial/hbisect.py Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/hbisect.py Sat Oct 15 14:30:50 2011 -0500 @@ -8,7 +8,7 @@ # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. -import os +import os, error from i18n import _ from node import short, hex import util @@ -35,17 +35,18 @@ # build visit array ancestors = [None] * (len(changelog) + 1) # an extra for [-1] - # set nodes descended from goodrev - ancestors[goodrev] = [] + # set nodes descended from goodrevs + for rev in goodrevs: + ancestors[rev] = [] for rev in xrange(goodrev + 1, len(changelog)): for prev in clparents(rev): if ancestors[prev] == []: ancestors[rev] = [] # clear good revs from array - for node in goodrevs: - ancestors[node] = None - for rev in xrange(len(changelog), -1, -1): + for rev in goodrevs: + ancestors[rev] = None + for rev in xrange(len(changelog), goodrev, -1): if ancestors[rev] is None: for prev in clparents(rev): ancestors[prev] = None @@ -149,7 +150,102 @@ for kind in state: for node in state[kind]: f.write("%s %s\n" % (kind, hex(node))) - f.rename() + f.close() finally: wlock.release() +def get(repo, status): + """ + Return a list of revision(s) that match the given status: + + - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip + - ``goods``, ``bads`` : csets topologically good/bad + - ``range`` : csets taking part in the bisection + - ``pruned`` : csets that are goods, bads or skipped + - ``untested`` : csets whose fate is yet unknown + - ``ignored`` : csets ignored due to DAG topology + """ + state = load_state(repo) + if status in ('good', 'bad', 'skip'): + return [repo.changelog.rev(n) for n in state[status]] + else: + # In the following sets, we do *not* call 'bisect()' with more + # than one level of recursion, because that can be very, very + # time consuming. Instead, we always develop the expression as + # much as possible.
+ + # 'range' is all csets that make the bisection: + # - have a good ancestor and a bad descendant, or conversely + # that's because the bisection can go either way + range = '( bisect(bad)::bisect(good) | bisect(good)::bisect(bad) )' + + _t = [c.rev() for c in repo.set('bisect(good)::bisect(bad)')] + # The sets of topologically good or bad csets + if len(_t) == 0: + # Goods are topologically after bads + goods = 'bisect(good)::' # Pruned good csets + bads = '::bisect(bad)' # Pruned bad csets + else: + # Goods are topologically before bads + goods = '::bisect(good)' # Pruned good csets + bads = 'bisect(bad)::' # Pruned bad csets + + # 'pruned' is all csets whose fate is already known: good, bad, skip + skips = 'bisect(skip)' # Pruned skipped csets + pruned = '( (%s) | (%s) | (%s) )' % (goods, bads, skips) + + # 'untested' is all csets that are in 'range', but not in 'pruned' + untested = '( (%s) - (%s) )' % (range, pruned) + + # 'ignored' is all csets that were not used during the bisection + # due to DAG topology, but may nevertheless have had an impact. + # E.g., a branch merged between bads and goods, but whose branch + # point is outside of the range. + iba = '::bisect(bad) - ::bisect(good)' # Ignored bads' ancestors + iga = '::bisect(good) - ::bisect(bad)' # Ignored goods' ancestors + ignored = '( ( (%s) | (%s) ) - (%s) )' % (iba, iga, range) + + if status == 'range': + return [c.rev() for c in repo.set(range)] + elif status == 'pruned': + return [c.rev() for c in repo.set(pruned)] + elif status == 'untested': + return [c.rev() for c in repo.set(untested)] + elif status == 'ignored': + return [c.rev() for c in repo.set(ignored)] + elif status == "goods": + return [c.rev() for c in repo.set(goods)] + elif status == "bads": + return [c.rev() for c in repo.set(bads)] + + else: + raise error.ParseError(_('invalid bisect state')) + +def label(repo, node, short=False): + rev = repo.changelog.rev(node) + + # Try explicit sets + if rev in get(repo, 'good'): + return _('good') + if rev in get(repo, 'bad'): + return _('bad') + if rev in get(repo, 'skip'): + return _('skipped') + if rev in get(repo, 'untested'): + return _('untested') + if rev in get(repo, 'ignored'): + return _('ignored') + + # Try implicit sets + if rev in get(repo, 'goods'): + return _('good (implicit)') + if rev in get(repo, 'bads'): + return _('bad (implicit)') + + return None + +def shortlabel(label): + if label: + return label[0].upper() + + return None diff -r fccd350acf79 -r 384082750f2c mercurial/help.py --- a/mercurial/help.py Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/help.py Sat Oct 15 14:30:50 2011 -0500 @@ -31,7 +31,7 @@ """Return a delayed loader for help/topic.txt.""" def loader(): - if hasattr(sys, 'frozen'): + if util.mainfrozen(): module = sys.executable else: module = __file__ diff -r fccd350acf79 -r 384082750f2c mercurial/help/config.txt --- a/mercurial/help/config.txt Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/help/config.txt Sat Oct 15 14:30:50 2011 -0500 @@ -223,6 +223,10 @@ ``$HG_ARGS`` expand to the arguments given to Mercurial. In the ``hg echo foo`` call above, ``$HG_ARGS`` would expand to ``echo foo``. +.. note:: Some global configuration options such as ``-R`` are + processed before shell aliases and will thus not be passed to + aliases. + ``auth`` """""""" @@ -1261,6 +1265,12 @@ ``ipv6`` Whether to use IPv6. Default is False. +``logoimg`` + File name of the logo image that some templates display on each page. + The file name is relative to ``staticurl``.
That is, the full path to + the logo image is "staticurl/logoimg". + If unset, ``hglogo.png`` will be used. + ``logourl`` Base URL to use for logos. If unset, ``http://mercurial.selenic.com/`` will be used. diff -r fccd350acf79 -r 384082750f2c mercurial/help/subrepos.txt --- a/mercurial/help/subrepos.txt Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/help/subrepos.txt Sat Oct 15 14:30:50 2011 -0500 @@ -1,13 +1,14 @@ Subrepositories let you nest external repositories or projects into a parent Mercurial repository, and make commands operate on them as a -group. External Mercurial and Subversion projects are currently -supported. +group. + +Mercurial currently supports Mercurial, Git, and Subversion +subrepositories. Subrepositories are made of three components: 1. Nested repository checkouts. They can appear anywhere in the - parent working directory, and are Mercurial clones or Subversion - checkouts. + parent working directory. 2. Nested repository references. They are defined in ``.hgsub`` and tell where the subrepository checkouts come from. Mercurial @@ -15,12 +16,15 @@ path/to/nested = https://example.com/nested/repo/path + Git and Subversion subrepos are also supported: + + path/to/nested = [git]git://example.com/nested/repo/path + path/to/nested = [svn]https://example.com/nested/trunk/path + where ``path/to/nested`` is the checkout location relative to the parent Mercurial root, and ``https://example.com/nested/repo/path`` is the source repository path. The source can also reference a - filesystem path. Subversion repositories are defined with: - - path/to/nested = [svn]https://example.com/nested/trunk/path + filesystem path. Note that ``.hgsub`` does not exist by default in Mercurial repositories, you have to create and add it to the parent diff -r fccd350acf79 -r 384082750f2c mercurial/hg.py --- a/mercurial/hg.py Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/hg.py Sat Oct 15 14:30:50 2011 -0500 @@ -98,9 +98,9 @@ hook(ui, repo) return repo -def peer(ui, opts, path, create=False): +def peer(uiorrepo, opts, path, create=False): '''return a repository peer for the specified path''' - rui = remoteui(ui, opts) + rui = remoteui(uiorrepo, opts) return repository(rui, path, create) def defaultdest(source): @@ -174,6 +174,36 @@ continue _update(r, uprev) +def copystore(ui, srcrepo, destpath): + '''copy files from store of srcrepo in destpath + + returns destlock + ''' + destlock = None + try: + hardlink = None + num = 0 + for f in srcrepo.store.copylist(): + src = os.path.join(srcrepo.sharedpath, f) + dst = os.path.join(destpath, f) + dstbase = os.path.dirname(dst) + if dstbase and not os.path.exists(dstbase): + os.mkdir(dstbase) + if os.path.exists(src): + if dst.endswith('data'): + # lock to avoid premature writing to the target + destlock = lock.lock(os.path.join(dstbase, "lock")) + hardlink, n = util.copyfiles(src, dst, hardlink) + num += n + if hardlink: + ui.debug("linked %d files\n" % num) + else: + ui.debug("copied %d files\n" % num) + return destlock + except: + release(destlock) + raise + def clone(ui, peeropts, source, dest=None, pull=False, rev=None, update=True, stream=False, branch=None): """Make a copy of an existing repository.
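copystore() leans on util.copyfiles to hardlink store files when the filesystem allows it and to fall back to plain copies when it does not, probing only once per run. A cut-down, single-file sketch of that fallback, assuming ordinary files (linkorcopy is an illustrative name, not util's API)::

    import os, shutil

    def linkorcopy(src, dst, hardlink=None):
        # hardlink is tri-state: None means "not probed yet"; after the
        # first file the result is reused so later files skip the
        # doomed syscall entirely
        if hardlink is None:
            hardlink = getattr(os, 'link', None) is not None
        if hardlink:
            try:
                os.link(src, dst)
                return hardlink
            except OSError:
                # e.g. cross-device link or an unsupported filesystem
                hardlink = False
        shutil.copy(src, dst)
        return hardlink

    # hardlink = None
    # for f in copylist:
    #     hardlink = linkorcopy(os.path.join(srcdir, f),
    #                           os.path.join(dstdir, f), hardlink)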
@@ -287,24 +317,7 @@ % dest) raise - hardlink = None - num = 0 - for f in srcrepo.store.copylist(): - src = os.path.join(srcrepo.sharedpath, f) - dst = os.path.join(destpath, f) - dstbase = os.path.dirname(dst) - if dstbase and not os.path.exists(dstbase): - os.mkdir(dstbase) - if os.path.exists(src): - if dst.endswith('data'): - # lock to avoid premature writing to the target - destlock = lock.lock(os.path.join(dstbase, "lock")) - hardlink, n = util.copyfiles(src, dst, hardlink) - num += n - if hardlink: - ui.debug("linked %d files\n" % num) - else: - ui.debug("copied %d files\n" % num) + destlock = copystore(ui, srcrepo, destpath) # we need to re-init the repo after manually copying the data # into it @@ -537,7 +550,7 @@ def remoteui(src, opts): 'build a remote ui from ui or repo and opts' - if hasattr(src, 'baseui'): # looks like a repository + if util.safehasattr(src, 'baseui'): # looks like a repository dst = src.baseui.copy() # drop repo-specific config src = src.ui # copy target options from repo else: # assume it's a global ui object diff -r fccd350acf79 -r 384082750f2c mercurial/hgweb/hgweb_mod.py --- a/mercurial/hgweb/hgweb_mod.py Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/hgweb/hgweb_mod.py Sat Oct 15 14:30:50 2011 -0500 @@ -7,7 +7,7 @@ # GNU General Public License version 2 or any later version. import os -from mercurial import ui, hg, hook, error, encoding, templater +from mercurial import ui, hg, hook, error, encoding, templater, util from common import get_stat, ErrorResponse, permhooks, caching from common import HTTP_OK, HTTP_NOT_MODIFIED, HTTP_BAD_REQUEST from common import HTTP_NOT_FOUND, HTTP_SERVER_ERROR @@ -148,7 +148,7 @@ cmd = cmd[style + 1:] # avoid accepting e.g. style parameter as command - if hasattr(webcommands, cmd): + if util.safehasattr(webcommands, cmd): req.form['cmd'] = [cmd] else: cmd = '' @@ -236,6 +236,7 @@ port = port != default_port and (":" + port) or "" urlbase = '%s://%s%s' % (proto, req.env['SERVER_NAME'], port) logourl = self.config("web", "logourl", "http://mercurial.selenic.com/") + logoimg = self.config("web", "logoimg", "hglogo.png") staticurl = self.config("web", "staticurl") or req.url + 'static/' if not staticurl.endswith('/'): staticurl += '/' @@ -276,6 +277,7 @@ tmpl = templater.templater(mapfile, defaults={"url": req.url, "logourl": logourl, + "logoimg": logoimg, "staticurl": staticurl, "urlbase": urlbase, "repo": self.reponame, diff -r fccd350acf79 -r 384082750f2c mercurial/hgweb/hgwebdir_mod.py --- a/mercurial/hgweb/hgwebdir_mod.py Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/hgweb/hgwebdir_mod.py Sat Oct 15 14:30:50 2011 -0500 @@ -51,6 +51,33 @@ yield (prefix + '/' + util.pconvert(path[len(roothead):]).lstrip('/')).strip('/'), path +def geturlcgivars(baseurl, port): + """ + Extract CGI variables from baseurl + + >>> geturlcgivars("http://host.org/base", "80") + ('host.org', '80', '/base') + >>> geturlcgivars("http://host.org:8000/base", "80") + ('host.org', '8000', '/base') + >>> geturlcgivars('/base', 8000) + ('', '8000', '/base') + >>> geturlcgivars("base", '8000') + ('', '8000', '/base') + >>> geturlcgivars("http://host", '8000') + ('host', '8000', '/') + >>> geturlcgivars("http://host/", '8000') + ('host', '8000', '/') + """ + u = util.url(baseurl) + name = u.host or '' + if u.port: + port = u.port + path = u.path or "" + if not path.startswith('/'): + path = '/' + path + + return name, str(port), path + class hgwebdir(object): refreshinterval = 20 @@ -348,6 +375,7 @@ start = url[-1] == '?' and '&' or '?' 
sessionvars = webutil.sessionvars(vars, start) logourl = config('web', 'logourl', 'http://mercurial.selenic.com/') + logoimg = config('web', 'logoimg', 'hglogo.png') staticurl = config('web', 'staticurl') or url + 'static/' if not staticurl.endswith('/'): staticurl += '/' @@ -358,17 +386,14 @@ "motd": motd, "url": url, "logourl": logourl, + "logoimg": logoimg, "staticurl": staticurl, "sessionvars": sessionvars}) return tmpl def updatereqenv(self, env): if self._baseurl is not None: - u = util.url(self._baseurl) - env['SERVER_NAME'] = u.host - if u.port: - env['SERVER_PORT'] = u.port - path = u.path or "" - if not path.startswith('/'): - path = '/' + path + name, port, path = geturlcgivars(self._baseurl, env['SERVER_PORT']) + env['SERVER_NAME'] = name + env['SERVER_PORT'] = port env['SCRIPT_NAME'] = path diff -r fccd350acf79 -r 384082750f2c mercurial/hgweb/protocol.py --- a/mercurial/hgweb/protocol.py Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/hgweb/protocol.py Sat Oct 15 14:30:50 2011 -0500 @@ -10,6 +10,7 @@ from common import HTTP_OK HGTYPE = 'application/mercurial-0.1' +HGERRTYPE = 'application/hg-error' class webproto(object): def __init__(self, req, ui): @@ -90,3 +91,7 @@ rsp = '0\n%s\n' % rsp.res req.respond(HTTP_OK, HGTYPE, length=len(rsp)) return [rsp] + elif isinstance(rsp, wireproto.ooberror): + rsp = rsp.message + req.respond(HTTP_OK, HGERRTYPE, length=len(rsp)) + return [rsp] diff -r fccd350acf79 -r 384082750f2c mercurial/hgweb/request.py --- a/mercurial/hgweb/request.py Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/hgweb/request.py Sat Oct 15 14:30:50 2011 -0500 @@ -101,7 +101,7 @@ self.headers = [] def write(self, thing): - if hasattr(thing, "__iter__"): + if util.safehasattr(thing, "__iter__"): for part in thing: self.write(part) else: diff -r fccd350acf79 -r 384082750f2c mercurial/hgweb/server.py --- a/mercurial/hgweb/server.py Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/hgweb/server.py Sat Oct 15 14:30:50 2011 -0500 @@ -246,9 +246,10 @@ try: from threading import activeCount + activeCount() # silence pyflakes _mixin = SocketServer.ThreadingMixIn except ImportError: - if hasattr(os, "fork"): + if util.safehasattr(os, "fork"): _mixin = SocketServer.ForkingMixIn else: class _mixin(object): diff -r fccd350acf79 -r 384082750f2c mercurial/hgweb/webutil.py --- a/mercurial/hgweb/webutil.py Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/hgweb/webutil.py Sat Oct 15 14:30:50 2011 -0500 @@ -72,7 +72,7 @@ d['date'] = s.date() d['description'] = s.description() d['branch'] = s.branch() - if hasattr(s, 'path'): + if util.safehasattr(s, 'path'): d['file'] = s.path() yield d diff -r fccd350acf79 -r 384082750f2c mercurial/hgweb/wsgicgi.py --- a/mercurial/hgweb/wsgicgi.py Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/hgweb/wsgicgi.py Sat Oct 15 14:30:50 2011 -0500 @@ -78,5 +78,4 @@ for chunk in content: write(chunk) finally: - if hasattr(content, 'close'): - content.close() + getattr(content, 'close', lambda : None)() diff -r fccd350acf79 -r 384082750f2c mercurial/hook.py --- a/mercurial/hook.py Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/hook.py Sat Oct 15 14:30:50 2011 -0500 @@ -21,14 +21,14 @@ ui.note(_("calling hook %s: %s\n") % (hname, funcname)) obj = funcname - if not hasattr(obj, '__call__'): + if not util.safehasattr(obj, '__call__'): d = funcname.rfind('.') if d == -1: raise util.Abort(_('%s hook is invalid ("%s" not in ' 'a module)') % (hname, funcname)) modname = funcname[:d] oldpaths = sys.path - if hasattr(sys, "frozen"): + if util.mainfrozen(): # binary installs 
require sys.path manipulation modpath, modfile = os.path.split(modname) if modpath and modfile: @@ -60,7 +60,7 @@ raise util.Abort(_('%s hook is invalid ' '("%s" is not defined)') % (hname, funcname)) - if not hasattr(obj, '__call__'): + if not util.safehasattr(obj, '__call__'): raise util.Abort(_('%s hook is invalid ' '("%s" is not callable)') % (hname, funcname)) @@ -99,7 +99,7 @@ env = {} for k, v in args.iteritems(): - if hasattr(v, '__call__'): + if util.safehasattr(v, '__call__'): v = v() if isinstance(v, dict): # make the dictionary element order stable across Python @@ -149,7 +149,7 @@ for hname, cmd in ui.configitems('hooks'): if hname.split('.')[0] != name or not cmd: continue - if hasattr(cmd, '__call__'): + if util.safehasattr(cmd, '__call__'): r = _pythonhook(ui, repo, name, hname, cmd, args, throw) or r elif cmd.startswith('python:'): if cmd.count(':') >= 2: diff -r fccd350acf79 -r 384082750f2c mercurial/httpclient/__init__.py --- a/mercurial/httpclient/__init__.py Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/httpclient/__init__.py Sat Oct 15 14:30:50 2011 -0500 @@ -171,6 +171,14 @@ logger.info('cl: %r body: %r', self._content_len, self._body) try: data = self.sock.recv(INCOMING_BUFFER_SIZE) + # If the socket was readable and no data was read, that + # means the socket was closed. If this isn't a + # _CLOSE_IS_END socket, then something is wrong if we're + # here (we shouldn't enter _select() if the response is + # complete), so abort. + if not data and self._content_len != _LEN_CLOSE_IS_END: + raise HTTPRemoteClosedError( + 'server appears to have closed the socket mid-response') except socket.sslerror, e: if e.args[0] != socket.SSL_ERROR_WANT_READ: raise @@ -693,6 +701,11 @@ class HTTPProxyConnectFailedException(httplib.HTTPException): """Connecting to the HTTP proxy failed.""" + class HTTPStateError(httplib.HTTPException): """Invalid internal state encountered.""" + + +class HTTPRemoteClosedError(httplib.HTTPException): + """The server closed the remote socket in the middle of a response.""" # no-check-code diff -r fccd350acf79 -r 384082750f2c mercurial/httpclient/tests/simple_http_test.py --- a/mercurial/httpclient/tests/simple_http_test.py Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/httpclient/tests/simple_http_test.py Sat Oct 15 14:30:50 2011 -0500 @@ -380,6 +380,21 @@ con.request('GET', '/') self.assertEqual(2, len(sockets)) + def test_server_closes_before_end_of_body(self): + con = http.HTTPConnection('1.2.3.4:80') + con._connect() + s = con.sock + s.data = ['HTTP/1.1 200 OK\r\n', + 'Server: BogusServer 1.0\r\n', + 'Connection: Keep-Alive\r\n', + 'Content-Length: 16', + '\r\n\r\n', + 'You can '] # Note: this is shorter than content-length + s.close_on_empty = True + con.request('GET', '/') + r1 = con.getresponse() + self.assertRaises(http.HTTPRemoteClosedError, r1.read) + def test_no_response_raises_response_not_ready(self): con = http.HTTPConnection('foo') self.assertRaises(http.httplib.ResponseNotReady, con.getresponse) diff -r fccd350acf79 -r 384082750f2c mercurial/httpclient/tests/test_chunked_transfer.py --- a/mercurial/httpclient/tests/test_chunked_transfer.py Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/httpclient/tests/test_chunked_transfer.py Sat Oct 15 14:30:50 2011 -0500 @@ -134,4 +134,20 @@ con.request('GET', '/') self.assertStringEqual('hi there\nthere\nthere\nthere\nthere\n', con.getresponse().read()) + + def testChunkedDownloadEarlyHangup(self): + con = http.HTTPConnection('1.2.3.4:80') + con._connect() + sock = con.sock + broken = 
chunkedblock('hi'*20)[:-1] + sock.data = ['HTTP/1.1 200 OK\r\n', + 'Server: BogusServer 1.0\r\n', + 'transfer-encoding: chunked', + '\r\n\r\n', + broken, + ] + sock.close_on_empty = True + con.request('GET', '/') + resp = con.getresponse() + self.assertRaises(http.HTTPRemoteClosedError, resp.read) # no-check-code diff -r fccd350acf79 -r 384082750f2c mercurial/httprepo.py --- a/mercurial/httprepo.py Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/httprepo.py Sat Oct 15 14:30:50 2011 -0500 @@ -28,6 +28,7 @@ self.path = path self.caps = None self.handler = None + self.urlopener = None u = util.url(path) if u.query or u.fragment: raise util.Abort(_('unsupported URL component: "%s"') % @@ -42,10 +43,10 @@ self.urlopener = url.opener(ui, authinfo) def __del__(self): - for h in self.urlopener.handlers: - h.close() - if hasattr(h, "close_all"): - h.close_all() + if self.urlopener: + for h in self.urlopener.handlers: + h.close() + getattr(h, "close_all", lambda : None)() def url(self): return self.path @@ -139,6 +140,8 @@ proto = resp.headers.get('content-type', '') safeurl = util.hidepassword(self._url) + if proto.startswith('application/hg-error'): + raise error.OutOfBandError(resp.read()) # accept old "text/plain" and "application/hg-changegroup" for now if not (proto.startswith('application/mercurial-') or proto.startswith('text/plain') or diff -r fccd350acf79 -r 384082750f2c mercurial/i18n.py --- a/mercurial/i18n.py Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/i18n.py Sat Oct 15 14:30:50 2011 -0500 @@ -9,7 +9,7 @@ import gettext, sys, os # modelled after templater.templatepath: -if hasattr(sys, 'frozen'): +if getattr(sys, 'frozen', None) is not None: module = sys.executable else: module = __file__ @@ -61,4 +61,3 @@ _ = lambda message: message else: _ = gettext - diff -r fccd350acf79 -r 384082750f2c mercurial/keepalive.py --- a/mercurial/keepalive.py Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/keepalive.py Sat Oct 15 14:30:50 2011 -0500 @@ -547,13 +547,14 @@ print "send:", repr(str) try: blocksize = 8192 - if hasattr(str,'read') : + read = getattr(str, 'read', None) + if read is not None: if self.debuglevel > 0: print "sendIng a read()able" - data = str.read(blocksize) + data = read(blocksize) while data: self.sock.sendall(data) - data = str.read(blocksize) + data = read(blocksize) else: self.sock.sendall(str) except socket.error, v: diff -r fccd350acf79 -r 384082750f2c mercurial/localrepo.py --- a/mercurial/localrepo.py Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/localrepo.py Sat Oct 15 14:30:50 2011 -0500 @@ -10,13 +10,14 @@ import repo, changegroup, subrepo, discovery, pushkey import changelog, dirstate, filelog, manifest, context, bookmarks import lock, transaction, store, encoding -import scmutil, util, extensions, hook, error +import scmutil, util, extensions, hook, error, revset import match as matchmod import merge as mergemod import tags as tagsmod from lock import release import weakref, errno, os, time, inspect propertycache = util.propertycache +filecache = scmutil.filecache class localrepository(repo.repository): capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey', @@ -63,6 +64,7 @@ ) if self.ui.configbool('format', 'generaldelta', False): requirements.append("generaldelta") + requirements = set(requirements) else: raise error.RepoError(_("repository %s not found") % path) elif create: @@ -77,7 +79,7 @@ self.sharedpath = self.path try: - s = os.path.realpath(self.opener.read("sharedpath")) + s = 
os.path.realpath(self.opener.read("sharedpath").rstrip('\n')) if not os.path.exists(s): raise error.RepoError( _('.hg/sharedpath points to nonexistent directory %s') % s) @@ -95,21 +97,19 @@ if create: self._writerequirements() - # These two define the set of tags for this repository. _tags - # maps tag name to node; _tagtypes maps tag name to 'global' or - # 'local'. (Global tags are defined by .hgtags across all - # heads, and local tags are defined in .hg/localtags.) They - # constitute the in-memory cache of tags. - self._tags = None - self._tagtypes = None self._branchcache = None self._branchcachetip = None - self.nodetagscache = None self.filterpats = {} self._datafilters = {} self._transref = self._lockref = self._wlockref = None + # A cache for various files under .hg/ that tracks file changes, + # (used by the filecache decorator) + # + # Maps a property name to its util.filecacheentry + self._filecache = {} + def _applyrequirements(self, requirements): self.requirements = requirements openerreqs = set(('revlogv1', 'generaldelta')) @@ -159,15 +159,18 @@ parts.pop() return False - @util.propertycache + @filecache('bookmarks') def _bookmarks(self): return bookmarks.read(self) - @util.propertycache + @filecache('bookmarks.current') def _bookmarkcurrent(self): return bookmarks.readcurrent(self) - @propertycache + def _writebookmarks(self, marks): + bookmarks.write(self) + + @filecache('00changelog.i', True) def changelog(self): c = changelog.changelog(self.sopener) if 'HG_PENDING' in os.environ: @@ -176,11 +179,11 @@ c.readpending('00changelog.i.a') return c - @propertycache + @filecache('00manifest.i', True) def manifest(self): return manifest.manifest(self.sopener) - @propertycache + @filecache('dirstate') def dirstate(self): warned = [0] def validate(node): @@ -217,6 +220,17 @@ for i in xrange(len(self)): yield i + def set(self, expr, *args): + ''' + Yield a context for each matching revision, after doing arg + replacement via revset.formatspec + ''' + + expr = revset.formatspec(expr, *args) + m = revset.match(None, expr) + for r in m(self, range(len(self))): + yield self[r] + def url(self): return 'file:' + self.root @@ -249,8 +263,8 @@ fp.write('\n') for name in names: m = munge and munge(name) or name - if self._tagtypes and name in self._tagtypes: - old = self._tags.get(name, nullid) + if self._tagscache.tagtypes and name in self._tagscache.tagtypes: + old = self.tags().get(name, nullid) fp.write('%s %s\n' % (hex(old), m)) fp.write('%s %s\n' % (hex(node), m)) fp.close() @@ -325,12 +339,31 @@ self.tags() # instantiate the cache self._tag(names, node, message, local, user, date) + @propertycache + def _tagscache(self): + '''Returns a tagscache object that contains various tags related caches.''' + + # This simplifies its cache management by having one decorated + # function (this one) and the rest simply fetch things from it. + class tagscache(object): + def __init__(self): + # These two define the set of tags for this repository. tags + # maps tag name to node; tagtypes maps tag name to 'global' or + # 'local'. (Global tags are defined by .hgtags across all + # heads, and local tags are defined in .hg/localtags.) + # They constitute the in-memory cache of tags. 
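+            # (both start as None and are filled in by the _findtags()
+            # call just below; invalidatecaches() deletes this whole
+            # tagscache object to force a rebuild)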
+ self.tags = self.tagtypes = None + + self.nodetagscache = self.tagslist = None + + cache = tagscache() + cache.tags, cache.tagtypes = self._findtags() + + return cache + def tags(self): '''return a mapping of tag to node''' - if self._tags is None: - (self._tags, self._tagtypes) = self._findtags() - - return self._tags + return self._tagscache.tags def _findtags(self): '''Do the hard work of finding tags. Return a pair of dicts @@ -379,27 +412,29 @@ None : tag does not exist ''' - self.tags() - - return self._tagtypes.get(tagname) + return self._tagscache.tagtypes.get(tagname) def tagslist(self): '''return a list of tags ordered by revision''' - l = [] - for t, n in self.tags().iteritems(): - r = self.changelog.rev(n) - l.append((r, t, n)) - return [(t, n) for r, t, n in sorted(l)] + if not self._tagscache.tagslist: + l = [] + for t, n in self.tags().iteritems(): + r = self.changelog.rev(n) + l.append((r, t, n)) + self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)] + + return self._tagscache.tagslist def nodetags(self, node): '''return the tags associated with a node''' - if not self.nodetagscache: - self.nodetagscache = {} + if not self._tagscache.nodetagscache: + nodetagscache = {} for t, n in self.tags().iteritems(): - self.nodetagscache.setdefault(n, []).append(t) - for tags in self.nodetagscache.itervalues(): + nodetagscache.setdefault(n, []).append(t) + for tags in nodetagscache.itervalues(): tags.sort() - return self.nodetagscache.get(node, []) + self._tagscache.nodetagscache = nodetagscache + return self._tagscache.nodetagscache.get(node, []) def nodebookmarks(self, node): marks = [] @@ -489,7 +524,7 @@ for label, nodes in branches.iteritems(): for node in nodes: f.write("%s %s\n" % (hex(node), encoding.fromlocal(label))) - f.rename() + f.close() except (IOError, OSError): pass @@ -722,67 +757,112 @@ finally: lock.release() - def rollback(self, dryrun=False): + def rollback(self, dryrun=False, force=False): wlock = lock = None try: wlock = self.wlock() lock = self.lock() if os.path.exists(self.sjoin("undo")): - try: - args = self.opener.read("undo.desc").splitlines() - if len(args) >= 3 and self.ui.verbose: - desc = _("repository tip rolled back to revision %s" - " (undo %s: %s)\n") % ( - int(args[0]) - 1, args[1], args[2]) - elif len(args) >= 2: - desc = _("repository tip rolled back to revision %s" - " (undo %s)\n") % ( - int(args[0]) - 1, args[1]) - except IOError: - desc = _("rolling back unknown transaction\n") - self.ui.status(desc) - if dryrun: - return - transaction.rollback(self.sopener, self.sjoin("undo"), - self.ui.warn) - util.rename(self.join("undo.dirstate"), self.join("dirstate")) - if os.path.exists(self.join('undo.bookmarks')): - util.rename(self.join('undo.bookmarks'), - self.join('bookmarks')) - try: - branch = self.opener.read("undo.branch") - self.dirstate.setbranch(branch) - except IOError: - self.ui.warn(_("named branch could not be reset, " - "current branch is still: %s\n") - % self.dirstate.branch()) - self.invalidate() - self.dirstate.invalidate() - self.destroyed() - parents = tuple([p.rev() for p in self.parents()]) - if len(parents) > 1: - self.ui.status(_("working directory now based on " - "revisions %d and %d\n") % parents) - else: - self.ui.status(_("working directory now based on " - "revision %d\n") % parents) + return self._rollback(dryrun, force) else: self.ui.warn(_("no rollback information available\n")) return 1 finally: release(lock, wlock) + def _rollback(self, dryrun, force): + ui = self.ui + try: + args = 
self.opener.read('undo.desc').splitlines() + (oldlen, desc, detail) = (int(args[0]), args[1], None) + if len(args) >= 3: + detail = args[2] + oldtip = oldlen - 1 + + if detail and ui.verbose: + msg = (_('repository tip rolled back to revision %s' + ' (undo %s: %s)\n') + % (oldtip, desc, detail)) + else: + msg = (_('repository tip rolled back to revision %s' + ' (undo %s)\n') + % (oldtip, desc)) + except IOError: + msg = _('rolling back unknown transaction\n') + desc = None + + if not force and self['.'] != self['tip'] and desc == 'commit': + raise util.Abort( + _('rollback of last commit while not checked out ' + 'may lose data'), hint=_('use -f to force')) + + ui.status(msg) + if dryrun: + return 0 + + parents = self.dirstate.parents() + transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn) + if os.path.exists(self.join('undo.bookmarks')): + util.rename(self.join('undo.bookmarks'), + self.join('bookmarks')) + self.invalidate() + + parentgone = (parents[0] not in self.changelog.nodemap or + parents[1] not in self.changelog.nodemap) + if parentgone: + util.rename(self.join('undo.dirstate'), self.join('dirstate')) + try: + branch = self.opener.read('undo.branch') + self.dirstate.setbranch(branch) + except IOError: + ui.warn(_('named branch could not be reset: ' + 'current branch is still \'%s\'\n') + % self.dirstate.branch()) + + self.dirstate.invalidate() + self.destroyed() + parents = tuple([p.rev() for p in self.parents()]) + if len(parents) > 1: + ui.status(_('working directory now based on ' + 'revisions %d and %d\n') % parents) + else: + ui.status(_('working directory now based on ' + 'revision %d\n') % parents) + return 0 + def invalidatecaches(self): - self._tags = None - self._tagtypes = None - self.nodetagscache = None + try: + delattr(self, '_tagscache') + except AttributeError: + pass + self._branchcache = None # in UTF-8 self._branchcachetip = None + def invalidatedirstate(self): + '''Invalidates the dirstate, causing the next call to dirstate + to check if it was modified since the last time it was read, + rereading it if it has. + + This is different to dirstate.invalidate() that it doesn't always + rereads the dirstate. Use dirstate.invalidate() if you want to + explicitly read the dirstate again (i.e. 
restoring it to a previous + known good state).''' + try: + delattr(self, 'dirstate') + except AttributeError: + pass + def invalidate(self): - for a in ("changelog", "manifest", "_bookmarks", "_bookmarkcurrent"): - if a in self.__dict__: - delattr(self, a) + for k in self._filecache: + # dirstate is invalidated separately in invalidatedirstate() + if k == 'dirstate': + continue + + try: + delattr(self, k) + except AttributeError: + pass self.invalidatecaches() def _lock(self, lockname, wait, releasefn, acquirefn, desc): @@ -809,7 +889,14 @@ l.lock() return l - l = self._lock(self.sjoin("lock"), wait, self.store.write, + def unlock(): + self.store.write() + for k, ce in self._filecache.items(): + if k == 'dirstate': + continue + ce.refresh() + + l = self._lock(self.sjoin("lock"), wait, unlock, self.invalidate, _('repository %s') % self.origroot) self._lockref = weakref.ref(l) return l @@ -823,8 +910,14 @@ l.lock() return l - l = self._lock(self.join("wlock"), wait, self.dirstate.write, - self.dirstate.invalidate, _('working directory of %s') % + def unlock(): + self.dirstate.write() + ce = self._filecache.get('dirstate') + if ce: + ce.refresh() + + l = self._lock(self.join("wlock"), wait, unlock, + self.invalidatedirstate, _('working directory of %s') % self.origroot) self._wlockref = weakref.ref(l) return l diff -r fccd350acf79 -r 384082750f2c mercurial/lsprof.py --- a/mercurial/lsprof.py Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/lsprof.py Sat Oct 15 14:30:50 2011 -0500 @@ -86,9 +86,7 @@ for k, v in list(sys.modules.iteritems()): if v is None: continue - if not hasattr(v, '__file__'): - continue - if not isinstance(v.__file__, str): + if not isinstance(getattr(v, '__file__', None), str): continue if v.__file__.startswith(code.co_filename): mname = _fn2mod[code.co_filename] = k diff -r fccd350acf79 -r 384082750f2c mercurial/mail.py --- a/mercurial/mail.py Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/mail.py Sat Oct 15 14:30:50 2011 -0500 @@ -37,7 +37,7 @@ # backward compatible: when tls = true, we use starttls. starttls = tls == 'starttls' or util.parsebool(tls) smtps = tls == 'smtps' - if (starttls or smtps) and not hasattr(socket, 'ssl'): + if (starttls or smtps) and not util.safehasattr(socket, 'ssl'): raise util.Abort(_("can't use TLS: Python SSL support not installed")) if smtps: ui.note(_('(using smtps)\n')) diff -r fccd350acf79 -r 384082750f2c mercurial/match.py --- a/mercurial/match.py Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/match.py Sat Oct 15 14:30:50 2011 -0500 @@ -49,7 +49,6 @@ '' - a pattern of the specified default type """ - self._ctx = None self._root = root self._cwd = cwd self._files = [] diff -r fccd350acf79 -r 384082750f2c mercurial/mdiff.py --- a/mercurial/mdiff.py Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/mdiff.py Sat Oct 15 14:30:50 2011 -0500 @@ -157,6 +157,7 @@ return 0 return ret + lastfunc = [0, ''] def yieldhunk(hunk): (astart, a2, bstart, b2, delta) = hunk aend = contextend(a2, len(l1)) @@ -165,13 +166,19 @@ func = "" if opts.showfunc: - # walk backwards from the start of the context - # to find a line starting with an alphanumeric char. - for x in xrange(astart - 1, -1, -1): - t = l1[x].rstrip() - if funcre.match(t): - func = ' ' + t[:40] + lastpos, func = lastfunc + # walk backwards from the start of the context up to the start of + # the previous hunk context until we find a line starting with an + # alphanumeric char. 
+ for i in xrange(astart - 1, lastpos - 1, -1): + if l1[i][0].isalnum(): + func = ' ' + l1[i].rstrip()[:40] + lastfunc[1] = func break + # by recording this hunk's starting point as the next place to + # start looking for function lines, we avoid reading any line in + # the file more than once. + lastfunc[0] = astart yield "@@ -%d,%d +%d,%d @@%s\n" % (astart + 1, alen, bstart + 1, blen, func) @@ -180,9 +187,6 @@ for x in xrange(a2, aend): yield ' ' + l1[x] - if opts.showfunc: - funcre = re.compile('\w') - # bdiff.blocks gives us the matching sequences in the files. The loop # below finds the spaces between those matching sequences and translates # them into diff output. diff -r fccd350acf79 -r 384082750f2c mercurial/merge.py --- a/mercurial/merge.py Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/merge.py Sat Oct 15 14:30:50 2011 -0500 @@ -273,7 +273,6 @@ action.sort(key=actionkey) # prescan for merges - u = repo.ui for a in action: f, m = a[:2] if m == 'm': # merge @@ -308,8 +307,8 @@ numupdates = len(action) for i, a in enumerate(action): f, m = a[:2] - u.progress(_('updating'), i + 1, item=f, total=numupdates, - unit=_('files')) + repo.ui.progress(_('updating'), i + 1, item=f, total=numupdates, + unit=_('files')) if f and f[0] == "/": continue if m == "r": # remove @@ -377,7 +376,7 @@ repo.wopener.audit(f) util.setflags(repo.wjoin(f), 'l' in flags, 'x' in flags) ms.commit() - u.progress(_('updating'), None, total=numupdates, unit=_('files')) + repo.ui.progress(_('updating'), None, total=numupdates, unit=_('files')) return updated, merged, removed, unresolved diff -r fccd350acf79 -r 384082750f2c mercurial/minirst.py --- a/mercurial/minirst.py Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/minirst.py Sat Oct 15 14:30:50 2011 -0500 @@ -18,17 +18,14 @@ when adding support for new constructs. """ -import re, sys +import re import util, encoding from i18n import _ - def replace(text, substs): - utext = text.decode(encoding.encoding) for f, t in substs: - utext = utext.replace(f, t) - return utext.encode(encoding.encoding) - + text = text.replace(f, t) + return text _blockre = re.compile(r"\n(?:\s*\n)+") @@ -39,14 +36,14 @@ has an 'indent' field and a 'lines' field. """ blocks = [] - for b in _blockre.split(text.strip()): + for b in _blockre.split(text.lstrip('\n').rstrip()): lines = b.splitlines() - indent = min((len(l) - len(l.lstrip())) for l in lines) - lines = [l[indent:] for l in lines] - blocks.append(dict(indent=indent, lines=lines)) + if lines: + indent = min((len(l) - len(l.lstrip())) for l in lines) + lines = [l[indent:] for l in lines] + blocks.append(dict(indent=indent, lines=lines)) return blocks - def findliteralblocks(blocks): """Finds literal blocks and adds a 'type' field to the blocks. @@ -103,6 +100,7 @@ r'((.*) +)(.*)$') _fieldre = re.compile(r':(?![: ])([^:]*)(? 
indent: if prune: del blocks[j] - i -= 1 # adjust outer index else: blocks[j]['indent'] -= adjustment j += 1 i += 1 return blocks, pruned +_sectionre = re.compile(r"""^([-=`:.'"~^_*+#])\1+$""") -_sectionre = re.compile(r"""^([-=`:.'"~^_*+#])\1+$""") +def findtables(blocks): + '''Find simple tables + + Only simple one-line table elements are supported + ''' + + for block in blocks: + # Searching for a block that looks like this: + # + # === ==== === + # A B C + # === ==== === <- optional + # 1 2 3 + # x y z + # === ==== === + if (block['type'] == 'paragraph' and + len(block['lines']) > 2 and + _tablere.match(block['lines'][0]) and + block['lines'][0] == block['lines'][-1]): + block['type'] = 'table' + block['header'] = False + div = block['lines'][0] + + # column markers are ASCII so we can calculate column + # position in bytes + columns = [x for x in xrange(len(div)) + if div[x] == '=' and (x == 0 or div[x - 1] == ' ')] + rows = [] + for l in block['lines'][1:-1]: + if l == div: + block['header'] = True + continue + row = [] + # we measure columns not in bytes or characters but in + # colwidth which makes things tricky + pos = columns[0] # leading whitespace is bytes + for n, start in enumerate(columns): + if n + 1 < len(columns): + width = columns[n + 1] - start + v = encoding.getcols(l, pos, width) # gather columns + pos += len(v) # calculate byte position of end + row.append(v.strip()) + else: + row.append(l[pos:].strip()) + rows.append(row) + + block['table'] = rows + + return blocks def findsections(blocks): """Finds sections. @@ -273,7 +318,6 @@ del block['lines'][1] return blocks - def inlineliterals(blocks): substs = [('``', '"')] for b in blocks: @@ -281,7 +325,6 @@ b['lines'] = [replace(l, substs) for l in b['lines']] return blocks - def hgrole(blocks): substs = [(':hg:`', '"hg '), ('`', '"')] for b in blocks: @@ -293,7 +336,6 @@ b['lines'] = [replace(l, substs) for l in b['lines']] return blocks - def addmargins(blocks): """Adds empty blocks for vertical spacing. 
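(For context: findtables() above keys everything off the '=' divider line. A minimal standalone sketch of the same column-position idea, in the codebase's Python 2 style, ignoring the multibyte colwidth handling the real code does; parsetable() is an invented name for illustration:)

    def parsetable(lines):
        # column starts are the '=' runs in the divider line
        div = lines[0]
        columns = [x for x in range(len(div))
                   if div[x] == '=' and (x == 0 or div[x - 1] == ' ')]
        rows = []
        for l in lines[1:-1]:
            if l == div:        # header/body separator line
                continue
            rows.append([l[s:e].strip()
                         for s, e in zip(columns, columns[1:] + [None])])
        return rows

    print parsetable(['=== ==== ===',
                      'A   B    C',
                      '=== ==== ===',
                      '1   2    3',
                      '=== ==== ==='])
    # -> [['A', 'B', 'C'], ['1', '2', '3']]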
@@ -366,7 +408,7 @@ hanging = block['optstrwidth'] initindent = '%s%s ' % (block['optstr'], ' ' * ((hanging - colwidth))) hangindent = ' ' * (encoding.colwidth(initindent) + 1) - return ' %s' % (util.wrap(desc, usablewidth, + return ' %s\n' % (util.wrap(desc, usablewidth, initindent=initindent, hangindent=hangindent)) @@ -381,25 +423,47 @@ defindent = indent + hang * ' ' text = ' '.join(map(str.strip, block['lines'])) - return '%s\n%s' % (indent + admonition, util.wrap(text, width=width, - initindent=defindent, - hangindent=defindent)) + return '%s\n%s\n' % (indent + admonition, + util.wrap(text, width=width, + initindent=defindent, + hangindent=defindent)) if block['type'] == 'margin': - return '' + return '\n' if block['type'] == 'literal': indent += ' ' - return indent + ('\n' + indent).join(block['lines']) + return indent + ('\n' + indent).join(block['lines']) + '\n' if block['type'] == 'section': underline = encoding.colwidth(block['lines'][0]) * block['underline'] - return "%s%s\n%s%s" % (indent, block['lines'][0],indent, underline) + return "%s%s\n%s%s\n" % (indent, block['lines'][0],indent, underline) + if block['type'] == 'table': + table = block['table'] + # compute column widths + widths = [max([encoding.colwidth(e) for e in c]) for c in zip(*table)] + text = '' + span = sum(widths) + len(widths) - 1 + indent = ' ' * block['indent'] + hang = ' ' * (len(indent) + span - widths[-1]) + + for row in table: + l = [] + for w, v in zip(widths, row): + pad = ' ' * (w - encoding.colwidth(v)) + l.append(v + pad) + l = ' '.join(l) + l = util.wrap(l, width=width, initindent=indent, hangindent=hang) + if not text and block['header']: + text = l + '\n' + indent + '-' * (min(width, span)) + '\n' + else: + text += l + "\n" + return text if block['type'] == 'definition': term = indent + block['lines'][0] hang = len(block['lines'][-1]) - len(block['lines'][-1].lstrip()) defindent = indent + hang * ' ' text = ' '.join(map(str.strip, block['lines'][1:])) - return '%s\n%s' % (term, util.wrap(text, width=width, - initindent=defindent, - hangindent=defindent)) + return '%s\n%s\n' % (term, util.wrap(text, width=width, + initindent=defindent, + hangindent=defindent)) subindent = indent if block['type'] == 'bullet': if block['lines'][0].startswith('| '): @@ -431,15 +495,103 @@ text = ' '.join(map(str.strip, block['lines'])) return util.wrap(text, width=width, initindent=indent, - hangindent=subindent) + hangindent=subindent) + '\n' + +def formathtml(blocks): + """Format RST blocks as HTML""" + + out = [] + headernest = '' + listnest = [] + def openlist(start, level): + if not listnest or listnest[-1][0] != start: + listnest.append((start, level)) + out.append('<%s>\n' % start) + + blocks = [b for b in blocks if b['type'] != 'margin'] + + for pos, b in enumerate(blocks): + btype = b['type'] + level = b['indent'] + lines = b['lines'] -def format(text, width, indent=0, keep=None): - """Parse and format the text according to width.""" + if btype == 'admonition': + admonition = _admonitiontitles[b['admonitiontitle']] + text = ' '.join(map(str.strip, lines)) + out.append('

<p>\n<b>%s</b> %s\n</p>\n' % (admonition, text))
+        elif btype == 'paragraph':
+            out.append('<p>\n%s\n</p>\n' % '\n'.join(lines))
+        elif btype == 'margin':
+            pass
+        elif btype == 'literal':
+            out.append('<pre>\n%s\n</pre>\n' % '\n'.join(lines))
+        elif btype == 'section':
+            i = b['underline']
+            if i not in headernest:
+                headernest += i
+            level = headernest.index(i) + 1
+            out.append('<h%d>%s</h%d>\n' % (level, lines[0], level))
+        elif btype == 'table':
+            table = b['table']
+            t = []
+            for row in table:
+                l = []
+                for v in zip(row):
+                    if not t:
+                        l.append('<th>%s</th>' % v)
+                    else:
+                        l.append('<td>%s</td>' % v)
+                t.append(' <tr>%s</tr>\n' % ''.join(l))
+            out.append('<table>\n%s</table>\n' % ''.join(t))
+        elif btype == 'definition':
+            openlist('dl', level)
+            term = lines[0]
+            text = ' '.join(map(str.strip, lines[1:]))
+            out.append('<dt>%s\n<dd>%s\n' % (term, text))
+        elif btype == 'bullet':
+            bullet, head = lines[0].split(' ', 1)
+            if bullet == '-':
+                openlist('ul', level)
+            else:
+                openlist('ol', level)
+            out.append('<li> %s\n' % ' '.join([head] + lines[1:]))
+        elif btype == 'field':
+            openlist('dl', level)
+            key = b['key']
+            text = ' '.join(map(str.strip, lines))
+            out.append('<dt>%s\n<dd>%s\n' % (key, text))
+        elif btype == 'option':
+            openlist('dl', level)
+            opt = b['optstr']
+            desc = ' '.join(map(str.strip, lines))
+            out.append('<dt>%s\n<dd>
    %s\n' % (opt, desc)) + + # close lists if indent level of next block is lower + if listnest: + start, level = listnest[-1] + if pos == len(blocks) - 1: + out.append('\n' % start) + listnest.pop() + else: + nb = blocks[pos + 1] + ni = nb['indent'] + if (ni < level or + (ni == level and + nb['type'] not in 'definition bullet field option')): + out.append('\n' % start) + listnest.pop() + + return ''.join(out) + +def parse(text, indent=0, keep=None): + """Parse text into a list of blocks""" + pruned = [] blocks = findblocks(text) for b in blocks: b['indent'] += indent blocks = findliteralblocks(blocks) + blocks = findtables(blocks) blocks, pruned = prunecontainers(blocks, keep or []) blocks = findsections(blocks) blocks = inlineliterals(blocks) @@ -450,33 +602,68 @@ blocks = addmargins(blocks) blocks = prunecomments(blocks) blocks = findadmonitions(blocks) - text = '\n'.join(formatblock(b, width) for b in blocks) + return blocks, pruned + +def formatblocks(blocks, width): + text = ''.join(formatblock(b, width) for b in blocks) + return text + +def format(text, width=80, indent=0, keep=None, style='plain'): + """Parse and format the text according to width.""" + blocks, pruned = parse(text, indent, keep or []) + if style == 'html': + text = formathtml(blocks) + else: + text = ''.join(formatblock(b, width) for b in blocks) if keep is None: return text else: return text, pruned - -if __name__ == "__main__": - from pprint import pprint - - def debug(func, *args): - blocks = func(*args) - print "*** after %s:" % func.__name__ - pprint(blocks) - print - return blocks +def getsections(blocks): + '''return a list of (section name, nesting level, blocks) tuples''' + nest = "" + level = 0 + secs = [] + for b in blocks: + if b['type'] == 'section': + i = b['underline'] + if i not in nest: + nest += i + level = nest.index(i) + 1 + nest = nest[:level] + secs.append((b['lines'][0], level, [b])) + else: + if not secs: + # add an initial empty section + secs = [('', 0, [])] + secs[-1][2].append(b) + return secs - text = sys.stdin.read() - blocks = debug(findblocks, text) - blocks = debug(findliteralblocks, blocks) - blocks, pruned = debug(prunecontainers, blocks, sys.argv[1:]) - blocks = debug(inlineliterals, blocks) - blocks = debug(splitparagraphs, blocks) - blocks = debug(updatefieldlists, blocks) - blocks = debug(updateoptionlists, blocks) - blocks = debug(findsections, blocks) - blocks = debug(addmargins, blocks) - blocks = debug(prunecomments, blocks) - blocks = debug(findadmonitions, blocks) - print '\n'.join(formatblock(b, 30) for b in blocks) +def decorateblocks(blocks, width): + '''generate a list of (section name, line text) pairs for search''' + lines = [] + for s in getsections(blocks): + section = s[0] + text = formatblocks(s[2], width) + lines.append([(section, l) for l in text.splitlines(True)]) + return lines + +def maketable(data, indent=0, header=False): + '''Generate an RST table for the given table data''' + + widths = [max(encoding.colwidth(e) for e in c) for c in zip(*data)] + indent = ' ' * indent + div = indent + ' '.join('=' * w for w in widths) + '\n' + + out = [div] + for row in data: + l = [] + for w, v in zip(widths, row): + pad = ' ' * (w - encoding.colwidth(v)) + l.append(v + pad) + out.append(indent + ' '.join(l) + "\n") + if header and len(data) > 1: + out.insert(2, div) + out.append(div) + return ''.join(out) diff -r fccd350acf79 -r 384082750f2c mercurial/osutil.c --- a/mercurial/osutil.c Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/osutil.c Sat Oct 15 14:30:50 2011 
-0500 @@ -12,6 +12,7 @@ #include #include #include +#include #ifdef _WIN32 #include @@ -288,7 +289,8 @@ #endif if (pathlen >= PATH_MAX) { - PyErr_SetString(PyExc_ValueError, "path too long"); + errno = ENAMETOOLONG; + PyErr_SetFromErrnoWithFilename(PyExc_OSError, path); goto error_value; } strncpy(fullpath, path, PATH_MAX); diff -r fccd350acf79 -r 384082750f2c mercurial/patch.py --- a/mercurial/patch.py Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/patch.py Sat Oct 15 14:30:50 2011 -0500 @@ -126,7 +126,7 @@ mimeheaders = ['content-type'] - if not hasattr(stream, 'next'): + if not util.safehasattr(stream, 'next'): # http responses, for example, have readline but not next stream = fiter(stream) @@ -1619,27 +1619,36 @@ def difflabel(func, *args, **kw): '''yields 2-tuples of (output, label) based on the output of func()''' - prefixes = [('diff', 'diff.diffline'), - ('copy', 'diff.extended'), - ('rename', 'diff.extended'), - ('old', 'diff.extended'), - ('new', 'diff.extended'), - ('deleted', 'diff.extended'), - ('---', 'diff.file_a'), - ('+++', 'diff.file_b'), - ('@@', 'diff.hunk'), - ('-', 'diff.deleted'), - ('+', 'diff.inserted')] - + headprefixes = [('diff', 'diff.diffline'), + ('copy', 'diff.extended'), + ('rename', 'diff.extended'), + ('old', 'diff.extended'), + ('new', 'diff.extended'), + ('deleted', 'diff.extended'), + ('---', 'diff.file_a'), + ('+++', 'diff.file_b')] + textprefixes = [('@', 'diff.hunk'), + ('-', 'diff.deleted'), + ('+', 'diff.inserted')] + head = False for chunk in func(*args, **kw): lines = chunk.split('\n') for i, line in enumerate(lines): if i != 0: yield ('\n', '') + if head: + if line.startswith('@'): + head = False + else: + if line and not line[0] in ' +-@': + head = True stripline = line - if line and line[0] in '+-': + if not head and line and line[0] in '+-': # highlight trailing whitespace, but only in changed lines stripline = line.rstrip() + prefixes = textprefixes + if head: + prefixes = headprefixes for prefix, label in prefixes: if stripline.startswith(prefix): yield (stripline, label) diff -r fccd350acf79 -r 384082750f2c mercurial/posix.py --- a/mercurial/posix.py Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/posix.py Sat Oct 15 14:30:50 2011 -0500 @@ -84,6 +84,21 @@ # Turn off all +x bits os.chmod(f, s & 0666) +def copymode(src, dst, mode=None): + '''Copy the file mode from the file at path src to dst. + If src doesn't exist, we're using mode instead. If mode is None, we're + using umask.''' + try: + st_mode = os.lstat(src).st_mode & 0777 + except OSError, inst: + if inst.errno != errno.ENOENT: + raise + st_mode = mode + if st_mode is None: + st_mode = ~umask + st_mode &= 0666 + os.chmod(dst, st_mode) + def checkexec(path): """ Check whether the given path is on a filesystem with UNIX-like exec flags @@ -241,7 +256,9 @@ for path in os.environ.get('PATH', '').split(os.pathsep): executable = findexisting(os.path.join(path, command)) if executable is not None: - return executable + st = os.stat(executable) + if (st.st_mode & (stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)): + return executable return None def setsignalhandler(): @@ -325,3 +342,45 @@ except ImportError: pass return 80 + +def makedir(path, notindexed): + os.mkdir(path) + +def unlinkpath(f): + """unlink and remove the directory if it is empty""" + os.unlink(f) + # try removing directories that might now be empty + try: + os.removedirs(os.path.dirname(f)) + except OSError: + pass + +def lookupreg(key, name=None, scope=None): + return None + +def hidewindow(): + """Hide current shell window. 
+ + Used to hide the window opened when starting asynchronous + child process under Windows, unneeded on other systems. + """ + pass + +class cachestat(object): + def __init__(self, path): + self.stat = os.stat(path) + + def cacheable(self): + return bool(self.stat.st_ino) + + def __eq__(self, other): + try: + return self.stat == other.stat + except AttributeError: + return False + + def __ne__(self, other): + return not self == other + +def executablepath(): + return None # available on Windows only diff -r fccd350acf79 -r 384082750f2c mercurial/pure/parsers.py --- a/mercurial/pure/parsers.py Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/pure/parsers.py Sat Oct 15 14:30:50 2011 -0500 @@ -36,7 +36,7 @@ s = struct.calcsize(indexformatng) index = [] cache = None - n = off = 0 + off = 0 l = len(data) - s append = index.append @@ -45,7 +45,6 @@ while off <= l: e = _unpack(indexformatng, data[off:off + s]) append(e) - n += 1 if e[1] < 0: break off += e[1] + s @@ -53,7 +52,6 @@ while off <= l: e = _unpack(indexformatng, data[off:off + s]) append(e) - n += 1 off += s if off != len(data): diff -r fccd350acf79 -r 384082750f2c mercurial/repair.py --- a/mercurial/repair.py Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/repair.py Sat Oct 15 14:30:50 2011 -0500 @@ -11,9 +11,8 @@ from mercurial.i18n import _ import os -def _bundle(repo, bases, heads, node, suffix, compress=True): +def _bundle(repo, cg, node, suffix, compress=True): """create a bundle with the specified revisions as a backup""" - cg = repo.changegroupsubset(bases, heads, 'strip') backupdir = repo.join("strip-backup") if not os.path.isdir(backupdir): os.mkdir(backupdir) @@ -83,11 +82,9 @@ saveheads.add(r) saveheads = [cl.node(r) for r in saveheads] - # compute base nodes - if saverevs: - descendants = set(cl.descendants(*saverevs)) - saverevs.difference_update(descendants) - savebases = [cl.node(r) for r in saverevs] + # compute common nodes + savecommon = set(cl.node(p) for r in saverevs for p in cl.parentrevs(r) + if p not in saverevs and p not in tostrip) bm = repo._bookmarks updatebm = [] @@ -99,12 +96,14 @@ # create a changegroup for all the branches we need to keep backupfile = None if backup == "all": - backupfile = _bundle(repo, [node], cl.heads(), node, 'backup') + allnodes=[cl.node(r) for r in xrange(striprev, len(cl))] + cg = repo._changegroup(allnodes, 'strip') + backupfile = _bundle(repo, cg, node, 'backup') repo.ui.status(_("saved backup bundle to %s\n") % backupfile) - if saveheads or savebases: + if saveheads or savecommon: # do not compress partial bundle if we remove it from disk later - chgrpfile = _bundle(repo, savebases, saveheads, node, 'temp', - compress=keeppartialbundle) + cg = repo.getbundle('strip', common=savecommon, heads=saveheads) + chgrpfile = _bundle(repo, cg, node, 'temp', compress=keeppartialbundle) mfst = repo.manifest @@ -128,7 +127,7 @@ tr.abort() raise - if saveheads or savebases: + if saveheads or savecommon: ui.note(_("adding branch\n")) f = open(chgrpfile, "rb") gen = changegroup.readbundle(f, chgrpfile) diff -r fccd350acf79 -r 384082750f2c mercurial/revlog.py --- a/mercurial/revlog.py Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/revlog.py Sat Oct 15 14:30:50 2011 -0500 @@ -226,9 +226,10 @@ self._nodepos = None v = REVLOG_DEFAULT_VERSION - if hasattr(opener, 'options'): - if 'revlogv1' in opener.options: - if 'generaldelta' in opener.options: + opts = getattr(opener, 'options', None) + if opts is not None: + if 'revlogv1' in opts: + if 'generaldelta' in opts: v |= REVLOGGENERALDELTA else: v = 0 
@@ -945,9 +946,9 @@ e = self._io.packentry(self.index[i], self.node, self.version, i) fp.write(e) - # if we don't call rename, the temp file will never replace the + # if we don't call close, the temp file will never replace the # real index - fp.rename() + fp.close() tr.replace(self.indexfile, trindex * self._io.size) self._chunkclear() diff -r fccd350acf79 -r 384082750f2c mercurial/revset.py --- a/mercurial/revset.py Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/revset.py Sat Oct 15 14:30:50 2011 -0500 @@ -6,7 +6,7 @@ # GNU General Public License version 2 or any later version. import re -import parser, util, error, discovery, hbisect +import parser, util, error, discovery, hbisect, node import bookmarks as bookmarksmod import match as matchmod from i18n import _ @@ -235,15 +235,24 @@ n = getstring(x, _("author requires a string")).lower() return [r for r in subset if n in repo[r].user().lower()] -def bisected(repo, subset, x): - """``bisected(string)`` - Changesets marked in the specified bisect state (good, bad, skip). +def bisect(repo, subset, x): + """``bisect(string)`` + Changesets marked in the specified bisect status: + + - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip + - ``goods``, ``bads`` : csets topologicaly good/bad + - ``range`` : csets taking part in the bisection + - ``pruned`` : csets that are goods, bads or skipped + - ``untested`` : csets whose fate is yet unknown + - ``ignored`` : csets ignored due to DAG topology """ - state = getstring(x, _("bisect requires a string")).lower() - if state not in ('good', 'bad', 'skip', 'unknown'): - raise error.ParseError(_('invalid bisect state')) - marked = set(repo.changelog.rev(n) for n in hbisect.load_state(repo)[state]) - return [r for r in subset if r in marked] + status = getstring(x, _("bisect requires a string")).lower() + return [r for r in subset if r in hbisect.get(repo, status)] + +# Backward-compatibility +# - no help entry so that we do not advertise it any more +def bisected(repo, subset, x): + return bisect(repo, subset, x) def bookmark(repo, subset, x): """``bookmark([name])`` @@ -407,6 +416,12 @@ return [r for r in subset if r in s] +def first(repo, subset, x): + """``first(set, [n])`` + An alias for limit(). + """ + return limit(repo, subset, x) + def follow(repo, subset, x): """``follow([file])`` An alias for ``::.`` (ancestors of the working copy's first parent). @@ -513,14 +528,16 @@ return l def limit(repo, subset, x): - """``limit(set, n)`` - First n members of set. + """``limit(set, [n])`` + First n members of set, defaulting to 1. """ # i18n: "limit" is a keyword - l = getargs(x, 2, 2, _("limit requires two arguments")) + l = getargs(x, 1, 2, _("limit requires one or two arguments")) try: - # i18n: "limit" is a keyword - lim = int(getstring(l[1], _("limit requires a number"))) + lim = 1 + if len(l) == 2: + # i18n: "limit" is a keyword + lim = int(getstring(l[1], _("limit requires a number"))) except (TypeError, ValueError): # i18n: "limit" is a keyword raise error.ParseError(_("limit expects a number")) @@ -529,14 +546,16 @@ return [r for r in os if r in ss] def last(repo, subset, x): - """``last(set, n)`` - Last n members of set. + """``last(set, [n])`` + Last n members of set, defaulting to 1. 
""" # i18n: "last" is a keyword - l = getargs(x, 2, 2, _("last requires two arguments")) + l = getargs(x, 1, 2, _("last requires one or two arguments")) try: - # i18n: "last" is a keyword - lim = int(getstring(l[1], _("last requires a number"))) + lim = 1 + if len(l) == 2: + # i18n: "last" is a keyword + lim = int(getstring(l[1], _("last requires a number"))) except (TypeError, ValueError): # i18n: "last" is a keyword raise error.ParseError(_("last expects a number")) @@ -827,6 +846,7 @@ "ancestor": ancestor, "ancestors": ancestors, "author": author, + "bisect": bisect, "bisected": bisected, "bookmark": bookmark, "branch": branch, @@ -838,6 +858,7 @@ "descendants": descendants, "file": hasfile, "filelog": filelog, + "first": first, "follow": follow, "grep": grep, "head": head, @@ -951,7 +972,7 @@ w = 100 # very slow elif f == "ancestor": w = 1 * smallbonus - elif f in "reverse limit": + elif f in "reverse limit first": w = 0 elif f in "sort": w = 10 # assume most sorts look at changelog @@ -1019,11 +1040,87 @@ tree, pos = parse(spec) if (pos != len(spec)): raise error.ParseError(_("invalid token"), pos) - tree = findaliases(ui, tree) + if ui: + tree = findaliases(ui, tree) weight, tree = optimize(tree, True) def mfunc(repo, subset): return getset(repo, subset, tree) return mfunc +def formatspec(expr, *args): + ''' + This is a convenience function for using revsets internally, and + escapes arguments appropriately. Aliases are intentionally ignored + so that intended expression behavior isn't accidentally subverted. + + Supported arguments: + + %r = revset expression, parenthesized + %d = int(arg), no quoting + %s = string(arg), escaped and single-quoted + %b = arg.branch(), escaped and single-quoted + %n = hex(arg), single-quoted + %% = a literal '%' + + Prefixing the type with 'l' specifies a parenthesized list of that type. 
+ + >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()")) + '(10 or 11):: and ((this()) or (that()))' + >>> formatspec('%d:: and not %d::', 10, 20) + '10:: and not 20::' + >>> formatspec('keyword(%s)', 'foo\\xe9') + "keyword('foo\\\\xe9')" + >>> b = lambda: 'default' + >>> b.branch = b + >>> formatspec('branch(%b)', b) + "branch('default')" + >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd']) + "root(('a' or 'b' or 'c' or 'd'))" + ''' + + def quote(s): + return repr(str(s)) + + def argtype(c, arg): + if c == 'd': + return str(int(arg)) + elif c == 's': + return quote(arg) + elif c == 'r': + parse(arg) # make sure syntax errors are confined + return '(%s)' % arg + elif c == 'n': + return quote(node.hex(arg)) + elif c == 'b': + return quote(arg.branch()) + + ret = '' + pos = 0 + arg = 0 + while pos < len(expr): + c = expr[pos] + if c == '%': + pos += 1 + d = expr[pos] + if d == '%': + ret += d + elif d in 'dsnbr': + ret += argtype(d, args[arg]) + arg += 1 + elif d == 'l': + # a list of some type + pos += 1 + d = expr[pos] + lv = ' or '.join(argtype(d, e) for e in args[arg]) + ret += '(%s)' % lv + arg += 1 + else: + raise util.Abort('unexpected revspec format character %s' % d) + else: + ret += c + pos += 1 + + return ret + # tell hggettext to extract docstrings from these functions: i18nfunctions = symbols.values() diff -r fccd350acf79 -r 384082750f2c mercurial/scmutil.py --- a/mercurial/scmutil.py Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/scmutil.py Sat Oct 15 14:30:50 2011 -0500 @@ -324,10 +324,10 @@ def errhandler(err): if err.filename == path: raise err - if followsym and hasattr(os.path, 'samestat'): + samestat = getattr(os.path, 'samestat', None) + if followsym and samestat is not None: def adddir(dirlst, dirname): match = False - samestat = os.path.samestat dirstat = os.stat(dirname) for lstdirstat in dirlst: if samestat(dirstat, lstdirstat): @@ -709,3 +709,95 @@ raise error.RequirementError(_("unknown repository format: " "requires features '%s' (upgrade Mercurial)") % "', '".join(missings)) return requirements + +class filecacheentry(object): + def __init__(self, path): + self.path = path + self.cachestat = filecacheentry.stat(self.path) + + if self.cachestat: + self._cacheable = self.cachestat.cacheable() + else: + # None means we don't know yet + self._cacheable = None + + def refresh(self): + if self.cacheable(): + self.cachestat = filecacheentry.stat(self.path) + + def cacheable(self): + if self._cacheable is not None: + return self._cacheable + + # we don't know yet, assume it is for now + return True + + def changed(self): + # no point in going further if we can't cache it + if not self.cacheable(): + return True + + newstat = filecacheentry.stat(self.path) + + # we may not know if it's cacheable yet, check again now + if newstat and self._cacheable is None: + self._cacheable = newstat.cacheable() + + # check again + if not self._cacheable: + return True + + if self.cachestat != newstat: + self.cachestat = newstat + return True + else: + return False + + @staticmethod + def stat(path): + try: + return util.cachestat(path) + except OSError, e: + if e.errno != errno.ENOENT: + raise + +class filecache(object): + '''A property like decorator that tracks a file under .hg/ for updates. + + Records stat info when called in _filecache. + + On subsequent calls, compares old stat info with new info, and recreates + the object when needed, updating the new stat info in _filecache. 
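+
+    Usage sketch (mirroring the localrepo hunks earlier in this patch):
+
+        @filecache('bookmarks')
+        def _bookmarks(self):
+            return bookmarks.read(self)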
+ + Mercurial either atomic renames or appends for files under .hg, + so to ensure the cache is reliable we need the filesystem to be able + to tell us if a file has been replaced. If it can't, we fallback to + recreating the object on every call (essentially the same behaviour as + propertycache).''' + def __init__(self, path, instore=False): + self.path = path + self.instore = instore + + def __call__(self, func): + self.func = func + self.name = func.__name__ + return self + + def __get__(self, obj, type=None): + entry = obj._filecache.get(self.name) + + if entry: + if entry.changed(): + entry.obj = self.func(obj) + else: + path = self.instore and obj.sjoin(self.path) or obj.join(self.path) + + # We stat -before- creating the object so our cache doesn't lie if + # a writer modified between the time we read and stat + entry = filecacheentry(path) + entry.obj = self.func(obj) + + obj._filecache[self.name] = entry + + setattr(obj, self.name, entry.obj) + return entry.obj diff -r fccd350acf79 -r 384082750f2c mercurial/simplemerge.py --- a/mercurial/simplemerge.py Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/simplemerge.py Sat Oct 15 14:30:50 2011 -0500 @@ -445,7 +445,7 @@ out.write(line) if not opts.get('print'): - out.rename() + out.close() if m3.conflicts: if not opts.get('quiet'): diff -r fccd350acf79 -r 384082750f2c mercurial/sshrepo.py --- a/mercurial/sshrepo.py Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/sshrepo.py Sat Oct 15 14:30:50 2011 -0500 @@ -164,6 +164,17 @@ def _recv(self): l = self.pipei.readline() + if l == '\n': + err = [] + while True: + line = self.pipee.readline() + if line == '-\n': + break + err.extend([line]) + if len(err) > 0: + # strip the trailing newline added to the last line server-side + err[-1] = err[-1][:-1] + self._abort(error.OutOfBandError(*err)) self.readerr() try: l = int(l) diff -r fccd350acf79 -r 384082750f2c mercurial/sshserver.py --- a/mercurial/sshserver.py Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/sshserver.py Sat Oct 15 14:30:50 2011 -0500 @@ -82,6 +82,12 @@ def sendpusherror(self, rsp): self.sendresponse(rsp.res) + def sendooberror(self, rsp): + self.ui.ferr.write('%s\n-\n' % rsp.message) + self.ui.ferr.flush() + self.fout.write('\n') + self.fout.flush() + def serve_forever(self): try: while self.serve_one(): @@ -96,6 +102,7 @@ wireproto.streamres: sendstream, wireproto.pushres: sendpushresponse, wireproto.pusherr: sendpusherror, + wireproto.ooberror: sendooberror, } def serve_one(self): diff -r fccd350acf79 -r 384082750f2c mercurial/sslutil.py --- a/mercurial/sslutil.py Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/sslutil.py Sat Oct 15 14:30:50 2011 -0500 @@ -22,6 +22,8 @@ def ssl_wrap_socket(sock, key_file, cert_file, cert_reqs=CERT_REQUIRED, ca_certs=None): + if not util.safehasattr(socket, 'ssl'): + raise util.Abort(_('Python SSL support not found')) if ca_certs: raise util.Abort(_( 'certificate checking requires Python 2.6')) diff -r fccd350acf79 -r 384082750f2c mercurial/statichttprepo.py --- a/mercurial/statichttprepo.py Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/statichttprepo.py Sat Oct 15 14:30:50 2011 -0500 @@ -31,15 +31,11 @@ try: f = self.opener.open(req) data = f.read() - if hasattr(f, 'getcode'): - # python 2.6+ - code = f.getcode() - elif hasattr(f, 'code'): - # undocumented attribute, seems to be set in 2.4 and 2.5 - code = f.code - else: - # Don't know how to check, hope for the best. 
- code = 206 + # Python 2.6+ defines a getcode() function, and 2.4 and + # 2.5 appear to always have an undocumented code attribute + # set. If we can't read either of those, fall back to 206 + # and hope for the best. + code = getattr(f, 'getcode', lambda : getattr(f, 'code', 206))() except urllib2.HTTPError, inst: num = inst.code == 404 and errno.ENOENT or None raise IOError(num, inst) @@ -125,6 +121,7 @@ self.encodepats = None self.decodepats = None self.capabilities.difference_update(["pushkey"]) + self._filecache = {} def url(self): return self._url diff -r fccd350acf79 -r 384082750f2c mercurial/store.py --- a/mercurial/store.py Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/store.py Sat Oct 15 14:30:50 2011 -0500 @@ -345,7 +345,7 @@ fp = self.opener('fncache', mode='wb', atomictemp=True) for p in self.entries: fp.write(encodedir(p) + '\n') - fp.rename() + fp.close() self._dirty = False def add(self, fn): diff -r fccd350acf79 -r 384082750f2c mercurial/subrepo.py --- a/mercurial/subrepo.py Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/subrepo.py Sat Oct 15 14:30:50 2011 -0500 @@ -50,15 +50,7 @@ if err.errno != errno.ENOENT: raise - state = {} - for path, src in p[''].items(): - kind = 'hg' - if src.startswith('['): - if ']' not in src: - raise util.Abort(_('missing ] in subrepo source')) - kind, src = src.split(']', 1) - kind = kind[1:] - + def remap(src): for pattern, repl in p.items('subpaths'): # Turn r'C:\foo\bar' into r'C:\\foo\\bar' since re.sub # does a string decode. @@ -72,7 +64,34 @@ except re.error, e: raise util.Abort(_("bad subrepository pattern in %s: %s") % (p.source('subpaths', pattern), e)) + return src + state = {} + for path, src in p[''].items(): + kind = 'hg' + if src.startswith('['): + if ']' not in src: + raise util.Abort(_('missing ] in subrepo source')) + kind, src = src.split(']', 1) + kind = kind[1:] + src = src.lstrip() # strip any extra whitespace after ']' + + if not util.url(src).isabs(): + parent = _abssource(ctx._repo, abort=False) + if parent: + parent = util.url(parent) + parent.path = posixpath.join(parent.path or '', src) + parent.path = posixpath.normpath(parent.path) + joined = str(parent) + # Remap the full joined path and use it if it changes, + # else remap the original source. + remapped = remap(joined) + if remapped == joined: + src = remap(src) + else: + src = remapped + + src = remap(src) state[path] = (src.strip(), rev.get(path, ''), kind) return state @@ -181,23 +200,23 @@ def reporelpath(repo): """return path to this (sub)repo as seen from outermost repo""" parent = repo - while hasattr(parent, '_subparent'): + while util.safehasattr(parent, '_subparent'): parent = parent._subparent p = parent.root.rstrip(os.sep) return repo.root[len(p) + 1:] def subrelpath(sub): """return path to this subrepo as seen from outermost repo""" - if hasattr(sub, '_relpath'): + if util.safehasattr(sub, '_relpath'): return sub._relpath - if not hasattr(sub, '_repo'): + if not util.safehasattr(sub, '_repo'): return sub._path return reporelpath(sub._repo) def _abssource(repo, push=False, abort=True): """return pull/push path of repo - either based on parent repo .hgsub info or on the top repo config. 
Abort or return None if no source found.""" - if hasattr(repo, '_subparent'): + if util.safehasattr(repo, '_subparent'): source = util.url(repo._subsource) if source.isabs(): return str(source) @@ -209,7 +228,7 @@ parent.path = posixpath.normpath(parent.path) return str(parent) else: # recursion reached top repo - if hasattr(repo, '_subtoppath'): + if util.safehasattr(repo, '_subtoppath'): return repo._subtoppath if push and repo.ui.config('paths', 'default-push'): return repo.ui.config('paths', 'default-push') @@ -530,9 +549,13 @@ self._state = state self._ctx = ctx self._ui = ctx._repo.ui + self._exe = util.findexe('svn') + if not self._exe: + raise util.Abort(_("'svn' executable not found for subrepo '%s'") + % self._path) def _svncommand(self, commands, filename='', failok=False): - cmd = ['svn'] + cmd = [self._exe] extrakw = {} if not self._ui.interactive(): # Making stdin be a pipe should prevent svn from behaving @@ -810,9 +833,10 @@ for b in branches: if b.startswith('refs/remotes/'): continue - remote = self._gitcommand(['config', 'branch.%s.remote' % b]) + bname = b.split('/', 2)[2] + remote = self._gitcommand(['config', 'branch.%s.remote' % bname]) if remote: - ref = self._gitcommand(['config', 'branch.%s.merge' % b]) + ref = self._gitcommand(['config', 'branch.%s.merge' % bname]) tracking['refs/remotes/%s/%s' % (remote, ref.split('/', 2)[2])] = b return tracking diff -r fccd350acf79 -r 384082750f2c mercurial/tags.py --- a/mercurial/tags.py Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/tags.py Sat Oct 15 14:30:50 2011 -0500 @@ -287,6 +287,6 @@ cachefile.write("%s %s\n" % (hex(node), name)) try: - cachefile.rename() + cachefile.close() except (OSError, IOError): pass diff -r fccd350acf79 -r 384082750f2c mercurial/templatefilters.py --- a/mercurial/templatefilters.py Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/templatefilters.py Sat Oct 15 14:30:50 2011 -0500 @@ -7,6 +7,7 @@ import cgi, re, os, time, urllib import encoding, node, util +import hbisect def addbreaks(text): """:addbreaks: Any text. Add an XHTML "
    " tag before the end of @@ -188,13 +189,13 @@ return '"%s"' % jsonescape(u) elif isinstance(obj, unicode): return '"%s"' % jsonescape(obj) - elif hasattr(obj, 'keys'): + elif util.safehasattr(obj, 'keys'): out = [] for k, v in obj.iteritems(): s = '%s: %s' % (json(k), json(v)) out.append(s) return '{' + ', '.join(out) + '}' - elif hasattr(obj, '__iter__'): + elif util.safehasattr(obj, '__iter__'): out = [] for i in obj: out.append(json(i)) @@ -268,6 +269,14 @@ """ return text[:12] +def shortbisect(text): + """:shortbisect: Any text. Treats `text` as a bisection status, and + returns a single-character representing the status (G: good, B: bad, + S: skipped, U: untested, I: ignored). Returns single space if `text` + is not a valid bisection status. + """ + return hbisect.shortlabel(text) or ' ' + def shortdate(text): """:shortdate: Date. Returns a date like "2006-09-18".""" return util.shortdate(text) @@ -279,7 +288,7 @@ """:stringify: Any type. Turns the value into text by converting values into text and concatenating them. """ - if hasattr(thing, '__iter__') and not isinstance(thing, str): + if util.safehasattr(thing, '__iter__') and not isinstance(thing, str): return "".join([stringify(t) for t in thing if t is not None]) return str(thing) @@ -347,6 +356,7 @@ "rfc3339date": rfc3339date, "rfc822date": rfc822date, "short": short, + "shortbisect": shortbisect, "shortdate": shortdate, "stringescape": stringescape, "stringify": stringify, diff -r fccd350acf79 -r 384082750f2c mercurial/templatekw.py --- a/mercurial/templatekw.py Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/templatekw.py Sat Oct 15 14:30:50 2011 -0500 @@ -7,6 +7,7 @@ from node import hex import patch, util, error +import hbisect def showlist(name, values, plural=None, **args): '''expand set of values. @@ -145,6 +146,10 @@ """:author: String. The unmodified author of the changeset.""" return ctx.user() +def showbisect(repo, ctx, templ, **args): + """:bisect: String. The changeset bisection status.""" + return hbisect.label(repo, ctx.node()) + def showbranch(**args): """:branch: String. The name of the branch on which the changeset was committed. 
@@ -288,6 +293,7 @@ # revcache - a cache dictionary for the current revision keywords = { 'author': showauthor, + 'bisect': showbisect, 'branch': showbranch, 'branches': showbranches, 'bookmarks': showbookmarks, diff -r fccd350acf79 -r 384082750f2c mercurial/templater.py --- a/mercurial/templater.py Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/templater.py Sat Oct 15 14:30:50 2011 -0500 @@ -135,7 +135,7 @@ v = mapping.get(key) if v is None: v = context._defaults.get(key, '') - if hasattr(v, '__call__'): + if util.safehasattr(v, '__call__'): return v(**mapping) return v @@ -172,14 +172,14 @@ def buildfunc(exp, context): n = getsymbol(exp[1]) args = [compileexp(x, context) for x in getlist(exp[2])] + if n in funcs: + f = funcs[n] + return (f, args) if n in context._filters: if len(args) != 1: raise error.ParseError(_("filter %s expects one argument") % n) f = context._filters[n] return (runfilter, (args[0][0], args[0][1], f)) - elif n in context._funcs: - f = context._funcs[n] - return (f, args) methods = { "string": lambda e, c: (runstring, e[1]), @@ -191,6 +191,9 @@ "func": buildfunc, } +funcs = { +} + # template engine path = ['templates', '../templates'] @@ -200,14 +203,14 @@ '''yield a single stream from a possibly nested set of iterators''' if isinstance(thing, str): yield thing - elif not hasattr(thing, '__iter__'): + elif not util.safehasattr(thing, '__iter__'): if thing is not None: yield str(thing) else: for i in thing: if isinstance(i, str): yield i - elif not hasattr(i, '__iter__'): + elif not util.safehasattr(i, '__iter__'): if i is not None: yield str(i) elif i is not None: @@ -338,7 +341,7 @@ normpaths = [] # executable version (py2exe) doesn't support __file__ - if hasattr(sys, 'frozen'): + if util.mainfrozen(): module = sys.executable else: module = __file__ diff -r fccd350acf79 -r 384082750f2c mercurial/templates/map-cmdline.bisect --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/mercurial/templates/map-cmdline.bisect Sat Oct 15 14:30:50 2011 -0500 @@ -0,0 +1,25 @@ +changeset = 'changeset: {rev}:{node|short}\nbisect: {bisect}\n{branches}{bookmarks}{tags}{parents}user: {author}\ndate: {date|date}\nsummary: {desc|firstline}\n\n' +changeset_quiet = '{bisect|shortbisect} {rev}:{node|short}\n' +changeset_verbose = 'changeset: {rev}:{node|short}\nbisect: {bisect}\n{branches}{bookmarks}{tags}{parents}user: {author}\ndate: {date|date}\n{files}{file_copies_switch}description:\n{desc|strip}\n\n\n' +changeset_debug = 'changeset: {rev}:{node}\nbisect: {bisect}\n{branches}{bookmarks}{tags}{parents}{manifest}user: {author}\ndate: {date|date}\n{file_mods}{file_adds}{file_dels}{file_copies_switch}{extras}description:\n{desc|strip}\n\n\n' +start_files = 'files: ' +file = ' {file}' +end_files = '\n' +start_file_mods = 'files: ' +file_mod = ' {file_mod}' +end_file_mods = '\n' +start_file_adds = 'files+: ' +file_add = ' {file_add}' +end_file_adds = '\n' +start_file_dels = 'files-: ' +file_del = ' {file_del}' +end_file_dels = '\n' +start_file_copies = 'copies: ' +file_copy = ' {name} ({source})' +end_file_copies = '\n' +parent = 'parent: {rev}:{node|formatnode}\n' +manifest = 'manifest: {rev}:{node}\n' +branch = 'branch: {branch}\n' +tag = 'tag: {tag}\n' +bookmark = 'bookmark: {bookmark}\n' +extra = 'extra: {key}={value|stringescape}\n' diff -r fccd350acf79 -r 384082750f2c mercurial/templates/monoblue/footer.tmpl --- a/mercurial/templates/monoblue/footer.tmpl Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/templates/monoblue/footer.tmpl Sat Oct 15 14:30:50 2011 -0500 @@ -9,7 +9,7 @@
-        <p><a href="{logourl}" title="Mercurial"><img src="{staticurl}hglogo.png" width=75 height=90 border=0 alt="mercurial"></a></p>
+        <p><a href="{logourl}" title="Mercurial"><img src="{staticurl}{logoimg}" width=75 height=90 border=0 alt="mercurial"></a></p>
    diff -r fccd350acf79 -r 384082750f2c mercurial/templates/monoblue/index.tmpl --- a/mercurial/templates/monoblue/index.tmpl Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/templates/monoblue/index.tmpl Sat Oct 15 14:30:50 2011 -0500 @@ -26,7 +26,7 @@
-        <p><a href="{logourl}" title="Mercurial"><img src="{staticurl}hglogo.png" width=75 height=90 border=0 alt="mercurial"></a></p>
+        <p><a href="{logourl}" title="Mercurial"><img src="{staticurl}{logoimg}" width=75 height=90 border=0 alt="mercurial"></a></p>
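(These template hunks substitute the {logoimg} keyword next to the existing {logourl}; assuming they are backed by matching [web] settings, an hgrc sketch would be:)

    [web]
    logourl = http://mercurial.selenic.com/
    logoimg = hglogo.png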
    diff -r fccd350acf79 -r 384082750f2c mercurial/templates/paper/bookmarks.tmpl --- a/mercurial/templates/paper/bookmarks.tmpl Sun Oct 02 16:41:07 2011 -0500 +++ b/mercurial/templates/paper/bookmarks.tmpl Sat Oct 15 14:30:50 2011 -0500 @@ -11,7 +11,7 @@