# HG changeset patch # User Augie Fackler # Date 1516661582 18000 # Node ID 27b6df1b5adbdf647cf5c6675b40575e1b197c60 # Parent 87676e8ee05692bda0144e29b0478f2cc339aa4d# Parent 4fb2bb61597cb34c69c4af7a2d1fb0bb43145eb1 merge with stable to begin 4.5 freeze # no-check-commit because it's a clean merge diff -r 87676e8ee056 -r 27b6df1b5adb .hgignore --- a/.hgignore Mon Jan 08 16:07:51 2018 -0800 +++ b/.hgignore Mon Jan 22 17:53:02 2018 -0500 @@ -24,6 +24,7 @@ tests/.hypothesis tests/hypothesis-generated tests/annotated +tests/exceptions tests/*.err tests/htmlcov build @@ -55,6 +56,8 @@ locale/*/LC_MESSAGES/hg.mo hgext/__index__.py +rust/target/ + # Generated wheels wheelhouse/ diff -r 87676e8ee056 -r 27b6df1b5adb .jshintrc --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/.jshintrc Mon Jan 22 17:53:02 2018 -0500 @@ -0,0 +1,11 @@ +{ + // Enforcing + "eqeqeq" : true, // true: Require triple equals (===) for comparison + "forin" : true, // true: Require filtering for..in loops with obj.hasOwnProperty() + "freeze" : true, // true: prohibits overwriting prototypes of native objects such as Array, Date etc. + "nonbsp" : true, // true: Prohibit "non-breaking whitespace" characters. 
+ "undef" : true, // true: Require all non-global variables to be declared (prevents global leaks) + + // Environments + "browser" : true // Web Browser (window, document, etc) +} diff -r 87676e8ee056 -r 27b6df1b5adb Makefile --- a/Makefile Mon Jan 08 16:07:51 2018 -0800 +++ b/Makefile Mon Jan 22 17:53:02 2018 -0500 @@ -124,7 +124,7 @@ format-c: clang-format --style file -i \ - `hg files 'set:(**.c or **.h) and not "listfile:contrib/clang-format-blacklist"'` + `hg files 'set:(**.c or **.cc or **.h) and not "listfile:contrib/clang-format-blacklist"'` update-pot: i18n/hg.pot diff -r 87676e8ee056 -r 27b6df1b5adb contrib/bash_completion --- a/contrib/bash_completion Mon Jan 08 16:07:51 2018 -0800 +++ b/contrib/bash_completion Mon Jan 22 17:53:02 2018 -0500 @@ -296,7 +296,7 @@ merge) _hg_labels ;; - commit|ci|record) + commit|ci|record|amend) _hg_status "mar" ;; remove|rm) @@ -309,7 +309,7 @@ _hg_status "mar" ;; revert) - _hg_debugpathcomplete + _hg_status "mard" ;; clone) local count=$(_hg_count_non_option) diff -r 87676e8ee056 -r 27b6df1b5adb contrib/check-code.py --- a/contrib/check-code.py Mon Jan 08 16:07:51 2018 -0800 +++ b/contrib/check-code.py Mon Jan 22 17:53:02 2018 -0500 @@ -135,7 +135,6 @@ (r'if\s*!', "don't use '!' to negate exit status"), (r'/dev/u?random', "don't use entropy, use /dev/zero"), (r'do\s*true;\s*done', "don't use true as loop body, use sleep 0"), - (r'^( *)\t', "don't use tabs to indent"), (r'sed (-e )?\'(\d+|/[^/]*/)i(?!\\\n)', "put a backslash-escaped newline after sed 'i' command"), (r'^diff *-\w*[uU].*$\n(^ \$ |^$)', "prefix diff -u/-U with cmp"), @@ -148,7 +147,9 @@ (r'\bsed\b.*[^\\]\\n', "don't use 'sed ... 
\\n', use a \\ and a newline"), (r'env.*-u', "don't use 'env -u VAR', use 'unset VAR'"), (r'cp.* -r ', "don't use 'cp -r', use 'cp -R'"), - (r'grep.* -[ABC] ', "don't use grep's context flags"), + (r'grep.* -[ABC]', "don't use grep's context flags"), + (r'find.*-printf', + "don't use 'find -printf', it doesn't exist on BSD find(1)"), ], # warnings [ @@ -165,7 +166,6 @@ (r"<<(\S+)((.|\n)*?\n\1)", rephere), ] -winglobmsg = "use (glob) to match Windows paths too" uprefix = r"^ \$ " utestpats = [ [ @@ -181,25 +181,11 @@ (uprefix + r'.*:\.\S*/', "x:.y in a path does not work on msys, rewrite " "as x://.y, or see `hg log -k msys` for alternatives", r'-\S+:\.|' #-Rxxx '# no-msys'), # in test-pull.t which is skipped on windows - (r'^ saved backup bundle to \$TESTTMP.*\.hg$', winglobmsg), - (r'^ changeset .* references (corrupted|missing) \$TESTTMP/.*[^)]$', - winglobmsg), - (r'^ pulling from \$TESTTMP/.*[^)]$', winglobmsg, - '\$TESTTMP/unix-repo$'), # in test-issue1802.t which skipped on windows - (r'^ reverting (?!subrepo ).*/.*[^)]$', winglobmsg), - (r'^ cloning subrepo \S+/.*[^)]$', winglobmsg), - (r'^ pushing to \$TESTTMP/.*[^)]$', winglobmsg), - (r'^ pushing subrepo \S+/\S+ to.*[^)]$', winglobmsg), - (r'^ moving \S+/.*[^)]$', winglobmsg), - (r'^ no changes made to subrepo since.*/.*[^)]$', winglobmsg), - (r'^ .*: largefile \S+ not available from file:.*/.*[^)]$', winglobmsg), - (r'^ .*file://\$TESTTMP', - 'write "file:/*/$TESTTMP" + (glob) to match on windows too'), (r'^ [^$>].*27\.0\.0\.1', 'use $LOCALIP not an explicit loopback address'), - (r'^ [^$>].*\$LOCALIP.*[^)]$', + (r'^ (?![>$] ).*\$LOCALIP.*[^)]$', 'mark $LOCALIP output lines with (glob) to help tests in BSD jails'), - (r'^ (cat|find): .*: No such file or directory', + (r'^ (cat|find): .*: \$ENOENT\$', 'use test -f to test for file existence'), (r'^ diff -[^ -]*p', "don't use (external) diff with -p for portability"), @@ -223,6 +209,7 @@ ] ] +# transform plain test rules to unified test's for i in [0, 1]: 
for tp in testpats[i]: p = tp[0] @@ -233,6 +220,11 @@ p = r"^ [$>] .*(%s)" % p utestpats[i].append((p, m) + tp[2:]) +# don't transform the following rules: +# " > \t" and " \t" should be allowed in unified tests +testpats[0].append((r'^( *)\t', "don't use tabs to indent")) +utestpats[0].append((r'^( ?)\t', "don't use tabs to indent")) + utestfilters = [ (r"<<(\S+)((.|\n)*?\n > \1)", rephere), (r"( +)(#([^!][^\n]*\S)?)", repcomment), diff -r 87676e8ee056 -r 27b6df1b5adb contrib/debian/copyright --- a/contrib/debian/copyright Mon Jan 08 16:07:51 2018 -0800 +++ b/contrib/debian/copyright Mon Jan 22 17:53:02 2018 -0500 @@ -3,7 +3,7 @@ Source: https://www.mercurial-scm.org/ Files: * -Copyright: 2005-2017, Matt Mackall and others. +Copyright: 2005-2018, Matt Mackall and others. License: GPL-2+ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public diff -r 87676e8ee056 -r 27b6df1b5adb contrib/fuzz/Makefile --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/contrib/fuzz/Makefile Mon Jan 22 17:53:02 2018 -0500 @@ -0,0 +1,20 @@ +bdiff.o: ../../mercurial/bdiff.c + clang -g -O1 -fsanitize=fuzzer-no-link,address -c -o bdiff.o \ + ../../mercurial/bdiff.c + +bdiff: bdiff.cc bdiff.o + clang -DHG_FUZZER_INCLUDE_MAIN=1 -g -O1 -fsanitize=fuzzer-no-link,address \ + -I../../mercurial bdiff.cc bdiff.o -o bdiff + +bdiff-oss-fuzz.o: ../../mercurial/bdiff.c + $$CC $$CFLAGS -c -o bdiff-oss-fuzz.o ../../mercurial/bdiff.c + +bdiff_fuzzer: bdiff.cc bdiff-oss-fuzz.o + $$CXX $$CXXFLAGS -std=c++11 -I../../mercurial bdiff.cc \ + bdiff-oss-fuzz.o -lFuzzingEngine -o $$OUT/bdiff_fuzzer + +all: bdiff + +oss-fuzz: bdiff_fuzzer + +.PHONY: all oss-fuzz diff -r 87676e8ee056 -r 27b6df1b5adb contrib/fuzz/bdiff.cc --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/contrib/fuzz/bdiff.cc Mon Jan 22 17:53:02 2018 -0500 @@ -0,0 +1,49 @@ +/* + * bdiff.cc - fuzzer harness for bdiff.c + * + * Copyright 2018, Google Inc. 
+ * + * This software may be used and distributed according to the terms of + * the GNU General Public License, incorporated herein by reference. + */ +#include + +extern "C" { +#include "bdiff.h" + +int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) +{ + if (!Size) { + return 0; + } + // figure out a random point in [0, Size] to split our input. + size_t split = Data[0] / 255.0 * Size; + + // left input to diff is data[1:split] + const uint8_t *left = Data + 1; + // which has len split-1 + size_t left_size = split - 1; + // right starts at the next byte after left ends + const uint8_t *right = left + left_size; + size_t right_size = Size - split; + + struct bdiff_line *a, *b; + int an = bdiff_splitlines((const char *)left, split - 1, &a); + int bn = bdiff_splitlines((const char *)right, right_size, &b); + struct bdiff_hunk l; + bdiff_diff(a, an, b, bn, &l); + free(a); + free(b); + bdiff_freehunks(l.next); + return 0; // Non-zero return values are reserved for future use. +} + +#ifdef HG_FUZZER_INCLUDE_MAIN +int main(int argc, char **argv) +{ + const char data[] = "asdf"; + return LLVMFuzzerTestOneInput((const uint8_t *)data, 4); +} +#endif + +} // extern "C" diff -r 87676e8ee056 -r 27b6df1b5adb contrib/perf.py --- a/contrib/perf.py Mon Jan 08 16:07:51 2018 -0800 +++ b/contrib/perf.py Mon Jan 22 17:53:02 2018 -0500 @@ -25,6 +25,7 @@ import random import struct import sys +import threading import time from mercurial import ( changegroup, @@ -488,6 +489,122 @@ timer(d) fm.end() +@command('perfbundleread', formatteropts, 'BUNDLE') +def perfbundleread(ui, repo, bundlepath, **opts): + """Benchmark reading of bundle files. + + This command is meant to isolate the I/O part of bundle reading as + much as possible. 
+ """ + from mercurial import ( + bundle2, + exchange, + streamclone, + ) + + def makebench(fn): + def run(): + with open(bundlepath, 'rb') as fh: + bundle = exchange.readbundle(ui, fh, bundlepath) + fn(bundle) + + return run + + def makereadnbytes(size): + def run(): + with open(bundlepath, 'rb') as fh: + bundle = exchange.readbundle(ui, fh, bundlepath) + while bundle.read(size): + pass + + return run + + def makestdioread(size): + def run(): + with open(bundlepath, 'rb') as fh: + while fh.read(size): + pass + + return run + + # bundle1 + + def deltaiter(bundle): + for delta in bundle.deltaiter(): + pass + + def iterchunks(bundle): + for chunk in bundle.getchunks(): + pass + + # bundle2 + + def forwardchunks(bundle): + for chunk in bundle._forwardchunks(): + pass + + def iterparts(bundle): + for part in bundle.iterparts(): + pass + + def iterpartsseekable(bundle): + for part in bundle.iterparts(seekable=True): + pass + + def seek(bundle): + for part in bundle.iterparts(seekable=True): + part.seek(0, os.SEEK_END) + + def makepartreadnbytes(size): + def run(): + with open(bundlepath, 'rb') as fh: + bundle = exchange.readbundle(ui, fh, bundlepath) + for part in bundle.iterparts(): + while part.read(size): + pass + + return run + + benches = [ + (makestdioread(8192), 'read(8k)'), + (makestdioread(16384), 'read(16k)'), + (makestdioread(32768), 'read(32k)'), + (makestdioread(131072), 'read(128k)'), + ] + + with open(bundlepath, 'rb') as fh: + bundle = exchange.readbundle(ui, fh, bundlepath) + + if isinstance(bundle, changegroup.cg1unpacker): + benches.extend([ + (makebench(deltaiter), 'cg1 deltaiter()'), + (makebench(iterchunks), 'cg1 getchunks()'), + (makereadnbytes(8192), 'cg1 read(8k)'), + (makereadnbytes(16384), 'cg1 read(16k)'), + (makereadnbytes(32768), 'cg1 read(32k)'), + (makereadnbytes(131072), 'cg1 read(128k)'), + ]) + elif isinstance(bundle, bundle2.unbundle20): + benches.extend([ + (makebench(forwardchunks), 'bundle2 forwardchunks()'), + 
(makebench(iterparts), 'bundle2 iterparts()'), + (makebench(iterpartsseekable), 'bundle2 iterparts() seekable'), + (makebench(seek), 'bundle2 part seek()'), + (makepartreadnbytes(8192), 'bundle2 part read(8k)'), + (makepartreadnbytes(16384), 'bundle2 part read(16k)'), + (makepartreadnbytes(32768), 'bundle2 part read(32k)'), + (makepartreadnbytes(131072), 'bundle2 part read(128k)'), + ]) + elif isinstance(bundle, streamclone.streamcloneapplier): + raise error.Abort('stream clone bundles not supported') + else: + raise error.Abort('unhandled bundle type: %s' % type(bundle)) + + for fn, title in benches: + timer, fm = gettimer(ui, opts) + timer(fn, title=title) + fm.end() + @command('perfchangegroupchangelog', formatteropts + [('', 'version', '02', 'changegroup version'), ('r', 'rev', '', 'revisions to add to changegroup')]) @@ -525,8 +642,8 @@ dirstate = repo.dirstate 'a' in dirstate def d(): - dirstate.dirs() - del dirstate._map.dirs + dirstate.hasdir('a') + del dirstate._map._dirs timer(d) fm.end() @@ -545,8 +662,8 @@ timer, fm = gettimer(ui, opts) "a" in repo.dirstate def d(): - "a" in repo.dirstate._map.dirs - del repo.dirstate._map.dirs + repo.dirstate.hasdir("a") + del repo.dirstate._map._dirs timer(d) fm.end() @@ -569,7 +686,7 @@ def d(): dirstate._map.dirfoldmap.get('a') del dirstate._map.dirfoldmap - del dirstate._map.dirs + del dirstate._map._dirs timer(d) fm.end() @@ -817,11 +934,25 @@ timer(d) fm.end() +def _bdiffworker(q, ready, done): + while not done.is_set(): + pair = q.get() + while pair is not None: + mdiff.textdiff(*pair) + q.task_done() + pair = q.get() + q.task_done() # for the None one + with ready: + ready.wait() + @command('perfbdiff', revlogopts + formatteropts + [ ('', 'count', 1, 'number of revisions to test (when using --startrev)'), - ('', 'alldata', False, 'test bdiffs for all associated revisions')], + ('', 'alldata', False, 'test bdiffs for all associated revisions'), + ('', 'threads', 0, 'number of thread to use (disable with 0)'), + 
], + '-c|-m|FILE REV') -def perfbdiff(ui, repo, file_, rev=None, count=None, **opts): +def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts): """benchmark a bdiff between revisions By default, benchmark a bdiff between its delta parent and itself. @@ -867,14 +998,39 @@ dp = r.deltaparent(rev) textpairs.append((r.revision(dp), r.revision(rev))) - def d(): - for pair in textpairs: - mdiff.textdiff(*pair) - + withthreads = threads > 0 + if not withthreads: + def d(): + for pair in textpairs: + mdiff.textdiff(*pair) + else: + q = util.queue() + for i in xrange(threads): + q.put(None) + ready = threading.Condition() + done = threading.Event() + for i in xrange(threads): + threading.Thread(target=_bdiffworker, args=(q, ready, done)).start() + q.join() + def d(): + for pair in textpairs: + q.put(pair) + for i in xrange(threads): + q.put(None) + with ready: + ready.notify_all() + q.join() timer, fm = gettimer(ui, opts) timer(d) fm.end() + if withthreads: + done.set() + for i in xrange(threads): + q.put(None) + with ready: + ready.notify_all() + @command('perfdiffwd', formatteropts) def perfdiffwd(ui, repo, **opts): """Profile diff of working directory changes""" diff -r 87676e8ee056 -r 27b6df1b5adb contrib/phabricator.py --- a/contrib/phabricator.py Mon Jan 08 16:07:51 2018 -0800 +++ b/contrib/phabricator.py Mon Jan 22 17:53:02 2018 -0500 @@ -166,7 +166,7 @@ _differentialrevisiontagre = re.compile('\AD([1-9][0-9]*)\Z') _differentialrevisiondescre = re.compile( - '^Differential Revision:\s*(?:.*)D([1-9][0-9]*)$', re.M) + '^Differential Revision:\s*(?P(?:.*)D(?P[1-9][0-9]*))$', re.M) def getoldnodedrevmap(repo, nodelist): """find previous nodes that has been sent to Phabricator @@ -207,7 +207,7 @@ # Check commit message m = _differentialrevisiondescre.search(ctx.description()) if m: - toconfirm[node] = (1, set(precnodes), int(m.group(1))) + toconfirm[node] = (1, set(precnodes), int(m.group('id'))) # Double check if tags are genuine by collecting all old 
nodes from # Phabricator, and expect precursors overlap with it. @@ -442,7 +442,7 @@ # Create a local tag to note the association, if commit message # does not have it already m = _differentialrevisiondescre.search(ctx.description()) - if not m or int(m.group(1)) != newrevid: + if not m or int(m.group('id')) != newrevid: tagname = 'D%d' % newrevid tags.tag(repo, tagname, ctx.node(), message=None, user=None, date=None, local=True) @@ -865,3 +865,17 @@ params = {'objectIdentifier': drev[r'phid'], 'transactions': actions} callconduit(repo, 'differential.revision.edit', params) + +templatekeyword = registrar.templatekeyword() + +@templatekeyword('phabreview') +def template_review(repo, ctx, revcache, **args): + """:phabreview: Object describing the review for this changeset. + Has attributes `url` and `id`. + """ + m = _differentialrevisiondescre.search(ctx.description()) + if m: + return { + 'url': m.group('url'), + 'id': "D{}".format(m.group('id')), + } diff -r 87676e8ee056 -r 27b6df1b5adb contrib/python3-whitelist --- a/contrib/python3-whitelist Mon Jan 08 16:07:51 2018 -0800 +++ b/contrib/python3-whitelist Mon Jan 22 17:53:02 2018 -0500 @@ -1,5 +1,8 @@ +test-add.t +test-addremove-similar.t test-addremove.t test-ancestor.py +test-automv.t test-backwards-remove.t test-bheads.t test-bisect2.t @@ -7,6 +10,7 @@ test-bookmarks-strip.t test-branch-tag-confict.t test-casecollision.t +test-cat.t test-changelog-exec.t test-check-commit.t test-check-execute.t @@ -14,7 +18,9 @@ test-check-pyflakes.t test-check-pylint.t test-check-shbang.t +test-children.t test-commit-unresolved.t +test-completion.t test-contrib-check-code.t test-contrib-check-commit.t test-debugrename.t @@ -24,6 +30,8 @@ test-diff-newlines.t test-diff-reverse.t test-diff-subdir.t +test-diffdir.t +test-directaccess.t test-dirstate-nonnormalset.t test-doctest.py test-double-merge.t @@ -33,11 +41,17 @@ test-empty.t test-encoding-func.py test-excessive-merge.t +test-execute-bit.t +test-gpg.t test-hghave.t 
test-imports-checker.t test-issue1089.t +test-issue1175.t +test-issue1502.t +test-issue1802.t test-issue1877.t test-issue1993.t +test-issue522.t test-issue612.t test-issue619.t test-issue672.t @@ -46,30 +60,72 @@ test-locate.t test-lrucachedict.py test-manifest.py +test-manifest-merging.t test-match.py test-merge-default.t +test-merge-internal-tools-pattern.t +test-merge-remove.t +test-merge-revert.t +test-merge-revert2.t +test-merge-subrepos.t +test-merge10.t test-merge2.t test-merge4.t test-merge5.t +test-merge6.t +test-merge7.t +test-merge8.t +test-mq-qimport-fail-cleanup.t +test-obshistory.t test-permissions.t +test-push-checkheads-partial-C1.t +test-push-checkheads-partial-C2.t +test-push-checkheads-partial-C3.t +test-push-checkheads-partial-C4.t test-push-checkheads-pruned-B1.t +test-push-checkheads-pruned-B2.t +test-push-checkheads-pruned-B3.t +test-push-checkheads-pruned-B4.t +test-push-checkheads-pruned-B5.t test-push-checkheads-pruned-B6.t test-push-checkheads-pruned-B7.t +test-push-checkheads-pruned-B8.t test-push-checkheads-superceed-A1.t +test-push-checkheads-superceed-A2.t +test-push-checkheads-superceed-A3.t test-push-checkheads-superceed-A4.t test-push-checkheads-superceed-A5.t +test-push-checkheads-superceed-A6.t +test-push-checkheads-superceed-A7.t test-push-checkheads-superceed-A8.t test-push-checkheads-unpushed-D1.t +test-push-checkheads-unpushed-D2.t +test-push-checkheads-unpushed-D3.t +test-push-checkheads-unpushed-D4.t +test-push-checkheads-unpushed-D5.t test-push-checkheads-unpushed-D6.t test-push-checkheads-unpushed-D7.t +test-record.t +test-rename-dir-merge.t test-rename-merge1.t test-rename.t +test-revert-flags.t +test-revert-unknown.t +test-revlog-group-emptyiter.t +test-revlog-mmapindex.t test-revlog-packentry.t test-run-tests.py test-show-stack.t +test-simple-update.t +test-sparse-clear.t +test-sparse-merges.t +test-sparse-requirement.t +test-sparse-verbose-json.t test-status-terse.t -test-terse-status.t +test-uncommit.t 
test-unified-test.t +test-unrelated-pull.t test-update-issue1456.t +test-update-names.t test-update-reverse.t test-xdg.t diff -r 87676e8ee056 -r 27b6df1b5adb contrib/showstack.py --- a/contrib/showstack.py Mon Jan 08 16:07:51 2018 -0800 +++ b/contrib/showstack.py Mon Jan 22 17:53:02 2018 -0500 @@ -1,6 +1,8 @@ # showstack.py - extension to dump a Python stack trace on signal # # binds to both SIGQUIT (Ctrl-\) and SIGINFO (Ctrl-T on BSDs) +"""dump stack trace when receiving SIGQUIT (Ctrl-\) and SIGINFO (Ctrl-T on BSDs) +""" from __future__ import absolute_import import signal diff -r 87676e8ee056 -r 27b6df1b5adb contrib/synthrepo.py --- a/contrib/synthrepo.py Mon Jan 08 16:07:51 2018 -0800 +++ b/contrib/synthrepo.py Mon Jan 22 17:53:02 2018 -0500 @@ -369,14 +369,14 @@ while not validpath(path): path = pickpath() data = '%s contents\n' % path - files[path] = context.memfilectx(repo, path, data) + files[path] = data dir = os.path.dirname(path) while dir and dir not in dirs: dirs.add(dir) dir = os.path.dirname(dir) def filectxfn(repo, memctx, path): - return files[path] + return context.memfilectx(repo, memctx, path, files[path]) ui.progress(_synthesizing, None) message = 'synthesized wide repo with %d files' % (len(files),) @@ -444,14 +444,12 @@ for __ in xrange(add): lines.insert(random.randint(0, len(lines)), makeline()) path = fctx.path() - changes[path] = context.memfilectx(repo, path, - '\n'.join(lines) + '\n') + changes[path] = '\n'.join(lines) + '\n' for __ in xrange(pick(filesremoved)): path = random.choice(mfk) for __ in xrange(10): path = random.choice(mfk) if path not in changes: - changes[path] = None break if filesadded: dirs = list(pctx.dirs()) @@ -466,9 +464,11 @@ pathstr = '/'.join(filter(None, path)) data = '\n'.join(makeline() for __ in xrange(pick(linesinfilesadded))) + '\n' - changes[pathstr] = context.memfilectx(repo, pathstr, data) + changes[pathstr] = data def filectxfn(repo, memctx, path): - return changes[path] + if path not in changes: + 
return None + return context.memfilectx(repo, memctx, path, changes[path]) if not changes: continue if revs: diff -r 87676e8ee056 -r 27b6df1b5adb contrib/win32/ReadMe.html --- a/contrib/win32/ReadMe.html Mon Jan 08 16:07:51 2018 -0800 +++ b/contrib/win32/ReadMe.html Mon Jan 22 17:53:02 2018 -0500 @@ -140,7 +140,7 @@

- Mercurial is Copyright 2005-2017 Matt Mackall and others. See + Mercurial is Copyright 2005-2018 Matt Mackall and others. See the Contributors.txt file for a list of contributors.

diff -r 87676e8ee056 -r 27b6df1b5adb contrib/win32/mercurial.iss --- a/contrib/win32/mercurial.iss Mon Jan 08 16:07:51 2018 -0800 +++ b/contrib/win32/mercurial.iss Mon Jan 22 17:53:02 2018 -0500 @@ -21,7 +21,7 @@ #endif [Setup] -AppCopyright=Copyright 2005-2017 Matt Mackall and others +AppCopyright=Copyright 2005-2018 Matt Mackall and others AppName=Mercurial AppVersion={#VERSION} #if ARCH == "x64" @@ -45,7 +45,7 @@ DefaultDirName={pf}\Mercurial SourceDir=..\.. VersionInfoDescription=Mercurial distributed SCM (version {#VERSION}) -VersionInfoCopyright=Copyright 2005-2017 Matt Mackall and others +VersionInfoCopyright=Copyright 2005-2018 Matt Mackall and others VersionInfoCompany=Matt Mackall and others InternalCompressLevel=max SolidCompression=true diff -r 87676e8ee056 -r 27b6df1b5adb contrib/wix/COPYING.rtf Binary file contrib/wix/COPYING.rtf has changed diff -r 87676e8ee056 -r 27b6df1b5adb contrib/wix/help.wxs --- a/contrib/wix/help.wxs Mon Jan 08 16:07:51 2018 -0800 +++ b/contrib/wix/help.wxs Mon Jan 22 17:53:02 2018 -0500 @@ -23,6 +23,7 @@ + diff -r 87676e8ee056 -r 27b6df1b5adb contrib/wix/templates.wxs --- a/contrib/wix/templates.wxs Mon Jan 08 16:07:51 2018 -0800 +++ b/contrib/wix/templates.wxs Mon Jan 22 17:53:02 2018 -0500 @@ -42,6 +42,7 @@ + @@ -85,6 +86,7 @@ + @@ -114,6 +116,7 @@ + @@ -143,6 +146,7 @@ + @@ -208,6 +212,7 @@ + @@ -225,7 +230,6 @@ - diff -r 87676e8ee056 -r 27b6df1b5adb hgdemandimport/demandimportpy3.py --- a/hgdemandimport/demandimportpy3.py Mon Jan 08 16:07:51 2018 -0800 +++ b/hgdemandimport/demandimportpy3.py Mon Jan 22 17:53:02 2018 -0500 @@ -46,7 +46,7 @@ super().exec_module(module) # This is 3.6+ because with Python 3.5 it isn't possible to lazily load -# extensions. See the discussion in https://python.org/sf/26186 for more. +# extensions. See the discussion in https://bugs.python.org/issue26186 for more. 
_extensions_loader = _lazyloaderex.factory( importlib.machinery.ExtensionFileLoader) _bytecode_loader = _lazyloaderex.factory( diff -r 87676e8ee056 -r 27b6df1b5adb hgext/amend.py --- a/hgext/amend.py Mon Jan 08 16:07:51 2018 -0800 +++ b/hgext/amend.py Mon Jan 22 17:53:02 2018 -0500 @@ -17,6 +17,7 @@ cmdutil, commands, error, + pycompat, registrar, ) @@ -46,10 +47,11 @@ See :hg:`help commit` for more details. """ + opts = pycompat.byteskwargs(opts) if len(opts['note']) > 255: raise error.Abort(_("cannot store a note of more than 255 bytes")) with repo.wlock(), repo.lock(): if not opts.get('logfile'): opts['message'] = opts.get('message') or repo['.'].description() opts['amend'] = True - return commands._docommit(ui, repo, *pats, **opts) + return commands._docommit(ui, repo, *pats, **pycompat.strkwargs(opts)) diff -r 87676e8ee056 -r 27b6df1b5adb hgext/automv.py --- a/hgext/automv.py Mon Jan 08 16:07:51 2018 -0800 +++ b/hgext/automv.py Mon Jan 22 17:53:02 2018 -0500 @@ -32,6 +32,7 @@ copies, error, extensions, + pycompat, registrar, scmutil, similar @@ -53,6 +54,7 @@ def mvcheck(orig, ui, repo, *pats, **opts): """Hook to check for moves at commit time""" + opts = pycompat.byteskwargs(opts) renames = None disabled = opts.pop('no_automv', False) if not disabled: @@ -68,7 +70,7 @@ with repo.wlock(): if renames is not None: scmutil._markchanges(repo, (), (), renames) - return orig(ui, repo, *pats, **opts) + return orig(ui, repo, *pats, **pycompat.strkwargs(opts)) def _interestingfiles(repo, matcher): """Find what files were added or removed in this commit. 
diff -r 87676e8ee056 -r 27b6df1b5adb hgext/blackbox.py --- a/hgext/blackbox.py Mon Jan 08 16:07:51 2018 -0800 +++ b/hgext/blackbox.py Mon Jan 22 17:53:02 2018 -0500 @@ -44,6 +44,7 @@ from mercurial.node import hex from mercurial import ( + encoding, registrar, ui as uimod, util, @@ -129,6 +130,11 @@ def track(self): return self.configlist('blackbox', 'track') + def debug(self, *msg, **opts): + super(blackboxui, self).debug(*msg, **opts) + if self.debugflag: + self.log('debug', '%s', ''.join(msg)) + def log(self, event, *msg, **opts): global lastui super(blackboxui, self).log(event, *msg, **opts) @@ -182,7 +188,7 @@ fp.write(fmt % args) except (IOError, OSError) as err: self.debug('warning: cannot write to blackbox.log: %s\n' % - err.strerror) + encoding.strtolocal(err.strerror)) # do not restore _bbinlog intentionally to avoid failed # logging again else: @@ -226,7 +232,7 @@ if not repo.vfs.exists('blackbox.log'): return - limit = opts.get('limit') + limit = opts.get(r'limit') fp = repo.vfs('blackbox.log', 'r') lines = fp.read().split('\n') diff -r 87676e8ee056 -r 27b6df1b5adb hgext/bugzilla.py --- a/hgext/bugzilla.py Mon Jan 08 16:07:51 2018 -0800 +++ b/hgext/bugzilla.py Mon Jan 22 17:53:02 2018 -0500 @@ -580,7 +580,7 @@ self.ui.warn(_("Bugzilla/MySQL cannot update bug state\n")) (user, userid) = self.get_bugzilla_user(committer) - now = time.strftime('%Y-%m-%d %H:%M:%S') + now = time.strftime(r'%Y-%m-%d %H:%M:%S') self.run('''insert into longdescs (bug_id, who, bug_when, thetext) values (%s, %s, %s, %s)''', diff -r 87676e8ee056 -r 27b6df1b5adb hgext/children.py --- a/hgext/children.py Mon Jan 08 16:07:51 2018 -0800 +++ b/hgext/children.py Mon Jan 22 17:53:02 2018 -0500 @@ -19,6 +19,7 @@ from mercurial.i18n import _ from mercurial import ( cmdutil, + pycompat, registrar, ) @@ -55,6 +56,7 @@ See :hg:`help log` and :hg:`help revsets.children`. 
""" + opts = pycompat.byteskwargs(opts) rev = opts.get('rev') if file_: fctx = repo.filectx(file_, changeid=rev) diff -r 87676e8ee056 -r 27b6df1b5adb hgext/churn.py --- a/hgext/churn.py Mon Jan 08 16:07:51 2018 -0800 +++ b/hgext/churn.py Mon Jan 22 17:53:02 2018 -0500 @@ -19,6 +19,7 @@ cmdutil, encoding, patch, + pycompat, registrar, scmutil, util, @@ -45,6 +46,7 @@ def countrate(ui, repo, amap, *pats, **opts): """Calculate stats""" + opts = pycompat.byteskwargs(opts) if opts.get('dateformat'): def getkey(ctx): t, tz = ctx.date() @@ -154,7 +156,7 @@ return s + " " * (l - encoding.colwidth(s)) amap = {} - aliases = opts.get('aliases') + aliases = opts.get(r'aliases') if not aliases and os.path.exists(repo.wjoin('.hgchurn')): aliases = repo.wjoin('.hgchurn') if aliases: @@ -172,7 +174,7 @@ if not rate: return - if opts.get('sort'): + if opts.get(r'sort'): rate.sort() else: rate.sort(key=lambda x: (-sum(x[1]), x)) @@ -185,7 +187,7 @@ ui.debug("assuming %i character terminal\n" % ttywidth) width = ttywidth - maxname - 2 - 2 - 2 - if opts.get('diffstat'): + if opts.get(r'diffstat'): width -= 15 def format(name, diffstat): added, removed = diffstat diff -r 87676e8ee056 -r 27b6df1b5adb hgext/commitextras.py --- a/hgext/commitextras.py Mon Jan 08 16:07:51 2018 -0800 +++ b/hgext/commitextras.py Mon Jan 22 17:53:02 2018 -0500 @@ -46,7 +46,7 @@ origcommit = repo.commit try: def _wrappedcommit(*innerpats, **inneropts): - extras = opts.get('extra') + extras = opts.get(r'extra') if extras: for raw in extras: if '=' not in raw: @@ -65,7 +65,7 @@ msg = _("key '%s' is used internally, can't be set " "manually") raise error.Abort(msg % k) - inneropts['extra'][k] = v + inneropts[r'extra'][k] = v return origcommit(*innerpats, **inneropts) # This __dict__ logic is needed because the normal diff -r 87676e8ee056 -r 27b6df1b5adb hgext/convert/bzr.py --- a/hgext/convert/bzr.py Mon Jan 08 16:07:51 2018 -0800 +++ b/hgext/convert/bzr.py Mon Jan 22 17:53:02 2018 -0500 @@ -44,8 +44,8 @@ class 
bzr_source(common.converter_source): """Reads Bazaar repositories by using the Bazaar Python libraries""" - def __init__(self, ui, path, revs=None): - super(bzr_source, self).__init__(ui, path, revs=revs) + def __init__(self, ui, repotype, path, revs=None): + super(bzr_source, self).__init__(ui, repotype, path, revs=revs) if not os.path.exists(os.path.join(path, '.bzr')): raise common.NoRepo(_('%s does not look like a Bazaar repository') diff -r 87676e8ee056 -r 27b6df1b5adb hgext/convert/common.py --- a/hgext/convert/common.py Mon Jan 08 16:07:51 2018 -0800 +++ b/hgext/convert/common.py Mon Jan 22 17:53:02 2018 -0500 @@ -73,12 +73,13 @@ class converter_source(object): """Conversion source interface""" - def __init__(self, ui, path=None, revs=None): + def __init__(self, ui, repotype, path=None, revs=None): """Initialize conversion source (or raise NoRepo("message") exception if path is not a valid repository)""" self.ui = ui self.path = path self.revs = revs + self.repotype = repotype self.encoding = 'utf-8' @@ -218,7 +219,7 @@ class converter_sink(object): """Conversion sink (target) interface""" - def __init__(self, ui, path): + def __init__(self, ui, repotype, path): """Initialize conversion sink (or raise NoRepo("message") exception if path is not a valid repository) @@ -227,6 +228,7 @@ self.ui = ui self.path = path self.created = [] + self.repotype = repotype def revmapfile(self): """Path to a file that will contain lines diff -r 87676e8ee056 -r 27b6df1b5adb hgext/convert/convcmd.py --- a/hgext/convert/convcmd.py Mon Jan 08 16:07:51 2018 -0800 +++ b/hgext/convert/convcmd.py Mon Jan 22 17:53:02 2018 -0500 @@ -6,6 +6,7 @@ # GNU General Public License version 2 or any later version. 
from __future__ import absolute_import +import collections import os import shlex import shutil @@ -15,6 +16,7 @@ encoding, error, hg, + scmutil, util, ) @@ -114,7 +116,7 @@ for name, source, sortmode in source_converters: try: if not type or name == type: - return source(ui, path, revs), sortmode + return source(ui, name, path, revs), sortmode except (NoRepo, MissingTool) as inst: exceptions.append(inst) if not ui.quiet: @@ -128,7 +130,7 @@ for name, sink in sink_converters: try: if not type or name == type: - return sink(ui, path) + return sink(ui, name, path) except NoRepo as inst: ui.note(_("convert: %s\n") % inst) except MissingTool as inst: @@ -289,13 +291,13 @@ revisions without parents. 'parents' must be a mapping of revision identifier to its parents ones. """ - visit = sorted(parents) + visit = collections.deque(sorted(parents)) seen = set() children = {} roots = [] while visit: - n = visit.pop(0) + n = visit.popleft() if n in seen: continue seen.add(n) @@ -449,7 +451,7 @@ commit = self.commitcache[rev] full = self.opts.get('full') changes = self.source.getchanges(rev, full) - if isinstance(changes, basestring): + if isinstance(changes, bytes): if changes == SKIPREV: dest = SKIPREV else: @@ -575,6 +577,7 @@ ui.status(_("assuming destination %s\n") % dest) destc = convertsink(ui, dest, opts.get('dest_type')) + destc = scmutil.wrapconvertsink(destc) try: srcc, defaultsort = convertsource(ui, src, opts.get('source_type'), diff -r 87676e8ee056 -r 27b6df1b5adb hgext/convert/cvs.py --- a/hgext/convert/cvs.py Mon Jan 08 16:07:51 2018 -0800 +++ b/hgext/convert/cvs.py Mon Jan 22 17:53:02 2018 -0500 @@ -32,8 +32,8 @@ NoRepo = common.NoRepo class convert_cvs(converter_source): - def __init__(self, ui, path, revs=None): - super(convert_cvs, self).__init__(ui, path, revs=revs) + def __init__(self, ui, repotype, path, revs=None): + super(convert_cvs, self).__init__(ui, repotype, path, revs=revs) cvs = os.path.join(path, "CVS") if not os.path.exists(cvs): diff -r 
87676e8ee056 -r 27b6df1b5adb hgext/convert/darcs.py --- a/hgext/convert/darcs.py Mon Jan 08 16:07:51 2018 -0800 +++ b/hgext/convert/darcs.py Mon Jan 22 17:53:02 2018 -0500 @@ -40,8 +40,8 @@ pass class darcs_source(common.converter_source, common.commandline): - def __init__(self, ui, path, revs=None): - common.converter_source.__init__(self, ui, path, revs=revs) + def __init__(self, ui, repotype, path, revs=None): + common.converter_source.__init__(self, ui, repotype, path, revs=revs) common.commandline.__init__(self, ui, 'darcs') # check for _darcs, ElementTree so that we can easily skip diff -r 87676e8ee056 -r 27b6df1b5adb hgext/convert/filemap.py --- a/hgext/convert/filemap.py Mon Jan 08 16:07:51 2018 -0800 +++ b/hgext/convert/filemap.py Mon Jan 22 17:53:02 2018 -0500 @@ -172,7 +172,7 @@ class filemap_source(common.converter_source): def __init__(self, ui, baseconverter, filemap): - super(filemap_source, self).__init__(ui) + super(filemap_source, self).__init__(ui, baseconverter.repotype) self.base = baseconverter self.filemapper = filemapper(ui, filemap) self.commits = {} diff -r 87676e8ee056 -r 27b6df1b5adb hgext/convert/git.py --- a/hgext/convert/git.py Mon Jan 08 16:07:51 2018 -0800 +++ b/hgext/convert/git.py Mon Jan 22 17:53:02 2018 -0500 @@ -66,8 +66,8 @@ def gitpipe(self, *args, **kwargs): return self._gitcmd(self._run3, *args, **kwargs) - def __init__(self, ui, path, revs=None): - super(convert_git, self).__init__(ui, path, revs=revs) + def __init__(self, ui, repotype, path, revs=None): + super(convert_git, self).__init__(ui, repotype, path, revs=revs) common.commandline.__init__(self, ui, 'git') # Pass an absolute path to git to prevent from ever being interpreted @@ -342,13 +342,15 @@ p = v.split() tm, tz = p[-2:] author = " ".join(p[:-2]) - if author[0] == "<": author = author[1:-1] + if author[0] == "<": + author = author[1:-1] author = self.recode(author) if n == "committer": p = v.split() tm, tz = p[-2:] committer = " ".join(p[:-2]) - if 
committer[0] == "<": committer = committer[1:-1] + if committer[0] == "<": + committer = committer[1:-1] committer = self.recode(committer) if n == "parent": parents.append(v) diff -r 87676e8ee056 -r 27b6df1b5adb hgext/convert/gnuarch.py --- a/hgext/convert/gnuarch.py Mon Jan 08 16:07:51 2018 -0800 +++ b/hgext/convert/gnuarch.py Mon Jan 22 17:53:02 2018 -0500 @@ -7,7 +7,7 @@ # GNU General Public License version 2 or any later version. from __future__ import absolute_import -import email +import email.parser as emailparser import os import shutil import stat @@ -36,8 +36,8 @@ self.ren_files = {} self.ren_dirs = {} - def __init__(self, ui, path, revs=None): - super(gnuarch_source, self).__init__(ui, path, revs=revs) + def __init__(self, ui, repotype, path, revs=None): + super(gnuarch_source, self).__init__(ui, repotype, path, revs=revs) if not os.path.exists(os.path.join(path, '{arch}')): raise common.NoRepo(_("%s does not look like a GNU Arch repository") @@ -63,7 +63,7 @@ self.changes = {} self.parents = {} self.tags = {} - self.catlogparser = email.Parser.Parser() + self.catlogparser = emailparser.Parser() self.encoding = encoding.encoding self.archives = [] diff -r 87676e8ee056 -r 27b6df1b5adb hgext/convert/hg.py --- a/hgext/convert/hg.py Mon Jan 08 16:07:51 2018 -0800 +++ b/hgext/convert/hg.py Mon Jan 22 17:53:02 2018 -0500 @@ -45,8 +45,8 @@ sha1re = re.compile(r'\b[0-9a-f]{12,40}\b') class mercurial_sink(common.converter_sink): - def __init__(self, ui, path): - common.converter_sink.__init__(self, ui, path) + def __init__(self, ui, repotype, path): + common.converter_sink.__init__(self, ui, repotype, path) self.branchnames = ui.configbool('convert', 'hg.usebranchnames') self.clonebranches = ui.configbool('convert', 'hg.clonebranches') self.tagsbranch = ui.config('convert', 'hg.tagsbranch') @@ -253,7 +253,7 @@ data = self._rewritetags(source, revmap, data) if f == '.hgsubstate': data = self._rewritesubstate(source, data) - return context.memfilectx(self.repo, f, 
data, 'l' in mode, + return context.memfilectx(self.repo, memctx, f, data, 'l' in mode, 'x' in mode, copies.get(f)) pl = [] @@ -401,7 +401,7 @@ data = "".join(newlines) def getfilectx(repo, memctx, f): - return context.memfilectx(repo, f, data, False, False, None) + return context.memfilectx(repo, memctx, f, data, False, False, None) self.ui.status(_("updating tags\n")) date = "%s 0" % int(time.mktime(time.gmtime())) @@ -444,8 +444,8 @@ return rev in self.repo class mercurial_source(common.converter_source): - def __init__(self, ui, path, revs=None): - common.converter_source.__init__(self, ui, path, revs) + def __init__(self, ui, repotype, path, revs=None): + common.converter_source.__init__(self, ui, repotype, path, revs) self.ignoreerrors = ui.configbool('convert', 'hg.ignoreerrors') self.ignored = set() self.saverev = ui.configbool('convert', 'hg.saverev') diff -r 87676e8ee056 -r 27b6df1b5adb hgext/convert/monotone.py --- a/hgext/convert/monotone.py Mon Jan 08 16:07:51 2018 -0800 +++ b/hgext/convert/monotone.py Mon Jan 22 17:53:02 2018 -0500 @@ -19,8 +19,8 @@ from . import common class monotone_source(common.converter_source, common.commandline): - def __init__(self, ui, path=None, revs=None): - common.converter_source.__init__(self, ui, path, revs) + def __init__(self, ui, repotype, path=None, revs=None): + common.converter_source.__init__(self, ui, repotype, path, revs) if revs and len(revs) > 1: raise error.Abort(_('monotone source does not support specifying ' 'multiple revs')) diff -r 87676e8ee056 -r 27b6df1b5adb hgext/convert/p4.py --- a/hgext/convert/p4.py Mon Jan 08 16:07:51 2018 -0800 +++ b/hgext/convert/p4.py Mon Jan 22 17:53:02 2018 -0500 @@ -43,11 +43,11 @@ return filename class p4_source(common.converter_source): - def __init__(self, ui, path, revs=None): + def __init__(self, ui, repotype, path, revs=None): # avoid import cycle from . 
import convcmd - super(p4_source, self).__init__(ui, path, revs=revs) + super(p4_source, self).__init__(ui, repotype, path, revs=revs) if "/" in path and not path.startswith('//'): raise common.NoRepo(_('%s does not look like a P4 repository') % diff -r 87676e8ee056 -r 27b6df1b5adb hgext/convert/subversion.py --- a/hgext/convert/subversion.py Mon Jan 08 16:07:51 2018 -0800 +++ b/hgext/convert/subversion.py Mon Jan 22 17:53:02 2018 -0500 @@ -285,8 +285,8 @@ # the parent module. A revision has at most one parent. # class svn_source(converter_source): - def __init__(self, ui, url, revs=None): - super(svn_source, self).__init__(ui, url, revs=revs) + def __init__(self, ui, repotype, url, revs=None): + super(svn_source, self).__init__(ui, repotype, url, revs=revs) if not (url.startswith('svn://') or url.startswith('svn+ssh://') or (os.path.exists(url) and @@ -1112,9 +1112,9 @@ def authorfile(self): return self.join('hg-authormap') - def __init__(self, ui, path): + def __init__(self, ui, repotype, path): - converter_sink.__init__(self, ui, path) + converter_sink.__init__(self, ui, repotype, path) commandline.__init__(self, ui, 'svn') self.delete = [] self.setexec = [] diff -r 87676e8ee056 -r 27b6df1b5adb hgext/extdiff.py --- a/hgext/extdiff.py Mon Jan 08 16:07:51 2018 -0800 +++ b/hgext/extdiff.py Mon Jan 22 17:53:02 2018 -0500 @@ -338,6 +338,7 @@ that revision is compared to the working directory, and, when no revisions are specified, the working directory files are compared to its parent.''' + opts = pycompat.byteskwargs(opts) program = opts.get('program') option = opts.get('option') if not program: @@ -369,6 +370,7 @@ self._cmdline = cmdline def __call__(self, ui, repo, *pats, **opts): + opts = pycompat.byteskwargs(opts) options = ' '.join(map(util.shellquote, opts['option'])) if options: options = ' ' + options diff -r 87676e8ee056 -r 27b6df1b5adb hgext/fetch.py --- a/hgext/fetch.py Mon Jan 08 16:07:51 2018 -0800 +++ b/hgext/fetch.py Mon Jan 22 17:53:02 2018 -0500 @@ 
-19,6 +19,7 @@ exchange, hg, lock, + pycompat, registrar, util, ) @@ -60,6 +61,7 @@ Returns 0 on success. ''' + opts = pycompat.byteskwargs(opts) date = opts.get('date') if date: opts['date'] = util.parsedate(date) diff -r 87676e8ee056 -r 27b6df1b5adb hgext/fsmonitor/__init__.py --- a/hgext/fsmonitor/__init__.py Mon Jan 08 16:07:51 2018 -0800 +++ b/hgext/fsmonitor/__init__.py Mon Jan 22 17:53:02 2018 -0500 @@ -117,7 +117,6 @@ from mercurial.i18n import _ from mercurial.node import ( hex, - nullid, ) from mercurial import ( @@ -165,9 +164,6 @@ configitem('experimental', 'fsmonitor.transaction_notify', default=False, ) -configitem('experimental', 'fsmonitor.wc_change_notify', - default=False, -) # This extension is incompatible with the following blacklisted extensions # and will disable itself when encountering one of these: @@ -224,16 +220,21 @@ Whenever full is False, ignored is False, and the Watchman client is available, use Watchman combined with saved state to possibly return only a subset of files.''' - def bail(): + def bail(reason): + self._ui.debug('fsmonitor: fallback to core status, %s\n' % reason) return orig(match, subrepos, unknown, ignored, full=True) - if full or ignored or not self._watchmanclient.available(): - return bail() + if full: + return bail('full rewalk requested') + if ignored: + return bail('listing ignored files') + if not self._watchmanclient.available(): + return bail('client unavailable') state = self._fsmonitorstate clock, ignorehash, notefiles = state.get() if not clock: if state.walk_on_invalidate: - return bail() + return bail('no clock') # Initial NULL clock value, see # https://facebook.github.io/watchman/docs/clockspec.html clock = 'c:0:0' @@ -263,7 +264,7 @@ if _hashignore(ignore) != ignorehash and clock != 'c:0:0': # ignore list changed -- can't rely on Watchman state any more if state.walk_on_invalidate: - return bail() + return bail('ignore rules changed') notefiles = [] clock = 'c:0:0' else: @@ -273,7 +274,11 @@ matchfn 
= match.matchfn matchalways = match.always() - dmap = self._map._map + dmap = self._map + if util.safehasattr(dmap, '_map'): + # for better performance, directly access the inner dirstate map if the + # standard dirstate implementation is in use. + dmap = dmap._map nonnormalset = self._map.nonnormalset copymap = self._map.copymap @@ -334,7 +339,7 @@ except Exception as ex: _handleunavailable(self._ui, state, ex) self._watchmanclient.clearconnection() - return bail() + return bail('exception during run') else: # We need to propagate the last observed clock up so that we # can use it for our next query @@ -342,7 +347,7 @@ if result['is_fresh_instance']: if state.walk_on_invalidate: state.invalidate() - return bail() + return bail('fresh instance') fresh_instance = True # Ignore any prior noteable files from the state info notefiles = [] @@ -600,14 +605,6 @@ self._fsmonitorstate.invalidate() return super(fsmonitordirstate, self).invalidate(*args, **kwargs) - if dirstate._ui.configbool( - "experimental", "fsmonitor.wc_change_notify"): - def setparents(self, p1, p2=nullid): - with state_update(self._repo, name="hg.wc_change", - oldnode=self._pl[0], newnode=p1, - partial=False): - return super(fsmonitordirstate, self).setparents(p1, p2) - dirstate.__class__ = fsmonitordirstate dirstate._fsmonitorinit(repo) @@ -662,14 +659,18 @@ self.enter() def enter(self): - # We explicitly need to take a lock here, before we proceed to update - # watchman about the update operation, so that we don't race with - # some other actor. merge.update is going to take the wlock almost - # immediately anyway, so this is effectively extending the lock - # around a couple of short sanity checks. + # Make sure we have a wlock prior to sending notifications to watchman. + # We don't want to race with other actors. In the update case, + # merge.update is going to take the wlock almost immediately. We are + # effectively extending the lock around several short sanity checks. 
if self.oldnode is None: self.oldnode = self.repo['.'].node() - self._lock = self.repo.wlock() + + if self.repo.currentwlock() is None: + if util.safehasattr(self.repo, 'wlocknostateupdate'): + self._lock = self.repo.wlocknostateupdate() + else: + self._lock = self.repo.wlock() self.need_leave = self._state( 'state-enter', hex(self.oldnode)) @@ -790,32 +791,34 @@ orig = super(fsmonitorrepo, self).status return overridestatus(orig, self, *args, **kwargs) - if ui.configbool("experimental", "fsmonitor.transaction_notify"): - def transaction(self, *args, **kwargs): - tr = super(fsmonitorrepo, self).transaction( - *args, **kwargs) - if tr.count != 1: - return tr - stateupdate = state_update(self, name="hg.transaction") - stateupdate.enter() + def wlocknostateupdate(self, *args, **kwargs): + return super(fsmonitorrepo, self).wlock(*args, **kwargs) + + def wlock(self, *args, **kwargs): + l = super(fsmonitorrepo, self).wlock(*args, **kwargs) + if not ui.configbool( + "experimental", "fsmonitor.transaction_notify"): + return l + if l.held != 1: + return l + origrelease = l.releasefn - class fsmonitortrans(tr.__class__): - def _abort(self): - try: - result = super(fsmonitortrans, self)._abort() - finally: - stateupdate.exit(abort=True) - return result + def staterelease(): + if origrelease: + origrelease() + if l.stateupdate: + l.stateupdate.exit() + l.stateupdate = None - def close(self): - try: - result = super(fsmonitortrans, self).close() - finally: - if self.count == 0: - stateupdate.exit() - return result - - tr.__class__ = fsmonitortrans - return tr + try: + l.stateupdate = None + l.stateupdate = state_update(self, name="hg.transaction") + l.stateupdate.enter() + l.releasefn = staterelease + except Exception as e: + # Swallow any errors; fire and forget + self.ui.log( + 'watchman', 'Exception in state update %s\n', e) + return l repo.__class__ = fsmonitorrepo diff -r 87676e8ee056 -r 27b6df1b5adb hgext/githelp.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ 
b/hgext/githelp.py Mon Jan 22 17:53:02 2018 -0500 @@ -0,0 +1,1073 @@ +# githelp.py - Try to map Git commands to Mercurial equivalents. +# +# Copyright 2013 Facebook, Inc. +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. +"""try mapping git commands to Mercurial commands + +Tries to map a given git command to a Mercurial command: + + $ hg githelp -- git checkout master + hg update master + +If an unknown command or parameter combination is detected, an error is +produced. +""" + +from __future__ import absolute_import + +import getopt +import re + +from mercurial.i18n import _ +from mercurial import ( + error, + fancyopts, + registrar, + util, +) + +# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for +# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should +# be specifying the version(s) of Mercurial they are tested with, or +# leave the attribute unspecified. 
+testedwith = 'ships-with-hg-core' + +cmdtable = {} +command = registrar.command(cmdtable) + +def convert(s): + if s.startswith("origin/"): + return s[7:] + if 'HEAD' in s: + s = s.replace('HEAD', '.') + # HEAD~ in git is .~1 in mercurial + s = re.sub('~$', '~1', s) + return s + +@command('^githelp|git', [ + ], _('hg githelp')) +def githelp(ui, repo, *args, **kwargs): + '''suggests the Mercurial equivalent of the given git command + + Usage: hg githelp -- + ''' + + if len(args) == 0 or (len(args) == 1 and args[0] =='git'): + raise error.Abort(_('missing git command - ' + 'usage: hg githelp -- ')) + + if args[0] == 'git': + args = args[1:] + + cmd = args[0] + if not cmd in gitcommands: + raise error.Abort("error: unknown git command %s" % (cmd)) + + ui.pager('githelp') + args = args[1:] + return gitcommands[cmd](ui, repo, *args, **kwargs) + +def parseoptions(ui, cmdoptions, args): + cmdoptions = list(cmdoptions) + opts = {} + args = list(args) + while True: + try: + args = fancyopts.fancyopts(list(args), cmdoptions, opts, True) + break + except getopt.GetoptError as ex: + flag = None + if "requires argument" in ex.msg: + raise + if ('--' + ex.opt) in ex.msg: + flag = '--' + ex.opt + elif ('-' + ex.opt) in ex.msg: + flag = '-' + ex.opt + else: + raise error.Abort("unknown option %s" % ex.opt) + try: + args.remove(flag) + except Exception: + raise error.Abort( + "unknown option {0} packed with other options\n" + "Please try passing the option as it's own flag: -{0}" \ + .format(ex.opt)) + + ui.warn(_("ignoring unknown option %s\n") % flag) + + args = list([convert(x) for x in args]) + opts = dict([(k, convert(v)) if isinstance(v, str) else (k, v) + for k, v in opts.iteritems()]) + + return args, opts + +class Command(object): + def __init__(self, name): + self.name = name + self.args = [] + self.opts = {} + + def __str__(self): + cmd = "hg " + self.name + if self.opts: + for k, values in sorted(self.opts.iteritems()): + for v in values: + if v: + cmd += " %s %s" % (k, 
v) + else: + cmd += " %s" % (k,) + if self.args: + cmd += " " + cmd += " ".join(self.args) + return cmd + + def append(self, value): + self.args.append(value) + + def extend(self, values): + self.args.extend(values) + + def __setitem__(self, key, value): + values = self.opts.setdefault(key, []) + values.append(value) + + def __and__(self, other): + return AndCommand(self, other) + +class AndCommand(object): + def __init__(self, left, right): + self.left = left + self.right = right + + def __str__(self): + return "%s && %s" % (self.left, self.right) + + def __and__(self, other): + return AndCommand(self, other) + +def add(ui, repo, *args, **kwargs): + cmdoptions = [ + ('A', 'all', None, ''), + ('p', 'patch', None, ''), + ] + args, opts = parseoptions(ui, cmdoptions, args) + + if (opts.get('patch')): + ui.status(_("note: Mercurial will commit when complete, " + "as there is no staging area in Mercurial\n\n")) + cmd = Command('commit --interactive') + else: + cmd = Command("add") + + if not opts.get('all'): + cmd.extend(args) + else: + ui.status(_("note: use hg addremove to remove files that have " + "been deleted.\n\n")) + + ui.status((str(cmd)), "\n") + +def am(ui, repo, *args, **kwargs): + cmdoptions=[ + ] + args, opts = parseoptions(ui, cmdoptions, args) + cmd = Command('import') + ui.status(str(cmd), "\n") + +def apply(ui, repo, *args, **kwargs): + cmdoptions = [ + ('p', 'p', int, ''), + ] + args, opts = parseoptions(ui, cmdoptions, args) + + cmd = Command('import --no-commit') + if (opts.get('p')): + cmd['-p'] = opts.get('p') + cmd.extend(args) + + ui.status((str(cmd)), "\n") + +def bisect(ui, repo, *args, **kwargs): + ui.status(_("See 'hg help bisect' for how to use bisect.\n\n")) + +def blame(ui, repo, *args, **kwargs): + cmdoptions = [ + ] + args, opts = parseoptions(ui, cmdoptions, args) + cmd = Command('annotate -udl') + cmd.extend([convert(v) for v in args]) + ui.status((str(cmd)), "\n") + +def branch(ui, repo, *args, **kwargs): + cmdoptions = [ + ('', 
'set-upstream', None, ''), + ('', 'set-upstream-to', '', ''), + ('d', 'delete', None, ''), + ('D', 'delete', None, ''), + ('m', 'move', None, ''), + ('M', 'move', None, ''), + ] + args, opts = parseoptions(ui, cmdoptions, args) + + cmd = Command("bookmark") + + if opts.get('set_upstream') or opts.get('set_upstream_to'): + ui.status(_("Mercurial has no concept of upstream branches\n")) + return + elif opts.get('delete'): + cmd = Command("strip") + for branch in args: + cmd['-B'] = branch + else: + cmd['-B'] = None + elif opts.get('move'): + if len(args) > 0: + if len(args) > 1: + old = args.pop(0) + else: + # shell command to output the active bookmark for the active + # revision + old = '`hg log -T"{activebookmark}" -r .`' + new = args[0] + cmd['-m'] = old + cmd.append(new) + else: + if len(args) > 1: + cmd['-r'] = args[1] + cmd.append(args[0]) + elif len(args) == 1: + cmd.append(args[0]) + ui.status((str(cmd)), "\n") + +def ispath(repo, string): + """ + The first argument to git checkout can either be a revision or a path. Let's + generally assume it's a revision, unless it's obviously a path. There are + too many ways to spell revisions in git for us to reasonably catch all of + them, so let's be conservative. + """ + if string in repo: + # if it's definitely a revision let's not even check if a file of the + # same name exists. 
+ return False + + cwd = repo.getcwd() + if cwd == '': + repopath = string + else: + repopath = cwd + '/' + string + + exists = repo.wvfs.exists(repopath) + if exists: + return True + + manifest = repo['.'].manifest() + + didexist = (repopath in manifest) or manifest.hasdir(repopath) + + return didexist + +def checkout(ui, repo, *args, **kwargs): + cmdoptions = [ + ('b', 'branch', '', ''), + ('B', 'branch', '', ''), + ('f', 'force', None, ''), + ('p', 'patch', None, ''), + ] + paths = [] + if '--' in args: + sepindex = args.index('--') + paths.extend(args[sepindex + 1:]) + args = args[:sepindex] + + args, opts = parseoptions(ui, cmdoptions, args) + + rev = None + if args and ispath(repo, args[0]): + paths = args + paths + elif args: + rev = args[0] + paths = args[1:] + paths + + cmd = Command('update') + + if opts.get('force'): + if paths or rev: + cmd['-C'] = None + + if opts.get('patch'): + cmd = Command('revert') + cmd['-i'] = None + + if opts.get('branch'): + if len(args) == 0: + cmd = Command('bookmark') + cmd.append(opts.get('branch')) + else: + cmd.append(args[0]) + bookcmd = Command('bookmark') + bookcmd.append(opts.get('branch')) + cmd = cmd & bookcmd + # if there is any path argument supplied, use revert instead of update + elif len(paths) > 0: + ui.status(_("note: use --no-backup to avoid creating .orig files\n\n")) + cmd = Command('revert') + if opts.get('patch'): + cmd['-i'] = None + if rev: + cmd['-r'] = rev + cmd.extend(paths) + elif rev: + if opts.get('patch'): + cmd['-r'] = rev + else: + cmd.append(rev) + elif opts.get('force'): + cmd = Command('revert') + cmd['--all'] = None + else: + raise error.Abort("a commit must be specified") + + ui.status((str(cmd)), "\n") + +def cherrypick(ui, repo, *args, **kwargs): + cmdoptions = [ + ('', 'continue', None, ''), + ('', 'abort', None, ''), + ('e', 'edit', None, ''), + ] + args, opts = parseoptions(ui, cmdoptions, args) + + cmd = Command('graft') + + if opts.get('edit'): + cmd['--edit'] = None + if 
opts.get('continue'): + cmd['--continue'] = None + elif opts.get('abort'): + ui.status(_("note: hg graft does not have --abort.\n\n")) + return + else: + cmd.extend(args) + + ui.status((str(cmd)), "\n") + +def clean(ui, repo, *args, **kwargs): + cmdoptions = [ + ('d', 'd', None, ''), + ('f', 'force', None, ''), + ('x', 'x', None, ''), + ] + args, opts = parseoptions(ui, cmdoptions, args) + + cmd = Command('purge') + if opts.get('x'): + cmd['--all'] = None + cmd.extend(args) + + ui.status((str(cmd)), "\n") + +def clone(ui, repo, *args, **kwargs): + cmdoptions = [ + ('', 'bare', None, ''), + ('n', 'no-checkout', None, ''), + ('b', 'branch', '', ''), + ] + args, opts = parseoptions(ui, cmdoptions, args) + + if len(args) == 0: + raise error.Abort("a repository to clone must be specified") + + cmd = Command('clone') + cmd.append(args[0]) + if len(args) > 1: + cmd.append(args[1]) + + if opts.get('bare'): + cmd['-U'] = None + ui.status(_("note: Mercurial does not have bare clones. " + + "-U will clone the repo without checking out a commit\n\n")) + elif opts.get('no_checkout'): + cmd['-U'] = None + + if opts.get('branch'): + cocmd = Command("update") + cocmd.append(opts.get('branch')) + cmd = cmd & cocmd + + ui.status((str(cmd)), "\n") + +def commit(ui, repo, *args, **kwargs): + cmdoptions = [ + ('a', 'all', None, ''), + ('m', 'message', '', ''), + ('p', 'patch', None, ''), + ('C', 'reuse-message', '', ''), + ('F', 'file', '', ''), + ('', 'author', '', ''), + ('', 'date', '', ''), + ('', 'amend', None, ''), + ('', 'no-edit', None, ''), + ] + args, opts = parseoptions(ui, cmdoptions, args) + + cmd = Command('commit') + if opts.get('patch'): + cmd = Command('commit --interactive') + + if opts.get('amend'): + if opts.get('no_edit'): + cmd = Command('amend') + else: + cmd['--amend'] = None + + if opts.get('reuse_message'): + cmd['-M'] = opts.get('reuse_message') + + if opts.get('message'): + cmd['-m'] = "'%s'" % (opts.get('message'),) + + if opts.get('all'): + 
ui.status(_("note: Mercurial doesn't have a staging area, " + + "so there is no --all. -A will add and remove files " + + "for you though.\n\n")) + + if opts.get('file'): + cmd['-l'] = opts.get('file') + + if opts.get('author'): + cmd['-u'] = opts.get('author') + + if opts.get('date'): + cmd['-d'] = opts.get('date') + + cmd.extend(args) + + ui.status((str(cmd)), "\n") + +def deprecated(ui, repo, *args, **kwargs): + ui.warn(_('This command has been deprecated in the git project, ' + + 'thus isn\'t supported by this tool.\n\n')) + +def diff(ui, repo, *args, **kwargs): + cmdoptions = [ + ('a', 'all', None, ''), + ('', 'cached', None, ''), + ('R', 'reverse', None, ''), + ] + args, opts = parseoptions(ui, cmdoptions, args) + + cmd = Command('diff') + + if opts.get('cached'): + ui.status(_('note: Mercurial has no concept of a staging area, ' + + 'so --cached does nothing.\n\n')) + + if opts.get('reverse'): + cmd['--reverse'] = None + + for a in list(args): + args.remove(a) + try: + repo.revs(a) + cmd['-r'] = a + except Exception: + cmd.append(a) + + ui.status((str(cmd)), "\n") + +def difftool(ui, repo, *args, **kwargs): + ui.status(_('Mercurial does not enable external difftool by default. You ' + 'need to enable the extdiff extension in your .hgrc file by adding\n' + 'extdiff =\n' + 'to the [extensions] section and then running\n\n' + 'hg extdiff -p \n\n' + 'See \'hg help extdiff\' and \'hg help -e extdiff\' for more ' + 'information.\n')) + +def fetch(ui, repo, *args, **kwargs): + cmdoptions = [ + ('', 'all', None, ''), + ('f', 'force', None, ''), + ] + args, opts = parseoptions(ui, cmdoptions, args) + + cmd = Command('pull') + + if len(args) > 0: + cmd.append(args[0]) + if len(args) > 1: + ui.status(_("note: Mercurial doesn't have refspecs. " + + "-r can be used to specify which commits you want to pull. " + + "-B can be used to specify which bookmark you want to pull." 
+ + "\n\n")) + for v in args[1:]: + if v in repo._bookmarks: + cmd['-B'] = v + else: + cmd['-r'] = v + + ui.status((str(cmd)), "\n") + +def grep(ui, repo, *args, **kwargs): + cmdoptions = [ + ] + args, opts = parseoptions(ui, cmdoptions, args) + + cmd = Command('grep') + + # For basic usage, git grep and hg grep are the same. They both have the + # pattern first, followed by paths. + cmd.extend(args) + + ui.status((str(cmd)), "\n") + +def init(ui, repo, *args, **kwargs): + cmdoptions = [ + ] + args, opts = parseoptions(ui, cmdoptions, args) + + cmd = Command('init') + + if len(args) > 0: + cmd.append(args[0]) + + ui.status((str(cmd)), "\n") + +def log(ui, repo, *args, **kwargs): + cmdoptions = [ + ('', 'follow', None, ''), + ('', 'decorate', None, ''), + ('n', 'number', '', ''), + ('1', '1', None, ''), + ('', 'pretty', '', ''), + ('', 'format', '', ''), + ('', 'oneline', None, ''), + ('', 'stat', None, ''), + ('', 'graph', None, ''), + ('p', 'patch', None, ''), + ] + args, opts = parseoptions(ui, cmdoptions, args) + ui.status(_('note: -v prints the entire commit message like Git does. To ' + + 'print just the first line, drop the -v.\n\n')) + ui.status(_("note: see hg help revset for information on how to filter " + + "log output.\n\n")) + + cmd = Command('log') + cmd['-v'] = None + + if opts.get('number'): + cmd['-l'] = opts.get('number') + if opts.get('1'): + cmd['-l'] = '1' + if opts.get('stat'): + cmd['--stat'] = None + if opts.get('graph'): + cmd['-G'] = None + if opts.get('patch'): + cmd['-p'] = None + + if opts.get('pretty') or opts.get('format') or opts.get('oneline'): + format = opts.get('format', '') + if 'format:' in format: + ui.status(_("note: --format format:??? equates to Mercurial's " + + "--template. See hg help templates for more info.\n\n")) + cmd['--template'] = '???' + else: + ui.status(_("note: --pretty/format/oneline equate to Mercurial's " + + "--style or --template. See hg help templates for more info." + + "\n\n")) + cmd['--style'] = '???' 
+ + if len(args) > 0: + if '..' in args[0]: + since, until = args[0].split('..') + cmd['-r'] = "'%s::%s'" % (since, until) + del args[0] + cmd.extend(args) + + ui.status((str(cmd)), "\n") + +def lsfiles(ui, repo, *args, **kwargs): + cmdoptions = [ + ('c', 'cached', None, ''), + ('d', 'deleted', None, ''), + ('m', 'modified', None, ''), + ('o', 'others', None, ''), + ('i', 'ignored', None, ''), + ('s', 'stage', None, ''), + ('z', '_zero', None, ''), + ] + args, opts = parseoptions(ui, cmdoptions, args) + + if (opts.get('modified') or opts.get('deleted') + or opts.get('others') or opts.get('ignored')): + cmd = Command('status') + if opts.get('deleted'): + cmd['-d'] = None + if opts.get('modified'): + cmd['-m'] = None + if opts.get('others'): + cmd['-o'] = None + if opts.get('ignored'): + cmd['-i'] = None + else: + cmd = Command('files') + if opts.get('stage'): + ui.status(_("note: Mercurial doesn't have a staging area, ignoring " + "--stage\n")) + if opts.get('_zero'): + cmd['-0'] = None + cmd.append('.') + for include in args: + cmd['-I'] = util.shellquote(include) + + ui.status((str(cmd)), "\n") + +def merge(ui, repo, *args, **kwargs): + cmdoptions = [ + ] + args, opts = parseoptions(ui, cmdoptions, args) + + cmd = Command('merge') + + if len(args) > 0: + cmd.append(args[len(args) - 1]) + + ui.status((str(cmd)), "\n") + +def mergebase(ui, repo, *args, **kwargs): + cmdoptions = [] + args, opts = parseoptions(ui, cmdoptions, args) + + if len(args) != 2: + args = ['A', 'B'] + + cmd = Command("log -T '{node}\\n' -r 'ancestor(%s,%s)'" + % (args[0], args[1])) + + ui.status(_('NOTE: ancestors() is part of the revset language.\n'), + _("Learn more about revsets with 'hg help revsets'\n\n")) + ui.status((str(cmd)), "\n") + +def mergetool(ui, repo, *args, **kwargs): + cmdoptions = [] + args, opts = parseoptions(ui, cmdoptions, args) + + cmd = Command("resolve") + + if len(args) == 0: + cmd['--all'] = None + cmd.extend(args) + ui.status((str(cmd)), "\n") + +def mv(ui, repo, 
*args, **kwargs): + cmdoptions = [ + ('f', 'force', None, ''), + ] + args, opts = parseoptions(ui, cmdoptions, args) + + cmd = Command('mv') + cmd.extend(args) + + if opts.get('force'): + cmd['-f'] = None + + ui.status((str(cmd)), "\n") + +def pull(ui, repo, *args, **kwargs): + cmdoptions = [ + ('', 'all', None, ''), + ('f', 'force', None, ''), + ('r', 'rebase', None, ''), + ] + args, opts = parseoptions(ui, cmdoptions, args) + + cmd = Command('pull') + cmd['--rebase'] = None + + if len(args) > 0: + cmd.append(args[0]) + if len(args) > 1: + ui.status(_("note: Mercurial doesn't have refspecs. " + + "-r can be used to specify which commits you want to pull. " + + "-B can be used to specify which bookmark you want to pull." + + "\n\n")) + for v in args[1:]: + if v in repo._bookmarks: + cmd['-B'] = v + else: + cmd['-r'] = v + + ui.status((str(cmd)), "\n") + +def push(ui, repo, *args, **kwargs): + cmdoptions = [ + ('', 'all', None, ''), + ('f', 'force', None, ''), + ] + args, opts = parseoptions(ui, cmdoptions, args) + + cmd = Command('push') + + if len(args) > 0: + cmd.append(args[0]) + if len(args) > 1: + ui.status(_("note: Mercurial doesn't have refspecs. " + + "-r can be used to specify which commits you want to push. " + + "-B can be used to specify which bookmark you want to push." + + "\n\n")) + for v in args[1:]: + if v in repo._bookmarks: + cmd['-B'] = v + else: + cmd['-r'] = v + + if opts.get('force'): + cmd['-f'] = None + + ui.status((str(cmd)), "\n") + +def rebase(ui, repo, *args, **kwargs): + cmdoptions = [ + ('', 'all', None, ''), + ('i', 'interactive', None, ''), + ('', 'onto', '', ''), + ('', 'abort', None, ''), + ('', 'continue', None, ''), + ('', 'skip', None, ''), + ] + args, opts = parseoptions(ui, cmdoptions, args) + + if opts.get('interactive'): + ui.status(_("note: hg histedit does not perform a rebase. 
" + + "It just edits history.\n\n")) + cmd = Command('histedit') + if len(args) > 0: + ui.status(_("also note: 'hg histedit' will automatically detect" + " your stack, so no second argument is necessary.\n\n")) + ui.status((str(cmd)), "\n") + return + + if opts.get('skip'): + cmd = Command('revert --all -r .') + ui.status((str(cmd)), "\n") + + cmd = Command('rebase') + + if opts.get('continue') or opts.get('skip'): + cmd['--continue'] = None + if opts.get('abort'): + cmd['--abort'] = None + + if opts.get('onto'): + ui.status(_("note: if you're trying to lift a commit off one branch, " + + "try hg rebase -d -s " + + "\n\n")) + cmd['-d'] = convert(opts.get('onto')) + if len(args) < 2: + raise error.Abort("Expected format: git rebase --onto X Y Z") + cmd['-s'] = "'::%s - ::%s'" % (convert(args[1]), convert(args[0])) + else: + if len(args) == 1: + cmd['-d'] = convert(args[0]) + elif len(args) == 2: + cmd['-d'] = convert(args[0]) + cmd['-b'] = convert(args[1]) + + ui.status((str(cmd)), "\n") + +def reflog(ui, repo, *args, **kwargs): + cmdoptions = [ + ('', 'all', None, ''), + ] + args, opts = parseoptions(ui, cmdoptions, args) + + cmd = Command('journal') + if opts.get('all'): + cmd['--all'] = None + if len(args) > 0: + cmd.append(args[0]) + + ui.status(str(cmd), "\n\n") + ui.status(_("note: in hg commits can be deleted from repo but we always" + " have backups.\n")) + +def reset(ui, repo, *args, **kwargs): + cmdoptions = [ + ('', 'soft', None, ''), + ('', 'hard', None, ''), + ('', 'mixed', None, ''), + ] + args, opts = parseoptions(ui, cmdoptions, args) + + commit = convert(args[0] if len(args) > 0 else '.') + hard = opts.get('hard') + + if opts.get('mixed'): + ui.status(_('NOTE: --mixed has no meaning since Mercurial has no ' + 'staging area\n\n')) + if opts.get('soft'): + ui.status(_('NOTE: --soft has no meaning since Mercurial has no ' + 'staging area\n\n')) + + cmd = Command('update') + if hard: + cmd.append('--clean') + + cmd.append(commit) + + 
ui.status((str(cmd)), "\n") + +def revert(ui, repo, *args, **kwargs): + cmdoptions = [ + ] + args, opts = parseoptions(ui, cmdoptions, args) + + if len(args) > 1: + ui.status(_("note: hg backout doesn't support multiple commits at " + + "once\n\n")) + + cmd = Command('backout') + if args: + cmd.append(args[0]) + + ui.status((str(cmd)), "\n") + +def revparse(ui, repo, *args, **kwargs): + cmdoptions = [ + ('', 'show-cdup', None, ''), + ('', 'show-toplevel', None, ''), + ] + args, opts = parseoptions(ui, cmdoptions, args) + + if opts.get('show_cdup') or opts.get('show_toplevel'): + cmd = Command('root') + if opts.get('show_cdup'): + ui.status(_("note: hg root prints the root of the repository\n\n")) + ui.status((str(cmd)), "\n") + else: + ui.status(_("note: see hg help revset for how to refer to commits\n")) + +def rm(ui, repo, *args, **kwargs): + cmdoptions = [ + ('f', 'force', None, ''), + ('n', 'dry-run', None, ''), + ] + args, opts = parseoptions(ui, cmdoptions, args) + + cmd = Command('rm') + cmd.extend(args) + + if opts.get('force'): + cmd['-f'] = None + if opts.get('dry_run'): + cmd['-n'] = None + + ui.status((str(cmd)), "\n") + +def show(ui, repo, *args, **kwargs): + cmdoptions = [ + ('', 'name-status', None, ''), + ('', 'pretty', '', ''), + ('U', 'unified', int, ''), + ] + args, opts = parseoptions(ui, cmdoptions, args) + + if opts.get('name_status'): + if opts.get('pretty') == 'format:': + cmd = Command('status') + cmd['--change'] = '.' 
+ else: + cmd = Command('log') + cmd.append('--style status') + cmd.append('-r .') + elif len(args) > 0: + if ispath(repo, args[0]): + cmd = Command('cat') + else: + cmd = Command('export') + cmd.extend(args) + if opts.get('unified'): + cmd.append('--config diff.unified=%d' % (opts['unified'],)) + elif opts.get('unified'): + cmd = Command('export') + cmd.append('--config diff.unified=%d' % (opts['unified'],)) + else: + cmd = Command('export') + + ui.status((str(cmd)), "\n") + +def stash(ui, repo, *args, **kwargs): + cmdoptions = [ + ] + args, opts = parseoptions(ui, cmdoptions, args) + + cmd = Command('shelve') + action = args[0] if len(args) > 0 else None + + if action == 'list': + cmd['-l'] = None + elif action == 'drop': + cmd['-d'] = None + if len(args) > 1: + cmd.append(args[1]) + else: + cmd.append('') + elif action == 'pop' or action == 'apply': + cmd = Command('unshelve') + if len(args) > 1: + cmd.append(args[1]) + if action == 'apply': + cmd['--keep'] = None + elif (action == 'branch' or action == 'show' or action == 'clear' + or action == 'create'): + ui.status(_("note: Mercurial doesn't have equivalents to the " + + "git stash branch, show, clear, or create actions.\n\n")) + return + else: + if len(args) > 0: + if args[0] != 'save': + cmd['--name'] = args[0] + elif len(args) > 1: + cmd['--name'] = args[1] + + ui.status((str(cmd)), "\n") + +def status(ui, repo, *args, **kwargs): + cmdoptions = [ + ('', 'ignored', None, ''), + ] + args, opts = parseoptions(ui, cmdoptions, args) + + cmd = Command('status') + cmd.extend(args) + + if opts.get('ignored'): + cmd['-i'] = None + + ui.status((str(cmd)), "\n") + +def svn(ui, repo, *args, **kwargs): + svncmd = args[0] + if not svncmd in gitsvncommands: + ui.warn(_("error: unknown git svn command %s\n") % (svncmd)) + + args = args[1:] + return gitsvncommands[svncmd](ui, repo, *args, **kwargs) + +def svndcommit(ui, repo, *args, **kwargs): + cmdoptions = [ + ] + args, opts = parseoptions(ui, cmdoptions, args) + + cmd = 
Command('push') + + ui.status((str(cmd)), "\n") + +def svnfetch(ui, repo, *args, **kwargs): + cmdoptions = [ + ] + args, opts = parseoptions(ui, cmdoptions, args) + + cmd = Command('pull') + cmd.append('default-push') + + ui.status((str(cmd)), "\n") + +def svnfindrev(ui, repo, *args, **kwargs): + cmdoptions = [ + ] + args, opts = parseoptions(ui, cmdoptions, args) + + cmd = Command('log') + cmd['-r'] = args[0] + + ui.status((str(cmd)), "\n") + +def svnrebase(ui, repo, *args, **kwargs): + cmdoptions = [ + ('l', 'local', None, ''), + ] + args, opts = parseoptions(ui, cmdoptions, args) + + pullcmd = Command('pull') + pullcmd.append('default-push') + rebasecmd = Command('rebase') + rebasecmd.append('tip') + + cmd = pullcmd & rebasecmd + + ui.status((str(cmd)), "\n") + +def tag(ui, repo, *args, **kwargs): + cmdoptions = [ + ('f', 'force', None, ''), + ('l', 'list', None, ''), + ('d', 'delete', None, ''), + ] + args, opts = parseoptions(ui, cmdoptions, args) + + if opts.get('list'): + cmd = Command('tags') + else: + cmd = Command('tag') + cmd.append(args[0]) + if len(args) > 1: + cmd['-r'] = args[1] + + if opts.get('delete'): + cmd['--remove'] = None + + if opts.get('force'): + cmd['-f'] = None + + ui.status((str(cmd)), "\n") + +gitcommands = { + 'add': add, + 'am': am, + 'apply': apply, + 'bisect': bisect, + 'blame': blame, + 'branch': branch, + 'checkout': checkout, + 'cherry-pick': cherrypick, + 'clean': clean, + 'clone': clone, + 'commit': commit, + 'diff': diff, + 'difftool': difftool, + 'fetch': fetch, + 'grep': grep, + 'init': init, + 'log': log, + 'ls-files': lsfiles, + 'merge': merge, + 'merge-base': mergebase, + 'mergetool': mergetool, + 'mv': mv, + 'pull': pull, + 'push': push, + 'rebase': rebase, + 'reflog': reflog, + 'reset': reset, + 'revert': revert, + 'rev-parse': revparse, + 'rm': rm, + 'show': show, + 'stash': stash, + 'status': status, + 'svn': svn, + 'tag': tag, + 'whatchanged': deprecated, +} + +gitsvncommands = { + 'dcommit': svndcommit, + 'fetch': 
svnfetch, + 'find-rev': svnfindrev, + 'rebase': svnrebase, +} diff -r 87676e8ee056 -r 27b6df1b5adb hgext/gpg.py --- a/hgext/gpg.py Mon Jan 08 16:07:51 2018 -0800 +++ b/hgext/gpg.py Mon Jan 22 17:53:02 2018 -0500 @@ -106,7 +106,7 @@ def newgpg(ui, **opts): """create a new gpg instance""" gpgpath = ui.config("gpg", "cmd") - gpgkey = opts.get('key') + gpgkey = opts.get(r'key') if not gpgkey: gpgkey = ui.config("gpg", "key") return gpg(gpgpath, gpgkey) @@ -253,6 +253,7 @@ def _dosign(ui, repo, *revs, **opts): mygpg = newgpg(ui, **opts) + opts = pycompat.byteskwargs(opts) sigver = "0" sigmessage = "" @@ -312,7 +313,8 @@ % hgnode.short(n) for n in nodes]) try: - editor = cmdutil.getcommiteditor(editform='gpg.sign', **opts) + editor = cmdutil.getcommiteditor(editform='gpg.sign', + **pycompat.strkwargs(opts)) repo.commit(message, opts['user'], opts['date'], match=msigs, editor=editor) except ValueError as inst: diff -r 87676e8ee056 -r 27b6df1b5adb hgext/graphlog.py --- a/hgext/graphlog.py Mon Jan 08 16:07:51 2018 -0800 +++ b/hgext/graphlog.py Mon Jan 22 17:53:02 2018 -0500 @@ -66,5 +66,5 @@ This is an alias to :hg:`log -G`. 
""" - opts['graph'] = True + opts[r'graph'] = True return commands.log(ui, repo, *pats, **opts) diff -r 87676e8ee056 -r 27b6df1b5adb hgext/hgk.py --- a/hgext/hgk.py Mon Jan 08 16:07:51 2018 -0800 +++ b/hgext/hgk.py Mon Jan 22 17:53:02 2018 -0500 @@ -48,6 +48,7 @@ commands, obsolete, patch, + pycompat, registrar, scmutil, util, @@ -79,6 +80,7 @@ inferrepo=True) def difftree(ui, repo, node1=None, node2=None, *files, **opts): """diff trees from two commits""" + def __difftree(repo, node1, node2, files=None): assert node2 is not None if files is None: @@ -102,7 +104,7 @@ ## while True: - if opts['stdin']: + if opts[r'stdin']: try: line = util.bytesinput(ui.fin, ui.fout).split(' ') node1 = line[0] @@ -118,8 +120,8 @@ else: node2 = node1 node1 = repo.changelog.parents(node1)[0] - if opts['patch']: - if opts['pretty']: + if opts[r'patch']: + if opts[r'pretty']: catcommit(ui, repo, node2, "") m = scmutil.match(repo[node1], files) diffopts = patch.difffeatureopts(ui) @@ -130,7 +132,7 @@ ui.write(chunk) else: __difftree(repo, node1, node2, files=files) - if not opts['stdin']: + if not opts[r'stdin']: break def catcommit(ui, repo, n, prefix, ctx=None): @@ -183,7 +185,7 @@ # strings # prefix = "" - if opts['stdin']: + if opts[r'stdin']: try: (type, r) = util.bytesinput(ui.fin, ui.fout).split(' ') prefix = " " @@ -201,7 +203,7 @@ return 1 n = repo.lookup(r) catcommit(ui, repo, n, prefix) - if opts['stdin']: + if opts[r'stdin']: try: (type, r) = util.bytesinput(ui.fin, ui.fout).split(' ') except EOFError: @@ -340,7 +342,7 @@ else: full = None copy = [x for x in revs] - revtree(ui, copy, repo, full, opts['max_count'], opts['parents']) + revtree(ui, copy, repo, full, opts[r'max_count'], opts[r'parents']) @command('view', [('l', 'limit', '', @@ -348,6 +350,7 @@ _('[-l LIMIT] [REVRANGE]')) def view(ui, repo, *etc, **opts): "start interactive history viewer" + opts = pycompat.byteskwargs(opts) os.chdir(repo.root) optstr = ' '.join(['--%s %s' % (k, v) for k, v in opts.iteritems() if 
v]) if repo.filtername is None: diff -r 87676e8ee056 -r 27b6df1b5adb hgext/highlight/highlight.py --- a/hgext/highlight/highlight.py Mon Jan 08 16:07:51 2018 -0800 +++ b/hgext/highlight/highlight.py Mon Jan 22 17:53:02 2018 -0500 @@ -22,8 +22,12 @@ import pygments import pygments.formatters import pygments.lexers + import pygments.plugin import pygments.util + for unused in pygments.plugin.find_plugin_lexers(): + pass + highlight = pygments.highlight ClassNotFound = pygments.util.ClassNotFound guess_lexer = pygments.lexers.guess_lexer diff -r 87676e8ee056 -r 27b6df1b5adb hgext/histedit.py --- a/hgext/histedit.py Mon Jan 08 16:07:51 2018 -0800 +++ b/hgext/histedit.py Mon Jan 22 17:53:02 2018 -0500 @@ -203,6 +203,7 @@ mergeutil, node, obsolete, + pycompat, registrar, repair, scmutil, @@ -542,9 +543,9 @@ def commitfunc(**kwargs): overrides = {('phases', 'new-commit'): phasemin} with repo.ui.configoverride(overrides, 'histedit'): - extra = kwargs.get('extra', {}).copy() + extra = kwargs.get(r'extra', {}).copy() extra['histedit_source'] = src.hex() - kwargs['extra'] = extra + kwargs[r'extra'] = extra return repo.commit(**kwargs) return commitfunc @@ -602,7 +603,7 @@ if path in headmf: fctx = last[path] flags = fctx.flags() - mctx = context.memfilectx(repo, + mctx = context.memfilectx(repo, ctx, fctx.path(), fctx.data(), islink='l' in flags, isexec='x' in flags, @@ -917,7 +918,8 @@ ('o', 'outgoing', False, _('changesets not found in destination')), ('f', 'force', False, _('force outgoing even for unrelated repositories')), - ('r', 'rev', [], _('first revision to be edited'), _('REV'))], + ('r', 'rev', [], _('first revision to be edited'), _('REV'))] + + cmdutil.formatteropts, _("[OPTIONS] ([ANCESTOR] | --outgoing [URL])")) def histedit(ui, repo, *freeargs, **opts): """interactively edit changeset history @@ -1094,6 +1096,9 @@ _('histedit requires exactly one ancestor revision')) def _histedit(ui, repo, state, *freeargs, **opts): + opts = pycompat.byteskwargs(opts) + fm = 
ui.formatter('histedit', opts) + fm.startitem() goal = _getgoal(opts) revs = opts.get('rev', []) rules = opts.get('commands', '') @@ -1116,7 +1121,8 @@ _newhistedit(ui, repo, state, revs, freeargs, opts) _continuehistedit(ui, repo, state) - _finishhistedit(ui, repo, state) + _finishhistedit(ui, repo, state, fm) + fm.end() def _continuehistedit(ui, repo, state): """This function runs after either: @@ -1163,7 +1169,7 @@ state.write() ui.progress(_("editing"), None) -def _finishhistedit(ui, repo, state): +def _finishhistedit(ui, repo, state, fm): """This action runs when histedit is finishing its session""" repo.ui.pushbuffer() hg.update(repo, state.parentctxnode, quietempty=True) @@ -1197,6 +1203,13 @@ mapping = {k: v for k, v in mapping.items() if k in nodemap and all(n in nodemap for n in v)} scmutil.cleanupnodes(repo, mapping, 'histedit') + hf = fm.hexfunc + fl = fm.formatlist + fd = fm.formatdict + nodechanges = fd({hf(oldn): fl([hf(n) for n in newn], name='node') + for oldn, newn in mapping.iteritems()}, + key="oldnode", value="newnodes") + fm.data(nodechanges=nodechanges) state.clear() if os.path.exists(repo.sjoin('undo')): @@ -1297,6 +1310,9 @@ state.topmost = topmost state.replacements = [] + ui.log("histedit", "%d actions to histedit", len(actions), + histedit_num_actions=len(actions)) + # Create a backup so we can always abort completely. backupfile = None if not obsolete.isenabled(repo, obsolete.createmarkersopt): diff -r 87676e8ee056 -r 27b6df1b5adb hgext/journal.py --- a/hgext/journal.py Mon Jan 08 16:07:51 2018 -0800 +++ b/hgext/journal.py Mon Jan 22 17:53:02 2018 -0500 @@ -30,6 +30,7 @@ localrepo, lock, node, + pycompat, registrar, util, ) @@ -133,7 +134,7 @@ Note that by default entries go from most recent to oldest. 
""" - order = kwargs.pop('order', max) + order = kwargs.pop(r'order', max) iterables = [iter(it) for it in iterables] # this tracks still active iterables; iterables are deleted as they are # exhausted, which is why this is a dictionary and why each entry also @@ -303,7 +304,7 @@ # default to 600 seconds timeout l = lock.lock( vfs, 'namejournal.lock', - int(self.ui.config("ui", "timeout")), desc=desc) + self.ui.configint("ui", "timeout"), desc=desc) self.ui.warn(_("got lock after %s seconds\n") % l.delay) self._lockref = weakref.ref(l) return l @@ -458,6 +459,7 @@ `hg journal -T json` can be used to produce machine readable output. """ + opts = pycompat.byteskwargs(opts) name = '.' if opts.get('all'): if args: @@ -478,6 +480,7 @@ limit = cmdutil.loglimit(opts) entry = None + ui.pager('journal') for count, entry in enumerate(repo.journal.filtered(name=name)): if count == limit: break diff -r 87676e8ee056 -r 27b6df1b5adb hgext/keyword.py --- a/hgext/keyword.py Mon Jan 08 16:07:51 2018 -0800 +++ b/hgext/keyword.py Mon Jan 22 17:53:02 2018 -0500 @@ -104,6 +104,7 @@ match, patch, pathutil, + pycompat, registrar, scmutil, templatefilters, @@ -380,6 +381,7 @@ '''Bails out if [keyword] configuration is not active. 
Returns status of working directory.''' if kwt: + opts = pycompat.byteskwargs(opts) return repo.status(match=scmutil.match(wctx, pats, opts), clean=True, unknown=opts.get('unknown') or opts.get('all')) if ui.configitems('keyword'): @@ -436,16 +438,16 @@ ui.setconfig('keywordset', 'svn', svn, 'keyword') uikwmaps = ui.configitems('keywordmaps') - if args or opts.get('rcfile'): + if args or opts.get(r'rcfile'): ui.status(_('\n\tconfiguration using custom keyword template maps\n')) if uikwmaps: ui.status(_('\textending current template maps\n')) - if opts.get('default') or not uikwmaps: + if opts.get(r'default') or not uikwmaps: if svn: ui.status(_('\toverriding default svn keywordset\n')) else: ui.status(_('\toverriding default cvs keywordset\n')) - if opts.get('rcfile'): + if opts.get(r'rcfile'): ui.readconfig(opts.get('rcfile')) if args: # simulate hgrc parsing @@ -453,7 +455,7 @@ repo.vfs.write('hgrc', rcmaps) ui.readconfig(repo.vfs.join('hgrc')) kwmaps = dict(ui.configitems('keywordmaps')) - elif opts.get('default'): + elif opts.get(r'default'): if svn: ui.status(_('\n\tconfiguration using default svn keywordset\n')) else: @@ -543,6 +545,7 @@ else: cwd = '' files = [] + opts = pycompat.byteskwargs(opts) if not opts.get('unknown') or opts.get('all'): files = sorted(status.modified + status.added + status.clean) kwfiles = kwt.iskwfile(files, wctx) diff -r 87676e8ee056 -r 27b6df1b5adb hgext/largefiles/lfcommands.py --- a/hgext/largefiles/lfcommands.py Mon Jan 08 16:07:51 2018 -0800 +++ b/hgext/largefiles/lfcommands.py Mon Jan 22 17:53:02 2018 -0500 @@ -24,6 +24,7 @@ lock, match as matchmod, node, + pycompat, registrar, scmutil, util, @@ -74,6 +75,7 @@ Use --to-normal to convert largefiles back to normal files; after this, the DEST repository can be used without largefiles at all.''' + opts = pycompat.byteskwargs(opts) if opts['to_normal']: tolfile = False else: @@ -177,7 +179,7 @@ convcmd.converter = converter try: - convcmd.convert(ui, src, dest) + 
convcmd.convert(ui, src, dest, source_type='hg', dest_type='hg') finally: convcmd.converter = orig success = True @@ -259,7 +261,8 @@ # doesn't change after rename or copy renamed = lfutil.standin(renamed[0]) - return context.memfilectx(repo, f, lfiletohash[srcfname] + '\n', + return context.memfilectx(repo, memctx, f, + lfiletohash[srcfname] + '\n', 'l' in fctx.flags(), 'x' in fctx.flags(), renamed) else: @@ -311,7 +314,7 @@ data = fctx.data() if f == '.hgtags': data = _converttags (repo.ui, revmap, data) - return context.memfilectx(repo, f, data, 'l' in fctx.flags(), + return context.memfilectx(repo, ctx, f, data, 'l' in fctx.flags(), 'x' in fctx.flags(), renamed) # Remap tag data using a revision map @@ -579,7 +582,7 @@ """ repo.lfpullsource = source - revs = opts.get('rev', []) + revs = opts.get(r'rev', []) if not revs: raise error.Abort(_('no revisions specified')) revs = scmutil.revrange(repo, revs) @@ -590,3 +593,12 @@ (cached, missing) = cachelfiles(ui, repo, rev) numcached += len(cached) ui.status(_("%d largefiles cached\n") % numcached) + +@command('debuglfput', + [] + cmdutil.remoteopts, + _('FILE')) +def debuglfput(ui, repo, filepath, **kwargs): + hash = lfutil.hashfile(filepath) + storefactory.openstore(repo).put(filepath, hash) + ui.write('%s\n' % hash) + return 0 diff -r 87676e8ee056 -r 27b6df1b5adb hgext/largefiles/lfutil.py --- a/hgext/largefiles/lfutil.py Mon Jan 08 16:07:51 2018 -0800 +++ b/hgext/largefiles/lfutil.py Mon Jan 22 17:53:02 2018 -0500 @@ -69,31 +69,31 @@ to preserve download bandwidth and storage space.''' return os.path.join(_usercachedir(ui), hash) -def _usercachedir(ui): +def _usercachedir(ui, name=longname): '''Return the location of the "global" largefiles cache.''' - path = ui.configpath(longname, 'usercache') + path = ui.configpath(name, 'usercache') if path: return path if pycompat.iswindows: appdata = encoding.environ.get('LOCALAPPDATA',\ encoding.environ.get('APPDATA')) if appdata: - return os.path.join(appdata, longname) + 
return os.path.join(appdata, name) elif pycompat.isdarwin: home = encoding.environ.get('HOME') if home: - return os.path.join(home, 'Library', 'Caches', longname) + return os.path.join(home, 'Library', 'Caches', name) elif pycompat.isposix: path = encoding.environ.get('XDG_CACHE_HOME') if path: - return os.path.join(path, longname) + return os.path.join(path, name) home = encoding.environ.get('HOME') if home: - return os.path.join(home, '.cache', longname) + return os.path.join(home, '.cache', name) else: raise error.Abort(_('unknown operating system: %s\n') % pycompat.osname) - raise error.Abort(_('unknown %s usercache location') % longname) + raise error.Abort(_('unknown %s usercache location') % name) def inusercache(ui, hash): path = usercachepath(ui, hash) diff -r 87676e8ee056 -r 27b6df1b5adb hgext/largefiles/overrides.py --- a/hgext/largefiles/overrides.py Mon Jan 08 16:07:51 2018 -0800 +++ b/hgext/largefiles/overrides.py Mon Jan 22 17:53:02 2018 -0500 @@ -21,6 +21,7 @@ hg, match as matchmod, pathutil, + pycompat, registrar, scmutil, smartset, @@ -156,7 +157,7 @@ # Need to lock, otherwise there could be a race condition between # when standins are created and added to the repo. 
with repo.wlock(): - if not opts.get('dry_run'): + if not opts.get(r'dry_run'): standins = [] lfdirstate = lfutil.openlfdirstate(ui, repo) for f in lfnames: @@ -177,7 +178,7 @@ return added, bad def removelargefiles(ui, repo, isaddremove, matcher, **opts): - after = opts.get('after') + after = opts.get(r'after') m = composelargefilematcher(matcher, repo[None].manifest()) try: repo.lfstatus = True @@ -221,11 +222,11 @@ name = m.rel(f) ui.status(_('removing %s\n') % name) - if not opts.get('dry_run'): + if not opts.get(r'dry_run'): if not after: repo.wvfs.unlinkpath(f, ignoremissing=True) - if opts.get('dry_run'): + if opts.get(r'dry_run'): return result remove = [lfutil.standin(f) for f in remove] @@ -252,7 +253,7 @@ # -- Wrappers: modify existing commands -------------------------------- def overrideadd(orig, ui, repo, *pats, **opts): - if opts.get('normal') and opts.get('large'): + if opts.get(r'normal') and opts.get(r'large'): raise error.Abort(_('--normal cannot be used with --large')) return orig(ui, repo, *pats, **opts) @@ -403,9 +404,9 @@ setattr(cmdutil, '_makenofollowlogfilematcher', oldmakelogfilematcher) def overrideverify(orig, ui, repo, *pats, **opts): - large = opts.pop('large', False) - all = opts.pop('lfa', False) - contents = opts.pop('lfc', False) + large = opts.pop(r'large', False) + all = opts.pop(r'lfa', False) + contents = opts.pop(r'lfc', False) result = orig(ui, repo, *pats, **opts) if large or all or contents: @@ -413,7 +414,7 @@ return result def overridedebugstate(orig, ui, repo, *pats, **opts): - large = opts.pop('large', False) + large = opts.pop(r'large', False) if large: class fakerepo(object): dirstate = lfutil.openlfdirstate(ui, repo) @@ -802,8 +803,8 @@ repo.lfpullsource = source result = orig(ui, repo, source, **opts) revspostpull = len(repo) - lfrevs = opts.get('lfrev', []) - if opts.get('all_largefiles'): + lfrevs = opts.get(r'lfrev', []) + if opts.get(r'all_largefiles'): lfrevs.append('pulled()') if lfrevs and revspostpull > 
revsprepull: numcached = 0 @@ -820,7 +821,7 @@ def overridepush(orig, ui, repo, *args, **kwargs): """Override push command and store --lfrev parameters in opargs""" - lfrevs = kwargs.pop('lfrev', None) + lfrevs = kwargs.pop(r'lfrev', None) if lfrevs: opargs = kwargs.setdefault('opargs', {}) opargs['lfrevs'] = scmutil.revrange(repo, lfrevs) @@ -828,7 +829,7 @@ def exchangepushoperation(orig, *args, **kwargs): """Override pushoperation constructor and store lfrevs parameter""" - lfrevs = kwargs.pop('lfrevs', None) + lfrevs = kwargs.pop(r'lfrevs', None) pushop = orig(*args, **kwargs) pushop.lfrevs = lfrevs return pushop @@ -865,7 +866,7 @@ d = dest if d is None: d = hg.defaultdest(source) - if opts.get('all_largefiles') and not hg.islocal(d): + if opts.get(r'all_largefiles') and not hg.islocal(d): raise error.Abort(_( '--all-largefiles is incompatible with non-local destination %s') % d) @@ -887,13 +888,13 @@ # If largefiles is required for this repo, permanently enable it locally if 'largefiles' in repo.requirements: - with repo.vfs('hgrc', 'a', text=True) as fp: - fp.write('\n[extensions]\nlargefiles=\n') + repo.vfs.append('hgrc', + util.tonativeeol('\n[extensions]\nlargefiles=\n')) # Caching is implicitly limited to 'rev' option, since the dest repo was # truncated at that point. The user may expect a download count with # this option, so attempt whether or not this is a largefile repo. 
- if opts.get('all_largefiles'): + if opts.get(r'all_largefiles'): success, missing = lfcommands.downloadlfiles(ui, repo, None) if missing != 0: @@ -906,14 +907,14 @@ # If largefiles is required for this repo, permanently enable it locally if 'largefiles' in destrepo.requirements: - with destrepo.vfs('hgrc', 'a+', text=True) as fp: - fp.write('\n[extensions]\nlargefiles=\n') + destrepo.vfs.append('hgrc', + util.tonativeeol('\n[extensions]\nlargefiles=\n')) def overriderebase(orig, ui, repo, **opts): if not util.safehasattr(repo, '_largefilesenabled'): return orig(ui, repo, **opts) - resuming = opts.get('continue') + resuming = opts.get(r'continue') repo._lfcommithooks.append(lfutil.automatedcommithook(resuming)) repo._lfstatuswriters.append(lambda *msg, **opts: None) try: @@ -1272,6 +1273,7 @@ repo.status = overridestatus orig(ui, repo, *dirs, **opts) repo.status = oldstatus + def overriderollback(orig, ui, repo, **opts): with repo.wlock(): before = repo.dirstate.parents() @@ -1310,7 +1312,7 @@ return result def overridetransplant(orig, ui, repo, *revs, **opts): - resuming = opts.get('continue') + resuming = opts.get(r'continue') repo._lfcommithooks.append(lfutil.automatedcommithook(resuming)) repo._lfstatuswriters.append(lambda *msg, **opts: None) try: @@ -1321,6 +1323,7 @@ return result def overridecat(orig, ui, repo, file1, *pats, **opts): + opts = pycompat.byteskwargs(opts) ctx = scmutil.revsingle(repo, opts.get('rev')) err = 1 notbad = set() @@ -1382,7 +1385,7 @@ def mergeupdate(orig, repo, node, branchmerge, force, *args, **kwargs): - matcher = kwargs.get('matcher', None) + matcher = kwargs.get(r'matcher', None) # note if this is a partial update partial = matcher and not matcher.always() with repo.wlock(): @@ -1437,7 +1440,7 @@ # Make sure the merge runs on disk, not in-memory. largefiles is not a # good candidate for in-memory merge (large files, custom dirstate, # matcher usage). 
- kwargs['wc'] = repo[None] + kwargs[r'wc'] = repo[None] result = orig(repo, node, branchmerge, force, *args, **kwargs) newstandins = lfutil.getstandinsstate(repo) @@ -1470,3 +1473,20 @@ printmessage=False, normallookup=True) return result + +def upgraderequirements(orig, repo): + reqs = orig(repo) + if 'largefiles' in repo.requirements: + reqs.add('largefiles') + return reqs + +_lfscheme = 'largefile://' +def openlargefile(orig, ui, url_, data=None): + if url_.startswith(_lfscheme): + if data: + msg = "cannot use data on a 'largefile://' url" + raise error.ProgrammingError(msg) + lfid = url_[len(_lfscheme):] + return storefactory.getlfile(ui, lfid) + else: + return orig(ui, url_, data=data) diff -r 87676e8ee056 -r 27b6df1b5adb hgext/largefiles/proto.py --- a/hgext/largefiles/proto.py Mon Jan 08 16:07:51 2018 -0800 +++ b/hgext/largefiles/proto.py Mon Jan 22 17:53:02 2018 -0500 @@ -28,7 +28,6 @@ 'file.\n') # these will all be replaced by largefiles.uisetup -capabilitiesorig = None ssholdcallstream = None httpoldcallstream = None @@ -76,7 +75,7 @@ yield '%d\n' % length for chunk in util.filechunkiter(f): yield chunk - return wireproto.streamres(gen=generator()) + return wireproto.streamres_legacy(gen=generator()) def statlfile(repo, proto, sha): '''Server command for checking if a largefile is present - returns '2\n' if @@ -161,9 +160,11 @@ repo.__class__ = lfileswirerepository # advertise the largefiles=serve capability -def capabilities(repo, proto): - '''Wrap server command to announce largefile server capability''' - return capabilitiesorig(repo, proto) + ' largefiles=serve' +def _capabilities(orig, repo, proto): + '''announce largefile server capability''' + caps = orig(repo, proto) + caps.append('largefiles=serve') + return caps def heads(repo, proto): '''Wrap server command - largefile capable clients will know to call @@ -176,7 +177,7 @@ if cmd == 'heads' and self.capable('largefiles'): cmd = 'lheads' if cmd == 'batch' and self.capable('largefiles'): - 
args['cmds'] = args['cmds'].replace('heads ', 'lheads ') + args[r'cmds'] = args[r'cmds'].replace('heads ', 'lheads ') return ssholdcallstream(self, cmd, **args) headsre = re.compile(r'(^|;)heads\b') @@ -185,5 +186,5 @@ if cmd == 'heads' and self.capable('largefiles'): cmd = 'lheads' if cmd == 'batch' and self.capable('largefiles'): - args['cmds'] = headsre.sub('lheads', args['cmds']) + args[r'cmds'] = headsre.sub('lheads', args[r'cmds']) return httpoldcallstream(self, cmd, **args) diff -r 87676e8ee056 -r 27b6df1b5adb hgext/largefiles/remotestore.py --- a/hgext/largefiles/remotestore.py Mon Jan 08 16:07:51 2018 -0800 +++ b/hgext/largefiles/remotestore.py Mon Jan 22 17:53:02 2018 -0500 @@ -27,7 +27,9 @@ '''a largefile store accessed over a network''' def __init__(self, ui, repo, url): super(remotestore, self).__init__(ui, repo, url) - self._lstore = localstore.localstore(self.ui, self.repo, self.repo) + self._lstore = None + if repo is not None: + self._lstore = localstore.localstore(self.ui, self.repo, self.repo) def put(self, source, hash): if self.sendfile(source, hash): diff -r 87676e8ee056 -r 27b6df1b5adb hgext/largefiles/reposetup.py --- a/hgext/largefiles/reposetup.py Mon Jan 08 16:07:51 2018 -0800 +++ b/hgext/largefiles/reposetup.py Mon Jan 22 17:53:02 2018 -0500 @@ -138,7 +138,7 @@ sf = lfutil.standin(f) if sf in dirstate: newfiles.append(sf) - elif sf in dirstate.dirs(): + elif dirstate.hasdir(sf): # Directory entries could be regular or # standin, check both newfiles.extend((f, sf)) @@ -156,7 +156,7 @@ def sfindirstate(f): sf = lfutil.standin(f) dirstate = self.dirstate - return sf in dirstate or sf in dirstate.dirs() + return sf in dirstate or dirstate.hasdir(sf) match._files = [f for f in match._files if sfindirstate(f)] diff -r 87676e8ee056 -r 27b6df1b5adb hgext/largefiles/storefactory.py --- a/hgext/largefiles/storefactory.py Mon Jan 08 16:07:51 2018 -0800 +++ b/hgext/largefiles/storefactory.py Mon Jan 22 17:53:02 2018 -0500 @@ -22,8 +22,9 @@ # During 
clone this function is passed the src's ui object # but it needs the dest's ui object so it can read out of # the config file. Use repo.ui instead. -def openstore(repo, remote=None, put=False): - ui = repo.ui +def openstore(repo=None, remote=None, put=False, ui=None): + if ui is None: + ui = repo.ui if not remote: lfpullsource = getattr(repo, 'lfpullsource', None) @@ -37,12 +38,16 @@ # ui.expandpath() leaves 'default-push' and 'default' alone if # they cannot be expanded: fallback to the empty string, # meaning the current directory. - if path == 'default-push' or path == 'default': + if repo is None: + path = ui.expandpath('default') + path, _branches = hg.parseurl(path) + remote = hg.peer(repo or ui, {}, path) + elif path == 'default-push' or path == 'default': path = '' remote = repo else: path, _branches = hg.parseurl(path) - remote = hg.peer(repo, {}, path) + remote = hg.peer(repo or ui, {}, path) # The path could be a scheme so use Mercurial's normal functionality # to resolve the scheme to a repository and use its path @@ -76,3 +81,6 @@ } _scheme_re = re.compile(r'^([a-zA-Z0-9+-.]+)://') + +def getlfile(ui, hash): + return util.chunkbuffer(openstore(ui=ui)._get(hash)) diff -r 87676e8ee056 -r 27b6df1b5adb hgext/largefiles/uisetup.py --- a/hgext/largefiles/uisetup.py Mon Jan 08 16:07:51 2018 -0800 +++ b/hgext/largefiles/uisetup.py Mon Jan 22 17:53:02 2018 -0500 @@ -30,6 +30,8 @@ scmutil, sshpeer, subrepo, + upgrade, + url, wireproto, ) @@ -60,6 +62,12 @@ extensions.wrapfunction(copies, 'pathcopies', overrides.copiespathcopies) + extensions.wrapfunction(upgrade, 'preservedrequirements', + overrides.upgraderequirements) + + extensions.wrapfunction(upgrade, 'supporteddestrequirements', + overrides.upgraderequirements) + # Subrepos call status function entry = extensions.wrapcommand(commands.table, 'status', overrides.overridestatus) @@ -153,13 +161,15 @@ extensions.wrapfunction(scmutil, 'marktouched', overrides.scmutilmarktouched) + extensions.wrapfunction(url, 
'open', + overrides.openlargefile) + # create the new wireproto commands ... wireproto.commands['putlfile'] = (proto.putlfile, 'sha') wireproto.commands['getlfile'] = (proto.getlfile, 'sha') wireproto.commands['statlfile'] = (proto.statlfile, 'sha') # ... and wrap some existing ones - wireproto.commands['capabilities'] = (proto.capabilities, '') wireproto.commands['heads'] = (proto.heads, '') wireproto.commands['lheads'] = (wireproto.heads, '') @@ -171,10 +181,7 @@ extensions.wrapfunction(webcommands, 'decodepath', overrides.decodepath) - # the hello wireproto command uses wireproto.capabilities, so it won't see - # our largefiles capability unless we replace the actual function as well. - proto.capabilitiesorig = wireproto.capabilities - wireproto.capabilities = proto.capabilities + extensions.wrapfunction(wireproto, '_capabilities', proto._capabilities) # can't do this in reposetup because it needs to have happened before # wirerepo.__init__ is called diff -r 87676e8ee056 -r 27b6df1b5adb hgext/lfs/__init__.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hgext/lfs/__init__.py Mon Jan 22 17:53:02 2018 -0500 @@ -0,0 +1,387 @@ +# lfs - hash-preserving large file support using Git-LFS protocol +# +# Copyright 2017 Facebook, Inc. +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +"""lfs - large file support (EXPERIMENTAL) + +This extension allows large files to be tracked outside of the normal +repository storage and stored on a centralized server, similar to the +``largefiles`` extension. The ``git-lfs`` protocol is used when +communicating with the server, so existing git infrastructure can be +harnessed. Even though the files are stored outside of the repository, +they are still integrity checked in the same manner as normal files. + +The files stored outside of the repository are downloaded on demand, +which reduces the time to clone, and possibly the local disk usage. 
+This changes fundamental workflows in a DVCS, so careful thought +should be given before deploying it. :hg:`convert` can be used to +convert LFS repositories to normal repositories that no longer +require this extension, and do so without changing the commit hashes. +This allows the extension to be disabled if the centralized workflow +becomes burdensome. However, the pre and post convert clones will +not be able to communicate with each other unless the extension is +enabled on both. + +To start a new repository, or add new LFS files, just create and add +an ``.hglfs`` file as described below. Because the file is tracked in +the repository, all clones will use the same selection policy. During +subsequent commits, Mercurial will consult this file to determine if +an added or modified file should be stored externally. The type of +storage depends on the characteristics of the file at each commit. A +file that is near a size threshold may switch back and forth between +LFS and normal storage, as needed. + +Alternately, both normal repositories and largefile controlled +repositories can be converted to LFS by using :hg:`convert` and the +``lfs.track`` config option described below. The ``.hglfs`` file +should then be created and added, to control subsequent LFS selection. +The hashes are also unchanged in this case. The LFS and non-LFS +repositories can be distinguished because the LFS repository will +abort any command if this extension is disabled. + +Committed LFS files are held locally, until the repository is pushed. +Prior to pushing the normal repository data, the LFS files that are +tracked by the outgoing commits are automatically uploaded to the +configured central server. No LFS files are transferred on +:hg:`pull` or :hg:`clone`. Instead, the files are downloaded on +demand as they need to be read, if a cached copy cannot be found +locally. Both committing and downloading an LFS file will link the +file to a usercache, to speed up future access. 
See the `usercache` +config setting described below. + +.hglfs:: + + The extension reads its configuration from a versioned ``.hglfs`` + configuration file found in the root of the working directory. The + ``.hglfs`` file uses the same syntax as all other Mercurial + configuration files. It uses a single section, ``[track]``. + + The ``[track]`` section specifies which files are stored as LFS (or + not). Each line is keyed by a file pattern, with a predicate value. + The first file pattern match is used, so put more specific patterns + first. The available predicates are ``all()``, ``none()``, and + ``size()``. See "hg help filesets.size" for the latter. + + Example versioned ``.hglfs`` file:: + + [track] + # No Makefile or python file, anywhere, will be LFS + **Makefile = none() + **.py = none() + + **.zip = all() + **.exe = size(">1MB") + + # Catchall for everything not matched above + ** = size(">10MB") + +Configs:: + + [lfs] + # Remote endpoint. Multiple protocols are supported: + # - http(s)://user:pass@example.com/path + # git-lfs endpoint + # - file:///tmp/path + # local filesystem, usually for testing + # if unset, lfs will prompt setting this when it must use this value. + # (default: unset) + url = https://example.com/repo.git/info/lfs + + # Which files to track in LFS. Path tests are "**.extname" for file + # extensions, and "path:under/some/directory" for path prefix. Both + # are relative to the repository root. + # File size can be tested with the "size()" fileset, and tests can be + # joined with fileset operators. (See "hg help filesets.operators".) 
+ # + # Some examples: + # - all() # everything + # - none() # nothing + # - size(">20MB") # larger than 20MB + # - !**.txt # anything not a *.txt file + # - **.zip | **.tar.gz | **.7z # some types of compressed files + # - path:bin # files under "bin" in the project root + # - (**.php & size(">2MB")) | (**.js & size(">5MB")) | **.tar.gz + # | (path:bin & !path:/bin/README) | size(">1GB") + # (default: none()) + # + # This is ignored if there is a tracked '.hglfs' file, and this setting + # will eventually be deprecated and removed. + track = size(">10M") + + # how many times to retry before giving up on transferring an object + retry = 5 + + # the local directory to store lfs files for sharing across local clones. + # If not set, the cache is located in an OS specific cache location. + usercache = /path/to/global/cache +""" + +from __future__ import absolute_import + +from mercurial.i18n import _ + +from mercurial import ( + bundle2, + changegroup, + cmdutil, + config, + context, + error, + exchange, + extensions, + filelog, + fileset, + hg, + localrepo, + minifileset, + node, + pycompat, + registrar, + revlog, + scmutil, + templatekw, + upgrade, + util, + vfs as vfsmod, + wireproto, +) + +from . import ( + blobstore, + wrapper, +) + +# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for +# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should +# be specifying the version(s) of Mercurial they are tested with, or +# leave the attribute unspecified. 
+testedwith = 'ships-with-hg-core' + +configtable = {} +configitem = registrar.configitem(configtable) + +configitem('experimental', 'lfs.user-agent', + default=None, +) +configitem('experimental', 'lfs.worker-enable', + default=False, +) + +configitem('lfs', 'url', + default=None, +) +configitem('lfs', 'usercache', + default=None, +) +# Deprecated +configitem('lfs', 'threshold', + default=None, +) +configitem('lfs', 'track', + default='none()', +) +configitem('lfs', 'retry', + default=5, +) + +cmdtable = {} +command = registrar.command(cmdtable) + +templatekeyword = registrar.templatekeyword() + +def featuresetup(ui, supported): + # don't die on seeing a repo with the lfs requirement + supported |= {'lfs'} + +def uisetup(ui): + localrepo.localrepository.featuresetupfuncs.add(featuresetup) + +def reposetup(ui, repo): + # Nothing to do with a remote repo + if not repo.local(): + return + + repo.svfs.lfslocalblobstore = blobstore.local(repo) + repo.svfs.lfsremoteblobstore = blobstore.remote(repo) + + class lfsrepo(repo.__class__): + @localrepo.unfilteredmethod + def commitctx(self, ctx, error=False): + repo.svfs.options['lfstrack'] = _trackedmatcher(self, ctx) + return super(lfsrepo, self).commitctx(ctx, error) + + repo.__class__ = lfsrepo + + if 'lfs' not in repo.requirements: + def checkrequireslfs(ui, repo, **kwargs): + if 'lfs' not in repo.requirements: + last = kwargs.get('node_last') + _bin = node.bin + if last: + s = repo.set('%n:%n', _bin(kwargs['node']), _bin(last)) + else: + s = repo.set('%n', _bin(kwargs['node'])) + for ctx in s: + # TODO: is there a way to just walk the files in the commit? 
+ if any(ctx[f].islfs() for f in ctx.files() if f in ctx): + repo.requirements.add('lfs') + repo._writerequirements() + repo.prepushoutgoinghooks.add('lfs', wrapper.prepush) + break + + ui.setconfig('hooks', 'commit.lfs', checkrequireslfs, 'lfs') + ui.setconfig('hooks', 'pretxnchangegroup.lfs', checkrequireslfs, 'lfs') + else: + repo.prepushoutgoinghooks.add('lfs', wrapper.prepush) + +def _trackedmatcher(repo, ctx): + """Return a function (path, size) -> bool indicating whether or not to + track a given file with lfs.""" + data = '' + + if '.hglfs' in ctx.added() or '.hglfs' in ctx.modified(): + data = ctx['.hglfs'].data() + elif '.hglfs' not in ctx.removed(): + p1 = repo['.'] + + if '.hglfs' not in p1: + # No '.hglfs' in wdir or in parent. Fallback to config + # for now. + trackspec = repo.ui.config('lfs', 'track') + + # deprecated config: lfs.threshold + threshold = repo.ui.configbytes('lfs', 'threshold') + if threshold: + fileset.parse(trackspec) # make sure syntax errors are confined + trackspec = "(%s) | size('>%d')" % (trackspec, threshold) + + return minifileset.compile(trackspec) + + data = p1['.hglfs'].data() + + # In removed, or not in parent + if not data: + return lambda p, s: False + + # Parse errors here will abort with a message that points to the .hglfs file + # and line number. + cfg = config.config() + cfg.parse('.hglfs', data) + + try: + rules = [(minifileset.compile(pattern), minifileset.compile(rule)) + for pattern, rule in cfg.items('track')] + except error.ParseError as e: + # The original exception gives no indicator that the error is in the + # .hglfs file, so add that. + + # TODO: See if the line number of the file can be made available. 
+ raise error.Abort(_('parse error in .hglfs: %s') % e) + + def _match(path, size): + for pat, rule in rules: + if pat(path, size): + return rule(path, size) + + return False + + return _match + +def wrapfilelog(filelog): + wrapfunction = extensions.wrapfunction + + wrapfunction(filelog, 'addrevision', wrapper.filelogaddrevision) + wrapfunction(filelog, 'renamed', wrapper.filelogrenamed) + wrapfunction(filelog, 'size', wrapper.filelogsize) + +def extsetup(ui): + wrapfilelog(filelog.filelog) + + wrapfunction = extensions.wrapfunction + + wrapfunction(cmdutil, '_updatecatformatter', wrapper._updatecatformatter) + wrapfunction(scmutil, 'wrapconvertsink', wrapper.convertsink) + + wrapfunction(upgrade, '_finishdatamigration', + wrapper.upgradefinishdatamigration) + + wrapfunction(upgrade, 'preservedrequirements', + wrapper.upgraderequirements) + + wrapfunction(upgrade, 'supporteddestrequirements', + wrapper.upgraderequirements) + + wrapfunction(changegroup, + 'supportedoutgoingversions', + wrapper.supportedoutgoingversions) + wrapfunction(changegroup, + 'allsupportedversions', + wrapper.allsupportedversions) + + wrapfunction(exchange, 'push', wrapper.push) + wrapfunction(wireproto, '_capabilities', wrapper._capabilities) + + wrapfunction(context.basefilectx, 'cmp', wrapper.filectxcmp) + wrapfunction(context.basefilectx, 'isbinary', wrapper.filectxisbinary) + context.basefilectx.islfs = wrapper.filectxislfs + + revlog.addflagprocessor( + revlog.REVIDX_EXTSTORED, + ( + wrapper.readfromstore, + wrapper.writetostore, + wrapper.bypasscheckhash, + ), + ) + + wrapfunction(hg, 'clone', wrapper.hgclone) + wrapfunction(hg, 'postshare', wrapper.hgpostshare) + + # Make bundle choose changegroup3 instead of changegroup2. This affects + # "hg bundle" command. Note: it does not cover all bundle formats like + # "packed1". Using "packed1" with lfs will likely cause trouble. 
+ names = [k for k, v in exchange._bundlespeccgversions.items() if v == '02'] + for k in names: + exchange._bundlespeccgversions[k] = '03' + + # bundlerepo uses "vfsmod.readonlyvfs(othervfs)", we need to make sure lfs + # options and blob stores are passed from othervfs to the new readonlyvfs. + wrapfunction(vfsmod.readonlyvfs, '__init__', wrapper.vfsinit) + + # when writing a bundle via "hg bundle" command, upload related LFS blobs + wrapfunction(bundle2, 'writenewbundle', wrapper.writenewbundle) + +@templatekeyword('lfs_files') +def lfsfiles(repo, ctx, **args): + """List of strings. LFS files added or modified by the changeset.""" + args = pycompat.byteskwargs(args) + + pointers = wrapper.pointersfromctx(ctx) # {path: pointer} + files = sorted(pointers.keys()) + + def pointer(v): + # In the file spec, version is first and the other keys are sorted. + sortkeyfunc = lambda x: (x[0] != 'version', x) + items = sorted(pointers[v].iteritems(), key=sortkeyfunc) + return util.sortdict(items) + + makemap = lambda v: { + 'file': v, + 'oid': pointers[v].oid(), + 'pointer': templatekw.hybriddict(pointer(v)), + } + + # TODO: make the separator ', '? + f = templatekw._showlist('lfs_file', files, args) + return templatekw._hybrid(f, files, makemap, pycompat.identity) + +@command('debuglfsupload', + [('r', 'rev', [], _('upload large files introduced by REV'))]) +def debuglfsupload(ui, repo, **opts): + """upload lfs blobs added by the working copy parent or given revisions""" + revs = opts.get('rev', []) + pointers = wrapper.extractpointers(repo, scmutil.revrange(repo, revs)) + wrapper.uploadblobs(repo, pointers) diff -r 87676e8ee056 -r 27b6df1b5adb hgext/lfs/blobstore.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hgext/lfs/blobstore.py Mon Jan 22 17:53:02 2018 -0500 @@ -0,0 +1,463 @@ +# blobstore.py - local and remote (speaking Git-LFS protocol) blob storages +# +# Copyright 2017 Facebook, Inc. 
+# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +from __future__ import absolute_import + +import hashlib +import json +import os +import re +import socket + +from mercurial.i18n import _ + +from mercurial import ( + error, + pathutil, + url as urlmod, + util, + vfs as vfsmod, + worker, +) + +from ..largefiles import lfutil + +# 64 bytes for SHA256 +_lfsre = re.compile(r'\A[a-f0-9]{64}\Z') + +class lfsvfs(vfsmod.vfs): + def join(self, path): + """split the path at first two characters, like: XX/XXXXX...""" + if not _lfsre.match(path): + raise error.ProgrammingError('unexpected lfs path: %s' % path) + return super(lfsvfs, self).join(path[0:2], path[2:]) + + def walk(self, path=None, onerror=None): + """Yield (dirpath, [], oids) tuple for blobs under path + + Oids only exist in the root of this vfs, so dirpath is always ''. + """ + root = os.path.normpath(self.base) + # when dirpath == root, dirpath[prefixlen:] becomes empty + # because len(dirpath) < prefixlen. + prefixlen = len(pathutil.normasprefix(root)) + oids = [] + + for dirpath, dirs, files in os.walk(self.reljoin(self.base, path or ''), + onerror=onerror): + dirpath = dirpath[prefixlen:] + + # Silently skip unexpected files and directories + if len(dirpath) == 2: + oids.extend([dirpath + f for f in files + if _lfsre.match(dirpath + f)]) + + yield ('', [], oids) + +class filewithprogress(object): + """a file-like object that supports __len__ and read. + + Useful to provide progress information for how many bytes are read. 
+ """ + + def __init__(self, fp, callback): + self._fp = fp + self._callback = callback # func(readsize) + fp.seek(0, os.SEEK_END) + self._len = fp.tell() + fp.seek(0) + + def __len__(self): + return self._len + + def read(self, size): + if self._fp is None: + return b'' + data = self._fp.read(size) + if data: + if self._callback: + self._callback(len(data)) + else: + self._fp.close() + self._fp = None + return data + +class local(object): + """Local blobstore for large file contents. + + This blobstore is used both as a cache and as a staging area for large blobs + to be uploaded to the remote blobstore. + """ + + def __init__(self, repo): + fullpath = repo.svfs.join('lfs/objects') + self.vfs = lfsvfs(fullpath) + usercache = lfutil._usercachedir(repo.ui, 'lfs') + self.cachevfs = lfsvfs(usercache) + self.ui = repo.ui + + def open(self, oid): + """Open a read-only file descriptor to the named blob, in either the + usercache or the local store.""" + # The usercache is the most likely place to hold the file. Commit will + # write to both it and the local store, as will anything that downloads + # the blobs. However, things like clone without an update won't + # populate the local store. For an init + push of a local clone, + # the usercache is the only place it _could_ be. If not present, the + # missing file msg here will indicate the local repo, not the usercache. 
+ if self.cachevfs.exists(oid): + return self.cachevfs(oid, 'rb') + + return self.vfs(oid, 'rb') + + def download(self, oid, src): + """Read the blob from the remote source in chunks, verify the content, + and write to this local blobstore.""" + sha256 = hashlib.sha256() + + with self.vfs(oid, 'wb', atomictemp=True) as fp: + for chunk in util.filechunkiter(src, size=1048576): + fp.write(chunk) + sha256.update(chunk) + + realoid = sha256.hexdigest() + if realoid != oid: + raise error.Abort(_('corrupt remote lfs object: %s') % oid) + + # XXX: should we verify the content of the cache, and hardlink back to + # the local store on success, but truncate, write and link on failure? + if not self.cachevfs.exists(oid): + self.ui.note(_('lfs: adding %s to the usercache\n') % oid) + lfutil.link(self.vfs.join(oid), self.cachevfs.join(oid)) + + def write(self, oid, data): + """Write blob to local blobstore. + + This should only be called from the filelog during a commit or similar. + As such, there is no need to verify the data. Imports from a remote + store must use ``download()`` instead.""" + with self.vfs(oid, 'wb', atomictemp=True) as fp: + fp.write(data) + + # XXX: should we verify the content of the cache, and hardlink back to + # the local store on success, but truncate, write and link on failure? + if not self.cachevfs.exists(oid): + self.ui.note(_('lfs: adding %s to the usercache\n') % oid) + lfutil.link(self.vfs.join(oid), self.cachevfs.join(oid)) + + def read(self, oid, verify=True): + """Read blob from local blobstore.""" + if not self.vfs.exists(oid): + blob = self._read(self.cachevfs, oid, verify) + + # Even if revlog will verify the content, it needs to be verified + # now before making the hardlink to avoid propagating corrupt blobs. + # Don't abort if corruption is detected, because `hg verify` will + # give more useful info about the corruption- simply don't add the + # hardlink. 
+ if verify or hashlib.sha256(blob).hexdigest() == oid: + self.ui.note(_('lfs: found %s in the usercache\n') % oid) + lfutil.link(self.cachevfs.join(oid), self.vfs.join(oid)) + else: + self.ui.note(_('lfs: found %s in the local lfs store\n') % oid) + blob = self._read(self.vfs, oid, verify) + return blob + + def _read(self, vfs, oid, verify): + """Read blob (after verifying) from the given store""" + blob = vfs.read(oid) + if verify: + _verify(oid, blob) + return blob + + def has(self, oid): + """Returns True if the local blobstore contains the requested blob, + False otherwise.""" + return self.cachevfs.exists(oid) or self.vfs.exists(oid) + +class _gitlfsremote(object): + + def __init__(self, repo, url): + ui = repo.ui + self.ui = ui + baseurl, authinfo = url.authinfo() + self.baseurl = baseurl.rstrip('/') + useragent = repo.ui.config('experimental', 'lfs.user-agent') + if not useragent: + useragent = 'git-lfs/2.3.4 (Mercurial %s)' % util.version() + self.urlopener = urlmod.opener(ui, authinfo, useragent) + self.retry = ui.configint('lfs', 'retry') + + def writebatch(self, pointers, fromstore): + """Batch upload from local to remote blobstore.""" + self._batch(pointers, fromstore, 'upload') + + def readbatch(self, pointers, tostore): + """Batch download from remote to local blostore.""" + self._batch(pointers, tostore, 'download') + + def _batchrequest(self, pointers, action): + """Get metadata about objects pointed by pointers for given action + + Return decoded JSON object like {'objects': [{'oid': '', 'size': 1}]} + See https://github.com/git-lfs/git-lfs/blob/master/docs/api/batch.md + """ + objects = [{'oid': p.oid(), 'size': p.size()} for p in pointers] + requestdata = json.dumps({ + 'objects': objects, + 'operation': action, + }) + batchreq = util.urlreq.request('%s/objects/batch' % self.baseurl, + data=requestdata) + batchreq.add_header('Accept', 'application/vnd.git-lfs+json') + batchreq.add_header('Content-Type', 'application/vnd.git-lfs+json') + try: + 
rawjson = self.urlopener.open(batchreq).read() + except util.urlerr.httperror as ex: + raise LfsRemoteError(_('LFS HTTP error: %s (action=%s)') + % (ex, action)) + try: + response = json.loads(rawjson) + except ValueError: + raise LfsRemoteError(_('LFS server returns invalid JSON: %s') + % rawjson) + return response + + def _checkforservererror(self, pointers, responses, action): + """Scans errors from objects + + Raises LfsRemoteError if any objects have an error""" + for response in responses: + # The server should return 404 when objects cannot be found. Some + # server implementation (ex. lfs-test-server) does not set "error" + # but just removes "download" from "actions". Treat that case + # as the same as 404 error. + notfound = (response.get('error', {}).get('code') == 404 + or (action == 'download' + and action not in response.get('actions', []))) + if notfound: + ptrmap = {p.oid(): p for p in pointers} + p = ptrmap.get(response['oid'], None) + if p: + filename = getattr(p, 'filename', 'unknown') + raise LfsRemoteError( + _(('LFS server error. Remote object ' + 'for "%s" not found: %r')) % (filename, response)) + else: + raise LfsRemoteError( + _('LFS server error. Unsolicited response for oid %s') + % response['oid']) + if 'error' in response: + raise LfsRemoteError(_('LFS server error: %r') % response) + + def _extractobjects(self, response, pointers, action): + """extract objects from response of the batch API + + response: parsed JSON object returned by batch API + return response['objects'] filtered by action + raise if any object has an error + """ + # Scan errors from objects - fail early + objects = response.get('objects', []) + self._checkforservererror(pointers, objects, action) + + # Filter objects with given action. Practically, this skips uploading + # objects which exist in the server. 
+ filteredobjects = [o for o in objects if action in o.get('actions', [])] + + return filteredobjects + + def _basictransfer(self, obj, action, localstore): + """Download or upload a single object using basic transfer protocol + + obj: dict, an object description returned by batch API + action: string, one of ['upload', 'download'] + localstore: blobstore.local + + See https://github.com/git-lfs/git-lfs/blob/master/docs/api/\ + basic-transfers.md + """ + oid = str(obj['oid']) + + href = str(obj['actions'][action].get('href')) + headers = obj['actions'][action].get('header', {}).items() + + request = util.urlreq.request(href) + if action == 'upload': + # If uploading blobs, read data from local blobstore. + with localstore.open(oid) as fp: + _verifyfile(oid, fp) + request.data = filewithprogress(localstore.open(oid), None) + request.get_method = lambda: 'PUT' + + for k, v in headers: + request.add_header(k, v) + + response = b'' + try: + req = self.urlopener.open(request) + if action == 'download': + # If downloading blobs, store downloaded data to local blobstore + localstore.download(oid, req) + else: + while True: + data = req.read(1048576) + if not data: + break + response += data + if response: + self.ui.debug('lfs %s response: %s' % (action, response)) + except util.urlerr.httperror as ex: + if self.ui.debugflag: + self.ui.debug('%s: %s\n' % (oid, ex.read())) + raise LfsRemoteError(_('HTTP error: %s (oid=%s, action=%s)') + % (ex, oid, action)) + + def _batch(self, pointers, localstore, action): + if action not in ['upload', 'download']: + raise error.ProgrammingError('invalid Git-LFS action: %s' % action) + + response = self._batchrequest(pointers, action) + objects = self._extractobjects(response, pointers, action) + total = sum(x.get('size', 0) for x in objects) + sizes = {} + for obj in objects: + sizes[obj.get('oid')] = obj.get('size', 0) + topic = {'upload': _('lfs uploading'), + 'download': _('lfs downloading')}[action] + if len(objects) > 1: + 
self.ui.note(_('lfs: need to transfer %d objects (%s)\n') + % (len(objects), util.bytecount(total))) + self.ui.progress(topic, 0, total=total) + def transfer(chunk): + for obj in chunk: + objsize = obj.get('size', 0) + if self.ui.verbose: + if action == 'download': + msg = _('lfs: downloading %s (%s)\n') + elif action == 'upload': + msg = _('lfs: uploading %s (%s)\n') + self.ui.note(msg % (obj.get('oid'), + util.bytecount(objsize))) + retry = self.retry + while True: + try: + self._basictransfer(obj, action, localstore) + yield 1, obj.get('oid') + break + except socket.error as ex: + if retry > 0: + self.ui.note( + _('lfs: failed: %r (remaining retry %d)\n') + % (ex, retry)) + retry -= 1 + continue + raise + + # Until https multiplexing gets sorted out + if self.ui.configbool('experimental', 'lfs.worker-enable'): + oids = worker.worker(self.ui, 0.1, transfer, (), + sorted(objects, key=lambda o: o.get('oid'))) + else: + oids = transfer(sorted(objects, key=lambda o: o.get('oid'))) + + processed = 0 + for _one, oid in oids: + processed += sizes[oid] + self.ui.progress(topic, processed, total=total) + self.ui.note(_('lfs: processed: %s\n') % oid) + self.ui.progress(topic, pos=None, total=total) + + def __del__(self): + # copied from mercurial/httppeer.py + urlopener = getattr(self, 'urlopener', None) + if urlopener: + for h in urlopener.handlers: + h.close() + getattr(h, "close_all", lambda : None)() + +class _dummyremote(object): + """Dummy store storing blobs to temp directory.""" + + def __init__(self, repo, url): + fullpath = repo.vfs.join('lfs', url.path) + self.vfs = lfsvfs(fullpath) + + def writebatch(self, pointers, fromstore): + for p in pointers: + content = fromstore.read(p.oid(), verify=True) + with self.vfs(p.oid(), 'wb', atomictemp=True) as fp: + fp.write(content) + + def readbatch(self, pointers, tostore): + for p in pointers: + with self.vfs(p.oid(), 'rb') as fp: + tostore.download(p.oid(), fp) + +class _nullremote(object): + """Null store storing blobs 
to /dev/null.""" + + def __init__(self, repo, url): + pass + + def writebatch(self, pointers, fromstore): + pass + + def readbatch(self, pointers, tostore): + pass + +class _promptremote(object): + """Prompt user to set lfs.url when accessed.""" + + def __init__(self, repo, url): + pass + + def writebatch(self, pointers, fromstore, ui=None): + self._prompt() + + def readbatch(self, pointers, tostore, ui=None): + self._prompt() + + def _prompt(self): + raise error.Abort(_('lfs.url needs to be configured')) + +_storemap = { + 'https': _gitlfsremote, + 'http': _gitlfsremote, + 'file': _dummyremote, + 'null': _nullremote, + None: _promptremote, +} + +def _verify(oid, content): + realoid = hashlib.sha256(content).hexdigest() + if realoid != oid: + raise error.Abort(_('detected corrupt lfs object: %s') % oid, + hint=_('run hg verify')) + +def _verifyfile(oid, fp): + sha256 = hashlib.sha256() + while True: + data = fp.read(1024 * 1024) + if not data: + break + sha256.update(data) + realoid = sha256.hexdigest() + if realoid != oid: + raise error.Abort(_('detected corrupt lfs object: %s') % oid, + hint=_('run hg verify')) + +def remote(repo): + """remotestore factory. return a store in _storemap depending on config""" + url = util.url(repo.ui.config('lfs', 'url') or '') + scheme = url.scheme + if scheme not in _storemap: + raise error.Abort(_('lfs: unknown url scheme: %s') % scheme) + return _storemap[scheme](repo, url) + +class LfsRemoteError(error.RevlogError): + pass diff -r 87676e8ee056 -r 27b6df1b5adb hgext/lfs/pointer.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hgext/lfs/pointer.py Mon Jan 22 17:53:02 2018 -0500 @@ -0,0 +1,73 @@ +# pointer.py - Git-LFS pointer serialization +# +# Copyright 2017 Facebook, Inc. +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. 
+ +from __future__ import absolute_import + +import re + +from mercurial.i18n import _ + +from mercurial import ( + error, +) + +class InvalidPointer(error.RevlogError): + pass + +class gitlfspointer(dict): + VERSION = 'https://git-lfs.github.com/spec/v1' + + def __init__(self, *args, **kwargs): + self['version'] = self.VERSION + super(gitlfspointer, self).__init__(*args, **kwargs) + + @classmethod + def deserialize(cls, text): + try: + return cls(l.split(' ', 1) for l in text.splitlines()).validate() + except ValueError: # l.split returns 1 item instead of 2 + raise InvalidPointer(_('cannot parse git-lfs text: %r') % text) + + def serialize(self): + sortkeyfunc = lambda x: (x[0] != 'version', x) + items = sorted(self.validate().iteritems(), key=sortkeyfunc) + return ''.join('%s %s\n' % (k, v) for k, v in items) + + def oid(self): + return self['oid'].split(':')[-1] + + def size(self): + return int(self['size']) + + # regular expressions used by _validate + # see https://github.com/git-lfs/git-lfs/blob/master/docs/spec.md + _keyre = re.compile(r'\A[a-z0-9.-]+\Z') + _valuere = re.compile(r'\A[^\n]*\Z') + _requiredre = { + 'size': re.compile(r'\A[0-9]+\Z'), + 'oid': re.compile(r'\Asha256:[0-9a-f]{64}\Z'), + 'version': re.compile(r'\A%s\Z' % re.escape(VERSION)), + } + + def validate(self): + """raise InvalidPointer on error. 
return self if there is no error""" + requiredcount = 0 + for k, v in self.iteritems(): + if k in self._requiredre: + if not self._requiredre[k].match(v): + raise InvalidPointer(_('unexpected value: %s=%r') % (k, v)) + requiredcount += 1 + elif not self._keyre.match(k): + raise InvalidPointer(_('unexpected key: %s') % k) + if not self._valuere.match(v): + raise InvalidPointer(_('unexpected value: %s=%r') % (k, v)) + if len(self._requiredre) != requiredcount: + miss = sorted(set(self._requiredre.keys()).difference(self.keys())) + raise InvalidPointer(_('missed keys: %s') % ', '.join(miss)) + return self + +deserialize = gitlfspointer.deserialize diff -r 87676e8ee056 -r 27b6df1b5adb hgext/lfs/wrapper.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hgext/lfs/wrapper.py Mon Jan 22 17:53:02 2018 -0500 @@ -0,0 +1,350 @@ +# wrapper.py - methods wrapping core mercurial logic +# +# Copyright 2017 Facebook, Inc. +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +from __future__ import absolute_import + +import hashlib + +from mercurial.i18n import _ +from mercurial.node import bin, nullid, short + +from mercurial import ( + error, + filelog, + revlog, + util, +) + +from ..largefiles import lfutil + +from . import ( + blobstore, + pointer, +) + +def supportedoutgoingversions(orig, repo): + versions = orig(repo) + if 'lfs' in repo.requirements: + versions.discard('01') + versions.discard('02') + versions.add('03') + return versions + +def allsupportedversions(orig, ui): + versions = orig(ui) + versions.add('03') + return versions + +def _capabilities(orig, repo, proto): + '''Wrap server command to announce lfs server capability''' + caps = orig(repo, proto) + # XXX: change to 'lfs=serve' when separate git server isn't required? 
+ caps.append('lfs') + return caps + +def bypasscheckhash(self, text): + return False + +def readfromstore(self, text): + """Read filelog content from local blobstore transform for flagprocessor. + + Default tranform for flagprocessor, returning contents from blobstore. + Returns a 2-typle (text, validatehash) where validatehash is True as the + contents of the blobstore should be checked using checkhash. + """ + p = pointer.deserialize(text) + oid = p.oid() + store = self.opener.lfslocalblobstore + if not store.has(oid): + p.filename = self.filename + self.opener.lfsremoteblobstore.readbatch([p], store) + + # The caller will validate the content + text = store.read(oid, verify=False) + + # pack hg filelog metadata + hgmeta = {} + for k in p.keys(): + if k.startswith('x-hg-'): + name = k[len('x-hg-'):] + hgmeta[name] = p[k] + if hgmeta or text.startswith('\1\n'): + text = filelog.packmeta(hgmeta, text) + + return (text, True) + +def writetostore(self, text): + # hg filelog metadata (includes rename, etc) + hgmeta, offset = filelog.parsemeta(text) + if offset and offset > 0: + # lfs blob does not contain hg filelog metadata + text = text[offset:] + + # git-lfs only supports sha256 + oid = hashlib.sha256(text).hexdigest() + self.opener.lfslocalblobstore.write(oid, text) + + # replace contents with metadata + longoid = 'sha256:%s' % oid + metadata = pointer.gitlfspointer(oid=longoid, size=str(len(text))) + + # by default, we expect the content to be binary. however, LFS could also + # be used for non-binary content. add a special entry for non-binary data. + # this will be used by filectx.isbinary(). 
+ if not util.binary(text): + # not hg filelog metadata (affecting commit hash), no "x-hg-" prefix + metadata['x-is-binary'] = '0' + + # translate hg filelog metadata to lfs metadata with "x-hg-" prefix + if hgmeta is not None: + for k, v in hgmeta.iteritems(): + metadata['x-hg-%s' % k] = v + + rawtext = metadata.serialize() + return (rawtext, False) + +def _islfs(rlog, node=None, rev=None): + if rev is None: + if node is None: + # both None - likely working copy content where node is not ready + return False + rev = rlog.rev(node) + else: + node = rlog.node(rev) + if node == nullid: + return False + flags = rlog.flags(rev) + return bool(flags & revlog.REVIDX_EXTSTORED) + +def filelogaddrevision(orig, self, text, transaction, link, p1, p2, + cachedelta=None, node=None, + flags=revlog.REVIDX_DEFAULT_FLAGS, **kwds): + textlen = len(text) + # exclude hg rename meta from file size + meta, offset = filelog.parsemeta(text) + if offset: + textlen -= offset + + lfstrack = self.opener.options['lfstrack'] + + # Always exclude hg owned files + if not self.filename.startswith('.hg') and lfstrack(self.filename, textlen): + flags |= revlog.REVIDX_EXTSTORED + + return orig(self, text, transaction, link, p1, p2, cachedelta=cachedelta, + node=node, flags=flags, **kwds) + +def filelogrenamed(orig, self, node): + if _islfs(self, node): + rawtext = self.revision(node, raw=True) + if not rawtext: + return False + metadata = pointer.deserialize(rawtext) + if 'x-hg-copy' in metadata and 'x-hg-copyrev' in metadata: + return metadata['x-hg-copy'], bin(metadata['x-hg-copyrev']) + else: + return False + return orig(self, node) + +def filelogsize(orig, self, rev): + if _islfs(self, rev=rev): + # fast path: use lfs metadata to answer size + rawtext = self.revision(rev, raw=True) + metadata = pointer.deserialize(rawtext) + return int(metadata['size']) + return orig(self, rev) + +def filectxcmp(orig, self, fctx): + """returns True if text is different than fctx""" + # some fctx (ex. 
hg-git) is not based on basefilectx and do not have islfs + if self.islfs() and getattr(fctx, 'islfs', lambda: False)(): + # fast path: check LFS oid + p1 = pointer.deserialize(self.rawdata()) + p2 = pointer.deserialize(fctx.rawdata()) + return p1.oid() != p2.oid() + return orig(self, fctx) + +def filectxisbinary(orig, self): + if self.islfs(): + # fast path: use lfs metadata to answer isbinary + metadata = pointer.deserialize(self.rawdata()) + # if lfs metadata says nothing, assume it's binary by default + return bool(int(metadata.get('x-is-binary', 1))) + return orig(self) + +def filectxislfs(self): + return _islfs(self.filelog(), self.filenode()) + +def _updatecatformatter(orig, fm, ctx, matcher, path, decode): + orig(fm, ctx, matcher, path, decode) + fm.data(rawdata=ctx[path].rawdata()) + +def convertsink(orig, sink): + sink = orig(sink) + if sink.repotype == 'hg': + class lfssink(sink.__class__): + def putcommit(self, files, copies, parents, commit, source, revmap, + full, cleanp2): + pc = super(lfssink, self).putcommit + node = pc(files, copies, parents, commit, source, revmap, full, + cleanp2) + + if 'lfs' not in self.repo.requirements: + ctx = self.repo[node] + + # The file list may contain removed files, so check for + # membership before assuming it is in the context. + if any(f in ctx and ctx[f].islfs() for f, n in files): + self.repo.requirements.add('lfs') + self.repo._writerequirements() + + # Permanently enable lfs locally + self.repo.vfs.append( + 'hgrc', util.tonativeeol('\n[extensions]\nlfs=\n')) + + return node + + sink.__class__ = lfssink + + return sink + +def vfsinit(orig, self, othervfs): + orig(self, othervfs) + # copy lfs related options + for k, v in othervfs.options.items(): + if k.startswith('lfs'): + self.options[k] = v + # also copy lfs blobstores. note: this can run before reposetup, so lfs + # blobstore attributes are not always ready at this time. 
+ for name in ['lfslocalblobstore', 'lfsremoteblobstore']: + if util.safehasattr(othervfs, name): + setattr(self, name, getattr(othervfs, name)) + +def hgclone(orig, ui, opts, *args, **kwargs): + result = orig(ui, opts, *args, **kwargs) + + if result is not None: + sourcerepo, destrepo = result + repo = destrepo.local() + + # When cloning to a remote repo (like through SSH), no repo is available + # from the peer. Therefore the hgrc can't be updated. + if not repo: + return result + + # If lfs is required for this repo, permanently enable it locally + if 'lfs' in repo.requirements: + repo.vfs.append('hgrc', + util.tonativeeol('\n[extensions]\nlfs=\n')) + + return result + +def hgpostshare(orig, sourcerepo, destrepo, bookmarks=True, defaultpath=None): + orig(sourcerepo, destrepo, bookmarks, defaultpath) + + # If lfs is required for this repo, permanently enable it locally + if 'lfs' in destrepo.requirements: + destrepo.vfs.append('hgrc', util.tonativeeol('\n[extensions]\nlfs=\n')) + +def _canskipupload(repo): + # if remotestore is a null store, upload is a no-op and can be skipped + return isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote) + +def candownload(repo): + # if remotestore is a null store, downloads will lead to nothing + return not isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote) + +def uploadblobsfromrevs(repo, revs): + '''upload lfs blobs introduced by revs + + Note: also used by other extensions e. g. infinitepush. avoid renaming. + ''' + if _canskipupload(repo): + return + pointers = extractpointers(repo, revs) + uploadblobs(repo, pointers) + +def prepush(pushop): + """Prepush hook. + + Read through the revisions to push, looking for filelog entries that can be + deserialized into metadata so that we can block the push on their upload to + the remote blobstore. 
+ """ + return uploadblobsfromrevs(pushop.repo, pushop.outgoing.missing) + +def push(orig, repo, remote, *args, **kwargs): + """bail on push if the extension isn't enabled on remote when needed""" + if 'lfs' in repo.requirements: + # If the remote peer is for a local repo, the requirement tests in the + # base class method enforce lfs support. Otherwise, some revisions in + # this repo use lfs, and the remote repo needs the extension loaded. + if not remote.local() and not remote.capable('lfs'): + # This is a copy of the message in exchange.push() when requirements + # are missing between local repos. + m = _("required features are not supported in the destination: %s") + raise error.Abort(m % 'lfs', + hint=_('enable the lfs extension on the server')) + return orig(repo, remote, *args, **kwargs) + +def writenewbundle(orig, ui, repo, source, filename, bundletype, outgoing, + *args, **kwargs): + """upload LFS blobs added by outgoing revisions on 'hg bundle'""" + uploadblobsfromrevs(repo, outgoing.missing) + return orig(ui, repo, source, filename, bundletype, outgoing, *args, + **kwargs) + +def extractpointers(repo, revs): + """return a list of lfs pointers added by given revs""" + repo.ui.debug('lfs: computing set of blobs to upload\n') + pointers = {} + for r in revs: + ctx = repo[r] + for p in pointersfromctx(ctx).values(): + pointers[p.oid()] = p + return sorted(pointers.values()) + +def pointersfromctx(ctx): + """return a dict {path: pointer} for given single changectx""" + result = {} + for f in ctx.files(): + if f not in ctx: + continue + fctx = ctx[f] + if not _islfs(fctx.filelog(), fctx.filenode()): + continue + try: + result[f] = pointer.deserialize(fctx.rawdata()) + except pointer.InvalidPointer as ex: + raise error.Abort(_('lfs: corrupted pointer (%s@%s): %s\n') + % (f, short(ctx.node()), ex)) + return result + +def uploadblobs(repo, pointers): + """upload given pointers from local blobstore""" + if not pointers: + return + + remoteblob = 
repo.svfs.lfsremoteblobstore + remoteblob.writebatch(pointers, repo.svfs.lfslocalblobstore) + +def upgradefinishdatamigration(orig, ui, srcrepo, dstrepo, requirements): + orig(ui, srcrepo, dstrepo, requirements) + + srclfsvfs = srcrepo.svfs.lfslocalblobstore.vfs + dstlfsvfs = dstrepo.svfs.lfslocalblobstore.vfs + + for dirpath, dirs, files in srclfsvfs.walk(): + for oid in files: + ui.write(_('copying lfs blob %s\n') % oid) + lfutil.link(srclfsvfs.join(oid), dstlfsvfs.join(oid)) + +def upgraderequirements(orig, repo): + reqs = orig(repo) + if 'lfs' in repo.requirements: + reqs.add('lfs') + return reqs diff -r 87676e8ee056 -r 27b6df1b5adb hgext/logtoprocess.py --- a/hgext/logtoprocess.py Mon Jan 08 16:07:51 2018 -0800 +++ b/hgext/logtoprocess.py Mon Jan 22 17:53:02 2018 -0500 @@ -124,8 +124,6 @@ env = dict(itertools.chain(encoding.environ.items(), msgpairs, optpairs), EVENT=event, HGPID=str(os.getpid())) - # Connect stdin to /dev/null to prevent child processes messing - # with mercurial's stdin. 
runshellcommand(script, env) return super(logtoprocessui, self).log(event, *msg, **opts) diff -r 87676e8ee056 -r 27b6df1b5adb hgext/mq.py --- a/hgext/mq.py Mon Jan 08 16:07:51 2018 -0800 +++ b/hgext/mq.py Mon Jan 22 17:53:02 2018 -0500 @@ -565,7 +565,7 @@ return index return None - guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)') + guard_re = re.compile(br'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)') def parseseries(self): self.series = [] diff -r 87676e8ee056 -r 27b6df1b5adb hgext/notify.py --- a/hgext/notify.py Mon Jan 08 16:07:51 2018 -0800 +++ b/hgext/notify.py Mon Jan 22 17:53:02 2018 -0500 @@ -135,6 +135,7 @@ from __future__ import absolute_import import email +import email.parser as emailparser import fnmatch import socket import time @@ -339,7 +340,7 @@ 'and revset\n') return - p = email.Parser.Parser() + p = emailparser.Parser() try: msg = p.parsestr(data) except email.Errors.MessageParseError as inst: diff -r 87676e8ee056 -r 27b6df1b5adb hgext/patchbomb.py --- a/hgext/patchbomb.py Mon Jan 08 16:07:51 2018 -0800 +++ b/hgext/patchbomb.py Mon Jan 22 17:53:02 2018 -0500 @@ -89,6 +89,7 @@ mail, node as nodemod, patch, + pycompat, registrar, repair, scmutil, @@ -318,7 +319,7 @@ tmpfn = os.path.join(tmpdir, 'bundle') btype = ui.config('patchbomb', 'bundletype') if btype: - opts['type'] = btype + opts[r'type'] = btype try: commands.bundle(ui, repo, tmpfn, dest, **opts) return util.readfile(tmpfn) @@ -338,8 +339,8 @@ the user through the editor. 
""" ui = repo.ui - if opts.get('desc'): - body = open(opts.get('desc')).read() + if opts.get(r'desc'): + body = open(opts.get(r'desc')).read() else: ui.write(_('\nWrite the introductory message for the ' 'patch series.\n\n')) @@ -359,21 +360,21 @@ """ ui = repo.ui _charsets = mail._charsets(ui) - subj = (opts.get('subject') + subj = (opts.get(r'subject') or prompt(ui, 'Subject:', 'A bundle for your repository')) body = _getdescription(repo, '', sender, **opts) msg = emailmod.MIMEMultipart.MIMEMultipart() if body: - msg.attach(mail.mimeencode(ui, body, _charsets, opts.get('test'))) + msg.attach(mail.mimeencode(ui, body, _charsets, opts.get(r'test'))) datapart = emailmod.MIMEBase.MIMEBase('application', 'x-mercurial-bundle') datapart.set_payload(bundle) - bundlename = '%s.hg' % opts.get('bundlename', 'bundle') + bundlename = '%s.hg' % opts.get(r'bundlename', 'bundle') datapart.add_header('Content-Disposition', 'attachment', filename=bundlename) emailmod.Encoders.encode_base64(datapart) msg.attach(datapart) - msg['Subject'] = mail.headencode(ui, subj, _charsets, opts.get('test')) + msg['Subject'] = mail.headencode(ui, subj, _charsets, opts.get(r'test')) return [(msg, subj, None)] def _makeintro(repo, sender, revs, patches, **opts): @@ -384,9 +385,9 @@ _charsets = mail._charsets(ui) # use the last revision which is likely to be a bookmarked head - prefix = _formatprefix(ui, repo, revs.last(), opts.get('flag'), + prefix = _formatprefix(ui, repo, revs.last(), opts.get(r'flag'), 0, len(patches), numbered=True) - subj = (opts.get('subject') or + subj = (opts.get(r'subject') or prompt(ui, '(optional) Subject: ', rest=prefix, default='')) if not subj: return None # skip intro if the user doesn't bother @@ -394,7 +395,7 @@ subj = prefix + ' ' + subj body = '' - if opts.get('diffstat'): + if opts.get(r'diffstat'): # generate a cumulative diffstat of the whole patch series diffstat = patch.diffstat(sum(patches, [])) body = '\n' + diffstat @@ -402,9 +403,9 @@ diffstat = None 
body = _getdescription(repo, body, sender, **opts) - msg = mail.mimeencode(ui, body, _charsets, opts.get('test')) + msg = mail.mimeencode(ui, body, _charsets, opts.get(r'test')) msg['Subject'] = mail.headencode(ui, subj, _charsets, - opts.get('test')) + opts.get(r'test')) return (msg, subj, diffstat) def _getpatchmsgs(repo, sender, revs, patchnames=None, **opts): @@ -414,6 +415,7 @@ This function returns a list of "email" tuples (subject, content, None). """ + bytesopts = pycompat.byteskwargs(opts) ui = repo.ui _charsets = mail._charsets(ui) patches = list(_getpatches(repo, revs, **opts)) @@ -423,7 +425,7 @@ % len(patches)) # build the intro message, or skip it if the user declines - if introwanted(ui, opts, len(patches)): + if introwanted(ui, bytesopts, len(patches)): msg = _makeintro(repo, sender, revs, patches, **opts) if msg: msgs.append(msg) @@ -437,8 +439,8 @@ for i, (r, p) in enumerate(zip(revs, patches)): if patchnames: name = patchnames[i] - msg = makepatch(ui, repo, r, p, opts, _charsets, i + 1, - len(patches), numbered, name) + msg = makepatch(ui, repo, r, p, bytesopts, _charsets, + i + 1, len(patches), numbered, name) msgs.append(msg) return msgs @@ -452,7 +454,7 @@ revs = [r for r in revs if r >= 0] if not revs: - revs = [len(repo) - 1] + revs = [repo.changelog.tiprev()] revs = repo.revs('outgoing(%s) and ::%ld', dest or '', revs) if not revs: ui.status(_("no changes found\n")) @@ -579,6 +581,7 @@ Before using this command, you will need to enable email in your hgrc. See the [email] section in hgrc(5) for details. 
''' + opts = pycompat.byteskwargs(opts) _charsets = mail._charsets(ui) @@ -629,7 +632,7 @@ # check if revision exist on the public destination publicurl = repo.ui.config('patchbomb', 'publicurl') if publicurl: - repo.ui.debug('checking that revision exist in the public repo') + repo.ui.debug('checking that revision exist in the public repo\n') try: publicpeer = hg.peer(repo, {}, publicurl) except error.RepoError: @@ -637,7 +640,7 @@ % publicurl) raise if not publicpeer.capable('known'): - repo.ui.debug('skipping existence checks: public repo too old') + repo.ui.debug('skipping existence checks: public repo too old\n') else: out = [repo[r] for r in revs] known = publicpeer.known(h.node() for h in out) @@ -672,12 +675,13 @@ prompt(ui, 'From', ui.username())) if bundle: - bundledata = _getbundle(repo, dest, **opts) - bundleopts = opts.copy() - bundleopts.pop('bundle', None) # already processed + stropts = pycompat.strkwargs(opts) + bundledata = _getbundle(repo, dest, **stropts) + bundleopts = stropts.copy() + bundleopts.pop(r'bundle', None) # already processed msgs = _getbundlemsgs(repo, sender, bundledata, **bundleopts) else: - msgs = _getpatchmsgs(repo, sender, revs, **opts) + msgs = _getpatchmsgs(repo, sender, revs, **pycompat.strkwargs(opts)) showaddrs = [] diff -r 87676e8ee056 -r 27b6df1b5adb hgext/rebase.py --- a/hgext/rebase.py Mon Jan 08 16:07:51 2018 -0800 +++ b/hgext/rebase.py Mon Jan 22 17:53:02 2018 -0500 @@ -21,7 +21,6 @@ from mercurial.i18n import _ from mercurial.node import ( - hex, nullid, nullrev, short, @@ -43,6 +42,7 @@ obsutil, patch, phases, + pycompat, registrar, repair, revset, @@ -53,7 +53,6 @@ ) release = lock.release -templateopts = cmdutil.templateopts # The following constants are used throughout the rebase module. The ordering of # their values must be maintained. 
@@ -137,7 +136,7 @@ class rebaseruntime(object): """This class is a container for rebase runtime state""" - def __init__(self, repo, ui, opts=None): + def __init__(self, repo, ui, inmemory=False, opts=None): if opts is None: opts = {} @@ -179,6 +178,8 @@ # other extensions self.keepopen = opts.get('keepopen', False) self.obsoletenotrebased = {} + self.obsoletewithoutsuccessorindestination = set() + self.inmemory = inmemory @property def repo(self): @@ -311,9 +312,10 @@ if not self.ui.configbool('experimental', 'rebaseskipobsolete'): return obsoleteset = set(obsoleterevs) - self.obsoletenotrebased = _computeobsoletenotrebased(self.repo, - obsoleteset, destmap) + self.obsoletenotrebased, self.obsoletewithoutsuccessorindestination = \ + _computeobsoletenotrebased(self.repo, obsoleteset, destmap) skippedset = set(self.obsoletenotrebased) + skippedset.update(self.obsoletewithoutsuccessorindestination) _checkobsrebase(self.repo, self.ui, obsoleteset, skippedset) def _prepareabortorcontinue(self, isabort): @@ -380,7 +382,18 @@ self.prepared = True + def _assignworkingcopy(self): + if self.inmemory: + from mercurial.context import overlayworkingctx + self.wctx = overlayworkingctx(self.repo) + self.repo.ui.debug("rebasing in-memory\n") + else: + self.wctx = self.repo[None] + self.repo.ui.debug("rebasing on disk\n") + self.repo.ui.log("rebase", "", rebase_imm_used=self.wctx.isinmemory()) + def _performrebase(self, tr): + self._assignworkingcopy() repo, ui = self.repo, self.ui if self.keepbranchesf: # insert _savebranch at the start of extrafns so if @@ -419,12 +432,26 @@ def _performrebasesubset(self, tr, subset, pos, total): repo, ui, opts = self.repo, self.ui, self.opts sortedrevs = repo.revs('sort(%ld, -topo)', subset) + allowdivergence = self.ui.configbool( + 'experimental', 'evolution.allowdivergence') + if not allowdivergence: + sortedrevs -= repo.revs( + 'descendants(%ld) and not %ld', + self.obsoletewithoutsuccessorindestination, + 
self.obsoletewithoutsuccessorindestination, + ) for rev in sortedrevs: dest = self.destmap[rev] ctx = repo[rev] desc = _ctxdesc(ctx) if self.state[rev] == rev: ui.status(_('already rebased %s\n') % desc) + elif (not allowdivergence + and rev in self.obsoletewithoutsuccessorindestination): + msg = _('note: not rebasing %s and its descendants as ' + 'this would cause divergence\n') % desc + repo.ui.status(msg) + self.skipped.add(rev) elif rev in self.obsoletenotrebased: succ = self.obsoletenotrebased[rev] if succ is None: @@ -459,22 +486,35 @@ ui.setconfig('ui', 'forcemerge', opts.get('tool', ''), 'rebase') stats = rebasenode(repo, rev, p1, base, self.state, - self.collapsef, dest) + self.collapsef, dest, wctx=self.wctx) if stats and stats[3] > 0: - raise error.InterventionRequired( - _('unresolved conflicts (see hg ' - 'resolve, then hg rebase --continue)')) + if self.wctx.isinmemory(): + raise error.InMemoryMergeConflictsError() + else: + raise error.InterventionRequired( + _('unresolved conflicts (see hg ' + 'resolve, then hg rebase --continue)')) finally: ui.setconfig('ui', 'forcemerge', '', 'rebase') if not self.collapsef: merging = p2 != nullrev editform = cmdutil.mergeeditform(merging, 'rebase') editor = cmdutil.getcommiteditor(editform=editform, **opts) - newnode = concludenode(repo, rev, p1, p2, - extrafn=_makeextrafn(self.extrafns), - editor=editor, - keepbranches=self.keepbranchesf, - date=self.date) + if self.wctx.isinmemory(): + newnode = concludememorynode(repo, rev, p1, p2, + wctx=self.wctx, + extrafn=_makeextrafn(self.extrafns), + editor=editor, + keepbranches=self.keepbranchesf, + date=self.date) + mergemod.mergestate.clean(repo) + else: + newnode = concludenode(repo, rev, p1, p2, + extrafn=_makeextrafn(self.extrafns), + editor=editor, + keepbranches=self.keepbranchesf, + date=self.date) + if newnode is None: # If it ended up being a no-op commit, then the normal # merge state clean-up path doesn't happen, so do it @@ -482,7 +522,10 @@ 
mergemod.mergestate.clean(repo) else: # Skip commit if we are collapsing - repo.setparents(repo[p1].node()) + if self.wctx.isinmemory(): + self.wctx.setbase(repo[p1]) + else: + repo.setparents(repo[p1].node()) newnode = None # Update the state if newnode is not None: @@ -522,15 +565,24 @@ revtoreuse = max(self.state) dsguard = None - if ui.configbool('rebase', 'singletransaction'): - dsguard = dirstateguard.dirstateguard(repo, 'rebase') - with util.acceptintervention(dsguard): - newnode = concludenode(repo, revtoreuse, p1, self.external, - commitmsg=commitmsg, - extrafn=_makeextrafn(self.extrafns), - editor=editor, - keepbranches=self.keepbranchesf, - date=self.date) + if self.inmemory: + newnode = concludememorynode(repo, revtoreuse, p1, + self.external, + commitmsg=commitmsg, + extrafn=_makeextrafn(self.extrafns), + editor=editor, + keepbranches=self.keepbranchesf, + date=self.date, wctx=self.wctx) + else: + if ui.configbool('rebase', 'singletransaction'): + dsguard = dirstateguard.dirstateguard(repo, 'rebase') + with util.acceptintervention(dsguard): + newnode = concludenode(repo, revtoreuse, p1, self.external, + commitmsg=commitmsg, + extrafn=_makeextrafn(self.extrafns), + editor=editor, + keepbranches=self.keepbranchesf, + date=self.date) if newnode is not None: newrev = repo[newnode].rev() for oldrev in self.state.iterkeys(): @@ -545,7 +597,8 @@ if newwd < 0: # original directory is a parent of rebase set root or ignored newwd = self.originalwd - if newwd not in [c.rev() for c in repo[None].parents()]: + if (newwd not in [c.rev() for c in repo[None].parents()] and + not self.inmemory): ui.note(_("update back to initial working directory parent\n")) hg.updaterepo(repo, newwd, False) @@ -594,7 +647,7 @@ ('t', 'tool', '', _('specify merge tool')), ('c', 'continue', False, _('continue an interrupted rebase')), ('a', 'abort', False, _('abort an interrupted rebase'))] + - templateopts, + cmdutil.formatteropts, _('[-s REV | -b REV] [-d REV] [OPTION]')) def 
rebase(ui, repo, **opts): """move changeset (and descendants) to a different branch @@ -628,6 +681,11 @@ 4. If you do not specify any of ``--rev``, ``source``, or ``--base``, rebase will use ``--base .`` as above. + If ``--source`` or ``--rev`` is used, special names ``SRC`` and ``ALLSRC`` + can be used in ``--dest``. Destination would be calculated per source + revision with ``SRC`` substituted by that single source revision and + ``ALLSRC`` substituted by all source revisions. + Rebase will destroy original changesets unless you use ``--keep``. It will also move your bookmarks (even if you do). @@ -676,6 +734,12 @@ hg rebase -r "branch(featureX)" -d 1.3 --keepbranches + - stabilize orphaned changesets so history looks linear:: + + hg rebase -r 'orphan()-obsolete()'\ + -d 'first(max((successors(max(roots(ALLSRC) & ::SRC)^)-obsolete())::) +\ + max(::((roots(ALLSRC) & ::SRC)^)-obsolete()))' + Configuration Options: You can make rebase require a destination if you set the following config @@ -693,13 +757,43 @@ [rebase] singletransaction = True + By default, rebase writes to the working copy, but you can configure it to + run in-memory for for better performance, and to allow it to run if the + working copy is dirty:: + + [rebase] + experimental.inmemory = True + Return Values: Returns 0 on success, 1 if nothing to rebase or there are unresolved conflicts. """ - rbsrt = rebaseruntime(repo, ui, opts) + inmemory = ui.configbool('rebase', 'experimental.inmemory') + if (opts.get('continue') or opts.get('abort') or + repo.currenttransaction() is not None): + # in-memory rebase is not compatible with resuming rebases. + # (Or if it is run within a transaction, since the restart logic can + # fail the entire transaction.) + inmemory = False + + if inmemory: + try: + # in-memory merge doesn't support conflicts, so if we hit any, abort + # and re-run as an on-disk merge. 
+ return _origrebase(ui, repo, inmemory=inmemory, **opts) + except error.InMemoryMergeConflictsError: + ui.warn(_('hit merge conflicts; re-running rebase without in-memory' + ' merge\n')) + _origrebase(ui, repo, **{'abort': True}) + return _origrebase(ui, repo, inmemory=False, **opts) + else: + return _origrebase(ui, repo, **opts) + +def _origrebase(ui, repo, inmemory=False, **opts): + opts = pycompat.byteskwargs(opts) + rbsrt = rebaseruntime(repo, ui, inmemory, opts) with repo.wlock(), repo.lock(): # Validate input and define rebasing points @@ -746,7 +840,7 @@ if retcode is not None: return retcode else: - destmap = _definedestmap(ui, repo, destf, srcf, basef, revf, + destmap = _definedestmap(ui, repo, rbsrt, destf, srcf, basef, revf, destspace=destspace) retcode = rbsrt._preparenewrebase(destmap) if retcode is not None: @@ -758,16 +852,22 @@ singletr = ui.configbool('rebase', 'singletransaction') if singletr: tr = repo.transaction('rebase') + + # If `rebase.singletransaction` is enabled, wrap the entire operation in + # one transaction here. Otherwise, transactions are obtained when + # committing each node, which is slower but allows partial success. with util.acceptintervention(tr): - if singletr: + # Same logic for the dirstate guard, except we don't create one when + # rebasing in-memory (it's not needed). 
+ if singletr and not inmemory: dsguard = dirstateguard.dirstateguard(repo, 'rebase') with util.acceptintervention(dsguard): rbsrt._performrebase(tr) rbsrt._finishrebase() -def _definedestmap(ui, repo, destf=None, srcf=None, basef=None, revf=None, - destspace=None): +def _definedestmap(ui, repo, rbsrt, destf=None, srcf=None, basef=None, + revf=None, destspace=None): """use revisions argument to define destmap {srcrev: destrev}""" if revf is None: revf = [] @@ -781,8 +881,9 @@ if revf and srcf: raise error.Abort(_('cannot specify both a revision and a source')) - cmdutil.checkunfinished(repo) - cmdutil.bailifchanged(repo) + if not rbsrt.inmemory: + cmdutil.checkunfinished(repo) + cmdutil.bailifchanged(repo) if ui.configbool('commands', 'rebase.requiredest') and not destf: raise error.Abort(_('you must specify a destination'), @@ -855,6 +956,23 @@ ui.status(_('nothing to rebase from %s to %s\n') % ('+'.join(str(repo[r]) for r in base), dest)) return None + # If rebasing the working copy parent, force in-memory merge to be off. + # + # This is because the extra work of checking out the newly rebased commit + # outweights the benefits of rebasing in-memory, and executing an extra + # update command adds a bit of overhead, so better to just do it on disk. In + # all other cases leave it on. + # + # Note that there are cases where this isn't true -- e.g., rebasing large + # stacks that include the WCP. However, I'm not yet sure where the cutoff + # is. + rebasingwcp = repo['.'].rev() in rebaseset + ui.log("rebase", "", rebase_rebasing_wcp=rebasingwcp) + if rbsrt.inmemory and rebasingwcp: + rbsrt.inmemory = False + # Check these since we did not before. 
+ cmdutil.checkunfinished(repo) + cmdutil.bailifchanged(repo) if not destf: dest = repo[_destrebase(repo, rebaseset, destspace=destspace)] @@ -868,8 +986,6 @@ # fast path: try to resolve dest without SRC alias dest = scmutil.revsingle(repo, destf, localalias=alias) except error.RepoLookupError: - if not ui.configbool('experimental', 'rebase.multidest'): - raise # multi-dest path: resolve dest for each SRC separately destmap = {} for r in rebaseset: @@ -920,6 +1036,44 @@ (max(destancestors), ', '.join(str(p) for p in sorted(parents)))) +def concludememorynode(repo, rev, p1, p2, wctx=None, + commitmsg=None, editor=None, extrafn=None, + keepbranches=False, date=None): + '''Commit the memory changes with parents p1 and p2. Reuse commit info from + rev but also store useful information in extra. + Return node of committed revision.''' + ctx = repo[rev] + if commitmsg is None: + commitmsg = ctx.description() + keepbranch = keepbranches and repo[p1].branch() != ctx.branch() + extra = {'rebase_source': ctx.hex()} + if extrafn: + extrafn(ctx, extra) + + destphase = max(ctx.phase(), phases.draft) + overrides = {('phases', 'new-commit'): destphase} + with repo.ui.configoverride(overrides, 'rebase'): + if keepbranch: + repo.ui.setconfig('ui', 'allowemptycommit', True) + # Replicates the empty check in ``repo.commit``. + if wctx.isempty() and not repo.ui.configbool('ui', 'allowemptycommit'): + return None + + if date is None: + date = ctx.date() + + # By convention, ``extra['branch']`` (set by extrafn) clobbers + # ``branch`` (used when passing ``--keepbranches``). 
+ branch = repo[p1].branch() + if 'branch' in extra: + branch = extra['branch'] + + memctx = wctx.tomemctx(commitmsg, parents=(p1, p2), date=date, + extra=extra, user=ctx.user(), branch=branch, editor=editor) + commitres = repo.commitctx(memctx) + wctx.clean() # Might be reused + return commitres + def concludenode(repo, rev, p1, p2, commitmsg=None, editor=None, extrafn=None, keepbranches=False, date=None): '''Commit the wd changes with parents p1 and p2. Reuse commit info from rev @@ -952,24 +1106,29 @@ repo.dirstate.setbranch(repo[newnode].branch()) return newnode -def rebasenode(repo, rev, p1, base, state, collapse, dest): +def rebasenode(repo, rev, p1, base, state, collapse, dest, wctx): 'Rebase a single revision rev on top of p1 using base as merge ancestor' # Merge phase # Update to destination and merge it with local - if repo['.'].rev() != p1: - repo.ui.debug(" update to %d:%s\n" % (p1, repo[p1])) - mergemod.update(repo, p1, False, True) + if wctx.isinmemory(): + wctx.setbase(repo[p1]) else: - repo.ui.debug(" already in destination\n") - repo.dirstate.write(repo.currenttransaction()) + if repo['.'].rev() != p1: + repo.ui.debug(" update to %d:%s\n" % (p1, repo[p1])) + mergemod.update(repo, p1, False, True) + else: + repo.ui.debug(" already in destination\n") + # This is, alas, necessary to invalidate workingctx's manifest cache, + # as well as other data we litter on it in other places. + wctx = repo[None] + repo.dirstate.write(repo.currenttransaction()) repo.ui.debug(" merge against %d:%s\n" % (rev, repo[rev])) if base is not None: repo.ui.debug(" detach base %d:%s\n" % (base, repo[base])) # When collapsing in-place, the parent is the common ancestor, we # have to allow merging with it. 
- wctx = repo[None] stats = mergemod.update(repo, rev, True, True, base, collapse, - labels=['dest', 'source']) + labels=['dest', 'source'], wc=wctx) if collapse: copies.duplicatecopies(repo, wctx, rev, dest) else: @@ -1546,22 +1705,26 @@ replacements[oldnode] = succs scmutil.cleanupnodes(repo, replacements, 'rebase', moves) if fm: - nodechanges = {hex(oldn): [hex(n) for n in newn] - for oldn, newn in replacements.iteritems()} + hf = fm.hexfunc + fl = fm.formatlist + fd = fm.formatdict + nodechanges = fd({hf(oldn): fl([hf(n) for n in newn], name='node') + for oldn, newn in replacements.iteritems()}, + key="oldnode", value="newnodes") fm.data(nodechanges=nodechanges) def pullrebase(orig, ui, repo, *args, **opts): 'Call rebase after pull if the latter has been invoked with --rebase' ret = None - if opts.get('rebase'): + if opts.get(r'rebase'): if ui.configbool('commands', 'rebase.requiredest'): msg = _('rebase destination required by configuration') hint = _('use hg pull followed by hg rebase -d DEST') raise error.Abort(msg, hint=hint) with repo.wlock(), repo.lock(): - if opts.get('update'): - del opts['update'] + if opts.get(r'update'): + del opts[r'update'] ui.debug('--update and --rebase are not compatible, ignoring ' 'the update flag\n') @@ -1582,15 +1745,15 @@ if revspostpull > revsprepull: # --rev option from pull conflict with rebase own --rev # dropping it - if 'rev' in opts: - del opts['rev'] + if r'rev' in opts: + del opts[r'rev'] # positional argument from pull conflicts with rebase's own # --source. - if 'source' in opts: - del opts['source'] + if r'source' in opts: + del opts[r'source'] # revsprepull is the len of the repo, not revnum of tip. 
destspace = list(repo.changelog.revs(start=revsprepull)) - opts['_destspace'] = destspace + opts[r'_destspace'] = destspace try: rebase(ui, repo, **opts) except error.NoMergeDestAbort: @@ -1604,7 +1767,7 @@ # with warning and trumpets commands.update(ui, repo) else: - if opts.get('tool'): + if opts.get(r'tool'): raise error.Abort(_('--tool can only be used with --rebase')) ret = orig(ui, repo, *args, **opts) @@ -1615,11 +1778,16 @@ return set(r for r in revs if repo[r].obsolete()) def _computeobsoletenotrebased(repo, rebaseobsrevs, destmap): - """return a mapping obsolete => successor for all obsolete nodes to be - rebased that have a successors in the destination + """Return (obsoletenotrebased, obsoletewithoutsuccessorindestination). + + `obsoletenotrebased` is a mapping mapping obsolete => successor for all + obsolete nodes to be rebased given in `rebaseobsrevs`. - obsolete => None entries in the mapping indicate nodes with no successor""" + `obsoletewithoutsuccessorindestination` is a set with obsolete revisions + without a successor in destination. + """ obsoletenotrebased = {} + obsoletewithoutsuccessorindestination = set([]) assert repo.filtername is None cl = repo.changelog @@ -1640,8 +1808,15 @@ if cl.isancestor(succnode, destnode): obsoletenotrebased[srcrev] = nodemap[succnode] break + else: + # If 'srcrev' has a successor in rebase set but none in + # destination (which would be catched above), we shall skip it + # and its descendants to avoid divergence. 
+ if any(nodemap[s] in destmap + for s in successors if s != srcnode): + obsoletewithoutsuccessorindestination.add(srcrev) - return obsoletenotrebased + return obsoletenotrebased, obsoletewithoutsuccessorindestination def summaryhook(ui, repo): if not repo.vfs.exists('rebasestate'): diff -r 87676e8ee056 -r 27b6df1b5adb hgext/record.py --- a/hgext/record.py Mon Jan 08 16:07:51 2018 -0800 +++ b/hgext/record.py Mon Jan 22 17:53:02 2018 -0500 @@ -68,13 +68,13 @@ raise error.Abort(_('running non-interactively, use %s instead') % 'commit') - opts["interactive"] = True + opts[r"interactive"] = True overrides = {('experimental', 'crecord'): False} with ui.configoverride(overrides, 'record'): return commands.commit(ui, repo, *pats, **opts) def qrefresh(origfn, ui, repo, *pats, **opts): - if not opts['interactive']: + if not opts[r'interactive']: return origfn(ui, repo, *pats, **opts) mq = extensions.find('mq') @@ -112,7 +112,7 @@ repo.mq.checkpatchname(patch) def committomq(ui, repo, *pats, **opts): - opts['checkname'] = False + opts[r'checkname'] = False mq.new(ui, repo, patch, *pats, **opts) overrides = {('experimental', 'crecord'): False} @@ -121,7 +121,7 @@ cmdutil.recordfilter, *pats, **opts) def qnew(origfn, ui, repo, patch, *args, **opts): - if opts['interactive']: + if opts[r'interactive']: return _qrecord(None, ui, repo, patch, *args, **opts) return origfn(ui, repo, patch, *args, **opts) diff -r 87676e8ee056 -r 27b6df1b5adb hgext/releasenotes.py --- a/hgext/releasenotes.py Mon Jan 08 16:07:51 2018 -0800 +++ b/hgext/releasenotes.py Mon Jan 22 17:53:02 2018 -0500 @@ -25,6 +25,7 @@ error, minirst, node, + pycompat, registrar, scmutil, util, @@ -570,6 +571,8 @@ admonitions along with their title. This also includes the custom admonitions (if any). 
""" + + opts = pycompat.byteskwargs(opts) sections = releasenotessections(ui, repo) listflag = opts.get('list') diff -r 87676e8ee056 -r 27b6df1b5adb hgext/shelve.py --- a/hgext/shelve.py Mon Jan 08 16:07:51 2018 -0800 +++ b/hgext/shelve.py Mon Jan 22 17:53:02 2018 -0500 @@ -43,6 +43,7 @@ node as nodemod, patch, phases, + pycompat, registrar, repair, scmutil, @@ -380,7 +381,7 @@ editor_ = False if editor: editor_ = cmdutil.getcommiteditor(editform='shelve.shelve', - **opts) + **pycompat.strkwargs(opts)) with repo.ui.configoverride(overrides): return repo.commit(message, shelveuser, opts.get('date'), match, editor=editor_, extra=extra) @@ -389,6 +390,7 @@ repo.mq.checkapplied = saved def interactivecommitfunc(ui, repo, *pats, **opts): + opts = pycompat.byteskwargs(opts) match = scmutil.match(repo['.'], pats, {}) message = opts['message'] return commitfunc(ui, repo, message, match, opts) @@ -465,7 +467,7 @@ else: node = cmdutil.dorecord(ui, repo, commitfunc, None, False, cmdutil.recordfilter, *pats, - **opts) + **pycompat.strkwargs(opts)) if not node: _nothingtoshelvemessaging(ui, repo, pats, opts) return 1 @@ -852,6 +854,7 @@ return _dounshelve(ui, repo, *shelved, **opts) def _dounshelve(ui, repo, *shelved, **opts): + opts = pycompat.byteskwargs(opts) abortf = opts.get('abort') continuef = opts.get('continue') if not abortf and not continuef: @@ -1010,6 +1013,7 @@ To delete specific shelved changes, use ``--delete``. To delete all shelved changes, use ``--cleanup``. 
''' + opts = pycompat.byteskwargs(opts) allowables = [ ('addremove', {'create'}), # 'create' is pseudo action ('unknown', {'create'}), diff -r 87676e8ee056 -r 27b6df1b5adb hgext/show.py --- a/hgext/show.py Mon Jan 08 16:07:51 2018 -0800 +++ b/hgext/show.py Mon Jan 22 17:53:02 2018 -0500 @@ -28,7 +28,10 @@ from __future__ import absolute_import from mercurial.i18n import _ -from mercurial.node import nullrev +from mercurial.node import ( + hex, + nullrev, +) from mercurial import ( cmdutil, commands, @@ -252,7 +255,9 @@ # our simplicity and the customizations required. # TODO use proper graph symbols from graphmod - shortesttmpl = formatter.maketemplater(ui, '{shortest(node, %d)}' % nodelen) + tres = formatter.templateresources(ui, repo) + shortesttmpl = formatter.maketemplater(ui, '{shortest(node, %d)}' % nodelen, + resources=tres) def shortest(ctx): return shortesttmpl.render({'ctx': ctx, 'node': ctx.hex()}) @@ -438,14 +443,11 @@ If we fail to do this, a value of e.g. ``10023`` could mean either revision 10023 or node ``10023abc...``. """ - tmpl = formatter.maketemplater(repo.ui, '{shortest(node, %d)}' % minlen) - lens = [minlen] - for rev in revs: - ctx = repo[rev] - shortest = tmpl.render({'ctx': ctx, 'node': ctx.hex()}) - lens.append(len(shortest)) - - return max(lens) + if not revs: + return minlen + # don't use filtered repo because it's slow. see templater.shortest(). + cl = repo.unfiltered().changelog + return max(len(cl.shortest(hex(cl.node(r)), minlen)) for r in revs) # Adjust the docstring of the show command so it shows all registered views. # This is a bit hacky because it runs at the end of module load. When moved diff -r 87676e8ee056 -r 27b6df1b5adb hgext/sparse.py --- a/hgext/sparse.py Mon Jan 08 16:07:51 2018 -0800 +++ b/hgext/sparse.py Mon Jan 22 17:53:02 2018 -0500 @@ -82,6 +82,7 @@ extensions, hg, match as matchmod, + pycompat, registrar, sparse, util, @@ -286,6 +287,7 @@ Returns 0 if editing the sparse checkout succeeds. 
""" + opts = pycompat.byteskwargs(opts) include = opts.get('include') exclude = opts.get('exclude') force = opts.get('force') diff -r 87676e8ee056 -r 27b6df1b5adb hgext/split.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hgext/split.py Mon Jan 22 17:53:02 2018 -0500 @@ -0,0 +1,177 @@ +# split.py - split a changeset into smaller ones +# +# Copyright 2015 Laurent Charignon +# Copyright 2017 Facebook, Inc. +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. +"""command to split a changeset into smaller ones (EXPERIMENTAL)""" + +from __future__ import absolute_import + +from mercurial.i18n import _ + +from mercurial.node import ( + nullid, + short, +) + +from mercurial import ( + bookmarks, + cmdutil, + commands, + error, + hg, + obsolete, + phases, + registrar, + revsetlang, + scmutil, +) + +# allow people to use split without explicitly enabling rebase extension +from . import ( + rebase, +) + +cmdtable = {} +command = registrar.command(cmdtable) + +# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for +# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should +# be specifying the version(s) of Mercurial they are tested with, or +# leave the attribute unspecified. +testedwith = 'ships-with-hg-core' + +@command('^split', + [('r', 'rev', '', _("revision to split"), _('REV')), + ('', 'rebase', True, _('rebase descendants after split')), + ] + cmdutil.commitopts2, + _('hg split [--no-rebase] [[-r] REV]')) +def split(ui, repo, *revs, **opts): + """split a changeset into smaller ones + + Repeatedly prompt changes and commit message for new changesets until there + is nothing left in the original changeset. + + If --rev was not given, split the working directory parent. + + By default, rebase connected non-obsoleted descendants onto the new + changeset. Use --no-rebase to avoid the rebase. 
+ """ + revlist = [] + if opts.get('rev'): + revlist.append(opts.get('rev')) + revlist.extend(revs) + with repo.wlock(), repo.lock(), repo.transaction('split') as tr: + revs = scmutil.revrange(repo, revlist or ['.']) + if len(revs) > 1: + raise error.Abort(_('cannot split multiple revisions')) + + rev = revs.first() + ctx = repo[rev] + if rev is None or ctx.node() == nullid: + ui.status(_('nothing to split\n')) + return 1 + if ctx.node() is None: + raise error.Abort(_('cannot split working directory')) + + # rewriteutil.precheck is not very useful here because: + # 1. null check is done above and it's more friendly to return 1 + # instead of abort + # 2. mergestate check is done below by cmdutil.bailifchanged + # 3. unstable check is more complex here because of --rebase + # + # So only "public" check is useful and it's checked directly here. + if ctx.phase() == phases.public: + raise error.Abort(_('cannot split public changeset'), + hint=_("see 'hg help phases' for details")) + + descendants = list(repo.revs('(%d::) - (%d)', rev, rev)) + alloworphaned = obsolete.isenabled(repo, obsolete.allowunstableopt) + if opts.get('rebase'): + # Skip obsoleted descendants and their descendants so the rebase + # won't cause conflicts for sure. 
+ torebase = list(repo.revs('%ld - (%ld & obsolete())::', + descendants, descendants)) + if not alloworphaned and len(torebase) != len(descendants): + raise error.Abort(_('split would leave orphaned changesets ' + 'behind')) + else: + if not alloworphaned and descendants: + raise error.Abort( + _('cannot split changeset with children without rebase')) + torebase = () + + if len(ctx.parents()) > 1: + raise error.Abort(_('cannot split a merge changeset')) + + cmdutil.bailifchanged(repo) + + # Deactivate bookmark temporarily so it won't get moved unintentionally + bname = repo._activebookmark + if bname and repo._bookmarks[bname] != ctx.node(): + bookmarks.deactivate(repo) + + wnode = repo['.'].node() + top = None + try: + top = dosplit(ui, repo, tr, ctx, opts) + finally: + # top is None: split failed, need update --clean recovery. + # wnode == ctx.node(): wnode split, no need to update. + if top is None or wnode != ctx.node(): + hg.clean(repo, wnode, show_stats=False) + if bname: + bookmarks.activate(repo, bname) + if torebase and top: + dorebase(ui, repo, torebase, top) + +def dosplit(ui, repo, tr, ctx, opts): + committed = [] # [ctx] + + # Set working parent to ctx.p1(), and keep working copy as ctx's content + # NOTE: if we can have "update without touching working copy" API, the + # revert step could be cheaper. + hg.clean(repo, ctx.p1().node(), show_stats=False) + parents = repo.changelog.parents(ctx.node()) + ui.pushbuffer() + cmdutil.revert(ui, repo, ctx, parents) + ui.popbuffer() # discard "reverting ..." messages + + # Any modified, added, removed, deleted result means split is incomplete + incomplete = lambda repo: any(repo.status()[:4]) + + # Main split loop + while incomplete(repo): + if committed: + header = (_('HG: Splitting %s. 
So far it has been split into:\n') + % short(ctx.node())) + for c in committed: + firstline = c.description().split('\n', 1)[0] + header += _('HG: - %s: %s\n') % (short(c.node()), firstline) + header += _('HG: Write commit message for the next split ' + 'changeset.\n') + else: + header = _('HG: Splitting %s. Write commit message for the ' + 'first split changeset.\n') % short(ctx.node()) + opts.update({ + 'edit': True, + 'interactive': True, + 'message': header + ctx.description(), + }) + commands.commit(ui, repo, **opts) + newctx = repo['.'] + committed.append(newctx) + + if not committed: + raise error.Abort(_('cannot split an empty revision')) + + scmutil.cleanupnodes(repo, {ctx.node(): [c.node() for c in committed]}, + operation='split') + + return committed[-1] + +def dorebase(ui, repo, src, dest): + rebase.rebase(ui, repo, rev=[revsetlang.formatspec('%ld', src)], + dest=revsetlang.formatspec('%d', dest)) diff -r 87676e8ee056 -r 27b6df1b5adb hgext/uncommit.py --- a/hgext/uncommit.py Mon Jan 08 16:07:51 2018 -0800 +++ b/hgext/uncommit.py Mon Jan 22 17:53:02 2018 -0500 @@ -28,8 +28,10 @@ copies, error, node, - obsolete, + obsutil, + pycompat, registrar, + rewriteutil, scmutil, ) @@ -75,7 +77,7 @@ if path not in contentctx: return None fctx = contentctx[path] - mctx = context.memfilectx(repo, fctx.path(), fctx.data(), + mctx = context.memfilectx(repo, memctx, fctx.path(), fctx.data(), fctx.islink(), fctx.isexec(), copied=copied.get(path)) @@ -96,15 +98,13 @@ newid = repo.commitctx(new) return newid -def _uncommitdirstate(repo, oldctx, match): - """Fix the dirstate after switching the working directory from - oldctx to a copy of oldctx not containing changed files matched by - match. +def _fixdirstate(repo, oldctx, newctx, status): + """ fix the dirstate after switching the working directory from oldctx to + newctx which can be result of either unamend or uncommit. 
""" - ctx = repo['.'] ds = repo.dirstate copies = dict(ds.copies()) - s = repo.status(oldctx.p1(), oldctx, match=match) + s = status for f in s.modified: if ds[f] == 'r': # modified + removed -> removed @@ -136,7 +136,7 @@ for dst, src in oldcopies.iteritems()) # Adjust the dirstate copies for dst, src in copies.iteritems(): - if (src not in ctx or dst in ctx or ds[dst] != 'a'): + if (src not in newctx or dst in newctx or ds[dst] != 'a'): src = None ds.copy(src, dst) @@ -152,25 +152,17 @@ deleted in the changeset will be left unchanged, and so will remain modified in the working directory. """ + opts = pycompat.byteskwargs(opts) with repo.wlock(), repo.lock(): - wctx = repo[None] if not pats and not repo.ui.configbool('experimental', 'uncommitondirtywdir'): cmdutil.bailifchanged(repo) - if wctx.parents()[0].node() == node.nullid: - raise error.Abort(_("cannot uncommit null changeset")) - if len(wctx.parents()) > 1: - raise error.Abort(_("cannot uncommit while merging")) old = repo['.'] - if not old.mutable(): - raise error.Abort(_('cannot uncommit public changesets')) + rewriteutil.precheck(repo, [old.rev()], 'uncommit') if len(old.parents()) > 1: raise error.Abort(_("cannot uncommit merge changeset")) - allowunstable = obsolete.isenabled(repo, obsolete.allowunstableopt) - if not allowunstable and old.children(): - raise error.Abort(_('cannot uncommit changeset with children')) with repo.transaction('uncommit'): match = scmutil.match(old, pats, opts) @@ -191,4 +183,75 @@ with repo.dirstate.parentchange(): repo.dirstate.setparents(newid, node.nullid) - _uncommitdirstate(repo, old, match) + s = repo.status(old.p1(), old, match=match) + _fixdirstate(repo, old, repo[newid], s) + +def predecessormarkers(ctx): + """yields the obsolete markers marking the given changeset as a successor""" + for data in ctx.repo().obsstore.predecessors.get(ctx.node(), ()): + yield obsutil.marker(ctx.repo(), data) + +@command('^unamend', []) +def unamend(ui, repo, **opts): + """ + undo the 
most recent amend operation on a current changeset + + This command will roll back to the previous version of a changeset, + leaving working directory in state in which it was before running + `hg amend` (e.g. files modified as part of an amend will be + marked as modified `hg status`) + """ + + unfi = repo.unfiltered() + with repo.wlock(), repo.lock(), repo.transaction('unamend'): + + # identify the commit from which to unamend + curctx = repo['.'] + + rewriteutil.precheck(repo, [curctx.rev()], 'unamend') + + # identify the commit to which to unamend + markers = list(predecessormarkers(curctx)) + if len(markers) != 1: + e = _("changeset must have one predecessor, found %i predecessors") + raise error.Abort(e % len(markers)) + + prednode = markers[0].prednode() + predctx = unfi[prednode] + + # add an extra so that we get a new hash + # note: allowing unamend to undo an unamend is an intentional feature + extras = predctx.extra() + extras['unamend_source'] = curctx.hex() + + def filectxfn(repo, ctx_, path): + try: + return predctx.filectx(path) + except KeyError: + return None + + # Make a new commit same as predctx + newctx = context.memctx(repo, + parents=(predctx.p1(), predctx.p2()), + text=predctx.description(), + files=predctx.files(), + filectxfn=filectxfn, + user=predctx.user(), + date=predctx.date(), + extra=extras) + # phase handling + commitphase = curctx.phase() + overrides = {('phases', 'new-commit'): commitphase} + with repo.ui.configoverride(overrides, 'uncommit'): + newprednode = repo.commitctx(newctx) + + newpredctx = repo[newprednode] + dirstate = repo.dirstate + + with dirstate.parentchange(): + dirstate.setparents(newprednode, node.nullid) + s = repo.status(predctx, curctx) + _fixdirstate(repo, curctx, newpredctx, s) + + mapping = {curctx.node(): (newprednode,)} + scmutil.cleanupnodes(repo, mapping, 'unamend') diff -r 87676e8ee056 -r 27b6df1b5adb hgext/win32text.py --- a/hgext/win32text.py Mon Jan 08 16:07:51 2018 -0800 +++ b/hgext/win32text.py 
Mon Jan 22 17:53:02 2018 -0500 @@ -139,7 +139,7 @@ # changegroup that contains an unacceptable commit followed later # by a commit that fixes the problem. tip = repo['tip'] - for rev in xrange(len(repo) - 1, repo[node].rev() - 1, -1): + for rev in xrange(repo.changelog.tiprev(), repo[node].rev() - 1, -1): c = repo[rev] for f in c.files(): if f in seen or f not in tip or f not in c: diff -r 87676e8ee056 -r 27b6df1b5adb hgext/zeroconf/Zeroconf.py --- a/hgext/zeroconf/Zeroconf.py Mon Jan 08 16:07:51 2018 -0800 +++ b/hgext/zeroconf/Zeroconf.py Mon Jan 22 17:53:02 2018 -0500 @@ -1613,7 +1613,8 @@ _DNS_TTL, service.address)) service = self.services.get(question.name.lower(), None) - if not service: continue + if not service: + continue if (question.type == _TYPE_SRV or question.type == _TYPE_ANY): diff -r 87676e8ee056 -r 27b6df1b5adb i18n/de.po --- a/i18n/de.po Mon Jan 08 16:07:51 2018 -0800 +++ b/i18n/de.po Mon Jan 22 17:53:02 2018 -0500 @@ -9744,86 +9744,9 @@ msgid "child process failed to start" msgstr "" -#. i18n: column positioning for "hg log" -#, python-format -msgid "changeset: %s\n" -msgstr "Änderung: %s\n" - -#. i18n: column positioning for "hg log" -#, python-format -msgid "branch: %s\n" -msgstr "Zweig: %s\n" - -#. i18n: column positioning for "hg log" -#, python-format -msgid "bookmark: %s\n" -msgstr "Lesezeichen: %s\n" - -#. i18n: column positioning for "hg log" -#, python-format -msgid "tag: %s\n" -msgstr "Marke: %s\n" - -#. i18n: column positioning for "hg log" -#, python-format -msgid "phase: %s\n" -msgstr "Phase: %s\n" - -#. i18n: column positioning for "hg log" -#, python-format -msgid "parent: %s\n" -msgstr "Vorgänger: %s\n" - -#. i18n: column positioning for "hg log" -#, python-format -msgid "manifest: %d:%s\n" -msgstr "Manifest: %d:%s\n" - -#. i18n: column positioning for "hg log" -#, python-format -msgid "user: %s\n" -msgstr "Nutzer: %s\n" - -#. i18n: column positioning for "hg log" -#, python-format -msgid "date: %s\n" -msgstr "Datum: %s\n" - -#. 
i18n: column positioning for "hg log" -msgid "files:" -msgstr "Dateien:" - -#. i18n: column positioning for "hg log" -msgid "files+:" -msgstr "Dateien+:" - -#. i18n: column positioning for "hg log" -msgid "files-:" -msgstr "Dateien-:" - -#. i18n: column positioning for "hg log" -#, python-format -msgid "files: %s\n" -msgstr "Dateien: %s\n" - -#. i18n: column positioning for "hg log" -#, python-format -msgid "copies: %s\n" -msgstr "Kopien: %s\n" - -#. i18n: column positioning for "hg log" -#, python-format -msgid "extra: %s=%s\n" -msgstr "Extra: %s=%s\n" - msgid "description:\n" msgstr "Beschreibung:\n" -#. i18n: column positioning for "hg log" -#, python-format -msgid "summary: %s\n" -msgstr "Zusammenfassung: %s\n" - #, python-format msgid "%s: no key named '%s'" msgstr "%s: kein Schlüsselwort '%s'" @@ -23194,6 +23117,45 @@ ":emailuser: Beliebiger Text. Gibt den Nutzerteil einer E-Mail-Adresse\n" " (vor dem @-Zeichen) zurück." +#. i18n: column positioning for "hg log" +#, python-format +msgid "" +"bookmark: %s\n" +"branch: %s\n" +"changeset: %s\n" +"copies: %s\n" +"date: %s\n" +"extra: %s=%s\n" +"files+: %s\n" +"files-: %s\n" +"files: %s\n" +"instability: %s\n" +"manifest: %s\n" +"obsolete: %s\n" +"parent: %s\n" +"phase: %s\n" +"summary: %s\n" +"tag: %s\n" +"user: %s\n" +msgstr "" +"Lesezeichen: %s\n" +"Zweig: %s\n" +"Änderung: %s\n" +"Kopien: %s\n" +"Datum: %s\n" +"Extra: %s=%s\n" +"Dateien+: %s\n" +"Dateien-: %s\n" +"Dateien: %s\n" +"instability: %s\n" +"Manifest: %s\n" +"obsolete: %s\n" +"Vorgänger: %s\n" +"Phase: %s\n" +"Zusammenfassung: %s\n" +"Marke: %s\n" +"Nutzer: %s\n" + msgid ":author: String. The unmodified author of the changeset." msgstr ":author: Zeichenkette. Der unveränderte Autor eines Änderungssatzes." diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/__init__.py --- a/mercurial/__init__.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/__init__.py Mon Jan 22 17:53:02 2018 -0500 @@ -31,9 +31,6 @@ # Only handle Mercurial-related modules. 
if not fullname.startswith(('mercurial.', 'hgext.', 'hgext3rd.')): return None - # selectors2 is already dual-version clean, don't try and mangle it - if fullname.startswith('mercurial.selectors2'): - return None # third-party packages are expected to be dual-version clean if fullname.startswith('mercurial.thirdparty'): return None diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/archival.py --- a/mercurial/archival.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/archival.py Mon Jan 22 17:53:02 2018 -0500 @@ -126,7 +126,7 @@ def __init__(self, *args, **kw): timestamp = None if 'timestamp' in kw: - timestamp = kw.pop('timestamp') + timestamp = kw.pop(r'timestamp') if timestamp is None: self.timestamp = time.time() else: @@ -262,6 +262,7 @@ def __init__(self, name, mtime): self.basedir = name self.opener = vfsmod.vfs(self.basedir) + self.mtime = mtime def addfile(self, name, mode, islink, data): if islink: @@ -272,6 +273,8 @@ f.close() destfile = os.path.join(self.basedir, name) os.chmod(destfile, mode) + if self.mtime is not None: + os.utime(destfile, (self.mtime, self.mtime)) def done(self): pass @@ -299,7 +302,12 @@ matchfn is function to filter names of files to write to archive. - prefix is name of path to put before every archive member.''' + prefix is name of path to put before every archive member. + + mtime is the modified time, in seconds, or None to use the changeset time. + + subrepos tells whether to include subrepos. 
+ ''' if kind == 'files': if prefix: diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/bdiff.c --- a/mercurial/bdiff.c Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/bdiff.c Mon Jan 22 17:53:02 2018 -0500 @@ -41,7 +41,7 @@ if (p == plast) i++; - *lr = l = (struct bdiff_line *)malloc(sizeof(struct bdiff_line) * i); + *lr = l = (struct bdiff_line *)calloc(i, sizeof(struct bdiff_line)); if (!l) return -1; @@ -95,7 +95,7 @@ /* try to allocate a large hash table to avoid collisions */ for (scale = 4; scale; scale /= 2) { - h = (struct pos *)malloc(scale * buckets * sizeof(struct pos)); + h = (struct pos *)calloc(buckets, scale * sizeof(struct pos)); if (h) break; } diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/bookmarks.py --- a/mercurial/bookmarks.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/bookmarks.py Mon Jan 22 17:53:02 2018 -0500 @@ -8,17 +8,18 @@ from __future__ import absolute_import import errno +import struct from .i18n import _ from .node import ( bin, hex, short, + wdirid, ) from . 
import ( encoding, error, - lock as lockmod, obsutil, pycompat, scmutil, @@ -121,6 +122,12 @@ self._clean = False return dict.__delitem__(self, key) + def update(self, *others): + msg = ("bookmarks.update(...)' is deprecated, " + "use 'bookmarks.applychanges'") + self._repo.ui.deprecwarn(msg, '4.5') + return dict.update(self, *others) + def applychanges(self, repo, tr, changes): """Apply a list of changes to bookmarks """ @@ -390,14 +397,8 @@ bmchanges.append((bm, None)) if bmchanges: - lock = tr = None - try: - lock = repo.lock() - tr = repo.transaction('bookmark') + with repo.lock(), repo.transaction('bookmark') as tr: marks.applychanges(repo, tr, bmchanges) - tr.close() - finally: - lockmod.release(tr, lock) return bool(bmchanges) def listbinbookmarks(repo): @@ -418,11 +419,7 @@ return d def pushbookmark(repo, key, old, new): - w = l = tr = None - try: - w = repo.wlock() - l = repo.lock() - tr = repo.transaction('bookmarks') + with repo.wlock(), repo.lock(), repo.transaction('bookmarks') as tr: marks = repo._bookmarks existing = hex(marks.get(key, '')) if existing != old and existing != new: @@ -434,10 +431,7 @@ return False changes = [(key, repo[new].node())] marks.applychanges(repo, tr, changes) - tr.close() return True - finally: - lockmod.release(tr, l, w) def comparebookmarks(repo, srcmarks, dstmarks, targets=None): '''Compare bookmarks between srcmarks and dstmarks @@ -550,6 +544,60 @@ binremotemarks[name] = bin(node) return binremotemarks +_binaryentry = struct.Struct('>20sH') + +def binaryencode(bookmarks): + """encode a '(bookmark, node)' iterable into a binary stream + + the binary format is: + + + + :node: is a 20 bytes binary node, + :bookmark-length: an unsigned short, + :bookmark-name: the name of the bookmark (of length ) + + wdirid (all bits set) will be used as a special value for "missing" + """ + binarydata = [] + for book, node in bookmarks: + if not node: # None or '' + node = wdirid + binarydata.append(_binaryentry.pack(node, len(book))) + 
binarydata.append(book) + return ''.join(binarydata) + +def binarydecode(stream): + """decode a binary stream into an '(bookmark, node)' iterable + + the binary format is: + + + + :node: is a 20 bytes binary node, + :bookmark-length: an unsigned short, + :bookmark-name: the name of the bookmark (of length )) + + wdirid (all bits set) will be used as a special value for "missing" + """ + entrysize = _binaryentry.size + books = [] + while True: + entry = stream.read(entrysize) + if len(entry) < entrysize: + if entry: + raise error.Abort(_('bad bookmark stream')) + break + node, length = _binaryentry.unpack(entry) + bookmark = stream.read(length) + if len(bookmark) < length: + if entry: + raise error.Abort(_('bad bookmark stream')) + if node == wdirid: + node = None + books.append((bookmark, node)) + return books + def updatefromremote(ui, repo, remotemarks, path, trfunc, explicit=()): ui.debug("checking for updated bookmarks\n") localmarks = repo._bookmarks @@ -788,6 +836,12 @@ cur = repo.changectx('.').node() newact = None changes = [] + hiddenrev = None + + # unhide revs if any + if rev: + repo = scmutil.unhidehashlikerevs(repo, [rev], 'nowarn') + for mark in names: mark = checkformat(repo, mark) if newact is None: @@ -797,10 +851,21 @@ return tgt = cur if rev: - tgt = scmutil.revsingle(repo, rev).node() + ctx = scmutil.revsingle(repo, rev) + if ctx.hidden(): + hiddenrev = ctx.hex()[:12] + tgt = ctx.node() for bm in marks.checkconflict(mark, force, tgt): changes.append((bm, None)) changes.append((mark, tgt)) + + if hiddenrev: + repo.ui.warn(_("bookmarking hidden changeset %s\n") % hiddenrev) + + if ctx.obsolete(): + msg = obsutil._getfilteredreason(repo, "%s" % hiddenrev, ctx) + repo.ui.warn("(%s)\n" % msg) + marks.applychanges(repo, tr, changes) if not inactive and cur == marks[newact] and not rev: activate(repo, newact) diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/branchmap.py --- a/mercurial/branchmap.py Mon Jan 08 16:07:51 2018 -0800 +++ 
b/mercurial/branchmap.py Mon Jan 22 17:53:02 2018 -0500 @@ -84,6 +84,7 @@ # This create and ordering used for branchmap purpose. # the ordering may be partial subsettable = {None: 'visible', + 'visible-hidden': 'visible', 'visible': 'served', 'served': 'immutable', 'immutable': 'base'} diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/bundle2.py --- a/mercurial/bundle2.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/bundle2.py Mon Jan 22 17:53:02 2018 -0500 @@ -148,6 +148,7 @@ from __future__ import absolute_import, division import errno +import os import re import string import struct @@ -155,6 +156,7 @@ from .i18n import _ from . import ( + bookmarks, changegroup, error, node as nodemod, @@ -162,6 +164,7 @@ phases, pushkey, pycompat, + streamclone, tags, url, util, @@ -180,7 +183,7 @@ _fpayloadsize = '>i' _fpartparamcount = '>BB' -preferedchunksize = 4096 +preferedchunksize = 32768 _parttypeforbidden = re.compile('[^a-zA-Z0-9_:-]') @@ -299,6 +302,8 @@ self.captureoutput = captureoutput self.hookargs = {} self._gettransaction = transactiongetter + # carries value that can modify part behavior + self.modes = {} def gettransaction(self): transaction = self._gettransaction() @@ -362,7 +367,7 @@ self.count = count self.current = p yield p - p.seek(0, 2) + p.consume() self.current = None self.iterator = func() return self.iterator @@ -384,11 +389,11 @@ try: if self.current: # consume the part content to not corrupt the stream. - self.current.seek(0, 2) + self.current.consume() for part in self.iterator: # consume the bundle content - part.seek(0, 2) + part.consume() except Exception: seekerror = True @@ -594,6 +599,10 @@ self.capabilities = dict(capabilities) self._compengine = util.compengines.forbundletype('UN') self._compopts = None + # If compression is being handled by a consumer of the raw + # data (e.g. the wire protocol), unsetting this flag tells + # consumers that the bundle is best left uncompressed. 
+ self.prefercompressed = True def setcompression(self, alg, compopts=None): """setup core part compression to """ @@ -844,8 +853,9 @@ yield self._readexact(size) - def iterparts(self): + def iterparts(self, seekable=False): """yield all parts contained in the stream""" + cls = seekableunbundlepart if seekable else unbundlepart # make sure param have been loaded self.params # From there, payload need to be decompressed @@ -853,13 +863,12 @@ indebug(self.ui, 'start extraction of bundle2 parts') headerblock = self._readpartheader() while headerblock is not None: - part = unbundlepart(self.ui, headerblock, self._fp) + part = cls(self.ui, headerblock, self._fp) yield part - # Seek to the end of the part to force it's consumption so the next - # part can be read. But then seek back to the beginning so the - # code consuming this generator has a part that starts at 0. - part.seek(0, 2) - part.seek(0) + # Ensure part is fully consumed so we can start reading the next + # part. + part.consume() + headerblock = self._readpartheader() indebug(self.ui, 'end of bundle2 stream') @@ -1164,7 +1173,7 @@ raise finally: if not hardabort: - part.seek(0, 2) + part.consume() self.ui.debug('bundle2-input-stream-interrupt:' ' closing out of band context\n') @@ -1186,6 +1195,55 @@ def gettransaction(self): raise TransactionUnavailable('no repo access from stream interruption') +def decodepayloadchunks(ui, fh): + """Reads bundle2 part payload data into chunks. + + Part payload data consists of framed chunks. This function takes + a file handle and emits those chunks. + """ + dolog = ui.configbool('devel', 'bundle2.debug') + debug = ui.debug + + headerstruct = struct.Struct(_fpayloadsize) + headersize = headerstruct.size + unpack = headerstruct.unpack + + readexactly = changegroup.readexactly + read = fh.read + + chunksize = unpack(readexactly(fh, headersize))[0] + indebug(ui, 'payload chunk size: %i' % chunksize) + + # changegroup.readexactly() is inlined below for performance. 
+ while chunksize: + if chunksize >= 0: + s = read(chunksize) + if len(s) < chunksize: + raise error.Abort(_('stream ended unexpectedly ' + ' (got %d bytes, expected %d)') % + (len(s), chunksize)) + + yield s + elif chunksize == flaginterrupt: + # Interrupt "signal" detected. The regular stream is interrupted + # and a bundle2 part follows. Consume it. + interrupthandler(ui, fh)() + else: + raise error.BundleValueError( + 'negative payload chunk size: %s' % chunksize) + + s = read(headersize) + if len(s) < headersize: + raise error.Abort(_('stream ended unexpectedly ' + ' (got %d bytes, expected %d)') % + (len(s), chunksize)) + + chunksize = unpack(s)[0] + + # indebug() inlined for performance. + if dolog: + debug('bundle2-input: payload chunk size: %i\n' % chunksize) + class unbundlepart(unpackermixin): """a bundle part read from a bundle""" @@ -1206,10 +1264,8 @@ self.advisoryparams = None self.params = None self.mandatorykeys = () - self._payloadstream = None self._readheader() self._mandatory = None - self._chunkindex = [] #(payload, file) position tuples for chunk starts self._pos = 0 def _fromheader(self, size): @@ -1236,46 +1292,6 @@ self.params.update(self.advisoryparams) self.mandatorykeys = frozenset(p[0] for p in mandatoryparams) - def _payloadchunks(self, chunknum=0): - '''seek to specified chunk and start yielding data''' - if len(self._chunkindex) == 0: - assert chunknum == 0, 'Must start with chunk 0' - self._chunkindex.append((0, self._tellfp())) - else: - assert chunknum < len(self._chunkindex), \ - 'Unknown chunk %d' % chunknum - self._seekfp(self._chunkindex[chunknum][1]) - - pos = self._chunkindex[chunknum][0] - payloadsize = self._unpack(_fpayloadsize)[0] - indebug(self.ui, 'payload chunk size: %i' % payloadsize) - while payloadsize: - if payloadsize == flaginterrupt: - # interruption detection, the handler will now read a - # single part and process it. 
- interrupthandler(self.ui, self._fp)() - elif payloadsize < 0: - msg = 'negative payload chunk size: %i' % payloadsize - raise error.BundleValueError(msg) - else: - result = self._readexact(payloadsize) - chunknum += 1 - pos += payloadsize - if chunknum == len(self._chunkindex): - self._chunkindex.append((pos, self._tellfp())) - yield result - payloadsize = self._unpack(_fpayloadsize)[0] - indebug(self.ui, 'payload chunk size: %i' % payloadsize) - - def _findchunk(self, pos): - '''for a given payload position, return a chunk number and offset''' - for chunk, (ppos, fpos) in enumerate(self._chunkindex): - if ppos == pos: - return chunk, 0 - elif ppos > pos: - return chunk - 1, pos - self._chunkindex[chunk - 1][0] - raise ValueError('Unknown chunk') - def _readheader(self): """read the header and setup the object""" typesize = self._unpackheader(_fparttypesize)[0] @@ -1311,6 +1327,24 @@ # we read the data, tell it self._initialized = True + def _payloadchunks(self): + """Generator of decoded chunks in the payload.""" + return decodepayloadchunks(self.ui, self._fp) + + def consume(self): + """Read the part payload until completion. + + By consuming the part data, the underlying stream read offset will + be advanced to the next part (or end of stream). + """ + if self.consumed: + return + + chunk = self.read(32768) + while chunk: + self._pos += len(chunk) + chunk = self.read(32768) + def read(self, size=None): """read payload data""" if not self._initialized: @@ -1327,23 +1361,82 @@ self.consumed = True return data +class seekableunbundlepart(unbundlepart): + """A bundle2 part in a bundle that is seekable. + + Regular ``unbundlepart`` instances can only be read once. This class + extends ``unbundlepart`` to enable bi-directional seeking within the + part. + + Bundle2 part data consists of framed chunks. Offsets when seeking + refer to the decoded data, not the offsets in the underlying bundle2 + stream. 
+ + To facilitate quickly seeking within the decoded data, instances of this + class maintain a mapping between offsets in the underlying stream and + the decoded payload. This mapping will consume memory in proportion + to the number of chunks within the payload (which almost certainly + increases in proportion with the size of the part). + """ + def __init__(self, ui, header, fp): + # (payload, file) offsets for chunk starts. + self._chunkindex = [] + + super(seekableunbundlepart, self).__init__(ui, header, fp) + + def _payloadchunks(self, chunknum=0): + '''seek to specified chunk and start yielding data''' + if len(self._chunkindex) == 0: + assert chunknum == 0, 'Must start with chunk 0' + self._chunkindex.append((0, self._tellfp())) + else: + assert chunknum < len(self._chunkindex), \ + 'Unknown chunk %d' % chunknum + self._seekfp(self._chunkindex[chunknum][1]) + + pos = self._chunkindex[chunknum][0] + + for chunk in decodepayloadchunks(self.ui, self._fp): + chunknum += 1 + pos += len(chunk) + if chunknum == len(self._chunkindex): + self._chunkindex.append((pos, self._tellfp())) + + yield chunk + + def _findchunk(self, pos): + '''for a given payload position, return a chunk number and offset''' + for chunk, (ppos, fpos) in enumerate(self._chunkindex): + if ppos == pos: + return chunk, 0 + elif ppos > pos: + return chunk - 1, pos - self._chunkindex[chunk - 1][0] + raise ValueError('Unknown chunk') + def tell(self): return self._pos - def seek(self, offset, whence=0): - if whence == 0: + def seek(self, offset, whence=os.SEEK_SET): + if whence == os.SEEK_SET: newpos = offset - elif whence == 1: + elif whence == os.SEEK_CUR: newpos = self._pos + offset - elif whence == 2: + elif whence == os.SEEK_END: if not self.consumed: - self.read() + # Can't use self.consume() here because it advances self._pos. 
+ chunk = self.read(32768) + while chunk: + chunk = self.read(32768) newpos = self._chunkindex[-1][0] - offset else: raise ValueError('Unknown whence value: %r' % (whence,)) if newpos > self._chunkindex[-1][0] and not self.consumed: - self.read() + # Can't use self.consume() here because it advances self._pos. + chunk = self.read(32768) + while chunk: + chunk = self.read(32768) + if not 0 <= newpos <= self._chunkindex[-1][0]: + raise ValueError('Offset out of range') @@ -1389,6 +1482,7 @@ # These are only the static capabilities. # Check the 'getrepocaps' function for the rest. capabilities = {'HG20': (), + 'bookmarks': (), 'error': ('abort', 'unsupportedcontent', 'pushraced', 'pushkey'), 'listkeys': (), @@ -1397,13 +1491,21 @@ 'remote-changegroup': ('http', 'https'), 'hgtagsfnodes': (), 'phases': ('heads',), + 'stream': ('v2',), } -def getrepocaps(repo, allowpushback=False): +def getrepocaps(repo, allowpushback=False, role=None): """return the bundle2 capabilities for a given repo Exists to allow extensions (like evolution) to mutate the capabilities. + + The returned value is used for servers advertising their capabilities as + well as clients advertising their capabilities to servers as part of + bundle2 requests. The ``role`` argument specifies which is which. """ + if role not in ('client', 'server'): + raise error.ProgrammingError('role argument must be client or server') + caps = capabilities.copy() caps['changegroup'] = tuple(sorted( changegroup.supportedincomingversions(repo))) @@ -1417,6 +1519,18 @@ caps['checkheads'] = ('related',) if 'phases' in repo.ui.configlist('devel', 'legacy.exchange'): caps.pop('phases') + + # Don't advertise stream clone support in server mode if not configured. 
+ if role == 'server': + streamsupported = repo.ui.configbool('server', 'uncompressed', + untrusted=True) + featuresupported = repo.ui.configbool('experimental', 'bundle2.stream') + + if not streamsupported or not featuresupported: + caps.pop('stream') + # Else always advertise support on client, because payload support + # should always be advertised. + return caps def bundle2caps(remote): @@ -1702,6 +1816,34 @@ replyto = int(inpart.params['in-reply-to']) op.records.add('changegroup', {'return': ret}, replyto) +@parthandler('check:bookmarks') +def handlecheckbookmarks(op, inpart): + """check location of bookmarks + + This part is to be used to detect push race regarding bookmark, it + contains binary encoded (bookmark, node) tuple. If the local state does + not marks the one in the part, a PushRaced exception is raised + """ + bookdata = bookmarks.binarydecode(inpart) + + msgstandard = ('repository changed while pushing - please try again ' + '(bookmark "%s" move from %s to %s)') + msgmissing = ('repository changed while pushing - please try again ' + '(bookmark "%s" is missing, expected %s)') + msgexist = ('repository changed while pushing - please try again ' + '(bookmark "%s" set on %s, expected missing)') + for book, node in bookdata: + currentnode = op.repo._bookmarks.get(book) + if currentnode != node: + if node is None: + finalmsg = msgexist % (book, nodemod.short(currentnode)) + elif currentnode is None: + finalmsg = msgmissing % (book, nodemod.short(node)) + else: + finalmsg = msgstandard % (book, nodemod.short(node), + nodemod.short(currentnode)) + raise error.PushRaced(finalmsg) + @parthandler('check:heads') def handlecheckheads(op, inpart): """check that head of the repo did not change @@ -1861,6 +2003,60 @@ kwargs[key] = inpart.params[key] raise error.PushkeyFailed(partid=str(inpart.id), **kwargs) +@parthandler('bookmarks') +def handlebookmark(op, inpart): + """transmit bookmark information + + The part contains binary encoded bookmark information. 
+ + The exact behavior of this part can be controlled by the 'bookmarks' mode + on the bundle operation. + + When mode is 'apply' (the default) the bookmark information is applied as + is to the unbundling repository. Make sure a 'check:bookmarks' part is + issued earlier to check for push races in such update. This behavior is + suitable for pushing. + + When mode is 'records', the information is recorded into the 'bookmarks' + records of the bundle operation. This behavior is suitable for pulling. + """ + changes = bookmarks.binarydecode(inpart) + + pushkeycompat = op.repo.ui.configbool('server', 'bookmarks-pushkey-compat') + bookmarksmode = op.modes.get('bookmarks', 'apply') + + if bookmarksmode == 'apply': + tr = op.gettransaction() + bookstore = op.repo._bookmarks + if pushkeycompat: + allhooks = [] + for book, node in changes: + hookargs = tr.hookargs.copy() + hookargs['pushkeycompat'] = '1' + hookargs['namespace'] = 'bookmark' + hookargs['key'] = book + hookargs['old'] = nodemod.hex(bookstore.get(book, '')) + hookargs['new'] = nodemod.hex(node if node is not None else '') + allhooks.append(hookargs) + + for hookargs in allhooks: + op.repo.hook('prepushkey', throw=True, **hookargs) + + bookstore.applychanges(op.repo, op.gettransaction(), changes) + + if pushkeycompat: + def runhook(): + for hookargs in allhooks: + op.repo.hook('pushkey', **hookargs) + op.repo._afterlock(runhook) + + elif bookmarksmode == 'records': + for book, node in changes: + record = {'bookmark': book, 'node': node} + op.records.add('bookmarks', record) + else: + raise error.ProgrammingError('unkown bookmark mode: %s' % bookmarksmode) + @parthandler('phase-heads') def handlephases(op, inpart): """apply phases from bundle part to repo""" @@ -1885,7 +2081,7 @@ # The mergemarkers call will crash if marker creation is not enabled. # we want to avoid this if the part is advisory. 
if not inpart.mandatory and op.repo.obsstore.readonly: - op.repo.ui.debug('ignoring obsolescence markers, feature not enabled') + op.repo.ui.debug('ignoring obsolescence markers, feature not enabled\n') return new = op.repo.obsstore.mergemarkers(tr, markerdata) op.repo.invalidatevolatilesets() @@ -1943,3 +2139,27 @@ key = "USERVAR_" + key hookargs[key] = value op.addhookargs(hookargs) + +@parthandler('stream2', ('requirements', 'filecount', 'bytecount')) +def handlestreamv2bundle(op, part): + + requirements = part.params['requirements'].split() + filecount = int(part.params['filecount']) + bytecount = int(part.params['bytecount']) + + repo = op.repo + if len(repo): + msg = _('cannot apply stream clone to non empty repository') + raise error.Abort(msg) + + repo.ui.debug('applying stream bundle\n') + streamclone.applybundlev2(repo, part, filecount, bytecount, + requirements) + + # new requirements = old non-format requirements + + # new format-related remote requirements + # requirements from the streamed-in repository + repo.requirements = set(requirements) | ( + repo.requirements - repo.supportedformats) + repo._applyopenerreqs() + repo._writerequirements() diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/bundlerepo.py --- a/mercurial/bundlerepo.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/bundlerepo.py Mon Jan 22 17:53:02 2018 -0500 @@ -42,7 +42,7 @@ ) class bundlerevlog(revlog.revlog): - def __init__(self, opener, indexfile, bundle, linkmapper): + def __init__(self, opener, indexfile, cgunpacker, linkmapper): # How it works: # To retrieve a revision, we need to know the offset of the revision in # the bundle (an unbundle object). We store this offset in the index @@ -52,15 +52,15 @@ # check revision against repotiprev. 
opener = vfsmod.readonlyvfs(opener) revlog.revlog.__init__(self, opener, indexfile) - self.bundle = bundle + self.bundle = cgunpacker n = len(self) self.repotiprev = n - 1 self.bundlerevs = set() # used by 'bundle()' revset expression - for deltadata in bundle.deltaiter(): + for deltadata in cgunpacker.deltaiter(): node, p1, p2, cs, deltabase, delta, flags = deltadata size = len(delta) - start = bundle.tell() - size + start = cgunpacker.tell() - size link = linkmapper(cs) if node in self.nodemap: @@ -86,7 +86,7 @@ self.bundlerevs.add(n) n += 1 - def _chunk(self, rev): + def _chunk(self, rev, df=None): # Warning: in case of bundle, the diff is against what we stored as # delta base, not against rev - 1 # XXX: could use some caching @@ -108,7 +108,7 @@ return mdiff.textdiff(self.revision(rev1, raw=True), self.revision(rev2, raw=True)) - def revision(self, nodeorrev, raw=False): + def revision(self, nodeorrev, _df=None, raw=False): """return an uncompressed revision of a given node or revision number. """ @@ -152,20 +152,23 @@ # needs to override 'baserevision' and make more specific call here. 
return revlog.revlog.revision(self, nodeorrev, raw=True) - def addrevision(self, text, transaction, link, p1=None, p2=None, d=None): + def addrevision(self, *args, **kwargs): + raise NotImplementedError + + def addgroup(self, *args, **kwargs): raise NotImplementedError - def addgroup(self, deltas, transaction, addrevisioncb=None): + + def strip(self, *args, **kwargs): raise NotImplementedError - def strip(self, rev, minlink): - raise NotImplementedError + def checksize(self): raise NotImplementedError class bundlechangelog(bundlerevlog, changelog.changelog): - def __init__(self, opener, bundle): + def __init__(self, opener, cgunpacker): changelog.changelog.__init__(self, opener) linkmapper = lambda x: x - bundlerevlog.__init__(self, opener, self.indexfile, bundle, + bundlerevlog.__init__(self, opener, self.indexfile, cgunpacker, linkmapper) def baserevision(self, nodeorrev): @@ -183,9 +186,10 @@ self.filteredrevs = oldfilter class bundlemanifest(bundlerevlog, manifest.manifestrevlog): - def __init__(self, opener, bundle, linkmapper, dirlogstarts=None, dir=''): + def __init__(self, opener, cgunpacker, linkmapper, dirlogstarts=None, + dir=''): manifest.manifestrevlog.__init__(self, opener, dir=dir) - bundlerevlog.__init__(self, opener, self.indexfile, bundle, + bundlerevlog.__init__(self, opener, self.indexfile, cgunpacker, linkmapper) if dirlogstarts is None: dirlogstarts = {} @@ -214,9 +218,9 @@ return super(bundlemanifest, self).dirlog(d) class bundlefilelog(bundlerevlog, filelog.filelog): - def __init__(self, opener, path, bundle, linkmapper): + def __init__(self, opener, path, cgunpacker, linkmapper): filelog.filelog.__init__(self, opener, path) - bundlerevlog.__init__(self, opener, self.indexfile, bundle, + bundlerevlog.__init__(self, opener, self.indexfile, cgunpacker, linkmapper) def baserevision(self, nodeorrev): @@ -243,82 +247,106 @@ self.invalidate() self.dirty = True -def _getfilestarts(bundle): - bundlefilespos = {} - for chunkdata in 
iter(bundle.filelogheader, {}): +def _getfilestarts(cgunpacker): + filespos = {} + for chunkdata in iter(cgunpacker.filelogheader, {}): fname = chunkdata['filename'] - bundlefilespos[fname] = bundle.tell() - for chunk in iter(lambda: bundle.deltachunk(None), {}): + filespos[fname] = cgunpacker.tell() + for chunk in iter(lambda: cgunpacker.deltachunk(None), {}): pass - return bundlefilespos + return filespos class bundlerepository(localrepo.localrepository): - def __init__(self, ui, path, bundlename): + """A repository instance that is a union of a local repo and a bundle. + + Instances represent a read-only repository composed of a local repository + with the contents of a bundle file applied. The repository instance is + conceptually similar to the state of a repository after an + ``hg unbundle`` operation. However, the contents of the bundle are never + applied to the actual base repository. + """ + def __init__(self, ui, repopath, bundlepath): self._tempparent = None try: - localrepo.localrepository.__init__(self, ui, path) + localrepo.localrepository.__init__(self, ui, repopath) except error.RepoError: self._tempparent = tempfile.mkdtemp() localrepo.instance(ui, self._tempparent, 1) localrepo.localrepository.__init__(self, ui, self._tempparent) self.ui.setconfig('phases', 'publish', False, 'bundlerepo') - if path: - self._url = 'bundle:' + util.expandpath(path) + '+' + bundlename + if repopath: + self._url = 'bundle:' + util.expandpath(repopath) + '+' + bundlepath else: - self._url = 'bundle:' + bundlename + self._url = 'bundle:' + bundlepath self.tempfile = None - f = util.posixfile(bundlename, "rb") - self.bundlefile = self.bundle = exchange.readbundle(ui, f, bundlename) + f = util.posixfile(bundlepath, "rb") + bundle = exchange.readbundle(ui, f, bundlepath) - if isinstance(self.bundle, bundle2.unbundle20): - hadchangegroup = False - for part in self.bundle.iterparts(): + if isinstance(bundle, bundle2.unbundle20): + self._bundlefile = bundle + 
self._cgunpacker = None + + cgpart = None + for part in bundle.iterparts(seekable=True): if part.type == 'changegroup': - if hadchangegroup: + if cgpart: raise NotImplementedError("can't process " "multiple changegroups") - hadchangegroup = True + cgpart = part - self._handlebundle2part(part) + self._handlebundle2part(bundle, part) - if not hadchangegroup: + if not cgpart: raise error.Abort(_("No changegroups found")) - elif self.bundle.compressed(): - f = self._writetempbundle(self.bundle.read, '.hg10un', - header='HG10UN') - self.bundlefile = self.bundle = exchange.readbundle(ui, f, - bundlename, - self.vfs) + # This is required to placate a later consumer, which expects + # the payload offset to be at the beginning of the changegroup. + # We need to do this after the iterparts() generator advances + # because iterparts() will seek to end of payload after the + # generator returns control to iterparts(). + cgpart.seek(0, os.SEEK_SET) - # dict with the mapping 'filename' -> position in the bundle - self.bundlefilespos = {} + elif isinstance(bundle, changegroup.cg1unpacker): + if bundle.compressed(): + f = self._writetempbundle(bundle.read, '.hg10un', + header='HG10UN') + bundle = exchange.readbundle(ui, f, bundlepath, self.vfs) + + self._bundlefile = bundle + self._cgunpacker = bundle + else: + raise error.Abort(_('bundle type %s cannot be read') % + type(bundle)) + + # dict with the mapping 'filename' -> position in the changegroup. 
+ self._cgfilespos = {} self.firstnewrev = self.changelog.repotiprev + 1 phases.retractboundary(self, None, phases.draft, [ctx.node() for ctx in self[self.firstnewrev:]]) - def _handlebundle2part(self, part): - if part.type == 'changegroup': - cgstream = part - version = part.params.get('version', '01') - legalcgvers = changegroup.supportedincomingversions(self) - if version not in legalcgvers: - msg = _('Unsupported changegroup version: %s') - raise error.Abort(msg % version) - if self.bundle.compressed(): - cgstream = self._writetempbundle(part.read, - ".cg%sun" % version) + def _handlebundle2part(self, bundle, part): + if part.type != 'changegroup': + return - self.bundle = changegroup.getunbundler(version, cgstream, 'UN') + cgstream = part + version = part.params.get('version', '01') + legalcgvers = changegroup.supportedincomingversions(self) + if version not in legalcgvers: + msg = _('Unsupported changegroup version: %s') + raise error.Abort(msg % version) + if bundle.compressed(): + cgstream = self._writetempbundle(part.read, '.cg%sun' % version) + + self._cgunpacker = changegroup.getunbundler(version, cgstream, 'UN') def _writetempbundle(self, readfn, suffix, header=''): """Write a temporary file to disk """ fdtemp, temp = self.vfs.mkstemp(prefix="hg-bundle-", - suffix=".hg10un") + suffix=suffix) self.tempfile = temp with os.fdopen(fdtemp, pycompat.sysstr('wb')) as fptemp: @@ -338,20 +366,29 @@ @localrepo.unfilteredpropertycache def changelog(self): # consume the header if it exists - self.bundle.changelogheader() - c = bundlechangelog(self.svfs, self.bundle) - self.manstart = self.bundle.tell() + self._cgunpacker.changelogheader() + c = bundlechangelog(self.svfs, self._cgunpacker) + self.manstart = self._cgunpacker.tell() return c def _constructmanifest(self): - self.bundle.seek(self.manstart) + self._cgunpacker.seek(self.manstart) # consume the header if it exists - self.bundle.manifestheader() + self._cgunpacker.manifestheader() linkmapper = 
self.unfiltered().changelog.rev - m = bundlemanifest(self.svfs, self.bundle, linkmapper) - self.filestart = self.bundle.tell() + m = bundlemanifest(self.svfs, self._cgunpacker, linkmapper) + self.filestart = self._cgunpacker.tell() return m + def _consumemanifest(self): + """Consumes the manifest portion of the bundle, setting filestart so the + file portion can be read.""" + self._cgunpacker.seek(self.manstart) + self._cgunpacker.manifestheader() + for delta in self._cgunpacker.deltaiter(): + pass + self.filestart = self._cgunpacker.tell() + @localrepo.unfilteredpropertycache def manstart(self): self.changelog @@ -360,26 +397,34 @@ @localrepo.unfilteredpropertycache def filestart(self): self.manifestlog + + # If filestart was not set by self.manifestlog, that means the + # manifestlog implementation did not consume the manifests from the + # changegroup (ex: it might be consuming trees from a separate bundle2 + # part instead). So we need to manually consume it. + if 'filestart' not in self.__dict__: + self._consumemanifest() + return self.filestart def url(self): return self._url def file(self, f): - if not self.bundlefilespos: - self.bundle.seek(self.filestart) - self.bundlefilespos = _getfilestarts(self.bundle) + if not self._cgfilespos: + self._cgunpacker.seek(self.filestart) + self._cgfilespos = _getfilestarts(self._cgunpacker) - if f in self.bundlefilespos: - self.bundle.seek(self.bundlefilespos[f]) + if f in self._cgfilespos: + self._cgunpacker.seek(self._cgfilespos[f]) linkmapper = self.unfiltered().changelog.rev - return bundlefilelog(self.svfs, f, self.bundle, linkmapper) + return bundlefilelog(self.svfs, f, self._cgunpacker, linkmapper) else: return filelog.filelog(self.svfs, f) def close(self): """Close assigned bundle file immediately.""" - self.bundlefile.close() + self._bundlefile.close() if self.tempfile is not None: self.vfs.unlink(self.tempfile) if self._tempparent: @@ -496,10 +541,10 @@ and other.capable('bundle2')) if canbundle2: kwargs = {} - 
kwargs['common'] = common - kwargs['heads'] = rheads - kwargs['bundlecaps'] = exchange.caps20to10(repo) - kwargs['cg'] = True + kwargs[r'common'] = common + kwargs[r'heads'] = rheads + kwargs[r'bundlecaps'] = exchange.caps20to10(repo, role='client') + kwargs[r'cg'] = True b2 = other.getbundle('incoming', **kwargs) fname = bundle = changegroup.writechunks(ui, b2._forwardchunks(), bundlename) diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/byterange.py --- a/mercurial/byterange.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/byterange.py Mon Jan 22 17:53:02 2018 -0500 @@ -416,7 +416,7 @@ if range_header is None: return None if _rangere is None: - _rangere = re.compile(r'^bytes=(\d{1,})-(\d*)') + _rangere = re.compile(br'^bytes=(\d{1,})-(\d*)') match = _rangere.match(range_header) if match: tup = range_tuple_normalize(match.group(1, 2)) diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/cacheutil.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/mercurial/cacheutil.py Mon Jan 22 17:53:02 2018 -0500 @@ -0,0 +1,21 @@ +# scmutil.py - Mercurial core utility functions +# +# Copyright Matt Mackall and other +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. +from __future__ import absolute_import + +from . import repoview + +def cachetocopy(srcrepo): + """return the list of cache file valuable to copy during a clone""" + # In local clones we're copying all nodes, not just served + # ones. Therefore copy all branch caches over. 
+ cachefiles = ['branch2'] + cachefiles += ['branch2-%s' % f for f in repoview.filtertable] + cachefiles += ['rbc-names-v1', 'rbc-revs-v1'] + cachefiles += ['tags2'] + cachefiles += ['tags2-%s' % f for f in repoview.filtertable] + cachefiles += ['hgtagsfnodes1'] + return cachefiles diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/cext/osutil.c --- a/mercurial/cext/osutil.c Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/cext/osutil.c Mon Jan 22 17:53:02 2018 -0500 @@ -20,6 +20,7 @@ #include #else #include +#include #include #include #include @@ -1111,6 +1112,43 @@ } #endif /* defined(HAVE_LINUX_STATFS) || defined(HAVE_BSD_STATFS) */ +#if defined(HAVE_BSD_STATFS) +/* given a directory path, return filesystem mount point (best-effort) */ +static PyObject *getfsmountpoint(PyObject *self, PyObject *args) +{ + const char *path = NULL; + struct statfs buf; + int r; + if (!PyArg_ParseTuple(args, "s", &path)) + return NULL; + + memset(&buf, 0, sizeof(buf)); + r = statfs(path, &buf); + if (r != 0) + return PyErr_SetFromErrno(PyExc_OSError); + return Py_BuildValue("s", buf.f_mntonname); +} +#endif /* defined(HAVE_BSD_STATFS) */ + +static PyObject *unblocksignal(PyObject *self, PyObject *args) +{ + int sig = 0; + int r; + if (!PyArg_ParseTuple(args, "i", &sig)) + return NULL; + sigset_t set; + r = sigemptyset(&set); + if (r != 0) + return PyErr_SetFromErrno(PyExc_OSError); + r = sigaddset(&set, sig); + if (r != 0) + return PyErr_SetFromErrno(PyExc_OSError); + r = sigprocmask(SIG_UNBLOCK, &set, NULL); + if (r != 0) + return PyErr_SetFromErrno(PyExc_OSError); + Py_RETURN_NONE; +} + #endif /* ndef _WIN32 */ static PyObject *listdir(PyObject *self, PyObject *args, PyObject *kwargs) @@ -1291,6 +1329,12 @@ {"getfstype", (PyCFunction)getfstype, METH_VARARGS, "get filesystem type (best-effort)\n"}, #endif +#if defined(HAVE_BSD_STATFS) + {"getfsmountpoint", (PyCFunction)getfsmountpoint, METH_VARARGS, + "get filesystem mount point (best-effort)\n"}, +#endif + {"unblocksignal", 
(PyCFunction)unblocksignal, METH_VARARGS, + "change signal mask to unblock a given signal\n"}, #endif /* ndef _WIN32 */ #ifdef __APPLE__ { @@ -1301,7 +1345,7 @@ {NULL, NULL} }; -static const int version = 1; +static const int version = 3; #ifdef IS_PY3K static struct PyModuleDef osutil_module = { diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/cext/parsers.c --- a/mercurial/cext/parsers.c Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/cext/parsers.c Mon Jan 22 17:53:02 2018 -0500 @@ -710,7 +710,7 @@ void manifest_module_init(PyObject *mod); void revlog_module_init(PyObject *mod); -static const int version = 3; +static const int version = 4; static void module_init(PyObject *mod) { diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/cext/revlog.c --- a/mercurial/cext/revlog.c Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/cext/revlog.c Mon Jan 22 17:53:02 2018 -0500 @@ -628,7 +628,7 @@ { PyObject *roots = Py_None; PyObject *ret = NULL; - PyObject *phaseslist = NULL; + PyObject *phasessize = NULL; PyObject *phaseroots = NULL; PyObject *phaseset = NULL; PyObject *phasessetlist = NULL; @@ -685,12 +685,10 @@ } } /* Transform phase list to a python list */ - phaseslist = PyList_New(len); - if (phaseslist == NULL) + phasessize = PyInt_FromLong(len); + if (phasessize == NULL) goto release; for (i = 0; i < len; i++) { - PyObject *phaseval; - phase = phases[i]; /* We only store the sets of phase for non public phase, the public phase * is computed as a difference */ @@ -702,15 +700,11 @@ PySet_Add(phaseset, rev); Py_XDECREF(rev); } - phaseval = PyInt_FromLong(phase); - if (phaseval == NULL) - goto release; - PyList_SET_ITEM(phaseslist, i, phaseval); } - ret = PyTuple_Pack(2, phaseslist, phasessetlist); + ret = PyTuple_Pack(2, phasessize, phasessetlist); release: - Py_XDECREF(phaseslist); + Py_XDECREF(phasessize); Py_XDECREF(phasessetlist); done: free(phases); diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/cext/util.h --- a/mercurial/cext/util.h Mon Jan 08 16:07:51 2018 -0800 +++ 
b/mercurial/cext/util.h Mon Jan 22 17:53:02 2018 -0500 @@ -27,7 +27,9 @@ extern PyTypeObject dirstateTupleType; #define dirstate_tuple_check(op) (Py_TYPE(op) == &dirstateTupleType) +#ifndef MIN #define MIN(a, b) (((a) < (b)) ? (a) : (b)) +#endif /* VC9 doesn't include bool and lacks stdbool.h based on my searching */ #if defined(_MSC_VER) || __STDC_VERSION__ < 199901L #define true 1 diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/changegroup.py --- a/mercurial/changegroup.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/changegroup.py Mon Jan 22 17:53:02 2018 -0500 @@ -32,14 +32,7 @@ _CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s" _CHANGEGROUPV3_DELTA_HEADER = ">20s20s20s20s20sH" -def readexactly(stream, n): - '''read n bytes from stream.read and abort if less was available''' - s = stream.read(n) - if len(s) < n: - raise error.Abort(_("stream ended unexpectedly" - " (got %d bytes, expected %d)") - % (len(s), n)) - return s +readexactly = util.readexactly def getchunk(stream): """return the next chunk from stream as a string""" @@ -692,7 +685,7 @@ # Callback for the manifest, used to collect linkrevs for filelog # revisions. # Returns the linkrev node (collected in lookupcl). - def makelookupmflinknode(dir): + def makelookupmflinknode(dir, nodes): if fastpathlinkrev: assert not dir return mfs.__getitem__ @@ -713,7 +706,7 @@ the client before you can trust the list of files and treemanifests to send. 
""" - clnode = tmfnodes[dir][x] + clnode = nodes[x] mdata = mfl.get(dir, x).readfast(shallow=True) for p, n, fl in mdata.iterentries(): if fl == 't': # subdirectory manifest @@ -733,15 +726,13 @@ size = 0 while tmfnodes: - dir = min(tmfnodes) - nodes = tmfnodes[dir] + dir, nodes = tmfnodes.popitem() prunednodes = self.prune(dirlog(dir), nodes, commonrevs) if not dir or prunednodes: for x in self._packmanifests(dir, prunednodes, - makelookupmflinknode(dir)): + makelookupmflinknode(dir, nodes)): size += len(x) yield x - del tmfnodes[dir] self._verbosenote(_('%8.i (manifests)\n') % size) yield self._manifestsdone() diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/changelog.py --- a/mercurial/changelog.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/changelog.py Mon Jan 22 17:53:02 2018 -0500 @@ -295,11 +295,14 @@ self._divert = False self.filteredrevs = frozenset() + def tiprev(self): + for i in xrange(len(self) -1, -2, -1): + if i not in self.filteredrevs: + return i + def tip(self): """filtered version of revlog.tip""" - for i in xrange(len(self) -1, -2, -1): - if i not in self.filteredrevs: - return self.node(i) + return self.node(self.tiprev()) def __contains__(self, rev): """filtered version of revlog.__contains__""" @@ -541,5 +544,10 @@ *args, **kwargs) revs = transaction.changes.get('revs') if revs is not None: - revs.add(rev) + if revs: + assert revs[-1] + 1 == rev + revs = xrange(revs[0], rev + 1) + else: + revs = xrange(rev, rev + 1) + transaction.changes['revs'] = revs return node diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/chgserver.py --- a/mercurial/chgserver.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/chgserver.py Mon Jan 22 17:53:02 2018 -0500 @@ -55,6 +55,7 @@ encoding, error, extensions, + node, pycompat, util, ) @@ -63,7 +64,7 @@ def _hashlist(items): """return sha1 hexdigest for a list""" - return hashlib.sha1(str(items)).hexdigest() + return node.hex(hashlib.sha1(str(items)).digest()) # sensitive config sections affecting confighash 
_configsections = [ @@ -220,16 +221,7 @@ newui._csystem = srcui._csystem # command line args - options = {} - if srcui.plain('strictflags'): - options.update(dispatch._earlyparseopts(args)) - else: - args = args[:] - options['config'] = dispatch._earlygetopt(['--config'], args) - cwds = dispatch._earlygetopt(['--cwd'], args) - options['cwd'] = cwds and cwds[-1] or '' - rpath = dispatch._earlygetopt(["-R", "--repository", "--repo"], args) - options['repository'] = rpath and rpath[-1] or '' + options = dispatch._earlyparseopts(newui, args) dispatch._parseconfig(newui, options['config']) # stolen from tortoisehg.util.copydynamicconfig() diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/cmdutil.py --- a/mercurial/cmdutil.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/cmdutil.py Mon Jan 22 17:53:02 2018 -0500 @@ -41,6 +41,8 @@ registrar, revlog, revset, + revsetlang, + rewriteutil, scmutil, smartset, templatekw, @@ -181,7 +183,7 @@ def setupwrapcolorwrite(ui): # wrap ui.write so diff output can be labeled/colorized def wrapwrite(orig, *args, **kw): - label = kw.pop('label', '') + label = kw.pop(r'label', '') for chunk, l in patch.difflabel(lambda: args): orig(chunk, label=label + l) @@ -372,7 +374,7 @@ # Make all of the pathnames absolute. newfiles = [repo.wjoin(nf) for nf in newfiles] - return commitfunc(ui, repo, *newfiles, **opts) + return commitfunc(ui, repo, *newfiles, **pycompat.strkwargs(opts)) finally: # 5. 
finally restore backed-up files try: @@ -712,6 +714,97 @@ raise error.UnknownCommand(cmd, allcmds) +def changebranch(ui, repo, revs, label): + """ Change the branch name of given revs to label """ + + with repo.wlock(), repo.lock(), repo.transaction('branches'): + # abort in case of uncommitted merge or dirty wdir + bailifchanged(repo) + revs = scmutil.revrange(repo, revs) + if not revs: + raise error.Abort("empty revision set") + roots = repo.revs('roots(%ld)', revs) + if len(roots) > 1: + raise error.Abort(_("cannot change branch of non-linear revisions")) + rewriteutil.precheck(repo, revs, 'change branch of') + + root = repo[roots.first()] + if not root.p1().branch() == label and label in repo.branchmap(): + raise error.Abort(_("a branch of the same name already exists")) + + if repo.revs('merge() and %ld', revs): + raise error.Abort(_("cannot change branch of a merge commit")) + if repo.revs('obsolete() and %ld', revs): + raise error.Abort(_("cannot change branch of a obsolete changeset")) + + # make sure only topological heads + if repo.revs('heads(%ld) - head()', revs): + raise error.Abort(_("cannot change branch in middle of a stack")) + + replacements = {} + # avoid import cycle mercurial.cmdutil -> mercurial.context -> + # mercurial.subrepo -> mercurial.cmdutil + from . 
import context + for rev in revs: + ctx = repo[rev] + oldbranch = ctx.branch() + # check if ctx has same branch + if oldbranch == label: + continue + + def filectxfn(repo, newctx, path): + try: + return ctx[path] + except error.ManifestLookupError: + return None + + ui.debug("changing branch of '%s' from '%s' to '%s'\n" + % (hex(ctx.node()), oldbranch, label)) + extra = ctx.extra() + extra['branch_change'] = hex(ctx.node()) + # While changing branch of set of linear commits, make sure that + # we base our commits on new parent rather than old parent which + # was obsoleted while changing the branch + p1 = ctx.p1().node() + p2 = ctx.p2().node() + if p1 in replacements: + p1 = replacements[p1][0] + if p2 in replacements: + p2 = replacements[p2][0] + + mc = context.memctx(repo, (p1, p2), + ctx.description(), + ctx.files(), + filectxfn, + user=ctx.user(), + date=ctx.date(), + extra=extra, + branch=label) + + commitphase = ctx.phase() + overrides = {('phases', 'new-commit'): commitphase} + with repo.ui.configoverride(overrides, 'branch-change'): + newnode = repo.commitctx(mc) + + replacements[ctx.node()] = (newnode,) + ui.debug('new node id is %s\n' % hex(newnode)) + + # create obsmarkers and move bookmarks + scmutil.cleanupnodes(repo, replacements, 'branch-change') + + # move the working copy too + wctx = repo[None] + # in-progress merge is a bit too complex for now. + if len(wctx.parents()) == 1: + newid = replacements.get(wctx.p1().node()) + if newid is not None: + # avoid import cycle mercurial.cmdutil -> mercurial.hg -> + # mercurial.cmdutil + from . 
import hg + hg.update(repo, newid[0], quietempty=True) + + ui.status(_("changed branch on %d changesets\n") % len(replacements)) + def findrepo(p): while not os.path.isdir(os.path.join(p, ".hg")): oldp, p = p, os.path.dirname(p) @@ -823,9 +916,9 @@ total=None, seqno=None, revwidth=None, pathname=None): node_expander = { 'H': lambda: hex(node), - 'R': lambda: str(repo.changelog.rev(node)), + 'R': lambda: '%d' % repo.changelog.rev(node), 'h': lambda: short(node), - 'm': lambda: re.sub('[^\w]', '_', str(desc)) + 'm': lambda: re.sub('[^\w]', '_', desc or '') } expander = { '%': lambda: '%', @@ -837,13 +930,13 @@ expander.update(node_expander) if node: expander['r'] = (lambda: - str(repo.changelog.rev(node)).zfill(revwidth or 0)) + ('%d' % repo.changelog.rev(node)).zfill(revwidth or 0)) if total is not None: - expander['N'] = lambda: str(total) + expander['N'] = lambda: '%d' % total if seqno is not None: - expander['n'] = lambda: str(seqno) + expander['n'] = lambda: '%d' % seqno if total is not None and seqno is not None: - expander['n'] = lambda: str(seqno).zfill(len(str(total))) + expander['n'] = (lambda: ('%d' % seqno).zfill(len('%d' % total))) if pathname is not None: expander['s'] = lambda: os.path.basename(pathname) expander['d'] = lambda: os.path.dirname(pathname) or '.' 
@@ -1334,7 +1427,8 @@ if opts.get('exact'): editor = None else: - editor = getcommiteditor(editform=editform, **opts) + editor = getcommiteditor(editform=editform, + **pycompat.strkwargs(opts)) extra = {} for idfunc in extrapreimport: extrapreimportmap[idfunc](repo, extractdata, extra, opts) @@ -1518,7 +1612,7 @@ width = 80 if not ui.plain(): width = ui.termwidth() - chunks = patch.diff(repo, node1, node2, match, changes, diffopts, + chunks = patch.diff(repo, node1, node2, match, changes, opts=diffopts, prefix=prefix, relroot=relroot, hunksfilterfn=hunksfilterfn) for chunk, label in patch.diffstatui(util.iterlines(chunks), @@ -1526,7 +1620,7 @@ write(chunk, label=label) else: for chunk, label in patch.diffui(repo, node1, node2, match, - changes, diffopts, prefix=prefix, + changes, opts=diffopts, prefix=prefix, relroot=relroot, hunksfilterfn=hunksfilterfn): write(chunk, label=label) @@ -1571,6 +1665,7 @@ self.hunk = {} self.lastheader = None self.footer = None + self._columns = templatekw.getlogcolumns() def flush(self, ctx): rev = ctx.rev() @@ -1583,8 +1678,6 @@ if rev in self.hunk: self.ui.write(self.hunk[rev]) del self.hunk[rev] - return 1 - return 0 def close(self): if self.footer: @@ -1610,10 +1703,8 @@ label='log.node') return - date = util.datestr(ctx.date()) - - # i18n: column positioning for "hg log" - self.ui.write(_("changeset: %s\n") % scmutil.formatchangeid(ctx), + columns = self._columns + self.ui.write(columns['changeset'] % scmutil.formatchangeid(ctx), label=_changesetlabels(ctx)) # branches are shown first before any other names due to backwards @@ -1621,9 +1712,7 @@ branch = ctx.branch() # don't show the default branch name if branch != 'default': - # i18n: column positioning for "hg log" - self.ui.write(_("branch: %s\n") % branch, - label='log.branch') + self.ui.write(columns['branch'] % branch, label='log.branch') for nsname, ns in self.repo.names.iteritems(): # branches has special logic already handled above, so here we just @@ -1636,33 
+1725,25 @@ self.ui.write(ns.logfmt % name, label='log.%s' % ns.colorname) if self.ui.debugflag: - # i18n: column positioning for "hg log" - self.ui.write(_("phase: %s\n") % ctx.phasestr(), - label='log.phase') + self.ui.write(columns['phase'] % ctx.phasestr(), label='log.phase') for pctx in scmutil.meaningfulparents(self.repo, ctx): label = 'log.parent changeset.%s' % pctx.phasestr() - # i18n: column positioning for "hg log" - self.ui.write(_("parent: %s\n") % scmutil.formatchangeid(pctx), + self.ui.write(columns['parent'] % scmutil.formatchangeid(pctx), label=label) if self.ui.debugflag and rev is not None: mnode = ctx.manifestnode() mrev = self.repo.manifestlog._revlog.rev(mnode) - # i18n: column positioning for "hg log" - self.ui.write(_("manifest: %s\n") + self.ui.write(columns['manifest'] % scmutil.formatrevnode(self.ui, mrev, mnode), label='ui.debug log.manifest') - # i18n: column positioning for "hg log" - self.ui.write(_("user: %s\n") % ctx.user(), - label='log.user') - # i18n: column positioning for "hg log" - self.ui.write(_("date: %s\n") % date, + self.ui.write(columns['user'] % ctx.user(), label='log.user') + self.ui.write(columns['date'] % util.datestr(ctx.date()), label='log.date') if ctx.isunstable(): - # i18n: column positioning for "hg log" instabilities = ctx.instabilities() - self.ui.write(_("instability: %s\n") % ', '.join(instabilities), + self.ui.write(columns['instability'] % ', '.join(instabilities), label='log.instability') elif ctx.obsolete(): @@ -1672,31 +1753,22 @@ if self.ui.debugflag: files = ctx.p1().status(ctx)[:3] - for key, value in zip([# i18n: column positioning for "hg log" - _("files:"), - # i18n: column positioning for "hg log" - _("files+:"), - # i18n: column positioning for "hg log" - _("files-:")], files): + for key, value in zip(['files', 'files+', 'files-'], files): if value: - self.ui.write("%-12s %s\n" % (key, " ".join(value)), + self.ui.write(columns[key] % " ".join(value), label='ui.debug log.files') elif ctx.files() 
and self.ui.verbose: - # i18n: column positioning for "hg log" - self.ui.write(_("files: %s\n") % " ".join(ctx.files()), + self.ui.write(columns['files'] % " ".join(ctx.files()), label='ui.note log.files') if copies and self.ui.verbose: copies = ['%s (%s)' % c for c in copies] - # i18n: column positioning for "hg log" - self.ui.write(_("copies: %s\n") % ' '.join(copies), + self.ui.write(columns['copies'] % ' '.join(copies), label='ui.note log.copies') extra = ctx.extra() if extra and self.ui.debugflag: for key, value in sorted(extra.items()): - # i18n: column positioning for "hg log" - self.ui.write(_("extra: %s=%s\n") - % (key, util.escapestr(value)), + self.ui.write(columns['extra'] % (key, util.escapestr(value)), label='ui.debug log.extra') description = ctx.description().strip() @@ -1708,9 +1780,7 @@ label='ui.note log.description') self.ui.write("\n\n") else: - # i18n: column positioning for "hg log" - self.ui.write(_("summary: %s\n") % - description.splitlines()[0], + self.ui.write(columns['summary'] % description.splitlines()[0], label='log.summary') self.ui.write("\n") @@ -1721,8 +1791,7 @@ if obsfate: for obsfateline in obsfate: - # i18n: column positioning for "hg log" - self.ui.write(_("obsolete: %s\n") % obsfateline, + self.ui.write(self._columns['obsolete'] % obsfateline, label='log.obsfate') def _exthook(self, ctx): @@ -1748,7 +1817,8 @@ diffordiffstat(self.ui, self.repo, diffopts, prev, node, match=matchfn, stat=False, hunksfilterfn=hunksfilterfn) - self.ui.write("\n") + if stat or diff: + self.ui.write("\n") class jsonchangeset(changeset_printer): '''format changeset information.''' @@ -1850,7 +1920,13 @@ self.ui.write("\n }") class changeset_templater(changeset_printer): - '''format changeset information.''' + '''format changeset information. + + Note: there are a variety of convenience functions to build a + changeset_templater for common cases. 
See functions such as: + makelogtemplater, show_changeset, buildcommittemplate, or other + functions that use changesest_templater. + ''' # Arguments before "buffered" used to be positional. Consider not # adding/removing arguments before "buffered" to not break callers. @@ -1859,10 +1935,13 @@ diffopts = diffopts or {} changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered) + tres = formatter.templateresources(ui, repo) self.t = formatter.loadtemplater(ui, tmplspec, + defaults=templatekw.keywords, + resources=tres, cache=templatekw.defaulttempl) self._counter = itertools.count() - self.cache = {} + self.cache = tres['cache'] # shared with _graphnodeformatter() self._tref = tmplspec.ref self._parts = {'header': '', 'footer': '', @@ -1901,14 +1980,9 @@ def _show(self, ctx, copies, matchfn, hunksfilterfn, props): '''show a single changeset or file revision''' props = props.copy() - props.update(templatekw.keywords) - props['templ'] = self.t props['ctx'] = ctx - props['repo'] = self.repo - props['ui'] = self.repo.ui props['index'] = index = next(self._counter) props['revcache'] = {'copies': copies} - props['cache'] = self.cache props = pycompat.strkwargs(props) # write separator, which wouldn't work well with the header part below @@ -1972,7 +2046,8 @@ return formatter.lookuptemplate(ui, 'changeset', tmpl) def makelogtemplater(ui, repo, tmpl, buffered=False): - """Create a changeset_templater from a literal template 'tmpl'""" + """Create a changeset_templater from a literal template 'tmpl' + byte-string.""" spec = logtemplatespec(tmpl, None) return changeset_templater(ui, repo, spec, buffered=buffered) @@ -2050,6 +2125,21 @@ if windowsize < sizelimit: windowsize *= 2 +def _walkrevs(repo, opts): + # Default --rev value depends on --follow but --follow behavior + # depends on revisions resolved from --rev... 
+ follow = opts.get('follow') or opts.get('follow_first') + if opts.get('rev'): + revs = scmutil.revrange(repo, opts['rev']) + elif follow and repo.dirstate.p1() == nullid: + revs = smartset.baseset() + elif follow: + revs = repo.revs('reverse(:.)') + else: + revs = smartset.spanset(repo) + revs.reverse() + return revs + class FileWalkError(Exception): pass @@ -2204,12 +2294,11 @@ function on each context in the window in forward order.''' follow = opts.get('follow') or opts.get('follow_first') - revs = _logrevs(repo, opts) + revs = _walkrevs(repo, opts) if not revs: return [] wanted = set() - slowpath = match.anypats() or ((match.isexact() or match.prefix()) and - opts.get('removed')) + slowpath = match.anypats() or (not match.always() and opts.get('removed')) fncache = {} change = repo.changectx @@ -2326,90 +2415,36 @@ return iterate() -def _makefollowlogfilematcher(repo, files, followfirst): - # When displaying a revision with --patch --follow FILE, we have - # to know which file of the revision must be diffed. With - # --follow, we want the names of the ancestors of FILE in the - # revision, stored in "fcache". "fcache" is populated by - # reproducing the graph traversal already done by --follow revset - # and relating revs to file names (which is not "correct" but - # good enough). 
- fcache = {} - fcacheready = [False] - pctx = repo['.'] - - def populate(): - for fn in files: - fctx = pctx[fn] - fcache.setdefault(fctx.introrev(), set()).add(fctx.path()) - for c in fctx.ancestors(followfirst=followfirst): - fcache.setdefault(c.rev(), set()).add(c.path()) - - def filematcher(rev): - if not fcacheready[0]: - # Lazy initialization - fcacheready[0] = True - populate() - return scmutil.matchfiles(repo, fcache.get(rev, [])) - - return filematcher - -def _makenofollowlogfilematcher(repo, pats, opts): - '''hook for extensions to override the filematcher for non-follow cases''' - return None - -def _makelogrevset(repo, pats, opts, revs): - """Return (expr, filematcher) where expr is a revset string built - from log options and file patterns or None. If --stat or --patch - are not passed filematcher is None. Otherwise it is a callable - taking a revision number and returning a match objects filtering - the files to be detailed when displaying the revision. +def _makelogmatcher(repo, revs, pats, opts): + """Build matcher and expanded patterns from log options + + If --follow, revs are the revisions to follow from. 
+ + Returns (match, pats, slowpath) where + - match: a matcher built from the given pats and -I/-X opts + - pats: patterns used (globs are expanded on Windows) + - slowpath: True if patterns aren't as simple as scanning filelogs """ - opt2revset = { - 'no_merges': ('not merge()', None), - 'only_merges': ('merge()', None), - '_ancestors': ('ancestors(%(val)s)', None), - '_fancestors': ('_firstancestors(%(val)s)', None), - '_descendants': ('descendants(%(val)s)', None), - '_fdescendants': ('_firstdescendants(%(val)s)', None), - '_matchfiles': ('_matchfiles(%(val)s)', None), - 'date': ('date(%(val)r)', None), - 'branch': ('branch(%(val)r)', ' or '), - '_patslog': ('filelog(%(val)r)', ' or '), - '_patsfollow': ('follow(%(val)r)', ' or '), - '_patsfollowfirst': ('_followfirst(%(val)r)', ' or '), - 'keyword': ('keyword(%(val)r)', ' or '), - 'prune': ('not (%(val)r or ancestors(%(val)r))', ' and '), - 'user': ('user(%(val)r)', ' or '), - } - - opts = dict(opts) - # follow or not follow? - follow = opts.get('follow') or opts.get('follow_first') - if opts.get('follow_first'): - followfirst = 1 - else: - followfirst = 0 - # --follow with FILE behavior depends on revs... - it = iter(revs) - startrev = next(it) - followdescendants = startrev < next(it, startrev) - - # branch and only_branch are really aliases and must be handled at - # the same time - opts['branch'] = opts.get('branch', []) + opts.get('only_branch', []) - opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']] # pats/include/exclude are passed to match.match() directly in # _matchfiles() revset but walkchangerevs() builds its matcher with # scmutil.match(). The difference is input pats are globbed on # platforms without shell expansion (windows). 
wctx = repo[None] match, pats = scmutil.matchandpats(wctx, pats, opts) - slowpath = match.anypats() or ((match.isexact() or match.prefix()) and - opts.get('removed')) + slowpath = match.anypats() or (not match.always() and opts.get('removed')) if not slowpath: + follow = opts.get('follow') or opts.get('follow_first') + startctxs = [] + if follow and opts.get('rev'): + startctxs = [repo[r] for r in revs] for f in match.files(): - if follow and f not in wctx: + if follow and startctxs: + # No idea if the path was a directory at that revision, so + # take the slow path. + if any(f not in c for c in startctxs): + slowpath = True + continue + elif follow and f not in wctx: # If the file exists, it may be a directory, so let it # take the slow path. if os.path.exists(repo.wjoin(f)): @@ -2417,7 +2452,7 @@ continue else: raise error.Abort(_('cannot follow file not in parent ' - 'revision: "%s"') % f) + 'revision: "%s"') % f) filelog = repo.file(f) if not filelog: # A zero count may be a directory or deleted file, so @@ -2438,15 +2473,62 @@ else: slowpath = False - fpats = ('_patsfollow', '_patsfollowfirst') - fnopats = (('_ancestors', '_fancestors'), - ('_descendants', '_fdescendants')) + return match, pats, slowpath + +def _fileancestors(repo, revs, match, followfirst): + fctxs = [] + for r in revs: + ctx = repo[r] + fctxs.extend(ctx[f].introfilectx() for f in ctx.walk(match)) + + # When displaying a revision with --patch --follow FILE, we have + # to know which file of the revision must be diffed. With + # --follow, we want the names of the ancestors of FILE in the + # revision, stored in "fcache". "fcache" is populated as a side effect + # of the graph traversal. 
+ fcache = {} + def filematcher(rev): + return scmutil.matchfiles(repo, fcache.get(rev, [])) + + def revgen(): + for rev, cs in dagop.filectxancestors(fctxs, followfirst=followfirst): + fcache[rev] = [c.path() for c in cs] + yield rev + return smartset.generatorset(revgen(), iterasc=False), filematcher + +def _makenofollowlogfilematcher(repo, pats, opts): + '''hook for extensions to override the filematcher for non-follow cases''' + return None + +_opt2logrevset = { + 'no_merges': ('not merge()', None), + 'only_merges': ('merge()', None), + '_matchfiles': (None, '_matchfiles(%ps)'), + 'date': ('date(%s)', None), + 'branch': ('branch(%s)', '%lr'), + '_patslog': ('filelog(%s)', '%lr'), + 'keyword': ('keyword(%s)', '%lr'), + 'prune': ('ancestors(%s)', 'not %lr'), + 'user': ('user(%s)', '%lr'), +} + +def _makelogrevset(repo, match, pats, slowpath, opts): + """Return a revset string built from log options and file patterns""" + opts = dict(opts) + # follow or not follow? + follow = opts.get('follow') or opts.get('follow_first') + + # branch and only_branch are really aliases and must be handled at + # the same time + opts['branch'] = opts.get('branch', []) + opts.get('only_branch', []) + opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']] + if slowpath: # See walkchangerevs() slow path. # # pats/include/exclude cannot be represented as separate # revset expressions as their filtering logic applies at file - # level. For instance "-I a -X a" matches a revision touching + # level. For instance "-I a -X b" matches a revision touching # "a" and "b" while "file(a) and not file(b)" does # not. Besides, filesets are evaluated against the working # directory. @@ -2457,130 +2539,84 @@ matchargs.append('i:' + p) for p in opts.get('exclude', []): matchargs.append('x:' + p) - matchargs = ','.join(('%r' % p) for p in matchargs) opts['_matchfiles'] = matchargs - if follow: - opts[fnopats[0][followfirst]] = '.' 
- else: - if follow: - if pats: - # follow() revset interprets its file argument as a - # manifest entry, so use match.files(), not pats. - opts[fpats[followfirst]] = list(match.files()) - else: - op = fnopats[followdescendants][followfirst] - opts[op] = 'rev(%d)' % startrev - else: - opts['_patslog'] = list(pats) - - filematcher = None - if opts.get('patch') or opts.get('stat'): - # When following files, track renames via a special matcher. - # If we're forced to take the slowpath it means we're following - # at least one pattern/directory, so don't bother with rename tracking. - if follow and not match.always() and not slowpath: - # _makefollowlogfilematcher expects its files argument to be - # relative to the repo root, so use match.files(), not pats. - filematcher = _makefollowlogfilematcher(repo, match.files(), - followfirst) - else: - filematcher = _makenofollowlogfilematcher(repo, pats, opts) - if filematcher is None: - filematcher = lambda rev: match + elif not follow: + opts['_patslog'] = list(pats) expr = [] for op, val in sorted(opts.iteritems()): if not val: continue - if op not in opt2revset: + if op not in _opt2logrevset: continue - revop, andor = opt2revset[op] - if '%(val)' not in revop: + revop, listop = _opt2logrevset[op] + if revop and '%' not in revop: expr.append(revop) + elif not listop: + expr.append(revsetlang.formatspec(revop, val)) else: - if not isinstance(val, list): - e = revop % {'val': val} - else: - e = '(' + andor.join((revop % {'val': v}) for v in val) + ')' - expr.append(e) + if revop: + val = [revsetlang.formatspec(revop, v) for v in val] + expr.append(revsetlang.formatspec(listop, val)) if expr: expr = '(' + ' and '.join(expr) + ')' else: expr = None - return expr, filematcher + return expr def _logrevs(repo, opts): - # Default --rev value depends on --follow but --follow behavior - # depends on revisions resolved from --rev... 
+ """Return the initial set of revisions to be filtered or followed""" follow = opts.get('follow') or opts.get('follow_first') if opts.get('rev'): revs = scmutil.revrange(repo, opts['rev']) elif follow and repo.dirstate.p1() == nullid: revs = smartset.baseset() elif follow: - revs = repo.revs('reverse(:.)') + revs = repo.revs('.') else: revs = smartset.spanset(repo) revs.reverse() return revs -def getgraphlogrevs(repo, pats, opts): - """Return (revs, expr, filematcher) where revs is an iterable of - revision numbers, expr is a revset string built from log options - and file patterns or None, and used to filter 'revs'. If --stat or - --patch are not passed filematcher is None. Otherwise it is a - callable taking a revision number and returning a match objects - filtering the files to be detailed when displaying the revision. +def getlogrevs(repo, pats, opts): + """Return (revs, filematcher) where revs is a smartset + + filematcher is a callable taking a revision number and returning a match + objects filtering the files to be detailed when displaying the revision. 
""" + follow = opts.get('follow') or opts.get('follow_first') + followfirst = opts.get('follow_first') limit = loglimit(opts) revs = _logrevs(repo, opts) if not revs: - return smartset.baseset(), None, None - expr, filematcher = _makelogrevset(repo, pats, opts, revs) - if opts.get('rev'): + return smartset.baseset(), None + match, pats, slowpath = _makelogmatcher(repo, revs, pats, opts) + filematcher = None + if follow: + if slowpath or match.always(): + revs = dagop.revancestors(repo, revs, followfirst=followfirst) + else: + revs, filematcher = _fileancestors(repo, revs, match, followfirst) + revs.reverse() + if filematcher is None: + filematcher = _makenofollowlogfilematcher(repo, pats, opts) + if filematcher is None: + def filematcher(rev): + return match + + expr = _makelogrevset(repo, match, pats, slowpath, opts) + if opts.get('graph') and opts.get('rev'): # User-specified revs might be unsorted, but don't sort before # _makelogrevset because it might depend on the order of revs if not (revs.isdescending() or revs.istopo()): revs.sort(reverse=True) if expr: - matcher = revset.match(repo.ui, expr) + matcher = revset.match(None, expr) revs = matcher(repo, revs) if limit is not None: - limitedrevs = [] - for idx, rev in enumerate(revs): - if idx >= limit: - break - limitedrevs.append(rev) - revs = smartset.baseset(limitedrevs) - - return revs, expr, filematcher - -def getlogrevs(repo, pats, opts): - """Return (revs, expr, filematcher) where revs is an iterable of - revision numbers, expr is a revset string built from log options - and file patterns or None, and used to filter 'revs'. If --stat or - --patch are not passed filematcher is None. Otherwise it is a - callable taking a revision number and returning a match objects - filtering the files to be detailed when displaying the revision. 
- """ - limit = loglimit(opts) - revs = _logrevs(repo, opts) - if not revs: - return smartset.baseset([]), None, None - expr, filematcher = _makelogrevset(repo, pats, opts, revs) - if expr: - matcher = revset.match(repo.ui, expr) - revs = matcher(repo, revs) - if limit is not None: - limitedrevs = [] - for idx, r in enumerate(revs): - if limit <= idx: - break - limitedrevs.append(r) - revs = smartset.baseset(limitedrevs) - - return revs, expr, filematcher + revs = revs.slice(0, limit) + return revs, filematcher def _parselinerangelogopt(repo, opts): """Parse --line-range log option and return a list of tuples (filename, @@ -2675,18 +2711,13 @@ return templatekw.showgraphnode # fast path for "{graphnode}" spec = templater.unquotestring(spec) - templ = formatter.maketemplater(ui, spec) - cache = {} + tres = formatter.templateresources(ui) if isinstance(displayer, changeset_templater): - cache = displayer.cache # reuse cache of slow templates - props = templatekw.keywords.copy() - props['templ'] = templ - props['cache'] = cache + tres['cache'] = displayer.cache # reuse cache of slow templates + templ = formatter.maketemplater(ui, spec, defaults=templatekw.keywords, + resources=tres) def formatnode(repo, ctx): - props['ctx'] = ctx - props['repo'] = repo - props['ui'] = repo.ui - props['revcache'] = {} + props = {'ctx': ctx, 'repo': repo, 'revcache': {}} return templ.render(props) return formatnode @@ -2733,7 +2764,7 @@ firstedge = next(edges) width = firstedge[2] displayer.show(ctx, copies=copies, matchfn=revmatchfn, - _graphwidth=width, **props) + _graphwidth=width, **pycompat.strkwargs(props)) lines = displayer.hunk.pop(rev).split('\n') if not lines[-1]: del lines[-1] @@ -2743,9 +2774,8 @@ lines = [] displayer.close() -def graphlog(ui, repo, pats, opts): +def graphlog(ui, repo, revs, filematcher, opts): # Parameters are identical to log command ones - revs, expr, filematcher = getgraphlogrevs(repo, pats, opts) revdag = graphmod.dagwalker(repo, revs) getrenamed = None 
@@ -2975,8 +3005,9 @@ for f in remaining: count += 1 ui.progress(_('skipping'), count, total=total, unit=_('files')) - warnings.append(_('not removing %s: file still exists\n') - % m.rel(f)) + if ui.verbose or (f in files): + warnings.append(_('not removing %s: file still exists\n') + % m.rel(f)) ret = 1 ui.progress(_('skipping'), None) else: @@ -3021,21 +3052,34 @@ return ret +def _updatecatformatter(fm, ctx, matcher, path, decode): + """Hook for adding data to the formatter used by ``hg cat``. + + Extensions (e.g., lfs) can wrap this to inject keywords/data, but must call + this method first.""" + data = ctx[path].data() + if decode: + data = ctx.repo().wwritedata(path, data) + fm.startitem() + fm.write('data', '%s', data) + fm.data(abspath=path, path=matcher.rel(path)) + def cat(ui, repo, ctx, matcher, basefm, fntemplate, prefix, **opts): err = 1 + opts = pycompat.byteskwargs(opts) def write(path): filename = None if fntemplate: filename = makefilename(repo, fntemplate, ctx.node(), pathname=os.path.join(prefix, path)) + # attempt to create the directory if it does not already exist + try: + os.makedirs(os.path.dirname(filename)) + except OSError: + pass with formatter.maybereopen(basefm, filename, opts) as fm: - data = ctx[path].data() - if opts.get('decode'): - data = repo.wwritedata(path, data) - fm.startitem() - fm.write('data', '%s', data) - fm.data(abspath=path, path=matcher.rel(path)) + _updatecatformatter(fm, ctx, matcher, path, opts.get('decode')) # Automation often uses hg cat on single files, so special case it # for performance to avoid the cost of parsing the manifest. 
@@ -3060,7 +3104,8 @@ submatch = matchmod.subdirmatcher(subpath, matcher) if not sub.cat(submatch, basefm, fntemplate, - os.path.join(prefix, sub._path), **opts): + os.path.join(prefix, sub._path), + **pycompat.strkwargs(opts)): err = 0 except error.RepoLookupError: ui.status(_("skipping missing subrepository: %s\n") @@ -3124,6 +3169,8 @@ # base o - first parent of the changeset to amend wctx = repo[None] + # Copy to avoid mutating input + extra = extra.copy() # Update extra dict from amended commit (e.g. to preserve graft # source) extra.update(old.extra()) @@ -3200,7 +3247,7 @@ fctx = wctx[path] flags = fctx.flags() - mctx = context.memfilectx(repo, + mctx = context.memfilectx(repo, ctx_, fctx.path(), fctx.data(), islink='l' in flags, isexec='x' in flags, @@ -3445,6 +3492,7 @@ return repo.status(match=scmutil.match(repo[None], pats, opts)) def revert(ui, repo, ctx, parents, *pats, **opts): + opts = pycompat.byteskwargs(opts) parent, p2 = parents node = ctx.node() @@ -3706,7 +3754,7 @@ else: util.rename(target, bakname) if ui.verbose or not exact: - if not isinstance(msg, basestring): + if not isinstance(msg, bytes): msg = msg(abs) ui.status(msg % rel) elif exact: @@ -3722,7 +3770,8 @@ # Revert the subrepos on the revert list for sub in targetsubs: try: - wctx.sub(sub).revert(ctx.substate[sub], *pats, **opts) + wctx.sub(sub).revert(ctx.substate[sub], *pats, + **pycompat.strkwargs(opts)) except KeyError: raise error.Abort("subrepository '%s' does not exist in %s!" 
% (sub, short(ctx.node()))) @@ -3802,9 +3851,8 @@ operation = 'discard' reversehunks = True if node != parent: - operation = 'revert' - reversehunks = repo.ui.configbool('experimental', - 'revertalternateinteractivemode') + operation = 'apply' + reversehunks = False if reversehunks: diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts) else: @@ -3869,6 +3917,7 @@ repo.dirstate.copy(copied[f], f) class command(registrar.command): + """deprecated: used registrar.command instead""" def _doregister(self, func, name, *args, **kwargs): func._deprecatedregistrar = True # flag for deprecwarn in extensions.py return super(command, self)._doregister(func, name, *args, **kwargs) diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/color.py --- a/mercurial/color.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/color.py Mon Jan 22 17:53:02 2018 -0500 @@ -87,12 +87,14 @@ 'branches.inactive': 'none', 'diff.changed': 'white', 'diff.deleted': 'red', + 'diff.deleted.highlight': 'red bold underline', 'diff.diffline': 'bold', 'diff.extended': 'cyan bold', 'diff.file_a': 'red bold', 'diff.file_b': 'green bold', 'diff.hunk': 'magenta', 'diff.inserted': 'green', + 'diff.inserted.highlight': 'green bold underline', 'diff.tab': '', 'diff.trailingwhitespace': 'bold red_background', 'changeset.public': '', @@ -100,6 +102,15 @@ 'changeset.secret': '', 'diffstat.deleted': 'red', 'diffstat.inserted': 'green', + 'formatvariant.name.mismatchconfig': 'red', + 'formatvariant.name.mismatchdefault': 'yellow', + 'formatvariant.name.uptodate': 'green', + 'formatvariant.repo.mismatchconfig': 'red', + 'formatvariant.repo.mismatchdefault': 'yellow', + 'formatvariant.repo.uptodate': 'green', + 'formatvariant.config.special': 'yellow', + 'formatvariant.config.default': 'green', + 'formatvariant.default': '', 'histedit.remaining': 'red bold', 'ui.prompt': 'yellow', 'log.changeset': 'yellow', @@ -181,7 +192,7 @@ configstyles(ui) def _modesetup(ui): - if ui.plain(): + if ui.plain('color'): return None config 
= ui.config('ui', 'color') if config == 'debug': @@ -473,7 +484,7 @@ _win32print(ui, text, writefunc, **opts) def _win32print(ui, text, writefunc, **opts): - label = opts.get('label', '') + label = opts.get(r'label', '') attr = origattr def mapcolor(val, attr): diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/commands.py --- a/mercurial/commands.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/commands.py Mon Jan 22 17:53:02 2018 -0500 @@ -43,12 +43,14 @@ lock as lockmod, merge as mergemod, obsolete, + obsutil, patch, phases, pycompat, rcutil, registrar, revsetlang, + rewriteutil, scmutil, server, sshserver, @@ -65,6 +67,7 @@ table.update(debugcommandsmod.command._table) command = registrar.command(table) +readonly = registrar.command.readonly # common command options @@ -102,10 +105,6 @@ _("when to paginate (boolean, always, auto, or never)"), _('TYPE')), ] -# options which must be pre-parsed before loading configs and extensions -# TODO: perhaps --debugger should be included -earlyoptflags = ("--cwd", "-R", "--repository", "--repo", "--config") - dryrunopts = cmdutil.dryrunopts remoteopts = cmdutil.remoteopts walkopts = cmdutil.walkopts @@ -295,7 +294,10 @@ # to mimic the behavior of Mercurial before version 1.5 opts['file'] = True - ctx = scmutil.revsingle(repo, opts.get('rev')) + rev = opts.get('rev') + if rev: + repo = scmutil.unhidehashlikerevs(repo, [rev], 'nowarn') + ctx = scmutil.revsingle(repo, rev) rootfm = ui.formatter('annotate', opts) if ui.quiet: @@ -466,7 +468,10 @@ ''' opts = pycompat.byteskwargs(opts) - ctx = scmutil.revsingle(repo, opts.get('rev')) + rev = opts.get('rev') + if rev: + repo = scmutil.unhidehashlikerevs(repo, [rev], 'nowarn') + ctx = scmutil.revsingle(repo, rev) if not ctx: raise error.Abort(_('no working directory: please specify a revision')) node = ctx.node() @@ -857,7 +862,7 @@ ui.status(_('changeset %d:%s: %s\n') % (ctx, ctx, transition)) hbisect.checkstate(state) # bisect - nodes, changesets, bgood = 
hbisect.bisect(repo.changelog, state) + nodes, changesets, bgood = hbisect.bisect(repo, state) # update to next check node = nodes[0] mayupdate(repo, node, show_stats=False) @@ -870,7 +875,7 @@ hbisect.checkstate(state) # actually bisect - nodes, changesets, good = hbisect.bisect(repo.changelog, state) + nodes, changesets, good = hbisect.bisect(repo, state) if extend: if not changesets: extendnode = hbisect.extendrange(repo, state, nodes, good) @@ -997,7 +1002,9 @@ @command('branch', [('f', 'force', None, _('set branch name even if it shadows an existing branch')), - ('C', 'clean', None, _('reset branch name to parent branch name'))], + ('C', 'clean', None, _('reset branch name to parent branch name')), + ('r', 'rev', [], _('change branches of the given revs (EXPERIMENTAL)')), + ], _('[-fC] [NAME]')) def branch(ui, repo, label=None, **opts): """set or show the current branch name @@ -1029,10 +1036,13 @@ Returns 0 on success. """ opts = pycompat.byteskwargs(opts) + revs = opts.get('rev') if label: label = label.strip() if not opts.get('clean') and not label: + if revs: + raise error.Abort(_("no branch name specified for the revisions")) ui.write("%s\n" % repo.dirstate.branch()) return @@ -1042,13 +1052,18 @@ repo.dirstate.setbranch(label) ui.status(_('reset working directory to branch %s\n') % label) elif label: + + scmutil.checknewlabel(repo, label, 'branch') + if revs: + return cmdutil.changebranch(ui, repo, revs, label) + if not opts.get('force') and label in repo.branchmap(): if label not in [p.branch() for p in repo[None].parents()]: raise error.Abort(_('a branch of the same name already' ' exists'), # i18n: "it" refers to an existing branch hint=_("use 'hg update' to switch to it")) - scmutil.checknewlabel(repo, label, 'branch') + repo.dirstate.setbranch(label) ui.status(_('marked working directory as branch %s\n') % label) @@ -1064,7 +1079,7 @@ _('show only branches that have unmerged heads (DEPRECATED)')), ('c', 'closed', False, _('show normal and closed 
branches')), ] + formatteropts, - _('[-c]')) + _('[-c]'), cmdtype=readonly) def branches(ui, repo, active=False, closed=False, **opts): """list repository named branches @@ -1258,7 +1273,7 @@ ('', 'decode', None, _('apply any matching decode filter')), ] + walkopts + formatteropts, _('[OPTION]... FILE...'), - inferrepo=True) + inferrepo=True, cmdtype=readonly) def cat(ui, repo, file1, *pats, **opts): """output the current or given revision of files @@ -1280,7 +1295,11 @@ Returns 0 on success. """ - ctx = scmutil.revsingle(repo, opts.get('rev')) + opts = pycompat.byteskwargs(opts) + rev = opts.get('rev') + if rev: + repo = scmutil.unhidehashlikerevs(repo, [rev], 'nowarn') + ctx = scmutil.revsingle(repo, rev) m = scmutil.match(ctx, (file1,) + pats, opts) fntemplate = opts.pop('output', '') if cmdutil.isstdiofilename(fntemplate): @@ -1292,7 +1311,8 @@ ui.pager('cat') fm = ui.formatter('cat', opts) with fm: - return cmdutil.cat(ui, repo, ctx, m, fm, fntemplate, '', **opts) + return cmdutil.cat(ui, repo, ctx, m, fm, fntemplate, '', + **pycompat.strkwargs(opts)) @command('^clone', [('U', 'noupdate', None, _('the clone will include an empty working ' @@ -1544,13 +1564,7 @@ raise error.Abort(_('cannot amend with ui.commitsubrepos enabled')) old = repo['.'] - if not old.mutable(): - raise error.Abort(_('cannot amend public changesets')) - if len(repo[None].parents()) > 1: - raise error.Abort(_('cannot amend while merging')) - allowunstable = obsolete.isenabled(repo, obsolete.allowunstableopt) - if not allowunstable and old.children(): - raise error.Abort(_('cannot amend changeset with children')) + rewriteutil.precheck(repo, [old.rev()], 'amend') # Currently histedit gets confused if an amend happens while histedit # is in progress. 
Since we have a checkunfinished command, we are @@ -1604,7 +1618,7 @@ ('l', 'local', None, _('edit repository config')), ('g', 'global', None, _('edit global config'))] + formatteropts, _('[-u] [NAME]...'), - optionalrepo=True) + optionalrepo=True, cmdtype=readonly) def config(ui, repo, *values, **opts): """show combined config settings from all hgrc files @@ -1751,7 +1765,7 @@ def debugcomplete(ui, cmd='', **opts): """returns the completion list associated with the given command""" - if opts.get('options'): + if opts.get(r'options'): options = [] otables = [globalopts] if cmd: @@ -1777,7 +1791,7 @@ ('c', 'change', '', _('change made by revision'), _('REV')) ] + diffopts + diffopts2 + walkopts + subrepoopts, _('[OPTION]... ([-c REV] | [-r REV1 [-r REV2]]) [FILE]...'), - inferrepo=True) + inferrepo=True, cmdtype=readonly) def diff(ui, repo, *pats, **opts): """diff repository (or selected files) @@ -1846,9 +1860,11 @@ msg = _('cannot specify --rev and --change at the same time') raise error.Abort(msg) elif change: + repo = scmutil.unhidehashlikerevs(repo, [change], 'nowarn') node2 = scmutil.revsingle(repo, change, None).node() node1 = repo[node2].p1().node() else: + repo = scmutil.unhidehashlikerevs(repo, revs, 'nowarn') node1, node2 = scmutil.revpair(repo, revs) if reverse: @@ -1867,7 +1883,7 @@ ('', 'switch-parent', None, _('diff against the second parent')), ('r', 'rev', [], _('revisions to export'), _('REV')), ] + diffopts, - _('[OPTION]... [-o OUTFILESPEC] [-r] [REV]...')) + _('[OPTION]... 
[-o OUTFILESPEC] [-r] [REV]...'), cmdtype=readonly) def export(ui, repo, *changesets, **opts): """dump the header and diffs for one or more changesets @@ -1932,6 +1948,7 @@ changesets += tuple(opts.get('rev', [])) if not changesets: changesets = ['.'] + repo = scmutil.unhidehashlikerevs(repo, changesets, 'nowarn') revs = scmutil.revrange(repo, changesets) if not revs: raise error.Abort(_("export requires at least one changeset")) @@ -1948,7 +1965,7 @@ [('r', 'rev', '', _('search the repository as it is in REV'), _('REV')), ('0', 'print0', None, _('end filenames with NUL, for use with xargs')), ] + walkopts + formatteropts + subrepoopts, - _('[OPTION]... [FILE]...')) + _('[OPTION]... [FILE]...'), cmdtype=readonly) def files(ui, repo, *pats, **opts): """list tracked files @@ -1995,7 +2012,10 @@ """ opts = pycompat.byteskwargs(opts) - ctx = scmutil.revsingle(repo, opts.get('rev'), None) + rev = opts.get('rev') + if rev: + repo = scmutil.unhidehashlikerevs(repo, [rev], 'nowarn') + ctx = scmutil.revsingle(repo, rev, None) end = '\n' if opts.get('print0'): @@ -2321,7 +2341,7 @@ ('d', 'date', None, _('list the date (short with -q)')), ] + formatteropts + walkopts, _('[OPTION]... 
PATTERN [FILE]...'), - inferrepo=True) + inferrepo=True, cmdtype=readonly) def grep(ui, repo, pattern, *pats, **opts): """search revision history for a pattern in specified files @@ -2564,7 +2584,7 @@ ('a', 'active', False, _('show active branchheads only (DEPRECATED)')), ('c', 'closed', False, _('show normal and closed branch heads')), ] + templateopts, - _('[-ct] [-r STARTREV] [REV]...')) + _('[-ct] [-r STARTREV] [REV]...'), cmdtype=readonly) def heads(ui, repo, *branchrevs, **opts): """show branch heads @@ -2592,8 +2612,10 @@ opts = pycompat.byteskwargs(opts) start = None - if 'rev' in opts: - start = scmutil.revsingle(repo, opts['rev'], None).node() + rev = opts.get('rev') + if rev: + repo = scmutil.unhidehashlikerevs(repo, [rev], 'nowarn') + start = scmutil.revsingle(repo, rev, None).node() if opts.get('topo'): heads = [repo[h] for h in repo.heads(start)] @@ -2637,7 +2659,7 @@ ('s', 'system', [], _('show help for specific platform(s)')), ], _('[-ecks] [TOPIC]'), - norepo=True) + norepo=True, cmdtype=readonly) def help_(ui, name=None, **opts): """show help for a given topic or a help overview @@ -2679,7 +2701,7 @@ ('B', 'bookmarks', None, _('show bookmarks')), ] + remoteopts + formatteropts, _('[-nibtB] [-r REV] [SOURCE]'), - optionalrepo=True) + optionalrepo=True, cmdtype=readonly) def identify(ui, repo, source=None, rev=None, num=None, id=None, branch=None, tags=None, bookmarks=None, **opts): """identify the working directory or specified revision @@ -2777,6 +2799,8 @@ fm.data(node=hex(remoterev)) fm.data(bookmarks=fm.formatlist(bms, name='bookmark')) else: + if rev: + repo = scmutil.unhidehashlikerevs(repo, [rev], 'nowarn') ctx = scmutil.revsingle(repo, rev, None) if ctx.rev() is None: @@ -3254,7 +3278,7 @@ _('do not display revision or any of its ancestors'), _('REV')), ] + logopts + walkopts, _('[OPTION]... 
[FILE]'), - inferrepo=True) + inferrepo=True, cmdtype=readonly) def log(ui, repo, *pats, **opts): """show revision history of entire repository or files @@ -3268,7 +3292,7 @@ File history is shown without following rename or copy history of files. Use -f/--follow with a filename to follow history across renames and copies. --follow without a filename will only show - ancestors or descendants of the starting revision. + ancestors of the starting revision. By default this command prints revision number and changeset id, tags, non-trivial parents, user, date and time, and a summary for @@ -3393,17 +3417,14 @@ _('FILE arguments are not compatible with --line-range option') ) - if opts.get('follow') and opts.get('rev'): - opts['rev'] = [revsetlang.formatspec('reverse(::%lr)', opts.get('rev'))] - del opts['follow'] + repo = scmutil.unhidehashlikerevs(repo, opts.get('rev'), 'nowarn') + revs, filematcher = cmdutil.getlogrevs(repo, pats, opts) + hunksfilter = None if opts.get('graph'): if linerange: raise error.Abort(_('graph not supported with line range patterns')) - return cmdutil.graphlog(ui, repo, pats, opts) - - revs, expr, filematcher = cmdutil.getlogrevs(repo, pats, opts) - hunksfilter = None + return cmdutil.graphlog(ui, repo, revs, filematcher, opts) if linerange: revs, lrfilematcher, hunksfilter = cmdutil.getloglinerangerevs( @@ -3420,9 +3441,6 @@ elif filematcher is None: filematcher = lrfilematcher - limit = cmdutil.loglimit(opts) - count = 0 - getrenamed = None if opts.get('copies'): endrev = None @@ -3433,8 +3451,6 @@ ui.pager('log') displayer = cmdutil.show_changeset(ui, repo, opts, buffered=True) for rev in revs: - if count == limit: - break ctx = repo[rev] copies = None if getrenamed is not None and rev: @@ -3453,8 +3469,7 @@ revhunksfilter = None displayer.show(ctx, copies=copies, matchfn=revmatchfn, hunksfilterfn=revhunksfilter) - if displayer.flush(ctx): - count += 1 + displayer.flush(ctx) displayer.close() @@ -3462,7 +3477,7 @@ [('r', 'rev', '', 
_('revision to display'), _('REV')), ('', 'all', False, _("list files from all revisions"))] + formatteropts, - _('[-r REV]')) + _('[-r REV]'), cmdtype=readonly) def manifest(ui, repo, node=None, rev=None, **opts): """output the current or given revision of the project manifest @@ -3509,6 +3524,8 @@ char = {'l': '@', 'x': '*', '': ''} mode = {'l': '644', 'x': '755', '': '644'} + if node: + repo = scmutil.unhidehashlikerevs(repo, [node], 'nowarn') ctx = scmutil.revsingle(repo, node) mf = ctx.manifest() ui.pager('manifest') @@ -3525,7 +3542,8 @@ _('force a merge including outstanding changes (DEPRECATED)')), ('r', 'rev', '', _('revision to merge'), _('REV')), ('P', 'preview', None, - _('review revisions to merge (no merge is performed)')) + _('review revisions to merge (no merge is performed)')), + ('', 'abort', None, _('abort the ongoing merge')), ] + mergetoolopts, _('[-P] [[-r] REV]')) def merge(ui, repo, node=None, **opts): @@ -3550,7 +3568,7 @@ See :hg:`help resolve` for information on handling file conflicts. - To undo an uncommitted merge, use :hg:`update --clean .` which + To undo an uncommitted merge, use :hg:`merge --abort` which will check out a clean copy of the original merge parent, losing all changes. 
@@ -3558,6 +3576,16 @@ """ opts = pycompat.byteskwargs(opts) + abort = opts.get('abort') + if abort and repo.dirstate.p2() == nullid: + cmdutil.wrongtooltocontinue(repo, _('merge')) + if abort: + if node: + raise error.Abort(_("cannot specify a node with --abort")) + if opts.get('rev'): + raise error.Abort(_("cannot specify both --rev and --abort")) + if opts.get('preview'): + raise error.Abort(_("cannot specify --preview with --abort")) if opts.get('rev') and node: raise error.Abort(_("please specify just one revision")) if not node: @@ -3566,7 +3594,7 @@ if node: node = scmutil.revsingle(repo, node).node() - if not node: + if not node and not abort: node = repo[destutil.destmerge(repo)].node() if opts.get('preview'): @@ -3587,7 +3615,7 @@ force = opts.get('force') labels = ['working copy', 'merge rev'] return hg.merge(repo, node, force=force, mergeforce=force, - labels=labels) + labels=labels, abort=abort) finally: ui.setconfig('ui', 'forcemerge', '', 'merge') @@ -3696,7 +3724,10 @@ """ opts = pycompat.byteskwargs(opts) - ctx = scmutil.revsingle(repo, opts.get('rev'), None) + rev = opts.get('rev') + if rev: + repo = scmutil.unhidehashlikerevs(repo, [rev], 'nowarn') + ctx = scmutil.revsingle(repo, rev, None) if file_: m = scmutil.match(ctx, (file_,), opts) @@ -3726,7 +3757,8 @@ displayer.show(repo[n]) displayer.close() -@command('paths', formatteropts, _('[NAME]'), optionalrepo=True) +@command('paths', formatteropts, _('[NAME]'), optionalrepo=True, + cmdtype=readonly) def paths(ui, repo, search=None, **opts): """show aliases for remote repositories @@ -3841,7 +3873,6 @@ revs = scmutil.revrange(repo, revs) - lock = None ret = 0 if targetphase is None: # display @@ -3849,10 +3880,7 @@ ctx = repo[r] ui.write('%i: %s\n' % (ctx.rev(), ctx.phasestr())) else: - tr = None - lock = repo.lock() - try: - tr = repo.transaction("phase") + with repo.lock(), repo.transaction("phase") as tr: # set phase if not revs: raise error.Abort(_('empty revision set')) @@ -3865,11 +3893,6 
@@ phases.advanceboundary(repo, tr, targetphase, nodes) if opts['force']: phases.retractboundary(repo, tr, targetphase, nodes) - tr.close() - finally: - if tr is not None: - tr.release() - lock.release() getphase = unfi._phasecache.phase newdata = [getphase(unfi, r) for r in unfi] changes = sum(newdata[r] != olddata[r] for r in unfi) @@ -3923,7 +3946,7 @@ @command('^pull', [('u', 'update', None, - _('update to new branch head if changesets were pulled')), + _('update to new branch head if new descendants were pulled')), ('f', 'force', None, _('run even when remote repository is unrelated')), ('r', 'rev', [], _('a remote changeset intended to be added'), _('REV')), ('B', 'bookmark', [], _("bookmark to pull"), _('BOOKMARK')), @@ -3978,12 +4001,13 @@ # not ending up with the name of the bookmark because of a race # condition on the server. (See issue 4689 for details) remotebookmarks = other.listkeys('bookmarks') + remotebookmarks = bookmarks.unhexlifybookmarks(remotebookmarks) pullopargs['remotebookmarks'] = remotebookmarks for b in opts['bookmark']: b = repo._bookmarks.expandname(b) if b not in remotebookmarks: raise error.Abort(_('remote bookmark %s not found!') % b) - revs.append(remotebookmarks[b]) + revs.append(hex(remotebookmarks[b])) if revs: try: @@ -4002,36 +4026,40 @@ "so a rev cannot be specified.") raise error.Abort(err) - pullopargs.update(opts.get('opargs', {})) - modheads = exchange.pull(repo, other, heads=revs, - force=opts.get('force'), - bookmarks=opts.get('bookmark', ()), - opargs=pullopargs).cgresult - - # brev is a name, which might be a bookmark to be activated at - # the end of the update. In other words, it is an explicit - # destination of the update - brev = None - - if checkout: - checkout = str(repo.changelog.rev(checkout)) - - # order below depends on implementation of - # hg.addbranchrevs(). opts['bookmark'] is ignored, - # because 'checkout' is determined without it. 
- if opts.get('rev'): - brev = opts['rev'][0] - elif opts.get('branch'): - brev = opts['branch'][0] - else: - brev = branches[0] - repo._subtoppath = source - try: - ret = postincoming(ui, repo, modheads, opts.get('update'), - checkout, brev) - - finally: - del repo._subtoppath + wlock = util.nullcontextmanager() + if opts.get('update'): + wlock = repo.wlock() + with wlock: + pullopargs.update(opts.get('opargs', {})) + modheads = exchange.pull(repo, other, heads=revs, + force=opts.get('force'), + bookmarks=opts.get('bookmark', ()), + opargs=pullopargs).cgresult + + # brev is a name, which might be a bookmark to be activated at + # the end of the update. In other words, it is an explicit + # destination of the update + brev = None + + if checkout: + checkout = str(repo.changelog.rev(checkout)) + + # order below depends on implementation of + # hg.addbranchrevs(). opts['bookmark'] is ignored, + # because 'checkout' is determined without it. + if opts.get('rev'): + brev = opts['rev'][0] + elif opts.get('branch'): + brev = opts['branch'][0] + else: + brev = branches[0] + repo._subtoppath = source + try: + ret = postincoming(ui, repo, modheads, opts.get('update'), + checkout, brev) + + finally: + del repo._subtoppath finally: other.close() @@ -4522,8 +4550,7 @@ ('d', 'date', '', _('tipmost revision matching date'), _('DATE')), ('r', 'rev', '', _('revert to the specified revision'), _('REV')), ('C', 'no-backup', None, _('do not save backup copies of files')), - ('i', 'interactive', None, - _('interactively select the changes (EXPERIMENTAL)')), + ('i', 'interactive', None, _('interactively select the changes')), ] + walkopts + dryrunopts, _('[OPTION]... [-r REV] [NAME]...')) def revert(ui, repo, *pats, **opts): @@ -4563,6 +4590,7 @@ Returns 0 on success. 
""" + opts = pycompat.byteskwargs(opts) if opts.get("date"): if opts.get("rev"): raise error.Abort(_("you can't specify a revision and a date")) @@ -4574,7 +4602,10 @@ raise error.Abort(_('uncommitted merge with no revision specified'), hint=_("use 'hg update' or see 'hg help revert'")) - ctx = scmutil.revsingle(repo, opts.get('rev')) + rev = opts.get('rev') + if rev: + repo = scmutil.unhidehashlikerevs(repo, [rev], 'nowarn') + ctx = scmutil.revsingle(repo, rev) if (not (pats or opts.get('include') or opts.get('exclude') or opts.get('all') or opts.get('interactive'))): @@ -4598,7 +4629,8 @@ hint = _("use --all to revert all files") raise error.Abort(msg, hint=hint) - return cmdutil.revert(ui, repo, ctx, (parent, p2), *pats, **opts) + return cmdutil.revert(ui, repo, ctx, (parent, p2), *pats, + **pycompat.strkwargs(opts)) @command('rollback', dryrunopts + [('f', 'force', False, _('ignore safety measures'))]) @@ -4653,7 +4685,7 @@ return repo.rollback(dryrun=opts.get(r'dry_run'), force=opts.get(r'force')) -@command('root', []) +@command('root', [], cmdtype=readonly) def root(ui, repo): """print the root (top) of the current working directory @@ -4701,7 +4733,7 @@ Please note that the server does not implement access control. This means that, by default, anybody can read from the server and - nobody can write to it by default. Set the ``web.allow_push`` + nobody can write to it by default. Set the ``web.allow-push`` option to ``*`` to allow everybody to push to the server. You should use a real web server if you need to authenticate users. @@ -4747,7 +4779,7 @@ ('', 'change', '', _('list the changed files of a revision'), _('REV')), ] + walkopts + subrepoopts + formatteropts, _('[OPTION]... 
[FILE]...'), - inferrepo=True) + inferrepo=True, cmdtype=readonly) def status(ui, repo, *pats, **opts): """show changed files in the working directory @@ -4845,9 +4877,11 @@ msg = _('cannot use --terse with --rev') raise error.Abort(msg) elif change: + repo = scmutil.unhidehashlikerevs(repo, [change], 'nowarn') node2 = scmutil.revsingle(repo, change, None).node() node1 = repo[node2].p1().node() else: + repo = scmutil.unhidehashlikerevs(repo, revs, 'nowarn') node1, node2 = scmutil.revpair(repo, revs) if pats or ui.configbool('commands', 'status.relative'): @@ -4912,7 +4946,8 @@ fm.end() @command('^summary|sum', - [('', 'remote', None, _('check for push and pull'))], '[--remote]') + [('', 'remote', None, _('check for push and pull'))], + '[--remote]', cmdtype=readonly) def summary(ui, repo, **opts): """summarize working directory state @@ -5313,7 +5348,7 @@ finally: release(lock, wlock) -@command('tags', formatteropts, '') +@command('tags', formatteropts, '', cmdtype=readonly) def tags(ui, repo, **opts): """list repository tags @@ -5510,7 +5545,17 @@ # if we defined a bookmark, we have to remember the original name brev = rev - rev = scmutil.revsingle(repo, rev, rev).rev() + if rev: + repo = scmutil.unhidehashlikerevs(repo, [rev], 'nowarn') + ctx = scmutil.revsingle(repo, rev, rev) + rev = ctx.rev() + if ctx.hidden(): + ctxstr = ctx.hex()[:12] + ui.warn(_("updating to a hidden changeset %s\n") % ctxstr) + + if ctx.obsolete(): + obsfatemsg = obsutil._getfilteredreason(repo, ctxstr, ctx) + ui.warn("(%s)\n" % obsfatemsg) repo.ui.setconfig('ui', 'forcemerge', tool, 'update') @@ -5536,7 +5581,7 @@ """ return hg.verify(repo) -@command('version', [] + formatteropts, norepo=True) +@command('version', [] + formatteropts, norepo=True, cmdtype=readonly) def version_(ui, **opts): """output version and copyright information""" opts = pycompat.byteskwargs(opts) @@ -5548,7 +5593,7 @@ util.version()) license = _( "(see https://mercurial-scm.org for more information)\n" - 
"\nCopyright (C) 2005-2017 Matt Mackall and others\n" + "\nCopyright (C) 2005-2018 Matt Mackall and others\n" "This is free software; see the source for copying conditions. " "There is NO\nwarranty; " "not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n" diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/commandserver.py --- a/mercurial/commandserver.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/commandserver.py Mon Jan 22 17:53:02 2018 -0500 @@ -17,11 +17,11 @@ import traceback from .i18n import _ +from .thirdparty import selectors2 from . import ( encoding, error, pycompat, - selectors2, util, ) @@ -247,13 +247,13 @@ req = dispatch.request(args[:], copiedui, self.repo, self.cin, self.cout, self.cerr) - ret = (dispatch.dispatch(req) or 0) & 255 # might return None - - # restore old cwd - if '--cwd' in args: - os.chdir(self.cwd) - - self.cresult.write(struct.pack('>i', int(ret))) + try: + ret = (dispatch.dispatch(req) or 0) & 255 # might return None + self.cresult.write(struct.pack('>i', int(ret))) + finally: + # restore old cwd + if '--cwd' in args: + os.chdir(self.cwd) def getencoding(self): """ writes the current encoding to the result channel """ @@ -449,6 +449,8 @@ def init(self): self._sock = socket.socket(socket.AF_UNIX) self._servicehandler.bindsocket(self._sock, self.address) + if util.safehasattr(util, 'unblocksignal'): + util.unblocksignal(signal.SIGCHLD) o = signal.signal(signal.SIGCHLD, self._sigchldhandler) self._oldsigchldhandler = o self._socketunlinked = False diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/configitems.py --- a/mercurial/configitems.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/configitems.py Mon Jan 22 17:53:02 2018 -0500 @@ -362,6 +362,9 @@ coreconfigitem('devel', 'warn-config-unknown', default=None, ) +coreconfigitem('devel', 'debug.peer-request', + default=False, +) coreconfigitem('diff', 'nodates', default=False, ) @@ -428,6 +431,9 @@ coreconfigitem('experimental', 'bundle2.pushback', default=False, ) 
+coreconfigitem('experimental', 'bundle2.stream', + default=False, +) coreconfigitem('experimental', 'bundle2lazylocking', default=False, ) @@ -452,6 +458,12 @@ coreconfigitem('experimental', 'crecordtest', default=None, ) +coreconfigitem('experimental', 'directaccess', + default=False, +) +coreconfigitem('experimental', 'directaccess.revnums', + default=False, +) coreconfigitem('experimental', 'editortmpinhg', default=False, ) @@ -469,7 +481,7 @@ default=None, ) coreconfigitem('experimental', 'evolution.effect-flags', - default=False, + default=True, alias=[('experimental', 'effect-flags')] ) coreconfigitem('experimental', 'evolution.exchange', @@ -478,9 +490,15 @@ coreconfigitem('experimental', 'evolution.bundle-obsmarker', default=False, ) +coreconfigitem('experimental', 'evolution.report-instabilities', + default=True, +) coreconfigitem('experimental', 'evolution.track-operation', default=True, ) +coreconfigitem('experimental', 'worddiff', + default=False, +) coreconfigitem('experimental', 'maxdeltachainspan', default=-1, ) @@ -529,15 +547,15 @@ coreconfigitem('experimental', 'obsmarkers-exchange-debug', default=False, ) -coreconfigitem('experimental', 'rebase.multidest', +coreconfigitem('experimental', 'remotenames', default=False, ) -coreconfigitem('experimental', 'revertalternateinteractivemode', - default=True, -) coreconfigitem('experimental', 'revlogv2', default=None, ) +coreconfigitem('experimental', 'single-head-per-branch', + default=False, +) coreconfigitem('experimental', 'spacemovesdown', default=False, ) @@ -553,6 +571,9 @@ coreconfigitem('experimental', 'treemanifest', default=False, ) +coreconfigitem('experimental', 'update.atomic-file', + default=False, +) coreconfigitem('extensions', '.*', default=None, generic=True, @@ -838,6 +859,9 @@ coreconfigitem('push', 'pushvars.server', default=False, ) +coreconfigitem('server', 'bookmarks-pushkey-compat', + default=True, +) coreconfigitem('server', 'bundle1', default=True, ) @@ -1060,6 +1084,9 @@ 
coreconfigitem('ui', 'ssh', default='ssh', ) +coreconfigitem('ui', 'ssherrorhint', + default=None, +) coreconfigitem('ui', 'statuscopies', default=False, ) @@ -1078,6 +1105,9 @@ coreconfigitem('ui', 'timeout', default='600', ) +coreconfigitem('ui', 'timeout.warn', + default=0, +) coreconfigitem('ui', 'traceback', default=False, ) @@ -1102,10 +1132,12 @@ coreconfigitem('web', 'allowgz', default=False, ) -coreconfigitem('web', 'allowpull', +coreconfigitem('web', 'allow-pull', + alias=[('web', 'allowpull')], default=True, ) -coreconfigitem('web', 'allow_push', +coreconfigitem('web', 'allow-push', + alias=[('web', 'allow_push')], default=list, ) coreconfigitem('web', 'allowzip', @@ -1239,6 +1271,9 @@ coreconfigitem('worker', 'backgroundclosethreadcount', default=4, ) +coreconfigitem('worker', 'enabled', + default=True, +) coreconfigitem('worker', 'numcpus', default=None, ) @@ -1255,3 +1290,6 @@ coreconfigitem('rebase', 'singletransaction', default=False, ) +coreconfigitem('rebase', 'experimental.inmemory', + default=False, +) diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/context.py --- a/mercurial/context.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/context.py Mon Jan 22 17:53:02 2018 -0500 @@ -36,6 +36,7 @@ match as matchmod, mdiff, obsolete as obsmod, + obsutil, patch, pathutil, phases, @@ -354,7 +355,7 @@ ctx2 = self.p1() if ctx2 is not None: ctx2 = self._repo[ctx2] - diffopts = patch.diffopts(self._repo.ui, opts) + diffopts = patch.diffopts(self._repo.ui, pycompat.byteskwargs(opts)) return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts) def dirs(self): @@ -433,8 +434,20 @@ This is extracted in a function to help extensions (eg: evolve) to experiment with various message variants.""" if repo.filtername.startswith('visible'): - msg = _("hidden revision '%s'") % changeid + + # Check if the changeset is obsolete + unfilteredrepo = repo.unfiltered() + ctx = unfilteredrepo[changeid] + + # If the changeset is obsolete, enrich the message with the 
reason + # that made this changeset not visible + if ctx.obsolete(): + msg = obsutil._getfilteredreason(repo, changeid, ctx) + else: + msg = _("hidden revision '%s'") % changeid + hint = _('use --hidden to access hidden revisions') + return error.FilteredRepoLookupError(msg, hint=hint) msg = _("filtered revision '%s' (not in '%s' subset)") msg %= (changeid, repo.filtername) @@ -615,10 +628,13 @@ def closesbranch(self): return 'close' in self._changeset.extra def extra(self): + """Return a dict of extra information.""" return self._changeset.extra def tags(self): + """Return a list of byte tag names""" return self._repo.nodetags(self._node) def bookmarks(self): + """Return a list of byte bookmark names.""" return self._repo.nodebookmarks(self._node) def phase(self): return self._repo._phasecache.phase(self._repo, self._rev) @@ -629,7 +645,11 @@ return False def children(self): - """return contexts for each child changeset""" + """return list of changectx contexts for each child changeset. + + This returns only the immediate child changesets. Use descendants() to + recursively walk children. + """ c = self._repo.changelog.children(self._node) return [changectx(self._repo, x) for x in c] @@ -638,6 +658,10 @@ yield changectx(self._repo, a) def descendants(self): + """Recursively yield all children of the changeset. 
+ + For just the immediate children, use children() + """ for d in self._repo.changelog.descendants([self._rev]): yield changectx(self._repo, d) @@ -819,6 +843,10 @@ return self._changectx.phase() def phasestr(self): return self._changectx.phasestr() + def obsolete(self): + return self._changectx.obsolete() + def instabilities(self): + return self._changectx.instabilities() def manifest(self): return self._changectx.manifest() def changectx(self): @@ -931,6 +959,14 @@ return self.linkrev() return self._adjustlinkrev(self.rev(), inclusive=True) + def introfilectx(self): + """Return filectx having identical contents, but pointing to the + changeset revision where this filectx was introduced""" + introrev = self.introrev() + if self.rev() == introrev: + return self + return self.filectx(self.filenode(), changeid=introrev) + def _parentfilectx(self, path, fileid, filelog): """create parent filectx keeping ancestry info for _adjustlinkrev()""" fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog) @@ -1021,19 +1057,16 @@ return pl # use linkrev to find the first changeset where self appeared - base = self - introrev = self.introrev() - if self.rev() != introrev: - base = self.filectx(self.filenode(), changeid=introrev) + base = self.introfilectx() if getattr(base, '_ancestrycontext', None) is None: cl = self._repo.changelog - if introrev is None: + if base.rev() is None: # wctx is not inclusive, but works because _ancestrycontext # is used to test filelog revisions ac = cl.ancestors([p.rev() for p in base.parents()], inclusive=True) else: - ac = cl.ancestors([introrev], inclusive=True) + ac = cl.ancestors([base.rev()], inclusive=True) base._ancestrycontext = ac # This algorithm would prefer to be recursive, but Python is a @@ -1088,7 +1121,7 @@ hist[f] = curr del pcache[f] - return zip(hist[base][0], hist[base][1].splitlines(True)) + return pycompat.ziplist(hist[base][0], hist[base][1].splitlines(True)) def ancestors(self, followfirst=False): visit = {} @@ 
-1633,9 +1666,6 @@ listsubrepos=listsubrepos, badfn=badfn, icasefs=icasefs) - def flushall(self): - pass # For overlayworkingfilectx compatibility. - def _filtersuspectsymlink(self, files): if not files or self._repo.dirstate._checklink: return files @@ -1932,10 +1962,11 @@ """wraps unlink for a repo's working directory""" self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing) - def write(self, data, flags, backgroundclose=False): + def write(self, data, flags, backgroundclose=False, **kwargs): """wraps repo.wwrite""" self._repo.wwrite(self._path, data, flags, - backgroundclose=backgroundclose) + backgroundclose=backgroundclose, + **kwargs) def markcopied(self, src): """marks this file a copy of `src`""" @@ -1959,25 +1990,33 @@ def setflags(self, l, x): self._repo.wvfs.setflags(self._path, l, x) -class overlayworkingctx(workingctx): - """Wraps another mutable context with a write-back cache that can be flushed - at a later time. +class overlayworkingctx(committablectx): + """Wraps another mutable context with a write-back cache that can be + converted into a commit context. self._cache[path] maps to a dict with keys: { 'exists': bool? 'date': date? 'data': str? 'flags': str? + 'copied': str? (path or None) } If `exists` is True, `flags` must be non-None and 'date' is non-None. If it is `False`, the file was deleted. """ - def __init__(self, repo, wrappedctx): + def __init__(self, repo): super(overlayworkingctx, self).__init__(repo) self._repo = repo + self.clean() + + def setbase(self, wrappedctx): self._wrappedctx = wrappedctx - self._clean() + self._parents = [wrappedctx] + # Drop old manifest cache as it is now out of date. + # This is necessary when, e.g., rebasing several nodes with one + # ``overlayworkingctx`` (e.g. with --collapse). 
+ util.clearcachedproperty(self, '_manifest') def data(self, path): if self.isdirty(path): @@ -1989,10 +2028,47 @@ return self._wrappedctx[path].data() else: raise error.ProgrammingError("No such file or directory: %s" % - self._path) + path) else: return self._wrappedctx[path].data() + @propertycache + def _manifest(self): + parents = self.parents() + man = parents[0].manifest().copy() + + flag = self._flagfunc + for path in self.added(): + man[path] = addednodeid + man.setflag(path, flag(path)) + for path in self.modified(): + man[path] = modifiednodeid + man.setflag(path, flag(path)) + for path in self.removed(): + del man[path] + return man + + @propertycache + def _flagfunc(self): + def f(path): + return self._cache[path]['flags'] + return f + + def files(self): + return sorted(self.added() + self.modified() + self.removed()) + + def modified(self): + return [f for f in self._cache.keys() if self._cache[f]['exists'] and + self._existsinparent(f)] + + def added(self): + return [f for f in self._cache.keys() if self._cache[f]['exists'] and + not self._existsinparent(f)] + + def removed(self): + return [f for f in self._cache.keys() if + not self._cache[f]['exists'] and self._existsinparent(f)] + def isinmemory(self): return True @@ -2002,6 +2078,18 @@ else: return self._wrappedctx[path].date() + def markcopied(self, path, origin): + if self.isdirty(path): + self._cache[path]['copied'] = origin + else: + raise error.ProgrammingError('markcopied() called on clean context') + + def copydata(self, path): + if self.isdirty(path): + return self._cache[path]['copied'] + else: + raise error.ProgrammingError('copydata() called on clean context') + def flags(self, path): if self.isdirty(path): if self._cache[path]['exists']: @@ -2012,9 +2100,60 @@ else: return self._wrappedctx[path].flags() - def write(self, path, data, flags=''): + def _existsinparent(self, path): + try: + # ``commitctx` raises a ``ManifestLookupError`` if a path does not + # exist, unlike 
``workingctx``, which returns a ``workingfilectx`` + # with an ``exists()`` function. + self._wrappedctx[path] + return True + except error.ManifestLookupError: + return False + + def _auditconflicts(self, path): + """Replicates conflict checks done by wvfs.write(). + + Since we never write to the filesystem and never call `applyupdates` in + IMM, we'll never check that a path is actually writable -- e.g., because + it adds `a/foo`, but `a` is actually a file in the other commit. + """ + def fail(path, component): + # p1() is the base and we're receiving "writes" for p2()'s + # files. + if 'l' in self.p1()[component].flags(): + raise error.Abort("error: %s conflicts with symlink %s " + "in %s." % (path, component, + self.p1().rev())) + else: + raise error.Abort("error: '%s' conflicts with file '%s' in " + "%s." % (path, component, + self.p1().rev())) + + # Test that each new directory to be created to write this path from p2 + # is not a file in p1. + components = path.split('/') + for i in xrange(len(components)): + component = "/".join(components[0:i]) + if component in self.p1(): + fail(path, component) + + # Test the other direction -- that this path from p2 isn't a directory + # in p1 (test that p1 doesn't any paths matching `path/*`). 
+ match = matchmod.match('/', '', [path + '/'], default=b'relpath') + matches = self.p1().manifest().matches(match) + if len(matches) > 0: + if len(matches) == 1 and matches.keys()[0] == path: + return + raise error.Abort("error: file '%s' cannot be written because " + " '%s/' is a folder in %s (containing %d " + "entries: %s)" + % (path, path, self.p1(), len(matches), + ', '.join(matches.keys()))) + + def write(self, path, data, flags='', **kwargs): if data is None: raise error.ProgrammingError("data must be non-None") + self._auditconflicts(path) self._markdirty(path, exists=True, data=data, date=util.makedate(), flags=flags) @@ -2037,13 +2176,15 @@ return self.exists(self._cache[path]['data'].strip()) else: return self._cache[path]['exists'] - return self._wrappedctx[path].exists() + + return self._existsinparent(path) def lexists(self, path): """lexists returns True if the path exists""" if self.isdirty(path): return self._cache[path]['exists'] - return self._wrappedctx[path].lexists() + + return self._existsinparent(path) def size(self, path): if self.isdirty(path): @@ -2054,48 +2195,90 @@ self._path) return self._wrappedctx[path].size() - def flushall(self): - for path in self._writeorder: - entry = self._cache[path] - if entry['exists']: - self._wrappedctx[path].clearunknown() - if entry['data'] is not None: - if entry['flags'] is None: - raise error.ProgrammingError('data set but not flags') - self._wrappedctx[path].write( - entry['data'], - entry['flags']) - else: - self._wrappedctx[path].setflags( - 'l' in entry['flags'], - 'x' in entry['flags']) + def tomemctx(self, text, branch=None, extra=None, date=None, parents=None, + user=None, editor=None): + """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be + committed. + + ``text`` is the commit message. + ``parents`` (optional) are rev numbers. + """ + # Default parents to the wrapped contexts' if not passed. 
+ if parents is None: + parents = self._wrappedctx.parents() + if len(parents) == 1: + parents = (parents[0], None) + + # ``parents`` is passed as rev numbers; convert to ``commitctxs``. + if parents[1] is None: + parents = (self._repo[parents[0]], None) + else: + parents = (self._repo[parents[0]], self._repo[parents[1]]) + + files = self._cache.keys() + def getfile(repo, memctx, path): + if self._cache[path]['exists']: + return memfilectx(repo, memctx, path, + self._cache[path]['data'], + 'l' in self._cache[path]['flags'], + 'x' in self._cache[path]['flags'], + self._cache[path]['copied']) else: - self._wrappedctx[path].remove(path) - self._clean() + # Returning None, but including the path in `files`, is + # necessary for memctx to register a deletion. + return None + return memctx(self._repo, parents, text, files, getfile, date=date, + extra=extra, user=user, branch=branch, editor=editor) def isdirty(self, path): return path in self._cache - def _clean(self): + def isempty(self): + # We need to discard any keys that are actually clean before the empty + # commit check. + self._compact() + return len(self._cache) == 0 + + def clean(self): self._cache = {} - self._writeorder = [] + + def _compact(self): + """Removes keys from the cache that are actually clean, by comparing + them with the underlying context. + + This can occur during the merge process, e.g. by passing --tool :local + to resolve a conflict. + """ + keys = [] + for path in self._cache.keys(): + cache = self._cache[path] + try: + underlying = self._wrappedctx[path] + if (underlying.data() == cache['data'] and + underlying.flags() == cache['flags']): + keys.append(path) + except error.ManifestLookupError: + # Path not in the underlying manifest (created). 
+ continue + + for path in keys: + del self._cache[path] + return keys def _markdirty(self, path, exists, data=None, date=None, flags=''): - if path not in self._cache: - self._writeorder.append(path) - self._cache[path] = { 'exists': exists, 'data': data, 'date': date, 'flags': flags, + 'copied': None, } def filectx(self, path, filelog=None): return overlayworkingfilectx(self._repo, path, parent=self, filelog=filelog) -class overlayworkingfilectx(workingfilectx): +class overlayworkingfilectx(committablefilectx): """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory cache, which can be flushed through later by calling ``flush()``.""" @@ -2109,7 +2292,7 @@ def cmp(self, fctx): return self.data() != fctx.data() - def ctx(self): + def changectx(self): return self._parent def data(self): @@ -2125,16 +2308,17 @@ return self._parent.exists(self._path) def renamed(self): - # Copies are currently tracked in the dirstate as before. Straight copy - # from workingfilectx. - rp = self._repo.dirstate.copied(self._path) - if not rp: + path = self._parent.copydata(self._path) + if not path: return None - return rp, self._changectx._parents[0]._manifest.get(rp, nullid) + return path, self._changectx._parents[0]._manifest.get(path, nullid) def size(self): return self._parent.size(self._path) + def markcopied(self, origin): + self._parent.markcopied(self._path, origin) + def audit(self): pass @@ -2144,12 +2328,15 @@ def setflags(self, islink, isexec): return self._parent.setflags(self._path, islink, isexec) - def write(self, data, flags, backgroundclose=False): - return self._parent.write(self._path, data, flags) + def write(self, data, flags, backgroundclose=False, **kwargs): + return self._parent.write(self._path, data, flags, **kwargs) def remove(self, ignoremissing=False): return self._parent.remove(self._path) + def clearunknown(self): + pass + class workingcommitctx(workingctx): """A workingcommitctx object makes access to data related to the revision being 
committed convenient. @@ -2215,9 +2402,9 @@ copied = fctx.renamed() if copied: copied = copied[0] - return memfilectx(repo, path, fctx.data(), + return memfilectx(repo, memctx, path, fctx.data(), islink=fctx.islink(), isexec=fctx.isexec(), - copied=copied, memctx=memctx) + copied=copied) return getfilectx @@ -2231,9 +2418,8 @@ if data is None: return None islink, isexec = mode - return memfilectx(repo, path, data, islink=islink, - isexec=isexec, copied=copied, - memctx=memctx) + return memfilectx(repo, memctx, path, data, islink=islink, + isexec=isexec, copied=copied) return getfilectx @@ -2365,8 +2551,8 @@ See memctx and committablefilectx for more details. """ - def __init__(self, repo, path, data, islink=False, - isexec=False, copied=None, memctx=None): + def __init__(self, repo, changectx, path, data, islink=False, + isexec=False, copied=None): """ path is the normalized file path relative to repository root. data is the file content as a string. @@ -2374,7 +2560,7 @@ isexec is True if the file is executable. 
copied is the source file path if current file was copied in the revision being committed, or None.""" - super(memfilectx, self).__init__(repo, path, None, memctx) + super(memfilectx, self).__init__(repo, path, None, changectx) self._data = data self._flags = (islink and 'l' or '') + (isexec and 'x' or '') self._copied = None @@ -2389,7 +2575,7 @@ # need to figure out what to do here del self._changectx[self._path] - def write(self, data, flags): + def write(self, data, flags, **kwargs): """wraps repo.wwrite""" self._data = data @@ -2598,7 +2784,7 @@ def remove(self): util.unlink(self._path) - def write(self, data, flags): + def write(self, data, flags, **kwargs): assert not flags with open(self._path, "w") as f: f.write(data) diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/copies.py --- a/mercurial/copies.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/copies.py Mon Jan 22 17:53:02 2018 -0500 @@ -107,7 +107,7 @@ return min(limit, a, b) def _chain(src, dst, a, b): - '''chain two sets of copies a->b''' + """chain two sets of copies a->b""" t = a.copy() for k, v in b.iteritems(): if v in t: @@ -130,8 +130,8 @@ return t def _tracefile(fctx, am, limit=-1): - '''return file context that is the ancestor of fctx present in ancestor - manifest am, stopping after the first ancestor lower than limit''' + """return file context that is the ancestor of fctx present in ancestor + manifest am, stopping after the first ancestor lower than limit""" for f in fctx.ancestors(): if am.get(f.path(), None) == f.filenode(): @@ -139,11 +139,11 @@ if limit >= 0 and f.linkrev() < limit and f.rev() < limit: return None -def _dirstatecopies(d): +def _dirstatecopies(d, match=None): ds = d._repo.dirstate c = ds.copies().copy() for k in list(c): - if ds[k] not in 'anm': + if ds[k] not in 'anm' or (match and not match(k)): del c[k] return c @@ -156,18 +156,8 @@ mb = b.manifest() return mb.filesnotin(ma, match=match) -def _forwardcopies(a, b, match=None): - '''find {dst@b: src@a} copy mapping 
where a is an ancestor of b''' - - # check for working copy - w = None - if b.rev() is None: - w = b - b = w.p1() - if a == b: - # short-circuit to avoid issues with merge states - return _dirstatecopies(w) - +def _committedforwardcopies(a, b, match): + """Like _forwardcopies(), but b.rev() cannot be None (working copy)""" # files might have to be traced back to the fctx parent of the last # one-side-only changeset, but not further back than that limit = _findlimit(a._repo, a.rev(), b.rev()) @@ -199,12 +189,21 @@ ofctx = _tracefile(fctx, am, limit) if ofctx: cm[f] = ofctx.path() + return cm - # combine copies from dirstate if necessary - if w is not None: - cm = _chain(a, w, cm, _dirstatecopies(w)) +def _forwardcopies(a, b, match=None): + """find {dst@b: src@a} copy mapping where a is an ancestor of b""" - return cm + # check for working copy + if b.rev() is None: + if a == b.p1(): + # short-circuit to avoid issues with merge states + return _dirstatecopies(b, match) + + cm = _committedforwardcopies(a, b.p1(), match) + # combine copies from dirstate if necessary + return _chain(a, b, cm, _dirstatecopies(b, match)) + return _committedforwardcopies(a, b, match) def _backwardrenames(a, b): if a._repo.ui.config('experimental', 'copytrace') == 'off': @@ -223,7 +222,7 @@ return r def pathcopies(x, y, match=None): - '''find {dst@y: src@x} copy mapping for directed compare''' + """find {dst@y: src@x} copy mapping for directed compare""" if x == y or not x or not y: return {} a = y.ancestor(x) @@ -861,13 +860,13 @@ return def duplicatecopies(repo, wctx, rev, fromrev, skiprev=None): - '''reproduce copies from fromrev to rev in the dirstate + """reproduce copies from fromrev to rev in the dirstate If skiprev is specified, it's a revision that should be used to filter copy records. Any copies that occur between fromrev and skiprev will not be duplicated, even if they appear in the set of copies between fromrev and rev. 
- ''' + """ exclude = {} if (skiprev is not None and repo.ui.config('experimental', 'copytrace') != 'off'): diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/crecord.py --- a/mercurial/crecord.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/crecord.py Mon Jan 22 17:53:02 2018 -0500 @@ -555,7 +555,7 @@ return chunkselector.opts _headermessages = { # {operation: text} - 'revert': _('Select hunks to revert'), + 'apply': _('Select hunks to apply'), 'discard': _('Select hunks to discard'), None: _('Select hunks to record'), } @@ -581,6 +581,13 @@ # maps custom nicknames of color-pairs to curses color-pair values self.colorpairnames = {} + # Honor color setting of ui section. Keep colored setup as + # long as not explicitly set to a falsy value - especially, + # when not set at all. This is to stay most compatible with + # previous (color only) behaviour. + uicolor = util.parsebool(self.ui.config('ui', 'color')) + self.usecolor = uicolor is not False + # the currently selected header, hunk, or hunk-line self.currentselecteditem = self.headerlist[0] @@ -1371,11 +1378,19 @@ colorpair = self.colorpairs[(fgcolor, bgcolor)] else: pairindex = len(self.colorpairs) + 1 - curses.init_pair(pairindex, fgcolor, bgcolor) - colorpair = self.colorpairs[(fgcolor, bgcolor)] = ( - curses.color_pair(pairindex)) - if name is not None: - self.colorpairnames[name] = curses.color_pair(pairindex) + if self.usecolor: + curses.init_pair(pairindex, fgcolor, bgcolor) + colorpair = self.colorpairs[(fgcolor, bgcolor)] = ( + curses.color_pair(pairindex)) + if name is not None: + self.colorpairnames[name] = curses.color_pair(pairindex) + else: + cval = 0 + if name is not None: + if name == 'selected': + cval = curses.A_REVERSE + self.colorpairnames[name] = cval + colorpair = self.colorpairs[(fgcolor, bgcolor)] = cval # add attributes if possible if attrlist is None: @@ -1704,7 +1719,10 @@ self.yscreensize, self.xscreensize = self.stdscr.getmaxyx() curses.start_color() - curses.use_default_colors() + 
try: + curses.use_default_colors() + except curses.error: + self.usecolor = False # available colors: black, blue, cyan, green, magenta, white, yellow # init_pair(color_id, foreground_color, background_color) diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/dagop.py --- a/mercurial/dagop.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/dagop.py Mon Jan 22 17:53:02 2018 -0500 @@ -75,6 +75,46 @@ if prev != node.nullrev: heapq.heappush(pendingheap, (heapsign * prev, pdepth)) +def filectxancestors(fctxs, followfirst=False): + """Like filectx.ancestors(), but can walk from multiple files/revisions, + and includes the given fctxs themselves + + Yields (rev, {fctx, ...}) pairs in descending order. + """ + visit = {} + visitheap = [] + def addvisit(fctx): + rev = fctx.rev() + if rev not in visit: + visit[rev] = set() + heapq.heappush(visitheap, -rev) # max heap + visit[rev].add(fctx) + + if followfirst: + cut = 1 + else: + cut = None + + for c in fctxs: + addvisit(c) + while visit: + currev = -heapq.heappop(visitheap) + curfctxs = visit.pop(currev) + yield currev, curfctxs + for c in curfctxs: + for parent in c.parents()[:cut]: + addvisit(parent) + assert not visitheap + +def filerevancestors(fctxs, followfirst=False): + """Like filectx.ancestors(), but can walk from multiple files/revisions, + and includes the given fctxs themselves + + Returns a smartset. + """ + gen = (rev for rev, _cs in filectxancestors(fctxs, followfirst)) + return generatorset(gen, iterasc=False) + def _genrevancestors(repo, revs, followfirst, startdepth, stopdepth, cutfunc): if followfirst: cut = 1 @@ -251,9 +291,7 @@ `fromline`-`toline` range. 
""" diffopts = patch.diffopts(fctx._repo.ui) - introrev = fctx.introrev() - if fctx.rev() != introrev: - fctx = fctx.filectx(fctx.filenode(), changeid=introrev) + fctx = fctx.introfilectx() visit = {(fctx.linkrev(), fctx.filenode()): (fctx, (fromline, toline))} while visit: c, linerange2 = visit.pop(max(visit)) diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/dagutil.py --- a/mercurial/dagutil.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/dagutil.py Mon Jan 22 17:53:02 2018 -0500 @@ -154,8 +154,9 @@ class revlogdag(revlogbaseddag): '''dag interface to a revlog''' - def __init__(self, revlog): + def __init__(self, revlog, localsubset=None): revlogbaseddag.__init__(self, revlog, set(revlog)) + self._heads = localsubset def _getheads(self): return [r for r in self._revlog.headrevs() if r != nullrev] diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/debugcommands.py --- a/mercurial/debugcommands.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/debugcommands.py Mon Jan 22 17:53:02 2018 -0500 @@ -69,6 +69,7 @@ templater, treediscovery, upgrade, + url as urlmod, util, vfs as vfsmod, ) @@ -179,11 +180,11 @@ ui.progress(_('building'), id, unit=_('revisions'), total=total) for type, data in dagparser.parsedag(text): if type == 'n': - ui.note(('node %s\n' % str(data))) + ui.note(('node %s\n' % pycompat.bytestr(data))) id, ps = data files = [] - fctxs = {} + filecontent = {} p2 = None if mergeable_file: @@ -204,27 +205,30 @@ ml[id * linesperrev] += " r%i" % id mergedtext = "\n".join(ml) files.append(fn) - fctxs[fn] = context.memfilectx(repo, fn, mergedtext) + filecontent[fn] = mergedtext if overwritten_file: fn = "of" files.append(fn) - fctxs[fn] = context.memfilectx(repo, fn, "r%i\n" % id) + filecontent[fn] = "r%i\n" % id if new_file: fn = "nf%i" % id files.append(fn) - fctxs[fn] = context.memfilectx(repo, fn, "r%i\n" % id) + filecontent[fn] = "r%i\n" % id if len(ps) > 1: if not p2: p2 = repo[ps[1]] for fn in p2: if fn.startswith("nf"): files.append(fn) - fctxs[fn] = p2[fn] + 
filecontent[fn] = p2[fn].data() def fctxfn(repo, cx, path): - return fctxs.get(path) + if path in filecontent: + return context.memfilectx(repo, cx, path, + filecontent[path]) + return None if len(ps) == 0 or ps[0] < 0: pars = [None, None] @@ -296,7 +300,7 @@ msg %= indent_string, exc.version, len(data) ui.write(msg) else: - msg = "%sversion: %s (%d bytes)\n" + msg = "%sversion: %d (%d bytes)\n" msg %= indent_string, version, len(data) ui.write(msg) fm = ui.formatter('debugobsolete', opts) @@ -360,6 +364,25 @@ return _debugbundle2(ui, gen, all=all, **opts) _debugchangegroup(ui, gen, all=all, **opts) +@command('debugcapabilities', + [], _('PATH'), + norepo=True) +def debugcapabilities(ui, path, **opts): + """lists the capabilities of a remote peer""" + opts = pycompat.byteskwargs(opts) + peer = hg.peer(ui, opts, path) + caps = peer.capabilities() + ui.write(('Main capabilities:\n')) + for c in sorted(caps): + ui.write((' %s\n') % c) + b2caps = bundle2.bundle2caps(peer) + if b2caps: + ui.write(('Bundle2 capabilities:\n')) + for key, values in sorted(b2caps.iteritems()): + ui.write((' %s\n') % key) + for v in values: + ui.write((' %s\n') % v) + @command('debugcheckstate', [], '') def debugcheckstate(ui, repo): """validate the correctness of the current dirstate""" @@ -569,11 +592,23 @@ the delta chain for this revision :``extraratio``: extradist divided by chainsize; another representation of how much unrelated data is needed to load this delta chain + + If the repository is configured to use the sparse read, additional keywords + are available: + + :``readsize``: total size of data read from the disk for a revision + (sum of the sizes of all the blocks) + :``largestblock``: size of the largest block of data read from the disk + :``readdensity``: density of useful bytes in the data read from the disk + :``srchunks``: in how many data hunks the whole revision would be read + + The sparse read can be enabled with experimental.sparse-read = True """ opts = 
pycompat.byteskwargs(opts) r = cmdutil.openrevlog(repo, 'debugdeltachain', file_, opts) index = r.index generaldelta = r.version & revlog.FLAG_GENERALDELTA + withsparseread = getattr(r, '_withsparseread', False) def revinfo(rev): e = index[rev] @@ -609,15 +644,20 @@ fm.plain(' rev chain# chainlen prev delta ' 'size rawsize chainsize ratio lindist extradist ' - 'extraratio\n') + 'extraratio') + if withsparseread: + fm.plain(' readsize largestblk rddensity srchunks') + fm.plain('\n') chainbases = {} for rev in r: comp, uncomp, deltatype, chain, chainsize = revinfo(rev) chainbase = chain[0] chainid = chainbases.setdefault(chainbase, len(chainbases) + 1) - basestart = r.start(chainbase) - revstart = r.start(rev) + start = r.start + length = r.length + basestart = start(chainbase) + revstart = start(rev) lineardist = revstart + comp - basestart extradist = lineardist - chainsize try: @@ -632,7 +672,7 @@ fm.write('rev chainid chainlen prevrev deltatype compsize ' 'uncompsize chainsize chainratio lindist extradist ' 'extraratio', - '%7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f\n', + '%7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f', rev, chainid, len(chain), prevrev, deltatype, comp, uncomp, chainsize, chainratio, lineardist, extradist, extraratio, @@ -641,6 +681,29 @@ uncompsize=uncomp, chainsize=chainsize, chainratio=chainratio, lindist=lineardist, extradist=extradist, extraratio=extraratio) + if withsparseread: + readsize = 0 + largestblock = 0 + srchunks = 0 + + for revschunk in revlog._slicechunk(r, chain): + srchunks += 1 + blkend = start(revschunk[-1]) + length(revschunk[-1]) + blksize = blkend - start(revschunk[0]) + + readsize += blksize + if largestblock < blksize: + largestblock = blksize + + readdensity = float(chainsize) / float(readsize) + + fm.write('readsize largestblock readdensity srchunks', + ' %10d %10d %9.5f %8d', + readsize, largestblock, readdensity, srchunks, + readsize=readsize, largestblock=largestblock, + 
readdensity=readdensity, srchunks=srchunks) + + fm.plain('\n') fm.end() @@ -665,8 +728,9 @@ elif nodates: timestr = 'set ' else: - timestr = time.strftime("%Y-%m-%d %H:%M:%S ", + timestr = time.strftime(r"%Y-%m-%d %H:%M:%S ", time.localtime(ent[3])) + timestr = encoding.strtolocal(timestr) if ent[1] & 0o20000: mode = 'lnk' else: @@ -679,24 +743,21 @@ [('', 'old', None, _('use old-style discovery')), ('', 'nonheads', None, _('use old-style discovery with non-heads included')), + ('', 'rev', [], 'restrict discovery to this set of revs'), ] + cmdutil.remoteopts, - _('[-l REV] [-r REV] [-b BRANCH]... [OTHER]')) + _('[--rev REV] [OTHER]')) def debugdiscovery(ui, repo, remoteurl="default", **opts): """runs the changeset discovery protocol in isolation""" opts = pycompat.byteskwargs(opts) - remoteurl, branches = hg.parseurl(ui.expandpath(remoteurl), - opts.get('branch')) + remoteurl, branches = hg.parseurl(ui.expandpath(remoteurl)) remote = hg.peer(repo, opts, remoteurl) ui.status(_('comparing with %s\n') % util.hidepassword(remoteurl)) # make sure tests are repeatable random.seed(12323) - def doit(localheads, remoteheads, remote=remote): + def doit(pushedrevs, remoteheads, remote=remote): if opts.get('old'): - if localheads: - raise error.Abort('cannot use localheads with old style ' - 'discovery') if not util.safehasattr(remote, 'branches'): # enable in-client legacy support remote = localrepo.locallegacypeer(remote.local()) @@ -710,7 +771,12 @@ all = dag.ancestorset(dag.internalizeall(common)) common = dag.externalizeall(dag.headsetofconnecteds(all)) else: - common, any, hds = setdiscovery.findcommonheads(ui, repo, remote) + nodes = None + if pushedrevs: + revs = scmutil.revrange(repo, pushedrevs) + nodes = [repo[r].node() for r in revs] + common, any, hds = setdiscovery.findcommonheads(ui, repo, remote, + ancestorsof=nodes) common = set(common) rheads = set(hds) lheads = set(repo.heads()) @@ -721,26 +787,33 @@ elif rheads <= common: ui.write(("remote is subset\n")) - 
serverlogs = opts.get('serverlog') - if serverlogs: - for filename in serverlogs: - with open(filename, 'r') as logfile: - line = logfile.readline() - while line: - parts = line.strip().split(';') - op = parts[1] - if op == 'cg': - pass - elif op == 'cgss': - doit(parts[2].split(' '), parts[3].split(' ')) - elif op == 'unb': - doit(parts[3].split(' '), parts[2].split(' ')) - line = logfile.readline() - else: - remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches, - opts.get('remote_head')) - localrevs = opts.get('local_head') - doit(localrevs, remoterevs) + remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches, revs=None) + localrevs = opts['rev'] + doit(localrevs, remoterevs) + +_chunksize = 4 << 10 + +@command('debugdownload', + [ + ('o', 'output', '', _('path')), + ], + optionalrepo=True) +def debugdownload(ui, repo, url, output=None, **opts): + """download a resource using Mercurial logic and config + """ + fh = urlmod.open(ui, url, output) + + dest = ui + if output: + dest = open(output, "wb", _chunksize) + try: + data = fh.read(_chunksize) + while data: + dest.write(data) + data = fh.read(_chunksize) + finally: + if output: + dest.close() @command('debugextensions', cmdutil.formatteropts, [], norepo=True) def debugextensions(ui, **opts): @@ -801,9 +874,74 @@ for f in ctx.getfileset(expr): ui.write("%s\n" % f) +@command('debugformat', + [] + cmdutil.formatteropts, + _('')) +def debugformat(ui, repo, **opts): + """display format information about the current repository + + Use --verbose to get extra information about current config value and + Mercurial default.""" + opts = pycompat.byteskwargs(opts) + maxvariantlength = max(len(fv.name) for fv in upgrade.allformatvariant) + maxvariantlength = max(len('format-variant'), maxvariantlength) + + def makeformatname(name): + return '%s:' + (' ' * (maxvariantlength - len(name))) + + fm = ui.formatter('debugformat', opts) + if fm.isplain(): + def formatvalue(value): + if util.safehasattr(value, 
'startswith'): + return value + if value: + return 'yes' + else: + return 'no' + else: + formatvalue = pycompat.identity + + fm.plain('format-variant') + fm.plain(' ' * (maxvariantlength - len('format-variant'))) + fm.plain(' repo') + if ui.verbose: + fm.plain(' config default') + fm.plain('\n') + for fv in upgrade.allformatvariant: + fm.startitem() + repovalue = fv.fromrepo(repo) + configvalue = fv.fromconfig(repo) + + if repovalue != configvalue: + namelabel = 'formatvariant.name.mismatchconfig' + repolabel = 'formatvariant.repo.mismatchconfig' + elif repovalue != fv.default: + namelabel = 'formatvariant.name.mismatchdefault' + repolabel = 'formatvariant.repo.mismatchdefault' + else: + namelabel = 'formatvariant.name.uptodate' + repolabel = 'formatvariant.repo.uptodate' + + fm.write('name', makeformatname(fv.name), fv.name, + label=namelabel) + fm.write('repo', ' %3s', formatvalue(repovalue), + label=repolabel) + if fv.default != configvalue: + configlabel = 'formatvariant.config.special' + else: + configlabel = 'formatvariant.config.default' + fm.condwrite(ui.verbose, 'config', ' %6s', formatvalue(configvalue), + label=configlabel) + fm.condwrite(ui.verbose, 'default', ' %7s', formatvalue(fv.default), + label='formatvariant.default') + fm.plain('\n') + fm.end() + @command('debugfsinfo', [], _('[PATH]'), norepo=True) def debugfsinfo(ui, path="."): """show information detected about current filesystem""" + ui.write(('path: %s\n') % path) + ui.write(('mounted on: %s\n') % (util.getfsmountpoint(path) or '(unknown)')) ui.write(('exec: %s\n') % (util.checkexec(path) and 'yes' or 'no')) ui.write(('fstype: %s\n') % (util.getfstype(path) or '(unknown)')) ui.write(('symlink: %s\n') % (util.checklink(path) and 'yes' or 'no')) @@ -1066,6 +1204,11 @@ fm.formatlist([e.name() for e in wirecompengines if e.wireprotosupport()], name='compengine', fmt='%s', sep=', ')) + re2 = 'missing' + if util._re2: + re2 = 'available' + fm.plain(_('checking "re2" regexp engine (%s)\n') % re2) 
+ fm.data(re2=bool(util._re2)) # templates p = templater.templatepaths() @@ -1155,7 +1298,10 @@ @command('debuglocks', [('L', 'force-lock', None, _('free the store lock (DANGEROUS)')), ('W', 'force-wlock', None, - _('free the working state lock (DANGEROUS)'))], + _('free the working state lock (DANGEROUS)')), + ('s', 'set-lock', None, _('set the store lock until stopped')), + ('S', 'set-wlock', None, + _('set the working state lock until stopped'))], _('[OPTION]...')) def debuglocks(ui, repo, **opts): """show or modify state of locks @@ -1174,6 +1320,10 @@ instance, on a shared filesystem). Removing locks may also be blocked by filesystem permissions. + Setting a lock will prevent other commands from changing the data. + The command will wait until an interruption (SIGINT, SIGTERM, ...) occurs. + The set locks are removed when the command exits. + Returns 0 if no locks are held. """ @@ -1182,9 +1332,27 @@ repo.svfs.unlink('lock') if opts.get(r'force_wlock'): repo.vfs.unlink('wlock') - if opts.get(r'force_lock') or opts.get(r'force_lock'): + if opts.get(r'force_lock') or opts.get(r'force_wlock'): return 0 + locks = [] + try: + if opts.get(r'set_wlock'): + try: + locks.append(repo.wlock(False)) + except error.LockHeld: + raise error.Abort(_('wlock is already held')) + if opts.get(r'set_lock'): + try: + locks.append(repo.lock(False)) + except error.LockHeld: + raise error.Abort(_('lock is already held')) + if len(locks): + ui.promptchoice(_("ready to release the lock (y)? 
$$ &Yes")) + return 0 + finally: + release(*locks) + now = time.time() held = 0 @@ -2170,15 +2338,11 @@ cache = {} ctx2str = str node2str = short - if ui.debug(): - def ctx2str(ctx): - return ctx.hex() - node2str = hex for rev in scmutil.revrange(repo, revs): ctx = repo[rev] ui.write('%s\n'% ctx2str(ctx)) for succsset in obsutil.successorssets(repo, ctx.node(), - closest=opts['closest'], + closest=opts[r'closest'], cache=cache): if succsset: ui.write(' ') @@ -2228,8 +2392,8 @@ ui.note(("* expanded:\n"), templater.prettyformat(newtree), '\n') if revs is None: - t = formatter.maketemplater(ui, tmpl) - props['ui'] = ui + tres = formatter.templateresources(ui, repo) + t = formatter.maketemplater(ui, tmpl, resources=tres) ui.write(t.render(props)) else: displayer = cmdutil.makelogtemplater(ui, repo, tmpl) @@ -2304,6 +2468,7 @@ for k, v in opts.iteritems(): if v: args[k] = v + args = pycompat.strkwargs(args) # run twice to check that we don't mess up the stream for the next command res1 = repo.debugwireargs(*vals, **args) res2 = repo.debugwireargs(*vals, **args) diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/dirstate.py --- a/mercurial/dirstate.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/dirstate.py Mon Jan 22 17:53:02 2018 -0500 @@ -80,6 +80,7 @@ self._plchangecallbacks = {} self._origpl = None self._updatedfiles = set() + self._mapcls = dirstatemap @contextlib.contextmanager def parentchange(self): @@ -127,9 +128,8 @@ @propertycache def _map(self): - '''Return the dirstate contents as a map from filename to - (state, mode, size, time).''' - self._map = dirstatemap(self._ui, self._opener, self._root) + """Return the dirstate contents (see documentation for dirstatemap).""" + self._map = self._mapcls(self._ui, self._opener, self._root) return self._map @property @@ -158,8 +158,8 @@ def _pl(self): return self._map.parents() - def dirs(self): - return self._map.dirs + def hasdir(self, d): + return self._map.hastrackeddir(d) @rootcache('.hgignore') def _ignore(self): 
@@ -387,40 +387,23 @@ def copies(self): return self._map.copymap - def _droppath(self, f): - if self[f] not in "?r" and "dirs" in self._map.__dict__: - self._map.dirs.delpath(f) - - if "filefoldmap" in self._map.__dict__: - normed = util.normcase(f) - if normed in self._map.filefoldmap: - del self._map.filefoldmap[normed] - - self._updatedfiles.add(f) - def _addpath(self, f, state, mode, size, mtime): oldstate = self[f] if state == 'a' or oldstate == 'r': scmutil.checkfilename(f) - if f in self._map.dirs: + if self._map.hastrackeddir(f): raise error.Abort(_('directory %r already in dirstate') % f) # shadows for d in util.finddirs(f): - if d in self._map.dirs: + if self._map.hastrackeddir(d): break entry = self._map.get(d) if entry is not None and entry[0] != 'r': raise error.Abort( _('file %r in dirstate clashes with %r') % (d, f)) - if oldstate in "?r" and "dirs" in self._map.__dict__: - self._map.dirs.addpath(f) self._dirty = True self._updatedfiles.add(f) - self._map[f] = dirstatetuple(state, mode, size, mtime) - if state != 'n' or mtime == -1: - self._map.nonnormalset.add(f) - if size == -2: - self._map.otherparentset.add(f) + self._map.addfile(f, oldstate, state, mode, size, mtime) def normal(self, f): '''Mark a file normal and clean.''' @@ -458,8 +441,6 @@ return self._addpath(f, 'n', 0, -1, -1) self._map.copymap.pop(f, None) - if f in self._map.nonnormalset: - self._map.nonnormalset.remove(f) def otherparent(self, f): '''Mark as coming from the other parent, always dirty.''' @@ -482,7 +463,7 @@ def remove(self, f): '''Mark a file removed.''' self._dirty = True - self._droppath(f) + oldstate = self[f] size = 0 if self._pl[1] != nullid: entry = self._map.get(f) @@ -493,8 +474,8 @@ elif entry[0] == 'n' and entry[2] == -2: # other parent size = -2 self._map.otherparentset.add(f) - self._map[f] = dirstatetuple('r', 0, size, 0) - self._map.nonnormalset.add(f) + self._updatedfiles.add(f) + self._map.removefile(f, oldstate, size) if size == 0: 
self._map.copymap.pop(f, None) @@ -506,12 +487,10 @@ def drop(self, f): '''Drop a file from the dirstate''' - if f in self._map: + oldstate = self[f] + if self._map.dropfile(f, oldstate): self._dirty = True - self._droppath(f) - del self._map[f] - if f in self._map.nonnormalset: - self._map.nonnormalset.remove(f) + self._updatedfiles.add(f) self._map.copymap.pop(f, None) def _discoverpath(self, path, normed, ignoremissing, exists, storemap): @@ -635,12 +614,7 @@ # emulate dropping timestamp in 'parsers.pack_dirstate' now = _getfsnow(self._opener) - dmap = self._map - for f in self._updatedfiles: - e = dmap.get(f) - if e is not None and e[0] == 'n' and e[3] == now: - dmap[f] = dirstatetuple(e[0], e[1], e[2], -1) - self._map.nonnormalset.add(f) + self._map.clearambiguoustimes(self._updatedfiles, now) # emulate that all 'dirstate.normal' results are written out self._lastnormaltime = 0 @@ -797,7 +771,6 @@ results = dict.fromkeys(subrepos) results['.hg'] = None - alldirs = None for ff in files: # constructing the foldmap is expensive, so don't do it for the # common case where files is ['.'] @@ -828,9 +801,7 @@ if nf in dmap: # does it exactly match a missing file? results[nf] = None else: # does it match a missing directory? - if alldirs is None: - alldirs = util.dirs(dmap._map) - if nf in alldirs: + if self._map.hasdir(nf): if matchedir: matchedir(nf) notfoundadd(nf) @@ -1198,6 +1169,39 @@ self._opener.unlink(backupname) class dirstatemap(object): + """Map encapsulating the dirstate's contents. + + The dirstate contains the following state: + + - `identity` is the identity of the dirstate file, which can be used to + detect when changes have occurred to the dirstate file. + + - `parents` is a pair containing the parents of the working copy. The + parents are updated by calling `setparents`. + + - the state map maps filenames to tuples of (state, mode, size, mtime), + where state is a single character representing 'normal', 'added', + 'removed', or 'merged'. 
It is read by treating the dirstate as a + dict. File state is updated by calling the `addfile`, `removefile` and + `dropfile` methods. + + - `copymap` maps destination filenames to their source filename. + + The dirstate also provides the following views onto the state: + + - `nonnormalset` is a set of the filenames that have state other + than 'normal', or are normal but have an mtime of -1 ('normallookup'). + + - `otherparentset` is a set of the filenames that are marked as coming + from the second parent when the dirstate is currently being merged. + + - `filefoldmap` is a dict mapping normalized filenames to the denormalized + form that they appear as in the dirstate. + + - `dirfoldmap` is a dict mapping normalized directory names to the + denormalized form that they appear as in the dirstate. + """ + def __init__(self, ui, opener, root): self._ui = ui self._opener = opener @@ -1226,6 +1230,12 @@ self._map.clear() self.copymap.clear() self.setparents(nullid, nullid) + util.clearcachedproperty(self, "_dirs") + util.clearcachedproperty(self, "_alldirs") + util.clearcachedproperty(self, "filefoldmap") + util.clearcachedproperty(self, "dirfoldmap") + util.clearcachedproperty(self, "nonnormalset") + util.clearcachedproperty(self, "otherparentset") def iteritems(self): return self._map.iteritems() @@ -1242,15 +1252,9 @@ def __contains__(self, key): return key in self._map - def __setitem__(self, key, value): - self._map[key] = value - def __getitem__(self, key): return self._map[key] - def __delitem__(self, key): - del self._map[key] - def keys(self): return self._map.keys() @@ -1258,6 +1262,60 @@ """Loads the underlying data, if it's not already loaded""" self._map + def addfile(self, f, oldstate, state, mode, size, mtime): + """Add a tracked file to the dirstate.""" + if oldstate in "?r" and "_dirs" in self.__dict__: + self._dirs.addpath(f) + if oldstate == "?" 
and "_alldirs" in self.__dict__: + self._alldirs.addpath(f) + self._map[f] = dirstatetuple(state, mode, size, mtime) + if state != 'n' or mtime == -1: + self.nonnormalset.add(f) + if size == -2: + self.otherparentset.add(f) + + def removefile(self, f, oldstate, size): + """ + Mark a file as removed in the dirstate. + + The `size` parameter is used to store sentinel values that indicate + the file's previous state. In the future, we should refactor this + to be more explicit about what that state is. + """ + if oldstate not in "?r" and "_dirs" in self.__dict__: + self._dirs.delpath(f) + if oldstate == "?" and "_alldirs" in self.__dict__: + self._alldirs.addpath(f) + if "filefoldmap" in self.__dict__: + normed = util.normcase(f) + self.filefoldmap.pop(normed, None) + self._map[f] = dirstatetuple('r', 0, size, 0) + self.nonnormalset.add(f) + + def dropfile(self, f, oldstate): + """ + Remove a file from the dirstate. Returns True if the file was + previously recorded. + """ + exists = self._map.pop(f, None) is not None + if exists: + if oldstate != "r" and "_dirs" in self.__dict__: + self._dirs.delpath(f) + if "_alldirs" in self.__dict__: + self._alldirs.delpath(f) + if "filefoldmap" in self.__dict__: + normed = util.normcase(f) + self.filefoldmap.pop(normed, None) + self.nonnormalset.discard(f) + return exists + + def clearambiguoustimes(self, files, now): + for f in files: + e = self.get(f) + if e is not None and e[0] == 'n' and e[3] == now: + self._map[f] = dirstatetuple(e[0], e[1], e[2], -1) + self.nonnormalset.add(f) + def nonnormalentries(self): '''Compute the nonnormal dirstate entries from the dmap''' try: @@ -1293,13 +1351,28 @@ f['.'] = '.' # prevents useless util.fspath() invocation return f + def hastrackeddir(self, d): + """ + Returns True if the dirstate contains a tracked (not removed) file + in this directory. 
+ """ + return d in self._dirs + + def hasdir(self, d): + """ + Returns True if the dirstate contains a file (tracked or removed) + in this directory. + """ + return d in self._alldirs + @propertycache - def dirs(self): - """Returns a set-like object containing all the directories in the - current dirstate. - """ + def _dirs(self): return util.dirs(self._map, 'r') + @propertycache + def _alldirs(self): + return util.dirs(self._map) + def _opendirstatefile(self): fp, mode = txnutil.trypending(self._root, self._opener, self._filename) if self._pendingmode is not None and self._pendingmode != mode: @@ -1387,8 +1460,6 @@ # Avoid excess attribute lookups by fast pathing certain checks self.__contains__ = self._map.__contains__ self.__getitem__ = self._map.__getitem__ - self.__setitem__ = self._map.__setitem__ - self.__delitem__ = self._map.__delitem__ self.get = self._map.get def write(self, st, now): @@ -1419,6 +1490,6 @@ def dirfoldmap(self): f = {} normcase = util.normcase - for name in self.dirs: + for name in self._dirs: f[normcase(name)] = name return f diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/discovery.py --- a/mercurial/discovery.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/discovery.py Mon Jan 22 17:53:02 2018 -0500 @@ -21,12 +21,13 @@ branchmap, error, phases, + scmutil, setdiscovery, treediscovery, util, ) -def findcommonincoming(repo, remote, heads=None, force=False): +def findcommonincoming(repo, remote, heads=None, force=False, ancestorsof=None): """Return a tuple (common, anyincoming, heads) used to identify the common subset of nodes between repo and remote. @@ -37,6 +38,9 @@ changegroupsubset. No code except for pull should be relying on this fact any longer. "heads" is either the supplied heads, or else the remote's heads. + "ancestorsof" if not None, restrict the discovery to a subset defined by + these nodes. 
Changeset outside of this set won't be considered (and + won't appears in "common") If you pass heads and they are all known locally, the response lists just these heads in "common" and in "heads". @@ -59,7 +63,8 @@ return (heads, False, heads) res = setdiscovery.findcommonheads(repo.ui, repo, remote, - abortwhenunrelated=not force) + abortwhenunrelated=not force, + ancestorsof=ancestorsof) common, anyinc, srvheads = res return (list(common), anyinc, heads or list(srvheads)) @@ -141,7 +146,8 @@ # get common set if not provided if commoninc is None: - commoninc = findcommonincoming(repo, other, force=force) + commoninc = findcommonincoming(repo, other, force=force, + ancestorsof=onlyheads) og.commonheads, _any, _hds = commoninc # compute outgoing @@ -365,11 +371,8 @@ if None in unsyncedheads: # old remote, no heads data heads = None - elif len(unsyncedheads) <= 4 or repo.ui.verbose: - heads = ' '.join(short(h) for h in unsyncedheads) else: - heads = (' '.join(short(h) for h in unsyncedheads[:4]) + - ' ' + _("and %s others") % (len(unsyncedheads) - 4)) + heads = scmutil.nodesummaries(repo, unsyncedheads) if heads is None: repo.ui.status(_("remote has heads that are " "not known locally\n")) diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/dispatch.py --- a/mercurial/dispatch.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/dispatch.py Mon Jan 22 17:53:02 2018 -0500 @@ -55,7 +55,7 @@ self.fout = fout self.ferr = ferr - # remember options pre-parsed by _earlyreqopt*() + # remember options pre-parsed by _earlyparseopts() self.earlyoptions = {} # reposetups which run before extensions, useful for chg to pre-fill @@ -96,10 +96,16 @@ err = e status = -1 if util.safehasattr(req.ui, 'ferr'): - if err is not None and err.errno != errno.EPIPE: - req.ui.ferr.write('abort: %s\n' % - encoding.strtolocal(err.strerror)) - req.ui.ferr.flush() + try: + if err is not None and err.errno != errno.EPIPE: + req.ui.ferr.write('abort: %s\n' % + encoding.strtolocal(err.strerror)) + 
req.ui.ferr.flush() + # There's not much we can do about an I/O error here. So (possibly) + # change the status code and move on. + except IOError: + status = -1 + sys.exit(status & 255) def _initstdio(): @@ -150,9 +156,8 @@ try: if not req.ui: req.ui = uimod.ui.load() - if req.ui.plain('strictflags'): - req.earlyoptions.update(_earlyparseopts(req.args)) - if _earlyreqoptbool(req, 'traceback', ['--traceback']): + req.earlyoptions.update(_earlyparseopts(req.ui, req.args)) + if req.earlyoptions['traceback']: req.ui.setconfig('ui', 'traceback', 'on', '--traceback') # set ui streams from the request @@ -201,7 +206,8 @@ req.ui.flush() if req.ui.logblockedtimes: req.ui._blockedtimes['command_duration'] = duration * 1000 - req.ui.log('uiblocked', 'ui blocked ms', **req.ui._blockedtimes) + req.ui.log('uiblocked', 'ui blocked ms', + **pycompat.strkwargs(req.ui._blockedtimes)) req.ui.log("commandfinish", "%s exited %d after %0.2f seconds\n", msg, ret or 0, duration) try: @@ -266,8 +272,7 @@ # read --config before doing anything else # (e.g. to change trust settings for reading .hg/hgrc) - cfgs = _parseconfig(req.ui, - _earlyreqopt(req, 'config', ['--config'])) + cfgs = _parseconfig(req.ui, req.earlyoptions['config']) if req.repo: # copy configs that were passed on the cmdline (--config) to @@ -281,7 +286,7 @@ if not debugger or ui.plain(): # if we are in HGPLAIN mode, then disable custom debugging debugger = 'pdb' - elif _earlyreqoptbool(req, 'debugger', ['--debugger']): + elif req.earlyoptions['debugger']: # This import can be slow for fancy debuggers, so only # do it when absolutely necessary, i.e. 
when actual # debugging has been requested @@ -295,7 +300,7 @@ debugmortem[debugger] = debugmod.post_mortem # enter the debugger before command execution - if _earlyreqoptbool(req, 'debugger', ['--debugger']): + if req.earlyoptions['debugger']: ui.warn(_("entering debugger - " "type c to continue starting hg or h for help\n")) @@ -311,7 +316,7 @@ ui.flush() except: # re-raises # enter the debugger when we hit an exception - if _earlyreqoptbool(req, 'debugger', ['--debugger']): + if req.earlyoptions['debugger']: traceback.print_exc() debugmortem[debugger](sys.exc_info()[2]) raise @@ -410,7 +415,7 @@ # tokenize each argument into exactly one word. replacemap['"$@"'] = ' '.join(util.shellquote(arg) for arg in args) # escape '\$' for regex - regex = '|'.join(replacemap.keys()).replace('$', r'\$') + regex = '|'.join(replacemap.keys()).replace('$', br'\$') r = re.compile(regex) return r.sub(lambda x: replacemap[x.group()], cmd) @@ -452,10 +457,10 @@ return m.group() else: ui.debug("No argument found for substitution " - "of %i variable in alias '%s' definition." 
+ "of %i variable in alias '%s' definition.\n" % (int(m.groups()[0]), self.name)) return '' - cmd = re.sub(r'\$(\d+|\$)', _checkvar, self.definition[1:]) + cmd = re.sub(br'\$(\d+|\$)', _checkvar, self.definition[1:]) cmd = aliasinterpolate(self.name, args, cmd) return ui.system(cmd, environ=env, blockedtag='alias_%s' % self.name) @@ -468,16 +473,15 @@ self.badalias = (_("error in definition for alias '%s': %s") % (self.name, inst)) return + earlyopts, args = _earlysplitopts(args) + if earlyopts: + self.badalias = (_("error in definition for alias '%s': %s may " + "only be given on the command line") + % (self.name, '/'.join(zip(*earlyopts)[0]))) + return self.cmdname = cmd = args.pop(0) self.givenargs = args - for invalidarg in commands.earlyoptflags: - if _earlygetopt([invalidarg], args): - self.badalias = (_("error in definition for alias '%s': %s may " - "only be given on the command line") - % (self.name, invalidarg)) - return - try: tableentry = cmdutil.findcmd(cmd, cmdtable, False)[1] if len(tableentry) > 2: @@ -646,139 +650,20 @@ return configs -def _earlyparseopts(args): +def _earlyparseopts(ui, args): options = {} fancyopts.fancyopts(args, commands.globalopts, options, - gnu=False, early=True) + gnu=not ui.plain('strictflags'), early=True, + optaliases={'repository': ['repo']}) return options -def _earlygetopt(aliases, args, strip=True): - """Return list of values for an option (or aliases). - - The values are listed in the order they appear in args. - The options and values are removed from args if strip=True. 
- - >>> args = [b'x', b'--cwd', b'foo', b'y'] - >>> _earlygetopt([b'--cwd'], args), args - (['foo'], ['x', 'y']) - - >>> args = [b'x', b'--cwd=bar', b'y'] - >>> _earlygetopt([b'--cwd'], args), args - (['bar'], ['x', 'y']) - - >>> args = [b'x', b'--cwd=bar', b'y'] - >>> _earlygetopt([b'--cwd'], args, strip=False), args - (['bar'], ['x', '--cwd=bar', 'y']) - - >>> args = [b'x', b'-R', b'foo', b'y'] - >>> _earlygetopt([b'-R'], args), args - (['foo'], ['x', 'y']) - - >>> args = [b'x', b'-R', b'foo', b'y'] - >>> _earlygetopt([b'-R'], args, strip=False), args - (['foo'], ['x', '-R', 'foo', 'y']) - - >>> args = [b'x', b'-Rbar', b'y'] - >>> _earlygetopt([b'-R'], args), args - (['bar'], ['x', 'y']) - - >>> args = [b'x', b'-Rbar', b'y'] - >>> _earlygetopt([b'-R'], args, strip=False), args - (['bar'], ['x', '-Rbar', 'y']) - - >>> args = [b'x', b'-R=bar', b'y'] - >>> _earlygetopt([b'-R'], args), args - (['=bar'], ['x', 'y']) - - >>> args = [b'x', b'-R', b'--', b'y'] - >>> _earlygetopt([b'-R'], args), args - ([], ['x', '-R', '--', 'y']) - """ - try: - argcount = args.index("--") - except ValueError: - argcount = len(args) - shortopts = [opt for opt in aliases if len(opt) == 2] - values = [] - pos = 0 - while pos < argcount: - fullarg = arg = args[pos] - equals = -1 - if arg.startswith('--'): - equals = arg.find('=') - if equals > -1: - arg = arg[:equals] - if arg in aliases: - if equals > -1: - values.append(fullarg[equals + 1:]) - if strip: - del args[pos] - argcount -= 1 - else: - pos += 1 - else: - if pos + 1 >= argcount: - # ignore and let getopt report an error if there is no value - break - values.append(args[pos + 1]) - if strip: - del args[pos:pos + 2] - argcount -= 2 - else: - pos += 2 - elif arg[:2] in shortopts: - # short option can have no following space, e.g. 
hg log -Rfoo - values.append(args[pos][2:]) - if strip: - del args[pos] - argcount -= 1 - else: - pos += 1 - else: - pos += 1 - return values - -def _earlyreqopt(req, name, aliases): - """Peek a list option without using a full options table""" - if req.ui.plain('strictflags'): - return req.earlyoptions[name] - values = _earlygetopt(aliases, req.args, strip=False) - req.earlyoptions[name] = values - return values - -def _earlyreqoptstr(req, name, aliases): - """Peek a string option without using a full options table""" - if req.ui.plain('strictflags'): - return req.earlyoptions[name] - value = (_earlygetopt(aliases, req.args, strip=False) or [''])[-1] - req.earlyoptions[name] = value - return value - -def _earlyreqoptbool(req, name, aliases): - """Peek a boolean option without using a full options table - - >>> req = request([b'x', b'--debugger'], uimod.ui()) - >>> _earlyreqoptbool(req, b'debugger', [b'--debugger']) - True - - >>> req = request([b'x', b'--', b'--debugger'], uimod.ui()) - >>> _earlyreqoptbool(req, b'debugger', [b'--debugger']) - """ - if req.ui.plain('strictflags'): - return req.earlyoptions[name] - try: - argcount = req.args.index("--") - except ValueError: - argcount = len(req.args) - value = None - pos = 0 - while pos < argcount: - arg = req.args[pos] - if arg in aliases: - value = True - pos += 1 - req.earlyoptions[name] = value - return value +def _earlysplitopts(args): + """Split args into a list of possible early options and remainder args""" + shortoptions = 'R:' + # TODO: perhaps 'debugger' should be included + longoptions = ['cwd=', 'repository=', 'repo=', 'config='] + return fancyopts.earlygetopt(args, shortoptions, longoptions, + gnu=True, keepsep=True) def runcommand(lui, repo, cmd, fullargs, ui, options, d, cmdpats, cmdoptions): # run pre-hook, and abort if it fails @@ -847,8 +732,7 @@ if cmd and util.safehasattr(fn, 'shell'): # shell alias shouldn't receive early options which are consumed by hg - args = args[:] - 
_earlygetopt(commands.earlyoptflags, args, strip=True) + _earlyopts, args = _earlysplitopts(args) d = lambda: fn(ui, *args[1:]) return lambda: runcommand(lui, None, cmd, args[:1], ui, options, d, [], {}) @@ -858,11 +742,11 @@ ui = req.ui # check for cwd - cwd = _earlyreqoptstr(req, 'cwd', ['--cwd']) + cwd = req.earlyoptions['cwd'] if cwd: os.chdir(cwd) - rpath = _earlyreqoptstr(req, 'repository', ["-R", "--repository", "--repo"]) + rpath = req.earlyoptions['repository'] path, lui = _getlocal(ui, rpath) uis = {ui, lui} @@ -870,7 +754,7 @@ if req.repo: uis.add(req.repo.ui) - if _earlyreqoptbool(req, 'profile', ['--profile']): + if req.earlyoptions['profile']: for ui_ in uis: ui_.setconfig('profiling', 'enabled', 'true', '--profile') @@ -1006,10 +890,11 @@ if not func.optionalrepo: if func.inferrepo and args and not path: # try to infer -R from command args - repos = map(cmdutil.findrepo, args) + repos = pycompat.maplist(cmdutil.findrepo, args) guess = repos[0] if guess and repos.count(guess) == len(repos): req.args = ['--repository', guess] + fullargs + req.earlyoptions['repository'] = guess return _dispatch(req) if not path: raise error.RepoError(_("no repository found in" diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/error.py --- a/mercurial/error.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/error.py Mon Jan 22 17:53:02 2018 -0500 @@ -301,3 +301,7 @@ class PeerTransportError(Abort): """Transport-level I/O error when communicating with a peer repo.""" + +class InMemoryMergeConflictsError(Exception): + """Exception raised when merge conflicts arose during an in-memory merge.""" + __bytes__ = _tobytes diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/exchange.py --- a/mercurial/exchange.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/exchange.py Mon Jan 22 17:53:02 2018 -0500 @@ -13,6 +13,7 @@ from .i18n import _ from .node import ( + bin, hex, nullid, ) @@ -23,6 +24,7 @@ discovery, error, lock as lockmod, + logexchange, obsolete, phases, pushkey, @@ -512,7 
+514,11 @@ def _pushdiscoverychangeset(pushop): """discover the changeset that need to be pushed""" fci = discovery.findcommonincoming - commoninc = fci(pushop.repo, pushop.remote, force=pushop.force) + if pushop.revs: + commoninc = fci(pushop.repo, pushop.remote, force=pushop.force, + ancestorsof=pushop.revs) + else: + commoninc = fci(pushop.repo, pushop.remote, force=pushop.force) common, inc, remoteheads = commoninc fco = discovery.findcommonoutgoing outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs, @@ -742,6 +748,22 @@ or pushop.outobsmarkers or pushop.outbookmarks) +@b2partsgenerator('check-bookmarks') +def _pushb2checkbookmarks(pushop, bundler): + """insert bookmark move checking""" + if not _pushing(pushop) or pushop.force: + return + b2caps = bundle2.bundle2caps(pushop.remote) + hasbookmarkcheck = 'bookmarks' in b2caps + if not (pushop.outbookmarks and hasbookmarkcheck): + return + data = [] + for book, old, new in pushop.outbookmarks: + old = bin(old) + data.append((book, old)) + checkdata = bookmod.binaryencode(data) + bundler.newpart('check:bookmarks', data=checkdata) + @b2partsgenerator('check-phases') def _pushb2checkphases(pushop, bundler): """insert phase move checking""" @@ -879,8 +901,46 @@ if 'bookmarks' in pushop.stepsdone: return b2caps = bundle2.bundle2caps(pushop.remote) - if 'pushkey' not in b2caps: + + legacy = pushop.repo.ui.configlist('devel', 'legacy.exchange') + legacybooks = 'bookmarks' in legacy + + if not legacybooks and 'bookmarks' in b2caps: + return _pushb2bookmarkspart(pushop, bundler) + elif 'pushkey' in b2caps: + return _pushb2bookmarkspushkey(pushop, bundler) + +def _bmaction(old, new): + """small utility for bookmark pushing""" + if not old: + return 'export' + elif not new: + return 'delete' + return 'update' + +def _pushb2bookmarkspart(pushop, bundler): + pushop.stepsdone.add('bookmarks') + if not pushop.outbookmarks: return + + allactions = [] + data = [] + for book, old, new in pushop.outbookmarks: + new = 
bin(new) + data.append((book, new)) + allactions.append((book, _bmaction(old, new))) + checkdata = bookmod.binaryencode(data) + bundler.newpart('bookmarks', data=checkdata) + + def handlereply(op): + ui = pushop.ui + # if success + for book, action in allactions: + ui.status(bookmsgmap[action][0] % book) + + return handlereply + +def _pushb2bookmarkspushkey(pushop, bundler): pushop.stepsdone.add('bookmarks') part2book = [] enc = pushkey.encode @@ -955,7 +1015,8 @@ # create reply capability capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo, - allowpushback=pushback)) + allowpushback=pushback, + role='client')) bundler.newpart('replycaps', data=capsblob) replyhandlers = [] for partgenname in b2partsgenorder: @@ -1273,7 +1334,8 @@ if opargs is None: opargs = {} pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks, - streamclonerequested=streamclonerequested, **opargs) + streamclonerequested=streamclonerequested, + **pycompat.strkwargs(opargs)) peerlocal = pullop.remote.local() if peerlocal: @@ -1284,11 +1346,8 @@ " %s") % (', '.join(sorted(missing))) raise error.Abort(msg) - wlock = lock = None - try: - wlock = pullop.repo.wlock() - lock = pullop.repo.lock() - pullop.trmanager = transactionmanager(repo, 'pull', remote.url()) + pullop.trmanager = transactionmanager(repo, 'pull', remote.url()) + with repo.wlock(), repo.lock(), pullop.trmanager: # This should ideally be in _pullbundle2(). However, it needs to run # before discovery to avoid extra work. _maybeapplyclonebundle(pullop) @@ -1300,9 +1359,10 @@ _pullphase(pullop) _pullbookmarks(pullop) _pullobsolete(pullop) - pullop.trmanager.close() - finally: - lockmod.release(pullop.trmanager, lock, wlock) + + # storing remotenames + if repo.ui.configbool('experimental', 'remotenames'): + logexchange.pullremotenames(repo, remote) return pullop @@ -1348,7 +1408,8 @@ # all known bundle2 servers now support listkeys, but lets be nice with # new implementation. 
return - pullop.remotebookmarks = pullop.remote.listkeys('bookmarks') + books = pullop.remote.listkeys('bookmarks') + pullop.remotebookmarks = bookmod.unhexlifybookmarks(books) @pulldiscovery('changegroup') @@ -1388,32 +1449,59 @@ """pull data using bundle2 For now, the only supported data are changegroup.""" - kwargs = {'bundlecaps': caps20to10(pullop.repo)} + kwargs = {'bundlecaps': caps20to10(pullop.repo, role='client')} + + # make ui easier to access + ui = pullop.repo.ui # At the moment we don't do stream clones over bundle2. If that is # implemented then here's where the check for that will go. - streaming = False + streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0] - # pulling changegroup - pullop.stepsdone.add('changegroup') - + # declare pull perimeters kwargs['common'] = pullop.common kwargs['heads'] = pullop.heads or pullop.rheads - kwargs['cg'] = pullop.fetch - ui = pullop.repo.ui - legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange') - hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ()) - if (not legacyphase and hasbinaryphase): - kwargs['phases'] = True + if streaming: + kwargs['cg'] = False + kwargs['stream'] = True + pullop.stepsdone.add('changegroup') pullop.stepsdone.add('phases') + else: + # pulling changegroup + pullop.stepsdone.add('changegroup') + + kwargs['cg'] = pullop.fetch + + legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange') + hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ()) + if (not legacyphase and hasbinaryphase): + kwargs['phases'] = True + pullop.stepsdone.add('phases') + + if 'listkeys' in pullop.remotebundle2caps: + if 'phases' not in pullop.stepsdone: + kwargs['listkeys'] = ['phases'] + + bookmarksrequested = False + legacybookmark = 'bookmarks' in ui.configlist('devel', 'legacy.exchange') + hasbinarybook = 'bookmarks' in pullop.remotebundle2caps + + if pullop.remotebookmarks is not None: + pullop.stepsdone.add('request-bookmarks') + + if 
('request-bookmarks' not in pullop.stepsdone + and pullop.remotebookmarks is None + and not legacybookmark and hasbinarybook): + kwargs['bookmarks'] = True + bookmarksrequested = True + if 'listkeys' in pullop.remotebundle2caps: - if 'phases' not in pullop.stepsdone: - kwargs['listkeys'] = ['phases'] - if pullop.remotebookmarks is None: + if 'request-bookmarks' not in pullop.stepsdone: # make sure to always includes bookmark data when migrating # `hg incoming --bundle` to using this function. + pullop.stepsdone.add('request-bookmarks') kwargs.setdefault('listkeys', []).append('bookmarks') # If this is a full pull / clone and the server supports the clone bundles @@ -1441,7 +1529,9 @@ _pullbundle2extraprepare(pullop, kwargs) bundle = pullop.remote.getbundle('pull', **pycompat.strkwargs(kwargs)) try: - op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction) + op = bundle2.bundleoperation(pullop.repo, pullop.gettransaction) + op.modes['bookmarks'] = 'records' + bundle2.processbundle(pullop.repo, bundle, op=op) except bundle2.AbortFromPart as exc: pullop.repo.ui.status(_('remote: abort: %s\n') % exc) raise error.Abort(_('pull failed on remote'), hint=exc.hint) @@ -1457,9 +1547,15 @@ _pullapplyphases(pullop, value) # processing bookmark update - for namespace, value in op.records['listkeys']: - if namespace == 'bookmarks': - pullop.remotebookmarks = value + if bookmarksrequested: + books = {} + for record in op.records['bookmarks']: + books[record['bookmark']] = record["node"] + pullop.remotebookmarks = books + else: + for namespace, value in op.records['listkeys']: + if namespace == 'bookmarks': + pullop.remotebookmarks = bookmod.unhexlifybookmarks(value) # bookmark data were either already there or pulled in the bundle if pullop.remotebookmarks is not None: @@ -1552,7 +1648,6 @@ pullop.stepsdone.add('bookmarks') repo = pullop.repo remotebookmarks = pullop.remotebookmarks - remotebookmarks = bookmod.unhexlifybookmarks(remotebookmarks) 
bookmod.updatefromremote(repo.ui, repo, remotebookmarks, pullop.remote.url(), pullop.gettransaction, @@ -1586,10 +1681,10 @@ pullop.repo.invalidatevolatilesets() return tr -def caps20to10(repo): +def caps20to10(repo, role): """return a set with appropriate options to use bundle20 during getbundle""" caps = {'HG20'} - capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo)) + capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role)) caps.add('bundle2=' + urlreq.quote(capsblob)) return caps @@ -1632,9 +1727,11 @@ Could be a bundle HG10 or a bundle HG20 depending on bundlecaps passed. - Returns an iterator over raw chunks (of varying sizes). + Returns a 2-tuple of a dict with metadata about the generated bundle + and an iterator over raw chunks (of varying sizes). """ kwargs = pycompat.byteskwargs(kwargs) + info = {} usebundle2 = bundle2requested(bundlecaps) # bundle10 case if not usebundle2: @@ -1645,10 +1742,12 @@ raise ValueError(_('unsupported getbundle arguments: %s') % ', '.join(sorted(kwargs.keys()))) outgoing = _computeoutgoing(repo, heads, common) - return changegroup.makestream(repo, outgoing, '01', source, - bundlecaps=bundlecaps) + info['bundleversion'] = 1 + return info, changegroup.makestream(repo, outgoing, '01', source, + bundlecaps=bundlecaps) # bundle20 case + info['bundleversion'] = 2 b2caps = {} for bcaps in bundlecaps: if bcaps.startswith('bundle2='): @@ -1664,14 +1763,41 @@ func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps, **pycompat.strkwargs(kwargs)) - return bundler.getchunks() + info['prefercompressed'] = bundler.prefercompressed + + return info, bundler.getchunks() + +@getbundle2partsgenerator('stream2') +def _getbundlestream2(bundler, repo, source, bundlecaps=None, + b2caps=None, heads=None, common=None, **kwargs): + if not kwargs.get('stream', False): + return + + if not streamclone.allowservergeneration(repo): + raise error.Abort(_('stream data requested but server does not allow ' + 'this feature'), + 
hint=_('well-behaved clients should not be ' + 'requesting stream data from servers not ' + 'advertising it; the client may be buggy')) + + # Stream clones don't compress well. And compression undermines a + # goal of stream clones, which is to be fast. Communicate the desire + # to avoid compression to consumers of the bundle. + bundler.prefercompressed = False + + filecount, bytecount, it = streamclone.generatev2(repo) + requirements = ' '.join(sorted(repo.requirements)) + part = bundler.newpart('stream2', data=it) + part.addparam('bytecount', '%d' % bytecount, mandatory=True) + part.addparam('filecount', '%d' % filecount, mandatory=True) + part.addparam('requirements', requirements, mandatory=True) @getbundle2partsgenerator('changegroup') def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, common=None, **kwargs): """add a changegroup part to the requested bundle""" cgstream = None - if kwargs.get('cg', True): + if kwargs.get(r'cg', True): # build changegroup bundle here. 
version = '01' cgversions = b2caps.get('changegroup') @@ -1695,11 +1821,24 @@ if 'treemanifest' in repo.requirements: part.addparam('treemanifest', '1') +@getbundle2partsgenerator('bookmarks') +def _getbundlebookmarkpart(bundler, repo, source, bundlecaps=None, + b2caps=None, **kwargs): + """add a bookmark part to the requested bundle""" + if not kwargs.get(r'bookmarks', False): + return + if 'bookmarks' not in b2caps: + raise ValueError(_('no common bookmarks exchange method')) + books = bookmod.listbinbookmarks(repo) + data = bookmod.binaryencode(books) + if data: + bundler.newpart('bookmarks', data=data) + @getbundle2partsgenerator('listkeys') def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs): """add parts containing listkeys namespaces to the requested bundle""" - listkeys = kwargs.get('listkeys', ()) + listkeys = kwargs.get(r'listkeys', ()) for namespace in listkeys: part = bundler.newpart('listkeys') part.addparam('namespace', namespace) @@ -1710,7 +1849,7 @@ def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs): """add an obsolescence markers part to the requested bundle""" - if kwargs.get('obsmarkers', False): + if kwargs.get(r'obsmarkers', False): if heads is None: heads = repo.heads() subset = [c.node() for c in repo.set('::%ln', heads)] @@ -1722,7 +1861,7 @@ def _getbundlephasespart(bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs): """add phase heads part to the requested bundle""" - if kwargs.get('phases', False): + if kwargs.get(r'phases', False): if not 'heads' in b2caps.get('phases'): raise ValueError(_('no common phases exchange method')) if heads is None: @@ -1779,23 +1918,12 @@ # Don't send unless: # - changeset are being exchanged, # - the client supports it. 
- if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps): + if not (kwargs.get(r'cg', True) and 'hgtagsfnodes' in b2caps): return outgoing = _computeoutgoing(repo, heads, common) bundle2.addparttagsfnodescache(repo, bundler, outgoing) -def _getbookmarks(repo, **kwargs): - """Returns bookmark to node mapping. - - This function is primarily used to generate `bookmarks` bundle2 part. - It is a separate function in order to make it easy to wrap it - in extensions. Passing `kwargs` to the function makes it easy to - add new parameters in extensions. - """ - - return dict(bookmod.listbinbookmarks(repo)) - def check_heads(repo, their_heads, context): """check if the heads of a repo have been modified diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/fancyopts.py --- a/mercurial/fancyopts.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/fancyopts.py Mon Jan 22 17:53:02 2018 -0500 @@ -119,7 +119,7 @@ >>> get([b'--cwd=foo', b'x', b'y', b'-R', b'bar', b'--debugger'], gnu=False) ([('--cwd', 'foo')], ['x', 'y', '-R', 'bar', '--debugger']) >>> get([b'--unknown', b'--cwd=foo', b'--', '--debugger'], gnu=False) - ([], ['--unknown', '--cwd=foo', '--debugger']) + ([], ['--unknown', '--cwd=foo', '--', '--debugger']) stripping early options (without loosing '--'): @@ -141,6 +141,13 @@ >>> get([b'-q', b'--']) ([('-q', '')], []) + '--' may be a value: + + >>> get([b'-R', b'--', b'x']) + ([('-R', '--')], ['x']) + >>> get([b'--cwd', b'--', b'x']) + ([('--cwd', '--')], ['x']) + value passed to bool options: >>> get([b'--debugger=foo', b'x']) @@ -163,20 +170,16 @@ >>> get([b'-', b'y']) ([], ['-', 'y']) """ - # ignoring everything just after '--' isn't correct as '--' may be an - # option value (e.g. ['-R', '--']), but we do that consistently. 
- try: - argcount = args.index('--') - except ValueError: - argcount = len(args) - parsedopts = [] parsedargs = [] pos = 0 - while pos < argcount: + while pos < len(args): arg = args[pos] + if arg == '--': + pos += not keepsep + break flag, hasval, val, takeval = _earlyoptarg(arg, shortlist, namelist) - if not hasval and takeval and pos + 1 >= argcount: + if not hasval and takeval and pos + 1 >= len(args): # missing last argument break if not flag or hasval and not takeval: @@ -195,38 +198,10 @@ parsedopts.append((flag, args[pos + 1])) pos += 2 - parsedargs.extend(args[pos:argcount]) - parsedargs.extend(args[argcount + (not keepsep):]) + parsedargs.extend(args[pos:]) return parsedopts, parsedargs -def gnugetopt(args, options, longoptions): - """Parse options mostly like getopt.gnu_getopt. - - This is different from getopt.gnu_getopt in that an argument of - will - become an argument of - instead of vanishing completely. - """ - extraargs = [] - if '--' in args: - stopindex = args.index('--') - extraargs = args[stopindex + 1:] - args = args[:stopindex] - opts, parseargs = pycompat.getoptb(args, options, longoptions) - args = [] - while parseargs: - arg = parseargs.pop(0) - if arg and arg[0:1] == '-' and len(arg) > 1: - parseargs.insert(0, arg) - topts, newparseargs = pycompat.getoptb(parseargs,\ - options, longoptions) - opts = opts + topts - parseargs = newparseargs - else: - args.append(arg) - args.extend(extraargs) - return opts, args - - -def fancyopts(args, options, state, gnu=False, early=False): +def fancyopts(args, options, state, gnu=False, early=False, optaliases=None): """ read args, parse options, and store options in state @@ -246,8 +221,15 @@ integer - parameter strings is stored as int function - call function with parameter + optaliases is a mapping from a canonical option name to a list of + additional long options. This exists for preserving backward compatibility + of early options. 
If we want to use it extensively, please consider moving + the functionality to the options table (e.g separate long options by '|'.) + non-option args are returned """ + if optaliases is None: + optaliases = {} namelist = [] shortlist = '' argmap = {} @@ -261,10 +243,13 @@ else: short, name, default, comment = option # convert opts to getopt format - oname = name + onames = [name] + onames.extend(optaliases.get(name, [])) name = name.replace('-', '_') - argmap['-' + short] = argmap['--' + oname] = name + argmap['-' + short] = name + for n in onames: + argmap['--' + n] = name defmap[name] = default # copy defaults to state @@ -279,30 +264,30 @@ if not (default is None or default is True or default is False): if short: short += ':' - if oname: - oname += '=' - elif oname not in nevernegate: - if oname.startswith('no-'): - insert = oname[3:] - else: - insert = 'no-' + oname - # backout (as a practical example) has both --commit and - # --no-commit options, so we don't want to allow the - # negations of those flags. - if insert not in alllong: - assert ('--' + oname) not in negations - negations['--' + insert] = '--' + oname - namelist.append(insert) + onames = [n + '=' for n in onames] + elif name not in nevernegate: + for n in onames: + if n.startswith('no-'): + insert = n[3:] + else: + insert = 'no-' + n + # backout (as a practical example) has both --commit and + # --no-commit options, so we don't want to allow the + # negations of those flags. 
+ if insert not in alllong: + assert ('--' + n) not in negations + negations['--' + insert] = '--' + n + namelist.append(insert) if short: shortlist += short if name: - namelist.append(oname) + namelist.extend(onames) # parse arguments if early: parse = functools.partial(earlygetopt, gnu=gnu) elif gnu: - parse = gnugetopt + parse = pycompat.gnugetoptb else: parse = pycompat.getoptb opts, args = parse(args, shortlist, namelist) diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/filelog.py --- a/mercurial/filelog.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/filelog.py Mon Jan 22 17:53:02 2018 -0500 @@ -43,6 +43,8 @@ def __init__(self, opener, path): super(filelog, self).__init__(opener, "/".join(("data", path + ".i"))) + # full name of the user visible file, relative to the repository root + self.filename = path def read(self, node): t = self.revision(node) diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/filemerge.py --- a/mercurial/filemerge.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/filemerge.py Mon Jan 22 17:53:02 2018 -0500 @@ -241,6 +241,12 @@ ui = repo.ui fd = fcd.path() + # Avoid prompting during an in-memory merge since it doesn't support merge + # conflicts. + if fcd.changectx().isinmemory(): + raise error.InMemoryMergeConflictsError('in-memory merge does not ' + 'support file conflicts') + prompts = partextras(labels) prompts['fd'] = fd try: @@ -465,11 +471,10 @@ a = _workingpath(repo, fcd) fd = fcd.path() - # Run ``flushall()`` to make any missing folders the following wwrite - # calls might be depending on. from . 
import context if isinstance(fcd, context.overlayworkingfilectx): - fcd.ctx().flushall() + raise error.InMemoryMergeConflictsError('in-memory merge does not ' + 'support the :dump tool.') util.writefile(a + ".local", fcd.decodeddata()) repo.wwrite(fd + ".other", fco.data(), fco.flags()) @@ -485,6 +490,18 @@ return _idump(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=labels) +def _xmergeimm(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None): + # In-memory merge simply raises an exception on all external merge tools, + # for now. + # + # It would be possible to run most tools with temporary files, but this + # raises the question of what to do if the user only partially resolves the + # file -- we can't leave a merge state. (Copy to somewhere in the .hg/ + # directory and tell the user how to get it is my best idea, but it's + # clunky.) + raise error.InMemoryMergeConflictsError('in-memory merge does not support ' + 'external merge tools') + def _xmerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None): tool, toolpath, binary, symlink = toolconf if fcd.isabsent() or fco.isabsent(): @@ -526,7 +543,7 @@ util.unlink(b) util.unlink(c) -def _formatconflictmarker(repo, ctx, template, label, pad): +def _formatconflictmarker(ctx, template, label, pad): """Applies the given template to the ctx, prefixed by the label. 
Pad is the minimum width of the label prefix, so that multiple markers @@ -535,10 +552,7 @@ if ctx.node() is None: ctx = ctx.p1() - props = templatekw.keywords.copy() - props['templ'] = template - props['ctx'] = ctx - props['repo'] = repo + props = {'ctx': ctx} templateresult = template.render(props) label = ('%s:' % label).ljust(pad + 1) @@ -564,14 +578,16 @@ ui = repo.ui template = ui.config('ui', 'mergemarkertemplate') template = templater.unquotestring(template) - tmpl = formatter.maketemplater(ui, template) + tres = formatter.templateresources(ui, repo) + tmpl = formatter.maketemplater(ui, template, defaults=templatekw.keywords, + resources=tres) pad = max(len(l) for l in labels) - newlabels = [_formatconflictmarker(repo, cd, tmpl, labels[0], pad), - _formatconflictmarker(repo, co, tmpl, labels[1], pad)] + newlabels = [_formatconflictmarker(cd, tmpl, labels[0], pad), + _formatconflictmarker(co, tmpl, labels[1], pad)] if len(labels) > 2: - newlabels.append(_formatconflictmarker(repo, ca, tmpl, labels[2], pad)) + newlabels.append(_formatconflictmarker(ca, tmpl, labels[2], pad)) return newlabels def partextras(labels): @@ -602,6 +618,9 @@ (if any), the backup is used to undo certain premerges, confirm whether a merge changed anything, and determine what line endings the new file should have. + + Backups only need to be written once (right before the premerge) since their + content doesn't change afterwards. """ if fcd.isabsent(): return None @@ -612,21 +631,26 @@ back = scmutil.origpath(ui, repo, a) inworkingdir = (back.startswith(repo.wvfs.base) and not back.startswith(repo.vfs.base)) - if isinstance(fcd, context.overlayworkingfilectx) and inworkingdir: # If the backup file is to be in the working directory, and we're # merging in-memory, we must redirect the backup to the memory context # so we don't disturb the working directory. 
relpath = back[len(repo.wvfs.base) + 1:] - wctx[relpath].write(fcd.data(), fcd.flags()) + if premerge: + wctx[relpath].write(fcd.data(), fcd.flags()) return wctx[relpath] else: - # Otherwise, write to wherever the user specified the backups should go. - # + if premerge: + # Otherwise, write to wherever path the user specified the backups + # should go. We still need to switch based on whether the source is + # in-memory so we can use the fast path of ``util.copy`` if both are + # on disk. + if isinstance(fcd, context.overlayworkingfilectx): + util.writefile(back, fcd.data()) + else: + util.copyfile(a, back) # A arbitraryfilectx is returned, so we can run the same functions on # the backup context regardless of where it lives. - if premerge: - util.copyfile(a, back) return context.arbitraryfilectx(back, repo=repo) def _maketempfiles(repo, fco, fca): @@ -683,16 +707,14 @@ onfailure = func.onfailure precheck = func.precheck else: - func = _xmerge + if wctx.isinmemory(): + func = _xmergeimm + else: + func = _xmerge mergetype = fullmerge onfailure = _("merging %s failed!\n") precheck = None - # If using deferred writes, must flush any deferred contents if running - # an external merge tool since it has arbitrary access to the working - # copy. 
- wctx.flushall() - toolconf = tool, toolpath, binary, symlink if mergetype == nomerge: @@ -710,6 +732,10 @@ if precheck and not precheck(repo, mynode, orig, fcd, fco, fca, toolconf): if onfailure: + if wctx.isinmemory(): + raise error.InMemoryMergeConflictsError('in-memory merge does ' + 'not support merge ' + 'conflicts') ui.warn(onfailure % fd) return True, 1, False @@ -736,6 +762,10 @@ if r: if onfailure: + if wctx.isinmemory(): + raise error.InMemoryMergeConflictsError('in-memory merge ' + 'does not support ' + 'merge conflicts') ui.warn(onfailure % fd) _onfilemergefailure(ui) diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/fileset.py --- a/mercurial/fileset.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/fileset.py Mon Jan 22 17:53:02 2018 -0500 @@ -12,6 +12,7 @@ from .i18n import _ from . import ( error, + match as matchmod, merge, parser, pycompat, @@ -23,6 +24,7 @@ elements = { # token-type: binding-strength, primary, prefix, infix, suffix "(": (20, None, ("group", 1, ")"), ("func", 1, ")"), None), + ":": (15, None, None, ("kindpat", 15), None), "-": (5, None, ("negate", 19), ("minus", 5), None), "not": (10, None, ("not", 10), None, None), "!": (10, None, ("not", 10), None, None), @@ -49,7 +51,7 @@ c = program[pos] if c.isspace(): # skip inter-token whitespace pass - elif c in "(),-|&+!": # handle simple operators + elif c in "(),-:|&+!": # handle simple operators yield (c, None, pos) elif (c in '"\'' or c == 'r' and program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings @@ -99,11 +101,28 @@ raise error.ParseError(_("invalid token"), pos) return tree +def getsymbol(x): + if x and x[0] == 'symbol': + return x[1] + raise error.ParseError(_('not a symbol')) + def getstring(x, err): if x and (x[0] == 'string' or x[0] == 'symbol'): return x[1] raise error.ParseError(err) +def _getkindpat(x, y, allkinds, err): + kind = getsymbol(x) + pat = getstring(y, err) + if kind not in allkinds: + raise error.ParseError(_("invalid pattern kind: %s") % kind) + 
return '%s:%s' % (kind, pat) + +def getpattern(x, allkinds, err): + if x and x[0] == 'kindpat': + return _getkindpat(x[1], x[2], allkinds, err) + return getstring(x, err) + def getset(mctx, x): if not x: raise error.ParseError(_("missing argument")) @@ -113,6 +132,10 @@ m = mctx.matcher([x]) return [f for f in mctx.subset if m(f)] +def kindpatset(mctx, x, y): + return stringset(mctx, _getkindpat(x, y, matchmod.allpatternkinds, + _("pattern must be a string"))) + def andset(mctx, x, y): return getset(mctx.narrow(getset(mctx, x)), y) @@ -131,6 +154,9 @@ yl = set(getset(mctx, y)) return [f for f in xl if f not in yl] +def negateset(mctx, x): + raise error.ParseError(_("can't use negate operator in this context")) + def listset(mctx, a, b): raise error.ParseError(_("can't use a list in this context"), hint=_('see hg help "filesets.x or y"')) @@ -225,8 +251,8 @@ return [f for f in mctx.subset if f in s] def func(mctx, a, b): - if a[0] == 'symbol' and a[1] in symbols: - funcname = a[1] + funcname = getsymbol(a) + if funcname in symbols: enabled = mctx._existingenabled mctx._existingenabled = funcname in _existingcallers try: @@ -237,7 +263,7 @@ keep = lambda fn: getattr(fn, '__doc__', None) is not None syms = [s for (s, fn) in symbols.items() if keep(fn)] - raise error.UnknownIdentifier(a[1], syms) + raise error.UnknownIdentifier(funcname, syms) def getlist(x): if not x: @@ -344,6 +370,34 @@ except ValueError: raise error.ParseError(_("couldn't parse size: %s") % s) +def sizematcher(x): + """Return a function(size) -> bool from the ``size()`` expression""" + + # i18n: "size" is a keyword + expr = getstring(x, _("size requires an expression")).strip() + if '-' in expr: # do we have a range? 
+ a, b = expr.split('-', 1) + a = util.sizetoint(a) + b = util.sizetoint(b) + return lambda x: x >= a and x <= b + elif expr.startswith("<="): + a = util.sizetoint(expr[2:]) + return lambda x: x <= a + elif expr.startswith("<"): + a = util.sizetoint(expr[1:]) + return lambda x: x < a + elif expr.startswith(">="): + a = util.sizetoint(expr[2:]) + return lambda x: x >= a + elif expr.startswith(">"): + a = util.sizetoint(expr[1:]) + return lambda x: x > a + elif expr[0].isdigit or expr[0] == '.': + a = util.sizetoint(expr) + b = _sizetomax(expr) + return lambda x: x >= a and x <= b + raise error.ParseError(_("couldn't parse size: %s") % expr) + @predicate('size(expression)', callexisting=True) def size(mctx, x): """File size matches the given expression. Examples: @@ -353,33 +407,7 @@ - size('>= .5MB') - files at least 524288 bytes - size('4k - 1MB') - files from 4096 bytes to 1048576 bytes """ - - # i18n: "size" is a keyword - expr = getstring(x, _("size requires an expression")).strip() - if '-' in expr: # do we have a range? - a, b = expr.split('-', 1) - a = util.sizetoint(a) - b = util.sizetoint(b) - m = lambda x: x >= a and x <= b - elif expr.startswith("<="): - a = util.sizetoint(expr[2:]) - m = lambda x: x <= a - elif expr.startswith("<"): - a = util.sizetoint(expr[1:]) - m = lambda x: x < a - elif expr.startswith(">="): - a = util.sizetoint(expr[2:]) - m = lambda x: x >= a - elif expr.startswith(">"): - a = util.sizetoint(expr[1:]) - m = lambda x: x > a - elif expr[0].isdigit or expr[0] == '.': - a = util.sizetoint(expr) - b = _sizetomax(expr) - m = lambda x: x >= a and x <= b - else: - raise error.ParseError(_("couldn't parse size: %s") % expr) - + m = sizematcher(x) return [f for f in mctx.existing() if m(mctx.ctx[f].size())] @predicate('encoding(name)', callexisting=True) @@ -496,10 +524,9 @@ ctx = mctx.ctx sstate = sorted(ctx.substate) if x: - # i18n: "subrepo" is a keyword - pat = getstring(x, _("subrepo requires a pattern or no arguments")) - - from . 
import match as matchmod # avoid circular import issues + pat = getpattern(x, matchmod.allpatternkinds, + # i18n: "subrepo" is a keyword + _("subrepo requires a pattern or no arguments")) fast = not matchmod.patkind(pat) if fast: def m(s): @@ -513,9 +540,11 @@ methods = { 'string': stringset, 'symbol': stringset, + 'kindpat': kindpatset, 'and': andset, 'or': orset, 'minus': minusset, + 'negate': negateset, 'list': listset, 'group': getset, 'not': notset, diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/formatter.py --- a/mercurial/formatter.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/formatter.py Mon Jan 22 17:53:02 2018 -0500 @@ -94,14 +94,14 @@ >>> def subrepos(ui, fm): ... fm.startitem() -... fm.write(b'repo', b'[%s]\\n', b'baz') +... fm.write(b'reponame', b'[%s]\\n', b'baz') ... files(ui, fm.nested(b'files')) ... fm.end() >>> show(subrepos) [baz] foo bar ->>> show(subrepos, template=b'{repo}: {join(files % "{path}", ", ")}\\n') +>>> show(subrepos, template=b'{reponame}: {join(files % "{path}", ", ")}\\n') baz: foo, bar """ @@ -363,11 +363,12 @@ self._out = out spec = lookuptemplate(ui, topic, opts.get('template', '')) self._tref = spec.ref - self._t = loadtemplater(ui, spec, cache=templatekw.defaulttempl) + self._t = loadtemplater(ui, spec, defaults=templatekw.keywords, + resources=templateresources(ui), + cache=templatekw.defaulttempl) self._parts = templatepartsmap(spec, self._t, ['docheader', 'docfooter', 'separator']) self._counter = itertools.count() - self._cache = {} # for templatekw/funcs to store reusable data self._renderitem('docheader', {}) def _showitem(self): @@ -386,17 +387,14 @@ # function will have to declare dependent resources. e.g. 
# @templatekeyword(..., requires=('ctx',)) props = {} - if 'ctx' in item: - props.update(templatekw.keywords) # explicitly-defined fields precede templatekw props.update(item) if 'ctx' in item: # but template resources must be always available - props['templ'] = self._t props['repo'] = props['ctx'].repo() props['revcache'] = {} props = pycompat.strkwargs(props) - g = self._t(ref, ui=self._ui, cache=self._cache, **props) + g = self._t(ref, **props) self._out.write(templater.stringify(g)) def end(self): @@ -468,24 +466,39 @@ partsmap[part] = ref return partsmap -def loadtemplater(ui, spec, cache=None): +def loadtemplater(ui, spec, defaults=None, resources=None, cache=None): """Create a templater from either a literal template or loading from a map file""" assert not (spec.tmpl and spec.mapfile) if spec.mapfile: - return templater.templater.frommapfile(spec.mapfile, cache=cache) - return maketemplater(ui, spec.tmpl, cache=cache) + frommapfile = templater.templater.frommapfile + return frommapfile(spec.mapfile, defaults=defaults, resources=resources, + cache=cache) + return maketemplater(ui, spec.tmpl, defaults=defaults, resources=resources, + cache=cache) -def maketemplater(ui, tmpl, cache=None): +def maketemplater(ui, tmpl, defaults=None, resources=None, cache=None): """Create a templater from a string template 'tmpl'""" aliases = ui.configitems('templatealias') - t = templater.templater(cache=cache, aliases=aliases) + t = templater.templater(defaults=defaults, resources=resources, + cache=cache, aliases=aliases) t.cache.update((k, templater.unquotestring(v)) for k, v in ui.configitems('templates')) if tmpl: t.cache[''] = tmpl return t +def templateresources(ui, repo=None): + """Create a dict of template resources designed for the default templatekw + and function""" + return { + 'cache': {}, # for templatekw/funcs to store reusable data + 'ctx': None, + 'repo': repo, + 'revcache': None, # per-ctx cache; set later + 'ui': ui, + } + def formatter(ui, out, topic, 
opts): template = opts.get("template", "") if template == "json": diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/graphmod.py --- a/mercurial/graphmod.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/graphmod.py Mon Jan 22 17:53:02 2018 -0500 @@ -48,9 +48,6 @@ returned. """ - if not revs: - return - gpcache = {} for rev in revs: diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/hbisect.py --- a/mercurial/hbisect.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/hbisect.py Mon Jan 22 17:53:02 2018 -0500 @@ -21,7 +21,7 @@ error, ) -def bisect(changelog, state): +def bisect(repo, state): """find the next node (if any) for testing during a bisect search. returns a (nodes, number, good) tuple. @@ -32,33 +32,15 @@ if searching for a first bad one. """ + changelog = repo.changelog clparents = changelog.parentrevs skip = set([changelog.rev(n) for n in state['skip']]) def buildancestors(bad, good): - # only the earliest bad revision matters badrev = min([changelog.rev(n) for n in bad]) - goodrevs = [changelog.rev(n) for n in good] - goodrev = min(goodrevs) - # build visit array - ancestors = [None] * (len(changelog) + 1) # an extra for [-1] - - # set nodes descended from goodrevs - for rev in goodrevs: + ancestors = collections.defaultdict(lambda: None) + for rev in repo.revs("descendants(%ln) - ancestors(%ln)", good, good): ancestors[rev] = [] - for rev in changelog.revs(goodrev + 1): - for prev in clparents(rev): - if ancestors[prev] == []: - ancestors[rev] = [] - - # clear good revs from array - for rev in goodrevs: - ancestors[rev] = None - for rev in changelog.revs(len(changelog), goodrev): - if ancestors[rev] is None: - for prev in clparents(rev): - ancestors[prev] = None - if ancestors[badrev] is None: return badrev, None return badrev, ancestors diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/help.py --- a/mercurial/help.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/help.py Mon Jan 22 17:53:02 2018 -0500 @@ -226,6 +226,7 @@ (['color'], _("Colorizing Outputs"), 
loaddoc('color')), (["config", "hgrc"], _("Configuration Files"), loaddoc('config')), (["dates"], _("Date Formats"), loaddoc('dates')), + (["flags"], _("Command-line flags"), loaddoc('flags')), (["patterns"], _("File Name Patterns"), loaddoc('patterns')), (['environment', 'env'], _('Environment Variables'), loaddoc('environment')), @@ -452,7 +453,7 @@ rst.append(' :%s: %s\n' % (f, h[f])) ex = opts.get - anyopts = (ex('keyword') or not (ex('command') or ex('extension'))) + anyopts = (ex(r'keyword') or not (ex(r'command') or ex(r'extension'))) if not name and anyopts: exts = listexts(_('enabled extensions:'), extensions.enabled()) if exts: diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/help/config.txt --- a/mercurial/help/config.txt Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/help/config.txt Mon Jan 22 17:53:02 2018 -0500 @@ -1723,6 +1723,14 @@ Controls generic server settings. +``bookmarks-pushkey-compat`` + Trigger pushkey hook when being pushed bookmark updates. This config exist + for compatibility purpose (default to True) + + If you use ``pushkey`` and ``pre-pushkey`` hooks to control bookmark + movement we recommend you migrate them to ``txnclose-bookmark`` and + ``pretxnclose-bookmark``. + ``compressionengines`` List of compression engines and their relative priority to advertise to clients. @@ -2176,6 +2184,8 @@ (default: True) ``slash`` + (Deprecated. Use ``slashpath`` template filter instead.) + Display paths using a slash (``/``) as the path separator. This only makes a difference on systems where the default path separator is not the slash character (e.g. Windows uses the @@ -2188,6 +2198,10 @@ ``ssh`` Command to use for SSH connections. (default: ``ssh``) +``ssherrorhint`` + A hint shown to the user in the case of SSH error (e.g. + ``Please see http://company/internalwiki/ssh.html``) + ``strict`` Require exact command names, instead of allowing unambiguous abbreviations. 
(default: False) @@ -2211,6 +2225,10 @@ The timeout used when a lock is held (in seconds), a negative value means no timeout. (default: 600) +``timeout.warn`` + Time (in seconds) before a warning is printed about held lock. A negative + value means no warning. (default: 0) + ``traceback`` Mercurial always prints a traceback when an unknown exception occurs. Setting this to True will make Mercurial print a traceback @@ -2260,7 +2278,7 @@ you want it to accept pushes from anybody, you can use the following command line:: - $ hg --config web.allow_push=* --config web.push_ssl=False serve + $ hg --config web.allow-push=* --config web.push_ssl=False serve Note that this will allow anybody to push anything to the server and that this should not be used for public servers. @@ -2287,16 +2305,16 @@ revisions. (default: False) -``allowpull`` +``allow-pull`` Whether to allow pulling from the repository. (default: True) -``allow_push`` +``allow-push`` Whether to allow pushing to the repository. If empty or not set, pushing is not allowed. If the special value ``*``, any remote user can push, including unauthenticated users. Otherwise, the remote user must have been authenticated, and the authenticated user name must be present in this list. The contents of the - allow_push list are examined after the deny_push list. + allow-push list are examined after the deny_push list. ``allow_read`` If the user has not already been denied repository access due to @@ -2390,7 +2408,7 @@ push is not denied. If the special value ``*``, all remote users are denied push. Otherwise, unauthenticated users are all denied, and any authenticated user name present in this list is also denied. The - contents of the deny_push list are examined before the allow_push list. + contents of the deny_push list are examined before the allow-push list. ``deny_read`` Whether to deny reading/viewing of the repository. 
If this list is @@ -2547,6 +2565,10 @@ directory updates in parallel on Unix-like systems, which greatly helps performance. +``enabled`` + Whether to enable workers code to be used. + (default: true) + ``numcpus`` Number of CPUs to use for parallel operations. A zero or negative value is treated as ``use the default``. diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/help/environment.txt --- a/mercurial/help/environment.txt Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/help/environment.txt Mon Jan 22 17:53:02 2018 -0500 @@ -73,6 +73,8 @@ ``alias`` Don't remove aliases. + ``color`` + Don't disable colored output. ``i18n`` Preserve internationalization. ``revsetalias`` diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/help/filesets.txt --- a/mercurial/help/filesets.txt Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/help/filesets.txt Mon Jan 22 17:53:02 2018 -0500 @@ -9,7 +9,8 @@ or double quotes if they contain characters outside of ``[.*{}[]?/\_a-zA-Z0-9\x80-\xff]`` or if they match one of the predefined predicates. This generally applies to file patterns other -than globs and arguments for predicates. +than globs and arguments for predicates. Pattern prefixes such as +``path:`` may be specified without quoting. Special characters can be used in quoted identifiers by escaping them, e.g., ``\n`` is interpreted as a newline. To prevent them from being @@ -75,4 +76,4 @@ - Remove files listed in foo.lst that contain the letter a or b:: - hg remove "set: 'listfile:foo.lst' and (**a* or **b*)" + hg remove "set: listfile:foo.lst and (**a* or **b*)" diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/help/flags.txt --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/mercurial/help/flags.txt Mon Jan 22 17:53:02 2018 -0500 @@ -0,0 +1,104 @@ +Most Mercurial commands accept various flags. + +Flag names +========== + +Flags for each command are listed in :hg:`help` for that command. 
+Additionally, some flags, such as --repository, are global and can be used with +any command - those are seen in :hg:`help -v`, and can be specified before or +after the command. + +Every flag has at least a long name, such as --repository. Some flags may also +have a short one-letter name, such as the equivalent -R. Using the short or long +name is equivalent and has the same effect. + +Flags that have a short name can also be bundled together - for instance, to +specify both --edit (short -e) and --interactive (short -i), one could use:: + + hg commit -ei + +If any of the bundled flags takes a value (i.e. is not a boolean), it must be +last, followed by the value:: + + hg commit -im 'Message' + +Flag types +========== + +Mercurial command-line flags can be strings, numbers, booleans, or lists of +strings. + +Specifying flag values +====================== + +The following syntaxes are allowed, assuming a flag 'flagname' with short name +'f':: + + --flagname=foo + --flagname foo + -f foo + -ffoo + +This syntax applies to all non-boolean flags (strings, numbers or lists). + +Specifying boolean flags +======================== + +Boolean flags do not take a value parameter. To specify a boolean, use the flag +name to set it to true, or the same name prefixed with 'no-' to set it to +false:: + + hg commit --interactive + hg commit --no-interactive + +Specifying list flags +===================== + +List flags take multiple values. To specify them, pass the flag multiple times:: + + hg files --include mercurial --include tests + +Setting flag defaults +===================== + +In order to set a default value for a flag in an hgrc file, it is recommended to +use aliases:: + + [alias] + commit = commit --interactive + +For more information on hgrc files, see :hg:`help config`. 
+ +Overriding flags on the command line +==================================== + +If the same non-list flag is specified multiple times on the command line, the +latest specification is used:: + + hg commit -m "Ignored value" -m "Used value" + +This includes the use of aliases - e.g., if one has:: + + [alias] + committemp = commit -m "Ignored value" + +then the following command will override that -m:: + + hg committemp -m "Used value" + +Overriding flag defaults +======================== + +Every flag has a default value, and you may also set your own defaults in hgrc +as described above. +Except for list flags, defaults can be overridden on the command line simply by +specifying the flag in that location. + +Hidden flags +============ + +Some flags are not shown in a command's help by default - specifically, those +that are deemed to be experimental, deprecated or advanced. To show all flags, +add the --verbose flag for the help command:: + + hg help --verbose commit diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/help/hg.1.txt --- a/mercurial/help/hg.1.txt Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/help/hg.1.txt Mon Jan 22 17:53:02 2018 -0500 @@ -112,7 +112,7 @@ Copying """"""" -Copyright (C) 2005-2017 Matt Mackall. +Copyright (C) 2005-2018 Matt Mackall. Free use of this software is granted under the terms of the GNU General Public License version 2 or any later version. diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/help/hgignore.5.txt --- a/mercurial/help/hgignore.5.txt Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/help/hgignore.5.txt Mon Jan 22 17:53:02 2018 -0500 @@ -26,7 +26,7 @@ Copying ======= This manual page is copyright 2006 Vadim Gelfer. -Mercurial is copyright 2005-2017 Matt Mackall. +Mercurial is copyright 2005-2018 Matt Mackall. Free use of this software is granted under the terms of the GNU General Public License version 2 or any later version. 
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/help/hgrc.5.txt --- a/mercurial/help/hgrc.5.txt Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/help/hgrc.5.txt Mon Jan 22 17:53:02 2018 -0500 @@ -34,7 +34,7 @@ Copying ======= This manual page is copyright 2005 Bryan O'Sullivan. -Mercurial is copyright 2005-2017 Matt Mackall. +Mercurial is copyright 2005-2018 Matt Mackall. Free use of this software is granted under the terms of the GNU General Public License version 2 or any later version. diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/help/internals/wireprotocol.txt --- a/mercurial/help/internals/wireprotocol.txt Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/help/internals/wireprotocol.txt Mon Jan 22 17:53:02 2018 -0500 @@ -731,6 +731,8 @@ cbattempted Boolean indicating whether the client attempted to use the *clone bundles* feature before performing this request. +bookmarks + Boolean indicating whether bookmark data is requested. phases Boolean indicating whether phases data is requested. diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/hg.py --- a/mercurial/hg.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/hg.py Mon Jan 22 17:53:02 2018 -0500 @@ -14,11 +14,14 @@ import shutil from .i18n import _ -from .node import nullid +from .node import ( + nullid, +) from . 
import ( bookmarks, bundlerepo, + cacheutil, cmdutil, destutil, discovery, @@ -28,10 +31,10 @@ httppeer, localrepo, lock, + logexchange, merge as mergemod, node, phases, - repoview, scmutil, sshpeer, statichttprepo, @@ -306,16 +309,13 @@ """ default = defaultpath or sourcerepo.ui.config('paths', 'default') if default: - fp = destrepo.vfs("hgrc", "w", text=True) - fp.write("[paths]\n") - fp.write("default = %s\n" % default) - fp.close() + template = ('[paths]\n' + 'default = %s\n') + destrepo.vfs.write('hgrc', util.tonativeeol(template % default)) with destrepo.wlock(): if bookmarks: - fp = destrepo.vfs('shared', 'w') - fp.write(sharedbookmarks + '\n') - fp.close() + destrepo.vfs.write('shared', sharedbookmarks + '\n') def _postshareupdate(repo, update, checkout=None): """Maybe perform a working directory update after a shared repo is created. @@ -459,18 +459,6 @@ os.mkdir(dstcachedir) util.copyfile(srcbranchcache, dstbranchcache) -def _cachetocopy(srcrepo): - """return the list of cache file valuable to copy during a clone""" - # In local clones we're copying all nodes, not just served - # ones. Therefore copy all branch caches over. - cachefiles = ['branch2'] - cachefiles += ['branch2-%s' % f for f in repoview.filtertable] - cachefiles += ['rbc-names-v1', 'rbc-revs-v1'] - cachefiles += ['tags2'] - cachefiles += ['tags2-%s' % f for f in repoview.filtertable] - cachefiles += ['hgtagsfnodes1'] - return cachefiles - def clone(ui, peeropts, source, dest=None, pull=False, rev=None, update=True, stream=False, branch=None, shareopts=None): """Make a copy of an existing repository. 
@@ -568,7 +556,7 @@ 'unable to resolve identity of remote)\n')) elif sharenamemode == 'remote': sharepath = os.path.join( - sharepool, hashlib.sha1(source).hexdigest()) + sharepool, node.hex(hashlib.sha1(source).digest())) else: raise error.Abort(_('unknown share naming mode: %s') % sharenamemode) @@ -629,7 +617,7 @@ util.copyfile(srcbookmarks, dstbookmarks) dstcachedir = os.path.join(destpath, 'cache') - for cache in _cachetocopy(srcrepo): + for cache in cacheutil.cachetocopy(srcrepo): _copycache(srcrepo, dstcachedir, cache) # we need to re-init the repo after manually copying the data @@ -658,6 +646,9 @@ checkout = revs[0] local = destpeer.local() if local: + u = util.url(abspath) + defaulturl = bytes(u) + local.ui.setconfig('paths', 'default', defaulturl, 'clone') if not stream: if pull: stream = False @@ -680,14 +671,14 @@ destrepo = destpeer.local() if destrepo: template = uimod.samplehgrcs['cloned'] - fp = destrepo.vfs("hgrc", "wb") u = util.url(abspath) u.passwd = None defaulturl = bytes(u) - fp.write(util.tonativeeol(template % defaulturl)) - fp.close() + destrepo.vfs.write('hgrc', util.tonativeeol(template % defaulturl)) + destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone') - destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone') + if ui.configbool('experimental', 'remotenames'): + logexchange.pullremotenames(destrepo, srcpeer) if update: if update is not True: @@ -843,16 +834,32 @@ return ret -def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None): +def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None, + abort=False): """Branch merge with node, resolving changes. 
Return true if any unresolved conflicts.""" - stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce, - labels=labels) + if not abort: + stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce, + labels=labels) + else: + ms = mergemod.mergestate.read(repo) + if ms.active(): + # there were conflicts + node = ms.localctx.hex() + else: + # there were no conficts, mergestate was not stored + node = repo['.'].hex() + + repo.ui.status(_("aborting the merge, updating back to" + " %s\n") % node[:12]) + stats = mergemod.update(repo, node, branchmerge=False, force=True, + labels=labels) + _showstats(repo, stats) if stats[3]: repo.ui.status(_("use 'hg resolve' to retry unresolved file merges " - "or 'hg update -C .' to abandon\n")) - elif remind: + "or 'hg merge --abort' to abandon\n")) + elif remind and not abort: repo.ui.status(_("(branch merge, don't forget to commit)\n")) return stats[3] > 0 @@ -912,8 +919,13 @@ return _incoming(display, subreporecurse, ui, repo, source, opts) def _outgoing(ui, repo, dest, opts): - dest = ui.expandpath(dest or 'default-push', dest or 'default') - dest, branches = parseurl(dest, opts.get('branch')) + path = ui.paths.getpath(dest, default=('default-push', 'default')) + if not path: + raise error.Abort(_('default repository not configured!'), + hint=_("see 'hg help config.paths'")) + dest = path.pushloc or path.loc + branches = path.branch, opts.get('branch') or [] + ui.status(_('comparing with %s\n') % util.hidepassword(dest)) revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev')) if revs: diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/hgweb/common.py --- a/mercurial/hgweb/common.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/hgweb/common.py Mon Jan 22 17:53:02 2018 -0500 @@ -75,7 +75,7 @@ if deny and (not user or ismember(hgweb.repo.ui, user, deny)): raise ErrorResponse(HTTP_UNAUTHORIZED, 'push not authorized') - allow = hgweb.configlist('web', 'allow_push') + allow = hgweb.configlist('web', 
'allow-push') if not (allow and ismember(hgweb.repo.ui, user, allow)): raise ErrorResponse(HTTP_UNAUTHORIZED, 'push not authorized') diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/hgweb/hgweb_mod.py --- a/mercurial/hgweb/hgweb_mod.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/hgweb/hgweb_mod.py Mon Jan 22 17:53:02 2018 -0500 @@ -114,7 +114,7 @@ self.stripecount = self.configint('web', 'stripes') self.maxshortchanges = self.configint('web', 'maxshortchanges') self.maxfiles = self.configint('web', 'maxfiles') - self.allowpull = self.configbool('web', 'allowpull') + self.allowpull = self.configbool('web', 'allow-pull') # we use untrusted=False to prevent a repo owner from using # web.templates in .hg/hgrc to get access to any file readable diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/hgweb/protocol.py --- a/mercurial/hgweb/protocol.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/hgweb/protocol.py Mon Jan 22 17:53:02 2018 -0500 @@ -102,25 +102,20 @@ urlreq.quote(self.req.env.get('REMOTE_HOST', '')), urlreq.quote(self.req.env.get('REMOTE_USER', ''))) - def responsetype(self, v1compressible=False): + def responsetype(self, prefer_uncompressed): """Determine the appropriate response type and compression settings. - The ``v1compressible`` argument states whether the response with - application/mercurial-0.1 media types should be zlib compressed. - Returns a tuple of (mediatype, compengine, engineopts). """ - # For now, if it isn't compressible in the old world, it's never - # compressible. We can change this to send uncompressed 0.2 payloads - # later. - if not v1compressible: - return HGTYPE, None, None - # Determine the response media type and compression engine based # on the request parameters. protocaps = decodevaluefromheaders(self.req, r'X-HgProto').split(' ') if '0.2' in protocaps: + # All clients are expected to support uncompressed data. + if prefer_uncompressed: + return HGTYPE2, util._noopengine(), {} + # Default as defined by wire protocol spec. 
compformats = ['zlib', 'none'] for cap in protocaps: @@ -155,7 +150,7 @@ def call(repo, req, cmd): p = webproto(req, repo.ui) - def genversion2(gen, compress, engine, engineopts): + def genversion2(gen, engine, engineopts): # application/mercurial-0.2 always sends a payload header # identifying the compression engine. name = engine.wireprotosupport().name @@ -163,31 +158,27 @@ yield struct.pack('B', len(name)) yield name - if compress: - for chunk in engine.compressstream(gen, opts=engineopts): - yield chunk - else: - for chunk in gen: - yield chunk + for chunk in gen: + yield chunk rsp = wireproto.dispatch(repo, p, cmd) if isinstance(rsp, bytes): req.respond(HTTP_OK, HGTYPE, body=rsp) return [] + elif isinstance(rsp, wireproto.streamres_legacy): + gen = rsp.gen + req.respond(HTTP_OK, HGTYPE) + return gen elif isinstance(rsp, wireproto.streamres): - if rsp.reader: - gen = iter(lambda: rsp.reader.read(32768), '') - else: - gen = rsp.gen + gen = rsp.gen # This code for compression should not be streamres specific. It # is here because we only compress streamres at the moment. 
- mediatype, engine, engineopts = p.responsetype(rsp.v1compressible) + mediatype, engine, engineopts = p.responsetype(rsp.prefer_uncompressed) + gen = engine.compressstream(gen, engineopts) - if mediatype == HGTYPE and rsp.v1compressible: - gen = engine.compressstream(gen, engineopts) - elif mediatype == HGTYPE2: - gen = genversion2(gen, rsp.v1compressible, engine, engineopts) + if mediatype == HGTYPE2: + gen = genversion2(gen, engine, engineopts) req.respond(HTTP_OK, mediatype) return gen diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/hgweb/webcommands.py --- a/mercurial/hgweb/webcommands.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/hgweb/webcommands.py Mon Jan 22 17:53:02 2018 -0500 @@ -13,7 +13,7 @@ import re from ..i18n import _ -from ..node import hex, short +from ..node import hex, nullid, short from .common import ( ErrorResponse, @@ -36,9 +36,7 @@ revsetlang, scmutil, smartset, - templatefilters, templater, - url, util, ) @@ -415,7 +413,7 @@ else: nextentry = [] - return tmpl(shortlog and 'shortlog' or 'changelog', changenav=changenav, + return tmpl('shortlog' if shortlog else 'changelog', changenav=changenav, node=ctx.hex(), rev=pos, symrev=symrev, changesets=count, entries=entries, latestentry=latestentry, nextentry=nextentry, @@ -1178,11 +1176,16 @@ Information rendered by this handler can be used to create visual representations of repository topology. - The ``revision`` URL parameter controls the starting changeset. + The ``revision`` URL parameter controls the starting changeset. If it's + absent, the default is ``tip``. The ``revcount`` query string argument can define the number of changesets to show information for. + The ``graphtop`` query string argument can specify the starting changeset + for producing ``jsdata`` variable that is used for rendering graph in + JavaScript. By default it has the same value as ``revision``. + This handler will render the ``graph`` template. 
""" @@ -1209,6 +1212,10 @@ morevars = copy.copy(tmpl.defaults['sessionvars']) morevars['revcount'] = revcount * 2 + graphtop = req.form.get('graphtop', [ctx.hex()])[0] + graphvars = copy.copy(tmpl.defaults['sessionvars']) + graphvars['graphtop'] = graphtop + count = len(web.repo) pos = rev @@ -1217,94 +1224,97 @@ changenav = webutil.revnav(web.repo).gen(pos, revcount, count) tree = [] + nextentry = [] + lastrev = 0 if pos != -1: allrevs = web.repo.changelog.revs(pos, 0) revs = [] for i in allrevs: revs.append(i) - if len(revs) >= revcount: + if len(revs) >= revcount + 1: break + if len(revs) > revcount: + nextentry = [webutil.commonentry(web.repo, web.repo[revs[-1]])] + revs = revs[:-1] + + lastrev = revs[-1] + # We have to feed a baseset to dagwalker as it is expecting smartset # object. This does not have a big impact on hgweb performance itself # since hgweb graphing code is not itself lazy yet. dag = graphmod.dagwalker(web.repo, smartset.baseset(revs)) # As we said one line above... not lazy. 
- tree = list(graphmod.colored(dag, web.repo)) - - def getcolumns(tree): - cols = 0 - for (id, type, ctx, vtx, edges) in tree: - if type != graphmod.CHANGESET: - continue - cols = max(cols, max([edge[0] for edge in edges] or [0]), - max([edge[1] for edge in edges] or [0])) - return cols - - def graphdata(usetuples, encodestr): - data = [] + tree = list(item for item in graphmod.colored(dag, web.repo) + if item[1] == graphmod.CHANGESET) - row = 0 - for (id, type, ctx, vtx, edges) in tree: - if type != graphmod.CHANGESET: - continue - node = pycompat.bytestr(ctx) - age = encodestr(templatefilters.age(ctx.date())) - desc = templatefilters.firstline(encodestr(ctx.description())) - desc = url.escape(templatefilters.nonempty(desc)) - user = url.escape(templatefilters.person(encodestr(ctx.user()))) - branch = url.escape(encodestr(ctx.branch())) - try: - branchnode = web.repo.branchtip(branch) - except error.RepoLookupError: - branchnode = None - branch = branch, branchnode == ctx.node() + def nodecurrent(ctx): + wpnodes = web.repo.dirstate.parents() + if wpnodes[1] == nullid: + wpnodes = wpnodes[:1] + if ctx.node() in wpnodes: + return '@' + return '' + + def nodesymbol(ctx): + if ctx.obsolete(): + return 'x' + elif ctx.isunstable(): + return '*' + elif ctx.closesbranch(): + return '_' + else: + return 'o' - if usetuples: - data.append((node, vtx, edges, desc, user, age, branch, - [url.escape(encodestr(x)) for x in ctx.tags()], - [url.escape(encodestr(x)) - for x in ctx.bookmarks()])) - else: - edgedata = [{'col': edge[0], 'nextcol': edge[1], - 'color': (edge[2] - 1) % 6 + 1, - 'width': edge[3], 'bcolor': edge[4]} - for edge in edges] + def fulltree(): + pos = web.repo[graphtop].rev() + tree = [] + if pos != -1: + revs = web.repo.changelog.revs(pos, lastrev) + dag = graphmod.dagwalker(web.repo, smartset.baseset(revs)) + tree = list(item for item in graphmod.colored(dag, web.repo) + if item[1] == graphmod.CHANGESET) + return tree + + def jsdata(): + return [{'node': 
pycompat.bytestr(ctx), + 'graphnode': nodecurrent(ctx) + nodesymbol(ctx), + 'vertex': vtx, + 'edges': edges} + for (id, type, ctx, vtx, edges) in fulltree()] - data.append( - {'node': node, - 'col': vtx[0], - 'color': (vtx[1] - 1) % 6 + 1, - 'edges': edgedata, - 'row': row, - 'nextrow': row + 1, - 'desc': desc, - 'user': user, - 'age': age, - 'bookmarks': webutil.nodebookmarksdict( - web.repo, ctx.node()), - 'branches': webutil.nodebranchdict(web.repo, ctx), - 'inbranch': webutil.nodeinbranch(web.repo, ctx), - 'tags': webutil.nodetagsdict(web.repo, ctx.node())}) + def nodes(): + parity = paritygen(web.stripecount) + for row, (id, type, ctx, vtx, edges) in enumerate(tree): + entry = webutil.commonentry(web.repo, ctx) + edgedata = [{'col': edge[0], + 'nextcol': edge[1], + 'color': (edge[2] - 1) % 6 + 1, + 'width': edge[3], + 'bcolor': edge[4]} + for edge in edges] - row += 1 - - return data + entry.update({'col': vtx[0], + 'color': (vtx[1] - 1) % 6 + 1, + 'parity': next(parity), + 'edges': edgedata, + 'row': row, + 'nextrow': row + 1}) - cols = getcolumns(tree) + yield entry + rows = len(tree) - canvasheight = (rows + 1) * bg_height - 27 return tmpl('graph', rev=rev, symrev=symrev, revcount=revcount, uprev=uprev, lessvars=lessvars, morevars=morevars, downrev=downrev, - cols=cols, rows=rows, - canvaswidth=(cols + 1) * bg_height, - truecanvasheight=rows * bg_height, - canvasheight=canvasheight, bg_height=bg_height, - # {jsdata} will be passed to |json, so it must be in utf-8 - jsdata=lambda **x: graphdata(True, encoding.fromlocal), - nodes=lambda **x: graphdata(False, pycompat.bytestr), + graphvars=graphvars, + rows=rows, + bg_height=bg_height, + changesets=count, + nextentry=nextentry, + jsdata=lambda **x: jsdata(), + nodes=lambda **x: nodes(), node=ctx.hex(), changenav=changenav) def _getdoc(e): diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/hgweb/webutil.py --- a/mercurial/hgweb/webutil.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/hgweb/webutil.py Mon Jan 22 
17:53:02 2018 -0500 @@ -32,6 +32,7 @@ pathutil, pycompat, templatefilters, + templatekw, ui as uimod, util, ) @@ -351,6 +352,12 @@ def formatlinerange(fromline, toline): return '%d:%d' % (fromline + 1, toline) +def succsandmarkers(repo, ctx): + for item in templatekw.showsuccsandmarkers(repo, ctx): + item['successors'] = _siblings(repo[successor] + for successor in item['successors']) + yield item + def commonentry(repo, ctx): node = ctx.node() return { @@ -361,6 +368,9 @@ 'date': ctx.date(), 'extra': ctx.extra(), 'phase': ctx.phasestr(), + 'obsolete': ctx.obsolete(), + 'succsandmarkers': lambda **x: succsandmarkers(repo, ctx), + 'instabilities': [{"instability": i} for i in ctx.instabilities()], 'branch': nodebranchnodefault(ctx), 'inbranch': nodeinbranch(repo, ctx), 'branches': nodebranchdict(repo, ctx), @@ -409,7 +419,7 @@ files = [] parity = paritygen(web.stripecount) for blockno, f in enumerate(ctx.files()): - template = f in ctx and 'filenodelink' or 'filenolink' + template = 'filenodelink' if f in ctx else 'filenolink' files.append(tmpl(template, node=ctx.hex(), file=f, blockno=blockno + 1, parity=next(parity))) @@ -571,7 +581,7 @@ fileno = 0 for filename, adds, removes, isbinary in stats: - template = filename in files and 'diffstatlink' or 'diffstatnolink' + template = 'diffstatlink' if filename in files else 'diffstatnolink' total = adds + removes fileno += 1 yield tmpl(template, node=ctx.hex(), file=filename, fileno=fileno, diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/hook.py --- a/mercurial/hook.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/hook.py Mon Jan 22 17:53:02 2018 -0500 @@ -91,7 +91,7 @@ starttime = util.timer() try: - r = obj(ui=ui, repo=repo, hooktype=htype, **args) + r = obj(ui=ui, repo=repo, hooktype=htype, **pycompat.strkwargs(args)) except Exception as exc: if isinstance(exc, error.Abort): ui.warn(_('error: %s hook failed: %s\n') % diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/httpconnection.py --- a/mercurial/httpconnection.py Mon 
Jan 08 16:07:51 2018 -0800 +++ b/mercurial/httpconnection.py Mon Jan 22 17:53:02 2018 -0500 @@ -248,7 +248,7 @@ return self.https_open(req) def makehttpcon(*args, **kwargs): k2 = dict(kwargs) - k2['use_ssl'] = False + k2[r'use_ssl'] = False return HTTPConnection(*args, **k2) return self.do_open(makehttpcon, req, False) @@ -288,8 +288,8 @@ if '[' in host: host = host[1:-1] - kwargs['keyfile'] = keyfile - kwargs['certfile'] = certfile + kwargs[r'keyfile'] = keyfile + kwargs[r'certfile'] = certfile con = HTTPConnection(host, port, use_ssl=True, ssl_wrap_socket=sslutil.wrapsocket, diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/httppeer.py --- a/mercurial/httppeer.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/httppeer.py Mon Jan 22 17:53:02 2018 -0500 @@ -161,6 +161,41 @@ h.close() getattr(h, "close_all", lambda: None)() + def _openurl(self, req): + if (self._ui.debugflag + and self._ui.configbool('devel', 'debug.peer-request')): + dbg = self._ui.debug + line = 'devel-peer-request: %s\n' + dbg(line % '%s %s' % (req.get_method(), req.get_full_url())) + hgargssize = None + + for header, value in sorted(req.header_items()): + if header.startswith('X-hgarg-'): + if hgargssize is None: + hgargssize = 0 + hgargssize += len(value) + else: + dbg(line % ' %s %s' % (header, value)) + + if hgargssize is not None: + dbg(line % ' %d bytes of commands arguments in headers' + % hgargssize) + + if req.has_data(): + data = req.get_data() + length = getattr(data, 'length', None) + if length is None: + length = len(data) + dbg(line % ' %d bytes of data' % length) + + start = util.timer() + + ret = self._urlopener.open(req) + if self._ui.configbool('devel', 'debug.peer-request'): + dbg(line % ' finished in %.4f seconds (%s)' + % (util.timer() - start, ret.code)) + return ret + # Begin of _basepeer interface. 
@util.propertycache @@ -204,6 +239,7 @@ self._caps = set(self._call('capabilities').split()) def _callstream(self, cmd, _compressible=False, **args): + args = pycompat.byteskwargs(args) if cmd == 'pushkey': args['data'] = '' data = args.pop('data', None) @@ -222,7 +258,7 @@ if not data: data = strargs else: - if isinstance(data, basestring): + if isinstance(data, bytes): i = io.BytesIO(data) i.length = len(data) data = i @@ -297,7 +333,7 @@ self.ui.debug("sending %s bytes\n" % size) req.add_unredirected_header('Content-Length', '%d' % size) try: - resp = self._urlopener.open(req) + resp = self._openurl(req) except urlerr.httperror as inst: if inst.code == 401: raise error.Abort(_('authorization failed')) diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/keepalive.py --- a/mercurial/keepalive.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/keepalive.py Mon Jan 22 17:53:02 2018 -0500 @@ -92,6 +92,7 @@ from .i18n import _ from . import ( + node, pycompat, urllibcompat, util, @@ -322,7 +323,7 @@ data = urllibcompat.getdata(req) h.putrequest( req.get_method(), urllibcompat.getselector(req), - **skipheaders) + **pycompat.strkwargs(skipheaders)) if 'content-type' not in headers: h.putheader('Content-type', 'application/x-www-form-urlencoded') @@ -331,7 +332,7 @@ else: h.putrequest( req.get_method(), urllibcompat.getselector(req), - **skipheaders) + **pycompat.strkwargs(skipheaders)) except socket.error as err: raise urlerr.urlerror(err) for k, v in headers.items(): @@ -366,8 +367,8 @@ def __init__(self, sock, debuglevel=0, strict=0, method=None): extrakw = {} if not pycompat.ispy3: - extrakw['strict'] = True - extrakw['buffering'] = True + extrakw[r'strict'] = True + extrakw[r'buffering'] = True httplib.HTTPResponse.__init__(self, sock, debuglevel=debuglevel, method=method, **extrakw) self.fileno = sock.fileno @@ -607,7 +608,7 @@ foo = fo.read() fo.close() m = md5(foo) - print(format % ('normal urllib', m.hexdigest())) + print(format % ('normal urllib', 
node.hex(m.digest()))) # now install the keepalive handler and try again opener = urlreq.buildopener(HTTPHandler()) @@ -617,7 +618,7 @@ foo = fo.read() fo.close() m = md5(foo) - print(format % ('keepalive read', m.hexdigest())) + print(format % ('keepalive read', node.hex(m.digest()))) fo = urlreq.urlopen(url) foo = '' @@ -629,7 +630,7 @@ break fo.close() m = md5(foo) - print(format % ('keepalive readline', m.hexdigest())) + print(format % ('keepalive readline', node.hex(m.digest()))) def comp(N, url): print(' making %i connections to:\n %s' % (N, url)) diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/localrepo.py --- a/mercurial/localrepo.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/localrepo.py Mon Jan 22 17:53:02 2018 -0500 @@ -197,7 +197,7 @@ **kwargs): chunks = exchange.getbundlechunks(self._repo, source, heads=heads, common=common, bundlecaps=bundlecaps, - **kwargs) + **kwargs)[1] cb = util.chunkbuffer(chunks) if exchange.bundle2requested(bundlecaps): @@ -364,11 +364,14 @@ self.root = self.wvfs.base self.path = self.wvfs.join(".hg") self.origroot = path - # These auditor are not used by the vfs, - # only used when writing this comment: basectx.match - self.auditor = pathutil.pathauditor(self.root, self._checknested) - self.nofsauditor = pathutil.pathauditor(self.root, self._checknested, - realfs=False, cached=True) + # This is only used by context.workingctx.match in order to + # detect files in subrepos. + self.auditor = pathutil.pathauditor( + self.root, callback=self._checknested) + # This is only used by context.basectx.match in order to detect + # files in subrepos. + self.nofsauditor = pathutil.pathauditor( + self.root, callback=self._checknested, realfs=False, cached=True) self.baseui = baseui self.ui = baseui.copy() self.ui.copy = baseui.copy # prevent copying repo configuration @@ -499,9 +502,6 @@ # post-dirstate-status hooks self._postdsstatus = [] - # Cache of types representing filtered repos. 
- self._filteredrepotypes = weakref.WeakKeyDictionary() - # generic mapping between names and nodes self.names = namespaces.namespaces() @@ -577,7 +577,8 @@ def _restrictcapabilities(self, caps): if self.ui.configbool('experimental', 'bundle2-advertise'): caps = set(caps) - capsblob = bundle2.encodecaps(bundle2.getrepocaps(self)) + capsblob = bundle2.encodecaps(bundle2.getrepocaps(self, + role='client')) caps.add('bundle2=' + urlreq.quote(capsblob)) return caps @@ -675,23 +676,10 @@ Intended to be overwritten by filtered repo.""" return self - def filtered(self, name): + def filtered(self, name, visibilityexceptions=None): """Return a filtered version of a repository""" - # Python <3.4 easily leaks types via __mro__. See - # https://bugs.python.org/issue17950. We cache dynamically - # created types so this method doesn't leak on every - # invocation. - - key = self.unfiltered().__class__ - if key not in self._filteredrepotypes: - # Build a new type with the repoview mixin and the base - # class of this repo. Give it a name containing the - # filter name to aid debugging. - bases = (repoview.repoview, key) - cls = type(r'%sfilteredrepo' % name, bases, {}) - self._filteredrepotypes[key] = cls - - return self._filteredrepotypes[key](self, name) + cls = repoview.newtype(self.unfiltered().__class__) + return cls(self, name, visibilityexceptions) @repofilecache('bookmarks', 'bookmarks.current') def _bookmarks(self): @@ -701,8 +689,8 @@ def _activebookmark(self): return self._bookmarks.active - # _phaserevs and _phasesets depend on changelog. what we need is to - # call _phasecache.invalidate() if '00changelog.i' was changed, but it + # _phasesets depend on changelog. what we need is to call + # _phasecache.invalidate() if '00changelog.i' was changed, but it # can't be easily expressed in filecache mechanism. 
@storecache('phaseroots', '00changelog.i') def _phasecache(self): @@ -775,7 +763,9 @@ __bool__ = __nonzero__ def __len__(self): - return len(self.changelog) + # no need to pay the cost of repoview.changelog + unfi = self.unfiltered() + return len(unfi.changelog) def __iter__(self): return iter(self.changelog) @@ -1112,7 +1102,7 @@ data = self.wvfs.read(filename) return self._filter(self._encodefilterpats, filename, data) - def wwrite(self, filename, data, flags, backgroundclose=False): + def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs): """write ``data`` into ``filename`` in the working directory This returns length of written (maybe decoded) data. @@ -1121,9 +1111,12 @@ if 'l' in flags: self.wvfs.symlink(data, filename) else: - self.wvfs.write(filename, data, backgroundclose=backgroundclose) + self.wvfs.write(filename, data, backgroundclose=backgroundclose, + **kwargs) if 'x' in flags: self.wvfs.setflags(filename, False, True) + else: + self.wvfs.setflags(filename, False, False) return len(data) def wwritedata(self, filename, data): @@ -1147,7 +1140,6 @@ raise error.ProgrammingError('transaction requires locking') tr = self.currenttransaction() if tr is not None: - scmutil.registersummarycallback(self, tr, desc) return tr.nest() # abort here if the journal already exists @@ -1244,6 +1236,8 @@ # gating. 
tracktags(tr2) repo = reporef() + if repo.ui.configbool('experimental', 'single-head-per-branch'): + scmutil.enforcesinglehead(repo, tr2, desc) if hook.hashook(repo.ui, 'pretxnclose-bookmark'): for name, (old, new) in sorted(tr.changes['bookmarks'].items()): args = tr.hookargs.copy() @@ -1286,7 +1280,7 @@ validator=validate, releasefn=releasefn, checkambigfiles=_cachedfiles) - tr.changes['revs'] = set() + tr.changes['revs'] = xrange(0, 0) tr.changes['obsmarkers'] = set() tr.changes['phases'] = {} tr.changes['bookmarks'] = {} @@ -1329,7 +1323,11 @@ **pycompat.strkwargs(hookargs)) reporef()._afterlock(hookfunc) tr.addfinalize('txnclose-hook', txnclosehook) - tr.addpostclose('warms-cache', self._buildcacheupdater(tr)) + # Include a leading "-" to make it happen before the transaction summary + # reports registered via scmutil.registersummarycallback() whose names + # are 00-txnreport etc. That way, the caches will be warm when the + # callbacks run. + tr.addpostclose('-warm-cache', self._buildcacheupdater(tr)) def txnaborthook(tr2): """To be run if transaction is aborted """ @@ -1587,29 +1585,18 @@ # determine whether it can be inherited if parentenvvar is not None: parentlock = encoding.environ.get(parentenvvar) - try: - l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn, - acquirefn=acquirefn, desc=desc, - inheritchecker=inheritchecker, - parentlock=parentlock) - except error.LockHeld as inst: - if not wait: - raise - # show more details for new-style locks - if ':' in inst.locker: - host, pid = inst.locker.split(":", 1) - self.ui.warn( - _("waiting for lock on %s held by process %r " - "on host %r\n") % (desc, pid, host)) - else: - self.ui.warn(_("waiting for lock on %s held by %r\n") % - (desc, inst.locker)) - # default to 600 seconds timeout - l = lockmod.lock(vfs, lockname, - int(self.ui.config("ui", "timeout")), - releasefn=releasefn, acquirefn=acquirefn, - desc=desc) - self.ui.warn(_("got lock after %s seconds\n") % l.delay) + + timeout = 0 + warntimeout = 
0 + if wait: + timeout = self.ui.configint("ui", "timeout") + warntimeout = self.ui.configint("ui", "timeout.warn") + + l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout, + releasefn=releasefn, + acquirefn=acquirefn, desc=desc, + inheritchecker=inheritchecker, + parentlock=parentlock) return l def _afterlock(self, callback): diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/lock.py --- a/mercurial/lock.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/lock.py Mon Jan 22 17:53:02 2018 -0500 @@ -14,6 +14,8 @@ import time import warnings +from .i18n import _ + from . import ( encoding, error, @@ -39,6 +41,58 @@ raise return result +def trylock(ui, vfs, lockname, timeout, warntimeout, *args, **kwargs): + """return an acquired lock or raise an a LockHeld exception + + This function is responsible to issue warnings and or debug messages about + the held lock while trying to acquires it.""" + + def printwarning(printer, locker): + """issue the usual "waiting on lock" message through any channel""" + # show more details for new-style locks + if ':' in locker: + host, pid = locker.split(":", 1) + msg = _("waiting for lock on %s held by process %r " + "on host %r\n") % (l.desc, pid, host) + else: + msg = _("waiting for lock on %s held by %r\n") % (l.desc, locker) + printer(msg) + + l = lock(vfs, lockname, 0, *args, dolock=False, **kwargs) + + debugidx = 0 if (warntimeout and timeout) else -1 + warningidx = 0 + if not timeout: + warningidx = -1 + elif warntimeout: + warningidx = warntimeout + + delay = 0 + while True: + try: + l._trylock() + break + except error.LockHeld as inst: + if delay == debugidx: + printwarning(ui.debug, inst.locker) + if delay == warningidx: + printwarning(ui.warn, inst.locker) + if timeout <= delay: + raise error.LockHeld(errno.ETIMEDOUT, inst.filename, + l.desc, inst.locker) + time.sleep(1) + delay += 1 + + l.delay = delay + if l.delay: + if 0 <= warningidx <= l.delay: + ui.warn(_("got lock after %s seconds\n") % l.delay) + else: + 
ui.debug("got lock after %s seconds\n" % l.delay) + if l.acquirefn: + l.acquirefn() + return l + class lock(object): '''An advisory lock held by one process to control access to a set of files. Non-cooperating processes or incorrectly written scripts @@ -60,7 +114,8 @@ _host = None def __init__(self, vfs, file, timeout=-1, releasefn=None, acquirefn=None, - desc=None, inheritchecker=None, parentlock=None): + desc=None, inheritchecker=None, parentlock=None, + dolock=True): self.vfs = vfs self.f = file self.held = 0 @@ -74,9 +129,10 @@ self._inherited = False self.postrelease = [] self.pid = self._getpid() - self.delay = self.lock() - if self.acquirefn: - self.acquirefn() + if dolock: + self.delay = self.lock() + if self.acquirefn: + self.acquirefn() def __enter__(self): return self diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/logexchange.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/mercurial/logexchange.py Mon Jan 22 17:53:02 2018 -0500 @@ -0,0 +1,118 @@ +# logexchange.py +# +# Copyright 2017 Augie Fackler +# Copyright 2017 Sean Farley +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +from __future__ import absolute_import + +from .node import hex + +from . 
import ( + vfs as vfsmod, +) + +# directory name in .hg/ in which remotenames files will be present +remotenamedir = 'logexchange' + +def readremotenamefile(repo, filename): + """ + reads a file from .hg/logexchange/ directory and yields it's content + filename: the file to be read + yield a tuple (node, remotepath, name) + """ + + vfs = vfsmod.vfs(repo.vfs.join(remotenamedir)) + if not vfs.exists(filename): + return + f = vfs(filename) + lineno = 0 + for line in f: + line = line.strip() + if not line: + continue + # contains the version number + if lineno == 0: + lineno += 1 + try: + node, remote, rname = line.split('\0') + yield node, remote, rname + except ValueError: + pass + + f.close() + +def readremotenames(repo): + """ + read the details about the remotenames stored in .hg/logexchange/ and + yields a tuple (node, remotepath, name). It does not yields information + about whether an entry yielded is branch or bookmark. To get that + information, call the respective functions. + """ + + for bmentry in readremotenamefile(repo, 'bookmarks'): + yield bmentry + for branchentry in readremotenamefile(repo, 'branches'): + yield branchentry + +def writeremotenamefile(repo, remotepath, names, nametype): + vfs = vfsmod.vfs(repo.vfs.join(remotenamedir)) + f = vfs(nametype, 'w', atomictemp=True) + # write the storage version info on top of file + # version '0' represents the very initial version of the storage format + f.write('0\n\n') + + olddata = set(readremotenamefile(repo, nametype)) + # re-save the data from a different remote than this one. 
+ for node, oldpath, rname in sorted(olddata): + if oldpath != remotepath: + f.write('%s\0%s\0%s\n' % (node, oldpath, rname)) + + for name, node in sorted(names.iteritems()): + if nametype == "branches": + for n in node: + f.write('%s\0%s\0%s\n' % (n, remotepath, name)) + elif nametype == "bookmarks": + if node: + f.write('%s\0%s\0%s\n' % (node, remotepath, name)) + + f.close() + +def saveremotenames(repo, remotepath, branches=None, bookmarks=None): + """ + save remotenames i.e. remotebookmarks and remotebranches in their + respective files under ".hg/logexchange/" directory. + """ + wlock = repo.wlock() + try: + if bookmarks: + writeremotenamefile(repo, remotepath, bookmarks, 'bookmarks') + if branches: + writeremotenamefile(repo, remotepath, branches, 'branches') + finally: + wlock.release() + +def pullremotenames(localrepo, remoterepo): + """ + pulls bookmarks and branches information of the remote repo during a + pull or clone operation. + localrepo is our local repository + remoterepo is the peer instance + """ + remotepath = remoterepo.url() + bookmarks = remoterepo.listkeys('bookmarks') + # on a push, we don't want to keep obsolete heads since + # they won't show up as heads on the next pull, so we + # remove them here otherwise we would require the user + # to issue a pull to refresh the storage + bmap = {} + repo = localrepo.unfiltered() + for branch, nodes in remoterepo.branchmap().iteritems(): + bmap[branch] = [] + for node in nodes: + if node in repo and not repo[node].obsolete(): + bmap[branch].append(hex(node)) + + saveremotenames(localrepo, remotepath, bmap, bookmarks) diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/mail.py --- a/mercurial/mail.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/mail.py Mon Jan 22 17:53:02 2018 -0500 @@ -152,7 +152,7 @@ fp = open(mbox, 'ab+') # Should be time.asctime(), but Windows prints 2-characters day # of month instead of one. Make them print the same thing. 
- date = time.strftime('%a %b %d %H:%M:%S %Y', time.localtime()) + date = time.strftime(r'%a %b %d %H:%M:%S %Y', time.localtime()) fp.write('From %s %s\n' % (sender, date)) fp.write(msg) fp.write('\n\n') diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/manifest.py --- a/mercurial/manifest.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/manifest.py Mon Jan 22 17:53:02 2018 -0500 @@ -810,7 +810,7 @@ if p in self._files: yield self._subpath(p) else: - for f in self._dirs[p].iterkeys(): + for f in self._dirs[p]: yield f def keys(self): diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/match.py --- a/mercurial/match.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/match.py Mon Jan 22 17:53:02 2018 -0500 @@ -305,9 +305,6 @@ Returns the string 'all' if the given directory and all subdirectories should be visited. Otherwise returns True or False indicating whether the given directory should be visited. - - This function's behavior is undefined if it has returned False for - one of the dir's parent directories. ''' return True @@ -460,17 +457,10 @@ class differencematcher(basematcher): '''Composes two matchers by matching if the first matches and the second - does not. Well, almost... If the user provides a pattern like "-X foo foo", - Mercurial actually does match "foo" against that. That's because exact - matches are treated specially. So, since this differencematcher is used for - excludes, it needs to special-case exact matching. + does not. The second matcher's non-matching-attributes (root, cwd, bad, explicitdir, traversedir) are ignored. 
- - TODO: If we want to keep the behavior described above for exact matches, we - should consider instead treating the above case something like this: - union(exact(foo), difference(pattern(foo), include(foo))) ''' def __init__(self, m1, m2): super(differencematcher, self).__init__(m1._root, m1._cwd) @@ -481,7 +471,7 @@ self.traversedir = m1.traversedir def matchfn(self, f): - return self._m1(f) and (not self._m2(f) or self._m1.exact(f)) + return self._m1(f) and not self._m2(f) @propertycache def _files(self): @@ -496,9 +486,6 @@ def visitdir(self, dir): if self._m2.visitdir(dir) == 'all': - # There's a bug here: If m1 matches file 'dir/file' and m2 excludes - # 'dir' (recursively), we should still visit 'dir' due to the - # exception we have for exact matches. return False return bool(self._m1.visitdir(dir)) diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/mdiff.py --- a/mercurial/mdiff.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/mdiff.py Mon Jan 22 17:53:02 2018 -0500 @@ -67,6 +67,7 @@ 'ignoreblanklines': False, 'upgrade': False, 'showsimilarity': False, + 'worddiff': False, } def __init__(self, **opts): @@ -99,7 +100,7 @@ if blank and opts.ignoreblanklines: text = re.sub('\n+', '\n', text).strip('\n') if opts.ignorewseol: - text = re.sub(r'[ \t\r\f]+\n', r'\n', text) + text = re.sub(br'[ \t\r\f]+\n', r'\n', text) return text def splitblock(base1, lines1, base2, lines2, opts): @@ -355,7 +356,7 @@ # the previous hunk context until we find a line starting with an # alphanumeric char. 
for i in xrange(astart - 1, lastpos - 1, -1): - if l1[i][0].isalnum(): + if l1[i][0:1].isalnum(): func = ' ' + l1[i].rstrip()[:40] lastfunc[1] = func break diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/merge.py --- a/mercurial/merge.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/merge.py Mon Jan 22 17:53:02 2018 -0500 @@ -646,6 +646,14 @@ return config def _checkunknownfile(repo, wctx, mctx, f, f2=None): + if wctx.isinmemory(): + # Nothing to do in IMM because nothing in the "working copy" can be an + # unknown file. + # + # Note that we should bail out here, not in ``_checkunknownfiles()``, + # because that function does other useful work. + return False + if f2 is None: f2 = f return (repo.wvfs.audit.check(f) @@ -674,7 +682,11 @@ # updated with any new dirs that are checked and found to be absent. self._missingdircache = set() - def __call__(self, repo, f): + def __call__(self, repo, wctx, f): + if wctx.isinmemory(): + # Nothing to do in IMM for the same reason as ``_checkunknownfile``. + return False + # Check for path prefixes that exist as unknown files. for p in reversed(list(util.finddirs(f))): if p in self._missingdircache: @@ -726,7 +738,7 @@ if _checkunknownfile(repo, wctx, mctx, f): fileconflicts.add(f) elif pathconfig and f not in wctx: - path = checkunknowndirs(repo, f) + path = checkunknowndirs(repo, wctx, f) if path is not None: pathconflicts.add(path) elif m == 'dg': @@ -1333,10 +1345,6 @@ repo.ui.warn(_("current directory was removed\n" "(consider changing to repo root: %s)\n") % repo.root) - # It's necessary to flush here in case we're inside a worker fork and will - # quit after this function. 
- wctx.flushall() - def batchget(repo, mctx, wctx, actions): """apply gets to the working directory @@ -1368,7 +1376,9 @@ if repo.wvfs.lexists(absf): util.rename(absf, orig) wctx[f].clearunknown() - wctx[f].write(fctx(f).data(), flags, backgroundclose=True) + atomictemp = ui.configbool("experimental", "update.atomic-file") + wctx[f].write(fctx(f).data(), flags, backgroundclose=True, + atomictemp=atomictemp) if i == 100: yield i, f i = 0 @@ -1376,9 +1386,6 @@ if i > 0: yield i, f - # It's necessary to flush here in case we're inside a worker fork and will - # quit after this function. - wctx.flushall() def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None): """apply the merge action list to the working directory @@ -1479,10 +1486,6 @@ z += 1 progress(_updating, z, item=f, total=numupdates, unit=_files) - # We should flush before forking into worker processes, since those workers - # flush when they complete, and we don't want to duplicate work. - wctx.flushall() - # get in parallel prog = worker.worker(repo.ui, cost, batchget, (repo, mctx, wctx), actions['g']) @@ -1555,6 +1558,9 @@ usemergedriver = not overwrite and mergeactions and ms.mergedriver if usemergedriver: + if wctx.isinmemory(): + raise error.InMemoryMergeConflictsError("in-memory merge does not " + "support mergedriver") ms.commit() proceed = driverpreprocess(repo, ms, wctx, labels=labels) # the driver might leave some files unresolved @@ -1850,8 +1856,9 @@ if not force and (wc.files() or wc.deleted()): raise error.Abort(_("uncommitted changes"), hint=_("use 'hg status' to list changes")) - for s in sorted(wc.substate): - wc.sub(s).bailifchanged() + if not wc.isinmemory(): + for s in sorted(wc.substate): + wc.sub(s).bailifchanged() elif not overwrite: if p1 == p2: # no-op update @@ -1966,7 +1973,7 @@ ### apply phase if not branchmerge: # just jump to the new rev fp1, fp2, xp1, xp2 = fp2, nullid, xp2, '' - if not partial: + if not partial and not wc.isinmemory(): repo.hook('preupdate', 
throw=True, parent1=xp1, parent2=xp2) # note that we're in the middle of an update repo.vfs.write('updatestate', p2.hex()) @@ -2004,9 +2011,8 @@ 'see "hg help -e fsmonitor")\n')) stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels) - wc.flushall() - if not partial: + if not partial and not wc.isinmemory(): with repo.dirstate.parentchange(): repo.setparents(fp1, fp2) recordupdates(repo, actions, branchmerge) diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/minifileset.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/mercurial/minifileset.py Mon Jan 22 17:53:02 2018 -0500 @@ -0,0 +1,85 @@ +# minifileset.py - a simple language to select files +# +# Copyright 2017 Facebook, Inc. +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +from __future__ import absolute_import + +from .i18n import _ +from . import ( + error, + fileset, +) + +def _compile(tree): + if not tree: + raise error.ParseError(_("missing argument")) + op = tree[0] + if op in {'symbol', 'string', 'kindpat'}: + name = fileset.getpattern(tree, {'path'}, _('invalid file pattern')) + if name.startswith('**'): # file extension test, ex. 
"**.tar.gz" + ext = name[2:] + for c in ext: + if c in '*{}[]?/\\': + raise error.ParseError(_('reserved character: %s') % c) + return lambda n, s: n.endswith(ext) + elif name.startswith('path:'): # directory or full path test + p = name[5:] # prefix + pl = len(p) + f = lambda n, s: n.startswith(p) and (len(n) == pl or n[pl] == '/') + return f + raise error.ParseError(_("unsupported file pattern"), + hint=_('paths must be prefixed with "path:"')) + elif op == 'or': + func1 = _compile(tree[1]) + func2 = _compile(tree[2]) + return lambda n, s: func1(n, s) or func2(n, s) + elif op == 'and': + func1 = _compile(tree[1]) + func2 = _compile(tree[2]) + return lambda n, s: func1(n, s) and func2(n, s) + elif op == 'not': + return lambda n, s: not _compile(tree[1])(n, s) + elif op == 'group': + return _compile(tree[1]) + elif op == 'func': + symbols = { + 'all': lambda n, s: True, + 'none': lambda n, s: False, + 'size': lambda n, s: fileset.sizematcher(tree[2])(s), + } + + name = fileset.getsymbol(tree[1]) + if name in symbols: + return symbols[name] + + raise error.UnknownIdentifier(name, symbols.keys()) + elif op == 'minus': # equivalent to 'x and not y' + func1 = _compile(tree[1]) + func2 = _compile(tree[2]) + return lambda n, s: func1(n, s) and not func2(n, s) + elif op == 'negate': + raise error.ParseError(_("can't use negate operator in this context")) + elif op == 'list': + raise error.ParseError(_("can't use a list in this context"), + hint=_('see hg help "filesets.x or y"')) + raise error.ProgrammingError('illegal tree: %r' % (tree,)) + +def compile(text): + """generate a function (path, size) -> bool from filter specification. + + "text" could contain the operators defined by the fileset language for + common logic operations, and parenthesis for grouping. The supported path + tests are '**.extname' for file extension test, and '"path:dir/subdir"' + for prefix test. The ``size()`` predicate is borrowed from filesets to test + file size. 
The predicates ``all()`` and ``none()`` are also supported. + + '(**.php & size(">10MB")) | **.zip | (path:bin & !path:bin/README)' for + example, will catch all php files whose size is greater than 10 MB, all + files whose name ends with ".zip", and all files under "bin" in the repo + root except for "bin/README". + """ + tree = fileset.parse(text) + return _compile(tree) diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/namespaces.py --- a/mercurial/namespaces.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/namespaces.py Mon Jan 22 17:53:02 2018 -0500 @@ -25,6 +25,7 @@ def __init__(self): self._names = util.sortdict() + columns = templatekw.getlogcolumns() # we need current mercurial named objects (bookmarks, tags, and # branches) to be initialized somewhere, so that place is here @@ -32,8 +33,7 @@ bmknamemap = lambda repo, name: tolist(repo._bookmarks.get(name)) bmknodemap = lambda repo, node: repo.nodebookmarks(node) n = namespace("bookmarks", templatename="bookmark", - # i18n: column positioning for "hg log" - logfmt=_("bookmark: %s\n"), + logfmt=columns['bookmark'], listnames=bmknames, namemap=bmknamemap, nodemap=bmknodemap, builtin=True) @@ -43,8 +43,7 @@ tagnamemap = lambda repo, name: tolist(repo._tagscache.tags.get(name)) tagnodemap = lambda repo, node: repo.nodetags(node) n = namespace("tags", templatename="tag", - # i18n: column positioning for "hg log" - logfmt=_("tag: %s\n"), + logfmt=columns['tag'], listnames=tagnames, namemap=tagnamemap, nodemap=tagnodemap, deprecated={'tip'}, @@ -55,8 +54,7 @@ bnamemap = lambda repo, name: tolist(repo.branchtip(name, True)) bnodemap = lambda repo, node: [repo[node].branch()] n = namespace("branches", templatename="branch", - # i18n: column positioning for "hg log" - logfmt=_("branch: %s\n"), + logfmt=columns['branch'], listnames=bnames, namemap=bnamemap, nodemap=bnodemap, builtin=True) diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/obsolete.py --- a/mercurial/obsolete.py Mon Jan 08 16:07:51 2018 -0800 +++ 
b/mercurial/obsolete.py Mon Jan 22 17:53:02 2018 -0500 @@ -776,7 +776,7 @@ # rely on obsstore class default when possible. kwargs = {} if defaultformat is not None: - kwargs['defaultformat'] = defaultformat + kwargs[r'defaultformat'] = defaultformat readonly = not isenabled(repo, createmarkersopt) store = obsstore(repo.svfs, readonly=readonly, **kwargs) if store and readonly: @@ -838,18 +838,10 @@ repo.ui.warn(_('unexpected old value for %r') % key) return False data = util.b85decode(new) - lock = repo.lock() - try: - tr = repo.transaction('pushkey: obsolete markers') - try: - repo.obsstore.mergemarkers(tr, data) - repo.invalidatevolatilesets() - tr.close() - return True - finally: - tr.release() - finally: - lock.release() + with repo.lock(), repo.transaction('pushkey: obsolete markers') as tr: + repo.obsstore.mergemarkers(tr, data) + repo.invalidatevolatilesets() + return True # keep compatibility for the 4.3 cycle def allprecursors(obsstore, nodes, ignoreflags=0): @@ -994,10 +986,10 @@ public = phases.public cl = repo.changelog torev = cl.nodemap.get - for ctx in repo.set('(not public()) and (not obsolete())'): - rev = ctx.rev() + tonode = cl.node + for rev in repo.revs('(not public()) and (not obsolete())'): # We only evaluate mutable, non-obsolete revision - node = ctx.node() + node = tonode(rev) # (future) A cache of predecessors may worth if split is very common for pnode in obsutil.allpredecessors(repo.obsstore, [node], ignoreflags=bumpedfix): @@ -1023,8 +1015,10 @@ divergent = set() obsstore = repo.obsstore newermap = {} - for ctx in repo.set('(not public()) - obsolete()'): - mark = obsstore.predecessors.get(ctx.node(), ()) + tonode = repo.changelog.node + for rev in repo.revs('(not public()) - obsolete()'): + node = tonode(rev) + mark = obsstore.predecessors.get(node, ()) toprocess = set(mark) seen = set() while toprocess: @@ -1036,7 +1030,7 @@ obsutil.successorssets(repo, prec, cache=newermap) newer = [n for n in newermap[prec] if n] if len(newer) > 1: - 
divergent.add(ctx.rev()) + divergent.add(rev) break toprocess.update(obsstore.predecessors.get(prec, ())) return divergent @@ -1079,8 +1073,7 @@ saveeffectflag = repo.ui.configbool('experimental', 'evolution.effect-flags') - tr = repo.transaction('add-obsolescence-marker') - try: + with repo.transaction('add-obsolescence-marker') as tr: markerargs = [] for rel in relations: prec = rel[0] @@ -1121,6 +1114,3 @@ date=date, metadata=localmetadata, ui=repo.ui) repo.filteredrevcache.clear() - tr.close() - finally: - tr.release() diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/obsutil.py --- a/mercurial/obsutil.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/obsutil.py Mon Jan 22 17:53:02 2018 -0500 @@ -9,9 +9,11 @@ import re +from .i18n import _ from . import ( + node as nodemod, phases, - util + util, ) class marker(object): @@ -441,12 +443,12 @@ public = phases.public addedmarkers = tr.changes.get('obsmarkers') addedrevs = tr.changes.get('revs') - seenrevs = set(addedrevs) + seenrevs = set() obsoleted = set() for mark in addedmarkers: node = mark[0] rev = torev(node) - if rev is None or rev in seenrevs: + if rev is None or rev in seenrevs or rev in addedrevs: continue seenrevs.add(rev) if phase(repo, rev) == public: @@ -751,8 +753,35 @@ return values -def successorsetverb(successorset): - """ Return the verb summarizing the successorset +def _getobsfate(successorssets): + """ Compute a changeset obsolescence fate based on its successorssets. + Successors can be the tipmost ones or the immediate ones. This function + return values are not meant to be shown directly to users, it is meant to + be used by internal functions only. 
+ Returns one fate from the following values: + - pruned + - diverged + - superseded + - superseded_split + """ + + if len(successorssets) == 0: + # The commit has been pruned + return 'pruned' + elif len(successorssets) > 1: + return 'diverged' + else: + # No divergence, only one set of successors + successors = successorssets[0] + + if len(successors) == 1: + return 'superseded' + else: + return 'superseded_split' + +def obsfateverb(successorset, markers): + """ Return the verb summarizing the successorset and potentially using + information from the markers """ if not successorset: verb = 'pruned' @@ -795,7 +824,7 @@ line = [] # Verb - line.append(successorsetverb(successors)) + line.append(obsfateverb(successors, markers)) # Operations operations = markersoperations(markers) @@ -835,3 +864,43 @@ line.append(" (between %s and %s)" % (fmtmin_date, fmtmax_date)) return "".join(line) + + +filteredmsgtable = { + "pruned": _("hidden revision '%s' is pruned"), + "diverged": _("hidden revision '%s' has diverged"), + "superseded": _("hidden revision '%s' was rewritten as: %s"), + "superseded_split": _("hidden revision '%s' was split as: %s"), + "superseded_split_several": _("hidden revision '%s' was split as: %s and " + "%d more"), +} + +def _getfilteredreason(repo, changeid, ctx): + """return a human-friendly string on why a obsolete changeset is hidden + """ + successors = successorssets(repo, ctx.node()) + fate = _getobsfate(successors) + + # Be more precise in case the revision is superseded + if fate == 'pruned': + return filteredmsgtable['pruned'] % changeid + elif fate == 'diverged': + return filteredmsgtable['diverged'] % changeid + elif fate == 'superseded': + single_successor = nodemod.short(successors[0][0]) + return filteredmsgtable['superseded'] % (changeid, single_successor) + elif fate == 'superseded_split': + + succs = [] + for node_id in successors[0]: + succs.append(nodemod.short(node_id)) + + if len(succs) <= 2: + fmtsuccs = ', '.join(succs) + return 
filteredmsgtable['superseded_split'] % (changeid, fmtsuccs) + else: + firstsuccessors = ', '.join(succs[:2]) + remainingnumber = len(succs) - 2 + + args = (changeid, firstsuccessors, remainingnumber) + return filteredmsgtable['superseded_split_several'] % args diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/patch.py --- a/mercurial/patch.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/patch.py Mon Jan 22 17:53:02 2018 -0500 @@ -10,7 +10,9 @@ import collections import copy +import difflib import email +import email.parser as emailparser import errno import hashlib import os @@ -45,6 +47,7 @@ gitre = re.compile(br'diff --git a/(.*) b/(.*)') tabsplitter = re.compile(br'(\t+|[^\t]+)') +_nonwordre = re.compile(br'([^a-zA-Z0-9_\x80-\xff])') PatchError = error.PatchError @@ -106,7 +109,7 @@ cur.append(line) c = chunk(cur) - m = email.Parser.Parser().parse(c) + m = emailparser.Parser().parse(c) if not m.is_multipart(): yield msgfp(m) else: @@ -149,6 +152,8 @@ raise StopIteration return l + __next__ = next + inheader = False cur = [] @@ -203,7 +208,7 @@ # attempt to detect the start of a patch # (this heuristic is borrowed from quilt) - diffre = re.compile(br'^(?:Index:[ \t]|diff[ \t]|RCS file: |' + diffre = re.compile(br'^(?:Index:[ \t]|diff[ \t]-|RCS file: |' br'retrieving revision [0-9]+(\.[0-9]+)*$|' br'---[ \t].*?^\+\+\+[ \t]|' br'\*\*\*[ \t].*?^---[ \t])', @@ -213,7 +218,7 @@ fd, tmpname = tempfile.mkstemp(prefix='hg-patch-') tmpfp = os.fdopen(fd, pycompat.sysstr('w')) try: - msg = email.Parser.Parser().parse(fileobj) + msg = emailparser.Parser().parse(fileobj) subject = msg['Subject'] and mail.headdecode(msg['Subject']) data['user'] = msg['From'] and mail.headdecode(msg['From']) @@ -997,16 +1002,26 @@ def getmessages(): return { 'multiple': { + 'apply': _("apply change %d/%d to '%s'?"), 'discard': _("discard change %d/%d to '%s'?"), 'record': _("record change %d/%d to '%s'?"), - 'revert': _("revert change %d/%d to '%s'?"), }, 'single': { + 'apply': _("apply this 
change to '%s'?"), 'discard': _("discard this change to '%s'?"), 'record': _("record this change to '%s'?"), - 'revert': _("revert this change to '%s'?"), }, 'help': { + 'apply': _('[Ynesfdaq?]' + '$$ &Yes, apply this change' + '$$ &No, skip this change' + '$$ &Edit this change manually' + '$$ &Skip remaining changes to this file' + '$$ Apply remaining changes to this &file' + '$$ &Done, skip remaining changes and files' + '$$ Apply &all changes to all remaining files' + '$$ &Quit, applying no changes' + '$$ &? (display help)'), 'discard': _('[Ynesfdaq?]' '$$ &Yes, discard this change' '$$ &No, skip this change' @@ -1027,16 +1042,6 @@ '$$ Record &all changes to all remaining files' '$$ &Quit, recording no changes' '$$ &? (display help)'), - 'revert': _('[Ynesfdaq?]' - '$$ &Yes, revert this change' - '$$ &No, skip this change' - '$$ &Edit this change manually' - '$$ &Skip remaining changes to this file' - '$$ Revert remaining changes to this &file' - '$$ &Done, skip remaining changes and files' - '$$ Revert &all changes to all remaining files' - '$$ &Quit, reverting no changes' - '$$ &? 
(display help)') } } @@ -1990,14 +1995,16 @@ return _applydiff(ui, fp, patchfile, backend, store, strip=strip, prefix=prefix, eolmode=eolmode) +def _canonprefix(repo, prefix): + if prefix: + prefix = pathutil.canonpath(repo.root, repo.getcwd(), prefix) + if prefix != '': + prefix += '/' + return prefix + def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='', eolmode='strict'): - - if prefix: - prefix = pathutil.canonpath(backend.repo.root, backend.repo.getcwd(), - prefix) - if prefix != '': - prefix += '/' + prefix = _canonprefix(backend.repo, prefix) def pstrip(p): return pathtransform(p, strip - 1, prefix)[1] @@ -2183,20 +2190,22 @@ return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode, similarity) -def changedfiles(ui, repo, patchpath, strip=1): +def changedfiles(ui, repo, patchpath, strip=1, prefix=''): backend = fsbackend(ui, repo.root) + prefix = _canonprefix(repo, prefix) with open(patchpath, 'rb') as fp: changed = set() for state, values in iterhunks(fp): if state == 'file': afile, bfile, first_hunk, gp = values if gp: - gp.path = pathtransform(gp.path, strip - 1, '')[1] + gp.path = pathtransform(gp.path, strip - 1, prefix)[1] if gp.oldpath: - gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1] + gp.oldpath = pathtransform(gp.oldpath, strip - 1, + prefix)[1] else: gp = makepatchmeta(backend, afile, bfile, first_hunk, strip, - '') + prefix) changed.add(gp.path) if gp.op == 'RENAME': changed.add(gp.oldpath) @@ -2246,6 +2255,7 @@ 'showfunc': get('show_function', 'showfunc'), 'context': get('unified', getter=ui.config), } + buildopts['worddiff'] = ui.configbool('experimental', 'worddiff') if git: buildopts['git'] = get('git') @@ -2434,7 +2444,7 @@ modified = sorted(modifiedset) added = sorted(addedset) removed = sorted(removedset) - for dst, src in copy.items(): + for dst, src in list(copy.items()): if src not in ctx1: # Files merged in during a merge and then copied/renamed are # reported as copies. 
We want to show them in the diff as additions. @@ -2457,6 +2467,9 @@ def difflabel(func, *args, **kw): '''yields 2-tuples of (output, label) based on the output of func()''' + inlinecolor = False + if kw.get(r'opts'): + inlinecolor = kw[r'opts'].worddiff headprefixes = [('diff', 'diff.diffline'), ('copy', 'diff.extended'), ('rename', 'diff.extended'), @@ -2473,6 +2486,9 @@ head = False for chunk in func(*args, **kw): lines = chunk.split('\n') + matches = {} + if inlinecolor: + matches = _findmatches(lines) for i, line in enumerate(lines): if i != 0: yield ('\n', '') @@ -2496,11 +2512,17 @@ for prefix, label in prefixes: if stripline.startswith(prefix): if diffline: - for token in tabsplitter.findall(stripline): - if '\t' == token[0]: - yield (token, 'diff.tab') - else: - yield (token, label) + if i in matches: + for t, l in _inlinediff(lines[i].rstrip(), + lines[matches[i]].rstrip(), + label): + yield (t, l) + else: + for token in tabsplitter.findall(stripline): + if '\t' == token[0]: + yield (token, 'diff.tab') + else: + yield (token, label) else: yield (stripline, label) break @@ -2509,6 +2531,75 @@ if line != stripline: yield (line[len(stripline):], 'diff.trailingwhitespace') +def _findmatches(slist): + '''Look for insertion matches to deletion and returns a dict of + correspondences. + ''' + lastmatch = 0 + matches = {} + for i, line in enumerate(slist): + if line == '': + continue + if line[0] == '-': + lastmatch = max(lastmatch, i) + newgroup = False + for j, newline in enumerate(slist[lastmatch + 1:]): + if newline == '': + continue + if newline[0] == '-' and newgroup: # too far, no match + break + if newline[0] == '+': # potential match + newgroup = True + sim = difflib.SequenceMatcher(None, line, newline).ratio() + if sim > 0.7: + lastmatch = lastmatch + 1 + j + matches[i] = lastmatch + matches[lastmatch] = i + break + return matches + +def _inlinediff(s1, s2, operation): + '''Perform string diff to highlight specific changes.''' + operation_skip = '+?' 
if operation == 'diff.deleted' else '-?' + if operation == 'diff.deleted': + s2, s1 = s1, s2 + + buff = [] + # we never want to higlight the leading +- + if operation == 'diff.deleted' and s2.startswith('-'): + label = operation + token = '-' + s2 = s2[1:] + s1 = s1[1:] + elif operation == 'diff.inserted' and s1.startswith('+'): + label = operation + token = '+' + s2 = s2[1:] + s1 = s1[1:] + else: + raise error.ProgrammingError("Case not expected, operation = %s" % + operation) + + s = difflib.ndiff(_nonwordre.split(s2), _nonwordre.split(s1)) + for part in s: + if part[0] in operation_skip or len(part) == 2: + continue + l = operation + '.highlight' + if part[0] in ' ': + l = operation + if part[2:] == '\t': + l = 'diff.tab' + if l == label: # contiguous token with same label + token += part[2:] + continue + else: + buff.append((token, label)) + label = l + token = part[2:] + buff.append((token, label)) + + return buff + def diffui(*args, **kw): '''like diff(), but yields 2-tuples of (output, label) for ui.write()''' return difflabel(diff, *args, **kw) @@ -2564,7 +2655,7 @@ l = len(text) s = hashlib.sha1('blob %d\0' % l) s.update(text) - return s.hexdigest() + return hex(s.digest()) if opts.noprefix: aprefix = bprefix = '' diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/phases.py --- a/mercurial/phases.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/phases.py Mon Jan 22 17:53:02 2018 -0500 @@ -115,6 +115,7 @@ ) from . 
import ( error, + pycompat, smartset, txnutil, util, @@ -202,31 +203,43 @@ if _load: # Cheap trick to allow shallow-copy without copy module self.phaseroots, self.dirty = _readroots(repo, phasedefaults) - self._phaserevs = None + self._loadedrevslen = 0 self._phasesets = None self.filterunknown(repo) self.opener = repo.svfs - def getrevset(self, repo, phases): + def getrevset(self, repo, phases, subset=None): """return a smartset for the given phases""" self.loadphaserevs(repo) # ensure phase's sets are loaded - - if self._phasesets and all(self._phasesets[p] is not None - for p in phases): - # fast path - use _phasesets - revs = self._phasesets[phases[0]] - if len(phases) > 1: - revs = revs.copy() # only copy when needed - for p in phases[1:]: - revs.update(self._phasesets[p]) + phases = set(phases) + if public not in phases: + # fast path: _phasesets contains the interesting sets, + # might only need a union and post-filtering. + if len(phases) == 1: + [p] = phases + revs = self._phasesets[p] + else: + revs = set.union(*[self._phasesets[p] for p in phases]) if repo.changelog.filteredrevs: revs = revs - repo.changelog.filteredrevs - return smartset.baseset(revs) + if subset is None: + return smartset.baseset(revs) + else: + return subset & smartset.baseset(revs) else: - # slow path - enumerate all revisions - phase = self.phase - revs = (r for r in repo if phase(repo, r) in phases) - return smartset.generatorset(revs, iterasc=True) + phases = set(allphases).difference(phases) + if not phases: + return smartset.fullreposet(repo) + if len(phases) == 1: + [p] = phases + revs = self._phasesets[p] + else: + revs = set.union(*[self._phasesets[p] for p in phases]) + if subset is None: + subset = smartset.fullreposet(repo) + if not revs: + return subset + return subset.filter(lambda r: r not in revs) def copy(self): # Shallow copy meant to ensure isolation in @@ -235,13 +248,14 @@ ph.phaseroots = self.phaseroots[:] ph.dirty = self.dirty ph.opener = self.opener - 
ph._phaserevs = self._phaserevs + ph._loadedrevslen = self._loadedrevslen ph._phasesets = self._phasesets return ph def replace(self, phcache): """replace all values in 'self' with content of phcache""" - for a in ('phaseroots', 'dirty', 'opener', '_phaserevs', '_phasesets'): + for a in ('phaseroots', 'dirty', 'opener', '_loadedrevslen', + '_phasesets'): setattr(self, a, getattr(phcache, a)) def _getphaserevsnative(self, repo): @@ -253,42 +267,38 @@ def _computephaserevspure(self, repo): repo = repo.unfiltered() - revs = [public] * len(repo.changelog) - self._phaserevs = revs - self._populatephaseroots(repo) - for phase in trackedphases: - roots = list(map(repo.changelog.rev, self.phaseroots[phase])) - if roots: - for rev in roots: - revs[rev] = phase - for rev in repo.changelog.descendants(roots): - revs[rev] = phase + cl = repo.changelog + self._phasesets = [set() for phase in allphases] + roots = pycompat.maplist(cl.rev, self.phaseroots[secret]) + if roots: + ps = set(cl.descendants(roots)) + for root in roots: + ps.add(root) + self._phasesets[secret] = ps + roots = pycompat.maplist(cl.rev, self.phaseroots[draft]) + if roots: + ps = set(cl.descendants(roots)) + for root in roots: + ps.add(root) + ps.difference_update(self._phasesets[secret]) + self._phasesets[draft] = ps + self._loadedrevslen = len(cl) def loadphaserevs(self, repo): """ensure phase information is loaded in the object""" - if self._phaserevs is None: + if self._phasesets is None: try: res = self._getphaserevsnative(repo) - self._phaserevs, self._phasesets = res + self._loadedrevslen, self._phasesets = res except AttributeError: self._computephaserevspure(repo) def invalidate(self): - self._phaserevs = None + self._loadedrevslen = 0 self._phasesets = None - def _populatephaseroots(self, repo): - """Fills the _phaserevs cache with phases for the roots. 
- """ - cl = repo.changelog - phaserevs = self._phaserevs - for phase in trackedphases: - roots = map(cl.rev, self.phaseroots[phase]) - for root in roots: - phaserevs[root] = phase - def phase(self, repo, rev): - # We need a repo argument here to be able to build _phaserevs + # We need a repo argument here to be able to build _phasesets # if necessary. The repository instance is not stored in # phasecache to avoid reference cycles. The changelog instance # is not stored because it is a filecache() property and can @@ -297,10 +307,13 @@ return public if rev < nullrev: raise ValueError(_('cannot lookup negative revision')) - if self._phaserevs is None or rev >= len(self._phaserevs): + if rev >= self._loadedrevslen: self.invalidate() self.loadphaserevs(repo) - return self._phaserevs[rev] + for phase in trackedphases: + if rev in self._phasesets[phase]: + return phase + return public def write(self): if not self.dirty: @@ -455,10 +468,10 @@ if filtered: self.dirty = True # filterunknown is called by repo.destroyed, we may have no changes in - # root but phaserevs contents is certainly invalid (or at least we + # root but _phasesets contents is certainly invalid (or at least we # have not proper way to check that). related to issue 3858. # - # The other caller is __init__ that have no _phaserevs initialized + # The other caller is __init__ that have no _phasesets initialized # anyway. 
If this change we should consider adding a dedicated # "destroyed" function to phasecache or a proper cache key mechanism # (see branchmap one) diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/policy.py --- a/mercurial/policy.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/policy.py Mon Jan 22 17:53:02 2018 -0500 @@ -74,8 +74,8 @@ (r'cext', r'bdiff'): 1, (r'cext', r'diffhelpers'): 1, (r'cext', r'mpatch'): 1, - (r'cext', r'osutil'): 1, - (r'cext', r'parsers'): 3, + (r'cext', r'osutil'): 3, + (r'cext', r'parsers'): 4, } # map import request to other package or module diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/posix.py --- a/mercurial/posix.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/posix.py Mon Jan 22 17:53:02 2018 -0500 @@ -24,9 +24,12 @@ from . import ( encoding, error, + policy, pycompat, ) +osutil = policy.importmod(r'osutil') + posixfile = open normpath = os.path.normpath samestat = os.path.samestat @@ -302,6 +305,20 @@ Returns None if the path is ok, or a UI string describing the problem.''' return None # on posix platforms, every path is ok +def getfsmountpoint(dirpath): + '''Get the filesystem mount point from a directory (best-effort) + + Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc. + ''' + return getattr(osutil, 'getfsmountpoint', lambda x: None)(dirpath) + +def getfstype(dirpath): + '''Get the filesystem type name from a directory (best-effort) + + Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc. 
+ ''' + return getattr(osutil, 'getfstype', lambda x: None)(dirpath) + def setbinary(fd): pass diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/pycompat.py --- a/mercurial/pycompat.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/pycompat.py Mon Jan 22 17:53:02 2018 -0500 @@ -63,6 +63,7 @@ sysexecutable = os.fsencode(sysexecutable) stringio = io.BytesIO maplist = lambda *args: list(map(*args)) + ziplist = lambda *args: list(zip(*args)) rawinput = input # TODO: .buffer might not exist if std streams were replaced; we'll need @@ -214,7 +215,7 @@ def open(name, mode='r', buffering=-1): return builtins.open(name, sysstr(mode), buffering) - def getoptb(args, shortlist, namelist): + def _getoptbwrapper(orig, args, shortlist, namelist): """ Takes bytes arguments, converts them to unicode, pass them to getopt.getopt(), convert the returned values back to bytes and then @@ -224,7 +225,7 @@ args = [a.decode('latin-1') for a in args] shortlist = shortlist.decode('latin-1') namelist = [a.decode('latin-1') for a in namelist] - opts, args = getopt.getopt(args, shortlist, namelist) + opts, args = orig(args, shortlist, namelist) opts = [(a[0].encode('latin-1'), a[1].encode('latin-1')) for a in opts] args = [a.encode('latin-1') for a in args] @@ -291,8 +292,8 @@ def getdoc(obj): return getattr(obj, '__doc__', None) - def getoptb(args, shortlist, namelist): - return getopt.getopt(args, shortlist, namelist) + def _getoptbwrapper(orig, args, shortlist, namelist): + return orig(args, shortlist, namelist) strkwargs = identity byteskwargs = identity @@ -313,6 +314,7 @@ shlexsplit = shlex.split stringio = cStringIO.StringIO maplist = map + ziplist = zip rawinput = raw_input isjython = sysplatform.startswith('java') @@ -320,3 +322,9 @@ isdarwin = sysplatform == 'darwin' isposix = osname == 'posix' iswindows = osname == 'nt' + +def getoptb(args, shortlist, namelist): + return _getoptbwrapper(getopt.getopt, args, shortlist, namelist) + +def gnugetoptb(args, shortlist, namelist): + return 
_getoptbwrapper(getopt.gnu_getopt, args, shortlist, namelist) diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/registrar.py --- a/mercurial/registrar.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/registrar.py Mon Jan 22 17:53:02 2018 -0500 @@ -112,35 +112,53 @@ The created object can be used as a decorator for adding commands to that command table. This accepts multiple arguments to define a command. - The first argument is the command name. + The first argument is the command name (as bytes). - The options argument is an iterable of tuples defining command arguments. - See ``mercurial.fancyopts.fancyopts()`` for the format of each tuple. + The `options` keyword argument is an iterable of tuples defining command + arguments. See ``mercurial.fancyopts.fancyopts()`` for the format of each + tuple. - The synopsis argument defines a short, one line summary of how to use the + The `synopsis` argument defines a short, one line summary of how to use the command. This shows up in the help output. - The norepo argument defines whether the command does not require a + There are three arguments that control what repository (if any) is found + and passed to the decorated function: `norepo`, `optionalrepo`, and + `inferrepo`. + + The `norepo` argument defines whether the command does not require a local repository. Most commands operate against a repository, thus the - default is False. + default is False. When True, no repository will be passed. - The optionalrepo argument defines whether the command optionally requires - a local repository. + The `optionalrepo` argument defines whether the command optionally requires + a local repository. If no repository can be found, None will be passed + to the decorated function. - The inferrepo argument defines whether to try to find a repository from the - command line arguments. If True, arguments will be examined for potential - repository locations. See ``findrepo()``. If a repository is found, it - will be used. 
+ The `inferrepo` argument defines whether to try to find a repository from + the command line arguments. If True, arguments will be examined for + potential repository locations. See ``findrepo()``. If a repository is + found, it will be used and passed to the decorated function. There are three constants in the class which tells what type of the command that is. That information will be helpful at various places. It will be also be used to decide what level of access the command has on hidden commits. The constants are: - unrecoverablewrite is for those write commands which can't be recovered like - push. - recoverablewrite is for write commands which can be recovered like commit. - readonly is for commands which are read only. + `unrecoverablewrite` is for those write commands which can't be recovered + like push. + `recoverablewrite` is for write commands which can be recovered like commit. + `readonly` is for commands which are read only. + + The signature of the decorated function looks like this: + def cmd(ui[, repo] [, ] [, ]) + + `repo` is required if `norepo` is False. + `` are positional args (or `*args`) arguments, of non-option + arguments from the command line. + `` are keyword arguments (or `**options`) of option arguments + from the command line. + + See the WritingExtensions and MercurialApi documentation for more exhaustive + descriptions and examples. 
""" unrecoverablewrite = "unrecoverable" diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/repair.py --- a/mercurial/repair.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/repair.py Mon Jan 22 17:53:02 2018 -0500 @@ -203,8 +203,9 @@ deleteobsmarkers(repo.obsstore, stripobsidx) del repo.obsstore + repo.invalidatevolatilesets() + repo._phasecache.filterunknown(repo) - repo._phasecache.filterunknown(repo) if tmpbundlefile: ui.note(_("adding branch\n")) f = vfs.open(tmpbundlefile, "rb") @@ -222,8 +223,6 @@ if not repo.ui.verbose: repo.ui.popbuffer() f.close() - repo._phasecache.invalidate() - with repo.transaction('repair') as tr: bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm] diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/repoview.py --- a/mercurial/repoview.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/repoview.py Mon Jan 22 17:53:02 2018 -0500 @@ -9,11 +9,13 @@ from __future__ import absolute_import import copy +import weakref from .node import nullrev from . import ( obsolete, phases, + pycompat, tags as tagsmod, ) @@ -63,7 +65,7 @@ hidden.remove(p) stack.append(p) -def computehidden(repo): +def computehidden(repo, visibilityexceptions=None): """compute the set of hidden revision to filter During most operation hidden should be filtered.""" @@ -72,6 +74,8 @@ hidden = hideablerevs(repo) if hidden: hidden = set(hidden - pinnedrevs(repo)) + if visibilityexceptions: + hidden -= visibilityexceptions pfunc = repo.changelog.parentrevs mutablephases = (phases.draft, phases.secret) mutable = repo._phasecache.getrevset(repo, mutablephases) @@ -80,7 +84,7 @@ _revealancestors(pfunc, hidden, visible) return frozenset(hidden) -def computeunserved(repo): +def computeunserved(repo, visibilityexceptions=None): """compute the set of revision that should be filtered when used a server Secret and hidden changeset should not pretend to be here.""" @@ -98,7 +102,7 @@ else: return hiddens -def computemutable(repo): +def computemutable(repo, visibilityexceptions=None): 
assert not repo.changelog.filteredrevs # fast check to avoid revset call on huge repo if any(repo._phasecache.phaseroots[1:]): @@ -107,7 +111,7 @@ return frozenset(r for r in maymutable if getphase(repo, r)) return frozenset() -def computeimpactable(repo): +def computeimpactable(repo, visibilityexceptions=None): """Everything impactable by mutable revision The immutable filter still have some chance to get invalidated. This will @@ -139,14 +143,21 @@ # Otherwise your filter will have to recompute all its branches cache # from scratch (very slow). filtertable = {'visible': computehidden, + 'visible-hidden': computehidden, 'served': computeunserved, 'immutable': computemutable, 'base': computeimpactable} -def filterrevs(repo, filtername): - """returns set of filtered revision for this filter name""" +def filterrevs(repo, filtername, visibilityexceptions=None): + """returns set of filtered revision for this filter name + + visibilityexceptions is a set of revs which must are exceptions for + hidden-state and must be visible. They are dynamic and hence we should not + cache it's result""" if filtername not in repo.filteredrevcache: func = filtertable[filtername] + if visibilityexceptions: + return func(repo.unfiltered, visibilityexceptions) repo.filteredrevcache[filtername] = func(repo.unfiltered()) return repo.filteredrevcache[filtername] @@ -185,11 +196,14 @@ subclasses of `localrepo`. Eg: `bundlerepo` or `statichttprepo`. 
""" - def __init__(self, repo, filtername): + def __init__(self, repo, filtername, visibilityexceptions=None): object.__setattr__(self, r'_unfilteredrepo', repo) object.__setattr__(self, r'filtername', filtername) object.__setattr__(self, r'_clcachekey', None) object.__setattr__(self, r'_clcache', None) + # revs which are exceptions and must not be hidden + object.__setattr__(self, r'_visibilityexceptions', + visibilityexceptions) # not a propertycache on purpose we shall implement a proper cache later @property @@ -205,7 +219,7 @@ unfilen = len(unfiindex) - 1 unfinode = unfiindex[unfilen - 1][7] - revs = filterrevs(unfi, self.filtername) + revs = filterrevs(unfi, self.filtername, self._visibilityexceptions) cl = self._clcache newkey = (unfilen, unfinode, hash(revs), unfichangelog._delayed) # if cl.index is not unfiindex, unfi.changelog would be @@ -225,11 +239,16 @@ """Return an unfiltered version of a repo""" return self._unfilteredrepo - def filtered(self, name): + def filtered(self, name, visibilityexceptions=None): """Return a filtered version of a repository""" - if name == self.filtername: + if name == self.filtername and not visibilityexceptions: return self - return self.unfiltered().filtered(name) + return self.unfiltered().filtered(name, visibilityexceptions) + + def __repr__(self): + return r'<%s:%s %r>' % (self.__class__.__name__, + pycompat.sysstr(self.filtername), + self.unfiltered()) # everything access are forwarded to the proxied repo def __getattr__(self, attr): @@ -240,3 +259,16 @@ def __delattr__(self, attr): return delattr(self._unfilteredrepo, attr) + +# Python <3.4 easily leaks types via __mro__. See +# https://bugs.python.org/issue17950. We cache dynamically created types +# so they won't be leaked on every invocation of repo.filtered(). 
+_filteredrepotypes = weakref.WeakKeyDictionary() + +def newtype(base): + """Create a new type with the repoview mixin and the given base class""" + if base not in _filteredrepotypes: + class filteredrepo(repoview, base): + pass + _filteredrepotypes[base] = filteredrepo + return _filteredrepotypes[base] diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/revlog.py --- a/mercurial/revlog.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/revlog.py Mon Jan 22 17:53:02 2018 -0500 @@ -33,6 +33,9 @@ wdirrev, ) from .i18n import _ +from .thirdparty import ( + attr, +) from . import ( ancestor, error, @@ -251,6 +254,184 @@ if chunk: yield chunk +@attr.s(slots=True, frozen=True) +class _deltainfo(object): + distance = attr.ib() + deltalen = attr.ib() + data = attr.ib() + base = attr.ib() + chainbase = attr.ib() + chainlen = attr.ib() + compresseddeltalen = attr.ib() + +class _deltacomputer(object): + def __init__(self, revlog): + self.revlog = revlog + + def _getcandidaterevs(self, p1, p2, cachedelta): + """ + Provides revisions that present an interest to be diffed against, + grouped by level of easiness. + """ + revlog = self.revlog + curr = len(revlog) + prev = curr - 1 + p1r, p2r = revlog.rev(p1), revlog.rev(p2) + + # should we try to build a delta? + if prev != nullrev and revlog.storedeltachains: + tested = set() + # This condition is true most of the time when processing + # changegroup data into a generaldelta repo. The only time it + # isn't true is if this is the first revision in a delta chain + # or if ``format.generaldelta=true`` disabled ``lazydeltabase``. 
+ if cachedelta and revlog._generaldelta and revlog._lazydeltabase: + # Assume what we received from the server is a good choice + # build delta will reuse the cache + yield (cachedelta[0],) + tested.add(cachedelta[0]) + + if revlog._generaldelta: + # exclude already lazy tested base if any + parents = [p for p in (p1r, p2r) + if p != nullrev and p not in tested] + if parents and not revlog._aggressivemergedeltas: + # Pick whichever parent is closer to us (to minimize the + # chance of having to build a fulltext). + parents = [max(parents)] + tested.update(parents) + yield parents + + if prev not in tested: + # other approach failed try against prev to hopefully save us a + # fulltext. + yield (prev,) + + def buildtext(self, revinfo, fh): + """Builds a fulltext version of a revision + + revinfo: _revisioninfo instance that contains all needed info + fh: file handle to either the .i or the .d revlog file, + depending on whether it is inlined or not + """ + btext = revinfo.btext + if btext[0] is not None: + return btext[0] + + revlog = self.revlog + cachedelta = revinfo.cachedelta + flags = revinfo.flags + node = revinfo.node + + baserev = cachedelta[0] + delta = cachedelta[1] + # special case deltas which replace entire base; no need to decode + # base revision. this neatly avoids censored bases, which throw when + # they're decoded. 
+ hlen = struct.calcsize(">lll") + if delta[:hlen] == mdiff.replacediffheader(revlog.rawsize(baserev), + len(delta) - hlen): + btext[0] = delta[hlen:] + else: + basetext = revlog.revision(baserev, _df=fh, raw=True) + btext[0] = mdiff.patch(basetext, delta) + + try: + res = revlog._processflags(btext[0], flags, 'read', raw=True) + btext[0], validatehash = res + if validatehash: + revlog.checkhash(btext[0], node, p1=revinfo.p1, p2=revinfo.p2) + if flags & REVIDX_ISCENSORED: + raise RevlogError(_('node %s is not censored') % node) + except CensoredNodeError: + # must pass the censored index flag to add censored revisions + if not flags & REVIDX_ISCENSORED: + raise + return btext[0] + + def _builddeltadiff(self, base, revinfo, fh): + revlog = self.revlog + t = self.buildtext(revinfo, fh) + if revlog.iscensored(base): + # deltas based on a censored revision must replace the + # full content in one patch, so delta works everywhere + header = mdiff.replacediffheader(revlog.rawsize(base), len(t)) + delta = header + t + else: + ptext = revlog.revision(base, _df=fh, raw=True) + delta = mdiff.textdiff(ptext, t) + + return delta + + def _builddeltainfo(self, revinfo, base, fh): + # can we use the cached delta? 
+ if revinfo.cachedelta and revinfo.cachedelta[0] == base: + delta = revinfo.cachedelta[1] + else: + delta = self._builddeltadiff(base, revinfo, fh) + revlog = self.revlog + header, data = revlog.compress(delta) + deltalen = len(header) + len(data) + chainbase = revlog.chainbase(base) + offset = revlog.end(len(revlog) - 1) + dist = deltalen + offset - revlog.start(chainbase) + if revlog._generaldelta: + deltabase = base + else: + deltabase = chainbase + chainlen, compresseddeltalen = revlog._chaininfo(base) + chainlen += 1 + compresseddeltalen += deltalen + return _deltainfo(dist, deltalen, (header, data), deltabase, + chainbase, chainlen, compresseddeltalen) + + def finddeltainfo(self, revinfo, fh): + """Find an acceptable delta against a candidate revision + + revinfo: information about the revision (instance of _revisioninfo) + fh: file handle to either the .i or the .d revlog file, + depending on whether it is inlined or not + + Returns the first acceptable candidate revision, as ordered by + _getcandidaterevs + """ + cachedelta = revinfo.cachedelta + p1 = revinfo.p1 + p2 = revinfo.p2 + revlog = self.revlog + + deltainfo = None + for candidaterevs in self._getcandidaterevs(p1, p2, cachedelta): + nominateddeltas = [] + for candidaterev in candidaterevs: + candidatedelta = self._builddeltainfo(revinfo, candidaterev, fh) + if revlog._isgooddeltainfo(candidatedelta, revinfo.textlen): + nominateddeltas.append(candidatedelta) + if nominateddeltas: + deltainfo = min(nominateddeltas, key=lambda x: x.deltalen) + break + + return deltainfo + +@attr.s(slots=True, frozen=True) +class _revisioninfo(object): + """Information about a revision that allows building its fulltext + node: expected hash of the revision + p1, p2: parent revs of the revision + btext: built text cache consisting of a one-element list + cachedelta: (baserev, uncompressed_delta) or None + flags: flags associated to the revision storage + + One of btext[0] or cachedelta must be set. 
+ """ + node = attr.ib() + p1 = attr.ib() + p2 = attr.ib() + btext = attr.ib() + textlen = attr.ib() + cachedelta = attr.ib() + flags = attr.ib() + # index v0: # 4 bytes: offset # 4 bytes: compressed length @@ -622,12 +803,14 @@ def parentrevs(self, rev): try: - return self.index[rev][5:7] + entry = self.index[rev] except IndexError: if rev == wdirrev: raise error.WdirUnsupported raise + return entry[5], entry[6] + def node(self, rev): try: return self.index[rev][7] @@ -1687,7 +1870,7 @@ self._chunkclear() def addrevision(self, text, transaction, link, p1, p2, cachedelta=None, - node=None, flags=REVIDX_DEFAULT_FLAGS): + node=None, flags=REVIDX_DEFAULT_FLAGS, deltacomputer=None): """add a revision to the log text - the revision data to add @@ -1699,6 +1882,8 @@ computed by default as hash(text, p1, p2), however subclasses might use different hashing method (and override checkhash() in such case) flags - the known flags to set on the revision + deltacomputer - an optional _deltacomputer instance shared between + multiple calls """ if link == nullrev: raise RevlogError(_("attempted to add linkrev -1 to %s") @@ -1727,10 +1912,11 @@ self.checkhash(rawtext, node, p1=p1, p2=p2) return self.addrawrevision(rawtext, transaction, link, p1, p2, node, - flags, cachedelta=cachedelta) + flags, cachedelta=cachedelta, + deltacomputer=deltacomputer) def addrawrevision(self, rawtext, transaction, link, p1, p2, node, flags, - cachedelta=None): + cachedelta=None, deltacomputer=None): """add a raw revision with known flags, node and parents useful when reusing a revision not stored in this revlog (ex: received over wire, or read from an external bundle). 
@@ -1741,7 +1927,8 @@ ifh = self.opener(self.indexfile, "a+", checkambig=self._checkambig) try: return self._addrevision(node, rawtext, transaction, link, p1, p2, - flags, cachedelta, ifh, dfh) + flags, cachedelta, ifh, dfh, + deltacomputer=deltacomputer) finally: if dfh: dfh.close() @@ -1817,39 +2004,42 @@ return compressor.decompress(data) - def _isgooddelta(self, d, textlen): + def _isgooddeltainfo(self, d, textlen): """Returns True if the given delta is good. Good means that it is within the disk span, disk size, and chain length bounds that we know to be performant.""" if d is None: return False - # - 'dist' is the distance from the base revision -- bounding it limits - # the amount of I/O we need to do. - # - 'compresseddeltalen' is the sum of the total size of deltas we need - # to apply -- bounding it limits the amount of CPU we consume. - dist, l, data, base, chainbase, chainlen, compresseddeltalen = d + # - 'd.distance' is the distance from the base revision -- bounding it + # limits the amount of I/O we need to do. + # - 'd.compresseddeltalen' is the sum of the total size of deltas we + # need to apply -- bounding it limits the amount of CPU we consume. defaultmax = textlen * 4 maxdist = self._maxdeltachainspan if not maxdist: - maxdist = dist # ensure the conditional pass + maxdist = d.distance # ensure the conditional pass maxdist = max(maxdist, defaultmax) - if (dist > maxdist or l > textlen or - compresseddeltalen > textlen * 2 or - (self._maxchainlen and chainlen > self._maxchainlen)): + if (d.distance > maxdist or d.deltalen > textlen or + d.compresseddeltalen > textlen * 2 or + (self._maxchainlen and d.chainlen > self._maxchainlen)): return False return True def _addrevision(self, node, rawtext, transaction, link, p1, p2, flags, - cachedelta, ifh, dfh, alwayscache=False): + cachedelta, ifh, dfh, alwayscache=False, + deltacomputer=None): """internal function to add revisions to the log see addrevision for argument descriptions. 
note: "addrevision" takes non-raw text, "_addrevision" takes raw text. + if "deltacomputer" is not provided or None, a defaultdeltacomputer will + be used. + invariants: - rawtext is optional (can be None); if not set, cachedelta must be set. if both are set, they must correspond to each other. @@ -1861,76 +2051,16 @@ raise RevlogError(_("%s: attempt to add wdir revision") % (self.indexfile)) - btext = [rawtext] - def buildtext(): - if btext[0] is not None: - return btext[0] - baserev = cachedelta[0] - delta = cachedelta[1] - # special case deltas which replace entire base; no need to decode - # base revision. this neatly avoids censored bases, which throw when - # they're decoded. - hlen = struct.calcsize(">lll") - if delta[:hlen] == mdiff.replacediffheader(self.rawsize(baserev), - len(delta) - hlen): - btext[0] = delta[hlen:] - else: - if self._inline: - fh = ifh - else: - fh = dfh - basetext = self.revision(baserev, _df=fh, raw=True) - btext[0] = mdiff.patch(basetext, delta) + if self._inline: + fh = ifh + else: + fh = dfh - try: - res = self._processflags(btext[0], flags, 'read', raw=True) - btext[0], validatehash = res - if validatehash: - self.checkhash(btext[0], node, p1=p1, p2=p2) - if flags & REVIDX_ISCENSORED: - raise RevlogError(_('node %s is not censored') % node) - except CensoredNodeError: - # must pass the censored index flag to add censored revisions - if not flags & REVIDX_ISCENSORED: - raise - return btext[0] - - def builddelta(rev): - # can we use the cached delta? 
- if cachedelta and cachedelta[0] == rev: - delta = cachedelta[1] - else: - t = buildtext() - if self.iscensored(rev): - # deltas based on a censored revision must replace the - # full content in one patch, so delta works everywhere - header = mdiff.replacediffheader(self.rawsize(rev), len(t)) - delta = header + t - else: - if self._inline: - fh = ifh - else: - fh = dfh - ptext = self.revision(rev, _df=fh, raw=True) - delta = mdiff.textdiff(ptext, t) - header, data = self.compress(delta) - deltalen = len(header) + len(data) - chainbase = self.chainbase(rev) - dist = deltalen + offset - self.start(chainbase) - if self._generaldelta: - base = rev - else: - base = chainbase - chainlen, compresseddeltalen = self._chaininfo(rev) - chainlen += 1 - compresseddeltalen += deltalen - return (dist, deltalen, (header, data), base, - chainbase, chainlen, compresseddeltalen) + btext = [rawtext] curr = len(self) prev = curr - 1 offset = self.end(prev) - delta = None p1r, p2r = self.rev(p1), self.rev(p2) # full versions are inserted when the needed deltas @@ -1941,46 +2071,19 @@ else: textlen = len(rawtext) - # should we try to build a delta? - if prev != nullrev and self.storedeltachains: - tested = set() - # This condition is true most of the time when processing - # changegroup data into a generaldelta repo. The only time it - # isn't true is if this is the first revision in a delta chain - # or if ``format.generaldelta=true`` disabled ``lazydeltabase``. 
-            if cachedelta and self._generaldelta and self._lazydeltabase:
-                # Assume what we received from the server is a good choice
-                # build delta will reuse the cache
-                candidatedelta = builddelta(cachedelta[0])
-                tested.add(cachedelta[0])
-                if self._isgooddelta(candidatedelta, textlen):
-                    delta = candidatedelta
-            if delta is None and self._generaldelta:
-                # exclude already lazy tested base if any
-                parents = [p for p in (p1r, p2r)
-                           if p != nullrev and p not in tested]
-                if parents and not self._aggressivemergedeltas:
-                    # Pick whichever parent is closer to us (to minimize the
-                    # chance of having to build a fulltext).
-                    parents = [max(parents)]
-                tested.update(parents)
-                pdeltas = []
-                for p in parents:
-                    pd = builddelta(p)
-                    if self._isgooddelta(pd, textlen):
-                        pdeltas.append(pd)
-                if pdeltas:
-                    delta = min(pdeltas, key=lambda x: x[1])
-            if delta is None and prev not in tested:
-                # other approach failed try against prev to hopefully save us a
-                # fulltext.
-                candidatedelta = builddelta(prev)
-                if self._isgooddelta(candidatedelta, textlen):
-                    delta = candidatedelta
-        if delta is not None:
-            dist, l, data, base, chainbase, chainlen, compresseddeltalen = delta
+        if deltacomputer is None:
+            deltacomputer = _deltacomputer(self)
+
+        revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)
+        deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
+
+        if deltainfo is not None:
+            base = deltainfo.base
+            chainbase = deltainfo.chainbase
+            data = deltainfo.data
+            l = deltainfo.deltalen
         else:
-            rawtext = buildtext()
+            rawtext = deltacomputer.buildtext(revinfo, fh)
             data = self.compress(rawtext)
             l = len(data[1]) + len(data[0])
             base = chainbase = curr
@@ -1994,7 +2097,7 @@
         self._writeentry(transaction, ifh, dfh, entry, data, link, offset)
 
         if alwayscache and rawtext is None:
-            rawtext = buildtext()
+            rawtext = deltacomputer.buildtext(revinfo, fh)
 
         if type(rawtext) == str: # only accept immutable objects
             self._cache = (node, curr, rawtext)
@@ -2064,6 +2167,7 @@
             dfh.flush()
ifh.flush() try: + deltacomputer = _deltacomputer(self) # loop through our set of deltas for data in deltas: node, p1, p2, linknode, deltabase, delta, flags = data @@ -2110,7 +2214,8 @@ self._addrevision(node, None, transaction, link, p1, p2, flags, (baserev, delta), ifh, dfh, - alwayscache=bool(addrevisioncb)) + alwayscache=bool(addrevisioncb), + deltacomputer=deltacomputer) if addrevisioncb: addrevisioncb(self, node) @@ -2264,7 +2369,9 @@ DELTAREUSESAMEREVS = 'samerevs' DELTAREUSENEVER = 'never' - DELTAREUSEALL = {'always', 'samerevs', 'never'} + DELTAREUSEFULLADD = 'fulladd' + + DELTAREUSEALL = {'always', 'samerevs', 'never', 'fulladd'} def clone(self, tr, destrevlog, addrevisioncb=None, deltareuse=DELTAREUSESAMEREVS, aggressivemergedeltas=None): @@ -2331,6 +2438,7 @@ populatecachedelta = deltareuse in (self.DELTAREUSEALWAYS, self.DELTAREUSESAMEREVS) + deltacomputer = _deltacomputer(destrevlog) index = self.index for rev in self: entry = index[rev] @@ -2355,18 +2463,26 @@ if not cachedelta: rawtext = self.revision(rev, raw=True) - ifh = destrevlog.opener(destrevlog.indexfile, 'a+', - checkambig=False) - dfh = None - if not destrevlog._inline: - dfh = destrevlog.opener(destrevlog.datafile, 'a+') - try: - destrevlog._addrevision(node, rawtext, tr, linkrev, p1, p2, - flags, cachedelta, ifh, dfh) - finally: - if dfh: - dfh.close() - ifh.close() + + if deltareuse == self.DELTAREUSEFULLADD: + destrevlog.addrevision(rawtext, tr, linkrev, p1, p2, + cachedelta=cachedelta, + node=node, flags=flags, + deltacomputer=deltacomputer) + else: + ifh = destrevlog.opener(destrevlog.indexfile, 'a+', + checkambig=False) + dfh = None + if not destrevlog._inline: + dfh = destrevlog.opener(destrevlog.datafile, 'a+') + try: + destrevlog._addrevision(node, rawtext, tr, linkrev, p1, + p2, flags, cachedelta, ifh, dfh, + deltacomputer=deltacomputer) + finally: + if dfh: + dfh.close() + ifh.close() if addrevisioncb: addrevisioncb(self, rev, node) diff -r 87676e8ee056 -r 27b6df1b5adb 
mercurial/revset.py --- a/mercurial/revset.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/revset.py Mon Jan 22 17:53:02 2018 -0500 @@ -22,6 +22,7 @@ obsutil, pathutil, phases, + pycompat, registrar, repoview, revsetlang, @@ -123,7 +124,7 @@ def rangeall(repo, subset, x, order): assert x is None - return _makerangeset(repo, subset, 0, len(repo) - 1, order) + return _makerangeset(repo, subset, 0, repo.changelog.tiprev(), order) def rangepre(repo, subset, y, order): # ':y' can't be rewritten to '0:y' since '0' may be hidden @@ -136,7 +137,8 @@ m = getset(repo, fullreposet(repo), x) if not m: return baseset() - return _makerangeset(repo, subset, m.first(), len(repo) - 1, order) + return _makerangeset(repo, subset, m.first(), repo.changelog.tiprev(), + order) def _makerangeset(repo, subset, m, n, order): if m == n: @@ -144,7 +146,7 @@ elif n == node.wdirrev: r = spanset(repo, m, len(repo)) + baseset([n]) elif m == node.wdirrev: - r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1) + r = baseset([m]) + spanset(repo, repo.changelog.tiprev(), n - 1) elif m < n: r = spanset(repo, m, n + 1) else: @@ -266,7 +268,8 @@ def _destupdate(repo, subset, x): # experimental revset for update destination args = getargsdict(x, 'limit', 'clean') - return subset & baseset([destutil.destupdate(repo, **args)[0]]) + return subset & baseset([destutil.destupdate(repo, + **pycompat.strkwargs(args))[0]]) @predicate('_destmerge') def _destmerge(repo, subset, x): @@ -909,48 +912,43 @@ return limit(repo, subset, x, order) def _follow(repo, subset, x, name, followfirst=False): - l = getargs(x, 0, 2, _("%s takes no arguments or a pattern " - "and an optional revset") % name) - c = repo['.'] - if l: - x = getstring(l[0], _("%s expected a pattern") % name) - rev = None - if len(l) >= 2: - revs = getset(repo, fullreposet(repo), l[1]) - if len(revs) != 1: - raise error.RepoLookupError( - _("%s expected one starting revision") % name) - rev = revs.last() - c = repo[rev] - matcher = 
matchmod.match(repo.root, repo.getcwd(), [x], - ctx=repo[rev], default='path') - - files = c.manifest().walk(matcher) - - s = set() - for fname in files: - fctx = c[fname] - s = s.union(set(c.rev() for c in fctx.ancestors(followfirst))) - # include the revision responsible for the most recent version - s.add(fctx.introrev()) + args = getargsdict(x, name, 'file startrev') + revs = None + if 'startrev' in args: + revs = getset(repo, fullreposet(repo), args['startrev']) + if 'file' in args: + x = getstring(args['file'], _("%s expected a pattern") % name) + if revs is None: + revs = [None] + fctxs = [] + for r in revs: + ctx = mctx = repo[r] + if r is None: + ctx = repo['.'] + m = matchmod.match(repo.root, repo.getcwd(), [x], + ctx=mctx, default='path') + fctxs.extend(ctx[f].introfilectx() for f in ctx.manifest().walk(m)) + s = dagop.filerevancestors(fctxs, followfirst) else: - s = dagop.revancestors(repo, baseset([c.rev()]), followfirst) + if revs is None: + revs = baseset([repo['.'].rev()]) + s = dagop.revancestors(repo, revs, followfirst) return subset & s -@predicate('follow([pattern[, startrev]])', safe=True) +@predicate('follow([file[, startrev]])', safe=True) def follow(repo, subset, x): """ An alias for ``::.`` (ancestors of the working directory's first parent). - If pattern is specified, the histories of files matching given + If file pattern is specified, the histories of files matching given pattern in the revision given by startrev are followed, including copies. """ return _follow(repo, subset, x, 'follow') @predicate('_followfirst', safe=True) def _followfirst(repo, subset, x): - # ``followfirst([pattern[, startrev]])`` - # Like ``follow([pattern[, startrev]])`` but follows only the first parent + # ``followfirst([file[, startrev]])`` + # Like ``follow([file[, startrev]])`` but follows only the first parent # of every revisions or files revisions. 
return _follow(repo, subset, x, '_followfirst', followfirst=True) @@ -1421,8 +1419,16 @@ l = getargs(x, 0, 1, _("outgoing takes one or no arguments")) # i18n: "outgoing" is a keyword dest = l and getstring(l[0], _("outgoing requires a repository path")) or '' - dest = repo.ui.expandpath(dest or 'default-push', dest or 'default') - dest, branches = hg.parseurl(dest) + if not dest: + # ui.paths.getpath() explicitly tests for None, not just a boolean + dest = None + path = repo.ui.paths.getpath(dest, default=('default-push', 'default')) + if not path: + raise error.Abort(_('default repository not configured!'), + hint=_("see 'hg help config.paths'")) + dest = path.pushloc or path.loc + branches = path.branch, [] + revs, checkout = hg.addbranchrevs(repo, repo, branches, []) if revs: revs = [repo.lookup(rev) for rev in revs] @@ -1509,8 +1515,7 @@ def _phase(repo, subset, *targets): """helper to select all rev in phases""" - s = repo._phasecache.getrevset(repo, targets) - return subset & s + return repo._phasecache.getrevset(repo, targets, subset) @predicate('draft()', safe=True) def draft(repo, subset, x): @@ -1617,11 +1622,7 @@ """Changeset in public phase.""" # i18n: "public" is a keyword getargs(x, 0, 0, _("public takes no arguments")) - phase = repo._phasecache.phase - target = phases.public - condition = lambda r: phase(repo, r) == target - return subset.filter(condition, condrepr=('', target), - cache=False) + return _phase(repo, subset, phases.public) @predicate('remote([id [,path]])', safe=False) def remote(repo, subset, x): diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/revsetlang.py --- a/mercurial/revsetlang.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/revsetlang.py Mon Jan 22 17:53:02 2018 -0500 @@ -27,8 +27,10 @@ "~": (18, None, None, ("ancestor", 18), None), "^": (18, None, None, ("parent", 18), "parentpost"), "-": (5, None, ("negate", 19), ("minus", 5), None), - "::": (17, None, ("dagrangepre", 17), ("dagrange", 17), "dagrangepost"), - "..": (17, 
None, ("dagrangepre", 17), ("dagrange", 17), "dagrangepost"), + "::": (17, "dagrangeall", ("dagrangepre", 17), ("dagrange", 17), + "dagrangepost"), + "..": (17, "dagrangeall", ("dagrangepre", 17), ("dagrange", 17), + "dagrangepost"), ":": (15, "rangeall", ("rangepre", 15), ("range", 15), "rangepost"), "not": (10, None, ("not", 10), None, None), "!": (10, None, ("not", 10), None, None), @@ -288,6 +290,8 @@ post = ('parentpost', x[1]) if x[2][0] == 'dagrangepre': return _fixops(('dagrange', post, x[2][1])) + elif x[2][0] == 'dagrangeall': + return _fixops(('dagrangepost', post)) elif x[2][0] == 'rangepre': return _fixops(('range', post, x[2][1])) elif x[2][0] == 'rangeall': @@ -313,6 +317,8 @@ return _analyze(_build('only(_, _)', *x[1:])) elif op == 'onlypost': return _analyze(_build('only(_)', x[1])) + elif op == 'dagrangeall': + raise error.ParseError(_("can't use '::' in this context")) elif op == 'dagrangepre': return _analyze(_build('ancestors(_)', x[1])) elif op == 'dagrangepost': @@ -549,6 +555,52 @@ """ return "'%s'" % util.escapestr(pycompat.bytestr(s)) +def _formatargtype(c, arg): + if c == 'd': + return '%d' % int(arg) + elif c == 's': + return _quote(arg) + elif c == 'r': + parse(arg) # make sure syntax errors are confined + return '(%s)' % arg + elif c == 'n': + return _quote(node.hex(arg)) + elif c == 'b': + try: + return _quote(arg.branch()) + except AttributeError: + raise TypeError + raise error.ParseError(_('unexpected revspec format character %s') % c) + +def _formatlistexp(s, t): + l = len(s) + if l == 0: + return "_list('')" + elif l == 1: + return _formatargtype(t, s[0]) + elif t == 'd': + return "_intlist('%s')" % "\0".join('%d' % int(a) for a in s) + elif t == 's': + return "_list(%s)" % _quote("\0".join(s)) + elif t == 'n': + return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s) + elif t == 'b': + try: + return "_list('%s')" % "\0".join(a.branch() for a in s) + except AttributeError: + raise TypeError + + m = l // 2 + return '(%s or 
%s)' % (_formatlistexp(s[:m], t), _formatlistexp(s[m:], t)) + +def _formatparamexp(args, t): + return ', '.join(_formatargtype(t, a) for a in args) + +_formatlistfuncs = { + 'l': _formatlistexp, + 'p': _formatparamexp, +} + def formatspec(expr, *args): ''' This is a convenience function for using revsets internally, and @@ -564,7 +616,8 @@ %n = hex(arg), single-quoted %% = a literal '%' - Prefixing the type with 'l' specifies a parenthesized list of that type. + Prefixing the type with 'l' specifies a parenthesized list of that type, + and 'p' specifies a list of function parameters of that type. >>> formatspec(b'%r:: and %lr', b'10 or 11', (b"this()", b"that()")) '(10 or 11):: and ((this()) or (that()))' @@ -579,68 +632,61 @@ >>> formatspec(b'branch(%b)', b) "branch('default')" >>> formatspec(b'root(%ls)', [b'a', b'b', b'c', b'd']) - "root(_list('a\\x00b\\x00c\\x00d'))" + "root(_list('a\\\\x00b\\\\x00c\\\\x00d'))" + >>> formatspec(b'sort(%r, %ps)', b':', [b'desc', b'user']) + "sort((:), 'desc', 'user')" + >>> formatspec('%ls', ['a', "'"]) + "_list('a\\\\x00\\\\'')" ''' - - def argtype(c, arg): - if c == 'd': - return '%d' % int(arg) - elif c == 's': - return _quote(arg) - elif c == 'r': - parse(arg) # make sure syntax errors are confined - return '(%s)' % arg - elif c == 'n': - return _quote(node.hex(arg)) - elif c == 'b': - return _quote(arg.branch()) + expr = pycompat.bytestr(expr) + argiter = iter(args) + ret = [] + pos = 0 + while pos < len(expr): + q = expr.find('%', pos) + if q < 0: + ret.append(expr[pos:]) + break + ret.append(expr[pos:q]) + pos = q + 1 + try: + d = expr[pos] + except IndexError: + raise error.ParseError(_('incomplete revspec format character')) + if d == '%': + ret.append(d) + pos += 1 + continue - def listexp(s, t): - l = len(s) - if l == 0: - return "_list('')" - elif l == 1: - return argtype(t, s[0]) - elif t == 'd': - return "_intlist('%s')" % "\0".join('%d' % int(a) for a in s) - elif t == 's': - return "_list('%s')" % "\0".join(s) - 
elif t == 'n': - return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s) - elif t == 'b': - return "_list('%s')" % "\0".join(a.branch() for a in s) - - m = l // 2 - return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t)) - - expr = pycompat.bytestr(expr) - ret = '' - pos = 0 - arg = 0 - while pos < len(expr): - c = expr[pos] - if c == '%': + try: + arg = next(argiter) + except StopIteration: + raise error.ParseError(_('missing argument for revspec')) + f = _formatlistfuncs.get(d) + if f: + # a list of some type pos += 1 - d = expr[pos] - if d == '%': - ret += d - elif d in 'dsnbr': - ret += argtype(d, args[arg]) - arg += 1 - elif d == 'l': - # a list of some type - pos += 1 + try: d = expr[pos] - ret += listexp(list(args[arg]), d) - arg += 1 - else: - raise error.Abort(_('unexpected revspec format character %s') - % d) + except IndexError: + raise error.ParseError(_('incomplete revspec format character')) + try: + ret.append(f(list(arg), d)) + except (TypeError, ValueError): + raise error.ParseError(_('invalid argument for revspec')) else: - ret += c + try: + ret.append(_formatargtype(d, arg)) + except (TypeError, ValueError): + raise error.ParseError(_('invalid argument for revspec')) pos += 1 - return ret + try: + next(argiter) + raise error.ParseError(_('too many revspec arguments specified')) + except StopIteration: + pass + return ''.join(ret) def prettyformat(tree): return parser.prettyformat(tree, ('string', 'symbol')) @@ -661,3 +707,34 @@ if tree[0] == 'func': funcs.add(tree[1][1]) return funcs + +_hashre = util.re.compile('[0-9a-fA-F]{1,40}$') + +def _ishashlikesymbol(symbol): + """returns true if the symbol looks like a hash""" + return _hashre.match(symbol) + +def gethashlikesymbols(tree): + """returns the list of symbols of the tree that look like hashes + + >>> gethashlikesymbols(('dagrange', ('symbol', '3'), ('symbol', 'abe3ff'))) + ['3', 'abe3ff'] + >>> gethashlikesymbols(('func', ('symbol', 'precursors'), ('symbol', '.'))) + [] + >>> 
gethashlikesymbols(('func', ('symbol', 'precursors'), ('symbol', '34'))) + ['34'] + >>> gethashlikesymbols(('symbol', 'abe3ffZ')) + [] + """ + if not tree: + return [] + + if tree[0] == "symbol": + if _ishashlikesymbol(tree[1]): + return [tree[1]] + elif len(tree) >= 3: + results = [] + for subtree in tree[1:]: + results += gethashlikesymbols(subtree) + return results + return [] diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/rewriteutil.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/mercurial/rewriteutil.py Mon Jan 22 17:53:02 2018 -0500 @@ -0,0 +1,53 @@ +# rewriteutil.py - utility functions for rewriting changesets +# +# Copyright 2017 Octobus +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +from __future__ import absolute_import + +from .i18n import _ + +from . import ( + error, + node, + obsolete, + revset, +) + +def precheck(repo, revs, action='rewrite'): + """check if revs can be rewritten + action is used to control the error message. + + Make sure this function is called after taking the lock. + """ + if node.nullrev in revs: + msg = _("cannot %s null changeset") % (action) + hint = _("no changeset checked out") + raise error.Abort(msg, hint=hint) + + publicrevs = repo.revs('%ld and public()', revs) + if len(repo[None].parents()) > 1: + raise error.Abort(_("cannot %s while merging") % action) + + if publicrevs: + msg = _("cannot %s public changesets") % (action) + hint = _("see 'hg help phases' for details") + raise error.Abort(msg, hint=hint) + + newunstable = disallowednewunstable(repo, revs) + if newunstable: + raise error.Abort(_("cannot %s changeset with children") % action) + +def disallowednewunstable(repo, revs): + """Checks whether editing the revs will create new unstable changesets and + are we allowed to create them. 
+ + To allow new unstable changesets, set the config: + `experimental.evolution.allowunstable=True` + """ + allowunstable = obsolete.isenabled(repo, obsolete.allowunstableopt) + if allowunstable: + return revset.baseset() + return repo.revs("(%ld::) - %ld", revs, revs) diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/scmutil.py --- a/mercurial/scmutil.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/scmutil.py Mon Jan 22 17:53:02 2018 -0500 @@ -1100,12 +1100,11 @@ finally: if proc: proc.communicate() - if proc.returncode != 0: - # not an error so 'cmd | grep' can be empty - repo.ui.debug("extdata command '%s' %s\n" - % (cmd, util.explainexit(proc.returncode)[0])) if src: src.close() + if proc and proc.returncode != 0: + raise error.Abort(_("extdata command '%s' failed: %s") + % (cmd, util.explainexit(proc.returncode)[0])) return data @@ -1223,6 +1222,9 @@ 'unbundle', ] +# A marker that tells the evolve extension to suppress its own reporting +_reportstroubledchangesets = True + def registersummarycallback(repo, otr, txnname=''): """register a callback to issue a summary after the transaction is closed """ @@ -1245,7 +1247,7 @@ if filtername: repo = repo.filtered(filtername) func(repo, tr) - newcat = '%2i-txnreport' % len(categories) + newcat = '%02i-txnreport' % len(categories) otr.addpostclose(newcat, wrapped) categories.append(newcat) return wrapped @@ -1258,11 +1260,38 @@ repo.ui.status(_('obsoleted %i changesets\n') % len(obsoleted)) + if (obsolete.isenabled(repo, obsolete.createmarkersopt) and + repo.ui.configbool('experimental', 'evolution.report-instabilities')): + instabilitytypes = [ + ('orphan', 'orphan'), + ('phase-divergent', 'phasedivergent'), + ('content-divergent', 'contentdivergent'), + ] + + def getinstabilitycounts(repo): + filtered = repo.changelog.filteredrevs + counts = {} + for instability, revset in instabilitytypes: + counts[instability] = len(set(obsolete.getrevs(repo, revset)) - + filtered) + return counts + + oldinstabilitycounts = 
getinstabilitycounts(repo) + @reportsummary + def reportnewinstabilities(repo, tr): + newinstabilitycounts = getinstabilitycounts(repo) + for instability, revset in instabilitytypes: + delta = (newinstabilitycounts[instability] - + oldinstabilitycounts[instability]) + if delta > 0: + repo.ui.warn(_('%i new %s changesets\n') % + (delta, instability)) + if txmatch(_reportnewcssource): @reportsummary def reportnewcs(repo, tr): """Report the range of new revisions pulled/unbundled.""" - newrevs = list(tr.changes.get('revs', set())) + newrevs = tr.changes.get('revs', xrange(0, 0)) if not newrevs: return @@ -1279,3 +1308,108 @@ else: revrange = '%s:%s' % (minrev, maxrev) repo.ui.status(_('new changesets %s\n') % revrange) + +def nodesummaries(repo, nodes, maxnumnodes=4): + if len(nodes) <= maxnumnodes or repo.ui.verbose: + return ' '.join(short(h) for h in nodes) + first = ' '.join(short(h) for h in nodes[:maxnumnodes]) + return _("%s and %d others") % (first, len(nodes) - maxnumnodes) + +def enforcesinglehead(repo, tr, desc): + """check that no named branch has multiple heads""" + if desc in ('strip', 'repair'): + # skip the logic during strip + return + visible = repo.filtered('visible') + # possible improvement: we could restrict the check to affected branch + for name, heads in visible.branchmap().iteritems(): + if len(heads) > 1: + msg = _('rejecting multiple heads on branch "%s"') + msg %= name + hint = _('%d heads: %s') + hint %= (len(heads), nodesummaries(repo, heads)) + raise error.Abort(msg, hint=hint) + +def wrapconvertsink(sink): + """Allow extensions to wrap the sink returned by convcmd.convertsink() + before it is used, whether or not the convert extension was formally loaded. + """ + return sink + +def unhidehashlikerevs(repo, specs, hiddentype): + """parse the user specs and unhide changesets whose hash or revision number + is passed. 
+
+    hiddentype can be: 1) 'warn': warn while unhiding changesets
+                       2) 'nowarn': don't warn while unhiding changesets
+
+    returns a repo object with the required changesets unhidden
+    """
+    if not repo.filtername or not repo.ui.configbool('experimental',
+                                                     'directaccess'):
+        return repo
+
+    if repo.filtername not in ('visible', 'visible-hidden'):
+        return repo
+
+    symbols = set()
+    for spec in specs:
+        try:
+            tree = revsetlang.parse(spec)
+        except error.ParseError: # will be reported by scmutil.revrange()
+            continue
+
+        symbols.update(revsetlang.gethashlikesymbols(tree))
+
+    if not symbols:
+        return repo
+
+    revs = _getrevsfromsymbols(repo, symbols)
+
+    if not revs:
+        return repo
+
+    if hiddentype == 'warn':
+        unfi = repo.unfiltered()
+        revstr = ", ".join([pycompat.bytestr(unfi[l]) for l in revs])
+        repo.ui.warn(_("warning: accessing hidden changesets for write "
+                       "operation: %s\n") % revstr)
+
+    # we have to use new filtername to separate branch/tags cache until we can
+    # disable these caches when revisions are dynamically pinned.
+ return repo.filtered('visible-hidden', revs) + +def _getrevsfromsymbols(repo, symbols): + """parse the list of symbols and returns a set of revision numbers of hidden + changesets present in symbols""" + revs = set() + unfi = repo.unfiltered() + unficl = unfi.changelog + cl = repo.changelog + tiprev = len(unficl) + pmatch = unficl._partialmatch + allowrevnums = repo.ui.configbool('experimental', 'directaccess.revnums') + for s in symbols: + try: + n = int(s) + if n <= tiprev: + if not allowrevnums: + continue + else: + if n not in cl: + revs.add(n) + continue + except ValueError: + pass + + try: + s = pmatch(s) + except error.LookupError: + s = None + + if s is not None: + rev = unficl.rev(s) + if rev not in cl: + revs.add(rev) + + return revs diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/selectors2.py --- a/mercurial/selectors2.py Mon Jan 08 16:07:51 2018 -0800 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,745 +0,0 @@ -""" Back-ported, durable, and portable selectors """ - -# MIT License -# -# Copyright (c) 2017 Seth Michael Larson -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. - -# no-check-code - -from __future__ import absolute_import - -import collections -import errno -import math -import select -import socket -import sys -import time - -from . import pycompat - -namedtuple = collections.namedtuple -Mapping = collections.Mapping - -try: - monotonic = time.monotonic -except AttributeError: - monotonic = time.time - -__author__ = 'Seth Michael Larson' -__email__ = 'sethmichaellarson@protonmail.com' -__version__ = '2.0.0' -__license__ = 'MIT' -__url__ = 'https://www.github.com/SethMichaelLarson/selectors2' - -__all__ = ['EVENT_READ', - 'EVENT_WRITE', - 'SelectorKey', - 'DefaultSelector', - 'BaseSelector'] - -EVENT_READ = (1 << 0) -EVENT_WRITE = (1 << 1) -_DEFAULT_SELECTOR = None -_SYSCALL_SENTINEL = object() # Sentinel in case a system call returns None. -_ERROR_TYPES = (OSError, IOError, socket.error) - - -SelectorKey = namedtuple('SelectorKey', ['fileobj', 'fd', 'events', 'data']) - - -class _SelectorMapping(Mapping): - """ Mapping of file objects to selector keys """ - - def __init__(self, selector): - self._selector = selector - - def __len__(self): - return len(self._selector._fd_to_key) - - def __getitem__(self, fileobj): - try: - fd = self._selector._fileobj_lookup(fileobj) - return self._selector._fd_to_key[fd] - except KeyError: - raise KeyError("{0!r} is not registered.".format(fileobj)) - - def __iter__(self): - return iter(self._selector._fd_to_key) - - -def _fileobj_to_fd(fileobj): - """ Return a file descriptor from a file object. If - given an integer will simply return that integer back. 
""" - if isinstance(fileobj, int): - fd = fileobj - else: - try: - fd = int(fileobj.fileno()) - except (AttributeError, TypeError, ValueError): - raise ValueError("Invalid file object: {0!r}".format(fileobj)) - if fd < 0: - raise ValueError("Invalid file descriptor: {0}".format(fd)) - return fd - - -class BaseSelector(object): - """ Abstract Selector class - - A selector supports registering file objects to be monitored - for specific I/O events. - - A file object is a file descriptor or any object with a - `fileno()` method. An arbitrary object can be attached to the - file object which can be used for example to store context info, - a callback, etc. - - A selector can use various implementations (select(), poll(), epoll(), - and kqueue()) depending on the platform. The 'DefaultSelector' class uses - the most efficient implementation for the current platform. - """ - def __init__(self): - # Maps file descriptors to keys. - self._fd_to_key = {} - - # Read-only mapping returned by get_map() - self._map = _SelectorMapping(self) - - def _fileobj_lookup(self, fileobj): - """ Return a file descriptor from a file object. - This wraps _fileobj_to_fd() to do an exhaustive - search in case the object is invalid but we still - have it in our map. Used by unregister() so we can - unregister an object that was previously registered - even if it is closed. It is also used by _SelectorMapping - """ - try: - return _fileobj_to_fd(fileobj) - except ValueError: - - # Search through all our mapped keys. - for key in self._fd_to_key.values(): - if key.fileobj is fileobj: - return key.fd - - # Raise ValueError after all. - raise - - def register(self, fileobj, events, data=None): - """ Register a file object for a set of events to monitor. 
""" - if (not events) or (events & ~(EVENT_READ | EVENT_WRITE)): - raise ValueError("Invalid events: {0!r}".format(events)) - - key = SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data) - - if key.fd in self._fd_to_key: - raise KeyError("{0!r} (FD {1}) is already registered" - .format(fileobj, key.fd)) - - self._fd_to_key[key.fd] = key - return key - - def unregister(self, fileobj): - """ Unregister a file object from being monitored. """ - try: - key = self._fd_to_key.pop(self._fileobj_lookup(fileobj)) - except KeyError: - raise KeyError("{0!r} is not registered".format(fileobj)) - - # Getting the fileno of a closed socket on Windows errors with EBADF. - except socket.error as err: - if err.errno != errno.EBADF: - raise - else: - for key in self._fd_to_key.values(): - if key.fileobj is fileobj: - self._fd_to_key.pop(key.fd) - break - else: - raise KeyError("{0!r} is not registered".format(fileobj)) - return key - - def modify(self, fileobj, events, data=None): - """ Change a registered file object monitored events and data. """ - # NOTE: Some subclasses optimize this operation even further. - try: - key = self._fd_to_key[self._fileobj_lookup(fileobj)] - except KeyError: - raise KeyError("{0!r} is not registered".format(fileobj)) - - if events != key.events: - self.unregister(fileobj) - key = self.register(fileobj, events, data) - - elif data != key.data: - # Use a shortcut to update the data. - key = key._replace(data=data) - self._fd_to_key[key.fd] = key - - return key - - def select(self, timeout=None): - """ Perform the actual selection until some monitored file objects - are ready or the timeout expires. """ - raise NotImplementedError() - - def close(self): - """ Close the selector. This must be called to ensure that all - underlying resources are freed. """ - self._fd_to_key.clear() - self._map = None - - def get_key(self, fileobj): - """ Return the key associated with a registered file object. 
""" - mapping = self.get_map() - if mapping is None: - raise RuntimeError("Selector is closed") - try: - return mapping[fileobj] - except KeyError: - raise KeyError("{0!r} is not registered".format(fileobj)) - - def get_map(self): - """ Return a mapping of file objects to selector keys """ - return self._map - - def _key_from_fd(self, fd): - """ Return the key associated to a given file descriptor - Return None if it is not found. """ - try: - return self._fd_to_key[fd] - except KeyError: - return None - - def __enter__(self): - return self - - def __exit__(self, *_): - self.close() - - -# Almost all platforms have select.select() -if hasattr(select, "select"): - class SelectSelector(BaseSelector): - """ Select-based selector. """ - def __init__(self): - super(SelectSelector, self).__init__() - self._readers = set() - self._writers = set() - - def register(self, fileobj, events, data=None): - key = super(SelectSelector, self).register(fileobj, events, data) - if events & EVENT_READ: - self._readers.add(key.fd) - if events & EVENT_WRITE: - self._writers.add(key.fd) - return key - - def unregister(self, fileobj): - key = super(SelectSelector, self).unregister(fileobj) - self._readers.discard(key.fd) - self._writers.discard(key.fd) - return key - - def select(self, timeout=None): - # Selecting on empty lists on Windows errors out. 
- if not len(self._readers) and not len(self._writers): - return [] - - timeout = None if timeout is None else max(timeout, 0.0) - ready = [] - r, w, _ = _syscall_wrapper(self._wrap_select, True, self._readers, - self._writers, timeout) - r = set(r) - w = set(w) - for fd in r | w: - events = 0 - if fd in r: - events |= EVENT_READ - if fd in w: - events |= EVENT_WRITE - - key = self._key_from_fd(fd) - if key: - ready.append((key, events & key.events)) - return ready - - def _wrap_select(self, r, w, timeout=None): - """ Wrapper for select.select because timeout is a positional arg """ - return select.select(r, w, [], timeout) - - __all__.append('SelectSelector') - - # Jython has a different implementation of .fileno() for socket objects. - if pycompat.isjython: - class _JythonSelectorMapping(object): - """ This is an implementation of _SelectorMapping that is built - for use specifically with Jython, which does not provide a hashable - value from socket.socket.fileno(). """ - - def __init__(self, selector): - assert isinstance(selector, JythonSelectSelector) - self._selector = selector - - def __len__(self): - return len(self._selector._sockets) - - def __getitem__(self, fileobj): - for sock, key in self._selector._sockets: - if sock is fileobj: - return key - else: - raise KeyError("{0!r} is not registered.".format(fileobj)) - - class JythonSelectSelector(SelectSelector): - """ This is an implementation of SelectSelector that is for Jython - which works around that Jython's socket.socket.fileno() does not - return an integer fd value. All SelectorKey.fd will be equal to -1 - and should not be used. This instead uses object id to compare fileobj - and will only use select.select as it's the only selector that allows - directly passing in socket objects rather than registering fds. 
- See: http://bugs.jython.org/issue1678 - https://wiki.python.org/jython/NewSocketModule#socket.fileno.28.29_does_not_return_an_integer - """ - - def __init__(self): - super(JythonSelectSelector, self).__init__() - - self._sockets = [] # Uses a list of tuples instead of dictionary. - self._map = _JythonSelectorMapping(self) - self._readers = [] - self._writers = [] - - # Jython has a select.cpython_compatible_select function in older versions. - self._select_func = getattr(select, 'cpython_compatible_select', select.select) - - def register(self, fileobj, events, data=None): - for sock, _ in self._sockets: - if sock is fileobj: - raise KeyError("{0!r} is already registered" - .format(fileobj, sock)) - - key = SelectorKey(fileobj, -1, events, data) - self._sockets.append((fileobj, key)) - - if events & EVENT_READ: - self._readers.append(fileobj) - if events & EVENT_WRITE: - self._writers.append(fileobj) - return key - - def unregister(self, fileobj): - for i, (sock, key) in enumerate(self._sockets): - if sock is fileobj: - break - else: - raise KeyError("{0!r} is not registered.".format(fileobj)) - - if key.events & EVENT_READ: - self._readers.remove(fileobj) - if key.events & EVENT_WRITE: - self._writers.remove(fileobj) - - del self._sockets[i] - return key - - def _wrap_select(self, r, w, timeout=None): - """ Wrapper for select.select because timeout is a positional arg """ - return self._select_func(r, w, [], timeout) - - __all__.append('JythonSelectSelector') - SelectSelector = JythonSelectSelector # Override so the wrong selector isn't used. 
- - -if hasattr(select, "poll"): - class PollSelector(BaseSelector): - """ Poll-based selector """ - def __init__(self): - super(PollSelector, self).__init__() - self._poll = select.poll() - - def register(self, fileobj, events, data=None): - key = super(PollSelector, self).register(fileobj, events, data) - event_mask = 0 - if events & EVENT_READ: - event_mask |= select.POLLIN - if events & EVENT_WRITE: - event_mask |= select.POLLOUT - self._poll.register(key.fd, event_mask) - return key - - def unregister(self, fileobj): - key = super(PollSelector, self).unregister(fileobj) - self._poll.unregister(key.fd) - return key - - def _wrap_poll(self, timeout=None): - """ Wrapper function for select.poll.poll() so that - _syscall_wrapper can work with only seconds. """ - if timeout is not None: - if timeout <= 0: - timeout = 0 - else: - # select.poll.poll() has a resolution of 1 millisecond, - # round away from zero to wait *at least* timeout seconds. - timeout = math.ceil(timeout * 1000) - - result = self._poll.poll(timeout) - return result - - def select(self, timeout=None): - ready = [] - fd_events = _syscall_wrapper(self._wrap_poll, True, timeout=timeout) - for fd, event_mask in fd_events: - events = 0 - if event_mask & ~select.POLLIN: - events |= EVENT_WRITE - if event_mask & ~select.POLLOUT: - events |= EVENT_READ - - key = self._key_from_fd(fd) - if key: - ready.append((key, events & key.events)) - - return ready - - __all__.append('PollSelector') - -if hasattr(select, "epoll"): - class EpollSelector(BaseSelector): - """ Epoll-based selector """ - def __init__(self): - super(EpollSelector, self).__init__() - self._epoll = select.epoll() - - def fileno(self): - return self._epoll.fileno() - - def register(self, fileobj, events, data=None): - key = super(EpollSelector, self).register(fileobj, events, data) - events_mask = 0 - if events & EVENT_READ: - events_mask |= select.EPOLLIN - if events & EVENT_WRITE: - events_mask |= select.EPOLLOUT - 
_syscall_wrapper(self._epoll.register, False, key.fd, events_mask) - return key - - def unregister(self, fileobj): - key = super(EpollSelector, self).unregister(fileobj) - try: - _syscall_wrapper(self._epoll.unregister, False, key.fd) - except _ERROR_TYPES: - # This can occur when the fd was closed since registry. - pass - return key - - def select(self, timeout=None): - if timeout is not None: - if timeout <= 0: - timeout = 0.0 - else: - # select.epoll.poll() has a resolution of 1 millisecond - # but luckily takes seconds so we don't need a wrapper - # like PollSelector. Just for better rounding. - timeout = math.ceil(timeout * 1000) * 0.001 - timeout = float(timeout) - else: - timeout = -1.0 # epoll.poll() must have a float. - - # We always want at least 1 to ensure that select can be called - # with no file descriptors registered. Otherwise will fail. - max_events = max(len(self._fd_to_key), 1) - - ready = [] - fd_events = _syscall_wrapper(self._epoll.poll, True, - timeout=timeout, - maxevents=max_events) - for fd, event_mask in fd_events: - events = 0 - if event_mask & ~select.EPOLLIN: - events |= EVENT_WRITE - if event_mask & ~select.EPOLLOUT: - events |= EVENT_READ - - key = self._key_from_fd(fd) - if key: - ready.append((key, events & key.events)) - return ready - - def close(self): - self._epoll.close() - super(EpollSelector, self).close() - - __all__.append('EpollSelector') - - -if hasattr(select, "devpoll"): - class DevpollSelector(BaseSelector): - """Solaris /dev/poll selector.""" - - def __init__(self): - super(DevpollSelector, self).__init__() - self._devpoll = select.devpoll() - - def fileno(self): - return self._devpoll.fileno() - - def register(self, fileobj, events, data=None): - key = super(DevpollSelector, self).register(fileobj, events, data) - poll_events = 0 - if events & EVENT_READ: - poll_events |= select.POLLIN - if events & EVENT_WRITE: - poll_events |= select.POLLOUT - self._devpoll.register(key.fd, poll_events) - return key - - def 
unregister(self, fileobj): - key = super(DevpollSelector, self).unregister(fileobj) - self._devpoll.unregister(key.fd) - return key - - def _wrap_poll(self, timeout=None): - """ Wrapper function for select.poll.poll() so that - _syscall_wrapper can work with only seconds. """ - if timeout is not None: - if timeout <= 0: - timeout = 0 - else: - # select.devpoll.poll() has a resolution of 1 millisecond, - # round away from zero to wait *at least* timeout seconds. - timeout = math.ceil(timeout * 1000) - - result = self._devpoll.poll(timeout) - return result - - def select(self, timeout=None): - ready = [] - fd_events = _syscall_wrapper(self._wrap_poll, True, timeout=timeout) - for fd, event_mask in fd_events: - events = 0 - if event_mask & ~select.POLLIN: - events |= EVENT_WRITE - if event_mask & ~select.POLLOUT: - events |= EVENT_READ - - key = self._key_from_fd(fd) - if key: - ready.append((key, events & key.events)) - - return ready - - def close(self): - self._devpoll.close() - super(DevpollSelector, self).close() - - __all__.append('DevpollSelector') - - -if hasattr(select, "kqueue"): - class KqueueSelector(BaseSelector): - """ Kqueue / Kevent-based selector """ - def __init__(self): - super(KqueueSelector, self).__init__() - self._kqueue = select.kqueue() - - def fileno(self): - return self._kqueue.fileno() - - def register(self, fileobj, events, data=None): - key = super(KqueueSelector, self).register(fileobj, events, data) - if events & EVENT_READ: - kevent = select.kevent(key.fd, - select.KQ_FILTER_READ, - select.KQ_EV_ADD) - - _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0) - - if events & EVENT_WRITE: - kevent = select.kevent(key.fd, - select.KQ_FILTER_WRITE, - select.KQ_EV_ADD) - - _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0) - - return key - - def unregister(self, fileobj): - key = super(KqueueSelector, self).unregister(fileobj) - if key.events & EVENT_READ: - kevent = select.kevent(key.fd, - select.KQ_FILTER_READ, - 
select.KQ_EV_DELETE) - try: - _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0) - except _ERROR_TYPES: - pass - if key.events & EVENT_WRITE: - kevent = select.kevent(key.fd, - select.KQ_FILTER_WRITE, - select.KQ_EV_DELETE) - try: - _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0) - except _ERROR_TYPES: - pass - - return key - - def select(self, timeout=None): - if timeout is not None: - timeout = max(timeout, 0) - - max_events = len(self._fd_to_key) * 2 - ready_fds = {} - - kevent_list = _syscall_wrapper(self._kqueue.control, True, - None, max_events, timeout) - - for kevent in kevent_list: - fd = kevent.ident - event_mask = kevent.filter - events = 0 - if event_mask == select.KQ_FILTER_READ: - events |= EVENT_READ - if event_mask == select.KQ_FILTER_WRITE: - events |= EVENT_WRITE - - key = self._key_from_fd(fd) - if key: - if key.fd not in ready_fds: - ready_fds[key.fd] = (key, events & key.events) - else: - old_events = ready_fds[key.fd][1] - ready_fds[key.fd] = (key, (events | old_events) & key.events) - - return list(ready_fds.values()) - - def close(self): - self._kqueue.close() - super(KqueueSelector, self).close() - - __all__.append('KqueueSelector') - - -def _can_allocate(struct): - """ Checks that select structs can be allocated by the underlying - operating system, not just advertised by the select module. We don't - check select() because we'll be hopeful that most platforms that - don't have it available will not advertise it. (ie: GAE) """ - try: - # select.poll() objects won't fail until used. - if struct == 'poll': - p = select.poll() - p.poll(0) - - # All others will fail on allocation. - else: - getattr(select, struct)().close() - return True - except (OSError, AttributeError): - return False - - -# Python 3.5 uses a more direct route to wrap system calls to increase speed. 
-if sys.version_info >= (3, 5): - def _syscall_wrapper(func, _, *args, **kwargs): - """ This is the short-circuit version of the below logic - because in Python 3.5+ all selectors restart system calls. """ - return func(*args, **kwargs) -else: - def _syscall_wrapper(func, recalc_timeout, *args, **kwargs): - """ Wrapper function for syscalls that could fail due to EINTR. - All functions should be retried if there is time left in the timeout - in accordance with PEP 475. """ - timeout = kwargs.get("timeout", None) - if timeout is None: - expires = None - recalc_timeout = False - else: - timeout = float(timeout) - if timeout < 0.0: # Timeout less than 0 treated as no timeout. - expires = None - else: - expires = monotonic() + timeout - - args = list(args) - if recalc_timeout and "timeout" not in kwargs: - raise ValueError( - "Timeout must be in args or kwargs to be recalculated") - - result = _SYSCALL_SENTINEL - while result is _SYSCALL_SENTINEL: - try: - result = func(*args, **kwargs) - # OSError is thrown by select.select - # IOError is thrown by select.epoll.poll - # select.error is thrown by select.poll.poll - # Aren't we thankful for Python 3.x rework for exceptions? - except (OSError, IOError, select.error) as e: - # select.error wasn't a subclass of OSError in the past. - errcode = None - if hasattr(e, "errno"): - errcode = e.errno - elif hasattr(e, "args"): - errcode = e.args[0] - - # Also test for the Windows equivalent of EINTR. 
- is_interrupt = (errcode == errno.EINTR or (hasattr(errno, "WSAEINTR") and - errcode == errno.WSAEINTR)) - - if is_interrupt: - if expires is not None: - current_time = monotonic() - if current_time > expires: - raise OSError(errno=errno.ETIMEDOUT) - if recalc_timeout: - if "timeout" in kwargs: - kwargs["timeout"] = expires - current_time - continue - raise - return result - - -# Choose the best implementation, roughly: -# kqueue == devpoll == epoll > poll > select -# select() also can't accept a FD > FD_SETSIZE (usually around 1024) -def DefaultSelector(): - """ This function serves as a first call for DefaultSelector to - detect if the select module is being monkey-patched incorrectly - by eventlet, greenlet, and preserve proper behavior. """ - global _DEFAULT_SELECTOR - if _DEFAULT_SELECTOR is None: - if pycompat.isjython: - _DEFAULT_SELECTOR = JythonSelectSelector - elif _can_allocate('kqueue'): - _DEFAULT_SELECTOR = KqueueSelector - elif _can_allocate('devpoll'): - _DEFAULT_SELECTOR = DevpollSelector - elif _can_allocate('epoll'): - _DEFAULT_SELECTOR = EpollSelector - elif _can_allocate('poll'): - _DEFAULT_SELECTOR = PollSelector - elif hasattr(select, 'select'): - _DEFAULT_SELECTOR = SelectSelector - else: # Platform-specific: AppEngine - raise RuntimeError('Platform does not have a selector.') - return _DEFAULT_SELECTOR() diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/setdiscovery.py --- a/mercurial/setdiscovery.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/setdiscovery.py Mon Jan 22 17:53:02 2018 -0500 @@ -133,7 +133,8 @@ def findcommonheads(ui, local, remote, initialsamplesize=100, fullsamplesize=200, - abortwhenunrelated=True): + abortwhenunrelated=True, + ancestorsof=None): '''Return a tuple (common, anyincoming, remoteheads) used to identify missing nodes from or in remote. 
''' @@ -141,7 +142,11 @@ roundtrips = 0 cl = local.changelog - dag = dagutil.revlogdag(cl) + localsubset = None + if ancestorsof is not None: + rev = local.changelog.rev + localsubset = [rev(n) for n in ancestorsof] + dag = dagutil.revlogdag(cl, localsubset=localsubset) # early exit if we know all the specified remote heads already ui.debug("query 1; heads\n") diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/simplemerge.py --- a/mercurial/simplemerge.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/simplemerge.py Mon Jan 22 17:53:02 2018 -0500 @@ -418,6 +418,8 @@ The merged result is written into `localctx`. """ + opts = pycompat.byteskwargs(opts) + def readctx(ctx): # Merges were always run in the working copy before, which means # they used decoded data, if the user defined any repository diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/smartset.py --- a/mercurial/smartset.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/smartset.py Mon Jan 22 17:53:02 2018 -0500 @@ -772,6 +772,16 @@ >>> xs.last() # cached 4 """ + def __new__(cls, gen, iterasc=None): + if iterasc is None: + typ = cls + elif iterasc: + typ = _generatorsetasc + else: + typ = _generatorsetdesc + + return super(generatorset, cls).__new__(typ) + def __init__(self, gen, iterasc=None): """ gen: a generator producing the values for the generatorset. @@ -782,13 +792,6 @@ self._genlist = [] self._finished = False self._ascending = True - if iterasc is not None: - if iterasc: - self.fastasc = self._iterator - self.__contains__ = self._asccontains - else: - self.fastdesc = self._iterator - self.__contains__ = self._desccontains def __nonzero__(self): # Do not use 'for r in self' because it will enforce the iteration @@ -814,36 +817,6 @@ self._cache[x] = False return False - def _asccontains(self, x): - """version of contains optimised for ascending generator""" - if x in self._cache: - return self._cache[x] - - # Use new values only, as existing values would be cached. 
- for l in self._consumegen(): - if l == x: - return True - if l > x: - break - - self._cache[x] = False - return False - - def _desccontains(self, x): - """version of contains optimised for descending generator""" - if x in self._cache: - return self._cache[x] - - # Use new values only, as existing values would be cached. - for l in self._consumegen(): - if l == x: - return True - if l < x: - break - - self._cache[x] = False - return False - def __iter__(self): if self._ascending: it = self.fastasc @@ -947,7 +920,45 @@ def __repr__(self): d = {False: '-', True: '+'}[self._ascending] - return '<%s%s>' % (type(self).__name__, d) + return '<%s%s>' % (type(self).__name__.lstrip('_'), d) + +class _generatorsetasc(generatorset): + """Special case of generatorset optimized for ascending generators.""" + + fastasc = generatorset._iterator + + def __contains__(self, x): + if x in self._cache: + return self._cache[x] + + # Use new values only, as existing values would be cached. + for l in self._consumegen(): + if l == x: + return True + if l > x: + break + + self._cache[x] = False + return False + +class _generatorsetdesc(generatorset): + """Special case of generatorset optimized for descending generators.""" + + fastdesc = generatorset._iterator + + def __contains__(self, x): + if x in self._cache: + return self._cache[x] + + # Use new values only, as existing values would be cached. + for l in self._consumegen(): + if l == x: + return True + if l < x: + break + + self._cache[x] = False + return False def spanset(repo, start=0, end=None): """Create a spanset that represents a range of repository revisions diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/sparse.py --- a/mercurial/sparse.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/sparse.py Mon Jan 22 17:53:02 2018 -0500 @@ -12,7 +12,10 @@ import os from .i18n import _ -from .node import nullid +from .node import ( + hex, + nullid, +) from . 
import ( error, match as matchmod, @@ -173,12 +176,12 @@ tempsignature = '0' if signature is None or (includetemp and tempsignature is None): - signature = hashlib.sha1(repo.vfs.tryread('sparse')).hexdigest() + signature = hex(hashlib.sha1(repo.vfs.tryread('sparse')).digest()) cache['signature'] = signature if includetemp: raw = repo.vfs.tryread('tempsparse') - tempsignature = hashlib.sha1(raw).hexdigest() + tempsignature = hex(hashlib.sha1(raw).digest()) cache['tempsignature'] = tempsignature return '%s %s' % (signature, tempsignature) @@ -291,24 +294,9 @@ includes, excludes, profiles = patternsforrev(repo, rev) if includes or excludes: - # Explicitly include subdirectories of includes so - # status will walk them down to the actual include. - subdirs = set() - for include in includes: - # TODO consider using posix path functions here so Windows - # \ directory separators don't come into play. - dirname = os.path.dirname(include) - # basename is used to avoid issues with absolute - # paths (which on Windows can include the drive). - while os.path.basename(dirname): - subdirs.add(dirname) - dirname = os.path.dirname(dirname) - matcher = matchmod.match(repo.root, '', [], include=includes, exclude=excludes, default='relpath') - if subdirs: - matcher = forceincludematcher(matcher, subdirs) matchers.append(matcher) except IOError: pass diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/sshpeer.py --- a/mercurial/sshpeer.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/sshpeer.py Mon Jan 22 17:53:02 2018 -0500 @@ -18,9 +18,9 @@ ) def _serverquote(s): + """quote a string for the remote shell ... which we assume is sh""" if not s: return s - '''quote a string for the remote shell ... 
which we assume is sh''' if re.match('[a-zA-Z0-9@%_+=:,./-]*$', s): return s return "'%s'" % s.replace("'", "'\\''") @@ -136,6 +136,8 @@ sshcmd = self.ui.config("ui", "ssh") remotecmd = self.ui.config("ui", "remotecmd") + sshaddenv = dict(self.ui.configitems("sshenv")) + sshenv = util.shellenviron(sshaddenv) args = util.sshargs(sshcmd, self._host, self._user, self._port) @@ -144,11 +146,11 @@ util.shellquote("%s init %s" % (_serverquote(remotecmd), _serverquote(self._path)))) ui.debug('running %s\n' % cmd) - res = ui.system(cmd, blockedtag='sshpeer') + res = ui.system(cmd, blockedtag='sshpeer', environ=sshenv) if res != 0: self._abort(error.RepoError(_("could not create remote repo"))) - self._validaterepo(sshcmd, args, remotecmd) + self._validaterepo(sshcmd, args, remotecmd, sshenv) # Begin of _basepeer interface. @@ -180,7 +182,7 @@ # End of _basewirecommands interface. - def _validaterepo(self, sshcmd, args, remotecmd): + def _validaterepo(self, sshcmd, args, remotecmd, sshenv=None): # cleanup up previous run self._cleanup() @@ -196,7 +198,7 @@ # no buffer allow the use of 'select' # feel free to remove buffering and select usage when we ultimately # move to threading. 
- sub = util.popen4(cmd, bufsize=0) + sub = util.popen4(cmd, bufsize=0, env=sshenv) self._pipeo, self._pipei, self._pipee, self._subprocess = sub self._pipei = util.bufferedinputpipe(self._pipei) @@ -204,8 +206,9 @@ self._pipeo = doublepipe(self.ui, self._pipeo, self._pipee) def badresponse(): - self._abort(error.RepoError(_('no suitable response from ' - 'remote hg'))) + msg = _("no suitable response from remote hg") + hint = self.ui.config("ui", "ssherrorhint") + self._abort(error.RepoError(msg, hint=hint)) try: # skip any noise generated by remote shell @@ -280,6 +283,17 @@ def _callstream(self, cmd, **args): args = pycompat.byteskwargs(args) + if (self.ui.debugflag + and self.ui.configbool('devel', 'debug.peer-request')): + dbg = self.ui.debug + line = 'devel-peer-request: %s\n' + dbg(line % cmd) + for key, value in sorted(args.items()): + if not isinstance(value, dict): + dbg(line % ' %s: %d bytes' % (key, len(value))) + else: + for dk, dv in sorted(value.items()): + dbg(line % ' %s-%s: %d' % (key, dk, len(dv))) self.ui.debug("sending %s command\n" % cmd) self._pipeo.write("%s\n" % cmd) _func, names = wireproto.commands[cmd] diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/sshserver.py --- a/mercurial/sshserver.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/sshserver.py Mon Jan 22 17:53:02 2018 -0500 @@ -76,13 +76,7 @@ def sendstream(self, source): write = self.fout.write - - if source.reader: - gen = iter(lambda: source.reader.read(4096), '') - else: - gen = source.gen - - for chunk in gen: + for chunk in source.gen: write(chunk) self.fout.flush() @@ -111,6 +105,7 @@ handlers = { str: sendresponse, wireproto.streamres: sendstream, + wireproto.streamres_legacy: sendstream, wireproto.pushres: sendpushresponse, wireproto.pusherr: sendpusherror, wireproto.ooberror: sendooberror, diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/sslutil.py --- a/mercurial/sslutil.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/sslutil.py Mon Jan 22 17:53:02 2018 -0500 @@ -17,6 
+17,7 @@ from .i18n import _ from . import ( error, + node, pycompat, util, ) @@ -96,13 +97,13 @@ # in this legacy code since we don't support SNI. args = { - 'keyfile': self._keyfile, - 'certfile': self._certfile, - 'server_side': server_side, - 'cert_reqs': self.verify_mode, - 'ssl_version': self.protocol, - 'ca_certs': self._cacerts, - 'ciphers': self._ciphers, + r'keyfile': self._keyfile, + r'certfile': self._certfile, + r'server_side': server_side, + r'cert_reqs': self.verify_mode, + r'ssl_version': self.protocol, + r'ca_certs': self._cacerts, + r'ciphers': self._ciphers, } return ssl.wrap_socket(socket, **args) @@ -808,9 +809,9 @@ # If a certificate fingerprint is pinned, use it and only it to # validate the remote cert. peerfingerprints = { - 'sha1': hashlib.sha1(peercert).hexdigest(), - 'sha256': hashlib.sha256(peercert).hexdigest(), - 'sha512': hashlib.sha512(peercert).hexdigest(), + 'sha1': node.hex(hashlib.sha1(peercert).digest()), + 'sha256': node.hex(hashlib.sha256(peercert).digest()), + 'sha512': node.hex(hashlib.sha512(peercert).digest()), } def fmtfingerprint(s): diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/statichttprepo.py --- a/mercurial/statichttprepo.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/statichttprepo.py Mon Jan 22 17:53:02 2018 -0500 @@ -166,8 +166,6 @@ self.encodepats = None self.decodepats = None self._transref = None - # Cache of types representing filtered repos. 
- self._filteredrepotypes = {} def _restrictcapabilities(self, caps): caps = super(statichttprepository, self)._restrictcapabilities(caps) diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/statprof.py --- a/mercurial/statprof.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/statprof.py Mon Jan 22 17:53:02 2018 -0500 @@ -815,7 +815,6 @@ tos = sample.stack[0] name = tos.function path = simplifypath(tos.path) - category = '%s:%d' % (path, tos.lineno) stack = tuple((('%s:%d' % (simplifypath(frame.path), frame.lineno), frame.function) for frame in sample.stack)) qstack = collections.deque(stack) @@ -922,7 +921,7 @@ load_data(path=path) - display(**displayargs) + display(**pycompat.strkwargs(displayargs)) return 0 diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/store.py --- a/mercurial/store.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/store.py Mon Jan 22 17:53:02 2018 -0500 @@ -15,6 +15,7 @@ from .i18n import _ from . import ( error, + node, policy, pycompat, util, @@ -221,7 +222,7 @@ _maxshortdirslen = 8 * (_dirprefixlen + 1) - 4 def _hashencode(path, dotencode): - digest = hashlib.sha1(path).hexdigest() + digest = node.hex(hashlib.sha1(path).digest()) le = lowerencode(path[5:]).split('/') # skips prefix 'data/' or 'meta/' parts = _auxencode(le, dotencode) basename = parts[-1] diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/streamclone.py --- a/mercurial/streamclone.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/streamclone.py Mon Jan 22 17:53:02 2018 -0500 @@ -7,23 +7,27 @@ from __future__ import absolute_import +import contextlib +import os import struct +import tempfile +import warnings from .i18n import _ from . import ( branchmap, + cacheutil, error, phases, store, util, ) -def canperformstreamclone(pullop, bailifbundle2supported=False): +def canperformstreamclone(pullop, bundle2=False): """Whether it is possible to perform a streaming clone as part of pull. 
- ``bailifbundle2supported`` will cause the function to return False if - bundle2 stream clones are supported. It should only be called by the - legacy stream clone code path. + ``bundle2`` will cause the function to consider stream clone through + bundle2 and only through bundle2. Returns a tuple of (supported, requirements). ``supported`` is True if streaming clone is supported and False otherwise. ``requirements`` is @@ -35,18 +39,18 @@ bundle2supported = False if pullop.canusebundle2: - if 'v1' in pullop.remotebundle2caps.get('stream', []): + if 'v2' in pullop.remotebundle2caps.get('stream', []): bundle2supported = True # else # Server doesn't support bundle2 stream clone or doesn't support # the versions we support. Fall back and possibly allow legacy. # Ensures legacy code path uses available bundle2. - if bailifbundle2supported and bundle2supported: + if bundle2supported and not bundle2: return False, None # Ensures bundle2 doesn't try to do a stream clone if it isn't supported. - #elif not bailifbundle2supported and not bundle2supported: - # return False, None + elif bundle2 and not bundle2supported: + return False, None # Streaming clone only works on empty repositories. if len(repo): @@ -235,10 +239,26 @@ def generatev1wireproto(repo): """Emit content for version 1 of streaming clone suitable for the wire. - This is the data output from ``generatev1()`` with a header line - indicating file count and byte size. + This is the data output from ``generatev1()`` with 2 header lines. The + first line indicates overall success. The 2nd contains the file count and + byte size of payload. + + The success line contains "0" for success, "1" for stream generation not + allowed, and "2" for error locking the repository (possibly indicating + a permissions error for the server process). 
""" - filecount, bytecount, it = generatev1(repo) + if not allowservergeneration(repo): + yield '1\n' + return + + try: + filecount, bytecount, it = generatev1(repo) + except error.LockError: + yield '2\n' + return + + # Indicates successful response. + yield '0\n' yield '%d %d\n' % (filecount, bytecount) for chunk in it: yield chunk @@ -412,3 +432,203 @@ def apply(self, repo): return applybundlev1(repo, self._fh) + +# type of file to stream +_fileappend = 0 # append only file +_filefull = 1 # full snapshot file + +# Source of the file +_srcstore = 's' # store (svfs) +_srccache = 'c' # cache (cache) + +# This is it's own function so extensions can override it. +def _walkstreamfullstorefiles(repo): + """list snapshot file from the store""" + fnames = [] + if not repo.publishing(): + fnames.append('phaseroots') + return fnames + +def _filterfull(entry, copy, vfsmap): + """actually copy the snapshot files""" + src, name, ftype, data = entry + if ftype != _filefull: + return entry + return (src, name, ftype, copy(vfsmap[src].join(name))) + +@contextlib.contextmanager +def maketempcopies(): + """return a function to temporary copy file""" + files = [] + try: + def copy(src): + fd, dst = tempfile.mkstemp() + os.close(fd) + files.append(dst) + util.copyfiles(src, dst, hardlink=True) + return dst + yield copy + finally: + for tmp in files: + util.tryunlink(tmp) + +def _makemap(repo): + """make a (src -> vfs) map for the repo""" + vfsmap = { + _srcstore: repo.svfs, + _srccache: repo.cachevfs, + } + # we keep repo.vfs out of the on purpose, ther are too many danger there + # (eg: .hg/hgrc) + assert repo.vfs not in vfsmap.values() + + return vfsmap + +def _emit(repo, entries, totalfilesize): + """actually emit the stream bundle""" + vfsmap = _makemap(repo) + progress = repo.ui.progress + progress(_('bundle'), 0, total=totalfilesize, unit=_('bytes')) + with maketempcopies() as copy: + try: + # copy is delayed until we are in the try + entries = [_filterfull(e, copy, vfsmap) 
for e in entries] + yield None # this release the lock on the repository + seen = 0 + + for src, name, ftype, data in entries: + vfs = vfsmap[src] + yield src + yield util.uvarintencode(len(name)) + if ftype == _fileappend: + fp = vfs(name) + size = data + elif ftype == _filefull: + fp = open(data, 'rb') + size = util.fstat(fp).st_size + try: + yield util.uvarintencode(size) + yield name + if size <= 65536: + chunks = (fp.read(size),) + else: + chunks = util.filechunkiter(fp, limit=size) + for chunk in chunks: + seen += len(chunk) + progress(_('bundle'), seen, total=totalfilesize, + unit=_('bytes')) + yield chunk + finally: + fp.close() + finally: + progress(_('bundle'), None) + +def generatev2(repo): + """Emit content for version 2 of a streaming clone. + + the data stream consists the following entries: + 1) A char representing the file destination (eg: store or cache) + 2) A varint containing the length of the filename + 3) A varint containing the length of file data + 4) N bytes containing the filename (the internal, store-agnostic form) + 5) N bytes containing the file data + + Returns a 3-tuple of (file count, file size, data iterator). 
+ """ + + with repo.lock(): + + entries = [] + totalfilesize = 0 + + repo.ui.debug('scanning\n') + for name, ename, size in _walkstreamfiles(repo): + if size: + entries.append((_srcstore, name, _fileappend, size)) + totalfilesize += size + for name in _walkstreamfullstorefiles(repo): + if repo.svfs.exists(name): + totalfilesize += repo.svfs.lstat(name).st_size + entries.append((_srcstore, name, _filefull, None)) + for name in cacheutil.cachetocopy(repo): + if repo.cachevfs.exists(name): + totalfilesize += repo.cachevfs.lstat(name).st_size + entries.append((_srccache, name, _filefull, None)) + + chunks = _emit(repo, entries, totalfilesize) + first = next(chunks) + assert first is None + + return len(entries), totalfilesize, chunks + +@contextlib.contextmanager +def nested(*ctxs): + with warnings.catch_warnings(): + # For some reason, Python decided 'nested' was deprecated without + # replacement. They officially advertised for filtering the deprecation + # warning for people who actually need the feature. + warnings.filterwarnings("ignore",category=DeprecationWarning) + with contextlib.nested(*ctxs): + yield + +def consumev2(repo, fp, filecount, filesize): + """Apply the contents from a version 2 streaming clone. + + Data is read from an object that only needs to provide a ``read(size)`` + method. 
+ """ + with repo.lock(): + repo.ui.status(_('%d files to transfer, %s of data\n') % + (filecount, util.bytecount(filesize))) + + start = util.timer() + handledbytes = 0 + progress = repo.ui.progress + + progress(_('clone'), handledbytes, total=filesize, unit=_('bytes')) + + vfsmap = _makemap(repo) + + with repo.transaction('clone'): + ctxs = (vfs.backgroundclosing(repo.ui) + for vfs in vfsmap.values()) + with nested(*ctxs): + for i in range(filecount): + src = fp.read(1) + vfs = vfsmap[src] + namelen = util.uvarintdecodestream(fp) + datalen = util.uvarintdecodestream(fp) + + name = fp.read(namelen) + + if repo.ui.debugflag: + repo.ui.debug('adding [%s] %s (%s)\n' % + (src, name, util.bytecount(datalen))) + + with vfs(name, 'w') as ofp: + for chunk in util.filechunkiter(fp, limit=datalen): + handledbytes += len(chunk) + progress(_('clone'), handledbytes, total=filesize, + unit=_('bytes')) + ofp.write(chunk) + + # force @filecache properties to be reloaded from + # streamclone-ed file at next access + repo.invalidate(clearfilecache=True) + + elapsed = util.timer() - start + if elapsed <= 0: + elapsed = 0.001 + progress(_('clone'), None) + repo.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') % + (util.bytecount(handledbytes), elapsed, + util.bytecount(handledbytes / elapsed))) + +def applybundlev2(repo, fp, filecount, filesize, requirements): + missingreqs = [r for r in requirements if r not in repo.supported] + if missingreqs: + raise error.Abort(_('unable to apply stream clone: ' + 'unsupported format: %s') % + ', '.join(sorted(missingreqs))) + + consumev2(repo, fp, filecount, filesize) diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/subrepo.py --- a/mercurial/subrepo.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/subrepo.py Mon Jan 22 17:53:02 2018 -0500 @@ -55,13 +55,13 @@ def _getstorehashcachename(remotepath): '''get a unique filename for the store hash cache of a remote repository''' - return 
hashlib.sha1(_expandedabspath(remotepath)).hexdigest()[0:12] + return node.hex(hashlib.sha1(_expandedabspath(remotepath)).digest())[0:12] class SubrepoAbort(error.Abort): """Exception class used to avoid handling a subrepo error more than once""" def __init__(self, *args, **kw): - self.subrepo = kw.pop('subrepo', None) - self.cause = kw.pop('cause', None) + self.subrepo = kw.pop(r'subrepo', None) + self.cause = kw.pop(r'cause', None) error.Abort.__init__(self, *args, **kw) def annotatesubrepoerror(func): @@ -389,24 +389,44 @@ if util.safehasattr(repo, '_subparent'): source = util.url(repo._subsource) if source.isabs(): - return str(source) + return bytes(source) source.path = posixpath.normpath(source.path) parent = _abssource(repo._subparent, push, abort=False) if parent: parent = util.url(util.pconvert(parent)) parent.path = posixpath.join(parent.path or '', source.path) parent.path = posixpath.normpath(parent.path) - return str(parent) + return bytes(parent) else: # recursion reached top repo + path = None if util.safehasattr(repo, '_subtoppath'): - return repo._subtoppath - if push and repo.ui.config('paths', 'default-push'): - return repo.ui.config('paths', 'default-push') - if repo.ui.config('paths', 'default'): - return repo.ui.config('paths', 'default') - if repo.shared(): - # chop off the .hg component to get the default path form + path = repo._subtoppath + elif push and repo.ui.config('paths', 'default-push'): + path = repo.ui.config('paths', 'default-push') + elif repo.ui.config('paths', 'default'): + path = repo.ui.config('paths', 'default') + elif repo.shared(): + # chop off the .hg component to get the default path form. This has + # already run through vfsmod.vfs(..., realpath=True), so it doesn't + # have problems with 'C:' return os.path.dirname(repo.sharedpath) + if path: + # issue5770: 'C:\' and 'C:' are not equivalent paths. The former is + # as expected: an absolute path to the root of the C: drive. 
The + # latter is a relative path, and works like so: + # + # C:\>cd C:\some\path + # C:\>D: + # D:\>python -c "import os; print os.path.abspath('C:')" + # C:\some\path + # + # D:\>python -c "import os; print os.path.abspath('C:relative')" + # C:\some\path\relative + if util.hasdriveletter(path): + if len(path) == 2 or path[2:3] not in br'\/': + path = os.path.abspath(path) + return path + if abort: raise error.Abort(_("default path for subrepository not found")) @@ -789,7 +809,7 @@ yield '# %s\n' % _expandedabspath(remotepath) vfs = self._repo.vfs for relname in filelist: - filehash = hashlib.sha1(vfs.tryread(relname)).hexdigest() + filehash = node.hex(hashlib.sha1(vfs.tryread(relname)).digest()) yield '%s = %s\n' % (relname, filehash) @propertycache @@ -811,7 +831,7 @@ with self._repo.lock(): storehash = list(self._calcstorehash(remotepath)) vfs = self._cachestorehashvfs - vfs.writelines(cachefile, storehash, mode='w', notindexed=True) + vfs.writelines(cachefile, storehash, mode='wb', notindexed=True) def _getctx(self): '''fetch the context for this subrepo revision, possibly a workingctx @@ -841,11 +861,7 @@ if defpath != defpushpath: addpathconfig('default-push', defpushpath) - fp = self._repo.vfs("hgrc", "w", text=True) - try: - fp.write(''.join(lines)) - finally: - fp.close() + self._repo.vfs.write('hgrc', util.tonativeeol(''.join(lines))) @annotatesubrepoerror def add(self, ui, match, prefix, explicitonly, **opts): @@ -1154,24 +1170,24 @@ # 2. 
update the subrepo to the revision specified in # the corresponding substate dictionary self.ui.status(_('reverting subrepo %s\n') % substate[0]) - if not opts.get('no_backup'): + if not opts.get(r'no_backup'): # Revert all files on the subrepo, creating backups # Note that this will not recursively revert subrepos # We could do it if there was a set:subrepos() predicate opts = opts.copy() - opts['date'] = None - opts['rev'] = substate[1] + opts[r'date'] = None + opts[r'rev'] = substate[1] self.filerevert(*pats, **opts) # Update the repo to the revision specified in the given substate - if not opts.get('dry_run'): + if not opts.get(r'dry_run'): self.get(substate, overwrite=True) def filerevert(self, *pats, **opts): - ctx = self._repo[opts['rev']] + ctx = self._repo[opts[r'rev']] parents = self._repo.dirstate.parents() - if opts.get('all'): + if opts.get(r'all'): pats = ['set:modified()'] else: pats = [] @@ -1244,7 +1260,7 @@ if not self.ui.interactive(): # Making stdin be a pipe should prevent svn from behaving # interactively even if we can't pass --non-interactive. 
- extrakw['stdin'] = subprocess.PIPE + extrakw[r'stdin'] = subprocess.PIPE # Starting in svn 1.5 --non-interactive is a global flag # instead of being per-command, but we need to support 1.4 so # we have to be intelligent about what commands take @@ -1284,6 +1300,9 @@ raise error.Abort(_('cannot retrieve svn tool version')) return (int(m.group(1)), int(m.group(2))) + def _svnmissing(self): + return not self.wvfs.exists('.svn') + def _wcrevs(self): # Get the working directory revision as well as the last # commit revision so we can compare the subrepo state with @@ -1331,7 +1350,10 @@ return True, True, bool(missing) return bool(changes), False, bool(missing) + @annotatesubrepoerror def dirty(self, ignoreupdate=False, missing=False): + if self._svnmissing(): + return self._state[1] != '' wcchanged = self._wcchanged() changed = wcchanged[0] or (missing and wcchanged[2]) if not changed: diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/templatefilters.py --- a/mercurial/templatefilters.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/templatefilters.py Mon Jan 22 17:53:02 2018 -0500 @@ -348,6 +348,11 @@ """Date. Returns a date like "2006-09-18".""" return util.shortdate(text) +@templatefilter('slashpath') +def slashpath(path): + """Any text. Replaces the native path separator with slash.""" + return util.pconvert(path) + @templatefilter('splitlines') def splitlines(text): """Any text. 
Split text into a list of lines.""" diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/templatekw.py --- a/mercurial/templatekw.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/templatekw.py Mon Jan 22 17:53:02 2018 -0500 @@ -17,6 +17,7 @@ encoding, error, hbisect, + i18n, obsutil, patch, pycompat, @@ -301,6 +302,30 @@ return getrenamed +def getlogcolumns(): + """Return a dict of log column labels""" + _ = pycompat.identity # temporarily disable gettext + # i18n: column positioning for "hg log" + columns = _('bookmark: %s\n' + 'branch: %s\n' + 'changeset: %s\n' + 'copies: %s\n' + 'date: %s\n' + 'extra: %s=%s\n' + 'files+: %s\n' + 'files-: %s\n' + 'files: %s\n' + 'instability: %s\n' + 'manifest: %s\n' + 'obsolete: %s\n' + 'parent: %s\n' + 'phase: %s\n' + 'summary: %s\n' + 'tag: %s\n' + 'user: %s\n') + return dict(zip([s.split(':', 1)[0] for s in columns.splitlines()], + i18n._(columns).splitlines(True))) + # default templates internally used for rendering of lists defaulttempl = { 'parent': '{rev}:{node|formatnode} ', @@ -513,6 +538,8 @@ return '@' elif ctx.obsolete(): return 'x' + elif ctx.isunstable(): + return '*' elif ctx.closesbranch(): return '_' else: @@ -608,6 +635,7 @@ # the verbosity templatekw available. succsandmarkers = showsuccsandmarkers(**args) + args = pycompat.byteskwargs(args) ui = args['ui'] values = [] @@ -816,7 +844,7 @@ @templatekeyword('phaseidx') def showphaseidx(repo, ctx, templ, **args): - """Integer. The changeset phase index.""" + """Integer. The changeset phase index. (ADVANCED)""" return ctx.phase() @templatekeyword('rev') @@ -860,12 +888,6 @@ """List of strings. Any tags associated with the changeset.""" return shownames('tags', **args) -def loadkeyword(ui, extname, registrarobj): - """Load template keyword from specified registrarobj - """ - for name, func in registrarobj._table.iteritems(): - keywords[name] = func - @templatekeyword('termwidth') def showtermwidth(repo, ctx, templ, **args): """Integer. 
The width of the current terminal.""" @@ -891,5 +913,24 @@ return showlist('instability', args['ctx'].instabilities(), args, plural='instabilities') +@templatekeyword('verbosity') +def showverbosity(ui, **args): + """String. The current output verbosity in 'debug', 'quiet', 'verbose', + or ''.""" + # see cmdutil.changeset_templater for priority of these flags + if ui.debugflag: + return 'debug' + elif ui.quiet: + return 'quiet' + elif ui.verbose: + return 'verbose' + return '' + +def loadkeyword(ui, extname, registrarobj): + """Load template keyword from specified registrarobj + """ + for name, func in registrarobj._table.iteritems(): + keywords[name] = func + # tell hggettext to extract docstrings from these functions: i18nfunctions = keywords.values() diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/templater.py --- a/mercurial/templater.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/templater.py Mon Jan 22 17:53:02 2018 -0500 @@ -184,6 +184,8 @@ return parsed, n + 1 parseres, pos = p.parse(tokenize(tmpl, n + 1, stop, '}')) + if not tmpl.endswith('}', n + 1, pos): + raise error.ParseError(_("invalid token"), pos) parsed.append(parseres) if quote: @@ -257,6 +259,8 @@ def compileexp(exp, context, curmethods): """Compile parsed template tree to (func, data) pair""" + if not exp: + raise error.ParseError(_("missing argument")) t = exp[0] if t in curmethods: return curmethods[t](exp, context) @@ -382,9 +386,7 @@ raise error.Abort(_("recursive reference '%s' in template") % key) def runsymbol(context, mapping, key, default=''): - v = mapping.get(key) - if v is None: - v = context._defaults.get(key) + v = context.symbol(mapping, key) if v is None: # put poison to cut recursion. we can't move this to parsing phase # because "x = {x}" is allowed if "x" is a keyword. 
(issue4758) @@ -395,7 +397,11 @@ except TemplateNotFound: v = default if callable(v): - return v(**pycompat.strkwargs(mapping)) + # TODO: templatekw functions will be updated to take (context, mapping) + # pair instead of **props + props = context._resources.copy() + props.update(mapping) + return v(**pycompat.strkwargs(props)) return v def buildtemplate(exp, context): @@ -626,7 +632,7 @@ return [s] return [] - ctx = mapping['ctx'] + ctx = context.resource(mapping, 'ctx') chunks = ctx.diff(match=ctx.match([], getpatterns(0), getpatterns(1))) return ''.join(chunks) @@ -639,8 +645,8 @@ raise error.ParseError(_('extdata expects one argument')) source = evalstring(context, mapping, args['source']) - cache = mapping['cache'].setdefault('extdata', {}) - ctx = mapping['ctx'] + cache = context.resource(mapping, 'cache').setdefault('extdata', {}) + ctx = context.resource(mapping, 'ctx') if source in cache: data = cache[source] else: @@ -656,10 +662,13 @@ raise error.ParseError(_("files expects one argument")) raw = evalstring(context, mapping, args[0]) - ctx = mapping['ctx'] + ctx = context.resource(mapping, 'ctx') m = ctx.match([raw]) files = list(ctx.matches(m)) - return templatekw.showlist("file", files, mapping) + # TODO: pass (context, mapping) pair to keyword function + props = context._resources.copy() + props.update(mapping) + return templatekw.showlist("file", files, props) @templatefunc('fill(text[, width[, initialident[, hangindent]]])') def fill(context, mapping, args): @@ -692,7 +701,7 @@ # i18n: "formatnode" is a keyword raise error.ParseError(_("formatnode expects one argument")) - ui = mapping['ui'] + ui = context.resource(mapping, 'ui') node = evalstring(context, mapping, args[0]) if ui.debugflag: return node @@ -858,7 +867,7 @@ # i18n: "label" is a keyword raise error.ParseError(_("label expects two arguments")) - ui = mapping['ui'] + ui = context.resource(mapping, 'ui') thing = evalstring(context, mapping, args[1]) # preserve unknown symbol as literal so 
effects like 'red', 'bold', # etc. don't need to be quoted @@ -880,7 +889,10 @@ if len(args) == 1: pattern = evalstring(context, mapping, args[0]) - return templatekw.showlatesttags(pattern, **mapping) + # TODO: pass (context, mapping) pair to keyword function + props = context._resources.copy() + props.update(mapping) + return templatekw.showlatesttags(pattern, **pycompat.strkwargs(props)) @templatefunc('localdate(date[, tz])') def localdate(context, mapping, args): @@ -1005,17 +1017,18 @@ "obsmakers") raise error.ParseError(msg) -@templatefunc('obsfateverb(successors)') +@templatefunc('obsfateverb(successors, markers)') def obsfateverb(context, mapping, args): """Compute obsfate related information based on successors (EXPERIMENTAL)""" - if len(args) != 1: + if len(args) != 2: # i18n: "obsfateverb" is a keyword - raise error.ParseError(_("obsfateverb expects one arguments")) + raise error.ParseError(_("obsfateverb expects two arguments")) successors = evalfuncarg(context, mapping, args[0]) + markers = evalfuncarg(context, mapping, args[1]) try: - return obsutil.successorsetverb(successors) + return obsutil.obsfateverb(successors, markers) except TypeError: # i18n: "obsfateverb" is a keyword errmsg = _("obsfateverb first argument should be countable") @@ -1029,7 +1042,7 @@ # i18n: "relpath" is a keyword raise error.ParseError(_("relpath expects one argument")) - repo = mapping['ctx'].repo() + repo = context.resource(mapping, 'ctx').repo() path = evalstring(context, mapping, args[0]) return repo.pathto(path) @@ -1042,7 +1055,7 @@ raise error.ParseError(_("revset expects one or more arguments")) raw = evalstring(context, mapping, args[0]) - ctx = mapping['ctx'] + ctx = context.resource(mapping, 'ctx') repo = ctx.repo() def query(expr): @@ -1054,7 +1067,8 @@ revs = query(revsetlang.formatspec(raw, *formatargs)) revs = list(revs) else: - revsetcache = mapping['cache'].setdefault("revsetcache", {}) + cache = context.resource(mapping, 'cache') + revsetcache = 
cache.setdefault("revsetcache", {}) if raw in revsetcache: revs = revsetcache[raw] else: @@ -1062,7 +1076,11 @@ revs = list(revs) revsetcache[raw] = revs - return templatekw.showrevslist("revision", revs, **mapping) + # TODO: pass (context, mapping) pair to keyword function + props = context._resources.copy() + props.update(mapping) + return templatekw.showrevslist("revision", revs, + **pycompat.strkwargs(props)) @templatefunc('rstdoc(text, style)') def rstdoc(context, mapping, args): @@ -1114,7 +1132,7 @@ # _partialmatch() of filtered changelog could take O(len(repo)) time, # which would be unacceptably slow. so we look for hash collision in # unfiltered space, which means some hashes may be slightly longer. - cl = mapping['ctx']._repo.unfiltered().changelog + cl = context.resource(mapping, 'ctx')._repo.unfiltered().changelog return cl.shortest(node, minlength) @templatefunc('strip(text[, chars])') @@ -1289,17 +1307,42 @@ filter uses function to transform value. syntax is {key|filter1|filter2|...}.''' - def __init__(self, loader, filters=None, defaults=None, aliases=()): + def __init__(self, loader, filters=None, defaults=None, resources=None, + aliases=()): self._loader = loader if filters is None: filters = {} self._filters = filters if defaults is None: defaults = {} + if resources is None: + resources = {} self._defaults = defaults + self._resources = resources self._aliasmap = _aliasrules.buildmap(aliases) self._cache = {} # key: (func, data) + def symbol(self, mapping, key): + """Resolve symbol to value or function; None if nothing found""" + v = None + if key not in self._resources: + v = mapping.get(key) + if v is None: + v = self._defaults.get(key) + return v + + def resource(self, mapping, key): + """Return internal data (e.g. 
cache) used for keyword/function + evaluation""" + v = None + if key in self._resources: + v = mapping.get(key) + if v is None: + v = self._resources.get(key) + if v is None: + raise error.Abort(_('template resource not available: %s') % key) + return v + def _load(self, t): '''load, parse, and cache a template''' if t not in self._cache: @@ -1393,17 +1436,27 @@ class templater(object): - def __init__(self, filters=None, defaults=None, cache=None, aliases=(), - minchunk=1024, maxchunk=65536): - '''set up template engine. - filters is dict of functions. each transforms a value into another. - defaults is dict of default map definitions. - aliases is list of alias (name, replacement) pairs. - ''' + def __init__(self, filters=None, defaults=None, resources=None, + cache=None, aliases=(), minchunk=1024, maxchunk=65536): + """Create template engine optionally with preloaded template fragments + + - ``filters``: a dict of functions to transform a value into another. + - ``defaults``: a dict of symbol values/functions; may be overridden + by a ``mapping`` dict. + - ``resources``: a dict of internal data (e.g. cache), inaccessible + from user template; may be overridden by a ``mapping`` dict. + - ``cache``: a dict of preloaded template fragments. + - ``aliases``: a list of alias (name, replacement) pairs. + + self.cache may be updated later to register additional template + fragments. 
+ """ if filters is None: filters = {} if defaults is None: defaults = {} + if resources is None: + resources = {} if cache is None: cache = {} self.cache = cache.copy() @@ -1411,15 +1464,17 @@ self.filters = templatefilters.filters.copy() self.filters.update(filters) self.defaults = defaults + self._resources = {'templ': self} + self._resources.update(resources) self._aliases = aliases self.minchunk, self.maxchunk = minchunk, maxchunk self.ecache = {} @classmethod - def frommapfile(cls, mapfile, filters=None, defaults=None, cache=None, - minchunk=1024, maxchunk=65536): + def frommapfile(cls, mapfile, filters=None, defaults=None, resources=None, + cache=None, minchunk=1024, maxchunk=65536): """Create templater from the specified map file""" - t = cls(filters, defaults, cache, [], minchunk, maxchunk) + t = cls(filters, defaults, resources, cache, [], minchunk, maxchunk) cache, tmap, aliases = _readmapfile(mapfile) t.cache.update(cache) t.map = tmap @@ -1456,7 +1511,7 @@ except KeyError: raise error.Abort(_('invalid template engine: %s') % ttype) self.ecache[ttype] = ecls(self.load, self.filters, self.defaults, - self._aliases) + self._resources, self._aliases) proc = self.ecache[ttype] stream = proc.process(t, mapping) diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/templates/gitweb/changelogentry.tmpl --- a/mercurial/templates/gitweb/changelogentry.tmpl Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/templates/gitweb/changelogentry.tmpl Mon Jan 22 17:53:02 2018 -0500 @@ -1,5 +1,9 @@
@@ -41,6 +44,7 @@ +{if(obsolete, '')} {ifeq(count(parent), '2', parent%changesetparentdiff, parent%changesetparent)} {child%changesetchild}
changeset {rev} {node|short}
obsolete{succsandmarkers%obsfateentry}
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/templates/gitweb/filelog.tmpl --- a/mercurial/templates/gitweb/filelog.tmpl Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/templates/gitweb/filelog.tmpl Mon Jan 22 17:53:02 2018 -0500 @@ -36,7 +36,7 @@
{file|urlescape}{if(linerange, -' (following lines {linerange}{if(descend, ', descending')} back to filelog)')} +' (following lines {linerange}{if(descend, ', descending')} all revisions for this file)')}
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/templates/gitweb/graph.tmpl --- a/mercurial/templates/gitweb/graph.tmpl Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/templates/gitweb/graph.tmpl Mon Jan 22 17:53:02 2018 -0500 @@ -4,7 +4,6 @@ href="{url|urlescape}atom-log" title="Atom feed for {repo|escape}"/> - @@ -37,66 +36,15 @@
-
    - -
      + +
        {nodes%graphentry}
      -
      @@ -107,9 +55,12 @@
      @@ -100,9 +49,12 @@
      ' changesettag = '' +successorlink = '{node|short} ' +obsfatesuccessors = '{if(successors, ' as ')}{successors%successorlink}' +obsfateverb = '{obsfateverb(successors, markers)}' +obsfateoperations = '{if(obsfateoperations(markers), ' using {join(obsfateoperations(markers), ', ')}')}' +obsfateentry = '{obsfateverb}{obsfateoperations}{obsfatesuccessors}' filediffparent = ' diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/templates/static/excanvas.js --- a/mercurial/templates/static/excanvas.js Mon Jan 08 16:07:51 2018 -0800 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,924 +0,0 @@ -// Copyright 2006 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - - -// Known Issues: -// -// * Patterns are not implemented. -// * Radial gradient are not implemented. The VML version of these look very -// different from the canvas one. -// * Clipping paths are not implemented. -// * Coordsize. The width and height attribute have higher priority than the -// width and height style values which isn't correct. -// * Painting mode isn't implemented. -// * Canvas width/height should is using content-box by default. IE in -// Quirks mode will draw the canvas using border-box. Either change your -// doctype to HTML5 -// (http://www.whatwg.org/specs/web-apps/current-work/#the-doctype) -// or use Box Sizing Behavior from WebFX -// (http://webfx.eae.net/dhtml/boxsizing/boxsizing.html) -// * Non uniform scaling does not correctly scale strokes. -// * Optimize. 
There is always room for speed improvements. - -// Only add this code if we do not already have a canvas implementation -if (!document.createElement('canvas').getContext) { - -(function() { - - // alias some functions to make (compiled) code shorter - var m = Math; - var mr = m.round; - var ms = m.sin; - var mc = m.cos; - var abs = m.abs; - var sqrt = m.sqrt; - - // this is used for sub pixel precision - var Z = 10; - var Z2 = Z / 2; - - /** - * This funtion is assigned to the elements as element.getContext(). - * @this {HTMLElement} - * @return {CanvasRenderingContext2D_} - */ - function getContext() { - return this.context_ || - (this.context_ = new CanvasRenderingContext2D_(this)); - } - - var slice = Array.prototype.slice; - - /** - * Binds a function to an object. The returned function will always use the - * passed in {@code obj} as {@code this}. - * - * Example: - * - * g = bind(f, obj, a, b) - * g(c, d) // will do f.call(obj, a, b, c, d) - * - * @param {Function} f The function to bind the object to - * @param {Object} obj The object that should act as this when the function - * is called - * @param {*} var_args Rest arguments that will be used as the initial - * arguments when the function is called - * @return {Function} A new function that has bound this - */ - function bind(f, obj, var_args) { - var a = slice.call(arguments, 2); - return function() { - return f.apply(obj, a.concat(slice.call(arguments))); - }; - } - - var G_vmlCanvasManager_ = { - init: function(opt_doc) { - if (/MSIE/.test(navigator.userAgent) && !window.opera) { - var doc = opt_doc || document; - // Create a dummy element so that IE will allow canvas elements to be - // recognized. 
- doc.createElement('canvas'); - doc.attachEvent('onreadystatechange', bind(this.init_, this, doc)); - } - }, - - init_: function(doc) { - // create xmlns - if (!doc.namespaces['g_vml_']) { - doc.namespaces.add('g_vml_', 'urn:schemas-microsoft-com:vml', - '#default#VML'); - - } - if (!doc.namespaces['g_o_']) { - doc.namespaces.add('g_o_', 'urn:schemas-microsoft-com:office:office', - '#default#VML'); - } - - // Setup default CSS. Only add one style sheet per document - if (!doc.styleSheets['ex_canvas_']) { - var ss = doc.createStyleSheet(); - ss.owningElement.id = 'ex_canvas_'; - ss.cssText = 'canvas{display:inline-block;overflow:hidden;' + - // default size is 300x150 in Gecko and Opera - 'text-align:left;width:300px;height:150px}' + - 'g_vml_\\:*{behavior:url(#default#VML)}' + - 'g_o_\\:*{behavior:url(#default#VML)}'; - - } - - // find all canvas elements - var els = doc.getElementsByTagName('canvas'); - for (var i = 0; i < els.length; i++) { - this.initElement(els[i]); - } - }, - - /** - * Public initializes a canvas element so that it can be used as canvas - * element from now on. This is called automatically before the page is - * loaded but if you are creating elements using createElement you need to - * make sure this is called on the element. - * @param {HTMLElement} el The canvas element to initialize. - * @return {HTMLElement} the element that was created. - */ - initElement: function(el) { - if (!el.getContext) { - - el.getContext = getContext; - - // Remove fallback content. There is no way to hide text nodes so we - // just remove all childNodes. We could hide all elements and remove - // text nodes but who really cares about the fallback content. 
- el.innerHTML = ''; - - // do not use inline function because that will leak memory - el.attachEvent('onpropertychange', onPropertyChange); - el.attachEvent('onresize', onResize); - - var attrs = el.attributes; - if (attrs.width && attrs.width.specified) { - // TODO: use runtimeStyle and coordsize - // el.getContext().setWidth_(attrs.width.nodeValue); - el.style.width = attrs.width.nodeValue + 'px'; - } else { - el.width = el.clientWidth; - } - if (attrs.height && attrs.height.specified) { - // TODO: use runtimeStyle and coordsize - // el.getContext().setHeight_(attrs.height.nodeValue); - el.style.height = attrs.height.nodeValue + 'px'; - } else { - el.height = el.clientHeight; - } - //el.getContext().setCoordsize_() - } - return el; - } - }; - - function onPropertyChange(e) { - var el = e.srcElement; - - switch (e.propertyName) { - case 'width': - el.style.width = el.attributes.width.nodeValue + 'px'; - el.getContext().clearRect(); - break; - case 'height': - el.style.height = el.attributes.height.nodeValue + 'px'; - el.getContext().clearRect(); - break; - } - } - - function onResize(e) { - var el = e.srcElement; - if (el.firstChild) { - el.firstChild.style.width = el.clientWidth + 'px'; - el.firstChild.style.height = el.clientHeight + 'px'; - } - } - - G_vmlCanvasManager_.init(); - - // precompute "00" to "FF" - var dec2hex = []; - for (var i = 0; i < 16; i++) { - for (var j = 0; j < 16; j++) { - dec2hex[i * 16 + j] = i.toString(16) + j.toString(16); - } - } - - function createMatrixIdentity() { - return [ - [1, 0, 0], - [0, 1, 0], - [0, 0, 1] - ]; - } - - function matrixMultiply(m1, m2) { - var result = createMatrixIdentity(); - - for (var x = 0; x < 3; x++) { - for (var y = 0; y < 3; y++) { - var sum = 0; - - for (var z = 0; z < 3; z++) { - sum += m1[x][z] * m2[z][y]; - } - - result[x][y] = sum; - } - } - return result; - } - - function copyState(o1, o2) { - o2.fillStyle = o1.fillStyle; - o2.lineCap = o1.lineCap; - o2.lineJoin = o1.lineJoin; - o2.lineWidth = 
o1.lineWidth; - o2.miterLimit = o1.miterLimit; - o2.shadowBlur = o1.shadowBlur; - o2.shadowColor = o1.shadowColor; - o2.shadowOffsetX = o1.shadowOffsetX; - o2.shadowOffsetY = o1.shadowOffsetY; - o2.strokeStyle = o1.strokeStyle; - o2.globalAlpha = o1.globalAlpha; - o2.arcScaleX_ = o1.arcScaleX_; - o2.arcScaleY_ = o1.arcScaleY_; - o2.lineScale_ = o1.lineScale_; - } - - function processStyle(styleString) { - var str, alpha = 1; - - styleString = String(styleString); - if (styleString.substring(0, 3) == 'rgb') { - var start = styleString.indexOf('(', 3); - var end = styleString.indexOf(')', start + 1); - var guts = styleString.substring(start + 1, end).split(','); - - str = '#'; - for (var i = 0; i < 3; i++) { - str += dec2hex[Number(guts[i])]; - } - - if (guts.length == 4 && styleString.substr(3, 1) == 'a') { - alpha = guts[3]; - } - } else { - str = styleString; - } - - return {color: str, alpha: alpha}; - } - - function processLineCap(lineCap) { - switch (lineCap) { - case 'butt': - return 'flat'; - case 'round': - return 'round'; - case 'square': - default: - return 'square'; - } - } - - /** - * This class implements CanvasRenderingContext2D interface as described by - * the WHATWG. 
- * @param {HTMLElement} surfaceElement The element that the 2D context should - * be associated with - */ - function CanvasRenderingContext2D_(surfaceElement) { - this.m_ = createMatrixIdentity(); - - this.mStack_ = []; - this.aStack_ = []; - this.currentPath_ = []; - - // Canvas context properties - this.strokeStyle = '#000'; - this.fillStyle = '#000'; - - this.lineWidth = 1; - this.lineJoin = 'miter'; - this.lineCap = 'butt'; - this.miterLimit = Z * 1; - this.globalAlpha = 1; - this.canvas = surfaceElement; - - var el = surfaceElement.ownerDocument.createElement('div'); - el.style.width = surfaceElement.clientWidth + 'px'; - el.style.height = surfaceElement.clientHeight + 'px'; - el.style.overflow = 'hidden'; - el.style.position = 'absolute'; - surfaceElement.appendChild(el); - - this.element_ = el; - this.arcScaleX_ = 1; - this.arcScaleY_ = 1; - this.lineScale_ = 1; - } - - var contextPrototype = CanvasRenderingContext2D_.prototype; - contextPrototype.clearRect = function() { - this.element_.innerHTML = ''; - }; - - contextPrototype.beginPath = function() { - // TODO: Branch current matrix so that save/restore has no effect - // as per safari docs. - this.currentPath_ = []; - }; - - contextPrototype.moveTo = function(aX, aY) { - var p = this.getCoords_(aX, aY); - this.currentPath_.push({type: 'moveTo', x: p.x, y: p.y}); - this.currentX_ = p.x; - this.currentY_ = p.y; - }; - - contextPrototype.lineTo = function(aX, aY) { - var p = this.getCoords_(aX, aY); - this.currentPath_.push({type: 'lineTo', x: p.x, y: p.y}); - - this.currentX_ = p.x; - this.currentY_ = p.y; - }; - - contextPrototype.bezierCurveTo = function(aCP1x, aCP1y, - aCP2x, aCP2y, - aX, aY) { - var p = this.getCoords_(aX, aY); - var cp1 = this.getCoords_(aCP1x, aCP1y); - var cp2 = this.getCoords_(aCP2x, aCP2y); - bezierCurveTo(this, cp1, cp2, p); - }; - - // Helper function that takes the already fixed cordinates. 
- function bezierCurveTo(self, cp1, cp2, p) { - self.currentPath_.push({ - type: 'bezierCurveTo', - cp1x: cp1.x, - cp1y: cp1.y, - cp2x: cp2.x, - cp2y: cp2.y, - x: p.x, - y: p.y - }); - self.currentX_ = p.x; - self.currentY_ = p.y; - } - - contextPrototype.quadraticCurveTo = function(aCPx, aCPy, aX, aY) { - // the following is lifted almost directly from - // http://developer.mozilla.org/en/docs/Canvas_tutorial:Drawing_shapes - - var cp = this.getCoords_(aCPx, aCPy); - var p = this.getCoords_(aX, aY); - - var cp1 = { - x: this.currentX_ + 2.0 / 3.0 * (cp.x - this.currentX_), - y: this.currentY_ + 2.0 / 3.0 * (cp.y - this.currentY_) - }; - var cp2 = { - x: cp1.x + (p.x - this.currentX_) / 3.0, - y: cp1.y + (p.y - this.currentY_) / 3.0 - }; - - bezierCurveTo(this, cp1, cp2, p); - }; - - contextPrototype.arc = function(aX, aY, aRadius, - aStartAngle, aEndAngle, aClockwise) { - aRadius *= Z; - var arcType = aClockwise ? 'at' : 'wa'; - - var xStart = aX + mc(aStartAngle) * aRadius - Z2; - var yStart = aY + ms(aStartAngle) * aRadius - Z2; - - var xEnd = aX + mc(aEndAngle) * aRadius - Z2; - var yEnd = aY + ms(aEndAngle) * aRadius - Z2; - - // IE won't render arches drawn counter clockwise if xStart == xEnd. - if (xStart == xEnd && !aClockwise) { - xStart += 0.125; // Offset xStart by 1/80 of a pixel. 
Use something - // that can be represented in binary - } - - var p = this.getCoords_(aX, aY); - var pStart = this.getCoords_(xStart, yStart); - var pEnd = this.getCoords_(xEnd, yEnd); - - this.currentPath_.push({type: arcType, - x: p.x, - y: p.y, - radius: aRadius, - xStart: pStart.x, - yStart: pStart.y, - xEnd: pEnd.x, - yEnd: pEnd.y}); - - }; - - contextPrototype.rect = function(aX, aY, aWidth, aHeight) { - this.moveTo(aX, aY); - this.lineTo(aX + aWidth, aY); - this.lineTo(aX + aWidth, aY + aHeight); - this.lineTo(aX, aY + aHeight); - this.closePath(); - }; - - contextPrototype.strokeRect = function(aX, aY, aWidth, aHeight) { - var oldPath = this.currentPath_; - this.beginPath(); - - this.moveTo(aX, aY); - this.lineTo(aX + aWidth, aY); - this.lineTo(aX + aWidth, aY + aHeight); - this.lineTo(aX, aY + aHeight); - this.closePath(); - this.stroke(); - - this.currentPath_ = oldPath; - }; - - contextPrototype.fillRect = function(aX, aY, aWidth, aHeight) { - var oldPath = this.currentPath_; - this.beginPath(); - - this.moveTo(aX, aY); - this.lineTo(aX + aWidth, aY); - this.lineTo(aX + aWidth, aY + aHeight); - this.lineTo(aX, aY + aHeight); - this.closePath(); - this.fill(); - - this.currentPath_ = oldPath; - }; - - contextPrototype.createLinearGradient = function(aX0, aY0, aX1, aY1) { - var gradient = new CanvasGradient_('gradient'); - gradient.x0_ = aX0; - gradient.y0_ = aY0; - gradient.x1_ = aX1; - gradient.y1_ = aY1; - return gradient; - }; - - contextPrototype.createRadialGradient = function(aX0, aY0, aR0, - aX1, aY1, aR1) { - var gradient = new CanvasGradient_('gradientradial'); - gradient.x0_ = aX0; - gradient.y0_ = aY0; - gradient.r0_ = aR0; - gradient.x1_ = aX1; - gradient.y1_ = aY1; - gradient.r1_ = aR1; - return gradient; - }; - - contextPrototype.drawImage = function(image, var_args) { - var dx, dy, dw, dh, sx, sy, sw, sh; - - // to find the original width we overide the width and height - var oldRuntimeWidth = image.runtimeStyle.width; - var oldRuntimeHeight 
= image.runtimeStyle.height; - image.runtimeStyle.width = 'auto'; - image.runtimeStyle.height = 'auto'; - - // get the original size - var w = image.width; - var h = image.height; - - // and remove overides - image.runtimeStyle.width = oldRuntimeWidth; - image.runtimeStyle.height = oldRuntimeHeight; - - if (arguments.length == 3) { - dx = arguments[1]; - dy = arguments[2]; - sx = sy = 0; - sw = dw = w; - sh = dh = h; - } else if (arguments.length == 5) { - dx = arguments[1]; - dy = arguments[2]; - dw = arguments[3]; - dh = arguments[4]; - sx = sy = 0; - sw = w; - sh = h; - } else if (arguments.length == 9) { - sx = arguments[1]; - sy = arguments[2]; - sw = arguments[3]; - sh = arguments[4]; - dx = arguments[5]; - dy = arguments[6]; - dw = arguments[7]; - dh = arguments[8]; - } else { - throw Error('Invalid number of arguments'); - } - - var d = this.getCoords_(dx, dy); - - var w2 = sw / 2; - var h2 = sh / 2; - - var vmlStr = []; - - var W = 10; - var H = 10; - - // For some reason that I've now forgotten, using divs didn't work - vmlStr.push(' ' , - '', - ''); - - this.element_.insertAdjacentHTML('BeforeEnd', - vmlStr.join('')); - }; - - contextPrototype.stroke = function(aFill) { - var lineStr = []; - var lineOpen = false; - var a = processStyle(aFill ? this.fillStyle : this.strokeStyle); - var color = a.color; - var opacity = a.alpha * this.globalAlpha; - - var W = 10; - var H = 10; - - lineStr.push(''); - - if (!aFill) { - var lineWidth = this.lineScale_ * this.lineWidth; - - // VML cannot correctly render a line if the width is less than 1px. - // In that case, we dilute the color to make the line look thinner. 
- if (lineWidth < 1) { - opacity *= lineWidth; - } - - lineStr.push( - '' - ); - } else if (typeof this.fillStyle == 'object') { - var fillStyle = this.fillStyle; - var angle = 0; - var focus = {x: 0, y: 0}; - - // additional offset - var shift = 0; - // scale factor for offset - var expansion = 1; - - if (fillStyle.type_ == 'gradient') { - var x0 = fillStyle.x0_ / this.arcScaleX_; - var y0 = fillStyle.y0_ / this.arcScaleY_; - var x1 = fillStyle.x1_ / this.arcScaleX_; - var y1 = fillStyle.y1_ / this.arcScaleY_; - var p0 = this.getCoords_(x0, y0); - var p1 = this.getCoords_(x1, y1); - var dx = p1.x - p0.x; - var dy = p1.y - p0.y; - angle = Math.atan2(dx, dy) * 180 / Math.PI; - - // The angle should be a non-negative number. - if (angle < 0) { - angle += 360; - } - - // Very small angles produce an unexpected result because they are - // converted to a scientific notation string. - if (angle < 1e-6) { - angle = 0; - } - } else { - var p0 = this.getCoords_(fillStyle.x0_, fillStyle.y0_); - var width = max.x - min.x; - var height = max.y - min.y; - focus = { - x: (p0.x - min.x) / width, - y: (p0.y - min.y) / height - }; - - width /= this.arcScaleX_ * Z; - height /= this.arcScaleY_ * Z; - var dimension = m.max(width, height); - shift = 2 * fillStyle.r0_ / dimension; - expansion = 2 * fillStyle.r1_ / dimension - shift; - } - - // We need to sort the color stops in ascending order by offset, - // otherwise IE won't interpret it correctly. 
- var stops = fillStyle.colors_; - stops.sort(function(cs1, cs2) { - return cs1.offset - cs2.offset; - }); - - var length = stops.length; - var color1 = stops[0].color; - var color2 = stops[length - 1].color; - var opacity1 = stops[0].alpha * this.globalAlpha; - var opacity2 = stops[length - 1].alpha * this.globalAlpha; - - var colors = []; - for (var i = 0; i < length; i++) { - var stop = stops[i]; - colors.push(stop.offset * expansion + shift + ' ' + stop.color); - } - - // When colors attribute is used, the meanings of opacity and o:opacity2 - // are reversed. - lineStr.push(''); - } else { - lineStr.push(''); - } - - lineStr.push(''); - - this.element_.insertAdjacentHTML('beforeEnd', lineStr.join('')); - }; - - contextPrototype.fill = function() { - this.stroke(true); - } - - contextPrototype.closePath = function() { - this.currentPath_.push({type: 'close'}); - }; - - /** - * @private - */ - contextPrototype.getCoords_ = function(aX, aY) { - var m = this.m_; - return { - x: Z * (aX * m[0][0] + aY * m[1][0] + m[2][0]) - Z2, - y: Z * (aX * m[0][1] + aY * m[1][1] + m[2][1]) - Z2 - } - }; - - contextPrototype.save = function() { - var o = {}; - copyState(this, o); - this.aStack_.push(o); - this.mStack_.push(this.m_); - this.m_ = matrixMultiply(createMatrixIdentity(), this.m_); - }; - - contextPrototype.restore = function() { - copyState(this.aStack_.pop(), this); - this.m_ = this.mStack_.pop(); - }; - - function matrixIsFinite(m) { - for (var j = 0; j < 3; j++) { - for (var k = 0; k < 2; k++) { - if (!isFinite(m[j][k]) || isNaN(m[j][k])) { - return false; - } - } - } - return true; - } - - function setM(ctx, m, updateLineScale) { - if (!matrixIsFinite(m)) { - return; - } - ctx.m_ = m; - - if (updateLineScale) { - // Get the line scale. - // Determinant of this.m_ means how much the area is enlarged by the - // transformation. So its square root can be used as a scale factor - // for width. 
- var det = m[0][0] * m[1][1] - m[0][1] * m[1][0]; - ctx.lineScale_ = sqrt(abs(det)); - } - } - - contextPrototype.translate = function(aX, aY) { - var m1 = [ - [1, 0, 0], - [0, 1, 0], - [aX, aY, 1] - ]; - - setM(this, matrixMultiply(m1, this.m_), false); - }; - - contextPrototype.rotate = function(aRot) { - var c = mc(aRot); - var s = ms(aRot); - - var m1 = [ - [c, s, 0], - [-s, c, 0], - [0, 0, 1] - ]; - - setM(this, matrixMultiply(m1, this.m_), false); - }; - - contextPrototype.scale = function(aX, aY) { - this.arcScaleX_ *= aX; - this.arcScaleY_ *= aY; - var m1 = [ - [aX, 0, 0], - [0, aY, 0], - [0, 0, 1] - ]; - - setM(this, matrixMultiply(m1, this.m_), true); - }; - - contextPrototype.transform = function(m11, m12, m21, m22, dx, dy) { - var m1 = [ - [m11, m12, 0], - [m21, m22, 0], - [dx, dy, 1] - ]; - - setM(this, matrixMultiply(m1, this.m_), true); - }; - - contextPrototype.setTransform = function(m11, m12, m21, m22, dx, dy) { - var m = [ - [m11, m12, 0], - [m21, m22, 0], - [dx, dy, 1] - ]; - - setM(this, m, true); - }; - - /******** STUBS ********/ - contextPrototype.clip = function() { - // TODO: Implement - }; - - contextPrototype.arcTo = function() { - // TODO: Implement - }; - - contextPrototype.createPattern = function() { - return new CanvasPattern_; - }; - - // Gradient / Pattern Stubs - function CanvasGradient_(aType) { - this.type_ = aType; - this.x0_ = 0; - this.y0_ = 0; - this.r0_ = 0; - this.x1_ = 0; - this.y1_ = 0; - this.r1_ = 0; - this.colors_ = []; - } - - CanvasGradient_.prototype.addColorStop = function(aOffset, aColor) { - aColor = processStyle(aColor); - this.colors_.push({offset: aOffset, - color: aColor.color, - alpha: aColor.alpha}); - }; - - function CanvasPattern_() {} - - // set up externs - G_vmlCanvasManager = G_vmlCanvasManager_; - CanvasRenderingContext2D = CanvasRenderingContext2D_; - CanvasGradient = CanvasGradient_; - CanvasPattern = CanvasPattern_; - -})(); - -} // if diff -r 87676e8ee056 -r 27b6df1b5adb 
mercurial/templates/static/followlines.js --- a/mercurial/templates/static/followlines.js Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/templates/static/followlines.js Mon Jan 22 17:53:02 2018 -0500 @@ -13,7 +13,7 @@ } // URL to complement with "linerange" query parameter var targetUri = sourcelines.dataset.logurl; - if (typeof targetUri === 'undefined') { + if (typeof targetUri === 'undefined') { return; } @@ -38,7 +38,7 @@ // element var selectableElements = Array.prototype.filter.call( sourcelines.children, - function(x) { return x.tagName === selectableTag }); + function(x) { return x.tagName === selectableTag; }); var btnTitleStart = 'start following lines history from here'; var btnTitleEnd = 'terminate line block selection here'; @@ -62,7 +62,7 @@ } // extend DOM with CSS class for selection highlight and action buttons - var followlinesButtons = [] + var followlinesButtons = []; for (var i = 0; i < selectableElements.length; i++) { selectableElements[i].classList.add('followlines-select'); var btn = createButton(); @@ -114,7 +114,7 @@ if (parent === null) { return null; } - if (element.tagName == selectableTag && parent.isSameNode(sourcelines)) { + if (element.tagName === selectableTag && parent.isSameNode(sourcelines)) { return element; } return selectableParent(parent); @@ -182,7 +182,7 @@ // compute line range (startId, endId) var endId = parseInt(endElement.id.slice(1)); - if (endId == startId) { + if (endId === startId) { // clicked twice the same line, cancel and reset initial state // (CSS, event listener for selection start) removeSelectedCSSClass(); diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/templates/static/mercurial.js --- a/mercurial/templates/static/mercurial.js Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/templates/static/mercurial.js Mon Jan 22 17:53:02 2018 -0500 @@ -25,32 +25,29 @@ function Graph() { this.canvas = document.getElementById('graph'); - if (window.G_vmlCanvasManager) this.canvas = 
window.G_vmlCanvasManager.initElement(this.canvas); this.ctx = this.canvas.getContext('2d'); this.ctx.strokeStyle = 'rgb(0, 0, 0)'; this.ctx.fillStyle = 'rgb(0, 0, 0)'; - this.cur = [0, 0]; - this.line_width = 3; this.bg = [0, 4]; this.cell = [2, 0]; this.columns = 0; - this.revlink = ''; + +} - this.reset = function() { +Graph.prototype = { + reset: function() { this.bg = [0, 4]; this.cell = [2, 0]; this.columns = 0; - document.getElementById('nodebgs').innerHTML = ''; - document.getElementById('graphnodes').innerHTML = ''; - } + }, - this.scale = function(height) { + scale: function(height) { this.bg_height = height; this.box_size = Math.floor(this.bg_height / 1.2); this.cell_height = this.box_size; - } + }, - this.setColor = function(color, bg, fg) { + setColor: function(color, bg, fg) { // Set the colour. // @@ -62,9 +59,9 @@ // provides the multiplier that should be applied to // the foreground colours. var s; - if(typeof color == "string") { + if(typeof color === "string") { s = "#" + color; - } else { //typeof color == "number" + } else { //typeof color === "number" color %= colors.length; var red = (colors[color][0] * fg) || bg; var green = (colors[color][1] * fg) || bg; @@ -78,9 +75,9 @@ this.ctx.fillStyle = s; return s; - } + }, - this.edge = function(x0, y0, x1, y1, color, width) { + edge: function(x0, y0, x1, y1, color, width) { this.setColor(color, 0.0, 0.65); if(width >= 0) @@ -90,28 +87,106 @@ this.ctx.lineTo(x1, y1); this.ctx.stroke(); - } + }, + + graphNodeCurrent: function(x, y, radius) { + this.ctx.lineWidth = 2; + this.ctx.beginPath(); + this.ctx.arc(x, y, radius * 1.75, 0, Math.PI * 2, true); + this.ctx.stroke(); + }, + + graphNodeClosing: function(x, y, radius) { + this.ctx.fillRect(x - radius, y - 1.5, radius * 2, 3); + }, - this.render = function(data) { + graphNodeUnstable: function(x, y, radius) { + var x30 = radius * Math.cos(Math.PI / 6); + var y30 = radius * Math.sin(Math.PI / 6); + this.ctx.lineWidth = 2; + this.ctx.beginPath(); + 
this.ctx.moveTo(x, y - radius); + this.ctx.lineTo(x, y + radius); + this.ctx.moveTo(x - x30, y - y30); + this.ctx.lineTo(x + x30, y + y30); + this.ctx.moveTo(x - x30, y + y30); + this.ctx.lineTo(x + x30, y - y30); + this.ctx.stroke(); + }, + + graphNodeObsolete: function(x, y, radius) { + var p45 = radius * Math.cos(Math.PI / 4); + this.ctx.lineWidth = 3; + this.ctx.beginPath(); + this.ctx.moveTo(x - p45, y - p45); + this.ctx.lineTo(x + p45, y + p45); + this.ctx.moveTo(x - p45, y + p45); + this.ctx.lineTo(x + p45, y - p45); + this.ctx.stroke(); + }, + + graphNodeNormal: function(x, y, radius) { + this.ctx.beginPath(); + this.ctx.arc(x, y, radius, 0, Math.PI * 2, true); + this.ctx.fill(); + }, - var backgrounds = ''; - var nodedata = ''; + vertex: function(x, y, radius, color, parity, cur) { + this.ctx.save(); + this.setColor(color, 0.25, 0.75); + if (cur.graphnode[0] === '@') { + this.graphNodeCurrent(x, y, radius); + } + switch (cur.graphnode.substr(-1)) { + case '_': + this.graphNodeClosing(x, y, radius); + break; + case '*': + this.graphNodeUnstable(x, y, radius); + break; + case 'x': + this.graphNodeObsolete(x, y, radius); + break; + default: + this.graphNodeNormal(x, y, radius); + } + this.ctx.restore(); - for (var i in data) { + var left = (this.bg_height - this.box_size) + (this.columns + 1) * this.box_size; + var item = document.querySelector('[data-node="' + cur.node + '"]'); + if (item) { + item.style.paddingLeft = left + 'px'; + } + }, + + render: function(data) { + + var i, j, cur, line, start, end, color, x, y, x0, y0, x1, y1, column, radius; + + var cols = 0; + for (i = 0; i < data.length; i++) { + cur = data[i]; + for (j = 0; j < cur.edges.length; j++) { + line = cur.edges[j]; + cols = Math.max(cols, line[0], line[1]); + } + } + this.canvas.width = (cols + 1) * this.bg_height; + this.canvas.height = (data.length + 1) * this.bg_height - 27; + + for (i = 0; i < data.length; i++) { var parity = i % 2; this.cell[1] += this.bg_height; this.bg[1] += 
this.bg_height; - var cur = data[i]; - var node = cur[1]; - var edges = cur[2]; + cur = data[i]; var fold = false; var prevWidth = this.ctx.lineWidth; - for (var j in edges) { + for (j = 0; j < cur.edges.length; j++) { - line = edges[j]; + line = cur.edges[j]; start = line[0]; end = line[1]; color = line[2]; @@ -126,8 +201,8 @@ this.columns += 1; } - if (start == this.columns && start > end) { - var fold = true; + if (start === this.columns && start > end) { + fold = true; } x0 = this.cell[0] + this.box_size * start + this.box_size / 2; @@ -142,26 +217,21 @@ // Draw the revision node in the right column - column = node[0] - color = node[1] + column = cur.vertex[0]; + color = cur.vertex[1]; radius = this.box_size / 8; x = this.cell[0] + this.box_size * column + this.box_size / 2; y = this.bg[1] - this.bg_height / 2; - var add = this.vertex(x, y, color, parity, cur); - backgrounds += add[0]; - nodedata += add[1]; + this.vertex(x, y, radius, color, parity, cur); if (fold) this.columns -= 1; } - document.getElementById('nodebgs').innerHTML += backgrounds; - document.getElementById('graphnodes').innerHTML += nodedata; - } -} +}; function process_dates(parentSelector){ @@ -228,10 +298,11 @@ return shortdate(once); } - for (unit in scales){ + for (var unit in scales){ + if (!scales.hasOwnProperty(unit)) { continue; } var s = scales[unit]; var n = Math.floor(delta / s); - if ((n >= 2) || (s == 1)){ + if ((n >= 2) || (s === 1)){ if (future){ return format(n, unit) + ' from now'; } else { @@ -259,7 +330,7 @@ function toggleDiffstat() { var curdetails = document.getElementById('diffstatdetails').style.display; - var curexpand = curdetails == 'none' ? 'inline' : 'none'; + var curexpand = curdetails === 'none' ? 
'inline' : 'none'; document.getElementById('diffstatdetails').style.display = curexpand; document.getElementById('diffstatexpand').style.display = curdetails; } @@ -273,7 +344,8 @@ function setLinewrap(enable) { var nodes = document.getElementsByClassName('sourcelines'); - for (var i = 0; i < nodes.length; i++) { + var i; + for (i = 0; i < nodes.length; i++) { if (enable) { nodes[i].classList.add('wrap'); } else { @@ -282,7 +354,7 @@ } var links = document.getElementsByClassName('linewraplink'); - for (var i = 0; i < links.length; i++) { + for (i = 0; i < links.length; i++) { links[i].innerHTML = enable ? 'on' : 'off'; } } @@ -297,12 +369,12 @@ } function makeRequest(url, method, onstart, onsuccess, onerror, oncomplete) { - xfr = new XMLHttpRequest(); - xfr.onreadystatechange = function() { - if (xfr.readyState === 4) { + var xhr = new XMLHttpRequest(); + xhr.onreadystatechange = function() { + if (xhr.readyState === 4) { try { - if (xfr.status === 200) { - onsuccess(xfr.responseText); + if (xhr.status === 200) { + onsuccess(xhr.responseText); } else { throw 'server error'; } @@ -314,11 +386,11 @@ } }; - xfr.open(method, url); - xfr.overrideMimeType("text/xhtml; charset=" + document.characterSet.toLowerCase()); - xfr.send(); + xhr.open(method, url); + xhr.overrideMimeType("text/xhtml; charset=" + document.characterSet.toLowerCase()); + xhr.send(); onstart(); - return xfr; + return xhr; } function removeByClassName(className) { @@ -338,14 +410,26 @@ element.insertAdjacentHTML('beforeend', format(formatStr, replacements)); } +function adoptChildren(from, to) { + var nodes = from.children; + var curClass = 'c' + Date.now(); + while (nodes.length) { + var node = nodes[0]; + node = document.adoptNode(node); + node.classList.add(curClass); + to.appendChild(node); + } + process_dates('.' 
+ curClass); +} + function ajaxScrollInit(urlFormat, nextPageVar, nextPageVarGet, containerSelector, messageFormat, mode) { - updateInitiated = false; - container = document.querySelector(containerSelector); + var updateInitiated = false; + var container = document.querySelector(containerSelector); function scrollHandler() { if (updateInitiated) { @@ -354,8 +438,7 @@ var scrollHeight = document.documentElement.scrollHeight; var clientHeight = document.documentElement.clientHeight; - var scrollTop = document.body.scrollTop - || document.documentElement.scrollTop; + var scrollTop = document.body.scrollTop || document.documentElement.scrollTop; if (scrollHeight - (scrollTop + clientHeight) < 50) { updateInitiated = true; @@ -382,36 +465,20 @@ appendFormatHTML(container, messageFormat, message); }, function onsuccess(htmlText) { - if (mode == 'graph') { - var sizes = htmlText.match(/^\s*<\/canvas>$/m); - var addWidth = sizes[1]; - var addHeight = sizes[2]; - addWidth = parseInt(addWidth); - addHeight = parseInt(addHeight); - graph.canvas.width = addWidth; - graph.canvas.height = addHeight; + var doc = docFromHTML(htmlText); + if (mode === 'graph') { + var graph = window.graph; var dataStr = htmlText.match(/^\s*var data = (.*);$/m)[1]; var data = JSON.parse(dataStr); - if (data.length < nextPageVar) { - nextPageVar = undefined; - } graph.reset(); + adoptChildren(doc.querySelector('#graphnodes'), container.querySelector('#graphnodes')); graph.render(data); } else { - var doc = docFromHTML(htmlText); - var nodes = doc.querySelector(containerSelector).children; - var curClass = 'c' + Date.now(); - while (nodes.length) { - var node = nodes[0]; - node = document.adoptNode(node); - node.classList.add(curClass); - container.appendChild(node); - } - process_dates('.' 
+ curClass); + adoptChildren(doc.querySelector(containerSelector), container); } - nextPageVar = nextPageVarGet(htmlText, nextPageVar); + nextPageVar = nextPageVarGet(htmlText); }, function onerror(errorText) { var message = { @@ -450,7 +517,7 @@ "ignoreblanklines", ]; - var urlParams = new URLSearchParams(window.location.search); + var urlParams = new window.URLSearchParams(window.location.search); function updateAndRefresh(e) { var checkbox = e.target; @@ -459,7 +526,7 @@ window.location.search = urlParams.toString(); } - var allChecked = form.getAttribute("data-ignorews") == "1"; + var allChecked = form.getAttribute("data-ignorews") === "1"; for (var i = 0; i < KEYS.length; i++) { var key = KEYS[i]; @@ -469,11 +536,11 @@ continue; } - currentValue = form.getAttribute("data-" + key); - checkbox.checked = currentValue != "0"; + var currentValue = form.getAttribute("data-" + key); + checkbox.checked = currentValue !== "0"; // ignorews implies ignorewsamount and ignorewseol. - if (allChecked && (key == "ignorewsamount" || key == "ignorewseol")) { + if (allChecked && (key === "ignorewsamount" || key === "ignorewseol")) { checkbox.checked = true; checkbox.disabled = true; } diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/templates/static/style-gitweb.css --- a/mercurial/templates/static/style-gitweb.css Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/templates/static/style-gitweb.css Mon Jan 22 17:53:02 2018 -0500 @@ -61,8 +61,6 @@ } td.indexlinks a:hover { background-color: #6666aa; } div.pre { font-family:monospace; font-size:12px; white-space:pre; } -div.diff_info { font-family:monospace; color:#000099; background-color:#edece6; font-style:italic; } -div.index_include { border:solid #d9d8d1; border-width:0px 0px 1px; padding:12px 8px; } .search { margin-right: 8px; @@ -122,6 +120,18 @@ background-color: #ffaaff; border-color: #ffccff #ff00ee #ff00ee #ffccff; } +span.logtags span.phasetag { + background-color: #dfafff; + border-color: #e2b8ff #ce48ff #ce48ff #e2b8ff; 
+} +span.logtags span.obsoletetag { + background-color: #dddddd; + border-color: #e4e4e4 #a3a3a3 #a3a3a3 #e4e4e4; +} +span.logtags span.instabilitytag { + background-color: #ffb1c0; + border-color: #ffbbc8 #ff4476 #ff4476 #ffbbc8; +} span.logtags span.tagtag { background-color: #ffffaa; border-color: #ffffcc #ffee00 #ffee00 #ffffcc; @@ -191,10 +201,9 @@ } div#followlines { - background-color: #B7B7B7; - border: 1px solid #CCC; - border-radius: 5px; - padding: 4px; + background-color: #FFF; + border: 1px solid #d9d8d1; + padding: 5px; position: fixed; } @@ -293,30 +302,26 @@ margin: 0; } -ul#nodebgs { +ul#graphnodes { list-style: none inside none; padding: 0; margin: 0; - top: -0.7em; -} - -ul#graphnodes li, ul#nodebgs li { - height: 39px; } -ul#graphnodes { +ul#graphnodes li { + position: relative; + height: 37px; + overflow: visible; + padding-top: 2px; +} + +ul#graphnodes li .fg { position: absolute; z-index: 10; - top: -0.8em; - list-style: none inside none; - padding: 0; } ul#graphnodes li .info { - display: block; font-size: 100%; - position: relative; - top: -3px; font-style: italic; } diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/templates/static/style-monoblue.css --- a/mercurial/templates/static/style-monoblue.css Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/templates/static/style-monoblue.css Mon Jan 22 17:53:02 2018 -0500 @@ -233,6 +233,18 @@ background-color: #ffaaff; border-color: #ffccff #ff00ee #ff00ee #ffccff; } +span.logtags span.phasetag { + background-color: #dfafff; + border-color: #e2b8ff #ce48ff #ce48ff #e2b8ff; +} +span.logtags span.obsoletetag { + background-color: #dddddd; + border-color: #e4e4e4 #a3a3a3 #a3a3a3 #e4e4e4; +} +span.logtags span.instabilitytag { + background-color: #ffb1c0; + border-color: #ffbbc8 #ff4476 #ff4476 #ffbbc8; +} span.logtags span.tagtag { background-color: #ffffaa; border-color: #ffffcc #ffee00 #ffee00 #ffffcc; @@ -309,6 +321,7 @@ pre.sourcelines.stripes > :nth-child(4n+1):hover + :nth-child(4n+2), 
pre.sourcelines.stripes > :nth-child(4n+3):hover + :nth-child(4n+4) { background-color: #D5E1E6; } +tr:target td, pre.sourcelines > span:target, pre.sourcelines.stripes > span:target { background-color: #bfdfff; @@ -456,7 +469,7 @@ /** canvas **/ div#wrapper { position: relative; - font-size: 1.2em; + font-size: 1.1em; } canvas { @@ -465,32 +478,33 @@ top: -0.7em; } -ul#nodebgs li.parity0 { +ul#graphnodes li.parity0 { background: #F1F6F7; } -ul#nodebgs li.parity1 { +ul#graphnodes li.parity1 { background: #FFFFFF; } ul#graphnodes { - position: absolute; - z-index: 10; - top: 7px; list-style: none inside none; + margin: 0; + padding: 0; } -ul#nodebgs { - list-style: none inside none; +ul#graphnodes li { + height: 37px; + overflow: visible; + padding-top: 2px; } -ul#graphnodes li, ul#nodebgs li { - height: 39px; +ul#graphnodes li .fg { + position: absolute; + z-index: 10; } ul#graphnodes li .info { - display: block; - position: relative; + margin-top: 2px; } /** end of canvas **/ diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/templates/static/style-paper.css --- a/mercurial/templates/static/style-paper.css Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/templates/static/style-paper.css Mon Jan 22 17:53:02 2018 -0500 @@ -137,6 +137,33 @@ margin: 1em 0; } +.phase { + color: #999; + font-size: 70%; + border-bottom: 1px dotted #999; + font-weight: normal; + margin-left: .5em; + vertical-align: baseline; +} + +.obsolete { + color: #999; + font-size: 70%; + border-bottom: 1px dashed #999; + font-weight: normal; + margin-left: .5em; + vertical-align: baseline; +} + +.instability { + color: #000; + font-size: 70%; + border-bottom: 1px solid #000; + font-weight: normal; + margin-left: .5em; + vertical-align: baseline; +} + .tag { color: #999; font-size: 70%; @@ -165,10 +192,6 @@ vertical-align: baseline; } -h3 .branchname { - font-size: 80%; -} - /* Common */ pre { margin: 0; } @@ -190,6 +213,7 @@ } .bigtable td { + padding: 1px 4px; vertical-align: top; } @@ -295,10 +319,9 @@ 
} div#followlines { - background-color: #B7B7B7; - border: 1px solid #CCC; - border-radius: 5px; - padding: 4px; + background-color: #FFF; + border: 1px solid #999; + padding: 5px; position: fixed; } @@ -409,7 +432,6 @@ text-align: right; font-weight: normal; color: #999; - margin-right: .5em; vertical-align: top; } @@ -438,29 +460,23 @@ } ul#graphnodes { - position: absolute; - z-index: 10; - top: -1.0em; - list-style: none inside none; - padding: 0; -} - -ul#nodebgs { list-style: none inside none; padding: 0; margin: 0; - top: -0.7em; } -ul#graphnodes li, ul#nodebgs li { +ul#graphnodes li { height: 39px; + overflow: visible; +} + +ul#graphnodes li .fg { + position: absolute; + z-index: 10; } ul#graphnodes li .info { - display: block; font-size: 70%; - position: relative; - top: -3px; } /* Comparison */ diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/templates/static/style.css --- a/mercurial/templates/static/style.css Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/templates/static/style.css Mon Jan 22 17:53:02 2018 -0500 @@ -95,28 +95,23 @@ margin: 0; } -ul#nodebgs { +ul#graphnodes { list-style: none inside none; padding: 0; margin: 0; - top: -0.7em; -} - -ul#graphnodes li, ul#nodebgs li { - height: 39px; } -ul#graphnodes { +ul#graphnodes li { + height: 37px; + overflow: visible; + padding-top: 2px; +} + +ul#graphnodes li .fg { position: absolute; z-index: 10; - top: -0.85em; - list-style: none inside none; - padding: 0; } ul#graphnodes li .info { - display: block; font-size: 70%; - position: relative; - top: -1px; } diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/thirdparty/selectors2.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/mercurial/thirdparty/selectors2.py Mon Jan 22 17:53:02 2018 -0500 @@ -0,0 +1,743 @@ +""" Back-ported, durable, and portable selectors """ + +# MIT License +# +# Copyright (c) 2017 Seth Michael Larson +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation 
files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from __future__ import absolute_import + +import collections +import errno +import math +import select +import socket +import sys +import time + +from .. import pycompat + +namedtuple = collections.namedtuple +Mapping = collections.Mapping + +try: + monotonic = time.monotonic +except AttributeError: + monotonic = time.time + +__author__ = 'Seth Michael Larson' +__email__ = 'sethmichaellarson@protonmail.com' +__version__ = '2.0.0' +__license__ = 'MIT' +__url__ = 'https://www.github.com/SethMichaelLarson/selectors2' + +__all__ = ['EVENT_READ', + 'EVENT_WRITE', + 'SelectorKey', + 'DefaultSelector', + 'BaseSelector'] + +EVENT_READ = (1 << 0) +EVENT_WRITE = (1 << 1) +_DEFAULT_SELECTOR = None +_SYSCALL_SENTINEL = object() # Sentinel in case a system call returns None. 
+_ERROR_TYPES = (OSError, IOError, socket.error) + + +SelectorKey = namedtuple('SelectorKey', ['fileobj', 'fd', 'events', 'data']) + + +class _SelectorMapping(Mapping): + """ Mapping of file objects to selector keys """ + + def __init__(self, selector): + self._selector = selector + + def __len__(self): + return len(self._selector._fd_to_key) + + def __getitem__(self, fileobj): + try: + fd = self._selector._fileobj_lookup(fileobj) + return self._selector._fd_to_key[fd] + except KeyError: + raise KeyError("{0!r} is not registered.".format(fileobj)) + + def __iter__(self): + return iter(self._selector._fd_to_key) + + +def _fileobj_to_fd(fileobj): + """ Return a file descriptor from a file object. If + given an integer will simply return that integer back. """ + if isinstance(fileobj, int): + fd = fileobj + else: + try: + fd = int(fileobj.fileno()) + except (AttributeError, TypeError, ValueError): + raise ValueError("Invalid file object: {0!r}".format(fileobj)) + if fd < 0: + raise ValueError("Invalid file descriptor: {0}".format(fd)) + return fd + + +class BaseSelector(object): + """ Abstract Selector class + + A selector supports registering file objects to be monitored + for specific I/O events. + + A file object is a file descriptor or any object with a + `fileno()` method. An arbitrary object can be attached to the + file object which can be used for example to store context info, + a callback, etc. + + A selector can use various implementations (select(), poll(), epoll(), + and kqueue()) depending on the platform. The 'DefaultSelector' class uses + the most efficient implementation for the current platform. + """ + def __init__(self): + # Maps file descriptors to keys. + self._fd_to_key = {} + + # Read-only mapping returned by get_map() + self._map = _SelectorMapping(self) + + def _fileobj_lookup(self, fileobj): + """ Return a file descriptor from a file object. 
+ This wraps _fileobj_to_fd() to do an exhaustive + search in case the object is invalid but we still + have it in our map. Used by unregister() so we can + unregister an object that was previously registered + even if it is closed. It is also used by _SelectorMapping + """ + try: + return _fileobj_to_fd(fileobj) + except ValueError: + + # Search through all our mapped keys. + for key in self._fd_to_key.values(): + if key.fileobj is fileobj: + return key.fd + + # Raise ValueError after all. + raise + + def register(self, fileobj, events, data=None): + """ Register a file object for a set of events to monitor. """ + if (not events) or (events & ~(EVENT_READ | EVENT_WRITE)): + raise ValueError("Invalid events: {0!r}".format(events)) + + key = SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data) + + if key.fd in self._fd_to_key: + raise KeyError("{0!r} (FD {1}) is already registered" + .format(fileobj, key.fd)) + + self._fd_to_key[key.fd] = key + return key + + def unregister(self, fileobj): + """ Unregister a file object from being monitored. """ + try: + key = self._fd_to_key.pop(self._fileobj_lookup(fileobj)) + except KeyError: + raise KeyError("{0!r} is not registered".format(fileobj)) + + # Getting the fileno of a closed socket on Windows errors with EBADF. + except socket.error as err: + if err.errno != errno.EBADF: + raise + else: + for key in self._fd_to_key.values(): + if key.fileobj is fileobj: + self._fd_to_key.pop(key.fd) + break + else: + raise KeyError("{0!r} is not registered".format(fileobj)) + return key + + def modify(self, fileobj, events, data=None): + """ Change a registered file object monitored events and data. """ + # NOTE: Some subclasses optimize this operation even further. 
+ try: + key = self._fd_to_key[self._fileobj_lookup(fileobj)] + except KeyError: + raise KeyError("{0!r} is not registered".format(fileobj)) + + if events != key.events: + self.unregister(fileobj) + key = self.register(fileobj, events, data) + + elif data != key.data: + # Use a shortcut to update the data. + key = key._replace(data=data) + self._fd_to_key[key.fd] = key + + return key + + def select(self, timeout=None): + """ Perform the actual selection until some monitored file objects + are ready or the timeout expires. """ + raise NotImplementedError() + + def close(self): + """ Close the selector. This must be called to ensure that all + underlying resources are freed. """ + self._fd_to_key.clear() + self._map = None + + def get_key(self, fileobj): + """ Return the key associated with a registered file object. """ + mapping = self.get_map() + if mapping is None: + raise RuntimeError("Selector is closed") + try: + return mapping[fileobj] + except KeyError: + raise KeyError("{0!r} is not registered".format(fileobj)) + + def get_map(self): + """ Return a mapping of file objects to selector keys """ + return self._map + + def _key_from_fd(self, fd): + """ Return the key associated to a given file descriptor + Return None if it is not found. """ + try: + return self._fd_to_key[fd] + except KeyError: + return None + + def __enter__(self): + return self + + def __exit__(self, *_): + self.close() + + +# Almost all platforms have select.select() +if hasattr(select, "select"): + class SelectSelector(BaseSelector): + """ Select-based selector. 
""" + def __init__(self): + super(SelectSelector, self).__init__() + self._readers = set() + self._writers = set() + + def register(self, fileobj, events, data=None): + key = super(SelectSelector, self).register(fileobj, events, data) + if events & EVENT_READ: + self._readers.add(key.fd) + if events & EVENT_WRITE: + self._writers.add(key.fd) + return key + + def unregister(self, fileobj): + key = super(SelectSelector, self).unregister(fileobj) + self._readers.discard(key.fd) + self._writers.discard(key.fd) + return key + + def select(self, timeout=None): + # Selecting on empty lists on Windows errors out. + if not len(self._readers) and not len(self._writers): + return [] + + timeout = None if timeout is None else max(timeout, 0.0) + ready = [] + r, w, _ = _syscall_wrapper(self._wrap_select, True, self._readers, + self._writers, timeout) + r = set(r) + w = set(w) + for fd in r | w: + events = 0 + if fd in r: + events |= EVENT_READ + if fd in w: + events |= EVENT_WRITE + + key = self._key_from_fd(fd) + if key: + ready.append((key, events & key.events)) + return ready + + def _wrap_select(self, r, w, timeout=None): + """ Wrapper for select.select because timeout is a positional arg """ + return select.select(r, w, [], timeout) + + __all__.append('SelectSelector') + + # Jython has a different implementation of .fileno() for socket objects. + if pycompat.isjython: + class _JythonSelectorMapping(object): + """ This is an implementation of _SelectorMapping that is built + for use specifically with Jython, which does not provide a hashable + value from socket.socket.fileno(). 
""" + + def __init__(self, selector): + assert isinstance(selector, JythonSelectSelector) + self._selector = selector + + def __len__(self): + return len(self._selector._sockets) + + def __getitem__(self, fileobj): + for sock, key in self._selector._sockets: + if sock is fileobj: + return key + else: + raise KeyError("{0!r} is not registered.".format(fileobj)) + + class JythonSelectSelector(SelectSelector): + """ This is an implementation of SelectSelector that is for Jython + which works around that Jython's socket.socket.fileno() does not + return an integer fd value. All SelectorKey.fd will be equal to -1 + and should not be used. This instead uses object id to compare fileobj + and will only use select.select as it's the only selector that allows + directly passing in socket objects rather than registering fds. + See: http://bugs.jython.org/issue1678 + https://wiki.python.org/jython/NewSocketModule#socket.fileno.28.29_does_not_return_an_integer + """ + + def __init__(self): + super(JythonSelectSelector, self).__init__() + + self._sockets = [] # Uses a list of tuples instead of dictionary. + self._map = _JythonSelectorMapping(self) + self._readers = [] + self._writers = [] + + # Jython has a select.cpython_compatible_select function in older versions. 
+ self._select_func = getattr(select, 'cpython_compatible_select', select.select) + + def register(self, fileobj, events, data=None): + for sock, _ in self._sockets: + if sock is fileobj: + raise KeyError("{0!r} is already registered" + .format(fileobj, sock)) + + key = SelectorKey(fileobj, -1, events, data) + self._sockets.append((fileobj, key)) + + if events & EVENT_READ: + self._readers.append(fileobj) + if events & EVENT_WRITE: + self._writers.append(fileobj) + return key + + def unregister(self, fileobj): + for i, (sock, key) in enumerate(self._sockets): + if sock is fileobj: + break + else: + raise KeyError("{0!r} is not registered.".format(fileobj)) + + if key.events & EVENT_READ: + self._readers.remove(fileobj) + if key.events & EVENT_WRITE: + self._writers.remove(fileobj) + + del self._sockets[i] + return key + + def _wrap_select(self, r, w, timeout=None): + """ Wrapper for select.select because timeout is a positional arg """ + return self._select_func(r, w, [], timeout) + + __all__.append('JythonSelectSelector') + SelectSelector = JythonSelectSelector # Override so the wrong selector isn't used. + + +if hasattr(select, "poll"): + class PollSelector(BaseSelector): + """ Poll-based selector """ + def __init__(self): + super(PollSelector, self).__init__() + self._poll = select.poll() + + def register(self, fileobj, events, data=None): + key = super(PollSelector, self).register(fileobj, events, data) + event_mask = 0 + if events & EVENT_READ: + event_mask |= select.POLLIN + if events & EVENT_WRITE: + event_mask |= select.POLLOUT + self._poll.register(key.fd, event_mask) + return key + + def unregister(self, fileobj): + key = super(PollSelector, self).unregister(fileobj) + self._poll.unregister(key.fd) + return key + + def _wrap_poll(self, timeout=None): + """ Wrapper function for select.poll.poll() so that + _syscall_wrapper can work with only seconds. 
""" + if timeout is not None: + if timeout <= 0: + timeout = 0 + else: + # select.poll.poll() has a resolution of 1 millisecond, + # round away from zero to wait *at least* timeout seconds. + timeout = math.ceil(timeout * 1000) + + result = self._poll.poll(timeout) + return result + + def select(self, timeout=None): + ready = [] + fd_events = _syscall_wrapper(self._wrap_poll, True, timeout=timeout) + for fd, event_mask in fd_events: + events = 0 + if event_mask & ~select.POLLIN: + events |= EVENT_WRITE + if event_mask & ~select.POLLOUT: + events |= EVENT_READ + + key = self._key_from_fd(fd) + if key: + ready.append((key, events & key.events)) + + return ready + + __all__.append('PollSelector') + +if hasattr(select, "epoll"): + class EpollSelector(BaseSelector): + """ Epoll-based selector """ + def __init__(self): + super(EpollSelector, self).__init__() + self._epoll = select.epoll() + + def fileno(self): + return self._epoll.fileno() + + def register(self, fileobj, events, data=None): + key = super(EpollSelector, self).register(fileobj, events, data) + events_mask = 0 + if events & EVENT_READ: + events_mask |= select.EPOLLIN + if events & EVENT_WRITE: + events_mask |= select.EPOLLOUT + _syscall_wrapper(self._epoll.register, False, key.fd, events_mask) + return key + + def unregister(self, fileobj): + key = super(EpollSelector, self).unregister(fileobj) + try: + _syscall_wrapper(self._epoll.unregister, False, key.fd) + except _ERROR_TYPES: + # This can occur when the fd was closed since registry. + pass + return key + + def select(self, timeout=None): + if timeout is not None: + if timeout <= 0: + timeout = 0.0 + else: + # select.epoll.poll() has a resolution of 1 millisecond + # but luckily takes seconds so we don't need a wrapper + # like PollSelector. Just for better rounding. + timeout = math.ceil(timeout * 1000) * 0.001 + timeout = float(timeout) + else: + timeout = -1.0 # epoll.poll() must have a float. 
+ + # We always want at least 1 to ensure that select can be called + # with no file descriptors registered. Otherwise will fail. + max_events = max(len(self._fd_to_key), 1) + + ready = [] + fd_events = _syscall_wrapper(self._epoll.poll, True, + timeout=timeout, + maxevents=max_events) + for fd, event_mask in fd_events: + events = 0 + if event_mask & ~select.EPOLLIN: + events |= EVENT_WRITE + if event_mask & ~select.EPOLLOUT: + events |= EVENT_READ + + key = self._key_from_fd(fd) + if key: + ready.append((key, events & key.events)) + return ready + + def close(self): + self._epoll.close() + super(EpollSelector, self).close() + + __all__.append('EpollSelector') + + +if hasattr(select, "devpoll"): + class DevpollSelector(BaseSelector): + """Solaris /dev/poll selector.""" + + def __init__(self): + super(DevpollSelector, self).__init__() + self._devpoll = select.devpoll() + + def fileno(self): + return self._devpoll.fileno() + + def register(self, fileobj, events, data=None): + key = super(DevpollSelector, self).register(fileobj, events, data) + poll_events = 0 + if events & EVENT_READ: + poll_events |= select.POLLIN + if events & EVENT_WRITE: + poll_events |= select.POLLOUT + self._devpoll.register(key.fd, poll_events) + return key + + def unregister(self, fileobj): + key = super(DevpollSelector, self).unregister(fileobj) + self._devpoll.unregister(key.fd) + return key + + def _wrap_poll(self, timeout=None): + """ Wrapper function for select.poll.poll() so that + _syscall_wrapper can work with only seconds. """ + if timeout is not None: + if timeout <= 0: + timeout = 0 + else: + # select.devpoll.poll() has a resolution of 1 millisecond, + # round away from zero to wait *at least* timeout seconds. 
+ timeout = math.ceil(timeout * 1000) + + result = self._devpoll.poll(timeout) + return result + + def select(self, timeout=None): + ready = [] + fd_events = _syscall_wrapper(self._wrap_poll, True, timeout=timeout) + for fd, event_mask in fd_events: + events = 0 + if event_mask & ~select.POLLIN: + events |= EVENT_WRITE + if event_mask & ~select.POLLOUT: + events |= EVENT_READ + + key = self._key_from_fd(fd) + if key: + ready.append((key, events & key.events)) + + return ready + + def close(self): + self._devpoll.close() + super(DevpollSelector, self).close() + + __all__.append('DevpollSelector') + + +if hasattr(select, "kqueue"): + class KqueueSelector(BaseSelector): + """ Kqueue / Kevent-based selector """ + def __init__(self): + super(KqueueSelector, self).__init__() + self._kqueue = select.kqueue() + + def fileno(self): + return self._kqueue.fileno() + + def register(self, fileobj, events, data=None): + key = super(KqueueSelector, self).register(fileobj, events, data) + if events & EVENT_READ: + kevent = select.kevent(key.fd, + select.KQ_FILTER_READ, + select.KQ_EV_ADD) + + _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0) + + if events & EVENT_WRITE: + kevent = select.kevent(key.fd, + select.KQ_FILTER_WRITE, + select.KQ_EV_ADD) + + _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0) + + return key + + def unregister(self, fileobj): + key = super(KqueueSelector, self).unregister(fileobj) + if key.events & EVENT_READ: + kevent = select.kevent(key.fd, + select.KQ_FILTER_READ, + select.KQ_EV_DELETE) + try: + _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0) + except _ERROR_TYPES: + pass + if key.events & EVENT_WRITE: + kevent = select.kevent(key.fd, + select.KQ_FILTER_WRITE, + select.KQ_EV_DELETE) + try: + _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0) + except _ERROR_TYPES: + pass + + return key + + def select(self, timeout=None): + if timeout is not None: + timeout = max(timeout, 0) + + max_events = 
len(self._fd_to_key) * 2 + ready_fds = {} + + kevent_list = _syscall_wrapper(self._kqueue.control, True, + None, max_events, timeout) + + for kevent in kevent_list: + fd = kevent.ident + event_mask = kevent.filter + events = 0 + if event_mask == select.KQ_FILTER_READ: + events |= EVENT_READ + if event_mask == select.KQ_FILTER_WRITE: + events |= EVENT_WRITE + + key = self._key_from_fd(fd) + if key: + if key.fd not in ready_fds: + ready_fds[key.fd] = (key, events & key.events) + else: + old_events = ready_fds[key.fd][1] + ready_fds[key.fd] = (key, (events | old_events) & key.events) + + return list(ready_fds.values()) + + def close(self): + self._kqueue.close() + super(KqueueSelector, self).close() + + __all__.append('KqueueSelector') + + +def _can_allocate(struct): + """ Checks that select structs can be allocated by the underlying + operating system, not just advertised by the select module. We don't + check select() because we'll be hopeful that most platforms that + don't have it available will not advertise it. (ie: GAE) """ + try: + # select.poll() objects won't fail until used. + if struct == 'poll': + p = select.poll() + p.poll(0) + + # All others will fail on allocation. + else: + getattr(select, struct)().close() + return True + except (OSError, AttributeError): + return False + + +# Python 3.5 uses a more direct route to wrap system calls to increase speed. +if sys.version_info >= (3, 5): + def _syscall_wrapper(func, _, *args, **kwargs): + """ This is the short-circuit version of the below logic + because in Python 3.5+ all selectors restart system calls. """ + return func(*args, **kwargs) +else: + def _syscall_wrapper(func, recalc_timeout, *args, **kwargs): + """ Wrapper function for syscalls that could fail due to EINTR. + All functions should be retried if there is time left in the timeout + in accordance with PEP 475. 
""" + timeout = kwargs.get("timeout", None) + if timeout is None: + expires = None + recalc_timeout = False + else: + timeout = float(timeout) + if timeout < 0.0: # Timeout less than 0 treated as no timeout. + expires = None + else: + expires = monotonic() + timeout + + args = list(args) + if recalc_timeout and "timeout" not in kwargs: + raise ValueError( + "Timeout must be in args or kwargs to be recalculated") + + result = _SYSCALL_SENTINEL + while result is _SYSCALL_SENTINEL: + try: + result = func(*args, **kwargs) + # OSError is thrown by select.select + # IOError is thrown by select.epoll.poll + # select.error is thrown by select.poll.poll + # Aren't we thankful for Python 3.x rework for exceptions? + except (OSError, IOError, select.error) as e: + # select.error wasn't a subclass of OSError in the past. + errcode = None + if hasattr(e, "errno"): + errcode = e.errno + elif hasattr(e, "args"): + errcode = e.args[0] + + # Also test for the Windows equivalent of EINTR. + is_interrupt = (errcode == errno.EINTR or (hasattr(errno, "WSAEINTR") and + errcode == errno.WSAEINTR)) + + if is_interrupt: + if expires is not None: + current_time = monotonic() + if current_time > expires: + raise OSError(errno=errno.ETIMEDOUT) + if recalc_timeout: + if "timeout" in kwargs: + kwargs["timeout"] = expires - current_time + continue + raise + return result + + +# Choose the best implementation, roughly: +# kqueue == devpoll == epoll > poll > select +# select() also can't accept a FD > FD_SETSIZE (usually around 1024) +def DefaultSelector(): + """ This function serves as a first call for DefaultSelector to + detect if the select module is being monkey-patched incorrectly + by eventlet, greenlet, and preserve proper behavior. 
""" + global _DEFAULT_SELECTOR + if _DEFAULT_SELECTOR is None: + if pycompat.isjython: + _DEFAULT_SELECTOR = JythonSelectSelector + elif _can_allocate('kqueue'): + _DEFAULT_SELECTOR = KqueueSelector + elif _can_allocate('devpoll'): + _DEFAULT_SELECTOR = DevpollSelector + elif _can_allocate('epoll'): + _DEFAULT_SELECTOR = EpollSelector + elif _can_allocate('poll'): + _DEFAULT_SELECTOR = PollSelector + elif hasattr(select, 'select'): + _DEFAULT_SELECTOR = SelectSelector + else: # Platform-specific: AppEngine + raise RuntimeError('Platform does not have a selector.') + return _DEFAULT_SELECTOR() diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/ui.py --- a/mercurial/ui.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/ui.py Mon Jan 22 17:53:02 2018 -0500 @@ -49,6 +49,10 @@ [ui] # The rollback command is dangerous. As a rule, don't use it. rollback = False +# Make `hg status` report copy information +statuscopies = yes +# Prefer curses UIs when available. Revert to plain-text with `text`. +interface = curses [commands] # Make `hg status` emit cwd-relative paths by default. @@ -58,6 +62,7 @@ [diff] git = 1 +showfunc = 1 """ samplehgrcs = { @@ -695,6 +700,9 @@ >>> u.setconfig(s, b'list1', b'this,is "a small" ,test') >>> u.configlist(s, b'list1') ['this', 'is', 'a small', 'test'] + >>> u.setconfig(s, b'list2', b'this, is "a small" , test ') + >>> u.configlist(s, b'list2') + ['this', 'is', 'a small', 'test'] """ # default is not always a list v = self.configwith(config.parselist, section, name, default, @@ -886,9 +894,9 @@ "cmdname.type" is recommended. For example, status issues a label of "status.modified" for modified files. 
''' - if self._buffers and not opts.get('prompt', False): + if self._buffers and not opts.get(r'prompt', False): if self._bufferapplylabels: - label = opts.get('label', '') + label = opts.get(r'label', '') self._buffers[-1].extend(self.label(a, label) for a in args) else: self._buffers[-1].extend(args) @@ -899,7 +907,7 @@ else: msgs = args if self._colormode is not None: - label = opts.get('label', '') + label = opts.get(r'label', '') msgs = [self.label(a, label) for a in args] self._write(*msgs, **opts) @@ -927,7 +935,7 @@ else: msgs = args if self._colormode is not None: - label = opts.get('label', '') + label = opts.get(r'label', '') msgs = [self.label(a, label) for a in args] self._write_err(*msgs, **opts) @@ -1602,7 +1610,7 @@ stack. """ if not self.configbool('devel', 'all-warnings'): - if config is not None and not self.configbool('devel', config): + if config is None or not self.configbool('devel', config): return msg = 'devel-warn: ' + msg stacklevel += 1 # get in develwarn diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/upgrade.py --- a/mercurial/upgrade.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/upgrade.py Mon Jan 22 17:53:02 2018 -0500 @@ -14,6 +14,8 @@ from . import ( changelog, error, + filelog, + hg, localrepo, manifest, revlog, @@ -94,6 +96,9 @@ 'generaldelta', } +def preservedrequirements(repo): + return set() + deficiency = 'deficiency' optimisation = 'optimization' @@ -256,7 +261,7 @@ @registerformatvariant class removecldeltachain(formatvariant): - name = 'removecldeltachain' + name = 'plain-cl-delta' default = True @@ -281,6 +286,28 @@ def fromconfig(repo): return True +@registerformatvariant +class compressionengine(formatvariant): + name = 'compression' + default = 'zlib' + + description = _('Compresion algorithm used to compress data. 
' + 'Some engine are faster than other') + + upgrademessage = _('revlog content will be recompressed with the new ' + 'algorithm.') + + @classmethod + def fromrepo(cls, repo): + for req in repo.requirements: + if req.startswith('exp-compression-'): + return req.split('-', 2)[2] + return 'zlib' + + @classmethod + def fromconfig(cls, repo): + return repo.ui.config('experimental', 'format.compression') + def finddeficiencies(repo): """returns a list of deficiencies that the repo suffer from""" deficiencies = [] @@ -342,6 +369,19 @@ 'recomputed; this will likely drastically slow down ' 'execution time'))) + optimizations.append(improvement( + name='redeltafulladd', + type=optimisation, + description=_('every revision will be re-added as if it was new ' + 'content. It will go through the full storage ' + 'mechanism giving extensions a chance to process it ' + '(eg. lfs). This is similar to "redeltaall" but even ' + 'slower since more logic is involved.'), + upgrademessage=_('each revision will be added as new content to the ' + 'internal storage; this will likely drastically slow ' + 'down execution time, but some extensions might need ' + 'it'))) + return optimizations def determineactions(repo, deficiencies, sourcereqs, destreqs): @@ -387,9 +427,8 @@ mandir = path[:-len('00manifest.i')] return manifest.manifestrevlog(repo.svfs, dir=mandir) else: - # Filelogs don't do anything special with settings. So we can use a - # vanilla revlog. 
- return revlog.revlog(repo.svfs, path) + #reverse of "/".join(("data", path + ".i")) + return filelog.filelog(repo.svfs, path[5:-2]) def _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse, aggressivemergedeltas): """Copy revlogs between 2 repos.""" @@ -592,6 +631,8 @@ deltareuse = revlog.revlog.DELTAREUSESAMEREVS elif 'redeltamultibase' in actions: deltareuse = revlog.revlog.DELTAREUSESAMEREVS + elif 'redeltafulladd' in actions: + deltareuse = revlog.revlog.DELTAREUSEFULLADD else: deltareuse = revlog.revlog.DELTAREUSEALWAYS @@ -679,6 +720,7 @@ # FUTURE there is potentially a need to control the wanted requirements via # command arguments or via an extension hook point. newreqs = localrepo.newreporequirements(repo) + newreqs.update(preservedrequirements(repo)) noremovereqs = (repo.requirements - newreqs - supportremovedrequirements(repo)) @@ -804,9 +846,10 @@ try: ui.write(_('creating temporary repository to stage migrated ' 'data: %s\n') % tmppath) - dstrepo = localrepo.localrepository(repo.baseui, - path=tmppath, - create=True) + + # clone ui without using ui.copy because repo.ui is protected + repoui = repo.ui.__class__(repo.ui) + dstrepo = hg.repository(repoui, path=tmppath, create=True) with dstrepo.wlock(), dstrepo.lock(): backuppath = _upgraderepo(ui, repo, dstrepo, newreqs, diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/url.py --- a/mercurial/url.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/url.py Mon Jan 22 17:53:02 2018 -0500 @@ -466,7 +466,7 @@ handlerfuncs = [] -def opener(ui, authinfo=None): +def opener(ui, authinfo=None, useragent=None): ''' construct an opener suitable for urllib2 authinfo will be added to the password manager @@ -512,8 +512,14 @@ # own distribution name. Since servers should not be using the user # agent string for anything, clients should be able to define whatever # user agent they deem appropriate. 
- agent = 'mercurial/proto-1.0 (Mercurial %s)' % util.version() - opener.addheaders = [(r'User-agent', pycompat.sysstr(agent))] + # + # The custom user agent is for lfs, because unfortunately some servers + # do look at this value. + if not useragent: + agent = 'mercurial/proto-1.0 (Mercurial %s)' % util.version() + opener.addheaders = [(r'User-agent', pycompat.sysstr(agent))] + else: + opener.addheaders = [(r'User-agent', pycompat.sysstr(useragent))] # This header should only be needed by wire protocol requests. But it has # been sent on all requests since forever. We keep sending it for backwards diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/util.py --- a/mercurial/util.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/util.py Mon Jan 22 17:53:02 2018 -0500 @@ -49,6 +49,7 @@ encoding, error, i18n, + node as nodemod, policy, pycompat, urllibcompat, @@ -109,6 +110,8 @@ expandglobs = platform.expandglobs explainexit = platform.explainexit findexe = platform.findexe +getfsmountpoint = platform.getfsmountpoint +getfstype = platform.getfstype gethgcmd = platform.gethgcmd getuser = platform.getuser getpid = os.getpid @@ -163,6 +166,10 @@ setprocname = osutil.setprocname except AttributeError: pass +try: + unblocksignal = osutil.unblocksignal +except AttributeError: + pass # Python compatibility @@ -259,7 +266,7 @@ def __getitem__(self, key): if key not in DIGESTS: raise Abort(_('unknown digest type: %s') % k) - return self._hashes[key].hexdigest() + return nodemod.hex(self._hashes[key].digest()) def __iter__(self): return iter(self._hashes) @@ -931,6 +938,11 @@ # __dict__ assignment required to bypass __setattr__ (eg: repoview) obj.__dict__[self.name] = value +def clearcachedproperty(obj, prop): + '''clear a cached property value, if one has been set''' + if prop in obj.__dict__: + del obj.__dict__[prop] + def pipefilter(s, cmd): '''filter string S through command CMD, returning its output''' p = subprocess.Popen(cmd, shell=True, close_fds=closefds, @@ -1196,6 +1208,7 
@@ 'ext4', 'hfs', 'jfs', + 'NTFS', 'reiserfs', 'tmpfs', 'ufs', @@ -1510,13 +1523,6 @@ return ''.join(result) -def getfstype(dirpath): - '''Get the filesystem type name from a directory (best-effort) - - Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc. - ''' - return getattr(osutil, 'getfstype', lambda x: None)(dirpath) - def checknlink(testfile): '''check whether hardlink count reporting works properly''' @@ -2662,7 +2668,7 @@ else: prefix_char = prefix mapping[prefix_char] = prefix_char - r = remod.compile(r'%s(%s)' % (prefix, patterns)) + r = remod.compile(br'%s(%s)' % (prefix, patterns)) return r.sub(lambda x: fn(mapping[x.group()[1:]]), s) def getport(port): @@ -3859,3 +3865,82 @@ fn = '%s~%s~%s' % (f, tag, n) if fn not in ctx and fn not in others: return fn + +def readexactly(stream, n): + '''read n bytes from stream.read and abort if less was available''' + s = stream.read(n) + if len(s) < n: + raise error.Abort(_("stream ended unexpectedly" + " (got %d bytes, expected %d)") + % (len(s), n)) + return s + +def uvarintencode(value): + """Encode an unsigned integer value to a varint. + + A varint is a variable length integer of 1 or more bytes. Each byte + except the last has the most significant bit set. The lower 7 bits of + each byte store the 2's complement representation, least significant group + first. + + >>> uvarintencode(0) + '\\x00' + >>> uvarintencode(1) + '\\x01' + >>> uvarintencode(127) + '\\x7f' + >>> uvarintencode(1337) + '\\xb9\\n' + >>> uvarintencode(65536) + '\\x80\\x80\\x04' + >>> uvarintencode(-1) + Traceback (most recent call last): + ... 
+ ProgrammingError: negative value for uvarint: -1 + """ + if value < 0: + raise error.ProgrammingError('negative value for uvarint: %d' + % value) + bits = value & 0x7f + value >>= 7 + bytes = [] + while value: + bytes.append(pycompat.bytechr(0x80 | bits)) + bits = value & 0x7f + value >>= 7 + bytes.append(pycompat.bytechr(bits)) + + return ''.join(bytes) + +def uvarintdecodestream(fh): + """Decode an unsigned variable length integer from a stream. + + The passed argument is anything that has a ``.read(N)`` method. + + >>> try: + ... from StringIO import StringIO as BytesIO + ... except ImportError: + ... from io import BytesIO + >>> uvarintdecodestream(BytesIO(b'\\x00')) + 0 + >>> uvarintdecodestream(BytesIO(b'\\x01')) + 1 + >>> uvarintdecodestream(BytesIO(b'\\x7f')) + 127 + >>> uvarintdecodestream(BytesIO(b'\\xb9\\n')) + 1337 + >>> uvarintdecodestream(BytesIO(b'\\x80\\x80\\x04')) + 65536 + >>> uvarintdecodestream(BytesIO(b'\\x80')) + Traceback (most recent call last): + ... + Abort: stream ended unexpectedly (got 0 bytes, expected 1) + """ + result = 0 + shift = 0 + while True: + byte = ord(readexactly(fh, 1)) + result |= ((byte & 0x7f) << shift) + if not (byte & 0x80): + return result + shift += 7 diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/verify.py --- a/mercurial/verify.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/verify.py Mon Jan 22 17:53:02 2018 -0500 @@ -17,6 +17,7 @@ from . 
import ( error, + pycompat, revlog, scmutil, util, @@ -105,7 +106,8 @@ if self.lrugetctx(l)[f].filenode() == node] except Exception: pass - self.warn(_(" (expected %s)") % " ".join(map(str, linkrevs))) + self.warn(_(" (expected %s)") % " ".join + (map(pycompat.bytestr, linkrevs))) lr = None # can't be trusted try: diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/vfs.py --- a/mercurial/vfs.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/vfs.py Mon Jan 22 17:53:02 2018 -0500 @@ -83,8 +83,8 @@ with self(path, mode=mode) as fp: return fp.readlines() - def write(self, path, data, backgroundclose=False): - with self(path, 'wb', backgroundclose=backgroundclose) as fp: + def write(self, path, data, backgroundclose=False, **kwargs): + with self(path, 'wb', backgroundclose=backgroundclose, **kwargs) as fp: return fp.write(data) def writelines(self, path, data, mode='wb', notindexed=False): @@ -170,9 +170,9 @@ def mkdir(self, path=None): return os.mkdir(self.join(path)) - def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False): + def mkstemp(self, suffix='', prefix='tmp', dir=None): fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix, - dir=self.join(dir), text=text) + dir=self.join(dir)) dname, fname = util.split(name) if dir: return fd, os.path.join(dir, fname) @@ -277,8 +277,12 @@ to ``__call__``/``open`` to result in the file possibly being closed asynchronously, on a background thread. """ - # This is an arbitrary restriction and could be changed if we ever - # have a use case. + # Sharing backgroundfilecloser between threads is complex and using + # multiple instances puts us at risk of running out of file descriptors + # only allow to use backgroundfilecloser when in main thread. 
+ if not isinstance(threading.currentThread(), threading._MainThread): + yield + return vfs = getattr(self, 'vfs', self) if getattr(vfs, '_backgroundfilecloser', None): raise error.Abort( @@ -329,9 +333,8 @@ return os.chmod(name, self.createmode & 0o666) - def __call__(self, path, mode="r", text=False, atomictemp=False, - notindexed=False, backgroundclose=False, checkambig=False, - auditpath=True): + def __call__(self, path, mode="r", atomictemp=False, notindexed=False, + backgroundclose=False, checkambig=False, auditpath=True): '''Open ``path`` file, which is relative to vfs root. Newly created directories are marked as "not to be indexed by @@ -369,7 +372,7 @@ self.audit(path, mode=mode) f = self.join(path) - if not text and "b" not in mode: + if "b" not in mode: mode += "b" # for that other OS nlink = -1 @@ -413,7 +416,8 @@ ' valid for checkambig=True') % mode) fp = checkambigatclosing(fp) - if backgroundclose: + if (backgroundclose and + isinstance(threading.currentThread(), threading._MainThread)): if not self._backgroundfilecloser: raise error.Abort(_('backgroundclose can only be used when a ' 'backgroundclosing context manager is active') diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/win32.py --- a/mercurial/win32.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/win32.py Mon Jan 22 17:53:02 2018 -0500 @@ -8,6 +8,7 @@ from __future__ import absolute_import import ctypes +import ctypes.wintypes as wintypes import errno import msvcrt import os @@ -33,6 +34,7 @@ _HANDLE = ctypes.c_void_p _HWND = _HANDLE _PCCERT_CONTEXT = ctypes.c_void_p +_MAX_PATH = wintypes.MAX_PATH _INVALID_HANDLE_VALUE = _HANDLE(-1).value @@ -223,6 +225,24 @@ _kernel32.SetFileAttributesA.argtypes = [_LPCSTR, _DWORD] _kernel32.SetFileAttributesA.restype = _BOOL +_DRIVE_UNKNOWN = 0 +_DRIVE_NO_ROOT_DIR = 1 +_DRIVE_REMOVABLE = 2 +_DRIVE_FIXED = 3 +_DRIVE_REMOTE = 4 +_DRIVE_CDROM = 5 +_DRIVE_RAMDISK = 6 + +_kernel32.GetDriveTypeA.argtypes = [_LPCSTR] +_kernel32.GetDriveTypeA.restype = _UINT + 
+_kernel32.GetVolumeInformationA.argtypes = [_LPCSTR, ctypes.c_void_p, _DWORD, + ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, _DWORD] +_kernel32.GetVolumeInformationA.restype = _BOOL + +_kernel32.GetVolumePathNameA.argtypes = [_LPCSTR, ctypes.c_void_p, _DWORD] +_kernel32.GetVolumePathNameA.restype = _BOOL + _kernel32.OpenProcess.argtypes = [_DWORD, _BOOL, _DWORD] _kernel32.OpenProcess.restype = _HANDLE @@ -410,6 +430,49 @@ raise ctypes.WinError(_ERROR_INSUFFICIENT_BUFFER) return buf.value +def getvolumename(path): + """Get the mount point of the filesystem from a directory or file + (best-effort) + + Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc. + """ + # realpath() calls GetFullPathName() + realpath = os.path.realpath(path) + + # allocate at least MAX_PATH long since GetVolumePathName('c:\\', buf, 4) + # somehow fails on Windows XP + size = max(len(realpath), _MAX_PATH) + 1 + buf = ctypes.create_string_buffer(size) + + if not _kernel32.GetVolumePathNameA(realpath, ctypes.byref(buf), size): + raise ctypes.WinError() # Note: WinError is a function + + return buf.value + +def getfstype(path): + """Get the filesystem type name from a directory or file (best-effort) + + Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc. 
+ """ + volume = getvolumename(path) + + t = _kernel32.GetDriveTypeA(volume) + + if t == _DRIVE_REMOTE: + return 'cifs' + elif t not in (_DRIVE_REMOVABLE, _DRIVE_FIXED, _DRIVE_CDROM, + _DRIVE_RAMDISK): + return None + + size = _MAX_PATH + 1 + name = ctypes.create_string_buffer(size) + + if not _kernel32.GetVolumeInformationA(volume, None, 0, None, None, None, + ctypes.byref(name), size): + raise ctypes.WinError() # Note: WinError is a function + + return name.value + def getuser(): '''return name of current user''' size = _DWORD(300) diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/windows.py --- a/mercurial/windows.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/windows.py Mon Jan 22 17:53:02 2018 -0500 @@ -32,6 +32,8 @@ osutil = policy.importmod(r'osutil') executablepath = win32.executablepath +getfsmountpoint = win32.getvolumename +getfstype = win32.getfstype getuser = win32.getuser hidewindow = win32.hidewindow makedir = win32.makedir diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/wireproto.py --- a/mercurial/wireproto.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/wireproto.py Mon Jan 22 17:53:02 2018 -0500 @@ -205,13 +205,16 @@ # :scsv: list of comma-separated values return as set # :plain: string with no transformation needed. gboptsmap = {'heads': 'nodes', + 'bookmarks': 'boolean', 'common': 'nodes', 'obsmarkers': 'boolean', 'phases': 'boolean', 'bundlecaps': 'scsv', 'listkeys': 'csv', 'cg': 'boolean', - 'cbattempted': 'boolean'} + 'cbattempted': 'boolean', + 'stream': 'boolean', +} # client side @@ -451,9 +454,9 @@ # don't pass optional arguments left at their default value opts = {} if three is not None: - opts['three'] = three + opts[r'three'] = three if four is not None: - opts['four'] = four + opts[r'four'] = four return self._call('debugwireargs', one=one, two=two, **opts) def _call(self, cmd, **args): @@ -519,18 +522,28 @@ The call was successful and the result is a stream. - Accepts either a generator or an object with a ``read(size)`` method. 
+ Accepts a generator containing chunks of data to be sent to the client. + + ``prefer_uncompressed`` indicates that the data is expected to be + uncompressable and that the stream should therefore use the ``none`` + engine. + """ + def __init__(self, gen=None, prefer_uncompressed=False): + self.gen = gen + self.prefer_uncompressed = prefer_uncompressed - ``v1compressible`` indicates whether this data can be compressed to - "version 1" clients (technically: HTTP peers using - application/mercurial-0.1 media type). This flag should NOT be used on - new commands because new clients should support a more modern compression - mechanism. +class streamres_legacy(object): + """wireproto reply: uncompressed binary stream + + The call was successful and the result is a stream. + + Accepts a generator containing chunks of data to be sent to the client. + + Like ``streamres``, but sends an uncompressed data for "version 1" clients + using the application/mercurial-0.1 media type. """ - def __init__(self, gen=None, reader=None, v1compressible=False): + def __init__(self, gen=None): self.gen = gen - self.reader = reader - self.v1compressible = v1compressible class pushres(object): """wireproto reply: success with simple integer return @@ -767,7 +780,7 @@ else: caps.append('streamreqs=%s' % ','.join(sorted(requiredformats))) if repo.ui.configbool('experimental', 'bundle2-advertise'): - capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo)) + capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role='server')) caps.append('bundle2=' + urlreq.quote(capsblob)) caps.append('unbundle=%s' % ','.join(bundle2.bundlepriority)) @@ -801,7 +814,8 @@ outgoing = discovery.outgoing(repo, missingroots=nodes, missingheads=repo.heads()) cg = changegroupmod.makechangegroup(repo, outgoing, '01', 'serve') - return streamres(reader=cg, v1compressible=True) + gen = iter(lambda: cg.read(32768), '') + return streamres(gen=gen) @wireprotocommand('changegroupsubset', 'bases heads') def 
changegroupsubset(repo, proto, bases, heads): @@ -810,13 +824,14 @@ outgoing = discovery.outgoing(repo, missingroots=bases, missingheads=heads) cg = changegroupmod.makechangegroup(repo, outgoing, '01', 'serve') - return streamres(reader=cg, v1compressible=True) + gen = iter(lambda: cg.read(32768), '') + return streamres(gen=gen) @wireprotocommand('debugwireargs', 'one two *') def debugwireargs(repo, proto, one, two, others): # only accept optional args from the known set opts = options('debugwireargs', ['three', 'four'], others) - return repo.debugwireargs(one, two, **opts) + return repo.debugwireargs(one, two, **pycompat.strkwargs(opts)) @wireprotocommand('getbundle', '*') def getbundle(repo, proto, others): @@ -847,20 +862,24 @@ raise error.Abort(bundle2requiredmain, hint=bundle2requiredhint) + prefercompressed = True + try: if repo.ui.configbool('server', 'disablefullbundle'): # Check to see if this is a full clone. clheads = set(repo.changelog.heads()) + changegroup = opts.get('cg', True) heads = set(opts.get('heads', set())) common = set(opts.get('common', set())) common.discard(nullid) - if not common and clheads == heads: + if changegroup and not common and clheads == heads: raise error.Abort( _('server has pull-based clones disabled'), hint=_('remove --pull if specified or upgrade Mercurial')) - chunks = exchange.getbundlechunks(repo, 'serve', - **pycompat.strkwargs(opts)) + info, chunks = exchange.getbundlechunks(repo, 'serve', + **pycompat.strkwargs(opts)) + prefercompressed = info.get('prefercompressed', True) except error.Abort as exc: # cleanly forward Abort error to the client if not exchange.bundle2requested(opts.get('bundlecaps')): @@ -875,8 +894,10 @@ advargs.append(('hint', exc.hint)) bundler.addpart(bundle2.bundlepart('error:abort', manargs, advargs)) - return streamres(gen=bundler.getchunks(), v1compressible=True) - return streamres(gen=chunks, v1compressible=True) + chunks = bundler.getchunks() + prefercompressed = False + + return 
streamres(gen=chunks, prefer_uncompressed=not prefercompressed) @wireprotocommand('heads') def heads(repo, proto): @@ -953,21 +974,7 @@ capability with a value representing the version and flags of the repo it is serving. Client checks to see if it understands the format. ''' - if not streamclone.allowservergeneration(repo): - return '1\n' - - def getstream(it): - yield '0\n' - for chunk in it: - yield chunk - - try: - # LockError may be raised before the first result is yielded. Don't - # emit output until we're sure we got the lock successfully. - it = streamclone.generatev1wireproto(repo) - return streamres(gen=getstream(it)) - except error.LockError: - return '2\n' + return streamres_legacy(streamclone.generatev1wireproto(repo)) @wireprotocommand('unbundle', 'heads') def unbundle(repo, proto, heads): @@ -1002,7 +1009,7 @@ if util.safehasattr(r, 'addpart'): # The return looks streamable, we are in the bundle2 case and # should return a stream. - return streamres(gen=r.getchunks()) + return streamres_legacy(gen=r.getchunks()) return pushres(r) finally: @@ -1066,4 +1073,4 @@ manargs, advargs)) except error.PushRaced as exc: bundler.newpart('error:pushraced', [('message', str(exc))]) - return streamres(gen=bundler.getchunks()) + return streamres_legacy(gen=bundler.getchunks()) diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/worker.py --- a/mercurial/worker.py Mon Jan 08 16:07:51 2018 -0800 +++ b/mercurial/worker.py Mon Jan 22 17:53:02 2018 -0500 @@ -11,6 +11,8 @@ import os import signal import sys +import threading +import time from .i18n import _ from . 
import ( @@ -53,7 +55,7 @@ raise error.Abort(_('number of cpus must be an integer')) return min(max(countcpus(), 4), 32) -if pycompat.isposix: +if pycompat.isposix or pycompat.iswindows: _startupcost = 0.01 else: _startupcost = 1e30 @@ -81,7 +83,8 @@ args - arguments to split into chunks, to pass to individual workers ''' - if worthwhile(ui, costperarg, len(args)): + enabled = ui.configbool('worker', 'enabled') + if enabled and worthwhile(ui, costperarg, len(args)): return _platformworker(ui, func, staticargs, args) return func(*staticargs + (args,)) @@ -203,7 +206,91 @@ elif os.WIFSIGNALED(code): return -os.WTERMSIG(code) -if not pycompat.iswindows: +def _windowsworker(ui, func, staticargs, args): + class Worker(threading.Thread): + def __init__(self, taskqueue, resultqueue, func, staticargs, + group=None, target=None, name=None, verbose=None): + threading.Thread.__init__(self, group=group, target=target, + name=name, verbose=verbose) + self._taskqueue = taskqueue + self._resultqueue = resultqueue + self._func = func + self._staticargs = staticargs + self._interrupted = False + self.daemon = True + self.exception = None + + def interrupt(self): + self._interrupted = True + + def run(self): + try: + while not self._taskqueue.empty(): + try: + args = self._taskqueue.get_nowait() + for res in self._func(*self._staticargs + (args,)): + self._resultqueue.put(res) + # threading doesn't provide a native way to + # interrupt execution. handle it manually at every + # iteration. + if self._interrupted: + return + except util.empty: + break + except Exception as e: + # store the exception such that the main thread can resurface + # it as if the func was running without workers. 
+ self.exception = e + raise + + threads = [] + def trykillworkers(): + # Allow up to 1 second to clean worker threads nicely + cleanupend = time.time() + 1 + for t in threads: + t.interrupt() + for t in threads: + remainingtime = cleanupend - time.time() + t.join(remainingtime) + if t.is_alive(): + # pass over the workers joining failure. it is more + # important to surface the inital exception than the + # fact that one of workers may be processing a large + # task and does not get to handle the interruption. + ui.warn(_("failed to kill worker threads while " + "handling an exception\n")) + return + + workers = _numworkers(ui) + resultqueue = util.queue() + taskqueue = util.queue() + # partition work to more pieces than workers to minimize the chance + # of uneven distribution of large tasks between the workers + for pargs in partition(args, workers * 20): + taskqueue.put(pargs) + for _i in range(workers): + t = Worker(taskqueue, resultqueue, func, staticargs) + threads.append(t) + t.start() + try: + while len(threads) > 0: + while not resultqueue.empty(): + yield resultqueue.get() + threads[0].join(0.05) + finishedthreads = [_t for _t in threads if not _t.is_alive()] + for t in finishedthreads: + if t.exception is not None: + raise t.exception + threads.remove(t) + except (Exception, KeyboardInterrupt): # re-raises + trykillworkers() + raise + while not resultqueue.empty(): + yield resultqueue.get() + +if pycompat.iswindows: + _platformworker = _windowsworker +else: _platformworker = _posixworker _exitstatus = _posixexitstatus diff -r 87676e8ee056 -r 27b6df1b5adb rust/.cargo/config --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/rust/.cargo/config Mon Jan 22 17:53:02 2018 -0500 @@ -0,0 +1,7 @@ +# Rust builds with a modern MSVC and uses a newer CRT. +# Python 2.7 has a shared library dependency on an older CRT (msvcr90.dll). 
+# We statically link the modern CRT to avoid multiple msvcr*.dll libraries +# being loaded and Python possibly picking up symbols from the newer runtime +# (which would be loaded first). +[target.'cfg(target_os = "windows")'] +rustflags = ["-Ctarget-feature=+crt-static"] diff -r 87676e8ee056 -r 27b6df1b5adb rust/Cargo.lock --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/rust/Cargo.lock Mon Jan 22 17:53:02 2018 -0500 @@ -0,0 +1,127 @@ +[[package]] +name = "aho-corasick" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "memchr 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "cpython" +version = "0.1.0" +source = "git+https://github.com/indygreg/rust-cpython.git?rev=c90d65cf84abfffce7ef54476bbfed56017a2f52#c90d65cf84abfffce7ef54476bbfed56017a2f52" +dependencies = [ + "libc 0.2.35 (registry+https://github.com/rust-lang/crates.io-index)", + "num-traits 0.1.41 (registry+https://github.com/rust-lang/crates.io-index)", + "python27-sys 0.1.2 (git+https://github.com/indygreg/rust-cpython.git?rev=c90d65cf84abfffce7ef54476bbfed56017a2f52)", +] + +[[package]] +name = "hgcli" +version = "0.1.0" +dependencies = [ + "cpython 0.1.0 (git+https://github.com/indygreg/rust-cpython.git?rev=c90d65cf84abfffce7ef54476bbfed56017a2f52)", + "libc 0.2.35 (registry+https://github.com/rust-lang/crates.io-index)", + "python27-sys 0.1.2 (git+https://github.com/indygreg/rust-cpython.git?rev=c90d65cf84abfffce7ef54476bbfed56017a2f52)", +] + +[[package]] +name = "kernel32-sys" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "libc" +version = "0.2.35" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "memchr" +version = "0.1.11" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "libc 0.2.35 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "num-traits" +version = "0.1.41" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "python27-sys" +version = "0.1.2" +source = "git+https://github.com/indygreg/rust-cpython.git?rev=c90d65cf84abfffce7ef54476bbfed56017a2f52#c90d65cf84abfffce7ef54476bbfed56017a2f52" +dependencies = [ + "libc 0.2.35 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 0.1.80 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "regex" +version = "0.1.80" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "aho-corasick 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", + "memchr 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "regex-syntax 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", + "thread_local 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "utf8-ranges 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "regex-syntax" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "thread-id" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.35 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "thread_local" +version = "0.2.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "thread-id 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "utf8-ranges" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "winapi" +version = "0.2.8" 
+source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "winapi-build" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[metadata] +"checksum aho-corasick 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ca972c2ea5f742bfce5687b9aef75506a764f61d37f8f649047846a9686ddb66" +"checksum cpython 0.1.0 (git+https://github.com/indygreg/rust-cpython.git?rev=c90d65cf84abfffce7ef54476bbfed56017a2f52)" = "" +"checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" +"checksum libc 0.2.35 (registry+https://github.com/rust-lang/crates.io-index)" = "96264e9b293e95d25bfcbbf8a88ffd1aedc85b754eba8b7d78012f638ba220eb" +"checksum memchr 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "d8b629fb514376c675b98c1421e80b151d3817ac42d7c667717d282761418d20" +"checksum num-traits 0.1.41 (registry+https://github.com/rust-lang/crates.io-index)" = "cacfcab5eb48250ee7d0c7896b51a2c5eec99c1feea5f32025635f5ae4b00070" +"checksum python27-sys 0.1.2 (git+https://github.com/indygreg/rust-cpython.git?rev=c90d65cf84abfffce7ef54476bbfed56017a2f52)" = "" +"checksum regex 0.1.80 (registry+https://github.com/rust-lang/crates.io-index)" = "4fd4ace6a8cf7860714a2c2280d6c1f7e6a413486c13298bbc86fd3da019402f" +"checksum regex-syntax 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "f9ec002c35e86791825ed294b50008eea9ddfc8def4420124fbc6b08db834957" +"checksum thread-id 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a9539db560102d1cef46b8b78ce737ff0bb64e7e18d35b2a5688f7d097d0ff03" +"checksum thread_local 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)" = "8576dbbfcaef9641452d5cf0df9b0e7eeab7694956dd33bb61515fb8f18cfdd5" +"checksum utf8-ranges 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a1ca13c08c41c9c3e04224ed9ff80461d97e121589ff27c753a16cb10830ae0f" 
+"checksum winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" +"checksum winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" diff -r 87676e8ee056 -r 27b6df1b5adb rust/Cargo.toml --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/rust/Cargo.toml Mon Jan 22 17:53:02 2018 -0500 @@ -0,0 +1,2 @@ +[workspace] +members = ["hgcli"] diff -r 87676e8ee056 -r 27b6df1b5adb rust/README.rst --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/rust/README.rst Mon Jan 22 17:53:02 2018 -0500 @@ -0,0 +1,78 @@ +=================== +Mercurial Rust Code +=================== + +This directory contains various Rust code for the Mercurial project. + +The top-level ``Cargo.toml`` file defines a workspace containing +all primary Mercurial crates. + +Building +======== + +To build the Rust components:: + + $ cargo build + +If you prefer a non-debug / release configuration:: + + $ cargo build --release + +Features +-------- + +The following Cargo features are available: + +localdev (default) + Produce files that work with an in-source-tree build. + + In this mode, the build finds and uses a ``python2.7`` binary from + ``PATH``. The ``hg`` binary assumes it runs from ``rust/target/hg`` + and it finds Mercurial files at ``dirname($0)/../../../``. + +Build Mechanism +--------------- + +The produced ``hg`` binary is *bound* to a CPython installation. The +binary links against and loads a CPython library that is discovered +at build time (by a ``build.rs`` Cargo build script). The Python +standard library defined by this CPython installation is also used. + +Finding the appropriate CPython installation to use is done by +the ``python27-sys`` crate's ``build.rs``. Its search order is:: + +1. ``PYTHON_SYS_EXECUTABLE`` environment variable. +2. ``python`` executable on ``PATH`` +3. ``python2`` executable on ``PATH`` +4. 
``python2.7`` executable on ``PATH`` + +Additional verification of the found Python will be performed by our +``build.rs`` to ensure it meets Mercurial's requirements. + +Details about the build-time configured Python are built into the +produced ``hg`` binary. This means that a built ``hg`` binary is only +suitable for a specific, well-defined role. These roles are controlled +by Cargo features (see above). + +Running +======= + +The ``hgcli`` crate produces an ``hg`` binary. You can run this binary +via ``cargo run``:: + + $ cargo run --manifest-path hgcli/Cargo.toml + +Or directly:: + + $ target/debug/hg + $ target/release/hg + +You can also run the test harness with this binary:: + + $ ./run-tests.py --with-hg ../rust/target/debug/hg + +.. note:: + + Integration with the test harness is still preliminary. Remember to + ``cargo build`` after changes because the test harness doesn't yet + automatically build Rust code. diff -r 87676e8ee056 -r 27b6df1b5adb rust/hgcli/Cargo.toml --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/rust/hgcli/Cargo.toml Mon Jan 22 17:53:02 2018 -0500 @@ -0,0 +1,35 @@ +[package] +name = "hgcli" +version = "0.1.0" +authors = ["Gregory Szorc "] +license = "GPL-2.0" + +build = "build.rs" + +[[bin]] +name = "hg" +path = "src/main.rs" + +[features] +# localdev: detect Python in PATH and use files from source checkout. +default = ["localdev"] +localdev = [] + +[dependencies] +libc = "0.2.34" + +# We currently use a custom build of cpython and python27-sys with the +# following changes: +# * GILGuard call of prepare_freethreaded_python() is removed. +# TODO switch to official release when our changes are incorporated. 
+[dependencies.cpython] +version = "0.1" +default-features = false +features = ["python27-sys"] +git = "https://github.com/indygreg/rust-cpython.git" +rev = "c90d65cf84abfffce7ef54476bbfed56017a2f52" + +[dependencies.python27-sys] +version = "0.1.2" +git = "https://github.com/indygreg/rust-cpython.git" +rev = "c90d65cf84abfffce7ef54476bbfed56017a2f52" diff -r 87676e8ee056 -r 27b6df1b5adb rust/hgcli/build.rs --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/rust/hgcli/build.rs Mon Jan 22 17:53:02 2018 -0500 @@ -0,0 +1,127 @@ +// build.rs -- Configure build environment for `hgcli` Rust package. +// +// Copyright 2017 Gregory Szorc +// +// This software may be used and distributed according to the terms of the +// GNU General Public License version 2 or any later version. + +use std::collections::HashMap; +use std::env; +use std::path::Path; +use std::process::Command; + +struct PythonConfig { + python: String, + config: HashMap, +} + +fn get_python_config() -> PythonConfig { + // The python27-sys crate exports a Cargo variable defining the full + // path to the interpreter being used. + let python = env::var("DEP_PYTHON27_PYTHON_INTERPRETER").expect( + "Missing DEP_PYTHON27_PYTHON_INTERPRETER; bad python27-sys crate?", + ); + + if !Path::new(&python).exists() { + panic!( + "Python interpreter {} does not exist; this should never happen", + python + ); + } + + // This is a bit hacky but it gets the job done. 
+ let separator = "SEPARATOR STRING"; + + let script = "import sysconfig; \ +c = sysconfig.get_config_vars(); \ +print('SEPARATOR STRING'.join('%s=%s' % i for i in c.items()))"; + + let mut command = Command::new(&python); + command.arg("-c").arg(script); + + let out = command.output().unwrap(); + + if !out.status.success() { + panic!( + "python script failed: {}", + String::from_utf8_lossy(&out.stderr) + ); + } + + let stdout = String::from_utf8_lossy(&out.stdout); + let mut m = HashMap::new(); + + for entry in stdout.split(separator) { + let mut parts = entry.splitn(2, "="); + let key = parts.next().unwrap(); + let value = parts.next().unwrap(); + m.insert(String::from(key), String::from(value)); + } + + PythonConfig { + python: python, + config: m, + } +} + +#[cfg(not(target_os = "windows"))] +fn have_shared(config: &PythonConfig) -> bool { + match config.config.get("Py_ENABLE_SHARED") { + Some(value) => value == "1", + None => false, + } +} + +#[cfg(target_os = "windows")] +fn have_shared(config: &PythonConfig) -> bool { + use std::path::PathBuf; + + // python27.dll should exist next to python2.7.exe. + let mut dll = PathBuf::from(&config.python); + dll.pop(); + dll.push("python27.dll"); + + return dll.exists(); +} + +const REQUIRED_CONFIG_FLAGS: [&str; 2] = ["Py_USING_UNICODE", "WITH_THREAD"]; + +fn main() { + let config = get_python_config(); + + println!("Using Python: {}", config.python); + println!("cargo:rustc-env=PYTHON_INTERPRETER={}", config.python); + + let prefix = config.config.get("prefix").unwrap(); + + println!("Prefix: {}", prefix); + + // TODO Windows builds don't expose these config flags. Figure out another + // way. + #[cfg(not(target_os = "windows"))] + for key in REQUIRED_CONFIG_FLAGS.iter() { + let result = match config.config.get(*key) { + Some(value) => value == "1", + None => false, + }; + + if !result { + panic!("Detected Python requires feature {}", key); + } + } + + // We need a Python shared library. 
+ if !have_shared(&config) { + panic!("Detected Python lacks a shared library, which is required"); + } + + let ucs4 = match config.config.get("Py_UNICODE_SIZE") { + Some(value) => value == "4", + None => false, + }; + + if !ucs4 { + #[cfg(not(target_os = "windows"))] + panic!("Detected Python doesn't support UCS-4 code points"); + } +} diff -r 87676e8ee056 -r 27b6df1b5adb rust/hgcli/src/main.rs --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/rust/hgcli/src/main.rs Mon Jan 22 17:53:02 2018 -0500 @@ -0,0 +1,233 @@ +// main.rs -- Main routines for `hg` program +// +// Copyright 2017 Gregory Szorc +// +// This software may be used and distributed according to the terms of the +// GNU General Public License version 2 or any later version. + +extern crate libc; +extern crate cpython; +extern crate python27_sys; + +use cpython::{NoArgs, ObjectProtocol, PyModule, PyResult, Python}; +use libc::{c_char, c_int}; + +use std::env; +use std::path::PathBuf; +use std::ffi::{CString, OsStr}; +#[cfg(target_family = "unix")] +use std::os::unix::ffi::{OsStrExt, OsStringExt}; + +#[derive(Debug)] +struct Environment { + _exe: PathBuf, + python_exe: PathBuf, + python_home: PathBuf, + mercurial_modules: PathBuf, +} + +/// Run Mercurial locally from a source distribution or checkout. +/// +/// hg is /rust/target//hg +/// Python interpreter is detected by build script. +/// Python home is relative to Python interpreter. +/// Mercurial files are relative to hg binary, which is relative to source root. 
+#[cfg(feature = "localdev")] +fn get_environment() -> Environment { + let exe = env::current_exe().unwrap(); + + let mut mercurial_modules = exe.clone(); + mercurial_modules.pop(); // /rust/target/ + mercurial_modules.pop(); // /rust/target + mercurial_modules.pop(); // /rust + mercurial_modules.pop(); // / + + let python_exe: &'static str = env!("PYTHON_INTERPRETER"); + let python_exe = PathBuf::from(python_exe); + + let mut python_home = python_exe.clone(); + python_home.pop(); + + // On Windows, python2.7.exe exists at the root directory of the Python + // install. Everywhere else, the Python install root is one level up. + if !python_exe.ends_with("python2.7.exe") { + python_home.pop(); + } + + Environment { + _exe: exe.clone(), + python_exe: python_exe, + python_home: python_home, + mercurial_modules: mercurial_modules.to_path_buf(), + } +} + +// On UNIX, platform string is just bytes and should not contain NUL. +#[cfg(target_family = "unix")] +fn cstring_from_os>(s: T) -> CString { + CString::new(s.as_ref().as_bytes()).unwrap() +} + +// TODO convert to ANSI characters? +#[cfg(target_family = "windows")] +fn cstring_from_os>(s: T) -> CString { + CString::new(s.as_ref().to_str().unwrap()).unwrap() +} + +// On UNIX, argv starts as an array of char*. So it is easy to convert +// to C strings. +#[cfg(target_family = "unix")] +fn args_to_cstrings() -> Vec { + env::args_os() + .map(|a| CString::new(a.into_vec()).unwrap()) + .collect() +} + +// TODO Windows support is incomplete. We should either use env::args_os() +// (or call into GetCommandLineW() + CommandLinetoArgvW()), convert these to +// PyUnicode instances, and pass these into Python/Mercurial outside the +// standard PySys_SetArgvEx() mechanism. This will allow us to preserve the +// raw bytes (since PySys_SetArgvEx() is based on char* and can drop wchar +// data. +// +// For now, we use env::args(). This will choke on invalid UTF-8 arguments. +// But it is better than nothing. 
+#[cfg(target_family = "windows")] +fn args_to_cstrings() -> Vec { + env::args().map(|a| CString::new(a).unwrap()).collect() +} + +fn set_python_home(env: &Environment) { + let raw = cstring_from_os(&env.python_home).into_raw(); + unsafe { + python27_sys::Py_SetPythonHome(raw); + } +} + +fn update_encoding(_py: Python, _sys_mod: &PyModule) { + // Call sys.setdefaultencoding("undefined") if HGUNICODEPEDANTRY is set. + let pedantry = env::var("HGUNICODEPEDANTRY").is_ok(); + + if pedantry { + // site.py removes the sys.setdefaultencoding attribute. So we need + // to reload the module to get a handle on it. This is a lesser + // used feature and we'll support this later. + // TODO support this + panic!("HGUNICODEPEDANTRY is not yet supported"); + } +} + +fn update_modules_path(env: &Environment, py: Python, sys_mod: &PyModule) { + let sys_path = sys_mod.get(py, "path").unwrap(); + sys_path + .call_method(py, "insert", (0, env.mercurial_modules.to_str()), None) + .expect("failed to update sys.path to location of Mercurial modules"); +} + +fn run() -> Result<(), i32> { + let env = get_environment(); + + //println!("{:?}", env); + + // Tell Python where it is installed. + set_python_home(&env); + + // Set program name. The backing memory needs to live for the duration of the + // interpreter. + // + // TODO consider storing this in a static or associating with lifetime of + // the Python interpreter. + // + // Yes, we use the path to the Python interpreter not argv[0] here. The + // reason is because Python uses the given path to find the location of + // Python files. Apparently we could define our own ``Py_GetPath()`` + // implementation. But this may require statically linking Python, which is + // not desirable. 
+ let program_name = cstring_from_os(&env.python_exe).as_ptr(); + unsafe { + python27_sys::Py_SetProgramName(program_name as *mut i8); + } + + unsafe { + python27_sys::Py_Initialize(); + } + + // https://docs.python.org/2/c-api/init.html#c.PySys_SetArgvEx has important + // usage information about PySys_SetArgvEx: + // + // * It says the first argument should be the script that is being executed. + // If not a script, it can be empty. We are definitely not a script. + // However, parts of Mercurial do look at sys.argv[0]. So we need to set + // something here. + // + // * When embedding Python, we should use ``PySys_SetArgvEx()`` and set + // ``updatepath=0`` for security reasons. Essentially, Python's default + // logic will treat an empty argv[0] in a manner that could result in + // sys.path picking up directories it shouldn't and this could lead to + // loading untrusted modules. + + // env::args() will panic if it sees a non-UTF-8 byte sequence. And + // Mercurial supports arbitrary encodings of input data. So we need to + // use OS-specific mechanisms to get the raw bytes without UTF-8 + // interference. + let args = args_to_cstrings(); + let argv: Vec<*const c_char> = args.iter().map(|a| a.as_ptr()).collect(); + + unsafe { + python27_sys::PySys_SetArgvEx(args.len() as c_int, argv.as_ptr() as *mut *mut i8, 0); + } + + let result; + { + // These need to be dropped before we call Py_Finalize(). Hence the + // block. + let gil = Python::acquire_gil(); + let py = gil.python(); + + // Mercurial code could call sys.exit(), which will call exit() + // itself. So this may not return. + // TODO this may cause issues on Windows due to the CRT mismatch. + // Investigate if we can intercept sys.exit() or SystemExit() to + // ensure we handle process exit. + result = match run_py(&env, py) { + // Print unhandled exceptions and exit code 255, as this is what + // `python` does. 
+ Err(err) => { + err.print(py); + Err(255) + } + Ok(()) => Ok(()), + }; + } + + unsafe { + python27_sys::Py_Finalize(); + } + + result +} + +fn run_py(env: &Environment, py: Python) -> PyResult<()> { + let sys_mod = py.import("sys").unwrap(); + + update_encoding(py, &sys_mod); + update_modules_path(&env, py, &sys_mod); + + // TODO consider a better error message on failure to import. + let demand_mod = py.import("hgdemandimport")?; + demand_mod.call(py, "enable", NoArgs, None)?; + + let dispatch_mod = py.import("mercurial.dispatch")?; + dispatch_mod.call(py, "run", NoArgs, None)?; + + Ok(()) +} + +fn main() { + let exit_code = match run() { + Err(err) => err, + Ok(()) => 0, + }; + + std::process::exit(exit_code); +} diff -r 87676e8ee056 -r 27b6df1b5adb setup.py --- a/setup.py Mon Jan 08 16:07:51 2018 -0800 +++ b/setup.py Mon Jan 22 17:53:02 2018 -0500 @@ -29,12 +29,16 @@ if sys.version_info[0] >= 3: printf = eval('print') libdir_escape = 'unicode_escape' + def sysstr(s): + return s.decode('latin-1') else: libdir_escape = 'string_escape' def printf(*args, **kwargs): f = kwargs.get('file', sys.stdout) end = kwargs.get('end', '\n') f.write(b' '.join(args) + end) + def sysstr(s): + return s # Attempt to guide users to a modern pip - this means that 2.6 users # should have a chance of getting a 4.2 release, and when we ratchet @@ -136,6 +140,18 @@ from distutils.sysconfig import get_python_inc, get_config_var from distutils.version import StrictVersion +def write_if_changed(path, content): + """Write content to a file iff the content hasn't changed.""" + if os.path.exists(path): + with open(path, 'rb') as fh: + current = fh.read() + else: + current = b'' + + if current != content: + with open(path, 'wb') as fh: + fh.write(content) + scripts = ['hg'] if os.name == 'nt': # We remove hg.bat if we are able to build hg.exe. 
@@ -283,8 +299,8 @@ if os.path.isdir('.hg'): hg = findhg() cmd = ['log', '-r', '.', '--template', '{tags}\n'] - numerictags = [t for t in hg.run(cmd).split() if t[0:1].isdigit()] - hgid = hg.run(['id', '-i']).strip() + numerictags = [t for t in sysstr(hg.run(cmd)).split() if t[0:1].isdigit()] + hgid = sysstr(hg.run(['id', '-i'])).strip() if not hgid: # Bail out if hg is having problems interacting with this repository, # rather than falling through and producing a bogus version number. @@ -297,7 +313,7 @@ version += '+' else: # no tag found ltagcmd = ['parents', '--template', '{latesttag}'] - ltag = hg.run(ltagcmd) + ltag = sysstr(hg.run(ltagcmd)) changessincecmd = ['log', '-T', 'x\n', '-r', "only(.,'%s')" % ltag] changessince = len(hg.run(changessincecmd).splitlines()) version = '%s+%s-%s' % (ltag, changessince, hgid) @@ -317,9 +333,14 @@ version = kw.get('node', '')[:12] if version: - with open("mercurial/__version__.py", "w") as f: - f.write('# this file is autogenerated by setup.py\n') - f.write('version = "%s"\n' % version) + versionb = version + if not isinstance(versionb, bytes): + versionb = versionb.encode('ascii') + + write_if_changed('mercurial/__version__.py', b''.join([ + b'# this file is autogenerated by setup.py\n' + b'version = "%s"\n' % versionb, + ])) try: oldpolicy = os.environ.get('HGMODULEPOLICY', None) @@ -478,9 +499,13 @@ modulepolicy = 'allow' else: modulepolicy = 'c' - with open(os.path.join(basepath, '__modulepolicy__.py'), "w") as f: - f.write('# this file is autogenerated by setup.py\n') - f.write('modulepolicy = b"%s"\n' % modulepolicy) + + content = b''.join([ + b'# this file is autogenerated by setup.py\n', + b'modulepolicy = b"%s"\n' % modulepolicy.encode('ascii'), + ]) + write_if_changed(os.path.join(basepath, '__modulepolicy__.py'), + content) build_py.run(self) @@ -767,7 +792,7 @@ 'mercurial.thirdparty.attr', 'hgext', 'hgext.convert', 'hgext.fsmonitor', 'hgext.fsmonitor.pywatchman', 'hgext.highlight', - 'hgext.largefiles', 
'hgext.zeroconf', 'hgext3rd', + 'hgext.largefiles', 'hgext.lfs', 'hgext.zeroconf', 'hgext3rd', 'hgdemandimport'] common_depends = ['mercurial/bitmanipulation.h', @@ -910,7 +935,7 @@ if py2exeloaded: extra['console'] = [ {'script':'hg', - 'copyright':'Copyright (C) 2005-2017 Matt Mackall and others', + 'copyright':'Copyright (C) 2005-2018 Matt Mackall and others', 'product_version':version}] # sub command of 'build' because 'py2exe' does not handle sub_commands build.sub_commands.insert(0, ('build_hgextindex', None)) diff -r 87676e8ee056 -r 27b6df1b5adb tests/autodiff.py --- a/tests/autodiff.py Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/autodiff.py Mon Jan 22 17:53:02 2018 -0500 @@ -12,33 +12,33 @@ cmdtable = {} command = registrar.command(cmdtable) -@command('autodiff', - [('', 'git', '', 'git upgrade mode (yes/no/auto/warn/abort)')], - '[OPTION]... [FILE]...') +@command(b'autodiff', + [(b'', b'git', b'', b'git upgrade mode (yes/no/auto/warn/abort)')], + b'[OPTION]... [FILE]...') def autodiff(ui, repo, *pats, **opts): diffopts = patch.difffeatureopts(ui, opts) - git = opts.get('git', 'no') + git = opts.get(b'git', b'no') brokenfiles = set() losedatafn = None - if git in ('yes', 'no'): - diffopts.git = git == 'yes' + if git in (b'yes', b'no'): + diffopts.git = git == b'yes' diffopts.upgrade = False - elif git == 'auto': + elif git == b'auto': diffopts.git = False diffopts.upgrade = True - elif git == 'warn': + elif git == b'warn': diffopts.git = False diffopts.upgrade = True def losedatafn(fn=None, **kwargs): brokenfiles.add(fn) return True - elif git == 'abort': + elif git == b'abort': diffopts.git = False diffopts.upgrade = True def losedatafn(fn=None, **kwargs): - raise error.Abort('losing data for %s' % fn) + raise error.Abort(b'losing data for %s' % fn) else: - raise error.Abort('--git must be yes, no or auto') + raise error.Abort(b'--git must be yes, no or auto') node1, node2 = scmutil.revpair(repo, []) m = scmutil.match(repo[node2], pats, opts) @@ -47,4 +47,4 
@@ for chunk in it: ui.write(chunk) for fn in sorted(brokenfiles): - ui.write(('data lost for: %s\n' % fn)) + ui.write((b'data lost for: %s\n' % fn)) diff -r 87676e8ee056 -r 27b6df1b5adb tests/blackbox-readonly-dispatch.py --- a/tests/blackbox-readonly-dispatch.py Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/blackbox-readonly-dispatch.py Mon Jan 22 17:53:02 2018 -0500 @@ -18,8 +18,8 @@ f = open('foo', 'wb') f.write('foo\n') f.close() -testdispatch("add foo") -testdispatch("commit -m commit1 -d 2000-01-01 foo") +testdispatch("--debug add foo") +testdispatch("--debug commit -m commit1 -d 2000-01-01 foo") # append to file 'foo' and commit f = open('foo', 'ab') @@ -29,8 +29,8 @@ os.rmdir(".hg/blackbox.log") # replace it with the real blackbox.log file os.rename(".hg/blackbox.log-", ".hg/blackbox.log") -testdispatch("commit -m commit2 -d 2000-01-02 foo") +testdispatch("--debug commit -m commit2 -d 2000-01-02 foo") # check 88803a69b24 (fancyopts modified command table) -testdispatch("log -r 0") -testdispatch("log -r tip") +testdispatch("--debug log -r 0") +testdispatch("--debug log -r tip") diff -r 87676e8ee056 -r 27b6df1b5adb tests/common-pattern.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/common-pattern.py Mon Jan 22 17:53:02 2018 -0500 @@ -0,0 +1,161 @@ +# common patterns in test at can safely be replaced +from __future__ import absolute_import + +import os + +substitutions = [ + # list of possible compressions + (br'(zstd,)?zlib,none,bzip2', + br'$USUAL_COMPRESSIONS$' + ), + # capabilities sent through http + (br'bundlecaps=HG20%2Cbundle2%3DHG20%250A' + br'bookmarks%250A' + br'changegroup%253D01%252C02%250A' + br'digests%253Dmd5%252Csha1%252Csha512%250A' + br'error%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250A' + br'hgtagsfnodes%250A' + br'listkeys%250A' + br'phases%253Dheads%250A' + br'pushkey%250A' + br'remote-changegroup%253Dhttp%252Chttps%250A' + br'stream%253Dv2', + # (the replacement patterns) + br'$USUAL_BUNDLE_CAPS$' + ), + 
(br'bundlecaps=HG20%2Cbundle2%3DHG20%250A' + br'bookmarks%250A' + br'changegroup%253D01%252C02%250A' + br'digests%253Dmd5%252Csha1%252Csha512%250A' + br'error%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250A' + br'hgtagsfnodes%250A' + br'listkeys%250A' + br'phases%253Dheads%250A' + br'pushkey%250A' + br'remote-changegroup%253Dhttp%252Chttps', + # (the replacement patterns) + br'$USUAL_BUNDLE_CAPS_SERVER$' + ), + # bundle2 capabilities sent through ssh + (br'bundle2=HG20%0A' + br'bookmarks%0A' + br'changegroup%3D01%2C02%0A' + br'digests%3Dmd5%2Csha1%2Csha512%0A' + br'error%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0A' + br'hgtagsfnodes%0A' + br'listkeys%0A' + br'phases%3Dheads%0A' + br'pushkey%0A' + br'remote-changegroup%3Dhttp%2Chttps%0A' + br'stream%3Dv2', + # (replacement patterns) + br'$USUAL_BUNDLE2_CAPS$' + ), + # bundle2 capabilities advertised by the server + (br'bundle2=HG20%0A' + br'bookmarks%0A' + br'changegroup%3D01%2C02%0A' + br'digests%3Dmd5%2Csha1%2Csha512%0A' + br'error%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0A' + br'hgtagsfnodes%0A' + br'listkeys%0A' + br'phases%3Dheads%0A' + br'pushkey%0A' + br'remote-changegroup%3Dhttp%2Chttps', + # (replacement patterns) + br'$USUAL_BUNDLE2_CAPS_SERVER$' + ), + # HTTP log dates + (br' - - \[\d\d/.../2\d\d\d \d\d:\d\d:\d\d] "GET', + br' - - [$LOGDATE$] "GET' + ), + # Windows has an extra '/' in the following lines that get globbed away: + # pushing to file:/*/$TESTTMP/r2 (glob) + # comparing with file:/*/$TESTTMP/r2 (glob) + # sub/maybelarge.dat: largefile 34..9c not available from + # file:/*/$TESTTMP/largefiles-repo (glob) + (br'(.*file:/)/?(/\$TESTTMP.*)', + lambda m: m.group(1) + b'*' + m.group(2) + b' (glob)' + ), +] + +# Various platform error strings, keyed on a common replacement string +_errors = { + br'$ENOENT$': ( + # strerror() + br'No such file or directory', + + # FormatMessage(ERROR_FILE_NOT_FOUND) + br'The system cannot find the file specified', + ), + 
br'$ENOTDIR$': ( + # strerror() + br'Not a directory', + + # FormatMessage(ERROR_PATH_NOT_FOUND) + br'The system cannot find the path specified', + ), + br'$ECONNRESET$': ( + # strerror() + br'Connection reset by peer', + + # FormatMessage(WSAECONNRESET) + br'An existing connection was forcibly closed by the remote host', + ), + br'$EADDRINUSE$': ( + # strerror() + br'Address already in use', + + # FormatMessage(WSAEADDRINUSE) + br'Only one usage of each socket address' + br' \(protocol/network address/port\) is normally permitted', + ), +} + +for replace, msgs in _errors.items(): + substitutions.extend((m, replace) for m in msgs) + +# Output lines on Windows that can be autocorrected for '\' vs '/' path +# differences. +_winpathfixes = [ + # cloning subrepo s\ss from $TESTTMP/t/s/ss + # cloning subrepo foo\bar from http://localhost:$HGPORT/foo/bar + br'(?m)^cloning subrepo \S+\\.*', + + # pulling from $TESTTMP\issue1852a + br'(?m)^pulling from \$TESTTMP\\.*', + + # pushing to $TESTTMP\a + br'(?m)^pushing to \$TESTTMP\\.*', + + # pushing subrepo s\ss to $TESTTMP/t/s/ss + br'(?m)^pushing subrepo \S+\\\S+ to.*', + + # moving d1\d11\a1 to d3/d11/a1 + br'(?m)^moving \S+\\.*', + + # d1\a: not recording move - dummy does not exist + br'\S+\\\S+: not recording move .+', + + # reverting s\a + br'(?m)^reverting (?!subrepo ).*\\.*', + + # saved backup bundle to + # $TESTTMP\test\.hg\strip-backup/443431ffac4f-2fc5398a-backup.hg + br'(?m)^saved backup bundle to \$TESTTMP.*\.hg', + + # no changes made to subrepo s\ss since last push to ../tcc/s/ss + br'(?m)^no changes made to subrepo \S+\\\S+ since.*', + + # changeset 5:9cc5aa7204f0: stuff/maybelarge.dat references missing + # $TESTTMP\largefiles-repo-hg\.hg\largefiles\76..38 + br'(?m)^changeset .* references (corrupted|missing) \$TESTTMP\\.*', + + # stuff/maybelarge.dat: largefile 76..38 not available from + # file:/*/$TESTTMP\largefiles-repo (glob) + br'.*: largefile \S+ not available from file:/\*/.+', +] + +if os.name == 
'nt': + substitutions.extend([(s, lambda match: match.group().replace(b'\\', b'/')) + for s in _winpathfixes]) diff -r 87676e8ee056 -r 27b6df1b5adb tests/dummysmtpd.py --- a/tests/dummysmtpd.py Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/dummysmtpd.py Mon Jan 22 17:53:02 2018 -0500 @@ -9,6 +9,7 @@ import smtpd import ssl import sys +import traceback from mercurial import ( server, @@ -27,6 +28,15 @@ def process_message(self, peer, mailfrom, rcpttos, data): log('%s from=%s to=%s\n' % (peer[0], mailfrom, ', '.join(rcpttos))) + def handle_error(self): + # On Windows, a bad SSL connection sometimes generates a WSAECONNRESET. + # The default handler will shutdown this server, and then both the + # current connection and subsequent ones fail on the client side with + # "No connection could be made because the target machine actively + # refused it". If we eat the error, then the client properly aborts in + # the expected way, and the server is available for subsequent requests. + traceback.print_exc() + class dummysmtpsecureserver(dummysmtpserver): def __init__(self, localaddr, certfile): dummysmtpserver.__init__(self, localaddr) diff -r 87676e8ee056 -r 27b6df1b5adb tests/dummyssh --- a/tests/dummyssh Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/dummyssh Mon Jan 22 17:53:02 2018 -0500 @@ -13,9 +13,9 @@ os.environ["SSH_CLIENT"] = "%s 1 2" % os.environ.get('LOCALIP', '127.0.0.1') log = open("dummylog", "ab") -log.write("Got arguments") +log.write(b"Got arguments") for i, arg in enumerate(sys.argv[1:]): - log.write(" %d:%s" % (i + 1, arg)) + log.write(b" %d:%s" % (i + 1, arg)) log.write("\n") log.close() hgcmd = sys.argv[2] diff -r 87676e8ee056 -r 27b6df1b5adb tests/f --- a/tests/f Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/f Mon Jan 22 17:53:02 2018 -0500 @@ -59,7 +59,7 @@ if isfile: if opts.type: facts.append('file') - if opts.hexdump or opts.dump or opts.md5: + if any((opts.hexdump, opts.dump, opts.md5, opts.sha1, opts.sha256)): content = open(f, 'rb').read() elif islink: 
if opts.type: @@ -95,6 +95,9 @@ if opts.sha1 and content is not None: h = hashlib.sha1(content) facts.append('sha1=%s' % h.hexdigest()[:opts.bytes]) + if opts.sha256 and content is not None: + h = hashlib.sha256(content) + facts.append('sha256=%s' % h.hexdigest()[:opts.bytes]) if isstdin: outfile.write(b', '.join(facts) + b'\n') elif facts: @@ -150,6 +153,8 @@ help="recurse into directories") parser.add_option("-S", "--sha1", action="store_true", help="show sha1 hash of the content") + parser.add_option("", "--sha256", action="store_true", + help="show sha256 hash of the content") parser.add_option("-M", "--md5", action="store_true", help="show md5 hash of the content") parser.add_option("-D", "--dump", action="store_true", diff -r 87676e8ee056 -r 27b6df1b5adb tests/flagprocessorext.py --- a/tests/flagprocessorext.py Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/flagprocessorext.py Mon Jan 22 17:53:02 2018 -0500 @@ -58,7 +58,7 @@ def noopaddrevision(orig, self, text, transaction, link, p1, p2, cachedelta=None, node=None, flags=revlog.REVIDX_DEFAULT_FLAGS): - if '[NOOP]' in text: + if b'[NOOP]' in text: flags |= REVIDX_NOOP return orig(self, text, transaction, link, p1, p2, cachedelta=cachedelta, node=node, flags=flags) @@ -66,7 +66,7 @@ def b64addrevision(orig, self, text, transaction, link, p1, p2, cachedelta=None, node=None, flags=revlog.REVIDX_DEFAULT_FLAGS): - if '[BASE64]' in text: + if b'[BASE64]' in text: flags |= REVIDX_BASE64 return orig(self, text, transaction, link, p1, p2, cachedelta=cachedelta, node=node, flags=flags) @@ -74,7 +74,7 @@ def gzipaddrevision(orig, self, text, transaction, link, p1, p2, cachedelta=None, node=None, flags=revlog.REVIDX_DEFAULT_FLAGS): - if '[GZIP]' in text: + if b'[GZIP]' in text: flags |= REVIDX_GZIP return orig(self, text, transaction, link, p1, p2, cachedelta=cachedelta, node=node, flags=flags) @@ -84,7 +84,7 @@ flags=revlog.REVIDX_DEFAULT_FLAGS): # This addrevision wrapper is meant to add a flag we will not have # 
transforms registered for, ensuring we handle this error case. - if '[FAIL]' in text: + if b'[FAIL]' in text: flags |= REVIDX_FAIL return orig(self, text, transaction, link, p1, p2, cachedelta=cachedelta, node=node, flags=flags) diff -r 87676e8ee056 -r 27b6df1b5adb tests/get-with-headers.py --- a/tests/get-with-headers.py Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/get-with-headers.py Mon Jan 22 17:53:02 2018 -0500 @@ -5,6 +5,7 @@ from __future__ import absolute_import, print_function +import argparse import json import os import sys @@ -22,25 +23,27 @@ except ImportError: pass -twice = False -if '--twice' in sys.argv: - sys.argv.remove('--twice') - twice = True -headeronly = False -if '--headeronly' in sys.argv: - sys.argv.remove('--headeronly') - headeronly = True -formatjson = False -if '--json' in sys.argv: - sys.argv.remove('--json') - formatjson = True +parser = argparse.ArgumentParser() +parser.add_argument('--twice', action='store_true') +parser.add_argument('--headeronly', action='store_true') +parser.add_argument('--json', action='store_true') +parser.add_argument('--hgproto') +parser.add_argument('--requestheader', nargs='*', default=[], + help='Send an additional HTTP request header. Argument ' + 'value is
      =') +parser.add_argument('--bodyfile', + help='Write HTTP response body to a file') +parser.add_argument('host') +parser.add_argument('path') +parser.add_argument('show', nargs='*') -hgproto = None -if '--hgproto' in sys.argv: - idx = sys.argv.index('--hgproto') - hgproto = sys.argv[idx + 1] - sys.argv.pop(idx) - sys.argv.pop(idx) +args = parser.parse_args() + +twice = args.twice +headeronly = args.headeronly +formatjson = args.json +hgproto = args.hgproto +requestheaders = args.requestheader tag = None def request(host, path, show): @@ -52,6 +55,10 @@ if hgproto: headers['X-HgProto-1'] = hgproto + for header in requestheaders: + key, value = header.split('=', 1) + headers[key] = value + conn = httplib.HTTPConnection(host) conn.request("GET", '/' + path, None, headers) response = conn.getresponse() @@ -66,6 +73,11 @@ print() data = response.read() + if args.bodyfile: + bodyfh = open(args.bodyfile, 'wb') + else: + bodyfh = sys.stdout + # Pretty print JSON. This also has the beneficial side-effect # of verifying emitted JSON is well-formed. 
if formatjson: @@ -74,18 +86,22 @@ data = json.loads(data) lines = json.dumps(data, sort_keys=True, indent=2).splitlines() for line in lines: - print(line.rstrip()) + bodyfh.write(line.rstrip()) + bodyfh.write(b'\n') else: - sys.stdout.write(data) + bodyfh.write(data) + + if args.bodyfile: + bodyfh.close() if twice and response.getheader('ETag', None): tag = response.getheader('ETag') return response.status -status = request(sys.argv[1], sys.argv[2], sys.argv[3:]) +status = request(args.host, args.path, args.show) if twice: - status = request(sys.argv[1], sys.argv[2], sys.argv[3:]) + status = request(args.host, args.path, args.show) if 200 <= status <= 305: sys.exit(0) diff -r 87676e8ee056 -r 27b6df1b5adb tests/hghave.py --- a/tests/hghave.py Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/hghave.py Mon Jan 22 17:53:02 2018 -0500 @@ -284,6 +284,17 @@ return (0, 0) return (int(m.group(1)), int(m.group(2))) +# https://github.com/git-lfs/lfs-test-server +@check("lfs-test-server", "git-lfs test server") +def has_lfsserver(): + exe = 'lfs-test-server' + if has_windows(): + exe = 'lfs-test-server.exe' + return any( + os.access(os.path.join(path, exe), os.X_OK) + for path in os.environ["PATH"].split(os.pathsep) + ) + @checkvers("git", "git client (with ext::sh support) version >= %s", (1.9,)) def has_git_range(v): major, minor = v.split('.')[0:2] @@ -444,6 +455,10 @@ return matchoutput("clang-format --help", br"^OVERVIEW: A tool to format C/C\+\+[^ ]+ code.") +@check("jshint", "JSHint static code analysis tool") +def has_jshint(): + return matchoutput("jshint --version 2>&1", br"jshint v") + @check("pygments", "Pygments source highlighting library") def has_pygments(): try: @@ -685,3 +700,11 @@ return True except ImportError: return False + +@check("clang-libfuzzer", "clang new enough to include libfuzzer") +def has_clang_libfuzzer(): + mat = matchoutput('clang --version', 'clang version (\d)') + if mat: + # libfuzzer is new in clang 6 + return int(mat.group(1)) > 5 + return 
False diff -r 87676e8ee056 -r 27b6df1b5adb tests/list-tree.py --- a/tests/list-tree.py Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/list-tree.py Mon Jan 22 17:53:02 2018 -0500 @@ -24,4 +24,4 @@ else: yield p -print('\n'.join(sorted(gather()))) +print('\n'.join(sorted(gather(), key=lambda x: x.replace(os.path.sep, '/')))) diff -r 87676e8ee056 -r 27b6df1b5adb tests/logexceptions.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/logexceptions.py Mon Jan 22 17:53:02 2018 -0500 @@ -0,0 +1,73 @@ +# logexceptions.py - Write files containing info about Mercurial exceptions +# +# Copyright 2017 Matt Mackall +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +from __future__ import absolute_import + +import inspect +import os +import sys +import traceback +import uuid + +from mercurial import ( + dispatch, + extensions, +) + +def handleexception(orig, ui): + res = orig(ui) + + if not ui.environ.get(b'HGEXCEPTIONSDIR'): + return res + + dest = os.path.join(ui.environ[b'HGEXCEPTIONSDIR'], + str(uuid.uuid4()).encode('ascii')) + + exc_type, exc_value, exc_tb = sys.exc_info() + + stack = [] + tb = exc_tb + while tb: + stack.append(tb) + tb = tb.tb_next + stack.reverse() + + hgframe = 'unknown' + hgline = 'unknown' + + # Find the first Mercurial frame in the stack. 
+ for tb in stack: + mod = inspect.getmodule(tb) + if not mod.__name__.startswith(('hg', 'mercurial')): + continue + + frame = tb.tb_frame + + try: + with open(inspect.getsourcefile(tb), 'r') as fh: + hgline = fh.readlines()[frame.f_lineno - 1].strip() + except (IndexError, OSError): + pass + + hgframe = '%s:%d' % (frame.f_code.co_filename, frame.f_lineno) + break + + primary = traceback.extract_tb(exc_tb)[-1] + primaryframe = '%s:%d' % (primary.filename, primary.lineno) + + with open(dest, 'wb') as fh: + parts = [ + str(exc_value), + primaryframe, + hgframe, + hgline, + ] + fh.write(b'\0'.join(p.encode('utf-8', 'replace') for p in parts)) + +def extsetup(ui): + extensions.wrapfunction(dispatch, 'handlecommandexception', + handleexception) diff -r 87676e8ee056 -r 27b6df1b5adb tests/revlog-formatv0.py --- a/tests/revlog-formatv0.py Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/revlog-formatv0.py Mon Jan 22 17:53:02 2018 -0500 @@ -22,27 +22,27 @@ import sys files = [ - ('formatv0/.hg/00changelog.i', - '000000000000004400000000000000000000000000000000000000' - '000000000000000000000000000000000000000000000000000000' - '0000a1ef0b125355d27765928be600cfe85784284ab3'), - ('formatv0/.hg/00changelog.d', - '756163613935613961356635353036303562366138343738336237' - '61623536363738616436356635380a757365720a3020300a656d70' - '74790a0a656d7074792066696c65'), - ('formatv0/.hg/00manifest.i', - '000000000000003000000000000000000000000000000000000000' - '000000000000000000000000000000000000000000000000000000' - '0000aca95a9a5f550605b6a84783b7ab56678ad65f58'), - ('formatv0/.hg/00manifest.d', - '75656d707479006238306465356431333837353835343163356630' - '35323635616431343461623966613836643164620a'), - ('formatv0/.hg/data/empty.i', - '000000000000000000000000000000000000000000000000000000' - '000000000000000000000000000000000000000000000000000000' - '0000b80de5d138758541c5f05265ad144ab9fa86d1db'), - ('formatv0/.hg/data/empty.d', - ''), + (b'formatv0/.hg/00changelog.i', + 
b'000000000000004400000000000000000000000000000000000000' + b'000000000000000000000000000000000000000000000000000000' + b'0000a1ef0b125355d27765928be600cfe85784284ab3'), + (b'formatv0/.hg/00changelog.d', + b'756163613935613961356635353036303562366138343738336237' + b'61623536363738616436356635380a757365720a3020300a656d70' + b'74790a0a656d7074792066696c65'), + (b'formatv0/.hg/00manifest.i', + b'000000000000003000000000000000000000000000000000000000' + b'000000000000000000000000000000000000000000000000000000' + b'0000aca95a9a5f550605b6a84783b7ab56678ad65f58'), + (b'formatv0/.hg/00manifest.d', + b'75656d707479006238306465356431333837353835343163356630' + b'35323635616431343461623966613836643164620a'), + (b'formatv0/.hg/data/empty.i', + b'000000000000000000000000000000000000000000000000000000' + b'000000000000000000000000000000000000000000000000000000' + b'0000b80de5d138758541c5f05265ad144ab9fa86d1db'), + (b'formatv0/.hg/data/empty.d', + b''), ] def makedirs(name): diff -r 87676e8ee056 -r 27b6df1b5adb tests/run-tests.py --- a/tests/run-tests.py Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/run-tests.py Mon Jan 22 17:53:02 2018 -0500 @@ -45,11 +45,12 @@ from __future__ import absolute_import, print_function +import argparse +import collections import difflib import distutils.version as version import errno import json -import optparse import os import random import re @@ -296,122 +297,132 @@ def getparser(): """Obtain the OptionParser used by the CLI.""" - parser = optparse.OptionParser("%prog [options] [tests]") - - # keep these sorted - parser.add_option("--blacklist", action="append", + parser = argparse.ArgumentParser(usage='%(prog)s [options] [tests]') + + selection = parser.add_argument_group('Test Selection') + selection.add_argument('--allow-slow-tests', action='store_true', + help='allow extremely slow tests') + selection.add_argument("--blacklist", action="append", help="skip tests listed in the specified blacklist file") - parser.add_option("--whitelist", 
action="append", + selection.add_argument("--changed", + help="run tests that are changed in parent rev or working directory") + selection.add_argument("-k", "--keywords", + help="run tests matching keywords") + selection.add_argument("-r", "--retest", action="store_true", + help = "retest failed tests") + selection.add_argument("--test-list", action="append", + help="read tests to run from the specified file") + selection.add_argument("--whitelist", action="append", help="always run tests listed in the specified whitelist file") - parser.add_option("--test-list", action="append", - help="read tests to run from the specified file") - parser.add_option("--changed", type="string", - help="run tests that are changed in parent rev or working directory") - parser.add_option("-C", "--annotate", action="store_true", - help="output files annotated with coverage") - parser.add_option("-c", "--cover", action="store_true", - help="print a test coverage report") - parser.add_option("--color", choices=["always", "auto", "never"], - default=os.environ.get('HGRUNTESTSCOLOR', 'auto'), - help="colorisation: always|auto|never (default: auto)") - parser.add_option("-d", "--debug", action="store_true", + selection.add_argument('tests', metavar='TESTS', nargs='*', + help='Tests to run') + + harness = parser.add_argument_group('Test Harness Behavior') + harness.add_argument('--bisect-repo', + metavar='bisect_repo', + help=("Path of a repo to bisect. 
Use together with " + "--known-good-rev")) + harness.add_argument("-d", "--debug", action="store_true", help="debug mode: write output of test scripts to console" " rather than capturing and diffing it (disables timeout)") - parser.add_option("-f", "--first", action="store_true", + harness.add_argument("-f", "--first", action="store_true", help="exit on the first test failure") - parser.add_option("-H", "--htmlcov", action="store_true", - help="create an HTML report of the coverage of the files") - parser.add_option("-i", "--interactive", action="store_true", + harness.add_argument("-i", "--interactive", action="store_true", help="prompt to accept changed output") - parser.add_option("-j", "--jobs", type="int", + harness.add_argument("-j", "--jobs", type=int, help="number of jobs to run in parallel" " (default: $%s or %d)" % defaults['jobs']) - parser.add_option("--keep-tmpdir", action="store_true", + harness.add_argument("--keep-tmpdir", action="store_true", help="keep temporary directory after running tests") - parser.add_option("-k", "--keywords", - help="run tests matching keywords") - parser.add_option("--list-tests", action="store_true", + harness.add_argument('--known-good-rev', + metavar="known_good_rev", + help=("Automatically bisect any failures using this " + "revision as a known-good revision.")) + harness.add_argument("--list-tests", action="store_true", help="list tests instead of running them") - parser.add_option("-l", "--local", action="store_true", + harness.add_argument("--loop", action="store_true", + help="loop tests repeatedly") + harness.add_argument('--random', action="store_true", + help='run tests in random order') + harness.add_argument("-p", "--port", type=int, + help="port on which servers should listen" + " (default: $%s or %d)" % defaults['port']) + harness.add_argument('--profile-runner', action='store_true', + help='run statprof on run-tests') + harness.add_argument("-R", "--restart", action="store_true", + help="restart at last 
error") + harness.add_argument("--runs-per-test", type=int, dest="runs_per_test", + help="run each test N times (default=1)", default=1) + harness.add_argument("--shell", + help="shell to use (default: $%s or %s)" % defaults['shell']) + harness.add_argument('--showchannels', action='store_true', + help='show scheduling channels') + harness.add_argument("--slowtimeout", type=int, + help="kill errant slow tests after SLOWTIMEOUT seconds" + " (default: $%s or %d)" % defaults['slowtimeout']) + harness.add_argument("-t", "--timeout", type=int, + help="kill errant tests after TIMEOUT seconds" + " (default: $%s or %d)" % defaults['timeout']) + harness.add_argument("--tmpdir", + help="run tests in the given temporary directory" + " (implies --keep-tmpdir)") + harness.add_argument("-v", "--verbose", action="store_true", + help="output verbose messages") + + hgconf = parser.add_argument_group('Mercurial Configuration') + hgconf.add_argument("--chg", action="store_true", + help="install and use chg wrapper in place of hg") + hgconf.add_argument("--compiler", + help="compiler to build with") + hgconf.add_argument('--extra-config-opt', action="append", default=[], + help='set the given config opt in the test hgrc') + hgconf.add_argument("-l", "--local", action="store_true", help="shortcut for --with-hg=/../hg, " "and --with-chg=/../contrib/chg/chg if --chg is set") - parser.add_option("--loop", action="store_true", - help="loop tests repeatedly") - parser.add_option("--runs-per-test", type="int", dest="runs_per_test", - help="run each test N times (default=1)", default=1) - parser.add_option("-n", "--nodiff", action="store_true", - help="skip showing test changes") - parser.add_option("--outputdir", type="string", - help="directory to write error logs to (default=test directory)") - parser.add_option("-p", "--port", type="int", - help="port on which servers should listen" - " (default: $%s or %d)" % defaults['port']) - parser.add_option("--compiler", type="string", - 
help="compiler to build with") - parser.add_option("--pure", action="store_true", + hgconf.add_argument("--ipv6", action="store_true", + help="prefer IPv6 to IPv4 for network related tests") + hgconf.add_argument("--pure", action="store_true", help="use pure Python code instead of C extensions") - parser.add_option("-R", "--restart", action="store_true", - help="restart at last error") - parser.add_option("-r", "--retest", action="store_true", - help="retest failed tests") - parser.add_option("-S", "--noskips", action="store_true", - help="don't report skip tests verbosely") - parser.add_option("--shell", type="string", - help="shell to use (default: $%s or %s)" % defaults['shell']) - parser.add_option("-t", "--timeout", type="int", - help="kill errant tests after TIMEOUT seconds" - " (default: $%s or %d)" % defaults['timeout']) - parser.add_option("--slowtimeout", type="int", - help="kill errant slow tests after SLOWTIMEOUT seconds" - " (default: $%s or %d)" % defaults['slowtimeout']) - parser.add_option("--time", action="store_true", - help="time how long each test takes") - parser.add_option("--json", action="store_true", - help="store test result data in 'report.json' file") - parser.add_option("--tmpdir", type="string", - help="run tests in the given temporary directory" - " (implies --keep-tmpdir)") - parser.add_option("-v", "--verbose", action="store_true", - help="output verbose messages") - parser.add_option("--xunit", type="string", - help="record xunit results at specified path") - parser.add_option("--view", type="string", - help="external diff viewer") - parser.add_option("--with-hg", type="string", + hgconf.add_argument("-3", "--py3k-warnings", action="store_true", + help="enable Py3k warnings on Python 2.7+") + hgconf.add_argument("--with-chg", metavar="CHG", + help="use specified chg wrapper in place of hg") + hgconf.add_argument("--with-hg", metavar="HG", help="test using specified hg script rather than a " "temporary installation") - 
parser.add_option("--chg", action="store_true", - help="install and use chg wrapper in place of hg") - parser.add_option("--with-chg", metavar="CHG", - help="use specified chg wrapper in place of hg") - parser.add_option("--ipv6", action="store_true", - help="prefer IPv6 to IPv4 for network related tests") - parser.add_option("-3", "--py3k-warnings", action="store_true", - help="enable Py3k warnings on Python 2.7+") # This option should be deleted once test-check-py3-compat.t and other # Python 3 tests run with Python 3. - parser.add_option("--with-python3", metavar="PYTHON3", - help="Python 3 interpreter (if running under Python 2)" - " (TEMPORARY)") - parser.add_option('--extra-config-opt', action="append", - help='set the given config opt in the test hgrc') - parser.add_option('--random', action="store_true", - help='run tests in random order') - parser.add_option('--profile-runner', action='store_true', - help='run statprof on run-tests') - parser.add_option('--allow-slow-tests', action='store_true', - help='allow extremely slow tests') - parser.add_option('--showchannels', action='store_true', - help='show scheduling channels') - parser.add_option('--known-good-rev', type="string", - metavar="known_good_rev", - help=("Automatically bisect any failures using this " - "revision as a known-good revision.")) - parser.add_option('--bisect-repo', type="string", - metavar='bisect_repo', - help=("Path of a repo to bisect. 
Use together with " - "--known-good-rev")) + hgconf.add_argument("--with-python3", metavar="PYTHON3", + help="Python 3 interpreter (if running under Python 2)" + " (TEMPORARY)") + + reporting = parser.add_argument_group('Results Reporting') + reporting.add_argument("-C", "--annotate", action="store_true", + help="output files annotated with coverage") + reporting.add_argument("--color", choices=["always", "auto", "never"], + default=os.environ.get('HGRUNTESTSCOLOR', 'auto'), + help="colorisation: always|auto|never (default: auto)") + reporting.add_argument("-c", "--cover", action="store_true", + help="print a test coverage report") + reporting.add_argument('--exceptions', action='store_true', + help='log all exceptions and generate an exception report') + reporting.add_argument("-H", "--htmlcov", action="store_true", + help="create an HTML report of the coverage of the files") + reporting.add_argument("--json", action="store_true", + help="store test result data in 'report.json' file") + reporting.add_argument("--outputdir", + help="directory to write error logs to (default=test directory)") + reporting.add_argument("-n", "--nodiff", action="store_true", + help="skip showing test changes") + reporting.add_argument("-S", "--noskips", action="store_true", + help="don't report skip tests verbosely") + reporting.add_argument("--time", action="store_true", + help="time how long each test takes") + reporting.add_argument("--view", + help="external diff viewer") + reporting.add_argument("--xunit", + help="record xunit results at specified path") for option, (envvar, default) in defaults.items(): defaults[option] = type(default)(os.environ.get(envvar, default)) @@ -421,7 +432,7 @@ def parseargs(args, parser): """Parse arguments with our OptionParser and validate results.""" - (options, args) = parser.parse_args(args) + options = parser.parse_args(args) # jython is always pure if 'java' in sys.platform or '__pypy__' in sys.modules: @@ -550,7 +561,7 @@ if 
options.showchannels: options.nodiff = True - return (options, args) + return options def rename(src, dst): """Like os.rename(), trade atomicity and opened files friendliness @@ -892,10 +903,9 @@ # Diff generation may rely on written .err file. if (ret != 0 or out != self._refout) and not self._skipped \ and not self._debug: - f = open(self.errpath, 'wb') - for line in out: - f.write(line) - f.close() + with open(self.errpath, 'wb') as f: + for line in out: + f.write(line) # The result object handles diff calculation for us. with firstlock: @@ -936,10 +946,9 @@ if (self._ret != 0 or self._out != self._refout) and not self._skipped \ and not self._debug and self._out: - f = open(self.errpath, 'wb') - for line in self._out: - f.write(line) - f.close() + with open(self.errpath, 'wb') as f: + for line in self._out: + f.write(line) vlog("# Ret was:", self._ret, '(%s)' % self.name) @@ -967,13 +976,20 @@ self._portmap(0), self._portmap(1), self._portmap(2), - (br'(?m)^(saved backup bundle to .*\.hg)( \(glob\))?$', - br'\1 (glob)'), (br'([^0-9])%s' % re.escape(self._localip()), br'\1$LOCALIP'), (br'\bHG_TXNID=TXN:[a-f0-9]{40}\b', br'HG_TXNID=TXN:$ID$'), ] r.append((self._escapepath(self._testtmp), b'$TESTTMP')) + replacementfile = os.path.join(self._testdir, b'common-pattern.py') + + if os.path.exists(replacementfile): + data = {} + with open(replacementfile, mode='rb') as source: + # the intermediate 'compile' step help with debugging + code = compile(source.read(), replacementfile, 'exec') + exec(code, data) + r.extend(data.get('substitutions', ())) return r def _escapepath(self, p): @@ -1075,29 +1091,31 @@ def _createhgrc(self, path): """Create an hgrc file for this test.""" - hgrc = open(path, 'wb') - hgrc.write(b'[ui]\n') - hgrc.write(b'slash = True\n') - hgrc.write(b'interactive = False\n') - hgrc.write(b'mergemarkers = detailed\n') - hgrc.write(b'promptecho = True\n') - hgrc.write(b'[defaults]\n') - hgrc.write(b'[devel]\n') - hgrc.write(b'all-warnings = true\n') - 
hgrc.write(b'default-date = 0 0\n') - hgrc.write(b'[largefiles]\n') - hgrc.write(b'usercache = %s\n' % - (os.path.join(self._testtmp, b'.cache/largefiles'))) - hgrc.write(b'[web]\n') - hgrc.write(b'address = localhost\n') - hgrc.write(b'ipv6 = %s\n' % str(self._useipv6).encode('ascii')) - - for opt in self._extraconfigopts: - section, key = opt.split('.', 1) - assert '=' in key, ('extra config opt %s must ' - 'have an = for assignment' % opt) - hgrc.write(b'[%s]\n%s\n' % (section, key)) - hgrc.close() + with open(path, 'wb') as hgrc: + hgrc.write(b'[ui]\n') + hgrc.write(b'slash = True\n') + hgrc.write(b'interactive = False\n') + hgrc.write(b'mergemarkers = detailed\n') + hgrc.write(b'promptecho = True\n') + hgrc.write(b'[defaults]\n') + hgrc.write(b'[devel]\n') + hgrc.write(b'all-warnings = true\n') + hgrc.write(b'default-date = 0 0\n') + hgrc.write(b'[largefiles]\n') + hgrc.write(b'usercache = %s\n' % + (os.path.join(self._testtmp, b'.cache/largefiles'))) + hgrc.write(b'[lfs]\n') + hgrc.write(b'usercache = %s\n' % + (os.path.join(self._testtmp, b'.cache/lfs'))) + hgrc.write(b'[web]\n') + hgrc.write(b'address = localhost\n') + hgrc.write(b'ipv6 = %s\n' % str(self._useipv6).encode('ascii')) + + for opt in self._extraconfigopts: + section, key = opt.encode('utf-8').split(b'.', 1) + assert b'=' in key, ('extra config opt %s must ' + 'have an = for assignment' % opt) + hgrc.write(b'[%s]\n%s\n' % (section, key)) def fail(self, msg): # unittest differentiates between errored and failed. 
@@ -1203,9 +1221,7 @@ def __init__(self, path, *args, **kwds): # accept an extra "case" parameter - case = None - if 'case' in kwds: - case = kwds.pop('case') + case = kwds.pop('case', None) self._case = case self._allcases = parsettestcases(path) super(TTest, self).__init__(path, *args, **kwds) @@ -1219,9 +1235,8 @@ return os.path.join(self._testdir, self.bname) def _run(self, env): - f = open(self.path, 'rb') - lines = f.readlines() - f.close() + with open(self.path, 'rb') as f: + lines = f.readlines() # .t file is both reference output and the test input, keep reference # output updated with the the test input. This avoids some race @@ -1233,10 +1248,9 @@ # Write out the generated script. fname = b'%s.sh' % self._testtmp - f = open(fname, 'wb') - for l in script: - f.write(l) - f.close() + with open(fname, 'wb') as f: + for l in script: + f.write(l) cmd = b'%s "%s"' % (self._shell, fname) vlog("# Running", cmd) @@ -1326,6 +1340,9 @@ script.append(b'alias hg="%s"\n' % self._hgcommand) if os.getenv('MSYSTEM'): script.append(b'alias pwd="pwd -W"\n') + if self._case: + script.append(b'TESTCASE=%s\n' % shellquote(self._case)) + script.append(b'export TESTCASE\n') n = 0 for n, l in enumerate(lines): @@ -1436,10 +1453,7 @@ r = self.linematch(el, lout) if isinstance(r, str): - if r == '+glob': - lout = el[:-1] + ' (glob)\n' - r = '' # Warn only this line. - elif r == '-glob': + if r == '-glob': lout = ''.join(el.rsplit(' (glob)', 1)) r = '' # Warn only this line. 
elif r == "retry": @@ -1523,6 +1537,7 @@ @staticmethod def rematch(el, l): try: + el = b'(?:' + el + b')' # use \Z to ensure that the regex matches to the end of the string if os.name == 'nt': return re.match(el + br'\r?\n\Z', l) @@ -1594,8 +1609,10 @@ if l.endswith(b" (glob)\n"): l = l[:-8] + b"\n" return TTest.globmatch(el[:-8], l) or retry - if os.altsep and l.replace(b'\\', b'/') == el: - return b'+glob' + if os.altsep: + _l = l.replace(b'\\', b'/') + if el == _l or os.name == 'nt' and el[:-1] + b'\r\n' == _l: + return True return retry @staticmethod @@ -1873,9 +1890,8 @@ continue if self._keywords: - f = open(test.path, 'rb') - t = f.read().lower() + test.bname.lower() - f.close() + with open(test.path, 'rb') as f: + t = f.read().lower() + test.bname.lower() ignored = False for k in self._keywords.lower().split(): if k not in t: @@ -2104,6 +2120,18 @@ os.environ['PYTHONHASHSEED']) if self._runner.options.time: self.printtimes(result.times) + + if self._runner.options.exceptions: + exceptions = aggregateexceptions( + os.path.join(self._runner._outputdir, b'exceptions')) + total = sum(exceptions.values()) + + self.stream.writeln('Exceptions Report:') + self.stream.writeln('%d total from %d frames' % + (total, len(exceptions))) + for (frame, line, exc), count in exceptions.most_common(): + self.stream.writeln('%d\t%s: %s' % (count, frame, exc)) + self.stream.flush() return result @@ -2251,6 +2279,50 @@ separators=(',', ': ')) outf.writelines(("testreport =", jsonout)) +def sorttests(testdescs, shuffle=False): + """Do an in-place sort of tests.""" + if shuffle: + random.shuffle(testdescs) + return + + # keywords for slow tests + slow = {b'svn': 10, + b'cvs': 10, + b'hghave': 10, + b'largefiles-update': 10, + b'run-tests': 10, + b'corruption': 10, + b'race': 10, + b'i18n': 10, + b'check': 100, + b'gendoc': 100, + b'contrib-perf': 200, + } + perf = {} + + def sortkey(f): + # run largest tests first, as they tend to take the longest + f = f['path'] + try: + return 
perf[f] + except KeyError: + try: + val = -os.stat(f).st_size + except OSError as e: + if e.errno != errno.ENOENT: + raise + perf[f] = -1e9 # file does not exist, tell early + return -1e9 + for kw, mul in slow.items(): + if kw in f: + val *= mul + if f.endswith(b'.py'): + val /= 10.0 + perf[f] = val / 1000.0 + return perf[f] + + testdescs.sort(key=sortkey) + class TestRunner(object): """Holds context for executing tests. @@ -2295,18 +2367,16 @@ oldmask = os.umask(0o22) try: parser = parser or getparser() - options, args = parseargs(args, parser) - # positional arguments are paths to test files to run, so - # we make sure they're all bytestrings - args = [_bytespath(a) for a in args] + options = parseargs(args, parser) + tests = [_bytespath(a) for a in options.tests] if options.test_list is not None: for listfile in options.test_list: with open(listfile, 'rb') as f: - args.extend(t for t in f.read().splitlines() if t) + tests.extend(t for t in f.read().splitlines() if t) self.options = options self._checktools() - testdescs = self.findtests(args) + testdescs = self.findtests(tests) if options.profile_runner: import statprof statprof.start() @@ -2320,51 +2390,22 @@ os.umask(oldmask) def _run(self, testdescs): - if self.options.random: - random.shuffle(testdescs) - else: - # keywords for slow tests - slow = {b'svn': 10, - b'cvs': 10, - b'hghave': 10, - b'largefiles-update': 10, - b'run-tests': 10, - b'corruption': 10, - b'race': 10, - b'i18n': 10, - b'check': 100, - b'gendoc': 100, - b'contrib-perf': 200, - } - perf = {} - def sortkey(f): - # run largest tests first, as they tend to take the longest - f = f['path'] - try: - return perf[f] - except KeyError: - try: - val = -os.stat(f).st_size - except OSError as e: - if e.errno != errno.ENOENT: - raise - perf[f] = -1e9 # file does not exist, tell early - return -1e9 - for kw, mul in slow.items(): - if kw in f: - val *= mul - if f.endswith(b'.py'): - val /= 10.0 - perf[f] = val / 1000.0 - return perf[f] - 
testdescs.sort(key=sortkey) + sorttests(testdescs, shuffle=self.options.random) self._testdir = osenvironb[b'TESTDIR'] = getattr( os, 'getcwdb', os.getcwd)() + # assume all tests in same folder for now + if testdescs: + pathname = os.path.dirname(testdescs[0]['path']) + if pathname: + osenvironb[b'TESTDIR'] = os.path.join(osenvironb[b'TESTDIR'], + pathname) if self.options.outputdir: self._outputdir = canonpath(_bytespath(self.options.outputdir)) else: self._outputdir = self._testdir + if testdescs and pathname: + self._outputdir = os.path.join(self._outputdir, pathname) if 'PYTHONHASHSEED' not in os.environ: # use a random python hash seed all the time @@ -2381,11 +2422,6 @@ print("error: temp dir %r already exists" % tmpdir) return 1 - # Automatically removing tmpdir sounds convenient, but could - # really annoy anyone in the habit of using "--tmpdir=/tmp" - # or "--tmpdir=$HOME". - #vlog("# Removing temp dir", tmpdir) - #shutil.rmtree(tmpdir) os.makedirs(tmpdir) else: d = None @@ -2407,12 +2443,27 @@ self._tmpbindir = os.path.join(self._hgtmp, b'install', b'bin') os.makedirs(self._tmpbindir) - # This looks redundant with how Python initializes sys.path from - # the location of the script being executed. Needed because the - # "hg" specified by --with-hg is not the only Python script - # executed in the test suite that needs to import 'mercurial' - # ... which means it's not really redundant at all. - self._pythondir = self._bindir + normbin = os.path.normpath(os.path.abspath(whg)) + normbin = normbin.replace(os.sep.encode('ascii'), b'/') + + # Other Python scripts in the test harness need to + # `import mercurial`. If `hg` is a Python script, we assume + # the Mercurial modules are relative to its path and tell the tests + # to load Python modules from its directory. + with open(whg, 'rb') as fh: + initial = fh.read(1024) + + if re.match(b'#!.*python', initial): + self._pythondir = self._bindir + # If it looks like our in-repo Rust binary, use the source root. 
+ # This is a bit hacky. But rhg is still not supported outside the + # source directory. So until it is, do the simple thing. + elif re.search(b'/rust/target/[^/]+/hg', normbin): + self._pythondir = os.path.dirname(self._testdir) + # Fall back to the legacy behavior. + else: + self._pythondir = self._bindir + else: self._installdir = os.path.join(self._hgtmp, b"install") self._bindir = os.path.join(self._installdir, b"bin") @@ -2484,6 +2535,23 @@ self._coveragefile = os.path.join(self._testdir, b'.coverage') + if self.options.exceptions: + exceptionsdir = os.path.join(self._outputdir, b'exceptions') + try: + os.makedirs(exceptionsdir) + except OSError as e: + if e.errno != errno.EEXIST: + raise + + # Remove all existing exception reports. + for f in os.listdir(exceptionsdir): + os.unlink(os.path.join(exceptionsdir, f)) + + osenvironb[b'HGEXCEPTIONSDIR'] = exceptionsdir + logexceptions = os.path.join(self._testdir, b'logexceptions.py') + self.options.extra_config_opt.append( + 'extensions.logexceptions=%s' % logexceptions.decode('utf-8')) + vlog("# Using TESTDIR", self._testdir) vlog("# Using RUNTESTDIR", osenvironb[b'RUNTESTDIR']) vlog("# Using HGTMP", self._hgtmp) @@ -2512,6 +2580,16 @@ else: args = os.listdir(b'.') + expanded_args = [] + for arg in args: + if os.path.isdir(arg): + if not arg.endswith(b'/'): + arg += b'/' + expanded_args.extend([arg + a for a in os.listdir(arg)]) + else: + expanded_args.append(arg) + args = expanded_args + tests = [] for t in args: if not (os.path.basename(t).startswith(b'test-') @@ -2767,13 +2845,12 @@ if e.errno != errno.ENOENT: raise else: - f = open(installerrs, 'rb') - for line in f: - if PYTHON3: - sys.stdout.buffer.write(line) - else: - sys.stdout.write(line) - f.close() + with open(installerrs, 'rb') as f: + for line in f: + if PYTHON3: + sys.stdout.buffer.write(line) + else: + sys.stdout.write(line) sys.exit(1) os.chdir(self._testdir) @@ -2781,28 +2858,24 @@ if self.options.py3k_warnings and not self.options.anycoverage: 
vlog("# Updating hg command to enable Py3k Warnings switch") - f = open(os.path.join(self._bindir, 'hg'), 'rb') - lines = [line.rstrip() for line in f] - lines[0] += ' -3' - f.close() - f = open(os.path.join(self._bindir, 'hg'), 'wb') - for line in lines: - f.write(line + '\n') - f.close() + with open(os.path.join(self._bindir, 'hg'), 'rb') as f: + lines = [line.rstrip() for line in f] + lines[0] += ' -3' + with open(os.path.join(self._bindir, 'hg'), 'wb') as f: + for line in lines: + f.write(line + '\n') hgbat = os.path.join(self._bindir, b'hg.bat') if os.path.isfile(hgbat): # hg.bat expects to be put in bin/scripts while run-tests.py # installation layout put it in bin/ directly. Fix it - f = open(hgbat, 'rb') - data = f.read() - f.close() + with open(hgbat, 'rb') as f: + data = f.read() if b'"%~dp0..\python" "%~dp0hg" %*' in data: data = data.replace(b'"%~dp0..\python" "%~dp0hg" %*', b'"%~dp0python" "%~dp0hg" %*') - f = open(hgbat, 'wb') - f.write(data) - f.close() + with open(hgbat, 'wb') as f: + f.write(data) else: print('WARNING: cannot fix hg.bat reference to python.exe') @@ -2927,6 +3000,24 @@ print("WARNING: Did not find prerequisite tool: %s " % p.decode("utf-8")) +def aggregateexceptions(path): + exceptions = collections.Counter() + + for f in os.listdir(path): + with open(os.path.join(path, f), 'rb') as fh: + data = fh.read().split(b'\0') + if len(data) != 4: + continue + + exc, mainframe, hgframe, hgline = data + exc = exc.decode('utf-8') + mainframe = mainframe.decode('utf-8') + hgframe = hgframe.decode('utf-8') + hgline = hgline.decode('utf-8') + exceptions[(hgframe, hgline, exc)] += 1 + + return exceptions + if __name__ == '__main__': runner = TestRunner() diff -r 87676e8ee056 -r 27b6df1b5adb tests/seq.py --- a/tests/seq.py Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/seq.py Mon Jan 22 17:53:02 2018 -0500 @@ -10,6 +10,9 @@ from __future__ import absolute_import, print_function import sys +if sys.version_info[0] >= 3: + xrange = range + start = 1 if 
len(sys.argv) > 2: start = int(sys.argv[1]) diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-acl.t --- a/tests/test-acl.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-acl.t Mon Jan 22 17:53:02 2018 -0500 @@ -93,14 +93,14 @@ f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd 911600dab2ae7a9baff75958b84fe606851ce955 bundle2-output-bundle: "HG20", 5 parts total - bundle2-output-part: "replycaps" 168 bytes payload + bundle2-output-part: "replycaps" 188 bytes payload bundle2-output-part: "check:phases" 24 bytes payload bundle2-output-part: "check:heads" streamed payload bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload bundle2-output-part: "phase-heads" 24 bytes payload bundle2-input-bundle: with-transaction bundle2-input-part: "replycaps" supported - bundle2-input-part: total payload size 168 + bundle2-input-part: total payload size 188 bundle2-input-part: "check:phases" supported bundle2-input-part: total payload size 24 bundle2-input-part: "check:heads" supported @@ -156,14 +156,14 @@ f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd 911600dab2ae7a9baff75958b84fe606851ce955 bundle2-output-bundle: "HG20", 5 parts total - bundle2-output-part: "replycaps" 168 bytes payload + bundle2-output-part: "replycaps" 188 bytes payload bundle2-output-part: "check:phases" 24 bytes payload bundle2-output-part: "check:heads" streamed payload bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload bundle2-output-part: "phase-heads" 24 bytes payload bundle2-input-bundle: with-transaction bundle2-input-part: "replycaps" supported - bundle2-input-part: total payload size 168 + bundle2-input-part: total payload size 188 bundle2-input-part: "check:phases" supported bundle2-input-part: total payload size 24 bundle2-input-part: "check:heads" supported @@ -222,14 +222,14 @@ f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd 911600dab2ae7a9baff75958b84fe606851ce955 bundle2-output-bundle: "HG20", 5 parts total - bundle2-output-part: "replycaps" 168 bytes payload + 
bundle2-output-part: "replycaps" 188 bytes payload bundle2-output-part: "check:phases" 24 bytes payload bundle2-output-part: "check:heads" streamed payload bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload bundle2-output-part: "phase-heads" 24 bytes payload bundle2-input-bundle: with-transaction bundle2-input-part: "replycaps" supported - bundle2-input-part: total payload size 168 + bundle2-input-part: total payload size 188 bundle2-input-part: "check:phases" supported bundle2-input-part: total payload size 24 bundle2-input-part: "check:heads" supported @@ -298,14 +298,14 @@ f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd 911600dab2ae7a9baff75958b84fe606851ce955 bundle2-output-bundle: "HG20", 5 parts total - bundle2-output-part: "replycaps" 168 bytes payload + bundle2-output-part: "replycaps" 188 bytes payload bundle2-output-part: "check:phases" 24 bytes payload bundle2-output-part: "check:heads" streamed payload bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload bundle2-output-part: "phase-heads" 24 bytes payload bundle2-input-bundle: with-transaction bundle2-input-part: "replycaps" supported - bundle2-input-part: total payload size 168 + bundle2-input-part: total payload size 188 bundle2-input-part: "check:phases" supported bundle2-input-part: total payload size 24 bundle2-input-part: "check:heads" supported @@ -366,14 +366,14 @@ f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd 911600dab2ae7a9baff75958b84fe606851ce955 bundle2-output-bundle: "HG20", 5 parts total - bundle2-output-part: "replycaps" 168 bytes payload + bundle2-output-part: "replycaps" 188 bytes payload bundle2-output-part: "check:phases" 24 bytes payload bundle2-output-part: "check:heads" streamed payload bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload bundle2-output-part: "phase-heads" 24 bytes payload bundle2-input-bundle: with-transaction bundle2-input-part: "replycaps" supported - bundle2-input-part: total payload size 168 + 
bundle2-input-part: total payload size 188 bundle2-input-part: "check:phases" supported bundle2-input-part: total payload size 24 bundle2-input-part: "check:heads" supported @@ -439,14 +439,14 @@ f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd 911600dab2ae7a9baff75958b84fe606851ce955 bundle2-output-bundle: "HG20", 5 parts total - bundle2-output-part: "replycaps" 168 bytes payload + bundle2-output-part: "replycaps" 188 bytes payload bundle2-output-part: "check:phases" 24 bytes payload bundle2-output-part: "check:heads" streamed payload bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload bundle2-output-part: "phase-heads" 24 bytes payload bundle2-input-bundle: with-transaction bundle2-input-part: "replycaps" supported - bundle2-input-part: total payload size 168 + bundle2-input-part: total payload size 188 bundle2-input-part: "check:phases" supported bundle2-input-part: total payload size 24 bundle2-input-part: "check:heads" supported @@ -509,14 +509,14 @@ f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd 911600dab2ae7a9baff75958b84fe606851ce955 bundle2-output-bundle: "HG20", 5 parts total - bundle2-output-part: "replycaps" 168 bytes payload + bundle2-output-part: "replycaps" 188 bytes payload bundle2-output-part: "check:phases" 24 bytes payload bundle2-output-part: "check:heads" streamed payload bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload bundle2-output-part: "phase-heads" 24 bytes payload bundle2-input-bundle: with-transaction bundle2-input-part: "replycaps" supported - bundle2-input-part: total payload size 168 + bundle2-input-part: total payload size 188 bundle2-input-part: "check:phases" supported bundle2-input-part: total payload size 24 bundle2-input-part: "check:heads" supported @@ -584,14 +584,14 @@ f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd 911600dab2ae7a9baff75958b84fe606851ce955 bundle2-output-bundle: "HG20", 5 parts total - bundle2-output-part: "replycaps" 168 bytes payload + bundle2-output-part: "replycaps" 188 bytes 
payload bundle2-output-part: "check:phases" 24 bytes payload bundle2-output-part: "check:heads" streamed payload bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload bundle2-output-part: "phase-heads" 24 bytes payload bundle2-input-bundle: with-transaction bundle2-input-part: "replycaps" supported - bundle2-input-part: total payload size 168 + bundle2-input-part: total payload size 188 bundle2-input-part: "check:phases" supported bundle2-input-part: total payload size 24 bundle2-input-part: "check:heads" supported @@ -656,14 +656,14 @@ f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd 911600dab2ae7a9baff75958b84fe606851ce955 bundle2-output-bundle: "HG20", 5 parts total - bundle2-output-part: "replycaps" 168 bytes payload + bundle2-output-part: "replycaps" 188 bytes payload bundle2-output-part: "check:phases" 24 bytes payload bundle2-output-part: "check:heads" streamed payload bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload bundle2-output-part: "phase-heads" 24 bytes payload bundle2-input-bundle: with-transaction bundle2-input-part: "replycaps" supported - bundle2-input-part: total payload size 168 + bundle2-input-part: total payload size 188 bundle2-input-part: "check:phases" supported bundle2-input-part: total payload size 24 bundle2-input-part: "check:heads" supported @@ -730,14 +730,14 @@ f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd 911600dab2ae7a9baff75958b84fe606851ce955 bundle2-output-bundle: "HG20", 5 parts total - bundle2-output-part: "replycaps" 168 bytes payload + bundle2-output-part: "replycaps" 188 bytes payload bundle2-output-part: "check:phases" 24 bytes payload bundle2-output-part: "check:heads" streamed payload bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload bundle2-output-part: "phase-heads" 24 bytes payload bundle2-input-bundle: with-transaction bundle2-input-part: "replycaps" supported - bundle2-input-part: total payload size 168 + bundle2-input-part: total payload size 188 
bundle2-input-part: "check:phases" supported bundle2-input-part: total payload size 24 bundle2-input-part: "check:heads" supported @@ -813,14 +813,14 @@ f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd 911600dab2ae7a9baff75958b84fe606851ce955 bundle2-output-bundle: "HG20", 5 parts total - bundle2-output-part: "replycaps" 168 bytes payload + bundle2-output-part: "replycaps" 188 bytes payload bundle2-output-part: "check:phases" 24 bytes payload bundle2-output-part: "check:heads" streamed payload bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload bundle2-output-part: "phase-heads" 24 bytes payload bundle2-input-bundle: with-transaction bundle2-input-part: "replycaps" supported - bundle2-input-part: total payload size 168 + bundle2-input-part: total payload size 188 bundle2-input-part: "check:phases" supported bundle2-input-part: total payload size 24 bundle2-input-part: "check:heads" supported @@ -894,14 +894,14 @@ f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd 911600dab2ae7a9baff75958b84fe606851ce955 bundle2-output-bundle: "HG20", 5 parts total - bundle2-output-part: "replycaps" 168 bytes payload + bundle2-output-part: "replycaps" 188 bytes payload bundle2-output-part: "check:phases" 24 bytes payload bundle2-output-part: "check:heads" streamed payload bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload bundle2-output-part: "phase-heads" 24 bytes payload bundle2-input-bundle: with-transaction bundle2-input-part: "replycaps" supported - bundle2-input-part: total payload size 168 + bundle2-input-part: total payload size 188 bundle2-input-part: "check:phases" supported bundle2-input-part: total payload size 24 bundle2-input-part: "check:heads" supported @@ -925,7 +925,7 @@ bundle2-input-bundle: 4 parts total transaction abort! 
rollback completed - abort: No such file or directory: ../acl.config + abort: $ENOENT$: ../acl.config no rollback information available 0:6675d58eff77 @@ -970,14 +970,14 @@ f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd 911600dab2ae7a9baff75958b84fe606851ce955 bundle2-output-bundle: "HG20", 5 parts total - bundle2-output-part: "replycaps" 168 bytes payload + bundle2-output-part: "replycaps" 188 bytes payload bundle2-output-part: "check:phases" 24 bytes payload bundle2-output-part: "check:heads" streamed payload bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload bundle2-output-part: "phase-heads" 24 bytes payload bundle2-input-bundle: with-transaction bundle2-input-part: "replycaps" supported - bundle2-input-part: total payload size 168 + bundle2-input-part: total payload size 188 bundle2-input-part: "check:phases" supported bundle2-input-part: total payload size 24 bundle2-input-part: "check:heads" supported @@ -1057,14 +1057,14 @@ f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd 911600dab2ae7a9baff75958b84fe606851ce955 bundle2-output-bundle: "HG20", 5 parts total - bundle2-output-part: "replycaps" 168 bytes payload + bundle2-output-part: "replycaps" 188 bytes payload bundle2-output-part: "check:phases" 24 bytes payload bundle2-output-part: "check:heads" streamed payload bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload bundle2-output-part: "phase-heads" 24 bytes payload bundle2-input-bundle: with-transaction bundle2-input-part: "replycaps" supported - bundle2-input-part: total payload size 168 + bundle2-input-part: total payload size 188 bundle2-input-part: "check:phases" supported bundle2-input-part: total payload size 24 bundle2-input-part: "check:heads" supported @@ -1143,14 +1143,14 @@ f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd 911600dab2ae7a9baff75958b84fe606851ce955 bundle2-output-bundle: "HG20", 5 parts total - bundle2-output-part: "replycaps" 168 bytes payload + bundle2-output-part: "replycaps" 188 bytes payload 
bundle2-output-part: "check:phases" 24 bytes payload bundle2-output-part: "check:heads" streamed payload bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload bundle2-output-part: "phase-heads" 24 bytes payload bundle2-input-bundle: with-transaction bundle2-input-part: "replycaps" supported - bundle2-input-part: total payload size 168 + bundle2-input-part: total payload size 188 bundle2-input-part: "check:phases" supported bundle2-input-part: total payload size 24 bundle2-input-part: "check:heads" supported @@ -1225,14 +1225,14 @@ f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd 911600dab2ae7a9baff75958b84fe606851ce955 bundle2-output-bundle: "HG20", 5 parts total - bundle2-output-part: "replycaps" 168 bytes payload + bundle2-output-part: "replycaps" 188 bytes payload bundle2-output-part: "check:phases" 24 bytes payload bundle2-output-part: "check:heads" streamed payload bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload bundle2-output-part: "phase-heads" 24 bytes payload bundle2-input-bundle: with-transaction bundle2-input-part: "replycaps" supported - bundle2-input-part: total payload size 168 + bundle2-input-part: total payload size 188 bundle2-input-part: "check:phases" supported bundle2-input-part: total payload size 24 bundle2-input-part: "check:heads" supported @@ -1304,14 +1304,14 @@ f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd 911600dab2ae7a9baff75958b84fe606851ce955 bundle2-output-bundle: "HG20", 5 parts total - bundle2-output-part: "replycaps" 168 bytes payload + bundle2-output-part: "replycaps" 188 bytes payload bundle2-output-part: "check:phases" 24 bytes payload bundle2-output-part: "check:heads" streamed payload bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload bundle2-output-part: "phase-heads" 24 bytes payload bundle2-input-bundle: with-transaction bundle2-input-part: "replycaps" supported - bundle2-input-part: total payload size 168 + bundle2-input-part: total payload size 188 
bundle2-input-part: "check:phases" supported bundle2-input-part: total payload size 24 bundle2-input-part: "check:heads" supported @@ -1387,14 +1387,14 @@ f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd 911600dab2ae7a9baff75958b84fe606851ce955 bundle2-output-bundle: "HG20", 5 parts total - bundle2-output-part: "replycaps" 168 bytes payload + bundle2-output-part: "replycaps" 188 bytes payload bundle2-output-part: "check:phases" 24 bytes payload bundle2-output-part: "check:heads" streamed payload bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload bundle2-output-part: "phase-heads" 24 bytes payload bundle2-input-bundle: with-transaction bundle2-input-part: "replycaps" supported - bundle2-input-part: total payload size 168 + bundle2-input-part: total payload size 188 bundle2-input-part: "check:phases" supported bundle2-input-part: total payload size 24 bundle2-input-part: "check:heads" supported @@ -1507,14 +1507,14 @@ 911600dab2ae7a9baff75958b84fe606851ce955 e8fc755d4d8217ee5b0c2bb41558c40d43b92c01 bundle2-output-bundle: "HG20", 5 parts total - bundle2-output-part: "replycaps" 168 bytes payload + bundle2-output-part: "replycaps" 188 bytes payload bundle2-output-part: "check:phases" 48 bytes payload bundle2-output-part: "check:heads" streamed payload bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload bundle2-output-part: "phase-heads" 48 bytes payload bundle2-input-bundle: with-transaction bundle2-input-part: "replycaps" supported - bundle2-input-part: total payload size 168 + bundle2-input-part: total payload size 188 bundle2-input-part: "check:phases" supported bundle2-input-part: total payload size 48 bundle2-input-part: "check:heads" supported @@ -1591,14 +1591,14 @@ 911600dab2ae7a9baff75958b84fe606851ce955 e8fc755d4d8217ee5b0c2bb41558c40d43b92c01 bundle2-output-bundle: "HG20", 5 parts total - bundle2-output-part: "replycaps" 168 bytes payload + bundle2-output-part: "replycaps" 188 bytes payload bundle2-output-part: 
"check:phases" 48 bytes payload bundle2-output-part: "check:heads" streamed payload bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload bundle2-output-part: "phase-heads" 48 bytes payload bundle2-input-bundle: with-transaction bundle2-input-part: "replycaps" supported - bundle2-input-part: total payload size 168 + bundle2-input-part: total payload size 188 bundle2-input-part: "check:phases" supported bundle2-input-part: total payload size 48 bundle2-input-part: "check:heads" supported @@ -1668,14 +1668,14 @@ 911600dab2ae7a9baff75958b84fe606851ce955 e8fc755d4d8217ee5b0c2bb41558c40d43b92c01 bundle2-output-bundle: "HG20", 5 parts total - bundle2-output-part: "replycaps" 168 bytes payload + bundle2-output-part: "replycaps" 188 bytes payload bundle2-output-part: "check:phases" 48 bytes payload bundle2-output-part: "check:heads" streamed payload bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload bundle2-output-part: "phase-heads" 48 bytes payload bundle2-input-bundle: with-transaction bundle2-input-part: "replycaps" supported - bundle2-input-part: total payload size 168 + bundle2-input-part: total payload size 188 bundle2-input-part: "check:phases" supported bundle2-input-part: total payload size 48 bundle2-input-part: "check:heads" supported @@ -1741,14 +1741,14 @@ 911600dab2ae7a9baff75958b84fe606851ce955 e8fc755d4d8217ee5b0c2bb41558c40d43b92c01 bundle2-output-bundle: "HG20", 5 parts total - bundle2-output-part: "replycaps" 168 bytes payload + bundle2-output-part: "replycaps" 188 bytes payload bundle2-output-part: "check:phases" 48 bytes payload bundle2-output-part: "check:heads" streamed payload bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload bundle2-output-part: "phase-heads" 48 bytes payload bundle2-input-bundle: with-transaction bundle2-input-part: "replycaps" supported - bundle2-input-part: total payload size 168 + bundle2-input-part: total payload size 188 bundle2-input-part: "check:phases" 
supported bundle2-input-part: total payload size 48 bundle2-input-part: "check:heads" supported @@ -1808,14 +1808,14 @@ 911600dab2ae7a9baff75958b84fe606851ce955 e8fc755d4d8217ee5b0c2bb41558c40d43b92c01 bundle2-output-bundle: "HG20", 5 parts total - bundle2-output-part: "replycaps" 168 bytes payload + bundle2-output-part: "replycaps" 188 bytes payload bundle2-output-part: "check:phases" 48 bytes payload bundle2-output-part: "check:heads" streamed payload bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload bundle2-output-part: "phase-heads" 48 bytes payload bundle2-input-bundle: with-transaction bundle2-input-part: "replycaps" supported - bundle2-input-part: total payload size 168 + bundle2-input-part: total payload size 188 bundle2-input-part: "check:phases" supported bundle2-input-part: total payload size 48 bundle2-input-part: "check:heads" supported @@ -1897,14 +1897,14 @@ 911600dab2ae7a9baff75958b84fe606851ce955 e8fc755d4d8217ee5b0c2bb41558c40d43b92c01 bundle2-output-bundle: "HG20", 5 parts total - bundle2-output-part: "replycaps" 168 bytes payload + bundle2-output-part: "replycaps" 188 bytes payload bundle2-output-part: "check:phases" 48 bytes payload bundle2-output-part: "check:heads" streamed payload bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload bundle2-output-part: "phase-heads" 48 bytes payload bundle2-input-bundle: with-transaction bundle2-input-part: "replycaps" supported - bundle2-input-part: total payload size 168 + bundle2-input-part: total payload size 188 bundle2-input-part: "check:phases" supported bundle2-input-part: total payload size 48 bundle2-input-part: "check:heads" supported @@ -1985,14 +1985,14 @@ 911600dab2ae7a9baff75958b84fe606851ce955 e8fc755d4d8217ee5b0c2bb41558c40d43b92c01 bundle2-output-bundle: "HG20", 5 parts total - bundle2-output-part: "replycaps" 168 bytes payload + bundle2-output-part: "replycaps" 188 bytes payload bundle2-output-part: "check:phases" 48 bytes payload 
bundle2-output-part: "check:heads" streamed payload bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload bundle2-output-part: "phase-heads" 48 bytes payload bundle2-input-bundle: with-transaction bundle2-input-part: "replycaps" supported - bundle2-input-part: total payload size 168 + bundle2-input-part: total payload size 188 bundle2-input-part: "check:phases" supported bundle2-input-part: total payload size 48 bundle2-input-part: "check:heads" supported @@ -2057,14 +2057,14 @@ 911600dab2ae7a9baff75958b84fe606851ce955 e8fc755d4d8217ee5b0c2bb41558c40d43b92c01 bundle2-output-bundle: "HG20", 5 parts total - bundle2-output-part: "replycaps" 168 bytes payload + bundle2-output-part: "replycaps" 188 bytes payload bundle2-output-part: "check:phases" 48 bytes payload bundle2-output-part: "check:heads" streamed payload bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload bundle2-output-part: "phase-heads" 48 bytes payload bundle2-input-bundle: with-transaction bundle2-input-part: "replycaps" supported - bundle2-input-part: total payload size 168 + bundle2-input-part: total payload size 188 bundle2-input-part: "check:phases" supported bundle2-input-part: total payload size 48 bundle2-input-part: "check:heads" supported @@ -2139,14 +2139,14 @@ 911600dab2ae7a9baff75958b84fe606851ce955 e8fc755d4d8217ee5b0c2bb41558c40d43b92c01 bundle2-output-bundle: "HG20", 5 parts total - bundle2-output-part: "replycaps" 168 bytes payload + bundle2-output-part: "replycaps" 188 bytes payload bundle2-output-part: "check:phases" 48 bytes payload bundle2-output-part: "check:heads" streamed payload bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload bundle2-output-part: "phase-heads" 48 bytes payload bundle2-input-bundle: with-transaction bundle2-input-part: "replycaps" supported - bundle2-input-part: total payload size 168 + bundle2-input-part: total payload size 188 bundle2-input-part: "check:phases" supported bundle2-input-part: total 
payload size 48 bundle2-input-part: "check:heads" supported diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-add.t --- a/tests/test-add.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-add.t Mon Jan 22 17:53:02 2018 -0500 @@ -104,7 +104,7 @@ merging a warning: conflicts while merging a! (edit, then use 'hg resolve --mark') 0 files updated, 0 files merged, 0 files removed, 1 files unresolved - use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon + use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon [1] $ hg st M a @@ -197,17 +197,17 @@ $ echo def > CapsDir1/CapsDir/SubDir/Def.txt $ hg add capsdir1/capsdir - adding CapsDir1/CapsDir/AbC.txt (glob) - adding CapsDir1/CapsDir/SubDir/Def.txt (glob) + adding CapsDir1/CapsDir/AbC.txt + adding CapsDir1/CapsDir/SubDir/Def.txt $ hg forget capsdir1/capsdir/abc.txt $ hg forget capsdir1/capsdir - removing CapsDir1/CapsDir/SubDir/Def.txt (glob) + removing CapsDir1/CapsDir/SubDir/Def.txt $ hg add capsdir1 - adding CapsDir1/CapsDir/AbC.txt (glob) - adding CapsDir1/CapsDir/SubDir/Def.txt (glob) + adding CapsDir1/CapsDir/AbC.txt + adding CapsDir1/CapsDir/SubDir/Def.txt $ hg ci -m "AbCDef" capsdir1/capsdir @@ -216,14 +216,14 @@ C CapsDir1/CapsDir/SubDir/Def.txt $ hg files capsdir1/capsdir - CapsDir1/CapsDir/AbC.txt (glob) - CapsDir1/CapsDir/SubDir/Def.txt (glob) + CapsDir1/CapsDir/AbC.txt + CapsDir1/CapsDir/SubDir/Def.txt $ echo xyz > CapsDir1/CapsDir/SubDir/Def.txt $ hg ci -m xyz capsdir1/capsdir/subdir/def.txt $ hg revert -r '.^' capsdir1/capsdir - reverting CapsDir1/CapsDir/SubDir/Def.txt (glob) + reverting CapsDir1/CapsDir/SubDir/Def.txt The conditional tests above mean the hash on the diff line differs on Windows and OS X @@ -244,8 +244,8 @@ $ hg remove -f 'glob:**.txt' -X capsdir1/capsdir $ hg remove -f 'glob:**.txt' -I capsdir1/capsdir - removing CapsDir1/CapsDir/ABC.txt (glob) - removing CapsDir1/CapsDir/SubDir/Def.txt (glob) + removing CapsDir1/CapsDir/ABC.txt + removing 
CapsDir1/CapsDir/SubDir/Def.txt #endif $ cd .. diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-addremove-similar.t --- a/tests/test-addremove-similar.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-addremove-similar.t Mon Jan 22 17:53:02 2018 -0500 @@ -153,7 +153,7 @@ $ hg addremove -s80 removing d/a adding d/b - recording removal of d/a as rename to d/b (100% similar) (glob) + recording removal of d/a as rename to d/b (100% similar) $ hg debugstate r 0 0 1970-01-01 00:00:00 d/a a 0 -1 unset d/b @@ -163,12 +163,12 @@ no copies found here (since the target isn't in d $ hg addremove -s80 d - removing d/b (glob) + removing d/b copies here $ hg addremove -s80 adding c - recording removal of d/a as rename to c (100% similar) (glob) + recording removal of d/a as rename to c (100% similar) $ cd .. diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-addremove.t --- a/tests/test-addremove.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-addremove.t Mon Jan 22 17:53:02 2018 -0500 @@ -31,8 +31,7 @@ $ hg forget foo $ hg -v addremove nonexistent - nonexistent: The system cannot find the file specified (windows !) - nonexistent: No such file or directory (no-windows !) + nonexistent: $ENOENT$ [1] $ cd .. @@ -86,8 +85,7 @@ $ rm c $ hg ci -A -m "c" nonexistent - nonexistent: The system cannot find the file specified (windows !) - nonexistent: No such file or directory (no-windows !) 
+ nonexistent: $ENOENT$ abort: failed to mark all new/missing files as added/removed [255] diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-alias.t --- a/tests/test-alias.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-alias.t Mon Jan 22 17:53:02 2018 -0500 @@ -119,6 +119,12 @@ $ hg help noclosing error in definition for alias 'noclosingquotation': No closing quotation +"--" in alias definition should be preserved + + $ hg --config alias.dash='cat --' -R alias dash -r0 + abort: -r0 not under root '$TESTTMP/alias' + (consider using '--cwd alias') + [255] invalid options @@ -148,6 +154,12 @@ $ hg no--config abort: error in definition for alias 'no--config': --config may only be given on the command line [255] + $ hg no --config alias.no='--repo elsewhere --cwd elsewhere status' + abort: error in definition for alias 'no': --repo/--cwd may only be given on the command line + [255] + $ hg no --config alias.no='--repo elsewhere' + abort: error in definition for alias 'no': --repo may only be given on the command line + [255] optional repository @@ -351,6 +363,10 @@ $ hg echoall --cwd .. +"--" passed to shell alias should be preserved + + $ hg --config alias.printf='!printf "$@"' printf '%s %s %s\n' -- --cwd .. + -- --cwd .. repo specific shell aliases diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-amend-subrepo.t --- a/tests/test-amend-subrepo.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-amend-subrepo.t Mon Jan 22 17:53:02 2018 -0500 @@ -58,7 +58,7 @@ $ echo a >> s/a $ hg add -R s - adding s/a (glob) + adding s/a $ hg amend abort: uncommitted changes in subrepository "s" (use --subrepos for recursive commit) diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-amend.t --- a/tests/test-amend.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-amend.t Mon Jan 22 17:53:02 2018 -0500 @@ -29,7 +29,7 @@ $ echo 2 >> B $ hg amend - saved backup bundle to $TESTTMP/repo1/.hg/strip-backup/112478962961-7e959a55-amend.hg (glob) (obsstore-off !) 
+ saved backup bundle to $TESTTMP/repo1/.hg/strip-backup/112478962961-7e959a55-amend.hg (obsstore-off !) #if obsstore-off $ hg log -p -G --hidden -T '{rev} {node|short} {desc}\n' @ 1 be169c7e8dbe B @@ -99,13 +99,13 @@ $ echo 4 > D $ hg add C D $ hg amend -m NEWMESSAGE -I C - saved backup bundle to $TESTTMP/repo1/.hg/strip-backup/be169c7e8dbe-7684ddc5-amend.hg (glob) (obsstore-off !) + saved backup bundle to $TESTTMP/repo1/.hg/strip-backup/be169c7e8dbe-7684ddc5-amend.hg (obsstore-off !) $ hg log -r . -T '{node|short} {desc} {files}\n' c7ba14d9075b NEWMESSAGE B C $ echo 5 > E $ rm C $ hg amend -d '2000 1000' -u 'Foo ' -A C D - saved backup bundle to $TESTTMP/repo1/.hg/strip-backup/c7ba14d9075b-b3e76daa-amend.hg (glob) (obsstore-off !) + saved backup bundle to $TESTTMP/repo1/.hg/strip-backup/c7ba14d9075b-b3e76daa-amend.hg (obsstore-off !) $ hg log -r . -T '{node|short} {desc} {files} {author} {date}\n' 14f6c4bcc865 NEWMESSAGE B D Foo 2000.01000 @@ -119,11 +119,11 @@ $ chmod +x $TESTTMP/prefix.sh $ HGEDITOR="sh $TESTTMP/prefix.sh" hg amend --edit - saved backup bundle to $TESTTMP/repo1/.hg/strip-backup/14f6c4bcc865-6591f15d-amend.hg (glob) (obsstore-off !) + saved backup bundle to $TESTTMP/repo1/.hg/strip-backup/14f6c4bcc865-6591f15d-amend.hg (obsstore-off !) $ hg log -r . -T '{node|short} {desc}\n' 298f085230c3 EDITED: NEWMESSAGE $ HGEDITOR="sh $TESTTMP/prefix.sh" hg amend -e -m MSG - saved backup bundle to $TESTTMP/repo1/.hg/strip-backup/298f085230c3-d81a6ad3-amend.hg (glob) (obsstore-off !) + saved backup bundle to $TESTTMP/repo1/.hg/strip-backup/298f085230c3-d81a6ad3-amend.hg (obsstore-off !) $ hg log -r . -T '{node|short} {desc}\n' 974f07f28537 EDITED: MSG @@ -132,7 +132,7 @@ abort: options --message and --logfile are mutually exclusive [255] $ hg amend -l $TESTTMP/msg - saved backup bundle to $TESTTMP/repo1/.hg/strip-backup/974f07f28537-edb6470a-amend.hg (glob) (obsstore-off !) 
+ saved backup bundle to $TESTTMP/repo1/.hg/strip-backup/974f07f28537-edb6470a-amend.hg (obsstore-off !) $ hg log -r . -T '{node|short} {desc}\n' 507be9bdac71 FOO @@ -152,7 +152,7 @@ new file mode 100644 examine changes to 'G'? [Ynesfdaq?] n - saved backup bundle to $TESTTMP/repo1/.hg/strip-backup/507be9bdac71-c8077452-amend.hg (glob) (obsstore-off !) + saved backup bundle to $TESTTMP/repo1/.hg/strip-backup/507be9bdac71-c8077452-amend.hg (obsstore-off !) $ hg log -r . -T '{files}\n' B D F @@ -185,10 +185,11 @@ > EOF $ hg amend + 1 new orphan changesets $ hg log -T '{rev} {node|short} {desc}\n' -G @ 3 be169c7e8dbe B | - | o 2 26805aba1e60 C + | * 2 26805aba1e60 C | | | x 1 112478962961 B |/ @@ -203,8 +204,8 @@ [255] $ hg amend --note "adding bar" $ hg debugobsolete -r . - 112478962961147124edd43549aedd1a335e44bf be169c7e8dbe21cd10b3d79691cbe7f241e3c21c 0 (Thu Jan 01 00:00:00 1970 +0000) {'operation': 'amend', 'user': 'test'} - be169c7e8dbe21cd10b3d79691cbe7f241e3c21c 16084da537dd8f84cfdb3055c633772269d62e1b 0 (Thu Jan 01 00:00:00 1970 +0000) {'note': 'adding bar', 'operation': 'amend', 'user': 'test'} + 112478962961147124edd43549aedd1a335e44bf be169c7e8dbe21cd10b3d79691cbe7f241e3c21c 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '8', 'operation': 'amend', 'user': 'test'} + be169c7e8dbe21cd10b3d79691cbe7f241e3c21c 16084da537dd8f84cfdb3055c633772269d62e1b 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '8', 'note': 'adding bar', 'operation': 'amend', 'user': 'test'} #endif Cannot amend public changeset @@ -213,6 +214,7 @@ $ hg update -C -q A $ hg amend -m AMEND abort: cannot amend public changesets + (see 'hg help phases' for details) [255] Amend a merge changeset @@ -226,7 +228,7 @@ > EOS $ hg update -q C $ hg amend -m FOO - saved backup bundle to $TESTTMP/repo3/.hg/strip-backup/a35c07e8a2a4-15ff4612-amend.hg (glob) (obsstore-off !) + saved backup bundle to $TESTTMP/repo3/.hg/strip-backup/a35c07e8a2a4-15ff4612-amend.hg (obsstore-off !) 
$ rm .hg/localtags $ hg log -G -T '{desc}\n' @ FOO diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-annotate.t --- a/tests/test-annotate.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-annotate.t Mon Jan 22 17:53:02 2018 -0500 @@ -556,8 +556,8 @@ $ rm baz $ hg annotate -ncr "wdir()" baz - abort: $TESTTMP\repo\baz: The system cannot find the file specified (windows !) - abort: No such file or directory: $TESTTMP/repo/baz (no-windows !) + abort: $TESTTMP\repo\baz: $ENOENT$ (windows !) + abort: $ENOENT$: $TESTTMP/repo/baz (no-windows !) [255] annotate removed file @@ -565,8 +565,8 @@ $ hg rm baz $ hg annotate -ncr "wdir()" baz - abort: $TESTTMP\repo\baz: The system cannot find the file specified (windows !) - abort: No such file or directory: $TESTTMP/repo/baz (no-windows !) + abort: $TESTTMP\repo\baz: $ENOENT$ (windows !) + abort: $ENOENT$: $TESTTMP/repo/baz (no-windows !) [255] $ hg revert --all --no-backup --quiet diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-archive.t --- a/tests/test-archive.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-archive.t Mon Jan 22 17:53:02 2018 -0500 @@ -32,7 +32,7 @@ sharing subrepo subrepo from $TESTTMP/test/subrepo 5 files updated, 0 files merged, 0 files removed, 0 files unresolved $ cat shared1/subrepo/.hg/sharedpath - $TESTTMP/test/subrepo/.hg (no-eol) (glob) + $TESTTMP/test/subrepo/.hg (no-eol) hg subrepos are shared into existence on demand if the parent was shared @@ -45,7 +45,7 @@ sharing subrepo subrepo from $TESTTMP/test/subrepo 3 files updated, 0 files merged, 0 files removed, 0 files unresolved $ cat share2/subrepo/.hg/sharedpath - $TESTTMP/test/subrepo/.hg (no-eol) (glob) + $TESTTMP/test/subrepo/.hg (no-eol) $ echo 'mod' > share2/subrepo/sub $ hg -R share2 ci -Sqm 'subrepo mod' $ hg -R clone1 update -C tip @@ -79,7 +79,7 @@ $ hg -R shared3 archive --config ui.archivemeta=False -r tip -S archive sharing subrepo subrepo from $TESTTMP/test/subrepo $ cat shared3/subrepo/.hg/sharedpath - $TESTTMP/test/subrepo/.hg 
(no-eol) (glob) + $TESTTMP/test/subrepo/.hg (no-eol) $ diff -r archive test Only in test: .hg Common subdirectories: archive/baz and test/baz (?) diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-audit-path.t --- a/tests/test-audit-path.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-audit-path.t Mon Jan 22 17:53:02 2018 -0500 @@ -3,7 +3,7 @@ audit of .hg $ hg add .hg/00changelog.i - abort: path contains illegal component: .hg/00changelog.i (glob) + abort: path contains illegal component: .hg/00changelog.i [255] #if symlink @@ -17,14 +17,14 @@ $ ln -s a b $ echo b > a/b $ hg add b/b - abort: path 'b/b' traverses symbolic link 'b' (glob) + abort: path 'b/b' traverses symbolic link 'b' [255] $ hg add b should still fail - maybe $ hg add b/b - abort: path 'b/b' traverses symbolic link 'b' (glob) + abort: path 'b/b' traverses symbolic link 'b' [255] $ hg commit -m 'add symlink b' @@ -86,7 +86,7 @@ $ hg manifest -r0 .hg/test $ hg update -Cr0 - abort: path contains illegal component: .hg/test (glob) + abort: path contains illegal component: .hg/test [255] attack foo/.hg/test @@ -94,7 +94,7 @@ $ hg manifest -r1 foo/.hg/test $ hg update -Cr1 - abort: path 'foo/.hg/test' is inside nested repo 'foo' (glob) + abort: path 'foo/.hg/test' is inside nested repo 'foo' [255] attack back/test where back symlinks to .. @@ -121,7 +121,7 @@ $ mkdir ../test $ echo data > ../test/file $ hg update -Cr3 - abort: path contains illegal component: ../test (glob) + abort: path contains illegal component: ../test [255] $ cat ../test/file data @@ -131,7 +131,7 @@ $ hg manifest -r4 /tmp/test $ hg update -Cr4 - abort: path contains illegal component: /tmp/test (glob) + abort: path contains illegal component: /tmp/test [255] $ cd .. 
diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-audit-subrepo.t --- a/tests/test-audit-subrepo.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-audit-subrepo.t Mon Jan 22 17:53:02 2018 -0500 @@ -9,7 +9,7 @@ $ hg init sub/.hg $ echo 'sub/.hg = sub/.hg' >> .hgsub $ hg ci -qAm 'add subrepo "sub/.hg"' - abort: path 'sub/.hg' is inside nested repo 'sub' (glob) + abort: path 'sub/.hg' is inside nested repo 'sub' [255] prepare tampered repo (including the commit above): @@ -33,7 +33,7 @@ on clone (and update): $ hg clone -q hgname hgname2 - abort: path 'sub/.hg' is inside nested repo 'sub' (glob) + abort: path 'sub/.hg' is inside nested repo 'sub' [255] Test direct symlink traversal diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-basic.t --- a/tests/test-basic.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-basic.t Mon Jan 22 17:53:02 2018 -0500 @@ -4,7 +4,8 @@ devel.all-warnings=true devel.default-date=0 0 extensions.fsmonitor= (fsmonitor !) - largefiles.usercache=$TESTTMP/.cache/largefiles (glob) + largefiles.usercache=$TESTTMP/.cache/largefiles + lfs.usercache=$TESTTMP/.cache/lfs ui.slash=True ui.interactive=False ui.mergemarkers=detailed @@ -33,15 +34,7 @@ [255] #endif -#if devfull no-chg - $ hg status >/dev/full 2>&1 - [1] - - $ hg status ENOENT 2>/dev/full - [1] -#endif - -#if devfull chg +#if devfull $ hg status >/dev/full 2>&1 [255] diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-blackbox.t --- a/tests/test-blackbox.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-blackbox.t Mon Jan 22 17:53:02 2018 -0500 @@ -60,7 +60,7 @@ adding c $ cd ../blackboxtest2 $ hg pull - pulling from $TESTTMP/blackboxtest (glob) + pulling from $TESTTMP/blackboxtest searching for changes adding changesets adding manifests @@ -85,7 +85,7 @@ $ mkdir .hg/blackbox.log $ hg --debug incoming warning: cannot write to blackbox.log: * (glob) - comparing with $TESTTMP/blackboxtest (glob) + comparing with $TESTTMP/blackboxtest query 1; heads searching for changes all local heads known remotely 
@@ -104,7 +104,7 @@ $ hg pull - pulling from $TESTTMP/blackboxtest (glob) + pulling from $TESTTMP/blackboxtest searching for changes adding changesets adding manifests @@ -133,7 +133,7 @@ saved backup bundle to $TESTTMP/blackboxtest2/.hg/strip-backup/*-backup.hg (glob) $ hg blackbox -l 6 1970/01/01 00:00:00 bob @73f6ee326b27d820b0472f1a825e3a50f3dc489b (5000)> strip tip - 1970/01/01 00:00:00 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> saved backup bundle to $TESTTMP/blackboxtest2/.hg/strip-backup/73f6ee326b27-7612e004-backup.hg (glob) + 1970/01/01 00:00:00 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> saved backup bundle to $TESTTMP/blackboxtest2/.hg/strip-backup/73f6ee326b27-7612e004-backup.hg 1970/01/01 00:00:00 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> updated base branch cache in * seconds (glob) 1970/01/01 00:00:00 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> wrote base branch cache with 1 labels and 2 nodes 1970/01/01 00:00:00 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> strip tip exited 0 after * seconds (glob) @@ -193,37 +193,70 @@ > os.rename(".hg/blackbox.log-", ".hg/blackbox.log")\ > \1#' $TESTDIR/test-dispatch.py > ../test-dispatch.py $ $PYTHON $TESTDIR/blackbox-readonly-dispatch.py - running: add foo + running: --debug add foo + warning: cannot write to blackbox.log: Is a directory (no-windows !) + warning: cannot write to blackbox.log: $TESTTMP/blackboxtest3/.hg/blackbox.log: Access is denied (windows !) + adding foo result: 0 - running: commit -m commit1 -d 2000-01-01 foo + running: --debug commit -m commit1 -d 2000-01-01 foo + warning: cannot write to blackbox.log: Is a directory (no-windows !) + warning: cannot write to blackbox.log: $TESTTMP/blackboxtest3/.hg/blackbox.log: Access is denied (windows !) 
+ committing files: + foo + committing manifest + committing changelog + updating the branch cache + committed changeset 0:0e46349438790c460c5c9f7546bfcd39b267bbd2 result: None - running: commit -m commit2 -d 2000-01-02 foo + running: --debug commit -m commit2 -d 2000-01-02 foo + committing files: + foo + committing manifest + committing changelog + updating the branch cache + committed changeset 1:45589e459b2edfbf3dbde7e01f611d2c1e7453d7 result: None - running: log -r 0 - changeset: 0:0e4634943879 + running: --debug log -r 0 + changeset: 0:0e46349438790c460c5c9f7546bfcd39b267bbd2 + phase: draft + parent: -1:0000000000000000000000000000000000000000 + parent: -1:0000000000000000000000000000000000000000 + manifest: 0:9091aa5df980aea60860a2e39c95182e68d1ddec user: test date: Sat Jan 01 00:00:00 2000 +0000 - summary: commit1 + files+: foo + extra: branch=default + description: + commit1 + result: None - running: log -r tip - changeset: 1:45589e459b2e + running: --debug log -r tip + changeset: 1:45589e459b2edfbf3dbde7e01f611d2c1e7453d7 tag: tip + phase: draft + parent: 0:0e46349438790c460c5c9f7546bfcd39b267bbd2 + parent: -1:0000000000000000000000000000000000000000 + manifest: 1:895aa9b7886f89dd017a6d62524e1f9180b04df9 user: test date: Sun Jan 02 00:00:00 2000 +0000 - summary: commit2 + files: foo + extra: branch=default + description: + commit2 + result: None $ hg blackbox - 1970/01/01 00:00:00 bob @0e46349438790c460c5c9f7546bfcd39b267bbd2 (5000)> commit -m commit2 -d 2000-01-02 foo + 1970/01/01 00:00:00 bob @45589e459b2edfbf3dbde7e01f611d2c1e7453d7 (5000)> updating the branch cache 1970/01/01 00:00:00 bob @45589e459b2edfbf3dbde7e01f611d2c1e7453d7 (5000)> updated served branch cache in * seconds (glob) 1970/01/01 00:00:00 bob @45589e459b2edfbf3dbde7e01f611d2c1e7453d7 (5000)> wrote served branch cache with 1 labels and 1 nodes - 1970/01/01 00:00:00 bob @45589e459b2edfbf3dbde7e01f611d2c1e7453d7 (5000)> commit -m commit2 -d 2000-01-02 foo exited 0 after * seconds (glob) - 
1970/01/01 00:00:00 bob @45589e459b2edfbf3dbde7e01f611d2c1e7453d7 (5000)> log -r 0 + 1970/01/01 00:00:00 bob @45589e459b2edfbf3dbde7e01f611d2c1e7453d7 (5000)> --debug commit -m commit2 -d 2000-01-02 foo exited 0 after *.?? seconds (glob) + 1970/01/01 00:00:00 bob @45589e459b2edfbf3dbde7e01f611d2c1e7453d7 (5000)> --debug log -r 0 1970/01/01 00:00:00 bob @45589e459b2edfbf3dbde7e01f611d2c1e7453d7 (5000)> writing .hg/cache/tags2-visible with 0 tags - 1970/01/01 00:00:00 bob @45589e459b2edfbf3dbde7e01f611d2c1e7453d7 (5000)> log -r 0 exited 0 after * seconds (glob) - 1970/01/01 00:00:00 bob @45589e459b2edfbf3dbde7e01f611d2c1e7453d7 (5000)> log -r tip - 1970/01/01 00:00:00 bob @45589e459b2edfbf3dbde7e01f611d2c1e7453d7 (5000)> log -r tip exited 0 after * seconds (glob) + 1970/01/01 00:00:00 bob @45589e459b2edfbf3dbde7e01f611d2c1e7453d7 (5000)> --debug log -r 0 exited 0 after *.?? seconds (glob) + 1970/01/01 00:00:00 bob @45589e459b2edfbf3dbde7e01f611d2c1e7453d7 (5000)> --debug log -r tip + 1970/01/01 00:00:00 bob @45589e459b2edfbf3dbde7e01f611d2c1e7453d7 (5000)> --debug log -r tip exited 0 after *.?? 
seconds (glob) 1970/01/01 00:00:00 bob @45589e459b2edfbf3dbde7e01f611d2c1e7453d7 (5000)> blackbox Test log recursion from dirty status check diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-bookmarks-pushpull.t --- a/tests/test-bookmarks-pushpull.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-bookmarks-pushpull.t Mon Jan 22 17:53:02 2018 -0500 @@ -1,3 +1,12 @@ +#testcases b2-pushkey b2-binary + +#if b2-pushkey + $ cat << EOF >> $HGRCPATH + > [devel] + > legacy.exchange=bookmarks + > EOF +#endif + #require serve $ cat << EOF >> $HGRCPATH @@ -103,14 +112,222 @@ delete a remote bookmark $ hg book -d W - $ hg push -B W ../a --config "$TESTHOOK" + +#if b2-pushkey + + $ hg push -B W ../a --config "$TESTHOOK" --debug --config devel.bundle2.debug=yes pushing to ../a + query 1; heads searching for changes + all remote heads known locally + listing keys for "phases" + checking for updated bookmarks + listing keys for "bookmarks" no changes found + bundle2-output-bundle: "HG20", 4 parts total + bundle2-output: start emission of HG20 stream + bundle2-output: bundle parameter: + bundle2-output: start of parts + bundle2-output: bundle part: "replycaps" + bundle2-output-part: "replycaps" 205 bytes payload + bundle2-output: part 0: "REPLYCAPS" + bundle2-output: header chunk size: 16 + bundle2-output: payload chunk size: 205 + bundle2-output: closing payload chunk + bundle2-output: bundle part: "check:bookmarks" + bundle2-output-part: "check:bookmarks" 23 bytes payload + bundle2-output: part 1: "CHECK:BOOKMARKS" + bundle2-output: header chunk size: 22 + bundle2-output: payload chunk size: 23 + bundle2-output: closing payload chunk + bundle2-output: bundle part: "check:phases" + bundle2-output-part: "check:phases" 48 bytes payload + bundle2-output: part 2: "CHECK:PHASES" + bundle2-output: header chunk size: 19 + bundle2-output: payload chunk size: 48 + bundle2-output: closing payload chunk + bundle2-output: bundle part: "pushkey" + bundle2-output-part: "pushkey" (params: 4 
mandatory) empty payload + bundle2-output: part 3: "PUSHKEY" + bundle2-output: header chunk size: 90 + bundle2-output: closing payload chunk + bundle2-output: end of bundle + bundle2-input: start processing of HG20 stream + bundle2-input: reading bundle2 stream parameters + bundle2-input-bundle: with-transaction + bundle2-input: start extraction of bundle2 parts + bundle2-input: part header size: 16 + bundle2-input: part type: "REPLYCAPS" + bundle2-input: part id: "0" + bundle2-input: part parameters: 0 + bundle2-input: found a handler for part replycaps + bundle2-input-part: "replycaps" supported + bundle2-input: payload chunk size: 205 + bundle2-input: payload chunk size: 0 + bundle2-input-part: total payload size 205 + bundle2-input: part header size: 22 + bundle2-input: part type: "CHECK:BOOKMARKS" + bundle2-input: part id: "1" + bundle2-input: part parameters: 0 + bundle2-input: found a handler for part check:bookmarks + bundle2-input-part: "check:bookmarks" supported + bundle2-input: payload chunk size: 23 + bundle2-input: payload chunk size: 0 + bundle2-input-part: total payload size 23 + bundle2-input: part header size: 19 + bundle2-input: part type: "CHECK:PHASES" + bundle2-input: part id: "2" + bundle2-input: part parameters: 0 + bundle2-input: found a handler for part check:phases + bundle2-input-part: "check:phases" supported + bundle2-input: payload chunk size: 48 + bundle2-input: payload chunk size: 0 + bundle2-input-part: total payload size 48 + bundle2-input: part header size: 90 + bundle2-input: part type: "PUSHKEY" + bundle2-input: part id: "3" + bundle2-input: part parameters: 4 + bundle2-input: found a handler for part pushkey + bundle2-input-part: "pushkey" (params: 4 mandatory) supported + pushing key for "bookmarks:W" + bundle2-input: payload chunk size: 0 + bundle2-input: part header size: 0 + bundle2-input: end of bundle2 stream + bundle2-input-bundle: 3 parts total + running hook txnclose-bookmark.test: sh $TESTTMP/hook.sh 
test-hook-bookmark: W: 0000000000000000000000000000000000000000 -> + bundle2-output-bundle: "HG20", 1 parts total + bundle2-output: start emission of HG20 stream + bundle2-output: bundle parameter: + bundle2-output: start of parts + bundle2-output: bundle part: "reply:pushkey" + bundle2-output-part: "reply:pushkey" (params: 0 advisory) empty payload + bundle2-output: part 0: "REPLY:PUSHKEY" + bundle2-output: header chunk size: 43 + bundle2-output: closing payload chunk + bundle2-output: end of bundle + bundle2-input: start processing of HG20 stream + bundle2-input: reading bundle2 stream parameters + bundle2-input-bundle: no-transaction + bundle2-input: start extraction of bundle2 parts + bundle2-input: part header size: 43 + bundle2-input: part type: "REPLY:PUSHKEY" + bundle2-input: part id: "0" + bundle2-input: part parameters: 2 + bundle2-input: found a handler for part reply:pushkey + bundle2-input-part: "reply:pushkey" (params: 0 advisory) supported + bundle2-input: payload chunk size: 0 + bundle2-input: part header size: 0 + bundle2-input: end of bundle2 stream + bundle2-input-bundle: 0 parts total deleting remote bookmark W + listing keys for "phases" [1] +#endif +#if b2-binary + + $ hg push -B W ../a --config "$TESTHOOK" --debug --config devel.bundle2.debug=yes + pushing to ../a + query 1; heads + searching for changes + all remote heads known locally + listing keys for "phases" + checking for updated bookmarks + listing keys for "bookmarks" + no changes found + bundle2-output-bundle: "HG20", 4 parts total + bundle2-output: start emission of HG20 stream + bundle2-output: bundle parameter: + bundle2-output: start of parts + bundle2-output: bundle part: "replycaps" + bundle2-output-part: "replycaps" 205 bytes payload + bundle2-output: part 0: "REPLYCAPS" + bundle2-output: header chunk size: 16 + bundle2-output: payload chunk size: 205 + bundle2-output: closing payload chunk + bundle2-output: bundle part: "check:bookmarks" + bundle2-output-part: 
"check:bookmarks" 23 bytes payload + bundle2-output: part 1: "CHECK:BOOKMARKS" + bundle2-output: header chunk size: 22 + bundle2-output: payload chunk size: 23 + bundle2-output: closing payload chunk + bundle2-output: bundle part: "check:phases" + bundle2-output-part: "check:phases" 48 bytes payload + bundle2-output: part 2: "CHECK:PHASES" + bundle2-output: header chunk size: 19 + bundle2-output: payload chunk size: 48 + bundle2-output: closing payload chunk + bundle2-output: bundle part: "bookmarks" + bundle2-output-part: "bookmarks" 23 bytes payload + bundle2-output: part 3: "BOOKMARKS" + bundle2-output: header chunk size: 16 + bundle2-output: payload chunk size: 23 + bundle2-output: closing payload chunk + bundle2-output: end of bundle + bundle2-input: start processing of HG20 stream + bundle2-input: reading bundle2 stream parameters + bundle2-input-bundle: with-transaction + bundle2-input: start extraction of bundle2 parts + bundle2-input: part header size: 16 + bundle2-input: part type: "REPLYCAPS" + bundle2-input: part id: "0" + bundle2-input: part parameters: 0 + bundle2-input: found a handler for part replycaps + bundle2-input-part: "replycaps" supported + bundle2-input: payload chunk size: 205 + bundle2-input: payload chunk size: 0 + bundle2-input-part: total payload size 205 + bundle2-input: part header size: 22 + bundle2-input: part type: "CHECK:BOOKMARKS" + bundle2-input: part id: "1" + bundle2-input: part parameters: 0 + bundle2-input: found a handler for part check:bookmarks + bundle2-input-part: "check:bookmarks" supported + bundle2-input: payload chunk size: 23 + bundle2-input: payload chunk size: 0 + bundle2-input-part: total payload size 23 + bundle2-input: part header size: 19 + bundle2-input: part type: "CHECK:PHASES" + bundle2-input: part id: "2" + bundle2-input: part parameters: 0 + bundle2-input: found a handler for part check:phases + bundle2-input-part: "check:phases" supported + bundle2-input: payload chunk size: 48 + bundle2-input: 
payload chunk size: 0 + bundle2-input-part: total payload size 48 + bundle2-input: part header size: 16 + bundle2-input: part type: "BOOKMARKS" + bundle2-input: part id: "3" + bundle2-input: part parameters: 0 + bundle2-input: found a handler for part bookmarks + bundle2-input-part: "bookmarks" supported + bundle2-input: payload chunk size: 23 + bundle2-input: payload chunk size: 0 + bundle2-input-part: total payload size 23 + bundle2-input: part header size: 0 + bundle2-input: end of bundle2 stream + bundle2-input-bundle: 3 parts total + running hook txnclose-bookmark.test: sh $TESTTMP/hook.sh + test-hook-bookmark: W: 0000000000000000000000000000000000000000 -> + bundle2-output-bundle: "HG20", 0 parts total + bundle2-output: start emission of HG20 stream + bundle2-output: bundle parameter: + bundle2-output: start of parts + bundle2-output: end of bundle + bundle2-input: start processing of HG20 stream + bundle2-input: reading bundle2 stream parameters + bundle2-input-bundle: no-transaction + bundle2-input: start extraction of bundle2 parts + bundle2-input: part header size: 0 + bundle2-input: end of bundle2 stream + bundle2-input-bundle: 0 parts total + deleting remote bookmark W + listing keys for "phases" + [1] + +#endif + export the active bookmark $ hg bookmark V @@ -192,7 +409,7 @@ * foobar 1:9b140be10808 $ hg pull --config paths.foo=../a foo --config "$TESTHOOK" - pulling from $TESTTMP/a (glob) + pulling from $TESTTMP/a searching for changes adding changesets adding manifests @@ -268,7 +485,7 @@ 0 files updated, 0 files merged, 0 files removed, 0 files unresolved (activating bookmark X) $ hg pull --config paths.foo=../a foo -B . 
--config "$TESTHOOK" - pulling from $TESTTMP/a (glob) + pulling from $TESTTMP/a no changes found divergent bookmark @ stored as @foo importing bookmark X @@ -623,12 +840,12 @@ be exchanged) $ hg -R repo1 incoming -B - comparing with $TESTTMP/bmcomparison/source (glob) + comparing with $TESTTMP/bmcomparison/source searching for changed bookmarks no changed bookmarks found [1] $ hg -R repo1 outgoing -B - comparing with $TESTTMP/bmcomparison/source (glob) + comparing with $TESTTMP/bmcomparison/source searching for changed bookmarks no changed bookmarks found [1] @@ -772,7 +989,7 @@ $ echo 2 > f2 $ hg ci -qAmr $ hg push -B X - pushing to $TESTTMP/addmarks (glob) + pushing to $TESTTMP/addmarks searching for changes remote has heads on branch 'default' that are not known locally: a2a606d9ff1b abort: push creates new remote head 54694f811df9 with bookmark 'X'! @@ -852,19 +1069,36 @@ Local push ---------- +#if b2-pushkey + $ hg push -B @ local - pushing to $TESTTMP/issue4455-dest (glob) + pushing to $TESTTMP/issue4455-dest searching for changes no changes found pushkey-abort: prepushkey hook exited with status 1 abort: exporting bookmark @ failed! [255] + +#endif +#if b2-binary + + $ hg push -B @ local + pushing to $TESTTMP/issue4455-dest + searching for changes + no changes found + abort: prepushkey hook exited with status 1 + [255] + +#endif + $ hg -R ../issue4455-dest/ bookmarks no bookmarks set Using ssh --------- +#if b2-pushkey + $ hg push -B @ ssh # bundle2+ pushing to ssh://user@dummy/issue4455-dest searching for changes @@ -872,6 +1106,7 @@ remote: pushkey-abort: prepushkey hook exited with status 1 abort: exporting bookmark @ failed! [255] + $ hg -R ../issue4455-dest/ bookmarks no bookmarks set @@ -882,12 +1117,27 @@ remote: pushkey-abort: prepushkey hook exited with status 1 exporting bookmark @ failed! 
[1] + +#endif +#if b2-binary + + $ hg push -B @ ssh # bundle2+ + pushing to ssh://user@dummy/issue4455-dest + searching for changes + no changes found + remote: prepushkey hook exited with status 1 + abort: push failed on remote + [255] + +#endif + $ hg -R ../issue4455-dest/ bookmarks no bookmarks set Using http ---------- +#if b2-pushkey $ hg push -B @ http # bundle2+ pushing to http://localhost:$HGPORT/ searching for changes @@ -895,6 +1145,7 @@ remote: pushkey-abort: prepushkey hook exited with status 1 abort: exporting bookmark @ failed! [255] + $ hg -R ../issue4455-dest/ bookmarks no bookmarks set @@ -905,5 +1156,20 @@ remote: pushkey-abort: prepushkey hook exited with status 1 exporting bookmark @ failed! [1] + +#endif + +#if b2-binary + + $ hg push -B @ ssh # bundle2+ + pushing to ssh://user@dummy/issue4455-dest + searching for changes + no changes found + remote: prepushkey hook exited with status 1 + abort: push failed on remote + [255] + +#endif + $ hg -R ../issue4455-dest/ bookmarks no bookmarks set diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-bookmarks-rebase.t --- a/tests/test-bookmarks-rebase.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-bookmarks-rebase.t Mon Jan 22 17:53:02 2018 -0500 @@ -38,7 +38,7 @@ $ hg rebase -s two -d one rebasing 3:2ae46b1d99a7 "3" (two tip) - saved backup bundle to $TESTTMP/.hg/strip-backup/2ae46b1d99a7-e6b057bc-rebase.hg (glob) + saved backup bundle to $TESTTMP/.hg/strip-backup/2ae46b1d99a7-e6b057bc-rebase.hg $ hg log changeset: 3:42e5ed2cdcf4 diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-bookmarks.t --- a/tests/test-bookmarks.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-bookmarks.t Mon Jan 22 17:53:02 2018 -0500 @@ -736,7 +736,7 @@ Z 2:db815d6d32e6 x y 2:db815d6d32e6 $ hg -R ../cloned-bookmarks-manual-update-with-divergence pull - pulling from $TESTTMP/repo (glob) + pulling from $TESTTMP/repo searching for changes adding changesets adding manifests diff -r 87676e8ee056 -r 27b6df1b5adb 
tests/test-branch-change.t --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-branch-change.t Mon Jan 22 17:53:02 2018 -0500 @@ -0,0 +1,331 @@ +Testing changing branch on commits +================================== + +Setup + + $ cat >> $HGRCPATH << EOF + > [alias] + > glog = log -G -T "{rev}:{node|short} {desc}\n{branch} ({bookmarks})" + > [experimental] + > evolution = createmarkers + > [extensions] + > rebase= + > EOF + + $ hg init repo + $ cd repo + $ for ch in a b c d e; do echo foo >> $ch; hg ci -Aqm "Added "$ch; done + $ hg glog + @ 4:aa98ab95a928 Added e + | default () + o 3:62615734edd5 Added d + | default () + o 2:28ad74487de9 Added c + | default () + o 1:29becc82797a Added b + | default () + o 0:18d04c59bb5d Added a + default () + + $ hg branches + default 4:aa98ab95a928 + +Try without passing a new branch name + + $ hg branch -r . + abort: no branch name specified for the revisions + [255] + +Setting an invalid branch name + + $ hg branch -r . a:b + abort: ':' cannot be used in a name + [255] + $ hg branch -r . tip + abort: the name 'tip' is reserved + [255] + $ hg branch -r . 1234 + abort: cannot use an integer as a name + [255] + +Change on non-linear set of commits + + $ hg branch -r 2 -r 4 foo + abort: cannot change branch of non-linear revisions + [255] + +Change in middle of the stack (linear commits) + + $ hg branch -r 1::3 foo + abort: cannot change branch of changeset with children + [255] + +Change with dirty working directory + + $ echo bar > a + $ hg branch -r . foo + abort: uncommitted changes + [255] + + $ hg revert --all + reverting a + +Change on empty revision set + + $ hg branch -r 'draft() - all()' foo + abort: empty revision set + [255] + +Changing branch on linear set of commits from head + +Without obsmarkers + + $ hg branch -r 3:4 foo --config experimental.evolution=! 
+ changed branch on 2 changesets + saved backup bundle to $TESTTMP/repo/.hg/strip-backup/62615734edd5-e86bd13a-branch-change.hg + $ hg glog + @ 4:3938acfb5c0f Added e + | foo () + o 3:9435da006bdc Added d + | foo () + o 2:28ad74487de9 Added c + | default () + o 1:29becc82797a Added b + | default () + o 0:18d04c59bb5d Added a + default () + + $ hg branches + foo 4:3938acfb5c0f + default 2:28ad74487de9 (inactive) + +With obsmarkers + + $ hg branch -r 3::4 bar + changed branch on 2 changesets + $ hg glog + @ 6:7c1991464886 Added e + | bar () + o 5:1ea05e93925f Added d + | bar () + o 2:28ad74487de9 Added c + | default () + o 1:29becc82797a Added b + | default () + o 0:18d04c59bb5d Added a + default () + + $ hg branches + bar 6:7c1991464886 + default 2:28ad74487de9 (inactive) + +Change branch name to an existing branch + + $ hg branch -r . default + abort: a branch of the same name already exists + [255] + +Changing on a branch head which is not topological head + + $ hg branch -r 2 stable + abort: cannot change branch of changeset with children + [255] + +Enabling the allowunstable config and trying to change branch on a branch head +which is not a topological head + + $ echo "[experimental]" >> .hg/hgrc + $ echo "evolution.allowunstable=yes" >> .hg/hgrc + $ hg branch -r 2 foo + changed branch on 1 changesets + 2 new orphan changesets + +Changing branch of an obsoleted changeset + + $ hg branch -r 4 foobar + abort: hidden revision '4' was rewritten as: 7c1991464886! 
+ (use --hidden to access hidden revisions) + [255] + + $ hg branch -r 4 --hidden foobar + abort: cannot change branch of a obsolete changeset + [255] + +Make sure bookmark movement is correct + + $ hg bookmark b1 + $ hg glog -r '.^::' + @ 6:7c1991464886 Added e + | bar (b1) + * 5:1ea05e93925f Added d + | bar () + ~ + + $ hg branch -r '(.^)::' wat --debug + changing branch of '1ea05e93925f806d875a2163f9b76764be644636' from 'bar' to 'wat' + committing files: + d + committing manifest + committing changelog + new node id is 343660ccab7400da637bd6a211d07f413536d718 + changing branch of '7c19914648869f5b02fc7fed31ddee9783fdd680' from 'bar' to 'wat' + committing files: + e + committing manifest + committing changelog + new node id is de1404b45a69f8cc6437d7679033ee33e9efb4ba + moving bookmarks ['b1'] from 7c19914648869f5b02fc7fed31ddee9783fdd680 to de1404b45a69f8cc6437d7679033ee33e9efb4ba + resolving manifests + branchmerge: False, force: False, partial: False + ancestor: 7c1991464886, local: 7c1991464886+, remote: de1404b45a69 + starting 4 threads for background file closing (?) + changed branch on 2 changesets + updating the branch cache + invalid branchheads cache (served): tip differs + + $ hg glog -r '(.^)::' + @ 9:de1404b45a69 Added e + | wat (b1) + * 8:343660ccab74 Added d + | wat () + ~ + +Make sure phase handling is correct + + $ echo foo >> bar + $ hg ci -Aqm "added bar" --secret + 1 new orphan changesets + $ hg glog -r . + @ 10:8ad1294c1660 added bar + | wat (b1) + ~ + $ hg branch -r . secret + changed branch on 1 changesets + $ hg phase -r . 
+ 11: secret + + $ hg branches + secret 11:38a9b2d53f98 + foo 7:8a4729a5e2b8 + wat 9:de1404b45a69 (inactive) + default 2:28ad74487de9 (inactive) + $ hg branch + secret + +Changing branch of another head, different from one on which we are + + $ hg glog + @ 11:38a9b2d53f98 added bar + | secret (b1) + * 9:de1404b45a69 Added e + | wat () + * 8:343660ccab74 Added d + | wat () + | o 7:8a4729a5e2b8 Added c + | | foo () + x | 2:28ad74487de9 Added c + |/ default () + o 1:29becc82797a Added b + | default () + o 0:18d04c59bb5d Added a + default () + + $ hg branch + secret + + $ hg branch -r 7 foobar + changed branch on 1 changesets + +The current branch must be preserved + $ hg branch + secret + +Changing branch on multiple heads at once + + $ hg rebase -s 8 -d 12 --keepbranches -q + + $ hg rebase -s 14 -d 1 --keepbranches -q + + $ hg branch -r 0: stable + changed branch on 6 changesets + $ hg glog + @ 23:6a5ddbcfb870 added bar + | stable (b1) + o 22:baedc6e98a67 Added e + | stable () + | o 21:99ac7bf8aad1 Added d + | | stable () + | o 20:0ecb4d39c4bd Added c + |/ stable () + o 19:fd45b986b109 Added b + | stable () + o 18:204d2769eca2 Added a + stable () + + $ hg branches + stable 23:6a5ddbcfb870 + + $ hg branch + stable + +Changing to same branch is no-op + + $ hg branch -r 19::21 stable + changed branch on 0 changesets + +Changing branch name to existing branch name if the branch of parent of root of +revs is same as the new branch name + + $ hg branch -r 20::21 bugfix + changed branch on 2 changesets + $ hg glog + o 25:714defe1cf34 Added d + | bugfix () + o 24:98394def28fc Added c + | bugfix () + | @ 23:6a5ddbcfb870 added bar + | | stable (b1) + | o 22:baedc6e98a67 Added e + |/ stable () + o 19:fd45b986b109 Added b + | stable () + o 18:204d2769eca2 Added a + stable () + + $ hg branch -r 24:25 stable + changed branch on 2 changesets + $ hg glog + o 27:4ec342341562 Added d + | stable () + o 26:83f48859c2de Added c + | stable () + | @ 23:6a5ddbcfb870 added bar + | | stable 
(b1) + | o 22:baedc6e98a67 Added e + |/ stable () + o 19:fd45b986b109 Added b + | stable () + o 18:204d2769eca2 Added a + stable () + +Testing on merge + + $ hg merge -r 26 + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + (branch merge, don't forget to commit) + + $ hg branch -r . abcd + abort: outstanding uncommitted merge + [255] + $ hg ci -m "Merge commit" + $ hg branch -r '(.^)::' def + abort: cannot change branch of a merge commit + [255] + +Changing branch on public changeset + + $ hg phase -r 27 -p + $ hg branch -r 27 def + abort: cannot change branch of public changesets + (see 'hg help phases' for details) + [255] diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-bugzilla.t --- a/tests/test-bugzilla.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-bugzilla.t Mon Jan 22 17:53:02 2018 -0500 @@ -61,7 +61,7 @@ $ cat bzmock.log && rm bzmock.log update bugid=123, newstate={}, committer='test' ---- - changeset 7875a8342c6f in repo $TESTTMP/mockremote refers to bug 123. (glob) + changeset 7875a8342c6f in repo $TESTTMP/mockremote refers to bug 123. 
details: Fixes bug 123 ---- diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-bundle.t --- a/tests/test-bundle.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-bundle.t Mon Jan 22 17:53:02 2018 -0500 @@ -856,7 +856,7 @@ $ hg bundle --base 1 -r 3 ../update2bundled.hg 1 changesets found $ hg strip -r 3 - saved backup bundle to $TESTTMP/update2bundled/.hg/strip-backup/8bd3e1f196af-017e56d8-backup.hg (glob) + saved backup bundle to $TESTTMP/update2bundled/.hg/strip-backup/8bd3e1f196af-017e56d8-backup.hg $ hg merge -R ../update2bundled.hg -r 3 setting parent to node 8bd3e1f196af289b2b121be08031e76d7ae92098 that only exists in the bundle 1 files updated, 0 files merged, 0 files removed, 0 files unresolved diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-bundle2-exchange.t --- a/tests/test-bundle2-exchange.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-bundle2-exchange.t Mon Jan 22 17:53:02 2018 -0500 @@ -106,7 +106,7 @@ postclose-tip:02de42196ebe draft txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_PHASES_MOVED=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=phase $ hg -R other pull -r 24b6387c8c8c - pulling from $TESTTMP/main (glob) + pulling from $TESTTMP/main searching for changes adding changesets adding manifests @@ -137,7 +137,7 @@ postclose-tip:02de42196ebe draft txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_PHASES_MOVED=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=phase $ hg -R other pull -r 24b6387c8c8c - pulling from $TESTTMP/main (glob) + pulling from $TESTTMP/main no changes found pre-close-tip:24b6387c8c8c public postclose-tip:24b6387c8c8c public @@ -157,7 +157,7 @@ pull empty $ hg -R other pull -r 24b6387c8c8c - pulling from $TESTTMP/main (glob) + pulling from $TESTTMP/main no changes found pre-close-tip:24b6387c8c8c public postclose-tip:24b6387c8c8c public @@ -253,7 +253,7 @@ remote: added 1 changesets with 0 changes to 0 files (-1 heads) remote: 1 new obsolescence markers remote: pre-close-tip:eea13746799a public book_eea1 - remote: pushkey: lock 
state after "bookmarks" + remote: pushkey: lock state after "bookmark" remote: lock: free remote: wlock: free remote: postclose-tip:eea13746799a public book_eea1 @@ -339,7 +339,7 @@ remote: added 1 changesets with 1 changes to 1 files remote: 1 new obsolescence markers remote: pre-close-tip:5fddd98957c8 draft book_5fdd - remote: pushkey: lock state after "bookmarks" + remote: pushkey: lock state after "bookmark" remote: lock: free remote: wlock: free remote: postclose-tip:5fddd98957c8 draft book_5fdd @@ -390,7 +390,7 @@ remote: added 1 changesets with 1 changes to 1 files remote: 1 new obsolescence markers remote: pre-close-tip:32af7686d403 public book_32af - remote: pushkey: lock state after "bookmarks" + remote: pushkey: lock state after "bookmark" remote: lock: free remote: wlock: free remote: postclose-tip:32af7686d403 public book_32af diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-bundle2-multiple-changegroups.t --- a/tests/test-bundle2-multiple-changegroups.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-bundle2-multiple-changegroups.t Mon Jan 22 17:53:02 2018 -0500 @@ -13,24 +13,24 @@ > # in 'heads' as intermediate heads for the first changegroup. 
> intermediates = [repo[r].p1().node() for r in heads] > outgoing = discovery.outgoing(repo, common, intermediates) - > cg = changegroup.makechangegroup(repo, outgoing, '01', + > cg = changegroup.makechangegroup(repo, outgoing, b'01', > source, bundlecaps=bundlecaps) - > bundler.newpart('output', data='changegroup1') - > bundler.newpart('changegroup', data=cg.getchunks()) + > bundler.newpart(b'output', data=b'changegroup1') + > bundler.newpart(b'changegroup', data=cg.getchunks()) > outgoing = discovery.outgoing(repo, common + intermediates, heads) - > cg = changegroup.makechangegroup(repo, outgoing, '01', + > cg = changegroup.makechangegroup(repo, outgoing, b'01', > source, bundlecaps=bundlecaps) - > bundler.newpart('output', data='changegroup2') - > bundler.newpart('changegroup', data=cg.getchunks()) + > bundler.newpart(b'output', data=b'changegroup2') + > bundler.newpart(b'changegroup', data=cg.getchunks()) > > def _pull(repo, *args, **kwargs): > pullop = _orig_pull(repo, *args, **kwargs) - > repo.ui.write('pullop.cgresult is %d\n' % pullop.cgresult) + > repo.ui.write(b'pullop.cgresult is %d\n' % pullop.cgresult) > return pullop > > _orig_pull = exchange.pull > exchange.pull = _pull - > exchange.getbundle2partsmapping['changegroup'] = _getbundlechangegrouppart + > exchange.getbundle2partsmapping[b'changegroup'] = _getbundlechangegrouppart > EOF $ cat >> $HGRCPATH << EOF @@ -74,7 +74,7 @@ Pull the new commits in the clone $ hg pull - pulling from $TESTTMP/repo (glob) + pulling from $TESTTMP/repo searching for changes remote: changegroup1 adding changesets @@ -145,7 +145,7 @@ $ cd ../clone $ hg pull - pulling from $TESTTMP/repo (glob) + pulling from $TESTTMP/repo searching for changes remote: changegroup1 adding changesets @@ -219,7 +219,7 @@ $ cd ../clone $ hg pull - pulling from $TESTTMP/repo (glob) + pulling from $TESTTMP/repo searching for changes remote: changegroup1 adding changesets diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-cache-abuse.t --- 
a/tests/test-cache-abuse.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-cache-abuse.t Mon Jan 22 17:53:02 2018 -0500 @@ -70,6 +70,11 @@ $ damage "tags --hidden" tags2 $ damage tags tags2-visible $ damage "tag -f t3" hgtagsfnodes1 + 1 new orphan changesets + 1 new orphan changesets + 1 new orphan changesets + 1 new orphan changesets + 1 new orphan changesets Beat up branch caches: diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-casefolding.t --- a/tests/test-casefolding.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-casefolding.t Mon Jan 22 17:53:02 2018 -0500 @@ -178,8 +178,8 @@ $ echo 'foo' > a/B/c/D/E $ hg ci -m 'e content change' $ hg revert --all -r 0 - removing a/B/c/D/E (glob) - adding a/B/c/D/e (glob) + removing a/B/c/D/E + adding a/B/c/D/e $ find * | sort a a/B diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-cat.t --- a/tests/test-cat.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-cat.t Mon Jan 22 17:53:02 2018 -0500 @@ -66,9 +66,9 @@ Test template output $ hg --cwd tmp cat ../b ../c -T '== {path} ({abspath}) ==\n{data}' - == ../b (b) == (glob) + == ../b (b) == 1 - == ../c (c) == (glob) + == ../c (c) == 3 $ hg cat b c -Tjson --output - @@ -119,3 +119,13 @@ $ PATTERN='t4' hg log -r '.' 
-T "{envvars % '{key} -> {value}\n'}" \ > --config "experimental.exportableenviron=PATTERN" PATTERN -> t4 + +Test behavior of output when directory structure does not already exist + + $ mkdir foo + $ echo a > foo/a + $ hg add foo/a + $ hg commit -qm "add foo/a" + $ hg cat --output "output/%p" foo/a + $ cat output/foo/a + a diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-censor.t --- a/tests/test-censor.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-censor.t Mon Jan 22 17:53:02 2018 -0500 @@ -353,7 +353,7 @@ checking files 2 files, 1 changesets, 2 total revisions $ hg pull -r $H1 -r $H2 - pulling from $TESTTMP/r (glob) + pulling from $TESTTMP/r searching for changes adding changesets adding manifests @@ -398,7 +398,7 @@ $ hg cat -r $CLEANREV target Re-sanitized; nothing to see here $ hg push -f -r $H2 - pushing to $TESTTMP/r (glob) + pushing to $TESTTMP/r searching for changes adding changesets adding manifests diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-check-clang-format.t --- a/tests/test-check-clang-format.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-check-clang-format.t Mon Jan 22 17:53:02 2018 -0500 @@ -3,7 +3,7 @@ $ . "$TESTDIR/helpers-testrepo.sh" $ cd "$TESTDIR"/.. 
- $ for f in `testrepohg files 'set:(**.c or **.h) and not "listfile:contrib/clang-format-blacklist"'` ; do + $ for f in `testrepohg files 'set:(**.c or **.cc or **.h) and not "listfile:contrib/clang-format-blacklist"'` ; do > clang-format --style file $f > $f.formatted > cmp $f $f.formatted || diff -u $f $f.formatted > rm $f.formatted diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-check-code.t --- a/tests/test-check-code.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-check-code.t Mon Jan 22 17:53:02 2018 -0500 @@ -15,7 +15,6 @@ Skipping i18n/polib.py it has no-che?k-code (glob) Skipping mercurial/httpclient/__init__.py it has no-che?k-code (glob) Skipping mercurial/httpclient/_readers.py it has no-che?k-code (glob) - Skipping mercurial/selectors2.py it has no-che?k-code (glob) Skipping mercurial/statprof.py it has no-che?k-code (glob) Skipping tests/badserverext.py it has no-che?k-code (glob) @@ -44,6 +43,7 @@ .hgignore .hgsigs .hgtags + .jshintrc CONTRIBUTING CONTRIBUTORS COPYING diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-check-config.t --- a/tests/test-check-config.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-check-config.t Mon Jan 22 17:53:02 2018 -0500 @@ -33,7 +33,7 @@ $ $PYTHON contrib/check-config.py < $TESTTMP/files foo = ui.configint('ui', 'intdefault', default=42) conflict on ui.intdefault: ('int', '42') != ('int', '1') - at $TESTTMP/testfile.py:12: (glob) + at $TESTTMP/testfile.py:12: undocumented: ui.doesnotexist (str) undocumented: ui.intdefault (int) [42] undocumented: ui.intdefault2 (int) [42] diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-check-jshint.t --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-check-jshint.t Mon Jan 22 17:53:02 2018 -0500 @@ -0,0 +1,11 @@ +#require test-repo jshint hg10 + + $ . 
"$TESTDIR/helpers-testrepo.sh" + +run jshint on all tracked files ending in .js except vendored dependencies + + $ cd "`dirname "$TESTDIR"`" + + $ testrepohg locate 'set:**.js' \ + > 2>/dev/null \ + > | xargs jshint diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-check-pylint.t --- a/tests/test-check-pylint.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-check-pylint.t Mon Jan 22 17:53:02 2018 -0500 @@ -11,7 +11,8 @@ $ touch $TESTTMP/fakerc $ pylint --rcfile=$TESTTMP/fakerc --disable=all \ - > --enable=W0102 --reports=no \ + > --enable=W0102,C0321 \ + > --reports=no \ > --ignore=thirdparty \ > mercurial hgdemandimport hgext hgext3rd (?) diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-clone-uncompressed.t --- a/tests/test-clone-uncompressed.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-clone-uncompressed.t Mon Jan 22 17:53:02 2018 -0500 @@ -1,5 +1,14 @@ #require serve +#testcases stream-legacy stream-bundle2 + +#if stream-bundle2 + $ cat << EOF >> $HGRCPATH + > [experimental] + > bundle2.stream = yes + > EOF +#endif + Initialize repository the status call is to check for issue5130 @@ -12,30 +21,222 @@ ... fh.write(str(i)) $ hg -q commit -A -m 'add a lot of files' $ hg st + $ hg --config server.uncompressed=false serve -p $HGPORT -d --pid-file=hg.pid + $ cat hg.pid > $DAEMON_PIDS + $ cd .. 
+ +Cannot stream clone when server.uncompressed is set + + $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=stream_out' + 200 Script output follows + + 1 + +#if stream-legacy + $ hg debugcapabilities http://localhost:$HGPORT + Main capabilities: + batch + branchmap + $USUAL_BUNDLE2_CAPS_SERVER$ + changegroupsubset + compression=zstd,zlib + getbundle + httpheader=1024 + httpmediatype=0.1rx,0.1tx,0.2tx + known + lookup + pushkey + unbundle=HG10GZ,HG10BZ,HG10UN + unbundlehash + Bundle2 capabilities: + HG20 + bookmarks + changegroup + 01 + 02 + digests + md5 + sha1 + sha512 + error + abort + unsupportedcontent + pushraced + pushkey + hgtagsfnodes + listkeys + phases + heads + pushkey + remote-changegroup + http + https + + $ hg clone --stream -U http://localhost:$HGPORT server-disabled + warning: stream clone requested but server has them disabled + requesting all changes + adding changesets + adding manifests + adding file changes + added 2 changesets with 1025 changes to 1025 files + new changesets 96ee1d7354c4:c17445101a72 + + $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto 0.2 --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1" + 200 Script output follows + content-type: application/mercurial-0.2 + + + $ f --size body --hexdump --bytes 100 + body: size=232 + 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......| + 0010: cf 0b 45 52 52 4f 52 3a 41 42 4f 52 54 00 00 00 |..ERROR:ABORT...| + 0020: 00 01 01 07 3c 04 72 6d 65 73 73 61 67 65 73 74 |....<.rmessagest| + 0030: 72 65 61 6d 20 64 61 74 61 20 72 65 71 75 65 73 |ream data reques| + 
0040: 74 65 64 20 62 75 74 20 73 65 72 76 65 72 20 64 |ted but server d| + 0050: 6f 65 73 20 6e 6f 74 20 61 6c 6c 6f 77 20 74 68 |oes not allow th| + 0060: 69 73 20 66 |is f| + +#endif +#if stream-bundle2 + $ hg debugcapabilities http://localhost:$HGPORT + Main capabilities: + batch + branchmap + $USUAL_BUNDLE2_CAPS_SERVER$ + changegroupsubset + compression=zstd,zlib + getbundle + httpheader=1024 + httpmediatype=0.1rx,0.1tx,0.2tx + known + lookup + pushkey + unbundle=HG10GZ,HG10BZ,HG10UN + unbundlehash + Bundle2 capabilities: + HG20 + bookmarks + changegroup + 01 + 02 + digests + md5 + sha1 + sha512 + error + abort + unsupportedcontent + pushraced + pushkey + hgtagsfnodes + listkeys + phases + heads + pushkey + remote-changegroup + http + https + + $ hg clone --stream -U http://localhost:$HGPORT server-disabled + warning: stream clone requested but server has them disabled + requesting all changes + adding changesets + adding manifests + adding file changes + added 2 changesets with 1025 changes to 1025 files + new changesets 96ee1d7354c4:c17445101a72 + + $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto 0.2 --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1" + 200 Script output follows + content-type: application/mercurial-0.2 + + + $ f --size body --hexdump --bytes 100 + body: size=232 + 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......| + 0010: cf 0b 45 52 52 4f 52 3a 41 42 4f 52 54 00 00 00 |..ERROR:ABORT...| + 0020: 00 01 01 07 3c 04 72 6d 65 73 73 61 67 65 73 74 |....<.rmessagest| + 0030: 72 65 61 6d 20 64 61 74 61 20 72 65 
71 75 65 73 |ream data reques| + 0040: 74 65 64 20 62 75 74 20 73 65 72 76 65 72 20 64 |ted but server d| + 0050: 6f 65 73 20 6e 6f 74 20 61 6c 6c 6f 77 20 74 68 |oes not allow th| + 0060: 69 73 20 66 |is f| + +#endif + + $ killdaemons.py + $ cd server $ hg serve -p $HGPORT -d --pid-file=hg.pid - $ cat hg.pid >> $DAEMON_PIDS + $ cat hg.pid > $DAEMON_PIDS $ cd .. Basic clone +#if stream-legacy $ hg clone --stream -U http://localhost:$HGPORT clone1 streaming all changes 1027 files to transfer, 96.3 KB of data transferred 96.3 KB in * seconds (*/sec) (glob) searching for changes no changes found +#endif +#if stream-bundle2 + $ hg clone --stream -U http://localhost:$HGPORT clone1 + streaming all changes + 1030 files to transfer, 96.4 KB of data + transferred 96.4 KB in * seconds (* */sec) (glob) + + $ ls -1 clone1/.hg/cache + branch2-served + rbc-names-v1 + rbc-revs-v1 +#endif + +getbundle requests with stream=1 are uncompressed + + $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto '0.1 0.2 comp=zlib,none' --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1" + 200 Script output follows + content-type: application/mercurial-0.2 + + + $ f --size --hex --bytes 256 body + body: size=112222 + 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......| + 0010: 68 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |h.STREAM2.......| + 0020: 05 09 04 0c 2d 62 79 74 65 63 6f 75 6e 74 39 38 |....-bytecount98| + 0030: 37 35 38 66 69 6c 65 63 6f 75 6e 74 31 30 33 30 |758filecount1030| + 0040: 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 65 |requirementsdote| + 
0050: 6e 63 6f 64 65 20 66 6e 63 61 63 68 65 20 67 65 |ncode fncache ge| + 0060: 6e 65 72 61 6c 64 65 6c 74 61 20 72 65 76 6c 6f |neraldelta revlo| + 0070: 67 76 31 20 73 74 6f 72 65 00 00 80 00 73 08 42 |gv1 store....s.B| + 0080: 64 61 74 61 2f 30 2e 69 00 03 00 01 00 00 00 00 |data/0.i........| + 0090: 00 00 00 02 00 00 00 01 00 00 00 00 00 00 00 01 |................| + 00a0: ff ff ff ff ff ff ff ff 80 29 63 a0 49 d3 23 87 |.........)c.I.#.| + 00b0: bf ce fe 56 67 92 67 2c 69 d1 ec 39 00 00 00 00 |...Vg.g,i..9....| + 00c0: 00 00 00 00 00 00 00 00 75 30 73 08 42 64 61 74 |........u0s.Bdat| + 00d0: 61 2f 31 2e 69 00 03 00 01 00 00 00 00 00 00 00 |a/1.i...........| + 00e0: 02 00 00 00 01 00 00 00 00 00 00 00 01 ff ff ff |................| + 00f0: ff ff ff ff ff f9 76 da 1d 0d f2 25 6c de 08 db |......v....%l...| --uncompressed is an alias to --stream +#if stream-legacy $ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed streaming all changes 1027 files to transfer, 96.3 KB of data transferred 96.3 KB in * seconds (*/sec) (glob) searching for changes no changes found +#endif +#if stream-bundle2 + $ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed + streaming all changes + 1030 files to transfer, 96.4 KB of data + transferred 96.4 KB in * seconds (* */sec) (glob) +#endif Clone with background file closing enabled +#if stream-legacy $ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep -v adding using http://localhost:$HGPORT/ sending capabilities command @@ -57,6 +258,27 @@ bundle2-input-part: total payload size 24 bundle2-input-bundle: 1 parts total checking for updated bookmarks +#endif +#if stream-bundle2 + $ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep -v adding + using http://localhost:$HGPORT/ + 
sending capabilities command + query 1; heads + sending batch command + streaming all changes + sending getbundle command + bundle2-input-bundle: with-transaction + bundle2-input-part: "stream2" (params: 3 mandatory) supported + applying stream bundle + 1030 files to transfer, 96.4 KB of data + starting 4 threads for background file closing + starting 4 threads for background file closing + transferred 96.4 KB in * seconds (* */sec) (glob) + bundle2-input-part: total payload size 112077 + bundle2-input-part: "listkeys" (params: 1 mandatory) supported + bundle2-input-bundle: 1 parts total + checking for updated bookmarks +#endif Cannot stream clone when there are secret changesets @@ -79,12 +301,20 @@ $ cat hg.pid > $DAEMON_PIDS $ cd .. +#if stream-legacy $ hg clone --stream -U http://localhost:$HGPORT secret-allowed streaming all changes 1027 files to transfer, 96.3 KB of data transferred 96.3 KB in * seconds (*/sec) (glob) searching for changes no changes found +#endif +#if stream-bundle2 + $ hg clone --stream -U http://localhost:$HGPORT secret-allowed + streaming all changes + 1030 files to transfer, 96.4 KB of data + transferred 96.4 KB in * seconds (* */sec) (glob) +#endif $ killdaemons.py @@ -171,3 +401,106 @@ $ wait $ hg -R clone id 000000000000 + $ cd .. 
+ +Stream repository with bookmarks +-------------------------------- + +(revert introduction of secret changeset) + + $ hg -R server phase --draft 'secret()' + +add a bookmark + + $ hg -R server bookmark -r tip some-bookmark + +clone it + +#if stream-legacy + $ hg clone --stream http://localhost:$HGPORT with-bookmarks + streaming all changes + 1027 files to transfer, 96.3 KB of data + transferred 96.3 KB in * seconds (*) (glob) + searching for changes + no changes found + updating to branch default + 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved +#endif +#if stream-bundle2 + $ hg clone --stream http://localhost:$HGPORT with-bookmarks + streaming all changes + 1033 files to transfer, 96.6 KB of data + transferred 96.6 KB in * seconds (* */sec) (glob) + updating to branch default + 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved +#endif + $ hg -R with-bookmarks bookmarks + some-bookmark 1:c17445101a72 + +Stream repository with phases +----------------------------- + +Clone as publishing + + $ hg -R server phase -r 'all()' + 0: draft + 1: draft + +#if stream-legacy + $ hg clone --stream http://localhost:$HGPORT phase-publish + streaming all changes + 1027 files to transfer, 96.3 KB of data + transferred 96.3 KB in * seconds (*) (glob) + searching for changes + no changes found + updating to branch default + 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved +#endif +#if stream-bundle2 + $ hg clone --stream http://localhost:$HGPORT phase-publish + streaming all changes + 1033 files to transfer, 96.6 KB of data + transferred 96.6 KB in * seconds (* */sec) (glob) + updating to branch default + 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved +#endif + $ hg -R phase-publish phase -r 'all()' + 0: public + 1: public + +Clone as non publishing + + $ cat << EOF >> server/.hg/hgrc + > [phases] + > publish = False + > EOF + $ killdaemons.py + $ hg -R server serve -p $HGPORT -d 
--pid-file=hg.pid + $ cat hg.pid > $DAEMON_PIDS + +#if stream-legacy + $ hg clone --stream http://localhost:$HGPORT phase-no-publish + streaming all changes + 1027 files to transfer, 96.3 KB of data + transferred 96.3 KB in * seconds (*) (glob) + searching for changes + no changes found + updating to branch default + 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ hg -R phase-no-publish phase -r 'all()' + 0: public + 1: public +#endif +#if stream-bundle2 + $ hg clone --stream http://localhost:$HGPORT phase-no-publish + streaming all changes + 1034 files to transfer, 96.7 KB of data + transferred 96.7 KB in * seconds (* */sec) (glob) + updating to branch default + 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ hg -R phase-no-publish phase -r 'all()' + 0: draft + 1: draft +#endif + + $ killdaemons.py diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-clone.t --- a/tests/test-clone.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-clone.t Mon Jan 22 17:53:02 2018 -0500 @@ -138,7 +138,7 @@ $ hg clone -q -U --config 'paths.foobar=a#0' foobar f $ hg -R f showconfig paths.default - $TESTTMP/a#0 (glob) + $TESTTMP/a#0 Use --pull: @@ -808,7 +808,7 @@ The destination should point to it $ cat share-dest1a/.hg/sharedpath; echo - $TESTTMP/share/b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1/.hg (glob) + $TESTTMP/share/b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1/.hg The destination should have bookmarks @@ -818,7 +818,7 @@ The default path should be the remote, not the share $ hg -R share-dest1a config paths.default - $TESTTMP/source1a (glob) + $TESTTMP/source1a Clone with existing share dir should result in pull + share @@ -839,7 +839,7 @@ b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1 $ cat share-dest1b/.hg/sharedpath; echo - $TESTTMP/share/b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1/.hg (glob) + $TESTTMP/share/b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1/.hg We only get bookmarks from the remote, not everything in the share @@ -850,7 +850,7 @@ 
Default path should be source, not share. $ hg -R share-dest1b config paths.default - $TESTTMP/source1b (glob) + $TESTTMP/source1b Checked out revision should be head of default branch diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-clonebundles.t --- a/tests/test-clonebundles.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-clonebundles.t Mon Jan 22 17:53:02 2018 -0500 @@ -32,8 +32,8 @@ $ cat server/access.log * - - [*] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob) - * - - [*] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob) - * - - [*] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bundlecaps=HG20%2Cbundle2%3DHG20%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=1&common=0000000000000000000000000000000000000000&heads=aaff8d2ffbbf07a46dd1f05d8ae7877e3f56e2a2&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob) + $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ (glob) + $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&$USUAL_BUNDLE_CAPS$&cg=1&common=0000000000000000000000000000000000000000&heads=aaff8d2ffbbf07a46dd1f05d8ae7877e3f56e2a2&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ (glob) Empty manifest file results in retrieval (the extension only checks if the manifest file exists) @@ -517,3 +517,30 @@ transferred 613 bytes in * seconds (*) (glob) searching for changes no changes found + +Test clone bundle retrieved through bundle2 + + $ cat << EOF >> $HGRCPATH + > [extensions] + > largefiles= + > EOF + $ killdaemons.py + $ hg -R server serve -d -p $HGPORT --pid-file hg.pid --accesslog access.log + $ cat hg.pid >> 
$DAEMON_PIDS + + $ hg -R server debuglfput gz-a.hg + f6eca29e25359f6a92f1ea64527cdcf1b5abe62a + + $ cat > server/.hg/clonebundles.manifest << EOF + > largefile://f6eca29e25359f6a92f1ea64527cdcf1b5abe62a BUNDLESPEC=gzip-v2 + > EOF + + $ hg clone -U http://localhost:$HGPORT largefile-provided --traceback + applying clone bundle from largefile://f6eca29e25359f6a92f1ea64527cdcf1b5abe62a + adding changesets + adding manifests + adding file changes + added 2 changesets with 2 changes to 2 files + finished applying clone bundle + searching for changes + no changes found diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-command-template.t --- a/tests/test-command-template.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-command-template.t Mon Jan 22 17:53:02 2018 -0500 @@ -204,6 +204,16 @@ $ hg log -r 'wdir()' -T '{manifest}\n' +Internal resources shouldn't be exposed (issue5699): + + $ hg log -r. -T '{cache}{ctx}{repo}{revcache}{templ}{ui}' + +Never crash on internal resource not available: + + $ hg --cwd .. debugtemplate '{"c0bebeef"|shortest}\n' + abort: template resource not available: ctx + [255] + Quoting for ui.logtemplate $ hg tip --config "ui.logtemplate={rev}\n" @@ -2751,6 +2761,25 @@ $ hg log -T '{date' hg: parse error at 1: unterminated template expansion [255] + $ hg log -T '{date(}' + hg: parse error at 7: not a prefix: end + [255] + $ hg log -T '{date)}' + hg: parse error at 5: invalid token + [255] + $ hg log -T '{date date}' + hg: parse error at 6: invalid token + [255] + + $ hg log -T '{}' + hg: parse error at 2: not a prefix: end + [255] + $ hg debugtemplate -v '{()}' + (template + (group + None)) + hg: parse error: missing argument + [255] Behind the scenes, this will throw TypeError @@ -2881,6 +2910,17 @@ @@ -0,0 +1,1 @@ +second +ui verbosity: + + $ hg log -l1 -T '{verbosity}\n' + + $ hg log -l1 -T '{verbosity}\n' --debug + debug + $ hg log -l1 -T '{verbosity}\n' --quiet + quiet + $ hg log -l1 -T '{verbosity}\n' --verbose + verbose + $ cd .. 
@@ -4064,6 +4104,48 @@ 5:13207e5a10d9fd28ec424934298e176197f2c67f, 4:bbe44766e73d5f11ed2177f1838de10c53ef3e74 +Invalid arguments passed to revset() + + $ hg log -T '{revset("%whatever", 0)}\n' + hg: parse error: unexpected revspec format character w + [255] + $ hg log -T '{revset("%lwhatever", files)}\n' + hg: parse error: unexpected revspec format character w + [255] + $ hg log -T '{revset("%s %s", 0)}\n' + hg: parse error: missing argument for revspec + [255] + $ hg log -T '{revset("", 0)}\n' + hg: parse error: too many revspec arguments specified + [255] + $ hg log -T '{revset("%s", 0, 1)}\n' + hg: parse error: too many revspec arguments specified + [255] + $ hg log -T '{revset("%", 0)}\n' + hg: parse error: incomplete revspec format character + [255] + $ hg log -T '{revset("%l", 0)}\n' + hg: parse error: incomplete revspec format character + [255] + $ hg log -T '{revset("%d", 'foo')}\n' + hg: parse error: invalid argument for revspec + [255] + $ hg log -T '{revset("%ld", files)}\n' + hg: parse error: invalid argument for revspec + [255] + $ hg log -T '{revset("%ls", 0)}\n' + hg: parse error: invalid argument for revspec + [255] + $ hg log -T '{revset("%b", 'foo')}\n' + hg: parse error: invalid argument for revspec + [255] + $ hg log -T '{revset("%lb", files)}\n' + hg: parse error: invalid argument for revspec + [255] + $ hg log -T '{revset("%r", 0)}\n' + hg: parse error: invalid argument for revspec + [255] + Test files function $ hg log -T "{rev}\n{join(files('*'), '\n')}\n" diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-commandserver.t --- a/tests/test-commandserver.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-commandserver.t Mon Jan 22 17:53:02 2018 -0500 @@ -207,6 +207,7 @@ devel.default-date=0 0 extensions.fsmonitor= (fsmonitor !) 
largefiles.usercache=$TESTTMP/.cache/largefiles + lfs.usercache=$TESTTMP/.cache/lfs ui.slash=True ui.interactive=False ui.mergemarkers=detailed diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-commit-amend.t --- a/tests/test-commit-amend.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-commit-amend.t Mon Jan 22 17:53:02 2018 -0500 @@ -16,6 +16,7 @@ $ hg phase -r . -p $ hg ci --amend abort: cannot amend public changesets + (see 'hg help phases' for details) [255] $ hg phase -r . -f -d @@ -40,7 +41,7 @@ $ HGEDITOR="\"sh\" \"`pwd`/editor.sh\"" hg commit --amend -m 'amend base1' pretxncommit 43f1ba15f28a50abf0aae529cf8a16bfced7b149 43f1ba15f28a tip - saved backup bundle to $TESTTMP/.hg/strip-backup/489edb5b847d-5ab4f721-amend.hg (glob) + saved backup bundle to $TESTTMP/.hg/strip-backup/489edb5b847d-5ab4f721-amend.hg $ echo 'pretxncommit.foo = ' >> $HGRCPATH $ hg diff -c . diff -r ad120869acf0 -r 43f1ba15f28a a @@ -98,7 +99,7 @@ Add new file along with modified existing file: $ hg ci --amend -m 'amend base1 new file' - saved backup bundle to $TESTTMP/.hg/strip-backup/43f1ba15f28a-007467c2-amend.hg (glob) + saved backup bundle to $TESTTMP/.hg/strip-backup/43f1ba15f28a-007467c2-amend.hg Remove file that was added in amended commit: (and test logfile option) @@ -107,7 +108,7 @@ $ hg rm b $ echo 'amend base1 remove new file' > ../logfile $ HGEDITOR="\"sh\" \"`pwd`/editor.sh\"" hg ci --amend --logfile ../logfile - saved backup bundle to $TESTTMP/.hg/strip-backup/c16295aaf401-1ada9901-amend.hg (glob) + saved backup bundle to $TESTTMP/.hg/strip-backup/c16295aaf401-1ada9901-amend.hg $ hg cat b b: no such file in rev 47343646fa3d @@ -127,7 +128,7 @@ 254 (changelog) 163 (manifests) 131 a - saved backup bundle to $TESTTMP/.hg/strip-backup/47343646fa3d-c2758885-amend.hg (glob) + saved backup bundle to $TESTTMP/.hg/strip-backup/47343646fa3d-c2758885-amend.hg 1 changesets found uncompressed size of bundle content: 250 (changelog) @@ -174,10 +175,10 @@ > EOF $ HGEDITOR="sh 
.hg/checkeditform.sh" hg ci --amend -u foo -d '1 0' HGEDITFORM=commit.amend.normal - saved backup bundle to $TESTTMP/.hg/strip-backup/401431e913a1-5e8e532c-amend.hg (glob) + saved backup bundle to $TESTTMP/.hg/strip-backup/401431e913a1-5e8e532c-amend.hg $ echo a >> a $ hg ci --amend -u foo -d '1 0' - saved backup bundle to $TESTTMP/.hg/strip-backup/d96b1d28ae33-677e0afb-amend.hg (glob) + saved backup bundle to $TESTTMP/.hg/strip-backup/d96b1d28ae33-677e0afb-amend.hg $ hg log -r . changeset: 1:a9a13940fc03 tag: tip @@ -271,7 +272,7 @@ 249 (changelog) 163 (manifests) 133 a - saved backup bundle to $TESTTMP/.hg/strip-backup/a9a13940fc03-7c2e8674-amend.hg (glob) + saved backup bundle to $TESTTMP/.hg/strip-backup/a9a13940fc03-7c2e8674-amend.hg 1 changesets found uncompressed size of bundle content: 257 (changelog) @@ -307,7 +308,7 @@ 257 (changelog) 163 (manifests) 133 a - saved backup bundle to $TESTTMP/.hg/strip-backup/64a124ba1b44-10374b8f-amend.hg (glob) + saved backup bundle to $TESTTMP/.hg/strip-backup/64a124ba1b44-10374b8f-amend.hg 1 changesets found uncompressed size of bundle content: 257 (changelog) @@ -334,13 +335,13 @@ $ hg book book1 $ hg book book2 $ hg ci --amend -m 'move bookmarks' - saved backup bundle to $TESTTMP/.hg/strip-backup/7892795b8e38-3fb46217-amend.hg (glob) + saved backup bundle to $TESTTMP/.hg/strip-backup/7892795b8e38-3fb46217-amend.hg $ hg book book1 1:8311f17e2616 * book2 1:8311f17e2616 $ echo a >> a $ hg ci --amend -m 'move bookmarks' - saved backup bundle to $TESTTMP/.hg/strip-backup/8311f17e2616-f0504fe3-amend.hg (glob) + saved backup bundle to $TESTTMP/.hg/strip-backup/8311f17e2616-f0504fe3-amend.hg $ hg book book1 1:a3b65065808c * book2 1:a3b65065808c @@ -374,7 +375,7 @@ $ hg branch default -f marked working directory as branch default $ hg ci --amend -m 'back to default' - saved backup bundle to $TESTTMP/.hg/strip-backup/f8339a38efe1-c18453c9-amend.hg (glob) + saved backup bundle to 
$TESTTMP/.hg/strip-backup/f8339a38efe1-c18453c9-amend.hg $ hg branches default 2:9c07515f2650 @@ -390,7 +391,7 @@ $ echo b >> b $ hg ci -mb $ hg ci --amend --close-branch -m 'closing branch foo' - saved backup bundle to $TESTTMP/.hg/strip-backup/c962248fa264-54245dc7-amend.hg (glob) + saved backup bundle to $TESTTMP/.hg/strip-backup/c962248fa264-54245dc7-amend.hg Same thing, different code path: @@ -399,7 +400,7 @@ reopening closed branch head 4 $ echo b >> b $ hg ci --amend --close-branch - saved backup bundle to $TESTTMP/.hg/strip-backup/027371728205-b900d9fa-amend.hg (glob) + saved backup bundle to $TESTTMP/.hg/strip-backup/027371728205-b900d9fa-amend.hg $ hg branches default 2:9c07515f2650 @@ -420,7 +421,7 @@ $ hg ci -m 'b -> c' $ hg mv c d $ hg ci --amend -m 'b -> d' - saved backup bundle to $TESTTMP/.hg/strip-backup/42f3f27a067d-f23cc9f7-amend.hg (glob) + saved backup bundle to $TESTTMP/.hg/strip-backup/42f3f27a067d-f23cc9f7-amend.hg $ hg st --rev '.^' --copies d A d b @@ -428,7 +429,7 @@ $ hg ci -m 'e = d' $ hg cp e f $ hg ci --amend -m 'f = d' - saved backup bundle to $TESTTMP/.hg/strip-backup/9198f73182d5-251d584a-amend.hg (glob) + saved backup bundle to $TESTTMP/.hg/strip-backup/9198f73182d5-251d584a-amend.hg $ hg st --rev '.^' --copies f A f d @@ -439,7 +440,7 @@ $ hg cp a f $ mv f.orig f $ hg ci --amend -m replacef - saved backup bundle to $TESTTMP/.hg/strip-backup/f0993ab6b482-eda301bf-amend.hg (glob) + saved backup bundle to $TESTTMP/.hg/strip-backup/f0993ab6b482-eda301bf-amend.hg $ hg st --change . --copies $ hg log -r . --template "{file_copies}\n" @@ -451,7 +452,7 @@ adding g $ hg mv g h $ hg ci --amend - saved backup bundle to $TESTTMP/.hg/strip-backup/58585e3f095c-0f5ebcda-amend.hg (glob) + saved backup bundle to $TESTTMP/.hg/strip-backup/58585e3f095c-0f5ebcda-amend.hg $ hg st --change . --copies h A h $ hg log -r . 
--template "{file_copies}\n" @@ -471,11 +472,11 @@ $ echo a >> a $ hg ci -ma $ hg ci --amend -m "a'" - saved backup bundle to $TESTTMP/.hg/strip-backup/39a162f1d65e-9dfe13d8-amend.hg (glob) + saved backup bundle to $TESTTMP/.hg/strip-backup/39a162f1d65e-9dfe13d8-amend.hg $ hg log -r . --template "{branch}\n" a $ hg ci --amend -m "a''" - saved backup bundle to $TESTTMP/.hg/strip-backup/d5ca7b1ac72b-0b4c1a34-amend.hg (glob) + saved backup bundle to $TESTTMP/.hg/strip-backup/d5ca7b1ac72b-0b4c1a34-amend.hg $ hg log -r . --template "{branch}\n" a @@ -492,7 +493,7 @@ $ hg graft 12 grafting 12:2647734878ef "fork" (tip) $ hg ci --amend -m 'graft amend' - saved backup bundle to $TESTTMP/.hg/strip-backup/fe8c6f7957ca-25638666-amend.hg (glob) + saved backup bundle to $TESTTMP/.hg/strip-backup/fe8c6f7957ca-25638666-amend.hg $ hg log -r . --debug | grep extra extra: amend_source=fe8c6f7957ca1665ed77496ed7a07657d469ac60 extra: branch=a @@ -604,6 +605,7 @@ babar $ hg commit --amend + 1 new orphan changesets $ hg log -r 'orphan()' changeset: 16:37973c7e0b61 branch: a @@ -1111,7 +1113,7 @@ marked working directory as branch newdirname (branches are permanent and global, did you want a bookmark?) $ hg mv olddirname newdirname - moving olddirname/commonfile.py to newdirname/commonfile.py (glob) + moving olddirname/commonfile.py to newdirname/commonfile.py $ hg ci -m rename $ hg update default @@ -1129,7 +1131,7 @@ $ hg ci -m add $ $ hg debugrename newdirname/newfile.py - newdirname/newfile.py renamed from olddirname/newfile.py:690b295714aed510803d3020da9c70fca8336def (glob) + newdirname/newfile.py renamed from olddirname/newfile.py:690b295714aed510803d3020da9c70fca8336def $ hg status -C --change . 
A newdirname/newfile.py $ hg status -C --rev 1 @@ -1148,7 +1150,7 @@ $ echo a >> newdirname/commonfile.py $ hg ci --amend -m bug $ hg debugrename newdirname/newfile.py - newdirname/newfile.py renamed from olddirname/newfile.py:690b295714aed510803d3020da9c70fca8336def (glob) + newdirname/newfile.py renamed from olddirname/newfile.py:690b295714aed510803d3020da9c70fca8336def $ hg debugindex newdirname/newfile.py rev offset length delta linkrev nodeid p1 p2 0 0 89 -1 3 34a4d536c0c0 000000000000 000000000000 diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-commit-interactive-curses.t --- a/tests/test-commit-interactive-curses.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-commit-interactive-curses.t Mon Jan 22 17:53:02 2018 -0500 @@ -206,7 +206,7 @@ > X > EOF $ hg commit -i -m "newly added file" -d "0 0" - saved backup bundle to $TESTTMP/a/.hg/strip-backup/2b0e9be4d336-3cf0bc8c-amend.hg (glob) + saved backup bundle to $TESTTMP/a/.hg/strip-backup/2b0e9be4d336-3cf0bc8c-amend.hg $ hg diff -c . diff -r a6735021574d -r c1d239d165ae x --- /dev/null Thu Jan 01 00:00:00 1970 +0000 diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-commit-unresolved.t --- a/tests/test-commit-unresolved.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-commit-unresolved.t Mon Jan 22 17:53:02 2018 -0500 @@ -21,13 +21,63 @@ $ commit "D" 3 created new head +State before the merge + + $ hg status + $ hg id + e45016d2b3d3 tip + $ hg summary + parent: 3:e45016d2b3d3 tip + D + branch: default + commit: (clean) + update: 2 new changesets, 2 branch heads (merge) + phases: 4 draft + +Testing the abort functionality first in case of conflicts + + $ hg merge --abort + abort: no merge in progress + [255] + $ hg merge + merging A + warning: conflicts while merging A! 
(edit, then use 'hg resolve --mark') + 1 files updated, 0 files merged, 0 files removed, 1 files unresolved + use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon + [1] + + $ hg merge --abort e4501 + abort: cannot specify a node with --abort + [255] + $ hg merge --abort --rev e4501 + abort: cannot specify both --rev and --abort + [255] + + $ hg merge --abort + aborting the merge, updating back to e45016d2b3d3 + 1 files updated, 0 files merged, 1 files removed, 0 files unresolved + +Checking that we got back in the same state + + $ hg status + ? A.orig + $ hg id + e45016d2b3d3 tip + $ hg summary + parent: 3:e45016d2b3d3 tip + D + branch: default + commit: 1 unknown (clean) + update: 2 new changesets, 2 branch heads (merge) + phases: 4 draft + Merging a conflict araises $ hg merge merging A warning: conflicts while merging A! (edit, then use 'hg resolve --mark') 1 files updated, 0 files merged, 0 files removed, 1 files unresolved - use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon + use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon [1] Correct the conflict without marking the file as resolved @@ -52,7 +102,7 @@ merging A warning: conflicts while merging A! (edit, then use 'hg resolve --mark') 1 files updated, 0 files merged, 0 files removed, 1 files unresolved - use 'hg resolve' to retry unresolved file merges or 'hg update -C .' 
to abandon + use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon [1] $ hg rm --force A $ hg commit -m merged @@ -64,4 +114,28 @@ $ hg commit -m merged created new head +Testing the abort functionality in case of no conflicts + + $ hg update -C 0 + 1 files updated, 0 files merged, 1 files removed, 0 files unresolved + $ addcommit "E" 4 + created new head + $ hg id + 68352a18a7c4 tip + + $ hg merge -r 4 + 2 files updated, 0 files merged, 0 files removed, 0 files unresolved + (branch merge, don't forget to commit) + + $ hg merge --preview --abort + abort: cannot specify --preview with --abort + [255] + + $ hg merge --abort + aborting the merge, updating back to 68352a18a7c4 + 1 files updated, 0 files merged, 1 files removed, 0 files unresolved + + $ hg id + 68352a18a7c4 tip + $ cd .. diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-commit.t --- a/tests/test-commit.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-commit.t Mon Jan 22 17:53:02 2018 -0500 @@ -61,7 +61,7 @@ $ mkdir dir $ echo boo > dir/file $ hg add - adding dir/file (glob) + adding dir/file $ hg -v commit -m commit-9 dir committing files: dir/file @@ -180,8 +180,8 @@ $ mkdir bar $ echo bar > bar/bar $ hg add - adding bar/bar (glob) - adding foo/foo (glob) + adding bar/bar + adding foo/foo $ HGEDITOR=cat hg ci -e -m commit-subdir-1 foo commit-subdir-1 @@ -648,7 +648,8 @@ > u = uimod.ui.load() > r = hg.repository(u, '.') > def filectxfn(repo, memctx, path): - > return context.memfilectx(repo, path, '[hooks]\nupdate = echo owned') + > return context.memfilectx(repo, memctx, path, + > '[hooks]\nupdate = echo owned') > c = context.memctx(r, [r['tip'].node(), node.nullid], > 'evil', [notrc], filectxfn, 0) > r.commitctx(c) @@ -673,14 +674,15 @@ > u = uimod.ui.load() > r = hg.repository(u, '.') > def filectxfn(repo, memctx, path): - > return context.memfilectx(repo, path, '[hooks]\nupdate = echo owned') + > return context.memfilectx(repo, memctx, path, + > '[hooks]\nupdate = echo 
owned') > c = context.memctx(r, [r['tip'].node(), node.nullid], > 'evil', [notrc], filectxfn, 0) > r.commitctx(c) > EOF $ $PYTHON evil-commit.py $ hg co --clean tip - abort: path contains illegal component: HG~1/hgrc (glob) + abort: path contains illegal component: HG~1/hgrc [255] $ hg rollback -f @@ -692,14 +694,15 @@ > u = uimod.ui.load() > r = hg.repository(u, '.') > def filectxfn(repo, memctx, path): - > return context.memfilectx(repo, path, '[hooks]\nupdate = echo owned') + > return context.memfilectx(repo, memctx, path, + > '[hooks]\nupdate = echo owned') > c = context.memctx(r, [r['tip'].node(), node.nullid], > 'evil', [notrc], filectxfn, 0) > r.commitctx(c) > EOF $ $PYTHON evil-commit.py $ hg co --clean tip - abort: path contains illegal component: HG8B6C~2/hgrc (glob) + abort: path contains illegal component: HG8B6C~2/hgrc [255] # test that an unmodified commit template message aborts diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-completion.t --- a/tests/test-completion.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-completion.t Mon Jan 22 17:53:02 2018 -0500 @@ -72,6 +72,7 @@ debugapplystreamclonebundle debugbuilddag debugbundle + debugcapabilities debugcheckstate debugcolor debugcommands @@ -84,8 +85,10 @@ debugdeltachain debugdirstate debugdiscovery + debugdownload debugextensions debugfileset + debugformat debugfsinfo debuggetbundle debugignore @@ -226,7 +229,7 @@ forget: include, exclude init: ssh, remotecmd, insecure log: follow, follow-first, date, copies, keyword, rev, line-range, removed, only-merges, user, only-branch, branch, prune, patch, git, limit, no-merges, stat, graph, style, template, include, exclude - merge: force, rev, preview, tool + merge: force, rev, preview, abort, tool pull: update, force, rev, bookmark, branch, ssh, remotecmd, insecure push: force, rev, bookmark, branch, new-branch, pushvars, ssh, remotecmd, insecure remove: after, force, subrepos, include, exclude @@ -239,7 +242,7 @@ backout: merge, commit, no-commit, 
parent, rev, edit, tool, include, exclude, message, logfile, date, user bisect: reset, good, bad, skip, extend, command, noupdate bookmarks: force, rev, delete, rename, inactive, template - branch: force, clean + branch: force, clean, rev branches: active, closed, template bundle: force, rev, branch, base, all, type, ssh, remotecmd, insecure cat: output, rev, decode, include, exclude, template @@ -249,6 +252,7 @@ debugapplystreamclonebundle: debugbuilddag: mergeable-file, overwritten-file, new-file debugbundle: all, part-type, spec + debugcapabilities: debugcheckstate: debugcolor: style debugcommands: @@ -259,9 +263,11 @@ debugdate: extended debugdeltachain: changelog, manifest, dir, template debugdirstate: nodates, datesort - debugdiscovery: old, nonheads, ssh, remotecmd, insecure + debugdiscovery: old, nonheads, rev, ssh, remotecmd, insecure + debugdownload: output debugextensions: template debugfileset: rev + debugformat: template debugfsinfo: debuggetbundle: head, common, type debugignore: @@ -270,7 +276,7 @@ debuginstall: template debugknown: debuglabelcomplete: - debuglocks: force-lock, force-wlock + debuglocks: force-lock, force-wlock, set-lock, set-wlock debugmergestate: debugnamecomplete: debugobsolete: flags, record-parents, rev, exclusive, index, delete, date, user, template diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-config.t --- a/tests/test-config.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-config.t Mon Jan 22 17:53:02 2018 -0500 @@ -7,7 +7,7 @@ > novaluekey > EOF $ hg showconfig - hg: parse error at $TESTTMP/.hg/hgrc:1: novaluekey (glob) + hg: parse error at $TESTTMP/.hg/hgrc:1: novaluekey [255] Invalid syntax: no key @@ -16,7 +16,7 @@ > =nokeyvalue > EOF $ hg showconfig - hg: parse error at $TESTTMP/.hg/hgrc:1: =nokeyvalue (glob) + hg: parse error at $TESTTMP/.hg/hgrc:1: =nokeyvalue [255] Test hint about invalid syntax from leading white space @@ -25,7 +25,7 @@ > key=value > EOF $ hg showconfig - hg: parse error at $TESTTMP/.hg/hgrc:1: 
key=value (glob) + hg: parse error at $TESTTMP/.hg/hgrc:1: key=value unexpected leading whitespace [255] @@ -34,7 +34,7 @@ > key=value > EOF $ hg showconfig - hg: parse error at $TESTTMP/.hg/hgrc:1: [section] (glob) + hg: parse error at $TESTTMP/.hg/hgrc:1: [section] unexpected leading whitespace [255] diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-conflict.t --- a/tests/test-conflict.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-conflict.t Mon Jan 22 17:53:02 2018 -0500 @@ -38,7 +38,7 @@ merging a warning: conflicts while merging a! (edit, then use 'hg resolve --mark') 0 files updated, 0 files merged, 0 files removed, 1 files unresolved - use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon + use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon [1] $ hg id @@ -91,7 +91,7 @@ merging a warning: conflicts while merging a! (edit, then use 'hg resolve --mark') 0 files updated, 0 files merged, 0 files removed, 1 files unresolved - use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon + use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon [1] $ cat a @@ -182,7 +182,7 @@ merging a warning: conflicts while merging a! (edit, then use 'hg resolve --mark') 0 files updated, 0 files merged, 0 files removed, 1 files unresolved - use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon + use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon [1] $ cat a @@ -207,7 +207,7 @@ merging a warning: conflicts while merging a! (edit, then use 'hg resolve --mark') 0 files updated, 0 files merged, 0 files removed, 1 files unresolved - use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon + use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon [1] $ cat a Small Mathematical Series. @@ -254,7 +254,7 @@ merging a warning: conflicts while merging a! 
(edit, then use 'hg resolve --mark') 1 files updated, 0 files merged, 0 files removed, 1 files unresolved - use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon + use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon [1] $ hg resolve --tool :merge-other a merging a diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-context.py --- a/tests/test-context.py Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-context.py Mon Jan 22 17:53:02 2018 -0500 @@ -32,7 +32,7 @@ # test memctx with non-ASCII commit message def filectxfn(repo, memctx, path): - return context.memfilectx(repo, "foo", "") + return context.memfilectx(repo, memctx, "foo", "") ctx = context.memctx(repo, ['tip', None], encoding.tolocal("Gr\xc3\xbcezi!"), @@ -49,7 +49,7 @@ data, flags = fctx.data(), fctx.flags() if f == 'foo': data += 'bar\n' - return context.memfilectx(repo, f, data, 'l' in flags, 'x' in flags) + return context.memfilectx(repo, memctx, f, data, 'l' in flags, 'x' in flags) ctxa = repo.changectx(0) ctxb = context.memctx(repo, [ctxa.node(), None], "test diff", ["foo"], diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-contrib-check-code.t --- a/tests/test-contrib-check-code.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-contrib-check-code.t Mon Jan 22 17:53:02 2018 -0500 @@ -173,6 +173,17 @@ don't use old-style two-argument raise, use Exception(message) [1] + $ cat < tab.t + > indent + > > heredoc + > EOF + $ "$check_code" tab.t + tab.t:1: + > indent + don't use tabs to indent + [1] + $ rm tab.t + $ cat > rst.py < """problematic rst text > diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-contrib-perf.t --- a/tests/test-contrib-perf.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-contrib-perf.t Mon Jan 22 17:53:02 2018 -0500 @@ -55,6 +55,8 @@ benchmark parsing bookmarks from disk to memory perfbranchmap benchmark the update of a branchmap + perfbundleread + Benchmark reading of bundle files. 
perfcca (no help text available) perfchangegroupchangelog Benchmark producing a changelog group for a changegroup. @@ -173,3 +175,7 @@ $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py; > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) | > "$TESTDIR"/check-perf-code.py contrib/perf.py + contrib/perf.py:\d+: (re) + > from mercurial import ( + import newer module separately in try clause for early Mercurial + [1] diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-convert-authormap.t --- a/tests/test-convert-authormap.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-convert-authormap.t Mon Jan 22 17:53:02 2018 -0500 @@ -27,7 +27,7 @@ sorting... converting... 0 foo - writing author map file $TESTTMP/new/.hg/authormap (glob) + writing author map file $TESTTMP/new/.hg/authormap $ cat new/.hg/authormap user name=Long User Name $ hg -Rnew log @@ -44,7 +44,7 @@ $ hg init new $ mv authormap.txt new/.hg/authormap $ hg convert orig new - ignoring bad line in author map file $TESTTMP/new/.hg/authormap: this line is ignored (glob) + ignoring bad line in author map file $TESTTMP/new/.hg/authormap: this line is ignored scanning source... sorting... converting... diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-convert-cvs.t --- a/tests/test-convert-cvs.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-convert-cvs.t Mon Jan 22 17:53:02 2018 -0500 @@ -80,7 +80,12 @@ since it does not use DST (unlike other U.S. time zones) and is always a fixed difference from UTC. - $ TZ=US/Hawaii hg convert --config convert.localtimezone=True src src-hg +This choice is limited to work on Linux environments. At least on +FreeBSD 11 this timezone is not known. A better choice is +TZ=Pacific/Johnston. On Linux "US/Hawaii" is just a symlink to this +name and also it is known on FreeBSD and on Solaris. 
+ + $ TZ=Pacific/Johnston hg convert --config convert.localtimezone=True src src-hg initializing destination src-hg repository connecting to $TESTTMP/cvsrepo scanning source... @@ -170,7 +175,7 @@ convert again - $ TZ=US/Hawaii hg convert --config convert.localtimezone=True src src-hg + $ TZ=Pacific/Johnston hg convert --config convert.localtimezone=True src src-hg connecting to $TESTTMP/cvsrepo scanning source... collecting CVS rlog @@ -231,7 +236,7 @@ convert again - $ TZ=US/Hawaii hg convert --config convert.localtimezone=True src src-hg + $ TZ=Pacific/Johnston hg convert --config convert.localtimezone=True src src-hg connecting to $TESTTMP/cvsrepo scanning source... collecting CVS rlog @@ -249,7 +254,7 @@ convert again with --filemap - $ TZ=US/Hawaii hg convert --config convert.localtimezone=True --filemap filemap src src-filemap + $ TZ=Pacific/Johnston hg convert --config convert.localtimezone=True --filemap filemap src src-filemap connecting to $TESTTMP/cvsrepo scanning source... collecting CVS rlog @@ -296,7 +301,7 @@ convert again - $ TZ=US/Hawaii hg convert --config convert.cvsps.fuzz=2 --config convert.localtimezone=True src src-hg + $ TZ=Pacific/Johnston hg convert --config convert.cvsps.fuzz=2 --config convert.localtimezone=True src src-hg connecting to $TESTTMP/cvsrepo scanning source... 
collecting CVS rlog diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-convert-filemap.t --- a/tests/test-convert-filemap.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-convert-filemap.t Mon Jan 22 17:53:02 2018 -0500 @@ -637,7 +637,7 @@ $ cd namedbranch $ hg --config extensions.mq= strip tip 1 files updated, 0 files merged, 0 files removed, 0 files unresolved - saved backup bundle to $TESTTMP/namedbranch/.hg/strip-backup/73899bcbe45c-92adf160-backup.hg (glob) + saved backup bundle to $TESTTMP/namedbranch/.hg/strip-backup/73899bcbe45c-92adf160-backup.hg $ hg up foo 2 files updated, 0 files merged, 0 files removed, 0 files unresolved $ hg merge default diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-convert-git.t --- a/tests/test-convert-git.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-convert-git.t Mon Jan 22 17:53:02 2018 -0500 @@ -936,7 +936,7 @@ $ COMMIT_OBJ=1c/0ce3c5886f83a1d78a7b517cdff5cf9ca17bdd $ mv git-repo4/.git/objects/$COMMIT_OBJ git-repo4/.git/objects/$COMMIT_OBJ.tmp $ hg convert git-repo4 git-repo4-broken-hg 2>&1 | grep 'abort:' - abort: cannot retrieve number of commits in $TESTTMP/git-repo4/.git (glob) + abort: cannot retrieve number of commits in $TESTTMP/git-repo4/.git $ mv git-repo4/.git/objects/$COMMIT_OBJ.tmp git-repo4/.git/objects/$COMMIT_OBJ damage git repository by renaming a blob object diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-convert-svn-encoding.t --- a/tests/test-convert-svn-encoding.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-convert-svn-encoding.t Mon Jan 22 17:53:02 2018 -0500 @@ -12,7 +12,7 @@ $ hg --debug convert svn-repo A-hg --config progress.debug=1 initializing destination A-hg repository - reparent to file://*/svn-repo (glob) + reparent to file:/*/$TESTTMP/svn-repo (glob) run hg sink pre-conversion action scanning source... 
found trunk at 'trunk' @@ -21,7 +21,7 @@ found branch branch\xc3\xa9 at 5 (esc) found branch branch\xc3\xa9e at 6 (esc) scanning: 1/4 revisions (25.00%) - reparent to file://*/svn-repo/trunk (glob) + reparent to file:/*/$TESTTMP/svn-repo/trunk (glob) fetching revision log for "/trunk" from 4 to 0 parsing revision 4 (2 changes) parsing revision 3 (4 changes) @@ -31,18 +31,18 @@ '/branches' is not under '/trunk', ignoring '/tags' is not under '/trunk', ignoring scanning: 2/4 revisions (50.00%) - reparent to file://*/svn-repo/branches/branch%C3%A9 (glob) + reparent to file:/*/$TESTTMP/svn-repo/branches/branch%C3%A9 (glob) fetching revision log for "/branches/branch\xc3\xa9" from 5 to 0 (esc) parsing revision 5 (1 changes) - reparent to file://*/svn-repo (glob) - reparent to file://*/svn-repo/branches/branch%C3%A9 (glob) + reparent to file:/*/$TESTTMP/svn-repo (glob) + reparent to file:/*/$TESTTMP/svn-repo/branches/branch%C3%A9 (glob) found parent of branch /branches/branch\xc3\xa9 at 4: /trunk (esc) scanning: 3/4 revisions (75.00%) - reparent to file://*/svn-repo/branches/branch%C3%A9e (glob) + reparent to file:/*/$TESTTMP/svn-repo/branches/branch%C3%A9e (glob) fetching revision log for "/branches/branch\xc3\xa9e" from 6 to 0 (esc) parsing revision 6 (1 changes) - reparent to file://*/svn-repo (glob) - reparent to file://*/svn-repo/branches/branch%C3%A9e (glob) + reparent to file:/*/$TESTTMP/svn-repo (glob) + reparent to file:/*/$TESTTMP/svn-repo/branches/branch%C3%A9e (glob) found parent of branch /branches/branch\xc3\xa9e at 5: /branches/branch\xc3\xa9 (esc) scanning: 4/4 revisions (100.00%) scanning: 5/4 revisions (125.00%) @@ -57,7 +57,7 @@ 4 hello source: svn:afeb9c47-92ff-4c0c-9f72-e1f6eb8ac9af/trunk@2 converting: 1/6 revisions (16.67%) - reparent to file://*/svn-repo/trunk (glob) + reparent to file:/*/$TESTTMP/svn-repo/trunk (glob) scanning paths: /trunk/\xc3\xa0 0/3 paths (0.00%) (esc) scanning paths: /trunk/\xc3\xa0/e\xcc\x81 1/3 paths (33.33%) (esc) scanning 
paths: /trunk/\xc3\xa9 2/3 paths (66.67%) (esc) @@ -74,14 +74,14 @@ converting: 2/6 revisions (33.33%) scanning paths: /trunk/\xc3\xa0 0/4 paths (0.00%) (esc) gone from -1 - reparent to file://*/svn-repo (glob) - reparent to file://*/svn-repo/trunk (glob) + reparent to file:/*/$TESTTMP/svn-repo (glob) + reparent to file:/*/$TESTTMP/svn-repo/trunk (glob) scanning paths: /trunk/\xc3\xa8 1/4 paths (25.00%) (esc) copied to \xc3\xa8 from \xc3\xa9@2 (esc) scanning paths: /trunk/\xc3\xa9 2/4 paths (50.00%) (esc) gone from -1 - reparent to file://*/svn-repo (glob) - reparent to file://*/svn-repo/trunk (glob) + reparent to file:/*/$TESTTMP/svn-repo (glob) + reparent to file:/*/$TESTTMP/svn-repo/trunk (glob) scanning paths: /trunk/\xc3\xb9 3/4 paths (75.00%) (esc) mark /trunk/\xc3\xb9 came from \xc3\xa0:2 (esc) getting files: \xc3\xa0/e\xcc\x81 1/4 files (25.00%) (esc) @@ -101,12 +101,12 @@ converting: 3/6 revisions (50.00%) scanning paths: /trunk/\xc3\xa8 0/2 paths (0.00%) (esc) gone from -1 - reparent to file://*/svn-repo (glob) - reparent to file://*/svn-repo/trunk (glob) + reparent to file:/*/$TESTTMP/svn-repo (glob) + reparent to file:/*/$TESTTMP/svn-repo/trunk (glob) scanning paths: /trunk/\xc3\xb9 1/2 paths (50.00%) (esc) gone from -1 - reparent to file://*/svn-repo (glob) - reparent to file://*/svn-repo/trunk (glob) + reparent to file:/*/$TESTTMP/svn-repo (glob) + reparent to file:/*/$TESTTMP/svn-repo/trunk (glob) getting files: \xc3\xa8 1/2 files (50.00%) (esc) getting files: \xc3\xb9/e\xcc\x81 2/2 files (100.00%) (esc) committing files: @@ -116,21 +116,21 @@ 1 branch to branch? 
source: svn:afeb9c47-92ff-4c0c-9f72-e1f6eb8ac9af/branches/branch?@5 converting: 4/6 revisions (66.67%) - reparent to file://*/svn-repo/branches/branch%C3%A9 (glob) + reparent to file:/*/$TESTTMP/svn-repo/branches/branch%C3%A9 (glob) scanning paths: /branches/branch\xc3\xa9 0/1 paths (0.00%) (esc) committing changelog updating the branch cache 0 branch to branch?e source: svn:afeb9c47-92ff-4c0c-9f72-e1f6eb8ac9af/branches/branch?e@6 converting: 5/6 revisions (83.33%) - reparent to file://*/svn-repo/branches/branch%C3%A9e (glob) + reparent to file:/*/$TESTTMP/svn-repo/branches/branch%C3%A9e (glob) scanning paths: /branches/branch\xc3\xa9e 0/1 paths (0.00%) (esc) committing changelog updating the branch cache - reparent to file://*/svn-repo (glob) - reparent to file://*/svn-repo/branches/branch%C3%A9e (glob) - reparent to file://*/svn-repo (glob) - reparent to file://*/svn-repo/branches/branch%C3%A9e (glob) + reparent to file:/*/$TESTTMP/svn-repo (glob) + reparent to file:/*/$TESTTMP/svn-repo/branches/branch%C3%A9e (glob) + reparent to file:/*/$TESTTMP/svn-repo (glob) + reparent to file:/*/$TESTTMP/svn-repo/branches/branch%C3%A9e (glob) updating tags committing files: .hgtags diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-convert-svn-sink.t --- a/tests/test-convert-svn-sink.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-convert-svn-sink.t Mon Jan 22 17:53:02 2018 -0500 @@ -48,8 +48,8 @@ 0 modify a file $ svnupanddisplay a-hg-wc 2 2 1 test d1 - 2 1 test d1/d2 (glob) - 2 1 test d1/d2/b (glob) + 2 1 test d1/d2 + 2 1 test d1/d2/b 2 2 test . 2 2 test a revision: 2 @@ -89,8 +89,8 @@ 0 rename a file $ svnupanddisplay a-hg-wc 1 3 1 test d1 - 3 1 test d1/d2 (glob) - 3 1 test d1/d2/b (glob) + 3 1 test d1/d2 + 3 1 test d1/d2/b 3 3 test . 3 3 test b revision: 3 @@ -124,8 +124,8 @@ 0 copy a file $ svnupanddisplay a-hg-wc 1 4 1 test d1 - 4 1 test d1/d2 (glob) - 4 1 test d1/d2/b (glob) + 4 1 test d1/d2 + 4 1 test d1/d2/b 4 3 test b 4 4 test . 
4 4 test c @@ -161,8 +161,8 @@ 0 remove a file $ svnupanddisplay a-hg-wc 1 5 1 test d1 - 5 1 test d1/d2 (glob) - 5 1 test d1/d2/b (glob) + 5 1 test d1/d2 + 5 1 test d1/d2/b 5 4 test c 5 5 test . revision: 5 @@ -203,8 +203,8 @@ 0 make a file executable $ svnupanddisplay a-hg-wc 1 6 1 test d1 - 6 1 test d1/d2 (glob) - 6 1 test d1/d2/b (glob) + 6 1 test d1/d2 + 6 1 test d1/d2/b 6 6 test . 6 6 test c revision: 6 @@ -256,7 +256,7 @@ $ hg --cwd a up 5 0 files updated, 0 files merged, 1 files removed, 0 files unresolved $ hg --cwd a --config extensions.strip= strip -r 6 - saved backup bundle to $TESTTMP/a/.hg/strip-backup/bd4f7b7a7067-ed505e42-backup.hg (glob) + saved backup bundle to $TESTTMP/a/.hg/strip-backup/bd4f7b7a7067-ed505e42-backup.hg #endif @@ -312,7 +312,7 @@ $ svnupanddisplay a-hg-wc 1 1 1 test . 1 1 test d1 - 1 1 test d1/a (glob) + 1 1 test d1/a revision: 1 author: test msg: add executable file in new directory @@ -337,10 +337,10 @@ 0 copy file to new directory $ svnupanddisplay a-hg-wc 1 2 1 test d1 - 2 1 test d1/a (glob) + 2 1 test d1/a 2 2 test . 2 2 test d2 - 2 2 test d2/a (glob) + 2 2 test d2/a revision: 2 author: test msg: copy file to new directory @@ -384,7 +384,7 @@ merging b warning: conflicts while merging b! (edit, then use 'hg resolve --mark') 2 files updated, 0 files merged, 0 files removed, 1 files unresolved - use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon + use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon [1] $ hg --cwd b revert -r 2 b $ hg --cwd b resolve -m b diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-convert-svn-source.t --- a/tests/test-convert-svn-source.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-convert-svn-source.t Mon Jan 22 17:53:02 2018 -0500 @@ -32,8 +32,8 @@ $ cd .. 
$ svn import -m "init projB" projB "$SVNREPOURL/proj%20B" | filter_svn_output | sort - Adding projB/mytrunk (glob) - Adding projB/tags (glob) + Adding projB/mytrunk + Adding projB/tags Committed revision 1. Update svn repository @@ -253,3 +253,72 @@ abort: svn-empty: missing or unsupported repository [255] $ mv format svn-empty/format + +enable svn subrepos + + $ cat >> $HGRCPATH < [subrepos] + > svn:allowed = true + > EOF + +try converting when we have an svn subrepo and a merge in hg superrepo (issue5657) + + $ cd "$TESTTMP" + $ hg init withmerge + $ cd withmerge + $ echo "subrepo = [svn]$SVNREPOURL" >.hgsub + $ hg add .hgsub + $ svn checkout "$SVNREPOURL" subrepo | sort + A subrepo/proj B + A subrepo/proj B/mytrunk + A subrepo/proj B/mytrunk/letter .txt + A subrepo/proj B/mytrunk/letter2.txt + A subrepo/proj B/tags + A subrepo/proj B/tags/v0.1 + A subrepo/proj B/tags/v0.1/letter .txt + A subrepo/proj B/tags/v0.2 + A subrepo/proj B/tags/v0.2/letter .txt + A subrepo/proj B/tags/v0.2/letter2.txt + Checked out revision 9. + $ hg ci -m "Adding svn subrepo" + $ touch file1.txt + $ hg add file1.txt + $ hg ci -m "Adding file1" + $ hg up 0 + 0 files updated, 0 files merged, 1 files removed, 0 files unresolved + $ touch file2.txt + $ hg add file2.txt + $ hg ci -m "Adding file2" + created new head + $ hg merge 1 + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + (branch merge, don't forget to commit) + $ hg ci -m "merged" + $ cd .. + $ hg --config extensions.convert= convert withmerge withmerge-converted + initializing destination withmerge-converted repository + scanning source... + sorting... + converting... 
+ 3 Adding svn subrepo + 2 Adding file1 + 1 Adding file2 + 0 merged + $ cd withmerge-converted + $ hg up | sort + 4 files updated, 0 files merged, 0 files removed, 0 files unresolved + A subrepo/proj B + A subrepo/proj B/mytrunk + A subrepo/proj B/mytrunk/letter .txt + A subrepo/proj B/mytrunk/letter2.txt + A subrepo/proj B/tags + A subrepo/proj B/tags/v0.1 + A subrepo/proj B/tags/v0.1/letter .txt + A subrepo/proj B/tags/v0.2 + A subrepo/proj B/tags/v0.2/letter .txt + A subrepo/proj B/tags/v0.2/letter2.txt + Checked out revision 9. + $ ls + file1.txt + file2.txt + subrepo diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-convert.t --- a/tests/test-convert.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-convert.t Mon Jan 22 17:53:02 2018 -0500 @@ -476,7 +476,7 @@ assuming destination emptydir-hg initializing destination emptydir-hg repository emptydir does not look like a CVS checkout - $TESTTMP/emptydir does not look like a Git repository (glob) + $TESTTMP/emptydir does not look like a Git repository emptydir does not look like a Subversion repository emptydir is not a local Mercurial repository emptydir does not look like a darcs repository diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-copy-move-merge.t --- a/tests/test-copy-move-merge.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-copy-move-merge.t Mon Jan 22 17:53:02 2018 -0500 @@ -82,7 +82,7 @@ $ hg strip -r . --config extensions.strip= 2 files updated, 0 files merged, 0 files removed, 0 files unresolved - saved backup bundle to $TESTTMP/t/.hg/strip-backup/550bd84c0cd3-fc575957-backup.hg (glob) + saved backup bundle to $TESTTMP/t/.hg/strip-backup/550bd84c0cd3-fc575957-backup.hg $ hg up -qC 2 $ hg rebase --keep -d 1 -b 2 --config extensions.rebase= --config experimental.copytrace=off --config ui.interactive=True << EOF > c @@ -122,7 +122,7 @@ $ hg rebase -d . 
-b 2 --config extensions.rebase= --config experimental.copytrace=off rebasing 2:6adcf8c12e7d "copy b->x" - saved backup bundle to $TESTTMP/copydisable/.hg/strip-backup/6adcf8c12e7d-ce4b3e75-rebase.hg (glob) + saved backup bundle to $TESTTMP/copydisable/.hg/strip-backup/6adcf8c12e7d-ce4b3e75-rebase.hg $ hg up -q 3 $ hg log -f x -T '{rev} {desc}\n' 3 copy b->x @@ -155,7 +155,7 @@ $ hg rebase -d 2 -s 3 --config extensions.rebase= --config experimental.copytrace=off rebasing 3:47e1a9e6273b "copy a->b (2)" (tip) - saved backup bundle to $TESTTMP/copydisable3/.hg/strip-backup/47e1a9e6273b-2d099c59-rebase.hg (glob) + saved backup bundle to $TESTTMP/copydisable3/.hg/strip-backup/47e1a9e6273b-2d099c59-rebase.hg $ hg log -G -f b @ changeset: 3:76024fb4b05b diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-copytrace-heuristics.t --- a/tests/test-copytrace-heuristics.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-copytrace-heuristics.t Mon Jan 22 17:53:02 2018 -0500 @@ -55,7 +55,7 @@ rebasing 2:557f403c0afd "mod a, mod dir/file.txt" (tip) merging b and a to b merging dir2/file.txt and dir/file.txt to dir2/file.txt - saved backup bundle to $TESTTMP/repo/.hg/strip-backup/557f403c0afd-9926eeff-rebase.hg (glob) + saved backup bundle to $TESTTMP/repo/.hg/strip-backup/557f403c0afd-9926eeff-rebase.hg $ cd .. $ rm -rf repo @@ -125,7 +125,7 @@ $ hg rebase -s . -d 2 rebasing 3:9d5cf99c3d9f "mod a" (tip) merging b and a to b - saved backup bundle to $TESTTMP/repo/.hg/strip-backup/9d5cf99c3d9f-f02358cc-rebase.hg (glob) + saved backup bundle to $TESTTMP/repo/.hg/strip-backup/9d5cf99c3d9f-f02358cc-rebase.hg $ cd .. $ rm -rf repo @@ -160,7 +160,7 @@ $ hg rebase -s . -d 0 rebasing 3:fbe97126b396 "mod b" (tip) merging a and b to a - saved backup bundle to $TESTTMP/repo/.hg/strip-backup/fbe97126b396-cf5452a1-rebase.hg (glob) + saved backup bundle to $TESTTMP/repo/.hg/strip-backup/fbe97126b396-cf5452a1-rebase.hg $ cd .. $ rm -rf repo @@ -197,7 +197,7 @@ $ hg rebase -s . 
-d 2 rebasing 3:6b2f4cece40f "mod dir/a" (tip) merging dir/b and dir/a to dir/b - saved backup bundle to $TESTTMP/repo/.hg/strip-backup/6b2f4cece40f-503efe60-rebase.hg (glob) + saved backup bundle to $TESTTMP/repo/.hg/strip-backup/6b2f4cece40f-503efe60-rebase.hg $ cd .. $ rm -rf repo @@ -255,7 +255,7 @@ $ hg rebase -s 2 -d 1 rebasing 2:ef716627c70b "mod a" (tip) merging foo and a to foo - saved backup bundle to $TESTTMP/repo/.hg/strip-backup/ef716627c70b-24681561-rebase.hg (glob) + saved backup bundle to $TESTTMP/repo/.hg/strip-backup/ef716627c70b-24681561-rebase.hg $ cd .. $ rm -rf repo @@ -286,7 +286,7 @@ $ hg rebase -s 1 -d 2 rebasing 1:472e38d57782 "mv a b" - saved backup bundle to $TESTTMP/repo/.hg/strip-backup/472e38d57782-17d50e29-rebase.hg (glob) + saved backup bundle to $TESTTMP/repo/.hg/strip-backup/472e38d57782-17d50e29-rebase.hg $ hg up -q c492ed3c7e35dcd1dc938053b8adf56e2cfbd062 $ ls b @@ -320,7 +320,7 @@ $ hg rebase -s . -d 1 rebasing 2:a33d80b6e352 "mv dir/ dir2/" (tip) merging dir/a and dir2/a to dir2/a - saved backup bundle to $TESTTMP/repo/.hg/strip-backup/a33d80b6e352-fecb9ada-rebase.hg (glob) + saved backup bundle to $TESTTMP/repo/.hg/strip-backup/a33d80b6e352-fecb9ada-rebase.hg $ cd .. $ rm -rf server $ rm -rf repo @@ -355,7 +355,7 @@ $ hg rebase -s . -d 2 rebasing 3:d41316942216 "mod a" (tip) merging c and a to c - saved backup bundle to $TESTTMP/repo/.hg/strip-backup/d41316942216-2b5949bc-rebase.hg (glob) + saved backup bundle to $TESTTMP/repo/.hg/strip-backup/d41316942216-2b5949bc-rebase.hg $ cd .. $ rm -rf repo @@ -391,7 +391,7 @@ merging a and b to b rebasing 2:d3efd280421d "mv b c" merging b and c to c - saved backup bundle to $TESTTMP/repo/.hg/strip-backup/472e38d57782-ab8d3c58-rebase.hg (glob) + saved backup bundle to $TESTTMP/repo/.hg/strip-backup/472e38d57782-ab8d3c58-rebase.hg $ cd .. $ rm -rf repo @@ -428,7 +428,7 @@ $ hg rebase -s . 
-d 2 rebasing 3:ef716627c70b "mod a" (tip) merging b and a to b - saved backup bundle to $TESTTMP/repo/.hg/strip-backup/ef716627c70b-24681561-rebase.hg (glob) + saved backup bundle to $TESTTMP/repo/.hg/strip-backup/ef716627c70b-24681561-rebase.hg $ ls b c @@ -500,7 +500,7 @@ rebasing 2:ef716627c70b "mod a" (tip) merging b and a to b merging c and a to c - saved backup bundle to $TESTTMP/repo/repo/.hg/strip-backup/ef716627c70b-24681561-rebase.hg (glob) + saved backup bundle to $TESTTMP/repo/repo/.hg/strip-backup/ef716627c70b-24681561-rebase.hg $ ls b c @@ -624,7 +624,7 @@ $ hg rebase -s . -d 1 --config experimental.copytrace.sourcecommitlimit=100 rebasing 2:6207d2d318e7 "mod a" (tip) merging dir2/b and dir1/a to dir2/b - saved backup bundle to $TESTTMP/repo/repo/.hg/strip-backup/6207d2d318e7-1c9779ad-rebase.hg (glob) + saved backup bundle to $TESTTMP/repo/repo/.hg/strip-backup/6207d2d318e7-1c9779ad-rebase.hg $ cat dir2/b a b @@ -661,7 +661,7 @@ $ hg rebase -s . -d 1 --config experimental.copytrace.sourcecommitlimit=100 rebasing 2:e8919e7df8d0 "mv dir1 dir2" (tip) - saved backup bundle to $TESTTMP/repo/repo/.hg/strip-backup/e8919e7df8d0-f62fab62-rebase.hg (glob) + saved backup bundle to $TESTTMP/repo/repo/.hg/strip-backup/e8919e7df8d0-f62fab62-rebase.hg $ ls dir2 a dummy @@ -711,6 +711,6 @@ $ hg rebase -s 8b6e13696 -d . --config experimental.copytrace.sourcecommitlimit=100 rebasing 1:8b6e13696c38 "added more things to a" merging foo/bar and a to foo/bar - saved backup bundle to $TESTTMP/repo/repo/repo/.hg/strip-backup/8b6e13696c38-fc14ac83-rebase.hg (glob) + saved backup bundle to $TESTTMP/repo/repo/repo/.hg/strip-backup/8b6e13696c38-fc14ac83-rebase.hg $ cd .. 
$ rm -rf repo diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-debugcommands.t --- a/tests/test-debugcommands.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-debugcommands.t Mon Jan 22 17:53:02 2018 -0500 @@ -1,4 +1,6 @@ $ cat << EOF >> $HGRCPATH + > [ui] + > interactive=yes > [format] > usegeneraldelta=yes > EOF @@ -77,6 +79,72 @@ } ] +debugdelta chain with sparse read enabled + + $ cat >> $HGRCPATH < [experimental] + > sparse-read = True + > EOF + $ hg debugdeltachain -m + rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio readsize largestblk rddensity srchunks + 0 1 1 -1 base 44 43 44 1.02326 44 0 0.00000 44 44 1.00000 1 + + $ hg debugdeltachain -m -T '{rev} {chainid} {chainlen} {readsize} {largestblock} {readdensity}\n' + 0 1 1 44 44 1.0 + + $ hg debugdeltachain -m -Tjson + [ + { + "chainid": 1, + "chainlen": 1, + "chainratio": 1.02325581395, + "chainsize": 44, + "compsize": 44, + "deltatype": "base", + "extradist": 0, + "extraratio": 0.0, + "largestblock": 44, + "lindist": 44, + "prevrev": -1, + "readdensity": 1.0, + "readsize": 44, + "rev": 0, + "srchunks": 1, + "uncompsize": 43 + } + ] + + $ printf "This test checks things.\n" >> a + $ hg ci -m a + $ hg branch other + marked working directory as branch other + (branches are permanent and global, did you want a bookmark?) 
+ $ for i in `$TESTDIR/seq.py 5`; do + > printf "shorter ${i}" >> a + > hg ci -m "a other:$i" + > hg up -q default + > printf "for the branch default we want longer chains: ${i}" >> a + > hg ci -m "a default:$i" + > hg up -q other + > done + $ hg debugdeltachain a -T '{rev} {srchunks}\n' \ + > --config experimental.sparse-read.density-threshold=0.50 \ + > --config experimental.sparse-read.min-gap-size=0 + 0 1 + 1 1 + 2 1 + 3 1 + 4 1 + 5 1 + 6 1 + 7 1 + 8 1 + 9 1 + 10 2 + 11 1 + $ hg --config extensions.strip= strip --no-backup -r 1 + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + Test max chain len $ cat >> $HGRCPATH << EOF > [format] @@ -111,6 +179,126 @@ 7 6 -1 ??? ??? ??? ??? ??? 0 ??? ???? ? 1 2 (glob) 8 7 -1 ??? ??? ??? ??? ??? 0 ??? ???? ? 1 3 (glob) +Test debuglocks command: + + $ hg debuglocks + lock: free + wlock: free + +* Test setting the lock + +waitlock will wait for file to be created. If it isn't in a reasonable +amount of time, displays error message and returns 1 + $ waitlock() { + > start=`date +%s` + > timeout=5 + > while [ \( ! -f $1 \) -a \( ! 
-L $1 \) ]; do + > now=`date +%s` + > if [ "`expr $now - $start`" -gt $timeout ]; then + > echo "timeout: $1 was not created in $timeout seconds" + > return 1 + > fi + > sleep 0.1 + > done + > } + $ dolock() { + > { + > waitlock .hg/unlock + > rm -f .hg/unlock + > echo y + > } | hg debuglocks "$@" > /dev/null + > } + $ dolock -s & + $ waitlock .hg/store/lock + + $ hg debuglocks + lock: user *, process * (*s) (glob) + wlock: free + [1] + $ touch .hg/unlock + $ wait + $ [ -f .hg/store/lock ] || echo "There is no lock" + There is no lock + +* Test setting the wlock + + $ dolock -S & + $ waitlock .hg/wlock + + $ hg debuglocks + lock: free + wlock: user *, process * (*s) (glob) + [1] + $ touch .hg/unlock + $ wait + $ [ -f .hg/wlock ] || echo "There is no wlock" + There is no wlock + +* Test setting both locks + + $ dolock -Ss & + $ waitlock .hg/wlock && waitlock .hg/store/lock + + $ hg debuglocks + lock: user *, process * (*s) (glob) + wlock: user *, process * (*s) (glob) + [2] + +* Test failing to set a lock + + $ hg debuglocks -s + abort: lock is already held + [255] + + $ hg debuglocks -S + abort: wlock is already held + [255] + + $ touch .hg/unlock + $ wait + + $ hg debuglocks + lock: free + wlock: free + +* Test forcing the lock + + $ dolock -s & + $ waitlock .hg/store/lock + + $ hg debuglocks + lock: user *, process * (*s) (glob) + wlock: free + [1] + + $ hg debuglocks -L + + $ hg debuglocks + lock: free + wlock: free + + $ touch .hg/unlock + $ wait + +* Test forcing the wlock + + $ dolock -S & + $ waitlock .hg/wlock + + $ hg debuglocks + lock: free + wlock: user *, process * (*s) (glob) + [1] + + $ hg debuglocks -W + + $ hg debuglocks + lock: free + wlock: free + + $ touch .hg/unlock + $ wait + Test WdirUnsupported exception $ hg debugdata -c ffffffffffffffffffffffffffffffffffffffff @@ -156,3 +344,40 @@ from h hidden in g at: debugstacktrace.py:6 in f debugstacktrace.py:9 in g + +Test debugcapabilities command: + + $ hg debugcapabilities ./debugrevlog/ + Main 
capabilities: + branchmap + $USUAL_BUNDLE2_CAPS$ + getbundle + known + lookup + pushkey + unbundle + Bundle2 capabilities: + HG20 + bookmarks + changegroup + 01 + 02 + digests + md5 + sha1 + sha512 + error + abort + unsupportedcontent + pushraced + pushkey + hgtagsfnodes + listkeys + phases + heads + pushkey + remote-changegroup + http + https + stream + v2 diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-default-push.t --- a/tests/test-default-push.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-default-push.t Mon Jan 22 17:53:02 2018 -0500 @@ -27,7 +27,7 @@ Push should push to 'default' when 'default-push' not set: $ hg --cwd b push - pushing to $TESTTMP/a (glob) + pushing to $TESTTMP/a searching for changes adding changesets adding manifests @@ -39,7 +39,7 @@ $ echo '[paths]' >> b/.hg/hgrc $ echo 'default-push = ../c' >> b/.hg/hgrc $ hg --cwd b push - pushing to $TESTTMP/c (glob) + pushing to $TESTTMP/c searching for changes adding changesets adding manifests @@ -49,7 +49,7 @@ But push should push to 'default' if explicitly specified (issue5000): $ hg --cwd b push default - pushing to $TESTTMP/a (glob) + pushing to $TESTTMP/a searching for changes no changes found [1] @@ -63,7 +63,7 @@ $ touch foo $ hg -q commit -A -m 'add foo' $ hg --config paths.default-push=../a push - pushing to $TESTTMP/a (glob) + pushing to $TESTTMP/a searching for changes adding changesets adding manifests diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-devel-warnings.t --- a/tests/test-devel-warnings.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-devel-warnings.t Mon Jan 22 17:53:02 2018 -0500 @@ -99,7 +99,7 @@ #if no-chg $ hg buggylocking --traceback devel-warn: "wlock" acquired after "lock" at: - */hg:* in (glob) + */hg:* in (glob) (?) 
*/mercurial/dispatch.py:* in run (glob) */mercurial/dispatch.py:* in dispatch (glob) */mercurial/dispatch.py:* in _runcatch (glob) @@ -115,7 +115,7 @@ #else $ hg buggylocking --traceback devel-warn: "wlock" acquired after "lock" at: - */hg:* in (glob) + */hg:* in (glob) (?) */mercurial/dispatch.py:* in run (glob) */mercurial/dispatch.py:* in dispatch (glob) */mercurial/dispatch.py:* in _runcatch (glob) @@ -177,7 +177,7 @@ $ hg oldanddeprecated --traceback devel-warn: foorbar is deprecated, go shopping (compatibility will be dropped after Mercurial-42.1337, update your code.) at: - */hg:* in (glob) + */hg:* in (glob) (?) */mercurial/dispatch.py:* in run (glob) */mercurial/dispatch.py:* in dispatch (glob) */mercurial/dispatch.py:* in _runcatch (glob) @@ -238,7 +238,7 @@ 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> oldanddeprecated --traceback 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> devel-warn: foorbar is deprecated, go shopping (compatibility will be dropped after Mercurial-42.1337, update your code.) at: - */hg:* in (glob) + */hg:* in (glob) (?) */mercurial/dispatch.py:* in run (glob) */mercurial/dispatch.py:* in dispatch (glob) */mercurial/dispatch.py:* in _runcatch (glob) diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-diff-color.t --- a/tests/test-diff-color.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-diff-color.t Mon Jan 22 17:53:02 2018 -0500 @@ -259,3 +259,134 @@ \x1b[0;32m+\x1b[0m\x1b[0;1;35m \x1b[0m\x1b[0;32mall\x1b[0m\x1b[0;1;35m \x1b[0m\x1b[0;32mtabs\x1b[0m\x1b[0;1;41m \x1b[0m (esc) $ cd .. + +test inline color diff + + $ hg init inline + $ cd inline + $ cat > file1 << EOF + > this is the first line + > this is the second line + > third line starts with space + > + starts with a plus sign + > this one with one tab + > now with full two tabs + > now tabs everywhere, much fun + > + > this line won't change + > + > two lines are going to + > be changed into three! 
+ > + > three of those lines will + > collapse onto one + > (to see if it works) + > EOF + $ hg add file1 + $ hg ci -m 'commit' + + $ cat > file1 << EOF + > that is the first paragraph + > this is the second line + > third line starts with space + > - starts with a minus sign + > this one with two tab + > now with full three tabs + > now there are tabs everywhere, much fun + > + > this line won't change + > + > two lines are going to + > (entirely magically, + > assuming this works) + > be changed into four! + > + > three of those lines have + > collapsed onto one + > EOF + $ hg diff --config experimental.worddiff=False --color=debug + [diff.diffline|diff --git a/file1 b/file1] + [diff.file_a|--- a/file1] + [diff.file_b|+++ b/file1] + [diff.hunk|@@ -1,16 +1,17 @@] + [diff.deleted|-this is the first line] + [diff.deleted|-this is the second line] + [diff.deleted|- third line starts with space] + [diff.deleted|-+ starts with a plus sign] + [diff.deleted|-][diff.tab| ][diff.deleted|this one with one tab] + [diff.deleted|-][diff.tab| ][diff.deleted|now with full two tabs] + [diff.deleted|-][diff.tab| ][diff.deleted|now tabs][diff.tab| ][diff.deleted|everywhere, much fun] + [diff.inserted|+that is the first paragraph] + [diff.inserted|+ this is the second line] + [diff.inserted|+third line starts with space] + [diff.inserted|+- starts with a minus sign] + [diff.inserted|+][diff.tab| ][diff.inserted|this one with two tab] + [diff.inserted|+][diff.tab| ][diff.inserted|now with full three tabs] + [diff.inserted|+][diff.tab| ][diff.inserted|now there are tabs][diff.tab| ][diff.inserted|everywhere, much fun] + + this line won't change + + two lines are going to + [diff.deleted|-be changed into three!] + [diff.inserted|+(entirely magically,] + [diff.inserted|+ assuming this works)] + [diff.inserted|+be changed into four!] 
+ + [diff.deleted|-three of those lines will] + [diff.deleted|-collapse onto one] + [diff.deleted|-(to see if it works)] + [diff.inserted|+three of those lines have] + [diff.inserted|+collapsed onto one] + $ hg diff --config experimental.worddiff=True --color=debug + [diff.diffline|diff --git a/file1 b/file1] + [diff.file_a|--- a/file1] + [diff.file_b|+++ b/file1] + [diff.hunk|@@ -1,16 +1,17 @@] + [diff.deleted|-this is the ][diff.deleted.highlight|first][diff.deleted| line] + [diff.deleted|-this is the second line] + [diff.deleted|-][diff.deleted.highlight| ][diff.deleted|third line starts with space] + [diff.deleted|-][diff.deleted.highlight|+][diff.deleted| starts with a ][diff.deleted.highlight|plus][diff.deleted| sign] + [diff.deleted|-][diff.tab| ][diff.deleted|this one with ][diff.deleted.highlight|one][diff.deleted| tab] + [diff.deleted|-][diff.tab| ][diff.deleted|now with full ][diff.deleted.highlight|two][diff.deleted| tabs] + [diff.deleted|-][diff.tab| ][diff.deleted|now tabs][diff.tab| ][diff.deleted|everywhere, much fun] + [diff.inserted|+that is the first paragraph] + [diff.inserted|+][diff.inserted.highlight| ][diff.inserted|this is the ][diff.inserted.highlight|second][diff.inserted| line] + [diff.inserted|+third line starts with space] + [diff.inserted|+][diff.inserted.highlight|-][diff.inserted| starts with a ][diff.inserted.highlight|minus][diff.inserted| sign] + [diff.inserted|+][diff.tab| ][diff.inserted|this one with ][diff.inserted.highlight|two][diff.inserted| tab] + [diff.inserted|+][diff.tab| ][diff.inserted|now with full ][diff.inserted.highlight|three][diff.inserted| tabs] + [diff.inserted|+][diff.tab| ][diff.inserted|now][diff.inserted.highlight| there are][diff.inserted| tabs][diff.tab| ][diff.inserted|everywhere, much fun] + + this line won't change + + two lines are going to + [diff.deleted|-be changed into ][diff.deleted.highlight|three][diff.deleted|!] 
+ [diff.inserted|+(entirely magically,] + [diff.inserted|+ assuming this works)] + [diff.inserted|+be changed into ][diff.inserted.highlight|four][diff.inserted|!] + + [diff.deleted|-three of those lines ][diff.deleted.highlight|will] + [diff.deleted|-][diff.deleted.highlight|collapse][diff.deleted| onto one] + [diff.deleted|-(to see if it works)] + [diff.inserted|+three of those lines ][diff.inserted.highlight|have] + [diff.inserted|+][diff.inserted.highlight|collapsed][diff.inserted| onto one] + +multibyte character shouldn't be broken up in word diff: + + $ $PYTHON <<'EOF' + > with open("utf8", "wb") as f: + > f.write(b"blah \xe3\x82\xa2 blah\n") + > EOF + $ hg ci -Am 'add utf8 char' utf8 + $ $PYTHON <<'EOF' + > with open("utf8", "wb") as f: + > f.write(b"blah \xe3\x82\xa4 blah\n") + > EOF + $ hg ci -m 'slightly change utf8 char' utf8 + $ hg diff --config experimental.worddiff=True --color=debug -c. + [diff.diffline|diff --git a/utf8 b/utf8] + [diff.file_a|--- a/utf8] + [diff.file_b|+++ b/utf8] + [diff.hunk|@@ -1,1 +1,1 @@] + [diff.deleted|-blah ][diff.deleted.highlight|\xe3\x82\xa2][diff.deleted| blah] (esc) + [diff.inserted|+blah ][diff.inserted.highlight|\xe3\x82\xa4][diff.inserted| blah] (esc) diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-diff-upgrade.t --- a/tests/test-diff-upgrade.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-diff-upgrade.t Mon Jan 22 17:53:02 2018 -0500 @@ -16,7 +16,7 @@ $ echo regular > regular $ echo rmregular > rmregular - $ $PYTHON -c "file('bintoregular', 'wb').write('\0')" + $ $PYTHON -c "open('bintoregular', 'wb').write(b'\0')" $ touch rmempty $ echo exec > exec $ chmod +x exec @@ -26,7 +26,7 @@ $ echo unsetexec > unsetexec $ chmod +x unsetexec $ echo binary > binary - $ $PYTHON -c "file('rmbinary', 'wb').write('\0')" + $ $PYTHON -c "open('rmbinary', 'wb').write(b'\0')" $ hg ci -Am addfiles adding binary adding bintoregular @@ -50,8 +50,8 @@ $ rm rmexec $ chmod +x setexec $ chmod -x unsetexec - $ $PYTHON -c "file('binary', 
'wb').write('\0\0')" - $ $PYTHON -c "file('newbinary', 'wb').write('\0')" + $ $PYTHON -c "open('binary', 'wb').write(b'\0\0')" + $ $PYTHON -c "open('newbinary', 'wb').write(b'\0')" $ rm rmbinary $ hg addremove -s 0 adding newbinary diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-diffstat.t --- a/tests/test-diffstat.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-diffstat.t Mon Jan 22 17:53:02 2018 -0500 @@ -35,7 +35,7 @@ $ hg ci -m appenda - >>> open("c", "wb").write("\0") + >>> open("c", "wb").write(b"\0") $ touch d $ hg add c d @@ -54,7 +54,7 @@ $ hg ci -m createb - >>> open("file with spaces", "wb").write("\0") + >>> open("file with spaces", "wb").write(b"\0") $ hg add "file with spaces" Filename with spaces diffstat: @@ -151,7 +151,7 @@ 1 files changed, 1 insertions(+), 0 deletions(-) $ hg diff --stat --root ../dir1 ../dir2 - warning: ../dir2 not inside relative root . (glob) + warning: ../dir2 not inside relative root . $ hg diff --stat --root . -I old diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-directaccess.t --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-directaccess.t Mon Jan 22 17:53:02 2018 -0500 @@ -0,0 +1,201 @@ +Tests for access level on hidden commits by various commands on based of their +type. 
+ +Setting the required config to start this + + $ cat >> $HGRCPATH < [experimental] + > evolution=createmarkers, allowunstable + > directaccess=True + > directaccess.revnums=True + > [extensions] + > amend = + > EOF + + $ hg init repo + $ cd repo + $ for ch in a b c; do touch $ch; echo "foo" >> $ch; hg ci -Aqm "Added "$ch; done + + $ hg log -G -T '{rev}:{node} {desc}' --hidden + @ 2:28ad74487de9599d00d81085be739c61fc340652 Added c + | + o 1:29becc82797a4bc11ec8880b58eaecd2ab3e7760 Added b + | + o 0:18d04c59bb5d2d4090ad9a5b59bd6274adb63add Added a + + $ echo "bar" >> c + $ hg amend + + $ hg log -G -T '{rev}:{node} {desc}' --hidden + @ 3:2443a0e664694756d8b435d06b6ad84f941b6fc0 Added c + | + | x 2:28ad74487de9599d00d81085be739c61fc340652 Added c + |/ + o 1:29becc82797a4bc11ec8880b58eaecd2ab3e7760 Added b + | + o 0:18d04c59bb5d2d4090ad9a5b59bd6274adb63add Added a + +Testing read only commands on the hidden revision + +Testing with rev number + + $ hg exp 2 --config experimental.directaccess.revnums=False + abort: hidden revision '2' was rewritten as: 2443a0e66469! + (use --hidden to access hidden revisions) + [255] + + $ hg exp 2 + # HG changeset patch + # User test + # Date 0 0 + # Thu Jan 01 00:00:00 1970 +0000 + # Node ID 28ad74487de9599d00d81085be739c61fc340652 + # Parent 29becc82797a4bc11ec8880b58eaecd2ab3e7760 + Added c + + diff -r 29becc82797a -r 28ad74487de9 c + --- /dev/null Thu Jan 01 00:00:00 1970 +0000 + +++ b/c Thu Jan 01 00:00:00 1970 +0000 + @@ -0,0 +1,1 @@ + +foo + + $ hg log -r 2 + changeset: 2:28ad74487de9 + user: test + date: Thu Jan 01 00:00:00 1970 +0000 + obsolete: rewritten using amend as 3:2443a0e66469 + summary: Added c + + $ hg identify -r 2 + 28ad74487de9 + + $ hg status --change 2 + A c + + $ hg status --change 2 --config experimental.directaccess.revnums=False + abort: hidden revision '2' was rewritten as: 2443a0e66469! 
+ (use --hidden to access hidden revisions) + [255] + + $ hg diff -c 2 + diff -r 29becc82797a -r 28ad74487de9 c + --- /dev/null Thu Jan 01 00:00:00 1970 +0000 + +++ b/c Thu Jan 01 00:00:00 1970 +0000 + @@ -0,0 +1,1 @@ + +foo + +Testing with hash + +`hg export` + + $ hg exp 28ad74 + # HG changeset patch + # User test + # Date 0 0 + # Thu Jan 01 00:00:00 1970 +0000 + # Node ID 28ad74487de9599d00d81085be739c61fc340652 + # Parent 29becc82797a4bc11ec8880b58eaecd2ab3e7760 + Added c + + diff -r 29becc82797a -r 28ad74487de9 c + --- /dev/null Thu Jan 01 00:00:00 1970 +0000 + +++ b/c Thu Jan 01 00:00:00 1970 +0000 + @@ -0,0 +1,1 @@ + +foo + +`hg log` + + $ hg log -r 28ad74 + changeset: 2:28ad74487de9 + user: test + date: Thu Jan 01 00:00:00 1970 +0000 + obsolete: rewritten using amend as 3:2443a0e66469 + summary: Added c + +`hg cat` + + $ hg cat -r 28ad74 c + foo + +`hg diff` + + $ hg diff -c 28ad74 + diff -r 29becc82797a -r 28ad74487de9 c + --- /dev/null Thu Jan 01 00:00:00 1970 +0000 + +++ b/c Thu Jan 01 00:00:00 1970 +0000 + @@ -0,0 +1,1 @@ + +foo + +`hg files` + + $ hg files -r 28ad74 + a + b + c + +`hg identify` + + $ hg identify -r 28ad74 + 28ad74487de9 + +`hg annotate` + + $ hg annotate -r 28ad74 a + 0: foo + +`hg status` + + $ hg status --change 28ad74 + A c + +`hg archive` + +This should not throw error + $ hg archive -r 28ad74 foo + +`hg update` + + $ hg up 28ad74 + updating to a hidden changeset 28ad74487de9 + (hidden revision '28ad74487de9' was rewritten as: 2443a0e66469) + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + + $ hg up 3 + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + + $ hg up + 0 files updated, 0 files merged, 0 files removed, 0 files unresolved + +`hg revert` + + $ hg revert -r 28ad74 --all + reverting c + + $ hg diff + diff -r 2443a0e66469 c + --- a/c Thu Jan 01 00:00:00 1970 +0000 + +++ b/c Thu Jan 01 00:00:00 1970 +0000 + @@ -1,2 +1,1 @@ + foo + -bar + +Commands with undefined cmdtype should not 
work right now + + $ hg phase -r 28ad74 + abort: hidden revision '28ad74' was rewritten as: 2443a0e66469! + (use --hidden to access hidden revisions) + [255] + + $ hg phase -r 2 + abort: hidden revision '2' was rewritten as: 2443a0e66469! + (use --hidden to access hidden revisions) + [255] + +Setting a bookmark will make that changeset unhidden, so this should come in end + + $ hg bookmarks -r 28ad74 book + bookmarking hidden changeset 28ad74487de9 + (hidden revision '28ad74487de9' was rewritten as: 2443a0e66469) + + $ hg bookmarks + book 2:28ad74487de9 diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-dirstate-race.t --- a/tests/test-dirstate-race.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-dirstate-race.t Mon Jan 22 17:53:02 2018 -0500 @@ -45,7 +45,7 @@ #endif $ hg add b dir1 d e - adding dir1/c (glob) + adding dir1/c $ hg commit -m test2 $ cat >> $TESTTMP/dirstaterace.py << EOF diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-dirstate.t --- a/tests/test-dirstate.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-dirstate.t Mon Jan 22 17:53:02 2018 -0500 @@ -11,9 +11,9 @@ adding a/b/c/d/y adding a/b/c/d/z $ hg mv a z - moving a/b/c/d/x to z/b/c/d/x (glob) - moving a/b/c/d/y to z/b/c/d/y (glob) - moving a/b/c/d/z to z/b/c/d/z (glob) + moving a/b/c/d/x to z/b/c/d/x + moving a/b/c/d/y to z/b/c/d/y + moving a/b/c/d/z to z/b/c/d/z Test name collisions diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-dispatch.t --- a/tests/test-dispatch.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-dispatch.t Mon Jan 22 17:53:02 2018 -0500 @@ -37,10 +37,17 @@ hg log [OPTION]... [FILE] (use 'hg log -h' to show more help) - $ hg log -R -- 2>&1 | grep 'hg log' - hg log: option -R requires argument - hg log [OPTION]... [FILE] - (use 'hg log -h' to show more help) +"--" may be an option value: + + $ hg -R -- log + abort: repository -- not found! + [255] + $ hg log -R -- + abort: repository -- not found! 
+ [255] + $ hg log -T -- + -- (no-eol) + $ hg log -T -- -k nomatch Parsing of early options should stop at "--": @@ -87,7 +94,7 @@ [255] $ hg log -b --cwd=inexistent default - abort: No such file or directory: 'inexistent' + abort: $ENOENT$: 'inexistent' [255] $ hg log -b '--config=ui.traceback=yes' 2>&1 | grep '^Traceback' @@ -149,6 +156,10 @@ [255] $ HGPLAIN=+strictflags hg --cwd .. -q -Ra log -b default 0:cb9a9f314b8b + $ HGPLAIN=+strictflags hg --cwd .. -q --repository a log -b default + 0:cb9a9f314b8b + $ HGPLAIN=+strictflags hg --cwd .. -q --repo a log -b default + 0:cb9a9f314b8b For compatibility reasons, HGPLAIN=+strictflags is not enabled by plain HGPLAIN: @@ -200,7 +211,7 @@ The output could be one of the following and something else: chg: abort: failed to getcwd (errno = *) (glob) abort: error getting current working directory: * (glob) - sh: 0: getcwd() failed: No such file or directory + sh: 0: getcwd() failed: $ENOENT$ Since the exact behavior depends on the shell, only check it returns non-zero. 
$ HGDEMANDIMPORT=disable hg version -q 2>/dev/null || false [1] diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-drawdag.t --- a/tests/test-drawdag.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-drawdag.t Mon Jan 22 17:53:02 2018 -0500 @@ -206,9 +206,10 @@ > \|/ > A > EOS + 1 new orphan changesets $ hg log -r 'sort(all(), topo)' -G --hidden -T '{desc} {node}' - o G 711f53bbef0bebd12eb6f0511d5e2e998b984846 + * G 711f53bbef0bebd12eb6f0511d5e2e998b984846 | x F 64a8289d249234b9886244d379f15e6b650b28e3 | @@ -227,11 +228,11 @@ o A 426bada5c67598ca65036d57d9e4b64b0c1ce7a0 $ hg debugobsolete - 112478962961147124edd43549aedd1a335e44bf 7fb047a69f220c21711122dfd94305a9efb60cba 64a8289d249234b9886244d379f15e6b650b28e3 711f53bbef0bebd12eb6f0511d5e2e998b984846 0 (Thu Jan 01 00:00:00 1970 +0000) {'operation': 'split', 'user': 'test'} - 26805aba1e600a82e93661149f2313866a221a7b be0ef73c17ade3fc89dc41701eb9fc3a91b58282 0 (Thu Jan 01 00:00:00 1970 +0000) {'operation': 'replace', 'user': 'test'} - be0ef73c17ade3fc89dc41701eb9fc3a91b58282 575c4b5ec114d64b681d33f8792853568bfb2b2c 0 (Thu Jan 01 00:00:00 1970 +0000) {'operation': 'replace', 'user': 'test'} - 64a8289d249234b9886244d379f15e6b650b28e3 0 {7fb047a69f220c21711122dfd94305a9efb60cba} (Thu Jan 01 00:00:00 1970 +0000) {'operation': 'prune', 'user': 'test'} - 58e6b987bf7045fcd9c54f496396ca1d1fc81047 0 {575c4b5ec114d64b681d33f8792853568bfb2b2c} (Thu Jan 01 00:00:00 1970 +0000) {'operation': 'prune', 'user': 'test'} + 112478962961147124edd43549aedd1a335e44bf 7fb047a69f220c21711122dfd94305a9efb60cba 64a8289d249234b9886244d379f15e6b650b28e3 711f53bbef0bebd12eb6f0511d5e2e998b984846 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '0', 'operation': 'split', 'user': 'test'} + 26805aba1e600a82e93661149f2313866a221a7b be0ef73c17ade3fc89dc41701eb9fc3a91b58282 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '9', 'operation': 'replace', 'user': 'test'} + be0ef73c17ade3fc89dc41701eb9fc3a91b58282 575c4b5ec114d64b681d33f8792853568bfb2b2c 0 (Thu Jan 01 
00:00:00 1970 +0000) {'ef1': '13', 'operation': 'replace', 'user': 'test'} + 64a8289d249234b9886244d379f15e6b650b28e3 0 {7fb047a69f220c21711122dfd94305a9efb60cba} (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '0', 'operation': 'prune', 'user': 'test'} + 58e6b987bf7045fcd9c54f496396ca1d1fc81047 0 {575c4b5ec114d64b681d33f8792853568bfb2b2c} (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '0', 'operation': 'prune', 'user': 'test'} Change file contents via comments @@ -261,12 +262,12 @@ a FILE B b - FILE dir1/a (glob) + FILE dir1/a 1 2 - FILE dir1/c (glob) + FILE dir1/c 5 - FILE dir2/b (glob) + FILE dir2/b 34 - FILE dir2/c (glob) + FILE dir2/c 6 diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-exchange-obsmarkers-case-A3.t --- a/tests/test-exchange-obsmarkers-case-A3.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-exchange-obsmarkers-case-A3.t Mon Jan 22 17:53:02 2018 -0500 @@ -74,6 +74,7 @@ created new head $ hg debugobsolete `getid 'desc(A0)'` `getid 'desc(A1)'` obsoleted 1 changesets + 1 new orphan changesets $ hg debugobsolete `getid 'desc(B0)'` `getid 'desc(B1)'` obsoleted 1 changesets $ hg log -G --hidden @@ -163,6 +164,7 @@ created new head $ hg debugobsolete `getid 'desc(A0)'` `getid 'desc(A1)'` obsoleted 1 changesets + 1 new orphan changesets $ hg debugobsolete `getid 'desc(B0)'` `getid 'desc(B1)'` obsoleted 1 changesets $ hg log -G --hidden @@ -218,6 +220,7 @@ remote: added 1 changesets with 1 changes to 1 files (+1 heads) remote: 1 new obsolescence markers remote: obsoleted 1 changesets + remote: 1 new orphan changesets ## post push state # obstore: main 28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'} @@ -234,6 +237,7 @@ added 1 changesets with 1 changes to 1 files (+1 heads) 1 new obsolescence markers obsoleted 1 changesets + 1 new orphan changesets new changesets e5ea8f9c7314 (run 'hg heads' to see heads, 'hg merge' to merge) ## post pull state diff -r 87676e8ee056 -r 27b6df1b5adb 
tests/test-exchange-obsmarkers-case-A4.t --- a/tests/test-exchange-obsmarkers-case-A4.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-exchange-obsmarkers-case-A4.t Mon Jan 22 17:53:02 2018 -0500 @@ -65,10 +65,11 @@ $ hg debugobsolete aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa `getid 'desc(A0)'` $ hg debugobsolete `getid 'desc(A0)'` `getid 'desc(A1)'` obsoleted 1 changesets + 1 new orphan changesets $ hg log -G --hidden @ e5ea8f9c7314 (draft): A1 | - | o 06055a7959d4 (draft): B + | * 06055a7959d4 (draft): B | | | x 28b51eb45704 (draft): A0 |/ diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-exchange-obsmarkers-case-B5.t --- a/tests/test-exchange-obsmarkers-case-B5.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-exchange-obsmarkers-case-B5.t Mon Jan 22 17:53:02 2018 -0500 @@ -71,6 +71,7 @@ $ mkcommit B1 $ hg debugobsolete --hidden `getid 'desc(A0)'` `getid 'desc(A1)'` obsoleted 1 changesets + 2 new orphan changesets $ hg debugobsolete --hidden aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa `getid 'desc(B0)'` $ hg debugobsolete --hidden `getid 'desc(B0)'` `getid 'desc(B1)'` obsoleted 1 changesets @@ -80,7 +81,7 @@ | @ e5ea8f9c7314 (draft): A1 | - | o 1d0f3cd25300 (draft): C + | * 1d0f3cd25300 (draft): C | | | x 6e72f0a95b5e (draft): B0 | | diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-exchange-obsmarkers-case-C1.t --- a/tests/test-exchange-obsmarkers-case-C1.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-exchange-obsmarkers-case-C1.t Mon Jan 22 17:53:02 2018 -0500 @@ -58,6 +58,7 @@ $ mkcommit A $ mkcommit B $ hg prune -qd '0 0' '.~1' + 1 new orphan changesets $ hg prune -qd '0 0' . 
$ hg log -G --hidden x f6fbb35d8ac9 (draft): B diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-exchange-obsmarkers-case-C4.t --- a/tests/test-exchange-obsmarkers-case-C4.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-exchange-obsmarkers-case-C4.t Mon Jan 22 17:53:02 2018 -0500 @@ -67,6 +67,7 @@ $ hg debugobsolete --hidden `getid 'desc(A)'` `getid 'desc(B)'` obsoleted 1 changesets $ hg debugobsolete --hidden `getid 'desc(A)'` `getid 'desc(C)'` + 2 new content-divergent changesets $ hg prune -qd '0 0' . $ hg log -G --hidden x 7f7f229b13a6 (draft): C diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-exchange-obsmarkers-case-D1.t --- a/tests/test-exchange-obsmarkers-case-D1.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-exchange-obsmarkers-case-D1.t Mon Jan 22 17:53:02 2018 -0500 @@ -62,6 +62,7 @@ created new head $ hg debugobsolete `getid 'desc(A0)'` `getid 'desc(A1)'` obsoleted 1 changesets + 1 new orphan changesets $ hg prune -d '0 0' 'desc(B)' obsoleted 1 changesets $ hg strip --hidden -q 'desc(A0)' diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-exchange-obsmarkers-case-D4.t --- a/tests/test-exchange-obsmarkers-case-D4.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-exchange-obsmarkers-case-D4.t Mon Jan 22 17:53:02 2018 -0500 @@ -60,6 +60,7 @@ $ mkcommit B1 $ hg debugobsolete `getid 'desc(A0)'` aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa obsoleted 1 changesets + 1 new orphan changesets $ hg debugobsolete aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa `getid 'desc(A1)'` $ hg debugobsolete `getid 'desc(B0)'` bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb obsoleted 1 changesets diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-extdata.t --- a/tests/test-extdata.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-extdata.t Mon Jan 22 17:53:02 2018 -0500 @@ -46,8 +46,8 @@ test non-zero exit of shell command $ hg log -qr "extdata(emptygrep)" - $ hg log -qr "extdata(emptygrep)" --debug - extdata command 'cat extdata.txt | grep empty' exited with status * (glob) + abort: extdata 
command 'cat extdata.txt | grep empty' failed: exited with status 1 + [255] test bad extdata() revset source @@ -88,8 +88,7 @@ $ mkdir sub $ cd sub $ hg log -qr "extdata(filedata)" - abort: error: The system cannot find the file specified (windows !) - abort: error: No such file or directory (no-windows !) + abort: error: $ENOENT$ [255] $ hg log -qr "extdata(shelldata)" 2:f6ed99a58333 diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-extension.t --- a/tests/test-extension.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-extension.t Mon Jan 22 17:53:02 2018 -0500 @@ -180,7 +180,7 @@ > EOF $ (PYTHONPATH=${PYTHONPATH}${PATHSEP}${TESTTMP}/libroot; hg --config extensions.loadabs=loadabs.py root) ambigabs.s=libroot/ambig.py - $TESTTMP/a (glob) + $TESTTMP/a #if no-py3k $ cat > $TESTTMP/libroot/mod/ambigrel.py < EOF $ (PYTHONPATH=${PYTHONPATH}${PATHSEP}${TESTTMP}/libroot; hg --config extensions.loadrel=loadrel.py root) ambigrel.s=libroot/mod/ambig.py - $TESTTMP/a (glob) + $TESTTMP/a #endif Check absolute/relative import of extension specific modules @@ -245,7 +245,7 @@ (extroot) import extroot: this is extroot.__init__ (extroot) from extroot.bar import s: this is extroot.bar (extroot) import extroot.bar in func(): this is extroot.bar - $TESTTMP/a (glob) + $TESTTMP/a #if no-py3k $ rm "$TESTTMP"/extroot/foo.* @@ -277,7 +277,7 @@ (extroot) import sub1: this is extroot.sub1.__init__ (extroot) from bar import s: this is extroot.bar (extroot) import bar in func(): this is extroot.bar - $TESTTMP/a (glob) + $TESTTMP/a #endif #if demandimport @@ -534,7 +534,7 @@ Mercurial Distributed SCM (version *) (glob) (see https://mercurial-scm.org for more information) - Copyright (C) 2005-2017 Matt Mackall and others + Copyright (C) 2005-* Matt Mackall and others (glob) This is free software; see the source for copying conditions. There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
@@ -1225,7 +1225,7 @@ > cmdtable = None > EOF $ hg --config extensions.path=./path.py help foo > /dev/null - warning: error finding commands in $TESTTMP/hgext/forest.py (glob) + warning: error finding commands in $TESTTMP/hgext/forest.py abort: no such help topic: foo (try 'hg help --keyword foo') [255] @@ -1503,17 +1503,17 @@ $ echo '# enable extension locally' >> src/.hg/hgrc $ echo "reposetuptest = $TESTTMP/reposetuptest.py" >> src/.hg/hgrc $ hg -R src status - reposetup() for $TESTTMP/reposetup-test/src (glob) - reposetup() for $TESTTMP/reposetup-test/src (glob) (chg !) + reposetup() for $TESTTMP/reposetup-test/src + reposetup() for $TESTTMP/reposetup-test/src (chg !) $ hg clone -U src clone-dst1 - reposetup() for $TESTTMP/reposetup-test/src (glob) + reposetup() for $TESTTMP/reposetup-test/src $ hg init push-dst1 $ hg -q -R src push push-dst1 - reposetup() for $TESTTMP/reposetup-test/src (glob) + reposetup() for $TESTTMP/reposetup-test/src $ hg init pull-src1 $ hg -q -R pull-src1 pull src - reposetup() for $TESTTMP/reposetup-test/src (glob) + reposetup() for $TESTTMP/reposetup-test/src $ cat <> $HGRCPATH > [extensions] @@ -1521,13 +1521,13 @@ > reposetuptest = ! 
> EOF $ hg clone -U src clone-dst2 - reposetup() for $TESTTMP/reposetup-test/src (glob) + reposetup() for $TESTTMP/reposetup-test/src $ hg init push-dst2 $ hg -q -R src push push-dst2 - reposetup() for $TESTTMP/reposetup-test/src (glob) + reposetup() for $TESTTMP/reposetup-test/src $ hg init pull-src2 $ hg -q -R pull-src2 pull src - reposetup() for $TESTTMP/reposetup-test/src (glob) + reposetup() for $TESTTMP/reposetup-test/src $ cat <> $HGRCPATH > [extensions] @@ -1535,32 +1535,32 @@ > reposetuptest = $TESTTMP/reposetuptest.py > EOF $ hg clone -U src clone-dst3 - reposetup() for $TESTTMP/reposetup-test/src (glob) - reposetup() for $TESTTMP/reposetup-test/clone-dst3 (glob) + reposetup() for $TESTTMP/reposetup-test/src + reposetup() for $TESTTMP/reposetup-test/clone-dst3 $ hg init push-dst3 - reposetup() for $TESTTMP/reposetup-test/push-dst3 (glob) + reposetup() for $TESTTMP/reposetup-test/push-dst3 $ hg -q -R src push push-dst3 - reposetup() for $TESTTMP/reposetup-test/src (glob) - reposetup() for $TESTTMP/reposetup-test/push-dst3 (glob) + reposetup() for $TESTTMP/reposetup-test/src + reposetup() for $TESTTMP/reposetup-test/push-dst3 $ hg init pull-src3 - reposetup() for $TESTTMP/reposetup-test/pull-src3 (glob) + reposetup() for $TESTTMP/reposetup-test/pull-src3 $ hg -q -R pull-src3 pull src - reposetup() for $TESTTMP/reposetup-test/pull-src3 (glob) - reposetup() for $TESTTMP/reposetup-test/src (glob) + reposetup() for $TESTTMP/reposetup-test/pull-src3 + reposetup() for $TESTTMP/reposetup-test/src $ echo '[extensions]' >> src/.hg/hgrc $ echo '# disable extension locally' >> src/.hg/hgrc $ echo 'reposetuptest = !' 
>> src/.hg/hgrc $ hg clone -U src clone-dst4 - reposetup() for $TESTTMP/reposetup-test/clone-dst4 (glob) + reposetup() for $TESTTMP/reposetup-test/clone-dst4 $ hg init push-dst4 - reposetup() for $TESTTMP/reposetup-test/push-dst4 (glob) + reposetup() for $TESTTMP/reposetup-test/push-dst4 $ hg -q -R src push push-dst4 - reposetup() for $TESTTMP/reposetup-test/push-dst4 (glob) + reposetup() for $TESTTMP/reposetup-test/push-dst4 $ hg init pull-src4 - reposetup() for $TESTTMP/reposetup-test/pull-src4 (glob) + reposetup() for $TESTTMP/reposetup-test/pull-src4 $ hg -q -R pull-src4 pull src - reposetup() for $TESTTMP/reposetup-test/pull-src4 (glob) + reposetup() for $TESTTMP/reposetup-test/pull-src4 disabling in command line overlays with all configuration $ hg --config extensions.reposetuptest=! clone -U src clone-dst5 @@ -1605,8 +1605,8 @@ $ echo "reposetuptest = $TESTTMP/reposetuptest.py" >> parent/.hg/hgrc $ cp parent/.hg/hgrc parent/sub2/.hg/hgrc $ hg -R parent status -S -A - reposetup() for $TESTTMP/reposetup-test/parent (glob) - reposetup() for $TESTTMP/reposetup-test/parent/sub2 (glob) + reposetup() for $TESTTMP/reposetup-test/parent + reposetup() for $TESTTMP/reposetup-test/parent/sub2 C .hgsub C .hgsubstate C sub1/1 diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-fileset.t --- a/tests/test-fileset.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-fileset.t Mon Jan 22 17:53:02 2018 -0500 @@ -27,6 +27,24 @@ (string 're:a\\d') a1 a2 + $ fileset -v '!re:"a\d"' + (not + (kindpat + (symbol 're') + (string 'a\\d'))) + b1 + b2 + $ fileset -v 'path:a1 or glob:b?' 
+ (or + (kindpat + (symbol 'path') + (symbol 'a1')) + (kindpat + (symbol 'glob') + (symbol 'b?'))) + a1 + b1 + b2 $ fileset -v 'a1 or a2' (or (symbol 'a1') @@ -53,6 +71,49 @@ hg: parse error: invalid \x escape [255] +Test invalid syntax + + $ fileset -v '"added"()' + (func + (string 'added') + None) + hg: parse error: not a symbol + [255] + $ fileset -v '()()' + (func + (group + None) + None) + hg: parse error: not a symbol + [255] + $ fileset -v -- '-x' + (negate + (symbol 'x')) + hg: parse error: can't use negate operator in this context + [255] + $ fileset -v -- '-()' + (negate + (group + None)) + hg: parse error: can't use negate operator in this context + [255] + + $ fileset '"path":.' + hg: parse error: not a symbol + [255] + $ fileset 'path:foo bar' + hg: parse error at 9: invalid token + [255] + $ fileset 'foo:bar:baz' + hg: parse error: not a symbol + [255] + $ fileset 'foo:bar()' + hg: parse error: pattern must be a string + [255] + $ fileset 'foo:bar' + hg: parse error: invalid pattern kind: foo + [255] + Test files status $ rm a1 @@ -199,7 +260,7 @@ merging b2 warning: conflicts while merging b2! (edit, then use 'hg resolve --mark') * files updated, 0 files merged, 1 files removed, 1 files unresolved (glob) - use 'hg resolve' to retry unresolved file merges or 'hg update -C .' 
to abandon + use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon [1] $ fileset 'resolved()' $ fileset 'unresolved()' @@ -319,6 +380,9 @@ $ fileset -r4 'subrepo("re:su.*")' sub sub2 + $ fileset -r4 'subrepo(re:su.*)' + sub + sub2 $ fileset -r4 'subrepo("sub")' sub $ fileset -r4 'b2 or c1' diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-flagprocessor.t --- a/tests/test-flagprocessor.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-flagprocessor.t Mon Jan 22 17:53:02 2018 -0500 @@ -81,7 +81,7 @@ # Push to the server $ hg push - pushing to $TESTTMP/server (glob) + pushing to $TESTTMP/server searching for changes adding changesets adding manifests @@ -101,7 +101,7 @@ # Pull from server and update to latest revision $ hg pull default - pulling from $TESTTMP/server (glob) + pulling from $TESTTMP/server requesting all changes adding changesets adding manifests diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-fncache.t --- a/tests/test-fncache.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-fncache.t Mon Jan 22 17:53:02 2018 -0500 @@ -14,7 +14,7 @@ $ mkdir a.i $ echo "some other text" > a.i/b $ hg add - adding a.i/b (glob) + adding a.i/b $ hg ci -m second $ cat .hg/store/fncache | sort data/a.i @@ -25,7 +25,7 @@ $ mkdir a.i.hg $ echo "yet another text" > a.i.hg/c $ hg add - adding a.i.hg/c (glob) + adding a.i.hg/c $ hg ci -m third $ cat .hg/store/fncache | sort data/a.i diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-fuzz-targets.t --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-fuzz-targets.t Mon Jan 22 17:53:02 2018 -0500 @@ -0,0 +1,5 @@ +#require clang-libfuzzer test-repo + $ cd $TESTDIR/../contrib/fuzz + $ make +Just run the fuzzer for five seconds to verify it works at all. 
+ $ ./bdiff -max_total_time 5 diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-generaldelta.t --- a/tests/test-generaldelta.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-generaldelta.t Mon Jan 22 17:53:02 2018 -0500 @@ -154,7 +154,7 @@ Test that strip bundle use bundle2 $ hg --config extensions.strip= strip . 0 files updated, 0 files merged, 5 files removed, 0 files unresolved - saved backup bundle to $TESTTMP/aggressive/.hg/strip-backup/1c5d4dc9a8b8-6c68e60c-backup.hg (glob) + saved backup bundle to $TESTTMP/aggressive/.hg/strip-backup/1c5d4dc9a8b8-6c68e60c-backup.hg $ hg debugbundle .hg/strip-backup/* Stream params: {Compression: BZ} changegroup -- {nbchanges: 1, version: 02} diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-getbundle.t --- a/tests/test-getbundle.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-getbundle.t Mon Jan 22 17:53:02 2018 -0500 @@ -264,9 +264,9 @@ $ cat access.log * - - [*] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob) - * - - [*] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob) + $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ (glob) * - - [*] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob) - * - - [*] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:common=700b7e19db54103633c4bf4a6a6b6d55f4d50c03+d5f6e1ea452285324836a49d7d3c2a63cfed1d31&heads=13c0170174366b441dc68e8e33757232fa744458+bac16991d12ff45f9dc43c52da1946dfadb83e80 x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob) + $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:common=700b7e19db54103633c4bf4a6a6b6d55f4d50c03+d5f6e1ea452285324836a49d7d3c2a63cfed1d31&heads=13c0170174366b441dc68e8e33757232fa744458+bac16991d12ff45f9dc43c52da1946dfadb83e80 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ (glob) $ cat error.log diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-git-export.t --- a/tests/test-git-export.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-git-export.t Mon 
Jan 22 17:53:02 2018 -0500 @@ -99,7 +99,7 @@ warning: dir2 not inside relative root dir1 $ hg diff --git --root dir1 -r 1:tip 'dir2/{copy}' - warning: dir2/{copy} not inside relative root dir1 (glob) + warning: dir2/{copy} not inside relative root dir1 $ cd dir1 $ hg diff --git --root .. -r 1:tip @@ -161,7 +161,7 @@ new +copy1 $ hg diff --git --root . -r 1:tip ../dir2 - warning: ../dir2 not inside relative root . (glob) + warning: ../dir2 not inside relative root . $ hg diff --git --root . -r 1:tip '../dir2/*' warning: ../dir2/* not inside relative root . (glob) $ cd .. diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-githelp.t --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-githelp.t Mon Jan 22 17:53:02 2018 -0500 @@ -0,0 +1,296 @@ + $ cat >> $HGRCPATH << EOF + > [extensions] + > githelp = + > EOF + + $ hg init repo + $ cd repo + $ echo foo > test_file + $ mkdir dir + $ echo foo > dir/file + $ echo foo > removed_file + $ echo foo > deleted_file + $ hg add -q . + $ hg commit -m 'bar' + $ hg bookmark both + $ touch both + $ touch untracked_file + $ hg remove removed_file + $ rm deleted_file + +githelp on a single command should succeed + $ hg githelp -- commit + hg commit + $ hg githelp -- git commit + hg commit + +githelp should fail nicely if we don't give it arguments + $ hg githelp + abort: missing git command - usage: hg githelp -- + [255] + $ hg githelp -- git + abort: missing git command - usage: hg githelp -- + [255] + +githelp on a command with options should succeed + $ hg githelp -- commit -pm "abc" + hg commit --interactive -m 'abc' + +githelp on a command with standalone unrecognized option should succeed with warning + $ hg githelp -- commit -p -v + ignoring unknown option -v + hg commit --interactive + +githelp on a command with unrecognized option packed with other options should fail with error + $ hg githelp -- commit -pv + abort: unknown option v packed with other options + Please try passing the option as it's own flag: -v + [255] + 
+githelp for git rebase --skip + $ hg githelp -- git rebase --skip + hg revert --all -r . + hg rebase --continue + +githelp for git commit --amend (hg commit --amend pulls up an editor) + $ hg githelp -- commit --amend + hg commit --amend + +githelp for git commit --amend --no-edit (hg amend does not pull up an editor) + $ hg githelp -- commit --amend --no-edit + hg amend + +githelp for git checkout -- . (checking out a directory) + $ hg githelp -- checkout -- . + note: use --no-backup to avoid creating .orig files + + hg revert . + +githelp for git checkout "HEAD^" (should still work to pass a rev) + $ hg githelp -- checkout "HEAD^" + hg update .^ + +githelp checkout: args after -- should be treated as paths no matter what + $ hg githelp -- checkout -- HEAD + note: use --no-backup to avoid creating .orig files + + hg revert HEAD + +githelp for git checkout with rev and path + $ hg githelp -- checkout "HEAD^" -- file.txt + note: use --no-backup to avoid creating .orig files + + hg revert -r .^ file.txt + +githelp for git with rev and path, without separator + $ hg githelp -- checkout "HEAD^" file.txt + note: use --no-backup to avoid creating .orig files + + hg revert -r .^ file.txt + +githelp for checkout with a file as first argument + $ hg githelp -- checkout test_file + note: use --no-backup to avoid creating .orig files + + hg revert test_file + +githelp for checkout with a removed file as first argument + $ hg githelp -- checkout removed_file + note: use --no-backup to avoid creating .orig files + + hg revert removed_file + +githelp for checkout with a deleted file as first argument + $ hg githelp -- checkout deleted_file + note: use --no-backup to avoid creating .orig files + + hg revert deleted_file + +githelp for checkout with a untracked file as first argument + $ hg githelp -- checkout untracked_file + note: use --no-backup to avoid creating .orig files + + hg revert untracked_file + +githelp for checkout with a directory as first argument + $ hg githelp 
-- checkout dir + note: use --no-backup to avoid creating .orig files + + hg revert dir + +githelp for checkout when not in repo root + $ cd dir + $ hg githelp -- checkout file + note: use --no-backup to avoid creating .orig files + + hg revert file + + $ cd .. + +githelp for checkout with an argument that is both a file and a revision + $ hg githelp -- checkout both + hg update both + +githelp for checkout with the -p option + $ hg githelp -- git checkout -p xyz + hg revert -i -r xyz + + $ hg githelp -- git checkout -p xyz -- abc + note: use --no-backup to avoid creating .orig files + + hg revert -i -r xyz abc + +githelp for checkout with the -f option and a rev + $ hg githelp -- git checkout -f xyz + hg update -C xyz + $ hg githelp -- git checkout --force xyz + hg update -C xyz + +githelp for checkout with the -f option without an arg + $ hg githelp -- git checkout -f + hg revert --all + $ hg githelp -- git checkout --force + hg revert --all + +githelp for grep with pattern and path + $ hg githelp -- grep shrubbery flib/intern/ + hg grep shrubbery flib/intern/ + +githelp for reset, checking ~ in git becomes ~1 in mercurial + $ hg githelp -- reset HEAD~ + hg update .~1 + $ hg githelp -- reset "HEAD^" + hg update .^ + $ hg githelp -- reset HEAD~3 + hg update .~3 + + $ hg githelp -- reset --mixed HEAD + NOTE: --mixed has no meaning since Mercurial has no staging area + + hg update . + $ hg githelp -- reset --soft HEAD + NOTE: --soft has no meaning since Mercurial has no staging area + + hg update . + $ hg githelp -- reset --hard HEAD + hg update --clean . + +githelp for git show --name-status + $ hg githelp -- git show --name-status + hg log --style status -r . + +githelp for git show --pretty=format: --name-status + $ hg githelp -- git show --pretty=format: --name-status + hg status --change . 
+ +githelp for show with no arguments + $ hg githelp -- show + hg export + +githelp for show with a path + $ hg githelp -- show test_file + hg cat test_file + +githelp for show with not a path: + $ hg githelp -- show rev + hg export rev + +githelp for show with many arguments + $ hg githelp -- show argone argtwo + hg export argone argtwo + $ hg githelp -- show test_file argone argtwo + hg cat test_file argone argtwo + +githelp for show with --unified options + $ hg githelp -- show --unified=10 + hg export --config diff.unified=10 + $ hg githelp -- show -U100 + hg export --config diff.unified=100 + +githelp for show with a path and --unified + $ hg githelp -- show -U20 test_file + hg cat test_file --config diff.unified=20 + +githelp for stash drop without name + $ hg githelp -- git stash drop + hg shelve -d + +githelp for stash drop with name + $ hg githelp -- git stash drop xyz + hg shelve -d xyz + +githelp for whatchanged should show deprecated message + $ hg githelp -- whatchanged -p + This command has been deprecated in the git project, thus isn't supported by this tool. + + +githelp for git branch -m renaming + $ hg githelp -- git branch -m old new + hg bookmark -m old new + +When the old name is omitted, git branch -m new renames the current branch. 
+ $ hg githelp -- git branch -m new + hg bookmark -m `hg log -T"{activebookmark}" -r .` new + +Branch deletion in git strips commits + $ hg githelp -- git branch -d + hg strip -B + $ hg githelp -- git branch -d feature + hg strip -B feature -B + $ hg githelp -- git branch --delete experiment1 experiment2 + hg strip -B experiment1 -B experiment2 -B + +githelp for reuse message using the shorthand + $ hg githelp -- git commit -C deadbeef + hg commit -M deadbeef + +githelp for reuse message using the the long version + $ hg githelp -- git commit --reuse-message deadbeef + hg commit -M deadbeef + +githelp for apply with no options + $ hg githelp -- apply + hg import --no-commit + +githelp for apply with directory strip custom + $ hg githelp -- apply -p 5 + hg import --no-commit -p 5 + +git merge-base + $ hg githelp -- git merge-base --is-ancestor + ignoring unknown option --is-ancestor + NOTE: ancestors() is part of the revset language. + Learn more about revsets with 'hg help revsets' + + hg log -T '{node}\n' -r 'ancestor(A,B)' + +githelp for git blame + $ hg githelp -- git blame + hg annotate -udl + +githelp for add + + $ hg githelp -- git add + hg add + + $ hg githelp -- git add -p + note: Mercurial will commit when complete, as there is no staging area in Mercurial + + hg commit --interactive + + $ hg githelp -- git add --all + note: use hg addremove to remove files that have been deleted. + + hg add + +githelp for reflog + + $ hg githelp -- git reflog + hg journal + + note: in hg commits can be deleted from repo but we always have backups. + + $ hg githelp -- git reflog --all + hg journal --all + + note: in hg commits can be deleted from repo but we always have backups. diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-globalopts.t --- a/tests/test-globalopts.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-globalopts.t Mon Jan 22 17:53:02 2018 -0500 @@ -88,7 +88,7 @@ abort: no repository found in '$TESTTMP' (.hg not found)! 
[255] $ hg -R b ann a/a - abort: a/a not under root '$TESTTMP/b' (glob) + abort: a/a not under root '$TESTTMP/b' (consider using '--cwd b') [255] $ hg log @@ -355,6 +355,7 @@ environment Environment Variables extensions Using Additional Features filesets Specifying File Sets + flags Command-line flags glossary Glossary hgignore Syntax for Mercurial Ignore Files hgweb Configuring hgweb @@ -439,6 +440,7 @@ environment Environment Variables extensions Using Additional Features filesets Specifying File Sets + flags Command-line flags glossary Glossary hgignore Syntax for Mercurial Ignore Files hgweb Configuring hgweb diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-glog.t --- a/tests/test-glog.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-glog.t Mon Jan 22 17:53:02 2018 -0500 @@ -88,21 +88,34 @@ > commands, > extensions, > revsetlang, + > smartset, > ) > + > def logrevset(repo, pats, opts): + > revs = cmdutil._logrevs(repo, opts) + > if not revs: + > return None + > match, pats, slowpath = cmdutil._makelogmatcher(repo, revs, pats, opts) + > return cmdutil._makelogrevset(repo, match, pats, slowpath, opts) + > > def uisetup(ui): - > def printrevset(orig, ui, repo, *pats, **opts): + > def printrevset(orig, repo, pats, opts): + > revs, filematcher = orig(repo, pats, opts) > if opts.get('print_revset'): - > expr = cmdutil.getgraphlogrevs(repo, pats, opts)[1] + > expr = logrevset(repo, pats, opts) > if expr: > tree = revsetlang.parse(expr) + > tree = revsetlang.analyze(tree) > else: > tree = [] + > ui = repo.ui > ui.write('%r\n' % (opts.get('rev', []),)) > ui.write(revsetlang.prettyformat(tree) + '\n') - > return 0 - > return orig(ui, repo, *pats, **opts) - > entry = extensions.wrapcommand(commands.table, 'log', printrevset) + > ui.write(smartset.prettyformat(revs) + '\n') + > revs = smartset.baseset() # display no revisions + > return revs, filematcher + > extensions.wrapfunction(cmdutil, 'getlogrevs', printrevset) + > aliases, entry = cmdutil.findcmd('log', 
commands.table) > entry[1].append(('', 'print-revset', False, > 'print generated revset and exit (DEPRECATED)')) > EOF @@ -1445,6 +1458,7 @@ $ testlog -r 27 -r 25 -r 21 -r 34 -r 32 -r 31 ['27', '25', '21', '34', '32', '31'] [] + --- log.nodes * (glob) +++ glog.nodes * (glob) @@ -1,6 +1,6 @@ @@ -1459,90 +1473,126 @@ +nodetag 21 $ testlog -u test -u not-a-user [] - (group - (group - (or - (list - (func - (symbol 'user') - (string 'test')) - (func - (symbol 'user') - (string 'not-a-user')))))) + (or + (list + (func + (symbol 'user') + (string 'test')) + (func + (symbol 'user') + (string 'not-a-user')))) + , + , + >, + , + >>> $ testlog -b not-a-branch abort: unknown revision 'not-a-branch'! abort: unknown revision 'not-a-branch'! abort: unknown revision 'not-a-branch'! $ testlog -b 35 -b 36 --only-branch branch [] - (group - (group + (or + (list + (func + (symbol 'branch') + (string 'default')) (or (list (func (symbol 'branch') - (string 'default')) - (func - (symbol 'branch') (string 'branch')) (func (symbol 'branch') (string 'branch')))))) + , + , + >, + , + >, + , + >>>> $ testlog -k expand -k merge [] - (group - (group - (or - (list - (func - (symbol 'keyword') - (string 'expand')) - (func - (symbol 'keyword') - (string 'merge')))))) + (or + (list + (func + (symbol 'keyword') + (string 'expand')) + (func + (symbol 'keyword') + (string 'merge')))) + , + , + >, + , + >>> $ testlog --only-merges [] - (group + (func + (symbol 'merge') + None) + , + > + $ testlog --no-merges + [] + (not (func (symbol 'merge') None)) - $ testlog --no-merges - [] - (group - (not - (func - (symbol 'merge') - None))) + , + , + >>> $ testlog --date '2 0 to 4 0' [] - (group - (func - (symbol 'date') - (string '2 0 to 4 0'))) + (func + (symbol 'date') + (string '2 0 to 4 0')) + , + > $ hg log -G -d 'brace ) in a date' hg: parse error: invalid date: 'brace ) in a date' [255] $ testlog --prune 31 --prune 32 [] - (group - (group - (and - (not - (group - (or - (list - (string '31') - (func - 
(symbol 'ancestors') - (string '31')))))) - (not - (group - (or - (list - (string '32') - (func - (symbol 'ancestors') - (string '32'))))))))) + (not + (or + (list + (func + (symbol 'ancestors') + (string '31')) + (func + (symbol 'ancestors') + (string '32'))))) + , + , + >, + , + >>>> Dedicated repo for --follow and paths filtering. The g is crafted to have 2 filelog topological heads in a linear changeset graph. @@ -1553,9 +1603,11 @@ $ testlog --follow [] [] + $ testlog -rnull ['null'] [] + $ echo a > a $ echo aa > aa $ echo f > f @@ -1589,53 +1641,60 @@ $ testlog a [] - (group - (group - (func - (symbol 'filelog') - (string 'a')))) + (func + (symbol 'filelog') + (string 'a')) + , set([0])> $ testlog a b [] - (group - (group - (or - (list - (func - (symbol 'filelog') - (string 'a')) - (func - (symbol 'filelog') - (string 'b')))))) + (or + (list + (func + (symbol 'filelog') + (string 'a')) + (func + (symbol 'filelog') + (string 'b')))) + , + , + >> Test falling back to slow path for non-existing files $ testlog a c [] - (group - (func - (symbol '_matchfiles') - (list - (string 'r:') - (string 'd:relpath') - (string 'p:a') - (string 'p:c')))) + (func + (symbol '_matchfiles') + (list + (string 'r:') + (string 'd:relpath') + (string 'p:a') + (string 'p:c'))) + , + > Test multiple --include/--exclude/paths $ testlog --include a --include e --exclude b --exclude e a e [] - (group - (func - (symbol '_matchfiles') - (list - (string 'r:') - (string 'd:relpath') - (string 'p:a') - (string 'p:e') - (string 'i:a') - (string 'i:e') - (string 'x:b') - (string 'x:e')))) + (func + (symbol '_matchfiles') + (list + (string 'r:') + (string 'd:relpath') + (string 'p:a') + (string 'p:e') + (string 'i:a') + (string 'i:e') + (string 'x:b') + (string 'x:e'))) + , + > Test glob expansion of pats @@ -1647,11 +1706,11 @@ > testlog a*; > fi; [] - (group - (group - (func - (symbol 'filelog') - (string 'aa')))) + (func + (symbol 'filelog') + (string 'aa')) + , set([0])> Test --follow on a 
non-existent directory @@ -1665,17 +1724,15 @@ $ hg up -q '.^' $ testlog -f dir [] - (group - (and - (func - (symbol 'ancestors') - (symbol '.')) - (func - (symbol '_matchfiles') - (list - (string 'r:') - (string 'd:relpath') - (string 'p:dir'))))) + (func + (symbol '_matchfiles') + (list + (string 'r:') + (string 'd:relpath') + (string 'p:dir'))) + , + > $ hg up -q tip Test --follow on file not in parent revision @@ -1689,50 +1746,39 @@ $ testlog -f 'glob:*' [] - (group - (and - (func - (symbol 'ancestors') - (symbol '.')) - (func - (symbol '_matchfiles') - (list - (string 'r:') - (string 'd:relpath') - (string 'p:glob:*'))))) + (func + (symbol '_matchfiles') + (list + (string 'r:') + (string 'd:relpath') + (string 'p:glob:*'))) + , + > Test --follow on a single rename $ hg up -q 2 $ testlog -f a [] - (group - (group - (func - (symbol 'follow') - (string 'a')))) + [] + Test --follow and multiple renames $ hg up -q tip $ testlog -f e [] - (group - (group - (func - (symbol 'follow') - (string 'e')))) + [] + Test --follow and multiple filelog heads $ hg up -q 2 $ testlog -f g [] - (group - (group - (func - (symbol 'follow') - (string 'g')))) + [] + $ cat log.nodes nodetag 2 nodetag 1 @@ -1740,11 +1786,8 @@ $ hg up -q tip $ testlog -f g [] - (group - (group - (func - (symbol 'follow') - (string 'g')))) + [] + $ cat log.nodes nodetag 3 nodetag 2 @@ -1754,16 +1797,8 @@ $ testlog -f g e [] - (group - (group - (or - (list - (func - (symbol 'follow') - (string 'g')) - (func - (symbol 'follow') - (string 'e')))))) + [] + $ cat log.nodes nodetag 4 nodetag 3 @@ -1777,6 +1812,7 @@ $ testlog -f [] [] + Test --follow-first @@ -1791,22 +1827,15 @@ $ hg ci -m "merge 5 and 4" $ testlog --follow-first [] - (group - (func - (symbol '_firstancestors') - (func - (symbol 'rev') - (symbol '6')))) + [] + Cannot compare with log --follow-first FILE as it never worked $ hg log -G --print-revset --follow-first e [] - (group - (group - (func - (symbol '_followfirst') - (string 'e')))) + [] + 
$ hg log -G --follow-first e --template '{rev} {desc|firstline}\n' @ 6 merge 5 and 4 |\ @@ -1838,53 +1867,59 @@ $ hg up -q 4 $ testlog "set:copied()" [] - (group - (func - (symbol '_matchfiles') - (list - (string 'r:') - (string 'd:relpath') - (string 'p:set:copied()')))) + (func + (symbol '_matchfiles') + (list + (string 'r:') + (string 'd:relpath') + (string 'p:set:copied()'))) + , + > $ testlog --include "set:copied()" [] - (group - (func - (symbol '_matchfiles') - (list - (string 'r:') - (string 'd:relpath') - (string 'i:set:copied()')))) + (func + (symbol '_matchfiles') + (list + (string 'r:') + (string 'd:relpath') + (string 'i:set:copied()'))) + , + > $ testlog -r "sort(file('set:copied()'), -rev)" ["sort(file('set:copied()'), -rev)"] [] + Test --removed $ testlog --removed [] [] + $ testlog --removed a [] - (group - (func - (symbol '_matchfiles') - (list - (string 'r:') - (string 'd:relpath') - (string 'p:a')))) + (func + (symbol '_matchfiles') + (list + (string 'r:') + (string 'd:relpath') + (string 'p:a'))) + , + > $ testlog --removed --follow a [] - (group - (and - (func - (symbol 'ancestors') - (symbol '.')) - (func - (symbol '_matchfiles') - (list - (string 'r:') - (string 'd:relpath') - (string 'p:a'))))) + (func + (symbol '_matchfiles') + (list + (string 'r:') + (string 'd:relpath') + (string 'p:a'))) + , + > Test --patch and --stat with --follow and --follow-first @@ -1988,6 +2023,7 @@ $ testlog -r 'foo-bar' ['foo-bar'] [] + Test --follow and forward --rev @@ -2208,52 +2244,29 @@ +g $ testlog --follow -r6 -r8 -r5 -r7 -r4 ['6', '8', '5', '7', '4'] - (group - (func - (symbol 'descendants') - (func - (symbol 'rev') - (symbol '6')))) + [] + Test --follow-first and forward --rev $ testlog --follow-first -r6 -r8 -r5 -r7 -r4 ['6', '8', '5', '7', '4'] - (group - (func - (symbol '_firstdescendants') - (func - (symbol 'rev') - (symbol '6')))) - --- log.nodes * (glob) - +++ glog.nodes * (glob) - @@ -1,3 +1,3 @@ - -nodetag 6 - nodetag 8 - nodetag 7 - +nodetag 6 
+ [] + Test --follow and backward --rev $ testlog --follow -r6 -r5 -r7 -r8 -r4 ['6', '5', '7', '8', '4'] - (group - (func - (symbol 'ancestors') - (func - (symbol 'rev') - (symbol '6')))) + [] + Test --follow-first and backward --rev $ testlog --follow-first -r6 -r5 -r7 -r8 -r4 ['6', '5', '7', '8', '4'] - (group - (func - (symbol '_firstancestors') - (func - (symbol 'rev') - (symbol '6')))) + [] + Test --follow with --rev of graphlog extension @@ -2269,27 +2282,26 @@ $ cd dir $ testlog . [] - (group - (func - (symbol '_matchfiles') - (list - (string 'r:') - (string 'd:relpath') - (string 'p:.')))) + (func + (symbol '_matchfiles') + (list + (string 'r:') + (string 'd:relpath') + (string 'p:.'))) + , + > $ testlog ../b [] - (group - (group - (func - (symbol 'filelog') - (string '../b')))) + (func + (symbol 'filelog') + (string '../b')) + , set([1])> $ testlog -f ../b [] - (group - (group - (func - (symbol 'follow') - (string 'b')))) + [] + $ cd .. Test --hidden @@ -2305,9 +2317,11 @@ $ testlog [] [] + $ testlog --hidden [] [] + $ hg log -G --template '{rev} {desc}\n' o 7 Added tag foo-bar for changeset fc281d8ff18d | diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-graft.t --- a/tests/test-graft.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-graft.t Mon Jan 22 17:53:02 2018 -0500 @@ -811,7 +811,7 @@ $ hg up -qC 7 $ hg tag -l -r 13 tmp $ hg --config extensions.strip= strip 2 - saved backup bundle to $TESTTMP/a/.hg/strip-backup/5c095ad7e90f-d323a1e4-backup.hg (glob) + saved backup bundle to $TESTTMP/a/.hg/strip-backup/5c095ad7e90f-d323a1e4-backup.hg $ hg graft tmp skipping already grafted revision 8:7a4785234d87 (2:ef0ef43d49e7 also has unknown origin 5c095ad7e90f) [255] diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-hardlinks.t --- a/tests/test-hardlinks.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-hardlinks.t Mon Jan 22 17:53:02 2018 -0500 @@ -155,7 +155,7 @@ $ cd r3 $ hg push - pushing to $TESTTMP/r1 (glob) + pushing to $TESTTMP/r1 searching for changes 
adding changesets adding manifests diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-help.t --- a/tests/test-help.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-help.t Mon Jan 22 17:53:02 2018 -0500 @@ -110,6 +110,7 @@ environment Environment Variables extensions Using Additional Features filesets Specifying File Sets + flags Command-line flags glossary Glossary hgignore Syntax for Mercurial Ignore Files hgweb Configuring hgweb @@ -188,6 +189,7 @@ environment Environment Variables extensions Using Additional Features filesets Specifying File Sets + flags Command-line flags glossary Glossary hgignore Syntax for Mercurial Ignore Files hgweb Configuring hgweb @@ -259,6 +261,7 @@ eol automatically manage newlines in repository files extdiff command to allow external programs to compare revisions factotum http authentication with factotum + githelp try mapping git commands to Mercurial commands gpg commands to sign and verify changesets hgk browse the repository in a graphical way highlight syntax highlighting for hgweb (requires Pygments) @@ -865,6 +868,7 @@ environment Environment Variables extensions Using Additional Features filesets Specifying File Sets + flags Command-line flags glossary Glossary hgignore Syntax for Mercurial Ignore Files hgweb Configuring hgweb @@ -895,6 +899,8 @@ builds a repo with a given DAG from scratch in the current empty repo debugbundle lists the contents of a bundle + debugcapabilities + lists the capabilities of a remote peer debugcheckstate validate the correctness of the current dirstate debugcolor show available color, effects or style @@ -914,9 +920,12 @@ show the contents of the current dirstate debugdiscovery runs the changeset discovery protocol in isolation + debugdownload + download a resource using Mercurial logic and config debugextensions show information about active extensions debugfileset parse and apply a fileset specification + debugformat display format information about the current repository debugfsinfo show 
information detected about current filesystem debuggetbundle retrieves a bundle from a repo @@ -2011,6 +2020,13 @@ Specifying File Sets
      + @@ -783,7 +783,7 @@ @@ -791,7 +791,7 @@ @@ -799,7 +799,7 @@ @@ -816,7 +816,7 @@ ajaxScrollInit( '/shortlog/%next%', '', @@ -1780,66 +1780,51 @@
      -
        - -
          + +
          @@ -1850,9 +1835,12 @@ - -
          tag:{tag|escape}
          tag:{tag|escape}
          parent {rev}:
          + + flags + + + Command-line flags +
          glossary diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-hgignore.t --- a/tests/test-hgignore.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-hgignore.t Mon Jan 22 17:53:02 2018 -0500 @@ -59,9 +59,9 @@ I dir/c.o $ hg debugignore dir/c.o dir/missing.o - dir/c.o is ignored (glob) + dir/c.o is ignored (ignore rule in $TESTTMP/ignorerepo/.hgignore, line 1: 'dir/.*\.o') (glob) - dir/missing.o is ignored (glob) + dir/missing.o is ignored (ignore rule in $TESTTMP/ignorerepo/.hgignore, line 1: 'dir/.*\.o') (glob) $ cd dir $ hg debugignore c.o missing.o @@ -164,7 +164,7 @@ $ echo "syntax: invalid" > .hgignore $ hg status - $TESTTMP/ignorerepo/.hgignore: ignoring invalid syntax 'invalid' (glob) + $TESTTMP/ignorerepo/.hgignore: ignoring invalid syntax 'invalid' A dir/b.o ? .hgignore ? a.c @@ -236,7 +236,7 @@ $ hg debugignore a.c a.c is not ignored $ hg debugignore dir/c.o - dir/c.o is ignored (glob) + dir/c.o is ignored (ignore rule in $TESTTMP/ignorerepo/.hgignore, line 2: 'dir/**/c.o') (glob) Check using 'include:' in ignore file @@ -265,7 +265,7 @@ $ cp otherignore goodignore $ echo "include:badignore" >> otherignore $ hg status - skipping unreadable pattern file 'badignore': No such file or directory + skipping unreadable pattern file 'badignore': $ENOENT$ A dir/b.o $ mv goodignore otherignore @@ -322,7 +322,7 @@ $ hg status | grep file2 [1] $ hg debugignore dir1/file2 - dir1/file2 is ignored (glob) + dir1/file2 is ignored (ignore rule in dir2/.hgignore, line 1: 'file*2') #if windows diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-hgrc.t --- a/tests/test-hgrc.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-hgrc.t Mon Jan 22 17:53:02 2018 -0500 @@ -30,7 +30,7 @@ $ cat .hg/hgrc # example repository config (see 'hg help config' for more info) [paths] - default = $TESTTMP/foo%bar (glob) + default = $TESTTMP/foo%bar # path aliases to other clones of this repo in URLs or filesystem paths # (see 'hg help config.paths' for more info) @@ -43,10 +43,10 @@ # name and 
email (local to this repository, optional), e.g. # username = Jane Doe $ hg paths - default = $TESTTMP/foo%bar (glob) + default = $TESTTMP/foo%bar $ hg showconfig - bundle.mainreporoot=$TESTTMP/foobar (glob) - paths.default=$TESTTMP/foo%bar (glob) + bundle.mainreporoot=$TESTTMP/foobar + paths.default=$TESTTMP/foo%bar $ cd .. issue1829: wrong indentation @@ -242,4 +242,4 @@ $ hg showconfig --debug paths plain: True read config from: $TESTTMP/hgrc - $TESTTMP/hgrc:17: paths.foo=$TESTTMP/bar (glob) + $TESTTMP/hgrc:17: paths.foo=$TESTTMP/bar diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-hgweb-bundle.t --- a/tests/test-hgweb-bundle.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-hgweb-bundle.t Mon Jan 22 17:53:02 2018 -0500 @@ -18,7 +18,7 @@ $ hg strip -r 1 0 files updated, 0 files merged, 1 files removed, 0 files unresolved - saved backup bundle to $TESTTMP/server/.hg/strip-backup/ed602e697e0f-cc9fff6a-backup.hg (glob) + saved backup bundle to $TESTTMP/server/.hg/strip-backup/ed602e697e0f-cc9fff6a-backup.hg Serve from a bundle file diff -r 87676e8ee056 -r 27b6df1b5adb tests/test-hgweb-commands.t --- a/tests/test-hgweb-commands.t Mon Jan 08 16:07:51 2018 -0800 +++ b/tests/test-hgweb-commands.t Mon Jan 22 17:53:02 2018 -0500 @@ -775,7 +775,7 @@ test branch commit with null character: - unstable tip something + draft unstable tip something
          test branch - stable + draft stable
          test Added tag 1.0 for changeset 2ef0ac749a14 - default + draft default
          test base - 1.0 anotherthing + draft 1.0 anotherthing