--- a/.clang-format Thu Mar 15 22:35:07 2018 -0700
+++ b/.clang-format Mon Mar 19 08:07:18 2018 -0700
@@ -6,3 +6,8 @@
IndentCaseLabels: false
AllowShortBlocksOnASingleLine: false
AllowShortFunctionsOnASingleLine: false
+IncludeCategories:
+ - Regex: '^<'
+ Priority: 1
+ - Regex: '^"'
+ Priority: 2
--- a/Makefile Thu Mar 15 22:35:07 2018 -0700
+++ b/Makefile Mon Mar 19 08:07:18 2018 -0700
@@ -132,8 +132,9 @@
$(PYTHON) i18n/hggettext mercurial/commands.py \
hgext/*.py hgext/*/__init__.py \
mercurial/fileset.py mercurial/revset.py \
- mercurial/templatefilters.py mercurial/templatekw.py \
- mercurial/templater.py \
+ mercurial/templatefilters.py \
+ mercurial/templatefuncs.py \
+ mercurial/templatekw.py \
mercurial/filemerge.py \
mercurial/hgweb/webcommands.py \
mercurial/util.py \
@@ -234,18 +235,6 @@
docker-ubuntu-xenial-ppa: contrib/docker/ubuntu-xenial
contrib/dockerdeb ubuntu xenial --source-only
-docker-ubuntu-yakkety: contrib/docker/ubuntu-yakkety
- contrib/dockerdeb ubuntu yakkety
-
-docker-ubuntu-yakkety-ppa: contrib/docker/ubuntu-yakkety
- contrib/dockerdeb ubuntu yakkety --source-only
-
-docker-ubuntu-zesty: contrib/docker/ubuntu-zesty
- contrib/dockerdeb ubuntu zesty
-
-docker-ubuntu-zesty-ppa: contrib/docker/ubuntu-zesty
- contrib/dockerdeb ubuntu zesty --source-only
-
docker-ubuntu-artful: contrib/docker/ubuntu-artful
contrib/dockerdeb ubuntu artful
@@ -318,8 +307,6 @@
osx deb ppa docker-debian-jessie docker-debian-stretch \
docker-ubuntu-trusty docker-ubuntu-trusty-ppa \
docker-ubuntu-xenial docker-ubuntu-xenial-ppa \
- docker-ubuntu-yakkety docker-ubuntu-yakkety-ppa \
- docker-ubuntu-zesty docker-ubuntu-zesty-ppa \
docker-ubuntu-artful docker-ubuntu-artful-ppa \
fedora20 docker-fedora20 fedora21 docker-fedora21 \
centos5 docker-centos5 centos6 docker-centos6 centos7 docker-centos7 \
--- a/contrib/Makefile.python Thu Mar 15 22:35:07 2018 -0700
+++ b/contrib/Makefile.python Mon Mar 19 08:07:18 2018 -0700
@@ -1,4 +1,4 @@
-PYTHONVER=2.7.10
+PYTHONVER=2.7.14
PYTHONNAME=python-
PREFIX=$(HOME)/bin/prefix-$(PYTHONNAME)$(PYTHONVER)
SYMLINKDIR=$(HOME)/bin
--- a/contrib/buildrpm Thu Mar 15 22:35:07 2018 -0700
+++ b/contrib/buildrpm Mon Mar 19 08:07:18 2018 -0700
@@ -20,8 +20,8 @@
;;
--withpython | --with-python)
shift
- PYTHONVER=2.7.10
- PYTHONMD5=d7547558fd673bd9d38e2108c6b42521
+ PYTHONVER=2.7.14
+ PYTHONMD5=cee2e4b33ad3750da77b2e85f2f8b724
;;
--rpmbuilddir )
shift
--- a/contrib/check-code.py Thu Mar 15 22:35:07 2018 -0700
+++ b/contrib/check-code.py Mon Mar 19 08:07:18 2018 -0700
@@ -150,6 +150,7 @@
(r'grep.* -[ABC]', "don't use grep's context flags"),
(r'find.*-printf',
"don't use 'find -printf', it doesn't exist on BSD find(1)"),
+ (r'\$RANDOM ', "don't use bash-only $RANDOM to generate random values"),
],
# warnings
[
@@ -318,9 +319,9 @@
"use util.readfile() instead"),
(r'[\s\(](open|file)\([^)]*\)\.write\(',
"use util.writefile() instead"),
- (r'^[\s\(]*(open(er)?|file)\([^)]*\)',
+ (r'^[\s\(]*(open(er)?|file)\([^)]*\)(?!\.close\(\))',
"always assign an opened file to a variable, and close it afterwards"),
- (r'[\s\(](open|file)\([^)]*\)\.',
+ (r'[\s\(](open|file)\([^)]*\)\.(?!close\(\))',
"always assign an opened file to a variable, and close it afterwards"),
(r'(?i)descend[e]nt', "the proper spelling is descendAnt"),
(r'\.debug\(\_', "don't mark debug messages for translation"),
@@ -541,8 +542,11 @@
for i, pseq in enumerate(pats):
# fix-up regexes for multi-line searches
p = pseq[0]
- # \s doesn't match \n
- p = re.sub(r'(?<!\\)\\s', r'[ \\t]', p)
+ # \s doesn't match \n (done in two steps)
+ # first, we replace \s that appears in a set already
+ p = re.sub(r'\[\\s', r'[ \\t', p)
+ # now we replace other \s instances.
+ p = re.sub(r'(?<!(\\|\[))\\s', r'[ \\t]', p)
# [^...] doesn't match newline
p = re.sub(r'(?<!\\)\[\^', r'[^\\n', p)
--- a/contrib/check-config.py Thu Mar 15 22:35:07 2018 -0700
+++ b/contrib/check-config.py Mon Mar 19 08:07:18 2018 -0700
@@ -15,7 +15,7 @@
documented = {}
allowinconsistent = set()
-configre = re.compile(r'''
+configre = re.compile(br'''
# Function call
ui\.config(?P<ctype>|int|bool|list)\(
# First argument.
@@ -25,7 +25,7 @@
(?:default=)?(?P<default>\S+?))?
\)''', re.VERBOSE | re.MULTILINE)
-configwithre = re.compile('''
+configwithre = re.compile(b'''
ui\.config(?P<ctype>with)\(
# First argument is callback function. This doesn't parse robustly
# if it is e.g. a function call.
@@ -35,57 +35,57 @@
(?:default=)?(?P<default>\S+?))?
\)''', re.VERBOSE | re.MULTILINE)
-configpartialre = (r"""ui\.config""")
+configpartialre = (br"""ui\.config""")
-ignorere = re.compile(r'''
+ignorere = re.compile(br'''
\#\s(?P<reason>internal|experimental|deprecated|developer|inconsistent)\s
config:\s(?P<config>\S+\.\S+)$
''', re.VERBOSE | re.MULTILINE)
def main(args):
for f in args:
- sect = ''
- prevname = ''
- confsect = ''
- carryover = ''
+ sect = b''
+ prevname = b''
+ confsect = b''
+ carryover = b''
linenum = 0
- for l in open(f):
+ for l in open(f, 'rb'):
linenum += 1
# check topic-like bits
- m = re.match('\s*``(\S+)``', l)
+ m = re.match(b'\s*``(\S+)``', l)
if m:
prevname = m.group(1)
- if re.match('^\s*-+$', l):
+ if re.match(b'^\s*-+$', l):
sect = prevname
- prevname = ''
+ prevname = b''
if sect and prevname:
- name = sect + '.' + prevname
+ name = sect + b'.' + prevname
documented[name] = 1
# check docstring bits
- m = re.match(r'^\s+\[(\S+)\]', l)
+ m = re.match(br'^\s+\[(\S+)\]', l)
if m:
confsect = m.group(1)
continue
- m = re.match(r'^\s+(?:#\s*)?(\S+) = ', l)
+ m = re.match(br'^\s+(?:#\s*)?(\S+) = ', l)
if m:
- name = confsect + '.' + m.group(1)
+ name = confsect + b'.' + m.group(1)
documented[name] = 1
# like the bugzilla extension
- m = re.match(r'^\s*(\S+\.\S+)$', l)
+ m = re.match(br'^\s*(\S+\.\S+)$', l)
if m:
documented[m.group(1)] = 1
# like convert
- m = re.match(r'^\s*:(\S+\.\S+):\s+', l)
+ m = re.match(br'^\s*:(\S+\.\S+):\s+', l)
if m:
documented[m.group(1)] = 1
# quoted in help or docstrings
- m = re.match(r'.*?``(\S+\.\S+)``', l)
+ m = re.match(br'.*?``(\S+\.\S+)``', l)
if m:
documented[m.group(1)] = 1
@@ -108,7 +108,7 @@
default = m.group('default')
if default in (None, 'False', 'None', '0', '[]', '""', "''"):
default = ''
- if re.match('[a-z.]+$', default):
+ if re.match(b'[a-z.]+$', default):
default = '<variable>'
if (name in foundopts and (ctype, default) != foundopts[name]
and name not in allowinconsistent):
--- a/contrib/chg/chg.c Thu Mar 15 22:35:07 2018 -0700
+++ b/contrib/chg/chg.c Mon Mar 19 08:07:18 2018 -0700
@@ -38,11 +38,13 @@
const char **args;
};
-static void initcmdserveropts(struct cmdserveropts *opts) {
+static void initcmdserveropts(struct cmdserveropts *opts)
+{
memset(opts, 0, sizeof(struct cmdserveropts));
}
-static void freecmdserveropts(struct cmdserveropts *opts) {
+static void freecmdserveropts(struct cmdserveropts *opts)
+{
free(opts->args);
opts->args = NULL;
opts->argsize = 0;
@@ -59,12 +61,8 @@
const char *name;
size_t narg;
} flags[] = {
- {"--config", 1},
- {"--cwd", 1},
- {"--repo", 1},
- {"--repository", 1},
- {"--traceback", 0},
- {"-R", 1},
+ {"--config", 1}, {"--cwd", 1}, {"--repo", 1},
+ {"--repository", 1}, {"--traceback", 0}, {"-R", 1},
};
size_t i;
for (i = 0; i < sizeof(flags) / sizeof(flags[0]); ++i) {
@@ -89,21 +87,21 @@
/*
* Parse argv[] and put sensitive flags to opts->args
*/
-static void setcmdserverargs(struct cmdserveropts *opts,
- int argc, const char *argv[])
+static void setcmdserverargs(struct cmdserveropts *opts, int argc,
+ const char *argv[])
{
size_t i, step;
opts->argsize = 0;
for (i = 0, step = 1; i < (size_t)argc; i += step, step = 1) {
if (!argv[i])
- continue; /* pass clang-analyse */
+ continue; /* pass clang-analyse */
if (strcmp(argv[i], "--") == 0)
break;
size_t n = testsensitiveflag(argv[i]);
if (n == 0 || i + n > (size_t)argc)
continue;
- opts->args = reallocx(opts->args,
- (n + opts->argsize) * sizeof(char *));
+ opts->args =
+ reallocx(opts->args, (n + opts->argsize) * sizeof(char *));
memcpy(opts->args + opts->argsize, argv + i,
sizeof(char *) * n);
opts->argsize += n;
@@ -180,8 +178,8 @@
r = snprintf(opts->sockname, sizeof(opts->sockname), sockfmt, basename);
if (r < 0 || (size_t)r >= sizeof(opts->sockname))
abortmsg("too long TMPDIR or CHGSOCKNAME (r = %d)", r);
- r = snprintf(opts->initsockname, sizeof(opts->initsockname),
- "%s.%u", opts->sockname, (unsigned)getpid());
+ r = snprintf(opts->initsockname, sizeof(opts->initsockname), "%s.%u",
+ opts->sockname, (unsigned)getpid());
if (r < 0 || (size_t)r >= sizeof(opts->initsockname))
abortmsg("too long TMPDIR or CHGSOCKNAME (r = %d)", r);
}
@@ -208,11 +206,14 @@
const char *hgcmd = gethgcmd();
const char *baseargv[] = {
- hgcmd,
- "serve",
- "--cmdserver", "chgunix",
- "--address", opts->initsockname,
- "--daemon-postexec", "chdir:/",
+ hgcmd,
+ "serve",
+ "--cmdserver",
+ "chgunix",
+ "--address",
+ opts->initsockname,
+ "--daemon-postexec",
+ "chdir:/",
};
size_t baseargvsize = sizeof(baseargv) / sizeof(baseargv[0]);
size_t argsize = baseargvsize + opts->argsize + 1;
@@ -237,7 +238,7 @@
debugmsg("try connect to %s repeatedly", opts->initsockname);
- unsigned int timeoutsec = 60; /* default: 60 seconds */
+ unsigned int timeoutsec = 60; /* default: 60 seconds */
const char *timeoutenv = getenv("CHGTIMEOUT");
if (timeoutenv)
sscanf(timeoutenv, "%u", &timeoutsec);
@@ -246,7 +247,7 @@
hgclient_t *hgc = hgc_open(opts->initsockname);
if (hgc) {
debugmsg("rename %s to %s", opts->initsockname,
- opts->sockname);
+ opts->sockname);
int r = rename(opts->initsockname, opts->sockname);
if (r != 0)
abortmsgerrno("cannot rename");
@@ -270,7 +271,7 @@
if (WIFEXITED(pst)) {
if (WEXITSTATUS(pst) == 0)
abortmsg("could not connect to cmdserver "
- "(exited with status 0)");
+ "(exited with status 0)");
debugmsg("cmdserver exited with status %d", WEXITSTATUS(pst));
exit(WEXITSTATUS(pst));
} else if (WIFSIGNALED(pst)) {
@@ -284,8 +285,8 @@
/* Connect to a cmdserver. Will start a new server on demand. */
static hgclient_t *connectcmdserver(struct cmdserveropts *opts)
{
- const char *sockname = opts->redirectsockname[0] ?
- opts->redirectsockname : opts->sockname;
+ const char *sockname =
+ opts->redirectsockname[0] ? opts->redirectsockname : opts->sockname;
debugmsg("try connect to %s", sockname);
hgclient_t *hgc = hgc_open(sockname);
if (hgc)
@@ -339,8 +340,8 @@
unlink(*pinst + 7);
} else if (strncmp(*pinst, "redirect ", 9) == 0) {
int r = snprintf(opts->redirectsockname,
- sizeof(opts->redirectsockname),
- "%s", *pinst + 9);
+ sizeof(opts->redirectsockname), "%s",
+ *pinst + 9);
if (r < 0 || r >= (int)sizeof(opts->redirectsockname))
abortmsg("redirect path is too long (%d)", r);
needreconnect = 1;
@@ -365,10 +366,9 @@
*/
static int isunsupported(int argc, const char *argv[])
{
- enum {
- SERVE = 1,
- DAEMON = 2,
- SERVEDAEMON = SERVE | DAEMON,
+ enum { SERVE = 1,
+ DAEMON = 2,
+ SERVEDAEMON = SERVE | DAEMON,
};
unsigned int state = 0;
int i;
@@ -378,7 +378,7 @@
if (i == 0 && strcmp("serve", argv[i]) == 0)
state |= SERVE;
else if (strcmp("-d", argv[i]) == 0 ||
- strcmp("--daemon", argv[i]) == 0)
+ strcmp("--daemon", argv[i]) == 0)
state |= DAEMON;
}
return (state & SERVEDAEMON) == SERVEDAEMON;
@@ -401,9 +401,9 @@
if (getenv("CHGINTERNALMARK"))
abortmsg("chg started by chg detected.\n"
- "Please make sure ${HG:-hg} is not a symlink or "
- "wrapper to chg. Alternatively, set $CHGHG to the "
- "path of real hg.");
+ "Please make sure ${HG:-hg} is not a symlink or "
+ "wrapper to chg. Alternatively, set $CHGHG to the "
+ "path of real hg.");
if (isunsupported(argc - 1, argv + 1))
execoriginalhg(argv);
@@ -435,11 +435,11 @@
hgc_close(hgc);
if (++retry > 10)
abortmsg("too many redirections.\n"
- "Please make sure %s is not a wrapper which "
- "changes sensitive environment variables "
- "before executing hg. If you have to use a "
- "wrapper, wrap chg instead of hg.",
- gethgcmd());
+ "Please make sure %s is not a wrapper which "
+ "changes sensitive environment variables "
+ "before executing hg. If you have to use a "
+ "wrapper, wrap chg instead of hg.",
+ gethgcmd());
}
setupsignalhandler(hgc_peerpid(hgc), hgc_peerpgid(hgc));
--- a/contrib/chg/hgclient.c Thu Mar 15 22:35:07 2018 -0700
+++ b/contrib/chg/hgclient.c Mon Mar 19 08:07:18 2018 -0700
@@ -7,7 +7,7 @@
* GNU General Public License version 2 or any later version.
*/
-#include <arpa/inet.h> /* for ntohl(), htonl() */
+#include <arpa/inet.h> /* for ntohl(), htonl() */
#include <assert.h>
#include <ctype.h>
#include <errno.h>
@@ -26,16 +26,15 @@
#include "procutil.h"
#include "util.h"
-enum {
- CAP_GETENCODING = 0x0001,
- CAP_RUNCOMMAND = 0x0002,
- /* cHg extension: */
- CAP_ATTACHIO = 0x0100,
- CAP_CHDIR = 0x0200,
- CAP_SETENV = 0x0800,
- CAP_SETUMASK = 0x1000,
- CAP_VALIDATE = 0x2000,
- CAP_SETPROCNAME = 0x4000,
+enum { CAP_GETENCODING = 0x0001,
+ CAP_RUNCOMMAND = 0x0002,
+ /* cHg extension: */
+ CAP_ATTACHIO = 0x0100,
+ CAP_CHDIR = 0x0200,
+ CAP_SETENV = 0x0800,
+ CAP_SETUMASK = 0x1000,
+ CAP_VALIDATE = 0x2000,
+ CAP_SETPROCNAME = 0x4000,
};
typedef struct {
@@ -44,15 +43,15 @@
} cappair_t;
static const cappair_t captable[] = {
- {"getencoding", CAP_GETENCODING},
- {"runcommand", CAP_RUNCOMMAND},
- {"attachio", CAP_ATTACHIO},
- {"chdir", CAP_CHDIR},
- {"setenv", CAP_SETENV},
- {"setumask", CAP_SETUMASK},
- {"validate", CAP_VALIDATE},
- {"setprocname", CAP_SETPROCNAME},
- {NULL, 0}, /* terminator */
+ {"getencoding", CAP_GETENCODING},
+ {"runcommand", CAP_RUNCOMMAND},
+ {"attachio", CAP_ATTACHIO},
+ {"chdir", CAP_CHDIR},
+ {"setenv", CAP_SETENV},
+ {"setumask", CAP_SETUMASK},
+ {"validate", CAP_VALIDATE},
+ {"setprocname", CAP_SETPROCNAME},
+ {NULL, 0}, /* terminator */
};
typedef struct {
@@ -88,8 +87,8 @@
if (newsize <= ctx->maxdatasize)
return;
- newsize = defaultdatasize
- * ((newsize + defaultdatasize - 1) / defaultdatasize);
+ newsize = defaultdatasize *
+ ((newsize + defaultdatasize - 1) / defaultdatasize);
ctx->data = reallocx(ctx->data, newsize);
ctx->maxdatasize = newsize;
debugmsg("enlarge context buffer to %zu", ctx->maxdatasize);
@@ -126,12 +125,12 @@
enlargecontext(&hgc->ctx, hgc->ctx.datasize);
if (isupper(hgc->ctx.ch) && hgc->ctx.ch != 'S')
- return; /* assumes input request */
+ return; /* assumes input request */
size_t cursize = 0;
while (cursize < hgc->ctx.datasize) {
rsize = recv(hgc->sockfd, hgc->ctx.data + cursize,
- hgc->ctx.datasize - cursize, 0);
+ hgc->ctx.datasize - cursize, 0);
if (rsize < 1)
abortmsg("failed to read data block");
cursize += rsize;
@@ -176,19 +175,19 @@
/* Build '\0'-separated list of args. argsize < 0 denotes that args are
* terminated by NULL. */
static void packcmdargs(context_t *ctx, const char *const args[],
- ssize_t argsize)
+ ssize_t argsize)
{
ctx->datasize = 0;
const char *const *const end = (argsize >= 0) ? args + argsize : NULL;
for (const char *const *it = args; it != end && *it; ++it) {
- const size_t n = strlen(*it) + 1; /* include '\0' */
+ const size_t n = strlen(*it) + 1; /* include '\0' */
enlargecontext(ctx, ctx->datasize + n);
memcpy(ctx->data + ctx->datasize, *it, n);
ctx->datasize += n;
}
if (ctx->datasize > 0)
- --ctx->datasize; /* strip last '\0' */
+ --ctx->datasize; /* strip last '\0' */
}
/* Extract '\0'-separated list of args to new buffer, terminated by NULL */
@@ -199,7 +198,7 @@
const char *s = ctx->data;
const char *e = ctx->data + ctx->datasize;
for (;;) {
- if (nargs + 1 >= maxnargs) { /* including last NULL */
+ if (nargs + 1 >= maxnargs) { /* including last NULL */
maxnargs += 256;
args = reallocx(args, maxnargs * sizeof(args[0]));
}
@@ -237,7 +236,7 @@
{
context_t *ctx = &hgc->ctx;
enlargecontext(ctx, ctx->datasize + 1);
- ctx->data[ctx->datasize] = '\0'; /* terminate last string */
+ ctx->data[ctx->datasize] = '\0'; /* terminate last string */
const char **args = unpackcmdargsnul(ctx);
if (!args[0] || !args[1] || !args[2])
@@ -269,8 +268,8 @@
for (;;) {
readchannel(hgc);
context_t *ctx = &hgc->ctx;
- debugmsg("response read from channel %c, size %zu",
- ctx->ch, ctx->datasize);
+ debugmsg("response read from channel %c, size %zu", ctx->ch,
+ ctx->datasize);
switch (ctx->ch) {
case 'o':
fwrite(ctx->data, sizeof(ctx->data[0]), ctx->datasize,
@@ -299,7 +298,7 @@
default:
if (isupper(ctx->ch))
abortmsg("cannot handle response (ch = %c)",
- ctx->ch);
+ ctx->ch);
}
}
}
@@ -366,8 +365,8 @@
static void updateprocname(hgclient_t *hgc)
{
- int r = snprintf(hgc->ctx.data, hgc->ctx.maxdatasize,
- "chg[worker/%d]", (int)getpid());
+ int r = snprintf(hgc->ctx.data, hgc->ctx.maxdatasize, "chg[worker/%d]",
+ (int)getpid());
if (r < 0 || (size_t)r >= hgc->ctx.maxdatasize)
abortmsg("insufficient buffer to write procname (r = %d)", r);
hgc->ctx.datasize = (size_t)r;
@@ -387,7 +386,7 @@
static const int fds[3] = {STDIN_FILENO, STDOUT_FILENO, STDERR_FILENO};
struct msghdr msgh;
memset(&msgh, 0, sizeof(msgh));
- struct iovec iov = {ctx->data, ctx->datasize}; /* dummy payload */
+ struct iovec iov = {ctx->data, ctx->datasize}; /* dummy payload */
msgh.msg_iov = &iov;
msgh.msg_iovlen = 1;
char fdbuf[CMSG_SPACE(sizeof(fds))];
@@ -552,7 +551,7 @@
* the last string is guaranteed to be NULL.
*/
const char **hgc_validate(hgclient_t *hgc, const char *const args[],
- size_t argsize)
+ size_t argsize)
{
assert(hgc);
if (!(hgc->capflags & CAP_VALIDATE))
--- a/contrib/chg/hgclient.h Thu Mar 15 22:35:07 2018 -0700
+++ b/contrib/chg/hgclient.h Mon Mar 19 08:07:18 2018 -0700
@@ -22,9 +22,9 @@
pid_t hgc_peerpid(const hgclient_t *hgc);
const char **hgc_validate(hgclient_t *hgc, const char *const args[],
- size_t argsize);
+ size_t argsize);
int hgc_runcommand(hgclient_t *hgc, const char *const args[], size_t argsize);
void hgc_attachio(hgclient_t *hgc);
void hgc_setenv(hgclient_t *hgc, const char *const envp[]);
-#endif /* HGCLIENT_H_ */
+#endif /* HGCLIENT_H_ */
--- a/contrib/chg/procutil.c Thu Mar 15 22:35:07 2018 -0700
+++ b/contrib/chg/procutil.c Mon Mar 19 08:07:18 2018 -0700
@@ -54,7 +54,7 @@
goto error;
forwardsignal(sig);
- if (raise(sig) < 0) /* resend to self */
+ if (raise(sig) < 0) /* resend to self */
goto error;
if (sigaction(sig, &sa, &oldsa) < 0)
goto error;
@@ -205,8 +205,8 @@
close(pipefds[0]);
close(pipefds[1]);
- int r = execle("/bin/sh", "/bin/sh", "-c", pagercmd, NULL,
- envp);
+ int r =
+ execle("/bin/sh", "/bin/sh", "-c", pagercmd, NULL, envp);
if (r < 0) {
abortmsgerrno("cannot start pager '%s'", pagercmd);
}
--- a/contrib/chg/util.c Thu Mar 15 22:35:07 2018 -0700
+++ b/contrib/chg/util.c Mon Mar 19 08:07:18 2018 -0700
@@ -62,7 +62,8 @@
static int debugmsgenabled = 0;
static double debugstart = 0;
-static double now() {
+static double now()
+{
struct timeval t;
gettimeofday(&t, NULL);
return t.tv_usec / 1e6 + t.tv_sec;
--- a/contrib/chg/util.h Thu Mar 15 22:35:07 2018 -0700
+++ b/contrib/chg/util.h Mon Mar 19 08:07:18 2018 -0700
@@ -32,4 +32,4 @@
int runshellcmd(const char *cmd, const char *envp[], const char *cwd);
-#endif /* UTIL_H_ */
+#endif /* UTIL_H_ */
--- a/contrib/clang-format-blacklist Thu Mar 15 22:35:07 2018 -0700
+++ b/contrib/clang-format-blacklist Mon Mar 19 08:07:18 2018 -0700
@@ -1,23 +1,8 @@
# Files that just need to be migrated to the formatter.
# Do not add new files here!
-contrib/chg/chg.c
-contrib/chg/hgclient.c
-contrib/chg/hgclient.h
-contrib/chg/procutil.c
-contrib/chg/procutil.h
-contrib/chg/util.c
-contrib/chg/util.h
-contrib/hgsh/hgsh.c
-mercurial/cext/base85.c
-mercurial/cext/bdiff.c
-mercurial/cext/charencode.c
-mercurial/cext/charencode.h
-mercurial/cext/diffhelpers.c
mercurial/cext/dirs.c
mercurial/cext/manifest.c
-mercurial/cext/mpatch.c
mercurial/cext/osutil.c
-mercurial/cext/pathencode.c
mercurial/cext/revlog.c
# Vendored code that we should never format:
contrib/python-zstandard/c-ext/bufferutil.c
@@ -67,3 +52,18 @@
contrib/python-zstandard/zstd/dictBuilder/zdict.h
contrib/python-zstandard/zstd/zstd.h
hgext/fsmonitor/pywatchman/bser.c
+mercurial/thirdparty/xdiff/xdiff.h
+mercurial/thirdparty/xdiff/xdiffi.c
+mercurial/thirdparty/xdiff/xdiffi.h
+mercurial/thirdparty/xdiff/xemit.c
+mercurial/thirdparty/xdiff/xemit.h
+mercurial/thirdparty/xdiff/xhistogram.c
+mercurial/thirdparty/xdiff/xinclude.h
+mercurial/thirdparty/xdiff/xmacros.h
+mercurial/thirdparty/xdiff/xmerge.c
+mercurial/thirdparty/xdiff/xpatience.c
+mercurial/thirdparty/xdiff/xprepare.c
+mercurial/thirdparty/xdiff/xprepare.h
+mercurial/thirdparty/xdiff/xtypes.h
+mercurial/thirdparty/xdiff/xutils.c
+mercurial/thirdparty/xdiff/xutils.h
--- a/contrib/dirstatenonnormalcheck.py Thu Mar 15 22:35:07 2018 -0700
+++ b/contrib/dirstatenonnormalcheck.py Mon Mar 19 08:07:18 2018 -0700
@@ -17,7 +17,7 @@
"""Compute nonnormal entries from dirstate's dmap"""
res = set()
for f, e in dmap.iteritems():
- if e[0] != 'n' or e[3] == -1:
+ if e[0] != b'n' or e[3] == -1:
res.add(f)
return res
@@ -25,24 +25,25 @@
"""Compute nonnormalset from dmap, check that it matches _nonnormalset"""
nonnormalcomputedmap = nonnormalentries(dmap)
if _nonnormalset != nonnormalcomputedmap:
- ui.develwarn("%s call to %s\n" % (label, orig), config='dirstate')
- ui.develwarn("inconsistency in nonnormalset\n", config='dirstate')
- ui.develwarn("[nonnormalset] %s\n" % _nonnormalset, config='dirstate')
- ui.develwarn("[map] %s\n" % nonnormalcomputedmap, config='dirstate')
+ ui.develwarn(b"%s call to %s\n" % (label, orig), config=b'dirstate')
+ ui.develwarn(b"inconsistency in nonnormalset\n", config=b'dirstate')
+ ui.develwarn(b"[nonnormalset] %s\n" % _nonnormalset, config=b'dirstate')
+ ui.develwarn(b"[map] %s\n" % nonnormalcomputedmap, config=b'dirstate')
def _checkdirstate(orig, self, arg):
"""Check nonnormal set consistency before and after the call to orig"""
checkconsistency(self._ui, orig, self._map, self._map.nonnormalset,
- "before")
+ b"before")
r = orig(self, arg)
- checkconsistency(self._ui, orig, self._map, self._map.nonnormalset, "after")
+ checkconsistency(self._ui, orig, self._map, self._map.nonnormalset,
+ b"after")
return r
def extsetup(ui):
"""Wrap functions modifying dirstate to check nonnormalset consistency"""
dirstatecl = dirstate.dirstate
- devel = ui.configbool('devel', 'all-warnings')
- paranoid = ui.configbool('experimental', 'nonnormalparanoidcheck')
+ devel = ui.configbool(b'devel', b'all-warnings')
+ paranoid = ui.configbool(b'experimental', b'nonnormalparanoidcheck')
if devel:
extensions.wrapfunction(dirstatecl, '_writedirstate', _checkdirstate)
if paranoid:
--- a/contrib/dumprevlog Thu Mar 15 22:35:07 2018 -0700
+++ b/contrib/dumprevlog Mon Mar 19 08:07:18 2018 -0700
@@ -14,8 +14,12 @@
for fp in (sys.stdin, sys.stdout, sys.stderr):
util.setbinary(fp)
+def binopen(path, mode='rb'):
+ if 'b' not in mode:
+ mode = mode + 'b'
+ return open(path, mode)
+
for f in sys.argv[1:]:
- binopen = lambda fn: open(fn, 'rb')
r = revlog.revlog(binopen, f)
print("file:", f)
for i in r:
--- a/contrib/fuzz/Makefile Thu Mar 15 22:35:07 2018 -0700
+++ b/contrib/fuzz/Makefile Mon Mar 19 08:07:18 2018 -0700
@@ -13,8 +13,28 @@
$$CXX $$CXXFLAGS -std=c++11 -I../../mercurial bdiff.cc \
bdiff-oss-fuzz.o -lFuzzingEngine -o $$OUT/bdiff_fuzzer
-all: bdiff
+x%.o: ../../mercurial/thirdparty/xdiff/x%.c ../../mercurial/thirdparty/xdiff/*.h
+ clang -g -O1 -fsanitize=fuzzer-no-link,address -c \
+ -o $@ \
+ $<
+
+xdiff: xdiff.cc xdiffi.o xprepare.o xutils.o
+ clang -DHG_FUZZER_INCLUDE_MAIN=1 -g -O1 -fsanitize=fuzzer-no-link,address \
+ -I../../mercurial xdiff.cc \
+ xdiffi.o xprepare.o xutils.o -o xdiff
-oss-fuzz: bdiff_fuzzer
+fuzz-x%.o: ../../mercurial/thirdparty/xdiff/x%.c ../../mercurial/thirdparty/xdiff/*.h
+ $$CC $$CFLAGS -c \
+ -o $@ \
+ $<
+
+xdiff_fuzzer: xdiff.cc fuzz-xdiffi.o fuzz-xprepare.o fuzz-xutils.o
+ $$CXX $$CXXFLAGS -std=c++11 -I../../mercurial xdiff.cc \
+ fuzz-xdiffi.o fuzz-xprepare.o fuzz-xutils.o \
+ -lFuzzingEngine -o $$OUT/xdiff_fuzzer
+
+all: bdiff xdiff
+
+oss-fuzz: bdiff_fuzzer xdiff_fuzzer
.PHONY: all oss-fuzz
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/fuzz/README.rst Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,26 @@
+How to add fuzzers (partially cribbed from oss-fuzz[0]):
+
+ 1) git clone https://github.com/google/oss-fuzz
+ 2) cd oss-fuzz
+ 3) python infra/helper.py build_image mercurial
+ 4) docker run --cap-add=SYS_PTRACE -it -v $HG_REPO_PATH:/hg-new \
+ gcr.io/oss-fuzz/mercurial bash
+ 5) cd /src
+ 6) rm -r mercurial
+ 7) ln -s /hg-new mercurial
+ 8) cd mercurial
+ 9) compile
+ 10) ls $OUT
+
+Step 9 is literally running the command "compile", which is part of
+the docker container. Once you have that working, you can build the
+fuzzers like this (in the oss-fuzz repo):
+
+python infra/helper.py build_fuzzers --sanitizer address mercurial $HG_REPO_PATH
+
+(you can also say "memory", "undefined" or "coverage" for the
+sanitizer). Then run the built fuzzers like this:
+
+python infra/helper.py run_fuzzer mercurial -- $FUZZER
+
+0: https://github.com/google/oss-fuzz/blob/master/docs/new_project_guide.md
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/fuzz/xdiff.cc Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,60 @@
+/*
+ * xdiff.cc - fuzzer harness for thirdparty/xdiff
+ *
+ * Copyright 2018, Google Inc.
+ *
+ * This software may be used and distributed according to the terms of
+ * the GNU General Public License, incorporated herein by reference.
+ */
+#include "thirdparty/xdiff/xdiff.h"
+#include <inttypes.h>
+#include <stdlib.h>
+
+extern "C" {
+
+int hunk_consumer(long a1, long a2, long b1, long b2, void *priv)
+{
+ // TODO: probably also test returning -1 from this when things break?
+ return 0;
+}
+
+int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size)
+{
+ if (!Size) {
+ return 0;
+ }
+ // figure out a random point in [0, Size] to split our input.
+ size_t split = Data[0] / 255.0 * Size;
+
+ mmfile_t a, b;
+
+ // `a` input to diff is data[1:split]
+ a.ptr = (char *)Data + 1;
+ // which has len split-1
+ a.size = split - 1;
+ // `b` starts at the next byte after `a` ends
+ b.ptr = a.ptr + a.size;
+ b.size = Size - split;
+ xpparam_t xpp = {
+ XDF_INDENT_HEURISTIC, /* flags */
+ };
+ xdemitconf_t xecfg = {
+ XDL_EMIT_BDIFFHUNK, /* flags */
+ hunk_consumer, /* hunk_consume_func */
+ };
+ xdemitcb_t ecb = {
+ NULL, /* priv */
+ };
+ xdl_diff(&a, &b, &xpp, &xecfg, &ecb);
+ return 0; // Non-zero return values are reserved for future use.
+}
+
+#ifdef HG_FUZZER_INCLUDE_MAIN
+int main(int argc, char **argv)
+{
+ const char data[] = "asdf";
+ return LLVMFuzzerTestOneInput((const uint8_t *)data, 4);
+}
+#endif
+
+} // extern "C"
--- a/contrib/hgsh/hgsh.c Thu Mar 15 22:35:07 2018 -0700
+++ b/contrib/hgsh/hgsh.c Mon Mar 19 08:07:18 2018 -0700
@@ -48,7 +48,7 @@
* have such machine, set to NULL.
*/
#ifndef HG_GATEWAY
-#define HG_GATEWAY "gateway"
+#define HG_GATEWAY "gateway"
#endif
/*
@@ -56,7 +56,7 @@
* NULL.
*/
#ifndef HG_HOST
-#define HG_HOST "mercurial"
+#define HG_HOST "mercurial"
#endif
/*
@@ -64,7 +64,7 @@
* host username are same, set to NULL.
*/
#ifndef HG_USER
-#define HG_USER "hg"
+#define HG_USER "hg"
#endif
/*
@@ -72,14 +72,14 @@
* validate location of repo when someone is try to access, set to NULL.
*/
#ifndef HG_ROOT
-#define HG_ROOT "/home/hg/repos"
+#define HG_ROOT "/home/hg/repos"
#endif
/*
* HG: path to the mercurial executable to run.
*/
#ifndef HG
-#define HG "/home/hg/bin/hg"
+#define HG "/home/hg/bin/hg"
#endif
/*
@@ -88,7 +88,7 @@
* impossible, set to NULL.
*/
#ifndef HG_SHELL
-#define HG_SHELL NULL
+#define HG_SHELL NULL
/* #define HG_SHELL "/bin/bash" */
#endif
@@ -97,7 +97,7 @@
* should not get helpful message, set to NULL.
*/
#ifndef HG_HELP
-#define HG_HELP "please contact support@example.com for help."
+#define HG_HELP "please contact support@example.com for help."
#endif
/*
@@ -106,7 +106,7 @@
* arguments it is called with. see forward_through_gateway.
*/
#ifndef SSH
-#define SSH "/usr/bin/ssh"
+#define SSH "/usr/bin/ssh"
#endif
/*
@@ -249,7 +249,6 @@
hg_serve,
};
-
/*
* attempt to verify that a directory is really a hg repo, by testing
* for the existence of a subdirectory.
@@ -310,8 +309,7 @@
if (sscanf(argv[2], "hg init %as", &repo) == 1) {
cmd = hg_init;
- }
- else if (sscanf(argv[2], "hg -R %as serve --stdio", &repo) == 1) {
+ } else if (sscanf(argv[2], "hg -R %as serve --stdio", &repo) == 1) {
cmd = hg_serve;
} else {
goto badargs;
--- a/contrib/mercurial.spec Thu Mar 15 22:35:07 2018 -0700
+++ b/contrib/mercurial.spec Mon Mar 19 08:07:18 2018 -0700
@@ -6,8 +6,8 @@
%global pythonver %{withpython}
%global pythonname Python-%{withpython}
-%global docutilsname docutils-0.12
-%global docutilsmd5 4622263b62c5c771c03502afa3157768
+%global docutilsname docutils-0.14
+%global docutilsmd5 c53768d63db3873b7d452833553469de
%global pythonhg python-hg
%global hgpyprefix /opt/%{pythonhg}
# byte compilation will fail on some some Python /test/ files
--- a/contrib/perf.py Thu Mar 15 22:35:07 2018 -0700
+++ b/contrib/perf.py Mon Mar 19 08:07:18 2018 -0700
@@ -64,6 +64,12 @@
from mercurial import scmutil # since 1.9 (or 8b252e826c68)
except ImportError:
pass
+try:
+ from mercurial import pycompat
+ getargspec = pycompat.getargspec # added to module after 4.5
+except (ImportError, AttributeError):
+ import inspect
+ getargspec = inspect.getargspec
# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
@@ -114,9 +120,8 @@
if safehasattr(registrar, 'command'):
command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
- import inspect
command = cmdutil.command(cmdtable)
- if 'norepo' not in inspect.getargspec(command)[0]:
+ if 'norepo' not in getargspec(command).args:
# for "historical portability":
# wrap original cmdutil.command, because "norepo" option has
# been available since 3.1 (or 75a96326cecb)
@@ -934,11 +939,16 @@
timer(d)
fm.end()
-def _bdiffworker(q, ready, done):
+def _bdiffworker(q, blocks, xdiff, ready, done):
while not done.is_set():
pair = q.get()
while pair is not None:
- mdiff.textdiff(*pair)
+ if xdiff:
+ mdiff.bdiff.xdiffblocks(*pair)
+ elif blocks:
+ mdiff.bdiff.blocks(*pair)
+ else:
+ mdiff.textdiff(*pair)
q.task_done()
pair = q.get()
q.task_done() # for the None one
@@ -949,6 +959,8 @@
('', 'count', 1, 'number of revisions to test (when using --startrev)'),
('', 'alldata', False, 'test bdiffs for all associated revisions'),
('', 'threads', 0, 'number of thread to use (disable with 0)'),
+ ('', 'blocks', False, 'test computing diffs into blocks'),
+ ('', 'xdiff', False, 'use xdiff algorithm'),
],
'-c|-m|FILE REV')
@@ -964,6 +976,11 @@
measure bdiffs for all changes related to that changeset (manifest
and filelogs).
"""
+ opts = pycompat.byteskwargs(opts)
+
+ if opts['xdiff'] and not opts['blocks']:
+ raise error.CommandError('perfbdiff', '--xdiff requires --blocks')
+
if opts['alldata']:
opts['changelog'] = True
@@ -972,6 +989,8 @@
elif rev is None:
raise error.CommandError('perfbdiff', 'invalid arguments')
+ blocks = opts['blocks']
+ xdiff = opts['xdiff']
textpairs = []
r = cmdutil.openrevlog(repo, 'perfbdiff', file_, opts)
@@ -1002,7 +1021,12 @@
if not withthreads:
def d():
for pair in textpairs:
- mdiff.textdiff(*pair)
+ if xdiff:
+ mdiff.bdiff.xdiffblocks(*pair)
+ elif blocks:
+ mdiff.bdiff.blocks(*pair)
+ else:
+ mdiff.textdiff(*pair)
else:
q = util.queue()
for i in xrange(threads):
@@ -1010,7 +1034,8 @@
ready = threading.Condition()
done = threading.Event()
for i in xrange(threads):
- threading.Thread(target=_bdiffworker, args=(q, ready, done)).start()
+ threading.Thread(target=_bdiffworker,
+ args=(q, blocks, xdiff, ready, done)).start()
q.join()
def d():
for pair in textpairs:
@@ -1031,6 +1056,71 @@
with ready:
ready.notify_all()
+@command('perfunidiff', revlogopts + formatteropts + [
+ ('', 'count', 1, 'number of revisions to test (when using --startrev)'),
+ ('', 'alldata', False, 'test unidiffs for all associated revisions'),
+ ], '-c|-m|FILE REV')
+def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
+ """benchmark a unified diff between revisions
+
+ This doesn't include any copy tracing - it's just a unified diff
+ of the texts.
+
+ By default, benchmark a diff between its delta parent and itself.
+
+ With ``--count``, benchmark diffs between delta parents and self for N
+ revisions starting at the specified revision.
+
+ With ``--alldata``, assume the requested revision is a changeset and
+ measure diffs for all changes related to that changeset (manifest
+ and filelogs).
+ """
+ if opts['alldata']:
+ opts['changelog'] = True
+
+ if opts.get('changelog') or opts.get('manifest'):
+ file_, rev = None, file_
+ elif rev is None:
+ raise error.CommandError('perfunidiff', 'invalid arguments')
+
+ textpairs = []
+
+ r = cmdutil.openrevlog(repo, 'perfunidiff', file_, opts)
+
+ startrev = r.rev(r.lookup(rev))
+ for rev in range(startrev, min(startrev + count, len(r) - 1)):
+ if opts['alldata']:
+ # Load revisions associated with changeset.
+ ctx = repo[rev]
+ mtext = repo.manifestlog._revlog.revision(ctx.manifestnode())
+ for pctx in ctx.parents():
+ pman = repo.manifestlog._revlog.revision(pctx.manifestnode())
+ textpairs.append((pman, mtext))
+
+ # Load filelog revisions by iterating manifest delta.
+ man = ctx.manifest()
+ pman = ctx.p1().manifest()
+ for filename, change in pman.diff(man).items():
+ fctx = repo.file(filename)
+ f1 = fctx.revision(change[0][0] or -1)
+ f2 = fctx.revision(change[1][0] or -1)
+ textpairs.append((f1, f2))
+ else:
+ dp = r.deltaparent(rev)
+ textpairs.append((r.revision(dp), r.revision(rev)))
+
+ def d():
+ for left, right in textpairs:
+ # The date strings don't matter, so we pass empty strings.
+ headerlines, hunks = mdiff.unidiff(
+ left, '', right, '', 'left', 'right', binary=False)
+ # consume iterators in roughly the way patch.py does
+ b'\n'.join(headerlines)
+ b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
+ timer, fm = gettimer(ui, opts)
+ timer(d)
+ fm.end()
+
@command('perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
"""Profile diff of working directory changes"""
@@ -1498,11 +1588,13 @@
('', 'clear-revbranch', False,
'purge the revbranch cache between computation'),
] + formatteropts)
-def perfbranchmap(ui, repo, full=False, clear_revbranch=False, **opts):
+def perfbranchmap(ui, repo, *filternames, **opts):
"""benchmark the update of a branchmap
This benchmarks the full repo.branchmap() call with read and write disabled
"""
+ full = opts.get("full", False)
+ clear_revbranch = opts.get("clear_revbranch", False)
timer, fm = gettimer(ui, opts)
def getbranchmap(filtername):
"""generate a benchmark function for the filtername"""
@@ -1521,6 +1613,8 @@
return d
# add filter in smaller subset to bigger subset
possiblefilters = set(repoview.filtertable)
+ if filternames:
+ possiblefilters &= set(filternames)
subsettable = getbranchmapsubsettable()
allfilters = []
while possiblefilters:
@@ -1537,8 +1631,9 @@
if not full:
for name in allfilters:
repo.filtered(name).branchmap()
- # add unfiltered
- allfilters.append(None)
+ if not filternames or 'unfiltered' in filternames:
+ # add unfiltered
+ allfilters.append(None)
branchcacheread = safeattrsetter(branchmap, 'read')
branchcachewrite = safeattrsetter(branchmap.branchcache, 'write')
@@ -1546,7 +1641,10 @@
branchcachewrite.set(lambda bc, repo: None)
try:
for name in allfilters:
- timer(getbranchmap(name), title=str(name))
+ printname = name
+ if name is None:
+ printname = 'unfiltered'
+ timer(getbranchmap(name), title=str(printname))
finally:
branchcacheread.restore()
branchcachewrite.restore()
--- a/contrib/phabricator.py Thu Mar 15 22:35:07 2018 -0700
+++ b/contrib/phabricator.py Mon Mar 19 08:07:18 2018 -0700
@@ -22,7 +22,8 @@
url = https://phab.example.com/
# API token. Get it from https://$HOST/conduit/login/
- token = cli-xxxxxxxxxxxxxxxxxxxxxxxxxxxx
+ # Deprecated: see [phabricator.auth] below
+ #token = cli-xxxxxxxxxxxxxxxxxxxxxxxxxxxx
# Repo callsign. If a repo has a URL https://$HOST/diffusion/FOO, then its
# callsign is "FOO".
@@ -33,6 +34,11 @@
# if you need to specify advanced options that is not easily supported by
# the internal library.
curlcmd = curl --connect-timeout 2 --retry 3 --silent
+
+ [phabricator.auth]
+ example.url = https://phab.example.com/
+ # API token. Get it from https://$HOST/conduit/login/
+ example.token = cli-xxxxxxxxxxxxxxxxxxxxxxxxxxxx
"""
from __future__ import absolute_import
@@ -94,20 +100,56 @@
process('', params)
return util.urlreq.urlencode(flatparams)
+printed_token_warning = False
+
+def readlegacytoken(repo):
+ """Transitional support for old phabricator tokens.
+
+ Remove before the 4.6 release.
+ """
+ global printed_token_warning
+ token = repo.ui.config('phabricator', 'token')
+ if token and not printed_token_warning:
+ printed_token_warning = True
+ repo.ui.warn(_('phabricator.token is deprecated - please '
+ 'migrate to the phabricator.auth section.\n'))
+ return token
+
def readurltoken(repo):
"""return conduit url, token and make sure they exist
Currently read from [phabricator] config section. In the future, it might
make sense to read from .arcconfig and .arcrc as well.
"""
- values = []
- section = 'phabricator'
- for name in ['url', 'token']:
- value = repo.ui.config(section, name)
- if not value:
- raise error.Abort(_('config %s.%s is required') % (section, name))
- values.append(value)
- return values
+ url = repo.ui.config('phabricator', 'url')
+ if not url:
+ raise error.Abort(_('config %s.%s is required')
+ % ('phabricator', 'url'))
+
+ groups = {}
+ for key, val in repo.ui.configitems('phabricator.auth'):
+ if '.' not in key:
+ repo.ui.warn(_("ignoring invalid [phabricator.auth] key '%s'\n")
+ % key)
+ continue
+ group, setting = key.rsplit('.', 1)
+ groups.setdefault(group, {})[setting] = val
+
+ token = None
+ for group, auth in groups.iteritems():
+ if url != auth.get('url'):
+ continue
+ token = auth.get('token')
+ if token:
+ break
+
+ if not token:
+ token = readlegacytoken(repo)
+ if not token:
+ raise error.Abort(_('Can\'t find conduit token associated to %s')
+ % (url,))
+
+ return url, token
def callconduit(repo, name, params):
"""call Conduit API, params is a dict. return json.loads result, or None"""
@@ -868,11 +910,12 @@
templatekeyword = registrar.templatekeyword()
-@templatekeyword('phabreview')
-def template_review(repo, ctx, revcache, **args):
+@templatekeyword('phabreview', requires={'ctx'})
+def template_review(context, mapping):
""":phabreview: Object describing the review for this changeset.
Has attributes `url` and `id`.
"""
+ ctx = context.resource(mapping, 'ctx')
m = _differentialrevisiondescre.search(ctx.description())
if m:
return {
--- a/contrib/python3-ratchet.py Thu Mar 15 22:35:07 2018 -0700
+++ b/contrib/python3-ratchet.py Mon Mar 19 08:07:18 2018 -0700
@@ -80,8 +80,7 @@
print('warning: Python 3.6.0 and 3.6.1 have '
'a bug which breaks Mercurial')
print('(see https://bugs.python.org/issue29714 for details)')
- # TODO(augie): uncomment exit when Python 3.6.2 is available
- # sys.exit(1)
+ sys.exit(1)
rt = subprocess.Popen([opts.python3, 'run-tests.py', '-j', str(opts.j),
'--blacklist', opts.working_tests, '--json'])
--- a/contrib/python3-whitelist Thu Mar 15 22:35:07 2018 -0700
+++ b/contrib/python3-whitelist Mon Mar 19 08:07:18 2018 -0700
@@ -1,16 +1,42 @@
+test-abort-checkin.t
test-add.t
test-addremove-similar.t
test-addremove.t
+test-amend-subrepo.t
+test-amend.t
test-ancestor.py
+test-annotate.py
+test-annotate.t
+test-archive-symlinks.t
+test-atomictempfile.py
+test-audit-path.t
+test-audit-subrepo.t
test-automv.t
+test-backout.t
test-backwards-remove.t
+test-basic.t
test-bheads.t
+test-bisect.t
test-bisect2.t
+test-bisect3.t
+test-blackbox.t
+test-bookmarks-current.t
test-bookmarks-merge.t
+test-bookmarks-rebase.t
test-bookmarks-strip.t
+test-bookmarks.t
+test-branch-change.t
+test-branch-option.t
test-branch-tag-confict.t
+test-branches.t
+test-bundle-phases.t
+test-bundle-type.t
+test-bundle-vs-outgoing.t
+test-bundle2-multiple-changegroups.t
+test-cappedreader.py
test-casecollision.t
test-cat.t
+test-censor.t
test-changelog-exec.t
test-check-commit.t
test-check-execute.t
@@ -19,11 +45,41 @@
test-check-pylint.t
test-check-shbang.t
test-children.t
+test-clone-pull-corruption.t
+test-clone-r.t
+test-clone-update-order.t
+test-command-template.t
+test-commit-amend.t
+test-commit-interactive.t
+test-commit-multiple.t
test-commit-unresolved.t
+test-commit.t
+test-committer.t
test-completion.t
+test-config-env.py
+test-config.t
+test-conflict.t
+test-confused-revert.t
test-contrib-check-code.t
test-contrib-check-commit.t
+test-convert-authormap.t
+test-convert-clonebranches.t
+test-convert-datesort.t
+test-convert-filemap.t
+test-convert-hg-sink.t
+test-convert-hg-source.t
+test-convert-hg-startrev.t
+test-copy-move-merge.t
+test-copy.t
+test-copytrace-heuristics.t
+test-debugbuilddag.t
+test-debugbundle.t
+test-debugextensions.t
+test-debugindexdot.t
test-debugrename.t
+test-default-push.t
+test-diff-binary-file.t
+test-diff-change.t
test-diff-copy-depth.t
test-diff-hashes.t
test-diff-issue2761.t
@@ -32,42 +88,138 @@
test-diff-subdir.t
test-diffdir.t
test-directaccess.t
+test-dirstate-backup.t
test-dirstate-nonnormalset.t
test-doctest.py
test-double-merge.t
+test-drawdag.t
test-duplicateoptions.py
test-empty-dir.t
test-empty-file.t
+test-empty-group.t
test-empty.t
+test-encode.t
test-encoding-func.py
+test-encoding.t
+test-eol-add.t
+test-eol-clone.t
+test-eol-hook.t
+test-eol-tag.t
+test-eol-update.t
test-excessive-merge.t
+test-exchange-obsmarkers-case-A1.t
+test-exchange-obsmarkers-case-A2.t
+test-exchange-obsmarkers-case-A3.t
+test-exchange-obsmarkers-case-A4.t
+test-exchange-obsmarkers-case-A5.t
+test-exchange-obsmarkers-case-A6.t
+test-exchange-obsmarkers-case-A7.t
+test-exchange-obsmarkers-case-B1.t
+test-exchange-obsmarkers-case-B2.t
+test-exchange-obsmarkers-case-B3.t
+test-exchange-obsmarkers-case-B4.t
+test-exchange-obsmarkers-case-B5.t
+test-exchange-obsmarkers-case-B6.t
+test-exchange-obsmarkers-case-B7.t
+test-exchange-obsmarkers-case-C1.t
+test-exchange-obsmarkers-case-C2.t
+test-exchange-obsmarkers-case-C3.t
+test-exchange-obsmarkers-case-C4.t
+test-exchange-obsmarkers-case-D1.t
+test-exchange-obsmarkers-case-D2.t
+test-exchange-obsmarkers-case-D3.t
+test-exchange-obsmarkers-case-D4.t
test-execute-bit.t
+test-extdiff.t
+test-extra-filelog-entry.t
+test-filebranch.t
+test-fileset-generated.t
+test-flags.t
+test-generaldelta.t
+test-getbundle.t
+test-git-export.t
+test-glog-topological.t
test-gpg.t
+test-graft.t
test-hghave.t
+test-hgignore.t
+test-hgk.t
+test-hgweb-bundle.t
+test-hgweb-descend-empties.t
+test-hgweb-removed.t
+test-histedit-arguments.t
+test-histedit-base.t
+test-histedit-bookmark-motion.t
+test-histedit-commute.t
+test-histedit-drop.t
+test-histedit-edit.t
+test-histedit-fold-non-commute.t
+test-histedit-fold.t
+test-histedit-no-change.t
+test-histedit-non-commute-abort.t
+test-histedit-non-commute.t
+test-histedit-obsolete.t
+test-histedit-outgoing.t
+test-histedit-templates.t
+test-http-branchmap.t
+test-http-bundle1.t
+test-http-clone-r.t
+test-identify.t
+test-import-unknown.t
test-imports-checker.t
+test-inherit-mode.t
test-issue1089.t
+test-issue1102.t
test-issue1175.t
+test-issue1306.t
+test-issue1438.t
test-issue1502.t
test-issue1802.t
test-issue1877.t
test-issue1993.t
+test-issue2137.t
+test-issue3084.t
+test-issue4074.t
test-issue522.t
+test-issue586.t
test-issue612.t
test-issue619.t
test-issue672.t
test-issue842.t
test-journal-exists.t
+test-largefiles-cache.t
+test-largefiles-misc.t
+test-largefiles-small-disk.t
+test-largefiles-update.t
+test-lfs-largefiles.t
test-locate.t
+test-lock-badness.t
+test-log.t
+test-logexchange.t
test-lrucachedict.py
-test-manifest.py
+test-mactext.t
test-manifest-merging.t
+test-manifest.py
+test-manifest.t
test-match.py
+test-mdiff.py
+test-merge-changedelete.t
+test-merge-closedheads.t
+test-merge-commit.t
+test-merge-criss-cross.t
test-merge-default.t
+test-merge-force.t
+test-merge-halt.t
test-merge-internal-tools-pattern.t
+test-merge-local.t
test-merge-remove.t
test-merge-revert.t
test-merge-revert2.t
test-merge-subrepos.t
+test-merge-symlinks.t
+test-merge-tools.t
+test-merge-types.t
+test-merge1.t
test-merge10.t
test-merge2.t
test-merge4.t
@@ -75,9 +227,69 @@
test-merge6.t
test-merge7.t
test-merge8.t
+test-merge9.t
+test-mq-git.t
+test-mq-header-date.t
+test-mq-header-from.t
+test-mq-pull-from-bundle.t
+test-mq-qdiff.t
+test-mq-qfold.t
+test-mq-qgoto.t
test-mq-qimport-fail-cleanup.t
+test-mq-qpush-exact.t
+test-mq-qqueue.t
+test-mq-qrefresh-interactive.t
+test-mq-qrefresh-replace-log-message.t
+test-mq-qrefresh.t
+test-mq-qrename.t
+test-mq-qsave.t
+test-mq-safety.t
+test-mq-subrepo.t
+test-mq-symlinks.t
+test-mv-cp-st-diff.t
+test-narrow-archive.t
+test-narrow-clone-no-ellipsis.t
+test-narrow-clone-nonlinear.t
+test-narrow-clone.t
+test-narrow-commit.t
+test-narrow-copies.t
+test-narrow-debugcommands.t
+test-narrow-debugrebuilddirstate.t
+test-narrow-exchange-merges.t
+test-narrow-exchange.t
+test-narrow-expanddirstate.t
+test-narrow-merge.t
+test-narrow-patch.t
+test-narrow-patterns.t
+test-narrow-pull.t
+test-narrow-rebase.t
+test-narrow-shallow-merges.t
+test-narrow-shallow.t
+test-narrow-strip.t
+test-narrow-update.t
+test-nested-repo.t
+test-newbranch.t
test-obshistory.t
+test-obsmarker-template.t
+test-obsmarkers-effectflag.t
+test-obsolete-bundle-strip.t
+test-obsolete-changeset-exchange.t
+test-obsolete-checkheads.t
+test-obsolete-distributed.t
+test-obsolete-tag-cache.t
+test-parents.t
+test-pathconflicts-merge.t
+test-pathconflicts-update.t
+test-pending.t
test-permissions.t
+test-phases.t
+test-pull-branch.t
+test-pull-http.t
+test-pull-permission.t
+test-pull-pull-corruption.t
+test-pull-r.t
+test-pull-update.t
+test-purge.t
test-push-checkheads-partial-C1.t
test-push-checkheads-partial-C2.t
test-push-checkheads-partial-C3.t
@@ -105,27 +317,94 @@
test-push-checkheads-unpushed-D5.t
test-push-checkheads-unpushed-D6.t
test-push-checkheads-unpushed-D7.t
+test-push-http.t
+test-push-warn.t
+test-pushvars.t
+test-rebase-abort.t
+test-rebase-base-flag.t
+test-rebase-bookmarks.t
+test-rebase-brute-force.t
+test-rebase-cache.t
+test-rebase-check-restore.t
+test-rebase-collapse.t
+test-rebase-dest.t
+test-rebase-detach.t
+test-rebase-emptycommit.t
+test-rebase-inmemory.t
+test-rebase-interruptions.t
+test-rebase-issue-noparam-single-rev.t
+test-rebase-legacy.t
+test-rebase-mq-skip.t
+test-rebase-named-branches.t
+test-rebase-newancestor.t
+test-rebase-obsolete.t
+test-rebase-parameters.t
+test-rebase-partial.t
+test-rebase-pull.t
+test-rebase-rename.t
+test-rebase-scenario-global.t
+test-rebase-templates.t
+test-rebase-transaction.t
test-record.t
+test-relink.t
+test-remove.t
+test-rename-after-merge.t
test-rename-dir-merge.t
test-rename-merge1.t
test-rename.t
+test-repair-strip.t
+test-repo-compengines.t
+test-resolve.t
test-revert-flags.t
test-revert-unknown.t
+test-revlog-ancestry.py
test-revlog-group-emptyiter.t
test-revlog-mmapindex.t
test-revlog-packentry.t
+test-revset-dirstate-parents.t
+test-revset-outgoing.t
+test-rollback.t
test-run-tests.py
+test-schemes.t
+test-serve.t
+test-share.t
test-show-stack.t
+test-show-work.t
+test-show.t
test-simple-update.t
+test-single-head.t
test-sparse-clear.t
test-sparse-merges.t
test-sparse-requirement.t
test-sparse-verbose-json.t
+test-ssh-clone-r.t
+test-ssh-proto.t
+test-sshserver.py
+test-status-rev.t
test-status-terse.t
+test-strip-cross.t
+test-strip.t
+test-subrepo-deep-nested-change.t
+test-subrepo.t
+test-symlinks.t
+test-tag.t
+test-tags.t
+test-template-engine.t
+test-treemanifest.t
+test-unamend.t
test-uncommit.t
test-unified-test.t
test-unrelated-pull.t
+test-up-local-change.t
+test-update-branches.t
+test-update-dest.t
test-update-issue1456.t
test-update-names.t
test-update-reverse.t
+test-upgrade-repo.t
+test-url-rev.t
+test-username-newline.t
+test-verify.t
+test-websub.t
+test-win32text.t
test-xdg.t
--- a/contrib/synthrepo.py Thu Mar 15 22:35:07 2018 -0700
+++ b/contrib/synthrepo.py Mon Mar 19 08:07:18 2018 -0700
@@ -59,8 +59,8 @@
patch,
registrar,
scmutil,
- util,
)
+from mercurial.utils import dateutil
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
@@ -381,8 +381,8 @@
ui.progress(_synthesizing, None)
message = 'synthesized wide repo with %d files' % (len(files),)
mc = context.memctx(repo, [pctx.node(), nullid], message,
- files.iterkeys(), filectxfn, ui.username(),
- '%d %d' % util.makedate())
+ files, filectxfn, ui.username(),
+ '%d %d' % dateutil.makedate())
initnode = mc.commit()
if ui.debugflag:
hexfn = hex
--- a/contrib/wix/help.wxs Thu Mar 15 22:35:07 2018 -0700
+++ b/contrib/wix/help.wxs Mon Mar 19 08:07:18 2018 -0700
@@ -40,6 +40,7 @@
<Directory Id="help.internaldir" Name="internals">
<Component Id="help.internals" Guid="$(var.help.internals.guid)" Win64='$(var.IsX64)'>
+ <File Id="internals.bundle2.txt" Name="bundle2.txt" />
<File Id="internals.bundles.txt" Name="bundles.txt" KeyPath="yes" />
<File Id="internals.censor.txt" Name="censor.txt" />
<File Id="internals.changegroups.txt" Name="changegroups.txt" />
--- a/hgext/acl.py Thu Mar 15 22:35:07 2018 -0700
+++ b/hgext/acl.py Mon Mar 19 08:07:18 2018 -0700
@@ -193,8 +193,6 @@
from __future__ import absolute_import
-import getpass
-
from mercurial.i18n import _
from mercurial import (
error,
@@ -334,13 +332,13 @@
return
user = None
- if source == 'serve' and 'url' in kwargs:
- url = kwargs['url'].split(':')
+ if source == 'serve' and r'url' in kwargs:
+ url = kwargs[r'url'].split(':')
if url[0] == 'remote' and url[1].startswith('http'):
user = urlreq.unquote(url[3])
if user is None:
- user = getpass.getuser()
+ user = util.getuser()
ui.debug('acl: checking access for user "%s"\n' % user)
@@ -355,7 +353,7 @@
allow = buildmatch(ui, repo, user, 'acl.allow')
deny = buildmatch(ui, repo, user, 'acl.deny')
- for rev in xrange(repo[node], len(repo)):
+ for rev in xrange(repo[node].rev(), len(repo)):
ctx = repo[rev]
branch = ctx.branch()
if denybranches and denybranches(branch):
--- a/hgext/blackbox.py Thu Mar 15 22:35:07 2018 -0700
+++ b/hgext/blackbox.py Mon Mar 19 08:07:18 2018 -0700
@@ -49,6 +49,7 @@
ui as uimod,
util,
)
+from mercurial.utils import dateutil
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
@@ -164,7 +165,7 @@
return
ui._bbinlog = True
default = self.configdate('devel', 'default-date')
- date = util.datestr(default, '%Y/%m/%d %H:%M:%S')
+ date = dateutil.datestr(default, '%Y/%m/%d %H:%M:%S')
user = util.getuser()
pid = '%d' % util.getpid()
formattedmsg = msg[0] % msg[1:]
--- a/hgext/bugzilla.py Thu Mar 15 22:35:07 2018 -0700
+++ b/hgext/bugzilla.py Mon Mar 19 08:07:18 2018 -0700
@@ -300,8 +300,8 @@
from mercurial.i18n import _
from mercurial.node import short
from mercurial import (
- cmdutil,
error,
+ logcmdutil,
mail,
registrar,
url,
@@ -1090,9 +1090,8 @@
if not mapfile and not tmpl:
tmpl = _('changeset {node|short} in repo {root} refers '
'to bug {bug}.\ndetails:\n\t{desc|tabindent}')
- spec = cmdutil.logtemplatespec(tmpl, mapfile)
- t = cmdutil.changeset_templater(self.ui, self.repo, spec,
- False, None, False)
+ spec = logcmdutil.templatespec(tmpl, mapfile)
+ t = logcmdutil.changesettemplater(self.ui, self.repo, spec)
self.ui.pushbuffer()
t.show(ctx, changes=ctx.changeset(),
bug=str(bugid),
--- a/hgext/children.py Thu Mar 15 22:35:07 2018 -0700
+++ b/hgext/children.py Mon Mar 19 08:07:18 2018 -0700
@@ -19,6 +19,7 @@
from mercurial.i18n import _
from mercurial import (
cmdutil,
+ logcmdutil,
pycompat,
registrar,
)
@@ -65,7 +66,7 @@
ctx = repo[rev]
childctxs = ctx.children()
- displayer = cmdutil.show_changeset(ui, repo, opts)
+ displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
for cctx in childctxs:
displayer.show(cctx)
displayer.close()
--- a/hgext/churn.py Thu Mar 15 22:35:07 2018 -0700
+++ b/hgext/churn.py Mon Mar 19 08:07:18 2018 -0700
@@ -18,12 +18,13 @@
from mercurial import (
cmdutil,
encoding,
+ logcmdutil,
patch,
pycompat,
registrar,
scmutil,
- util,
)
+from mercurial.utils import dateutil
cmdtable = {}
command = registrar.command(cmdtable)
@@ -54,7 +55,7 @@
return date.strftime(opts['dateformat'])
else:
tmpl = opts.get('oldtemplate') or opts.get('template')
- tmpl = cmdutil.makelogtemplater(ui, repo, tmpl)
+ tmpl = logcmdutil.maketemplater(ui, repo, tmpl)
def getkey(ctx):
ui.pushbuffer()
tmpl.show(ctx)
@@ -64,7 +65,7 @@
rate = {}
df = False
if opts.get('date'):
- df = util.matchdate(opts['date'])
+ df = dateutil.matchdate(opts['date'])
m = scmutil.match(repo[None], pats, opts)
def prep(ctx, fns):
@@ -170,7 +171,7 @@
ui.warn(_("skipping malformed alias: %s\n") % l)
continue
- rate = countrate(ui, repo, amap, *pats, **opts).items()
+ rate = list(countrate(ui, repo, amap, *pats, **opts).items())
if not rate:
return
--- a/hgext/commitextras.py Thu Mar 15 22:35:07 2018 -0700
+++ b/hgext/commitextras.py Mon Mar 19 08:07:18 2018 -0700
@@ -70,7 +70,7 @@
# This __dict__ logic is needed because the normal
# extension.wrapfunction doesn't seem to work.
- repo.__dict__['commit'] = _wrappedcommit
+ repo.__dict__[r'commit'] = _wrappedcommit
return orig(ui, repo, *pats, **opts)
finally:
- del repo.__dict__['commit']
+ del repo.__dict__[r'commit']
--- a/hgext/convert/__init__.py Thu Mar 15 22:35:07 2018 -0700
+++ b/hgext/convert/__init__.py Mon Mar 19 08:07:18 2018 -0700
@@ -477,7 +477,8 @@
dates.'''
return cvsps.debugcvsps(ui, *args, **opts)
-def kwconverted(ctx, name):
+def kwconverted(context, mapping, name):
+ ctx = context.resource(mapping, 'ctx')
rev = ctx.extra().get('convert_revision', '')
if rev.startswith('svn:'):
if name == 'svnrev':
@@ -490,20 +491,20 @@
templatekeyword = registrar.templatekeyword()
-@templatekeyword('svnrev')
-def kwsvnrev(repo, ctx, **args):
+@templatekeyword('svnrev', requires={'ctx'})
+def kwsvnrev(context, mapping):
"""String. Converted subversion revision number."""
- return kwconverted(ctx, 'svnrev')
+ return kwconverted(context, mapping, 'svnrev')
-@templatekeyword('svnpath')
-def kwsvnpath(repo, ctx, **args):
+@templatekeyword('svnpath', requires={'ctx'})
+def kwsvnpath(context, mapping):
"""String. Converted subversion revision project path."""
- return kwconverted(ctx, 'svnpath')
+ return kwconverted(context, mapping, 'svnpath')
-@templatekeyword('svnuuid')
-def kwsvnuuid(repo, ctx, **args):
+@templatekeyword('svnuuid', requires={'ctx'})
+def kwsvnuuid(context, mapping):
"""String. Converted subversion revision repository identifier."""
- return kwconverted(ctx, 'svnuuid')
+ return kwconverted(context, mapping, 'svnuuid')
# tell hggettext to extract docstrings from these functions:
i18nfunctions = [kwsvnrev, kwsvnpath, kwsvnuuid]
--- a/hgext/convert/common.py Thu Mar 15 22:35:07 2018 -0700
+++ b/hgext/convert/common.py Mon Mar 19 08:07:18 2018 -0700
@@ -11,6 +11,7 @@
import errno
import os
import re
+import shlex
import subprocess
from mercurial.i18n import _
@@ -18,12 +19,65 @@
encoding,
error,
phases,
+ pycompat,
util,
)
pickle = util.pickle
propertycache = util.propertycache
+def _encodeornone(d):
+ if d is None:
+ return
+ return d.encode('latin1')
+
+class _shlexpy3proxy(object):
+
+ def __init__(self, l):
+ self._l = l
+
+ def __iter__(self):
+ return (_encodeornone(v) for v in self._l)
+
+ def get_token(self):
+ return _encodeornone(self._l.get_token())
+
+ @property
+ def infile(self):
+ return self._l.infile or '<unknown>'
+
+ @property
+ def lineno(self):
+ return self._l.lineno
+
+def shlexer(data=None, filepath=None, wordchars=None, whitespace=None):
+ if data is None:
+ if pycompat.ispy3:
+ data = open(filepath, 'r', encoding=r'latin1')
+ else:
+ data = open(filepath, 'r')
+ else:
+ if filepath is not None:
+ raise error.ProgrammingError(
+ 'shlexer only accepts data or filepath, not both')
+ if pycompat.ispy3:
+ data = data.decode('latin1')
+ l = shlex.shlex(data, infile=filepath, posix=True)
+ if whitespace is not None:
+ l.whitespace_split = True
+ if pycompat.ispy3:
+ l.whitespace += whitespace.decode('latin1')
+ else:
+ l.whitespace += whitespace
+ if wordchars is not None:
+ if pycompat.ispy3:
+ l.wordchars += wordchars.decode('latin1')
+ else:
+ l.wordchars += wordchars
+ if pycompat.ispy3:
+ return _shlexpy3proxy(l)
+ return l
+
def encodeargs(args):
def encodearg(s):
lines = base64.encodestring(s)
@@ -322,6 +376,7 @@
pass
def _cmdline(self, cmd, *args, **kwargs):
+ kwargs = pycompat.byteskwargs(kwargs)
cmdline = [self.command, cmd] + list(args)
for k, v in kwargs.iteritems():
if len(k) == 1:
@@ -337,7 +392,7 @@
pass
cmdline = [util.shellquote(arg) for arg in cmdline]
if not self.ui.debugflag:
- cmdline += ['2>', os.devnull]
+ cmdline += ['2>', pycompat.bytestr(os.devnull)]
cmdline = ' '.join(cmdline)
return cmdline
@@ -416,17 +471,17 @@
def _limit_arglist(self, arglist, cmd, *args, **kwargs):
cmdlen = len(self._cmdline(cmd, *args, **kwargs))
limit = self.argmax - cmdlen
- bytes = 0
+ numbytes = 0
fl = []
for fn in arglist:
b = len(fn) + 3
- if bytes + b < limit or len(fl) == 0:
+ if numbytes + b < limit or len(fl) == 0:
fl.append(fn)
- bytes += b
+ numbytes += b
else:
yield fl
fl = [fn]
- bytes = b
+ numbytes = b
if fl:
yield fl
@@ -447,7 +502,7 @@
if not self.path:
return
try:
- fp = open(self.path, 'r')
+ fp = open(self.path, 'rb')
except IOError as err:
if err.errno != errno.ENOENT:
raise
@@ -471,12 +526,12 @@
def __setitem__(self, key, value):
if self.fp is None:
try:
- self.fp = open(self.path, 'a')
+ self.fp = open(self.path, 'ab')
except IOError as err:
raise error.Abort(
_('could not open map file %r: %s') %
(self.path, encoding.strtolocal(err.strerror)))
- self.fp.write('%s %s\n' % (key, value))
+ self.fp.write(util.tonativeeol('%s %s\n' % (key, value)))
self.fp.flush()
super(mapfile, self).__setitem__(key, value)
@@ -486,7 +541,7 @@
self.fp = None
def makedatetimestamp(t):
- """Like util.makedate() but for time t instead of current time"""
+ """Like dateutil.makedate() but for time t instead of current time"""
delta = (datetime.datetime.utcfromtimestamp(t) -
datetime.datetime.fromtimestamp(t))
tz = delta.days * 86400 + delta.seconds
--- a/hgext/convert/convcmd.py Thu Mar 15 22:35:07 2018 -0700
+++ b/hgext/convert/convcmd.py Mon Mar 19 08:07:18 2018 -0700
@@ -8,7 +8,6 @@
import collections
import os
-import shlex
import shutil
from mercurial.i18n import _
@@ -16,9 +15,11 @@
encoding,
error,
hg,
+ pycompat,
scmutil,
util,
)
+from mercurial.utils import dateutil
from . import (
bzr,
@@ -55,9 +56,10 @@
def recode(s):
if isinstance(s, unicode):
- return s.encode(orig_encoding, 'replace')
+ return s.encode(pycompat.sysstr(orig_encoding), 'replace')
else:
- return s.decode('utf-8').encode(orig_encoding, 'replace')
+ return s.decode('utf-8').encode(
+ pycompat.sysstr(orig_encoding), 'replace')
def mapbranch(branch, branchmap):
'''
@@ -202,16 +204,14 @@
return {}
m = {}
try:
- fp = open(path, 'r')
+ fp = open(path, 'rb')
for i, line in enumerate(util.iterfile(fp)):
line = line.splitlines()[0].rstrip()
if not line:
# Ignore blank lines
continue
# split line
- lex = shlex.shlex(line, posix=True)
- lex.whitespace_split = True
- lex.whitespace += ','
+ lex = common.shlexer(data=line, whitespace=',')
line = list(lex)
# check number of parents
if not (2 <= len(line) <= 3):
@@ -356,7 +356,7 @@
dates = {}
def getdate(n):
if n not in dates:
- dates[n] = util.parsedate(self.commitcache[n].date)
+ dates[n] = dateutil.parsedate(self.commitcache[n].date)
return dates[n]
def picknext(nodes):
@@ -407,13 +407,14 @@
authorfile = self.authorfile
if authorfile:
self.ui.status(_('writing author map file %s\n') % authorfile)
- ofile = open(authorfile, 'w+')
+ ofile = open(authorfile, 'wb+')
for author in self.authors:
- ofile.write("%s=%s\n" % (author, self.authors[author]))
+ ofile.write(util.tonativeeol("%s=%s\n"
+ % (author, self.authors[author])))
ofile.close()
def readauthormap(self, authorfile):
- afile = open(authorfile, 'r')
+ afile = open(authorfile, 'rb')
for line in afile:
line = line.strip()
@@ -564,6 +565,7 @@
self.map.close()
def convert(ui, src, dest=None, revmapfile=None, **opts):
+ opts = pycompat.byteskwargs(opts)
global orig_encoding
orig_encoding = encoding.encoding
encoding.encoding = 'UTF-8'
--- a/hgext/convert/cvs.py Thu Mar 15 22:35:07 2018 -0700
+++ b/hgext/convert/cvs.py Mon Mar 19 08:07:18 2018 -0700
@@ -18,6 +18,7 @@
pycompat,
util,
)
+from mercurial.utils import dateutil
from . import (
common,
@@ -46,8 +47,8 @@
self.tags = {}
self.lastbranch = {}
self.socket = None
- self.cvsroot = open(os.path.join(cvs, "Root")).read()[:-1]
- self.cvsrepo = open(os.path.join(cvs, "Repository")).read()[:-1]
+ self.cvsroot = open(os.path.join(cvs, "Root"), 'rb').read()[:-1]
+ self.cvsrepo = open(os.path.join(cvs, "Repository"), 'rb').read()[:-1]
self.encoding = encoding.encoding
self._connect()
@@ -93,7 +94,7 @@
cs.comment = self.recode(cs.comment)
if self.ui.configbool('convert', 'localtimezone'):
cs.date = makedatetimestamp(cs.date[0])
- date = util.datestr(cs.date, '%Y-%m-%d %H:%M:%S %1%2')
+ date = dateutil.datestr(cs.date, '%Y-%m-%d %H:%M:%S %1%2')
self.tags.update(dict.fromkeys(cs.tags, id))
files = {}
@@ -141,7 +142,7 @@
passw = "A"
cvspass = os.path.expanduser("~/.cvspass")
try:
- pf = open(cvspass)
+ pf = open(cvspass, 'rb')
for line in pf.read().splitlines():
part1, part2 = line.split(' ', 1)
# /1 :pserver:user@example.com:2401/cvsroot/foo
@@ -179,7 +180,7 @@
# :ext:user@host/home/user/path/to/cvsroot
if root.startswith(":ext:"):
root = root[5:]
- m = re.match(r'(?:([^@:/]+)@)?([^:/]+):?(.*)', root)
+ m = re.match(br'(?:([^@:/]+)@)?([^:/]+):?(.*)', root)
# Do not take Windows path "c:\foo\bar" for a connection strings
if os.path.isdir(root) or not m:
conntype = "local"
--- a/hgext/convert/cvsps.py Thu Mar 15 22:35:07 2018 -0700
+++ b/hgext/convert/cvsps.py Mon Mar 19 08:07:18 2018 -0700
@@ -17,6 +17,7 @@
pycompat,
util,
)
+from mercurial.utils import dateutil
pickle = util.pickle
@@ -132,7 +133,7 @@
# Get the real directory in the repository
try:
- prefix = open(os.path.join('CVS','Repository')).read().strip()
+ prefix = open(os.path.join('CVS','Repository'), 'rb').read().strip()
directory = prefix
if prefix == ".":
prefix = ""
@@ -144,7 +145,7 @@
# Use the Root file in the sandbox, if it exists
try:
- root = open(os.path.join('CVS','Root')).read().strip()
+ root = open(os.path.join('CVS','Root'), 'rb').read().strip()
except IOError:
pass
@@ -170,14 +171,14 @@
# /pserver/user/server/path
# are mapped to different cache file names.
cachefile = root.split(":") + [directory, "cache"]
- cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s]
+ cachefile = ['-'.join(re.findall(br'\w+', s)) for s in cachefile if s]
cachefile = os.path.join(cachedir,
'.'.join([s for s in cachefile if s]))
if cache == 'update':
try:
ui.note(_('reading cvs log cache %s\n') % cachefile)
- oldlog = pickle.load(open(cachefile))
+ oldlog = pickle.load(open(cachefile, 'rb'))
for e in oldlog:
if not (util.safehasattr(e, 'branchpoints') and
util.safehasattr(e, 'commitid') and
@@ -192,7 +193,7 @@
if oldlog:
date = oldlog[-1].date # last commit date as a (time,tz) tuple
- date = util.datestr(date, '%Y/%m/%d %H:%M:%S %1%2')
+ date = dateutil.datestr(date, '%Y/%m/%d %H:%M:%S %1%2')
# build the CVS commandline
cmd = ['cvs', '-q']
@@ -336,7 +337,7 @@
if len(d.split()) != 3:
# cvs log dates always in GMT
d = d + ' UTC'
- e.date = util.parsedate(d, ['%y/%m/%d %H:%M:%S',
+ e.date = dateutil.parsedate(d, ['%y/%m/%d %H:%M:%S',
'%Y/%m/%d %H:%M:%S',
'%Y-%m-%d %H:%M:%S'])
e.author = scache(match.group(2))
@@ -486,7 +487,7 @@
# write the new cachefile
ui.note(_('writing cvs log cache %s\n') % cachefile)
- pickle.dump(log, open(cachefile, 'w'))
+ pickle.dump(log, open(cachefile, 'wb'))
else:
log = oldlog
@@ -855,6 +856,7 @@
repository, and convert the log to changesets based on matching
commit log entries and dates.
'''
+ opts = pycompat.byteskwargs(opts)
if opts["new_cache"]:
cache = "write"
elif opts["update_cache"]:
@@ -900,7 +902,7 @@
# bug-for-bug compatibility with cvsps.
ui.write('---------------------\n')
ui.write(('PatchSet %d \n' % cs.id))
- ui.write(('Date: %s\n' % util.datestr(cs.date,
+ ui.write(('Date: %s\n' % dateutil.datestr(cs.date,
'%Y/%m/%d %H:%M:%S %1%2')))
ui.write(('Author: %s\n' % cs.author))
ui.write(('Branch: %s\n' % (cs.branch or 'HEAD')))
--- a/hgext/convert/darcs.py Thu Mar 15 22:35:07 2018 -0700
+++ b/hgext/convert/darcs.py Mon Mar 19 08:07:18 2018 -0700
@@ -16,6 +16,7 @@
error,
util,
)
+from mercurial.utils import dateutil
from . import common
NoRepo = common.NoRepo
@@ -148,12 +149,14 @@
def getcommit(self, rev):
elt = self.changes[rev]
- date = util.strdate(elt.get('local_date'), '%a %b %d %H:%M:%S %Z %Y')
+ dateformat = '%a %b %d %H:%M:%S %Z %Y'
+ date = dateutil.strdate(elt.get('local_date'), dateformat)
desc = elt.findtext('name') + '\n' + elt.findtext('comment', '')
# etree can return unicode objects for name, comment, and author,
# so recode() is used to ensure str objects are emitted.
+ newdateformat = '%Y-%m-%d %H:%M:%S %1%2'
return common.commit(author=self.recode(elt.get('author')),
- date=util.datestr(date, '%Y-%m-%d %H:%M:%S %1%2'),
+ date=dateutil.datestr(date, newdateformat),
desc=self.recode(desc).strip(),
parents=self.parents[rev])
--- a/hgext/convert/filemap.py Thu Mar 15 22:35:07 2018 -0700
+++ b/hgext/convert/filemap.py Mon Mar 19 08:07:18 2018 -0700
@@ -7,11 +7,11 @@
from __future__ import absolute_import, print_function
import posixpath
-import shlex
from mercurial.i18n import _
from mercurial import (
error,
+ pycompat,
)
from . import common
SKIPREV = common.SKIPREV
@@ -68,11 +68,12 @@
name.endswith('/') or
'//' in name):
self.ui.warn(_('%s:%d: superfluous / in %s %r\n') %
- (lex.infile, lex.lineno, listname, name))
+ (lex.infile, lex.lineno, listname,
+ pycompat.bytestr(name)))
return 1
return 0
- lex = shlex.shlex(open(path), path, True)
- lex.wordchars += '!@#$%^&*()-=+[]{}|;:,./<>?'
+ lex = common.shlexer(
+ filepath=path, wordchars='!@#$%^&*()-=+[]{}|;:,./<>?')
cmd = lex.get_token()
while cmd:
if cmd == 'include':
@@ -93,7 +94,7 @@
errs += self.parse(normalize(lex.get_token()))
else:
self.ui.warn(_('%s:%d: unknown directive %r\n') %
- (lex.infile, lex.lineno, cmd))
+ (lex.infile, lex.lineno, pycompat.bytestr(cmd)))
errs += 1
cmd = lex.get_token()
return errs
--- a/hgext/convert/git.py Thu Mar 15 22:35:07 2018 -0700
+++ b/hgext/convert/git.py Mon Mar 19 08:07:18 2018 -0700
@@ -168,19 +168,19 @@
raise error.Abort(_('cannot retrieve git head "%s"') % rev)
return heads
- def catfile(self, rev, type):
+ def catfile(self, rev, ftype):
if rev == nodemod.nullhex:
raise IOError
self.catfilepipe[0].write(rev+'\n')
self.catfilepipe[0].flush()
info = self.catfilepipe[1].readline().split()
- if info[1] != type:
- raise error.Abort(_('cannot read %r object at %s') % (type, rev))
+ if info[1] != ftype:
+ raise error.Abort(_('cannot read %r object at %s') % (ftype, rev))
size = int(info[2])
data = self.catfilepipe[1].read(size)
if len(data) < size:
raise error.Abort(_('cannot read %r object at %s: unexpected size')
- % (type, rev))
+ % (ftype, rev))
# read the trailing newline
self.catfilepipe[1].read(1)
return data
--- a/hgext/convert/gnuarch.py Thu Mar 15 22:35:07 2018 -0700
+++ b/hgext/convert/gnuarch.py Mon Mar 19 08:07:18 2018 -0700
@@ -19,6 +19,7 @@
error,
util,
)
+from mercurial.utils import dateutil
from . import common
class gnuarch_source(common.converter_source, common.commandline):
@@ -280,8 +281,8 @@
catlog = self.catlogparser.parsestr(data)
# Commit date
- self.changes[rev].date = util.datestr(
- util.strdate(catlog['Standard-date'],
+ self.changes[rev].date = dateutil.datestr(
+ dateutil.strdate(catlog['Standard-date'],
'%Y-%m-%d %H:%M:%S'))
# Commit author
--- a/hgext/convert/hg.py Thu Mar 15 22:35:07 2018 -0700
+++ b/hgext/convert/hg.py Mon Mar 19 08:07:18 2018 -0700
@@ -36,13 +36,14 @@
scmutil,
util,
)
+from mercurial.utils import dateutil
stringio = util.stringio
from . import common
mapfile = common.mapfile
NoRepo = common.NoRepo
-sha1re = re.compile(r'\b[0-9a-f]{12,40}\b')
+sha1re = re.compile(br'\b[0-9a-f]{12,40}\b')
class mercurial_sink(common.converter_sink):
def __init__(self, ui, repotype, path):
@@ -563,12 +564,7 @@
if copysource in self.ignored:
continue
# Ignore copy sources not in parent revisions
- found = False
- for p in parents:
- if copysource in p:
- found = True
- break
- if not found:
+ if not any(copysource in p for p in parents):
continue
copies[name] = copysource
except TypeError:
@@ -588,7 +584,7 @@
crev = rev
return common.commit(author=ctx.user(),
- date=util.datestr(ctx.date(),
+ date=dateutil.datestr(ctx.date(),
'%Y-%m-%d %H:%M:%S %1%2'),
desc=ctx.description(),
rev=crev,
@@ -625,8 +621,8 @@
def converted(self, rev, destrev):
if self.convertfp is None:
- self.convertfp = open(self.repo.vfs.join('shamap'), 'a')
- self.convertfp.write('%s %s\n' % (destrev, rev))
+ self.convertfp = open(self.repo.vfs.join('shamap'), 'ab')
+ self.convertfp.write(util.tonativeeol('%s %s\n' % (destrev, rev)))
self.convertfp.flush()
def before(self):
--- a/hgext/convert/monotone.py Thu Mar 15 22:35:07 2018 -0700
+++ b/hgext/convert/monotone.py Mon Mar 19 08:07:18 2018 -0700
@@ -13,8 +13,9 @@
from mercurial.i18n import _
from mercurial import (
error,
- util,
+ pycompat,
)
+from mercurial.utils import dateutil
from . import common
@@ -36,7 +37,7 @@
if not os.path.exists(os.path.join(path, '_MTN')):
# Could be a monotone repository (SQLite db file)
try:
- f = file(path, 'rb')
+ f = open(path, 'rb')
header = f.read(16)
f.close()
except IOError:
@@ -45,11 +46,11 @@
raise norepo
# regular expressions for parsing monotone output
- space = r'\s*'
- name = r'\s+"((?:\\"|[^"])*)"\s*'
+ space = br'\s*'
+ name = br'\s+"((?:\\"|[^"])*)"\s*'
value = name
- revision = r'\s+\[(\w+)\]\s*'
- lines = r'(?:.|\n)+'
+ revision = br'\s+\[(\w+)\]\s*'
+ lines = br'(?:.|\n)+'
self.dir_re = re.compile(space + "dir" + name)
self.file_re = re.compile(space + "file" + name +
@@ -84,11 +85,12 @@
return self.mtnrunsingle(*args, **kwargs)
def mtnrunsingle(self, *args, **kwargs):
- kwargs['d'] = self.path
+ kwargs[r'd'] = self.path
return self.run0('automate', *args, **kwargs)
def mtnrunstdio(self, *args, **kwargs):
# Prepare the command in automate stdio format
+ kwargs = pycompat.byteskwargs(kwargs)
command = []
for k, v in kwargs.iteritems():
command.append("%s:%s" % (len(k), k))
@@ -308,9 +310,10 @@
certs = self.mtngetcerts(rev)
if certs.get('suspend') == certs["branch"]:
extra['close'] = 1
+ dateformat = "%Y-%m-%dT%H:%M:%S"
return common.commit(
author=certs["author"],
- date=util.datestr(util.strdate(certs["date"], "%Y-%m-%dT%H:%M:%S")),
+ date=dateutil.datestr(dateutil.strdate(certs["date"], dateformat)),
desc=certs["changelog"],
rev=rev,
parents=self.mtnrun("parents", rev).splitlines(),
--- a/hgext/convert/p4.py Thu Mar 15 22:35:07 2018 -0700
+++ b/hgext/convert/p4.py Mon Mar 19 08:07:18 2018 -0700
@@ -14,6 +14,7 @@
error,
util,
)
+from mercurial.utils import dateutil
from . import common
@@ -346,7 +347,7 @@
parents = []
return common.commit(author=self.recode(obj["user"]),
- date=util.datestr(date, '%Y-%m-%d %H:%M:%S %1%2'),
+ date=dateutil.datestr(date, '%Y-%m-%d %H:%M:%S %1%2'),
parents=parents, desc=desc, branch=None, rev=obj['change'],
extra={"p4": obj['change'], "convert_revision": obj['change']})
--- a/hgext/convert/subversion.py Thu Mar 15 22:35:07 2018 -0700
+++ b/hgext/convert/subversion.py Mon Mar 19 08:07:18 2018 -0700
@@ -16,6 +16,7 @@
util,
vfs as vfsmod,
)
+from mercurial.utils import dateutil
from . import common
@@ -146,10 +147,10 @@
# Caller may interrupt the iteration
pickle.dump(None, fp, protocol)
except Exception as inst:
- pickle.dump(str(inst), fp, protocol)
+ pickle.dump(util.forcebytestr(inst), fp, protocol)
else:
pickle.dump(None, fp, protocol)
- fp.close()
+ fp.flush()
# With large history, cleanup process goes crazy and suddenly
# consumes *huge* amount of memory. The output file being closed,
# there is no need for clean termination.
@@ -231,7 +232,7 @@
def httpcheck(ui, path, proto):
try:
opener = urlreq.buildopener()
- rsp = opener.open('%s://%s/!svn/ver/0/.svn' % (proto, path))
+ rsp = opener.open('%s://%s/!svn/ver/0/.svn' % (proto, path), 'rb')
data = rsp.read()
except urlerr.httperror as inst:
if inst.code != 404:
@@ -384,7 +385,7 @@
def setrevmap(self, revmap):
lastrevs = {}
- for revid in revmap.iterkeys():
+ for revid in revmap:
uuid, module, revnum = revsplit(revid)
lastrevnum = lastrevs.setdefault(module, revnum)
if revnum > lastrevnum:
@@ -639,8 +640,9 @@
return
if self.convertfp is None:
self.convertfp = open(os.path.join(self.wc, '.svn', 'hg-shamap'),
- 'a')
- self.convertfp.write('%s %d\n' % (destrev, self.revnum(rev)))
+ 'ab')
+ self.convertfp.write(util.tonativeeol('%s %d\n'
+ % (destrev, self.revnum(rev))))
self.convertfp.flush()
def revid(self, revnum, module=None):
@@ -890,7 +892,7 @@
# Example SVN datetime. Includes microseconds.
# ISO-8601 conformant
# '2007-01-04T17:35:00.902377Z'
- date = util.parsedate(date[:19] + " UTC", ["%Y-%m-%dT%H:%M:%S"])
+ date = dateutil.parsedate(date[:19] + " UTC", ["%Y-%m-%dT%H:%M:%S"])
if self.ui.configbool('convert', 'localtimezone'):
date = makedatetimestamp(date[0])
@@ -912,7 +914,7 @@
branch = None
cset = commit(author=author,
- date=util.datestr(date, '%Y-%m-%d %H:%M:%S %1%2'),
+ date=dateutil.datestr(date, '%Y-%m-%d %H:%M:%S %1%2'),
desc=log,
parents=parents,
branch=branch,
@@ -1128,7 +1130,7 @@
self.wc = os.path.realpath(path)
self.run0('update')
else:
- if not re.search(r'^(file|http|https|svn|svn\+ssh)\://', path):
+ if not re.search(br'^(file|http|https|svn|svn\+ssh)\://', path):
path = os.path.realpath(path)
if os.path.isdir(os.path.dirname(path)):
if not os.path.exists(os.path.join(path, 'db', 'fs-type')):
@@ -1158,7 +1160,7 @@
if created:
hook = os.path.join(created, 'hooks', 'pre-revprop-change')
- fp = open(hook, 'w')
+ fp = open(hook, 'wb')
fp.write(pre_revprop_change)
fp.close()
util.setflags(hook, False, True)
@@ -1308,8 +1310,8 @@
self.setexec = []
fd, messagefile = tempfile.mkstemp(prefix='hg-convert-')
- fp = os.fdopen(fd, pycompat.sysstr('w'))
- fp.write(commit.desc)
+ fp = os.fdopen(fd, r'wb')
+ fp.write(util.tonativeeol(commit.desc))
fp.close()
try:
output = self.run0('commit',
--- a/hgext/eol.py Thu Mar 15 22:35:07 2018 -0700
+++ b/hgext/eol.py Mon Mar 19 08:07:18 2018 -0700
@@ -222,7 +222,7 @@
data = ctx[f].data()
if (target == "to-lf" and "\r\n" in data
or target == "to-crlf" and singlelf.search(data)):
- failed.append((f, target, str(ctx)))
+ failed.append((f, target, bytes(ctx)))
break
return failed
--- a/hgext/extdiff.py Thu Mar 15 22:35:07 2018 -0700
+++ b/hgext/extdiff.py Mon Mar 19 08:07:18 2018 -0700
@@ -65,6 +65,7 @@
import os
import re
import shutil
+import stat
import tempfile
from mercurial.i18n import _
from mercurial.node import (
@@ -88,12 +89,12 @@
configtable = {}
configitem = registrar.configitem(configtable)
-configitem('extdiff', r'opts\..*',
+configitem('extdiff', br'opts\..*',
default='',
generic=True,
)
-configitem('diff-tools', r'.*\.diffargs$',
+configitem('diff-tools', br'.*\.diffargs$',
default=None,
generic=True,
)
@@ -256,8 +257,8 @@
cmdutil.export(repo, [repo[node1a].rev(), repo[node2].rev()],
fntemplate=repo.vfs.reljoin(tmproot, template),
match=matcher)
- label1a = cmdutil.makefilename(repo, template, node1a)
- label2 = cmdutil.makefilename(repo, template, node2)
+ label1a = cmdutil.makefilename(repo[node1a], template)
+ label2 = cmdutil.makefilename(repo[node2], template)
dir1a = repo.vfs.reljoin(tmproot, label1a)
dir2 = repo.vfs.reljoin(tmproot, label2)
dir1b = None
@@ -279,13 +280,13 @@
return pre + util.shellquote(replace[key])
# Match parent2 first, so 'parent1?' will match both parent1 and parent
- regex = (r'''(['"]?)([^\s'"$]*)'''
- r'\$(parent2|parent1?|child|plabel1|plabel2|clabel|root)\1')
+ regex = (br'''(['"]?)([^\s'"$]*)'''
+ br'\$(parent2|parent1?|child|plabel1|plabel2|clabel|root)\1')
if not do3way and not re.search(regex, cmdline):
cmdline += ' $parent1 $child'
cmdline = re.sub(regex, quote, cmdline)
- ui.debug('running %r in %s\n' % (cmdline, tmproot))
+ ui.debug('running %r in %s\n' % (pycompat.bytestr(cmdline), tmproot))
ui.system(cmdline, cwd=tmproot, blockedtag='extdiff')
for copy_fn, working_fn, st in fnsandstat:
@@ -297,7 +298,8 @@
# copyfile() carries over the permission, so the mode check could
# be in an 'elif' branch, but for the case where the file has
# changed without affecting mtime or size.
- if (cpstat.st_mtime != st.st_mtime or cpstat.st_size != st.st_size
+ if (cpstat[stat.ST_MTIME] != st[stat.ST_MTIME]
+ or cpstat.st_size != st.st_size
or (cpstat.st_mode & 0o100) != (st.st_mode & 0o100)):
ui.debug('file changed while diffing. '
'Overwriting: %s (src: %s)\n' % (working_fn, copy_fn))
@@ -366,7 +368,7 @@
# We can't pass non-ASCII through docstrings (and path is
# in an unknown encoding anyway)
docpath = util.escapestr(path)
- self.__doc__ = self.__doc__ % {'path': util.uirepr(docpath)}
+ self.__doc__ %= {r'path': pycompat.sysstr(util.uirepr(docpath))}
self._cmdline = cmdline
def __call__(self, ui, repo, *pats, **opts):
--- a/hgext/fetch.py Thu Mar 15 22:35:07 2018 -0700
+++ b/hgext/fetch.py Mon Mar 19 08:07:18 2018 -0700
@@ -23,6 +23,7 @@
registrar,
util,
)
+from mercurial.utils import dateutil
release = lock.release
cmdtable = {}
@@ -64,7 +65,7 @@
opts = pycompat.byteskwargs(opts)
date = opts.get('date')
if date:
- opts['date'] = util.parsedate(date)
+ opts['date'] = dateutil.parsedate(date)
parent, _p2 = repo.dirstate.parents()
branch = repo.dirstate.branch()
--- a/hgext/githelp.py Thu Mar 15 22:35:07 2018 -0700
+++ b/hgext/githelp.py Mon Mar 19 08:07:18 2018 -0700
@@ -22,6 +22,7 @@
from mercurial.i18n import _
from mercurial import (
+ encoding,
error,
fancyopts,
registrar,
@@ -109,7 +110,7 @@
self.args = []
self.opts = {}
- def __str__(self):
+ def __bytes__(self):
cmd = "hg " + self.name
if self.opts:
for k, values in sorted(self.opts.iteritems()):
@@ -123,6 +124,8 @@
cmd += " ".join(self.args)
return cmd
+ __str__ = encoding.strmethod(__bytes__)
+
def append(self, value):
self.args.append(value)
@@ -167,14 +170,14 @@
ui.status(_("note: use hg addremove to remove files that have "
"been deleted.\n\n"))
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def am(ui, repo, *args, **kwargs):
cmdoptions=[
]
args, opts = parseoptions(ui, cmdoptions, args)
cmd = Command('import')
- ui.status(str(cmd), "\n")
+ ui.status(bytes(cmd), "\n")
def apply(ui, repo, *args, **kwargs):
cmdoptions = [
@@ -187,7 +190,7 @@
cmd['-p'] = opts.get('p')
cmd.extend(args)
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def bisect(ui, repo, *args, **kwargs):
ui.status(_("See 'hg help bisect' for how to use bisect.\n\n"))
@@ -198,7 +201,7 @@
args, opts = parseoptions(ui, cmdoptions, args)
cmd = Command('annotate -udl')
cmd.extend([convert(v) for v in args])
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def branch(ui, repo, *args, **kwargs):
cmdoptions = [
@@ -239,7 +242,7 @@
cmd.append(args[0])
elif len(args) == 1:
cmd.append(args[0])
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def ispath(repo, string):
"""
@@ -330,7 +333,7 @@
else:
raise error.Abort("a commit must be specified")
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def cherrypick(ui, repo, *args, **kwargs):
cmdoptions = [
@@ -352,7 +355,7 @@
else:
cmd.extend(args)
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def clean(ui, repo, *args, **kwargs):
cmdoptions = [
@@ -367,7 +370,7 @@
cmd['--all'] = None
cmd.extend(args)
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def clone(ui, repo, *args, **kwargs):
cmdoptions = [
@@ -397,7 +400,7 @@
cocmd.append(opts.get('branch'))
cmd = cmd & cocmd
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def commit(ui, repo, *args, **kwargs):
cmdoptions = [
@@ -445,7 +448,7 @@
cmd.extend(args)
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def deprecated(ui, repo, *args, **kwargs):
ui.warn(_('This command has been deprecated in the git project, ' +
@@ -476,7 +479,7 @@
except Exception:
cmd.append(a)
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def difftool(ui, repo, *args, **kwargs):
ui.status(_('Mercurial does not enable external difftool by default. You '
@@ -509,7 +512,7 @@
else:
cmd['-r'] = v
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def grep(ui, repo, *args, **kwargs):
cmdoptions = [
@@ -522,7 +525,7 @@
# pattern first, followed by paths.
cmd.extend(args)
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def init(ui, repo, *args, **kwargs):
cmdoptions = [
@@ -534,7 +537,7 @@
if len(args) > 0:
cmd.append(args[0])
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def log(ui, repo, *args, **kwargs):
cmdoptions = [
@@ -588,7 +591,7 @@
del args[0]
cmd.extend(args)
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def lsfiles(ui, repo, *args, **kwargs):
cmdoptions = [
@@ -624,7 +627,7 @@
for include in args:
cmd['-I'] = util.shellquote(include)
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def merge(ui, repo, *args, **kwargs):
cmdoptions = [
@@ -636,7 +639,7 @@
if len(args) > 0:
cmd.append(args[len(args) - 1])
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def mergebase(ui, repo, *args, **kwargs):
cmdoptions = []
@@ -650,7 +653,7 @@
ui.status(_('NOTE: ancestors() is part of the revset language.\n'),
_("Learn more about revsets with 'hg help revsets'\n\n"))
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def mergetool(ui, repo, *args, **kwargs):
cmdoptions = []
@@ -661,7 +664,7 @@
if len(args) == 0:
cmd['--all'] = None
cmd.extend(args)
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def mv(ui, repo, *args, **kwargs):
cmdoptions = [
@@ -675,7 +678,7 @@
if opts.get('force'):
cmd['-f'] = None
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def pull(ui, repo, *args, **kwargs):
cmdoptions = [
@@ -701,7 +704,7 @@
else:
cmd['-r'] = v
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def push(ui, repo, *args, **kwargs):
cmdoptions = [
@@ -728,7 +731,7 @@
if opts.get('force'):
cmd['-f'] = None
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def rebase(ui, repo, *args, **kwargs):
cmdoptions = [
@@ -748,12 +751,12 @@
if len(args) > 0:
ui.status(_("also note: 'hg histedit' will automatically detect"
" your stack, so no second argument is necessary.\n\n"))
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
return
if opts.get('skip'):
cmd = Command('revert --all -r .')
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
cmd = Command('rebase')
@@ -777,7 +780,7 @@
cmd['-d'] = convert(args[0])
cmd['-b'] = convert(args[1])
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def reflog(ui, repo, *args, **kwargs):
cmdoptions = [
@@ -791,7 +794,7 @@
if len(args) > 0:
cmd.append(args[0])
- ui.status(str(cmd), "\n\n")
+ ui.status(bytes(cmd), "\n\n")
ui.status(_("note: in hg commits can be deleted from repo but we always"
" have backups.\n"))
@@ -819,7 +822,7 @@
cmd.append(commit)
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def revert(ui, repo, *args, **kwargs):
cmdoptions = [
@@ -834,7 +837,7 @@
if args:
cmd.append(args[0])
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def revparse(ui, repo, *args, **kwargs):
cmdoptions = [
@@ -847,7 +850,7 @@
cmd = Command('root')
if opts.get('show_cdup'):
ui.status(_("note: hg root prints the root of the repository\n\n"))
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
else:
ui.status(_("note: see hg help revset for how to refer to commits\n"))
@@ -866,7 +869,7 @@
if opts.get('dry_run'):
cmd['-n'] = None
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def show(ui, repo, *args, **kwargs):
cmdoptions = [
@@ -898,7 +901,7 @@
else:
cmd = Command('export')
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def stash(ui, repo, *args, **kwargs):
cmdoptions = [
@@ -934,7 +937,7 @@
elif len(args) > 1:
cmd['--name'] = args[1]
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def status(ui, repo, *args, **kwargs):
cmdoptions = [
@@ -948,7 +951,7 @@
if opts.get('ignored'):
cmd['-i'] = None
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def svn(ui, repo, *args, **kwargs):
svncmd = args[0]
@@ -965,7 +968,7 @@
cmd = Command('push')
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def svnfetch(ui, repo, *args, **kwargs):
cmdoptions = [
@@ -975,7 +978,7 @@
cmd = Command('pull')
cmd.append('default-push')
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def svnfindrev(ui, repo, *args, **kwargs):
cmdoptions = [
@@ -985,7 +988,7 @@
cmd = Command('log')
cmd['-r'] = args[0]
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def svnrebase(ui, repo, *args, **kwargs):
cmdoptions = [
@@ -1000,7 +1003,7 @@
cmd = pullcmd & rebasecmd
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def tag(ui, repo, *args, **kwargs):
cmdoptions = [
@@ -1024,7 +1027,7 @@
if opts.get('force'):
cmd['-f'] = None
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
gitcommands = {
'add': add,
--- a/hgext/gpg.py Thu Mar 15 22:35:07 2018 -0700
+++ b/hgext/gpg.py Mon Mar 19 08:07:18 2018 -0700
@@ -21,6 +21,7 @@
registrar,
util,
)
+from mercurial.utils import dateutil
cmdtable = {}
command = registrar.command(cmdtable)
@@ -59,11 +60,11 @@
try:
# create temporary files
fd, sigfile = tempfile.mkstemp(prefix="hg-gpg-", suffix=".sig")
- fp = os.fdopen(fd, pycompat.sysstr('wb'))
+ fp = os.fdopen(fd, r'wb')
fp.write(sig)
fp.close()
fd, datafile = tempfile.mkstemp(prefix="hg-gpg-", suffix=".txt")
- fp = os.fdopen(fd, pycompat.sysstr('wb'))
+ fp = os.fdopen(fd, r'wb')
fp.write(data)
fp.close()
gpgcmd = ("%s --logger-fd 1 --status-fd 1 --verify "
@@ -153,8 +154,7 @@
# warn for expired key and/or sigs
for key in keys:
if key[0] == "ERRSIG":
- ui.write(_("%s Unknown key ID \"%s\"\n")
- % (prefix, shortkey(ui, key[1][:15])))
+ ui.write(_("%s Unknown key ID \"%s\"\n") % (prefix, key[1]))
continue
if key[0] == "BADSIG":
ui.write(_("%s Bad signature from \"%s\"\n") % (prefix, key[2]))
@@ -259,7 +259,7 @@
date = opts.get('date')
if date:
- opts['date'] = util.parsedate(date)
+ opts['date'] = dateutil.parsedate(date)
if revs:
nodes = [repo.lookup(n) for n in revs]
@@ -318,14 +318,7 @@
repo.commit(message, opts['user'], opts['date'], match=msigs,
editor=editor)
except ValueError as inst:
- raise error.Abort(str(inst))
-
-def shortkey(ui, key):
- if len(key) != 16:
- ui.debug("key ID \"%s\" format error\n" % key)
- return key
-
- return key[-8:]
+ raise error.Abort(pycompat.bytestr(inst))
def node2txt(repo, node, ver):
"""map a manifest into some text"""
--- a/hgext/hgk.py Thu Mar 15 22:35:07 2018 -0700
+++ b/hgext/hgk.py Mon Mar 19 08:07:18 2018 -0700
@@ -51,7 +51,6 @@
pycompat,
registrar,
scmutil,
- util,
)
cmdtable = {}
@@ -105,15 +104,15 @@
while True:
if opts[r'stdin']:
- try:
- line = util.bytesinput(ui.fin, ui.fout).split(' ')
- node1 = line[0]
- if len(line) > 1:
- node2 = line[1]
- else:
- node2 = None
- except EOFError:
+ line = ui.fin.readline()
+ if not line:
break
+ line = line.rstrip(pycompat.oslinesep).split(b' ')
+ node1 = line[0]
+ if len(line) > 1:
+ node2 = line[1]
+ else:
+ node2 = None
node1 = repo.lookup(node1)
if node2:
node2 = repo.lookup(node2)
@@ -146,7 +145,7 @@
date = ctx.date()
description = ctx.description().replace("\0", "")
- ui.write(("author %s %s %s\n" % (ctx.user(), int(date[0]), date[1])))
+ ui.write(("author %s %d %d\n" % (ctx.user(), int(date[0]), date[1])))
if 'committer' in ctx.extra():
ui.write(("committer %s\n" % ctx.extra()['committer']))
@@ -186,12 +185,11 @@
#
prefix = ""
if opts[r'stdin']:
- try:
- (type, r) = util.bytesinput(ui.fin, ui.fout).split(' ')
- prefix = " "
- except EOFError:
+ line = ui.fin.readline()
+ if not line:
return
-
+ (type, r) = line.rstrip(pycompat.oslinesep).split(b' ')
+ prefix = " "
else:
if not type or not r:
ui.warn(_("cat-file: type or revision not supplied\n"))
@@ -204,10 +202,10 @@
n = repo.lookup(r)
catcommit(ui, repo, n, prefix)
if opts[r'stdin']:
- try:
- (type, r) = util.bytesinput(ui.fin, ui.fout).split(' ')
- except EOFError:
+ line = ui.fin.readline()
+ if not line:
break
+ (type, r) = line.rstrip(pycompat.oslinesep).split(b' ')
else:
break
--- a/hgext/highlight/__init__.py Thu Mar 15 22:35:07 2018 -0700
+++ b/hgext/highlight/__init__.py Mon Mar 19 08:07:18 2018 -0700
@@ -30,13 +30,11 @@
from . import highlight
from mercurial.hgweb import (
- common,
webcommands,
webutil,
)
from mercurial import (
- encoding,
extensions,
fileset,
)
@@ -59,8 +57,8 @@
highlight.pygmentize(field, fctx, style, tmpl,
guessfilenameonly=filenameonly)
-def filerevision_highlight(orig, web, req, tmpl, fctx):
- mt = ''.join(tmpl('mimetype', encoding=encoding.encoding))
+def filerevision_highlight(orig, web, fctx):
+ mt = web.res.headers['Content-Type']
# only pygmentize for mimetype containing 'html' so we both match
# 'text/html' and possibly 'application/xhtml+xml' in the future
# so that we don't have to touch the extension when the mimetype
@@ -69,24 +67,27 @@
# can't clash with the file's content-type here in case we
# pygmentize a html file
if 'html' in mt:
- pygmentize(web, 'fileline', fctx, tmpl)
+ pygmentize(web, 'fileline', fctx, web.tmpl)
- return orig(web, req, tmpl, fctx)
+ return orig(web, fctx)
-def annotate_highlight(orig, web, req, tmpl):
- mt = ''.join(tmpl('mimetype', encoding=encoding.encoding))
+def annotate_highlight(orig, web):
+ mt = web.res.headers['Content-Type']
if 'html' in mt:
- fctx = webutil.filectx(web.repo, req)
- pygmentize(web, 'annotateline', fctx, tmpl)
+ fctx = webutil.filectx(web.repo, web.req)
+ pygmentize(web, 'annotateline', fctx, web.tmpl)
- return orig(web, req, tmpl)
+ return orig(web)
-def generate_css(web, req, tmpl):
+def generate_css(web):
pg_style = web.config('web', 'pygments_style', 'colorful')
fmter = highlight.HtmlFormatter(style=pg_style)
- req.respond(common.HTTP_OK, 'text/css')
- return ['/* pygments_style = %s */\n\n' % pg_style,
- fmter.get_style_defs('')]
+ web.res.headers['Content-Type'] = 'text/css'
+ web.res.setbodybytes(''.join([
+ '/* pygments_style = %s */\n\n' % pg_style,
+ fmter.get_style_defs(''),
+ ]))
+ return web.res.sendresponse()
def extsetup():
# monkeypatch in the new version
--- a/hgext/histedit.py Thu Mar 15 22:35:07 2018 -0700
+++ b/hgext/histedit.py Mon Mar 19 08:07:18 2018 -0700
@@ -221,7 +221,7 @@
default=False,
)
configitem('histedit', 'defaultrev',
- default=configitem.dynamicdefault,
+ default=None,
)
configitem('histedit', 'dropmissing',
default=False,
@@ -344,7 +344,7 @@
fp.write('v1\n')
fp.write('%s\n' % node.hex(self.parentctxnode))
fp.write('%s\n' % node.hex(self.topmost))
- fp.write('%s\n' % self.keep)
+ fp.write('%s\n' % ('True' if self.keep else 'False'))
fp.write('%d\n' % len(self.actions))
for action in self.actions:
fp.write('%s\n' % action.tostate())
@@ -491,7 +491,7 @@
repo.dirstate.setbranch(rulectx.branch())
if stats and stats[3] > 0:
buf = repo.ui.popbuffer()
- repo.ui.write(*buf)
+ repo.ui.write(buf)
raise error.InterventionRequired(
_('Fix up the change (%s %s)') %
(self.verb, node.short(self.node)),
@@ -567,7 +567,7 @@
repo.ui.setconfig('ui', 'forcemerge', '', 'histedit')
return stats
-def collapse(repo, first, last, commitopts, skipprompt=False):
+def collapse(repo, firstctx, lastctx, commitopts, skipprompt=False):
"""collapse the set of revisions from first to last as new one.
Expected commit options are:
@@ -577,14 +577,14 @@
Commit message is edited in all cases.
This function works in memory."""
- ctxs = list(repo.set('%d::%d', first, last))
+ ctxs = list(repo.set('%d::%d', firstctx.rev(), lastctx.rev()))
if not ctxs:
return None
for c in ctxs:
if not c.mutable():
raise error.ParseError(
_("cannot fold into public change %s") % node.short(c.node()))
- base = first.parents()[0]
+ base = firstctx.parents()[0]
# commit a new version of the old changeset, including the update
# collect all files which might be affected
@@ -593,15 +593,15 @@
files.update(ctx.files())
# Recompute copies (avoid recording a -> b -> a)
- copied = copies.pathcopies(base, last)
+ copied = copies.pathcopies(base, lastctx)
# prune files which were reverted by the updates
- files = [f for f in files if not cmdutil.samefile(f, last, base)]
+ files = [f for f in files if not cmdutil.samefile(f, lastctx, base)]
# commit version of these files as defined by head
- headmf = last.manifest()
+ headmf = lastctx.manifest()
def filectxfn(repo, ctx, path):
if path in headmf:
- fctx = last[path]
+ fctx = lastctx[path]
flags = fctx.flags()
mctx = context.memfilectx(repo, ctx,
fctx.path(), fctx.data(),
@@ -614,12 +614,12 @@
if commitopts.get('message'):
message = commitopts['message']
else:
- message = first.description()
+ message = firstctx.description()
user = commitopts.get('user')
date = commitopts.get('date')
extra = commitopts.get('extra')
- parents = (first.p1().node(), first.p2().node())
+ parents = (firstctx.p1().node(), firstctx.p2().node())
editor = None
if not skipprompt:
editor = cmdutil.getcommiteditor(edit=True, editform='histedit.fold')
@@ -730,8 +730,9 @@
return ctx, [(self.node, (parentctxnode,))]
parentctx = repo[parentctxnode]
- newcommits = set(c.node() for c in repo.set('(%d::. - %d)', parentctx,
- parentctx))
+ newcommits = set(c.node() for c in repo.set('(%d::. - %d)',
+ parentctx.rev(),
+ parentctx.rev()))
if not newcommits:
repo.ui.warn(_('%s: cannot fold - working copy is not a '
'descendant of previous commit %s\n') %
@@ -1316,8 +1317,8 @@
# Create a backup so we can always abort completely.
backupfile = None
if not obsolete.isenabled(repo, obsolete.createmarkersopt):
- backupfile = repair._bundle(repo, [parentctxnode], [topmost], root,
- 'histedit')
+ backupfile = repair.backupbundle(repo, [parentctxnode],
+ [topmost], root, 'histedit')
state.backupfile = backupfile
def _getsummary(ctx):
@@ -1353,19 +1354,19 @@
"""select and validate the set of revision to edit
When keep is false, the specified set can't have children."""
- ctxs = list(repo.set('%n::%n', old, new))
- if ctxs and not keep:
+ revs = repo.revs('%n::%n', old, new)
+ if revs and not keep:
if (not obsolete.isenabled(repo, obsolete.allowunstableopt) and
- repo.revs('(%ld::) - (%ld)', ctxs, ctxs)):
+ repo.revs('(%ld::) - (%ld)', revs, revs)):
raise error.Abort(_('can only histedit a changeset together '
'with all its descendants'))
- if repo.revs('(%ld) and merge()', ctxs):
+ if repo.revs('(%ld) and merge()', revs):
raise error.Abort(_('cannot edit history that contains merges'))
- root = ctxs[0] # list is already sorted by repo.set
+ root = repo[revs.first()] # list is already sorted by repo.revs()
if not root.mutable():
raise error.Abort(_('cannot edit public changeset: %s') % root,
hint=_("see 'hg help phases' for details"))
- return [c.node() for c in ctxs]
+ return pycompat.maplist(repo.changelog.node, revs)
def ruleeditor(repo, ui, actions, editcomment=""):
"""open an editor to edit rules
@@ -1415,9 +1416,8 @@
# Save edit rules in .hg/histedit-last-edit.txt in case
# the user needs to ask for help after something
# surprising happens.
- f = open(repo.vfs.join('histedit-last-edit.txt'), 'w')
- f.write(rules)
- f.close()
+ with repo.vfs('histedit-last-edit.txt', 'wb') as f:
+ f.write(rules)
return rules
--- a/hgext/journal.py Thu Mar 15 22:35:07 2018 -0700
+++ b/hgext/journal.py Mon Mar 19 08:07:18 2018 -0700
@@ -24,18 +24,19 @@
bookmarks,
cmdutil,
dispatch,
+ encoding,
error,
extensions,
hg,
localrepo,
lock,
+ logcmdutil,
node,
pycompat,
registrar,
util,
)
-
-from . import share
+from mercurial.utils import dateutil
cmdtable = {}
command = registrar.command(cmdtable)
@@ -168,7 +169,7 @@
"""Copy shared journal entries into this repo when unsharing"""
if (repo.path == repopath and repo.shared() and
util.safehasattr(repo, 'journal')):
- sharedrepo = share._getsrcrepo(repo)
+ sharedrepo = hg.sharedreposource(repo)
sharedfeatures = _readsharedfeatures(repo)
if sharedrepo and sharedfeatures > {'journal'}:
# there is a shared repository and there are shared journal entries
@@ -219,8 +220,8 @@
(timestamp, tz), user, command, namespace, name,
oldhashes, newhashes)
- def __str__(self):
- """String representation for storage"""
+ def __bytes__(self):
+ """bytes representation for storage"""
time = ' '.join(map(str, self.timestamp))
oldhashes = ','.join([node.hex(hash) for hash in self.oldhashes])
newhashes = ','.join([node.hex(hash) for hash in self.newhashes])
@@ -228,6 +229,8 @@
time, self.user, self.command, self.namespace, self.name,
oldhashes, newhashes))
+ __str__ = encoding.strmethod(__bytes__)
+
class journalstorage(object):
"""Storage for journal entries
@@ -257,7 +260,7 @@
self.sharedfeatures = self.sharedvfs = None
if repo.shared():
features = _readsharedfeatures(repo)
- sharedrepo = share._getsrcrepo(repo)
+ sharedrepo = hg.sharedreposource(repo)
if sharedrepo is not None and 'journal' in features:
self.sharedvfs = sharedrepo.vfs
self.sharedfeatures = features
@@ -327,7 +330,7 @@
newhashes = [newhashes]
entry = journalentry(
- util.makedate(), self.user, self.command, namespace, name,
+ dateutil.makedate(), self.user, self.command, namespace, name,
oldhashes, newhashes)
vfs = self.vfs
@@ -348,7 +351,7 @@
# Read just enough bytes to get a version number (up to 2
# digits plus separator)
version = f.read(3).partition('\0')[0]
- if version and version != str(storageversion):
+ if version and version != "%d" % storageversion:
# different version of the storage. Exit early (and not
# write anything) if this is not a version we can handle or
# the file is corrupt. In future, perhaps rotate the file
@@ -358,9 +361,9 @@
return
if not version:
# empty file, write version first
- f.write(str(storageversion) + '\0')
+ f.write(("%d" % storageversion) + '\0')
f.seek(0, os.SEEK_END)
- f.write(str(entry) + '\0')
+ f.write(bytes(entry) + '\0')
def filtered(self, namespace=None, name=None):
"""Yield all journal entries with the given namespace or name
@@ -410,7 +413,7 @@
lines = raw.split('\0')
version = lines and lines[0]
- if version != str(storageversion):
+ if version != "%d" % storageversion:
version = version or _('not available')
raise error.Abort(_("unknown journal file version '%s'") % version)
@@ -478,7 +481,7 @@
displayname = "'%s'" % name
ui.status(_("previous locations of %s:\n") % displayname)
- limit = cmdutil.loglimit(opts)
+ limit = logcmdutil.getlimit(opts)
entry = None
ui.pager('journal')
for count, entry in enumerate(repo.journal.filtered(name=name)):
@@ -502,13 +505,13 @@
fm.write('command', ' %s\n', entry.command)
if opts.get("commits"):
- displayer = cmdutil.show_changeset(ui, repo, opts, buffered=False)
+ displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
for hash in entry.newhashes:
try:
ctx = repo[hash]
displayer.show(ctx)
except error.RepoLookupError as e:
- fm.write('repolookuperror', "%s\n\n", str(e))
+ fm.write('repolookuperror', "%s\n\n", pycompat.bytestr(e))
displayer.close()
fm.end()
--- a/hgext/keyword.py Thu Mar 15 22:35:07 2018 -0700
+++ b/hgext/keyword.py Mon Mar 19 08:07:18 2018 -0700
@@ -101,6 +101,7 @@
extensions,
filelog,
localrepo,
+ logcmdutil,
match,
patch,
pathutil,
@@ -110,6 +111,7 @@
templatefilters,
util,
)
+from mercurial.utils import dateutil
cmdtable = {}
command = registrar.command(cmdtable)
@@ -155,21 +157,23 @@
def utcdate(text):
'''Date. Returns a UTC-date in this format: "2009/08/18 11:00:13".
'''
- return util.datestr((util.parsedate(text)[0], 0), '%Y/%m/%d %H:%M:%S')
+ dateformat = '%Y/%m/%d %H:%M:%S'
+ return dateutil.datestr((dateutil.parsedate(text)[0], 0), dateformat)
# date like in svn's $Date
@templatefilter('svnisodate')
def svnisodate(text):
'''Date. Returns a date in this format: "2009-08-18 13:00:13
+0200 (Tue, 18 Aug 2009)".
'''
- return util.datestr(text, '%Y-%m-%d %H:%M:%S %1%2 (%a, %d %b %Y)')
+ return dateutil.datestr(text, '%Y-%m-%d %H:%M:%S %1%2 (%a, %d %b %Y)')
# date like in svn's $Id
@templatefilter('svnutcdate')
def svnutcdate(text):
'''Date. Returns a UTC-date in this format: "2009-08-18
11:00:13Z".
'''
- return util.datestr((util.parsedate(text)[0], 0), '%Y-%m-%d %H:%M:%SZ')
+ dateformat = '%Y-%m-%d %H:%M:%SZ'
+ return dateutil.datestr((dateutil.parsedate(text)[0], 0), dateformat)
# make keyword tools accessible
kwtools = {'hgcmd': ''}
@@ -254,7 +258,7 @@
'''Replaces keywords in data with expanded template.'''
def kwsub(mobj):
kw = mobj.group(1)
- ct = cmdutil.makelogtemplater(self.ui, self.repo,
+ ct = logcmdutil.maketemplater(self.ui, self.repo,
self.templates[kw])
self.ui.pushbuffer()
ct.show(ctx, root=self.repo.root, file=path)
@@ -610,14 +614,14 @@
if kwt:
kwt.restrict = restrict
-def kwweb_skip(orig, web, req, tmpl):
+def kwweb_skip(orig, web):
'''Wraps webcommands.x turning off keyword expansion.'''
kwt = getattr(web.repo, '_keywordkwt', None)
if kwt:
origmatch = kwt.match
kwt.match = util.never
try:
- for chunk in orig(web, req, tmpl):
+ for chunk in orig(web):
yield chunk
finally:
if kwt:
--- a/hgext/largefiles/lfcommands.py Thu Mar 15 22:35:07 2018 -0700
+++ b/hgext/largefiles/lfcommands.py Mon Mar 19 08:07:18 2018 -0700
@@ -365,7 +365,7 @@
at = 0
ui.debug("sending statlfile command for %d largefiles\n" % len(files))
retval = store.exists(files)
- files = filter(lambda h: not retval[h], files)
+ files = [h for h in files if not retval[h]]
ui.debug("%d largefiles need to be uploaded\n" % len(files))
for hash in files:
--- a/hgext/largefiles/lfutil.py Thu Mar 15 22:35:07 2018 -0700
+++ b/hgext/largefiles/lfutil.py Mon Mar 19 08:07:18 2018 -0700
@@ -15,6 +15,7 @@
import stat
from mercurial.i18n import _
+from mercurial.node import hex
from mercurial import (
dirstate,
@@ -371,7 +372,7 @@
for data in instream:
hasher.update(data)
outfile.write(data)
- return hasher.hexdigest()
+ return hex(hasher.digest())
def hashfile(file):
if not os.path.exists(file):
@@ -404,7 +405,7 @@
h = hashlib.sha1()
for chunk in util.filechunkiter(fileobj):
h.update(chunk)
- return h.hexdigest()
+ return hex(h.digest())
def httpsendfile(ui, filename):
return httpconnection.httpsendfile(ui, filename, 'rb')
--- a/hgext/largefiles/overrides.py Thu Mar 15 22:35:07 2018 -0700
+++ b/hgext/largefiles/overrides.py Mon Mar 19 08:07:18 2018 -0700
@@ -19,6 +19,7 @@
cmdutil,
error,
hg,
+ logcmdutil,
match as matchmod,
pathutil,
pycompat,
@@ -41,7 +42,7 @@
matcher'''
m = copy.copy(match)
lfile = lambda f: lfutil.standin(f) in manifest
- m._files = filter(lfile, m._files)
+ m._files = [lf for lf in m._files if lfile(lf)]
m._fileset = set(m._files)
m.always = lambda: False
origmatchfn = m.matchfn
@@ -56,7 +57,7 @@
m = copy.copy(match)
notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
manifest or f in excluded)
- m._files = filter(notlfile, m._files)
+ m._files = [lf for lf in m._files if notlfile(lf)]
m._fileset = set(m._files)
m.always = lambda: False
origmatchfn = m.matchfn
@@ -388,20 +389,20 @@
# (2) to determine what files to print out diffs for.
# The magic matchandpats override should be used for case (1) but not for
# case (2).
- def overridemakelogfilematcher(repo, pats, opts, badfn=None):
+ def overridemakefilematcher(repo, pats, opts, badfn=None):
wctx = repo[None]
match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
- return lambda rev: match
+ return lambda ctx: match
oldmatchandpats = installmatchandpatsfn(overridematchandpats)
- oldmakelogfilematcher = cmdutil._makenofollowlogfilematcher
- setattr(cmdutil, '_makenofollowlogfilematcher', overridemakelogfilematcher)
+ oldmakefilematcher = logcmdutil._makenofollowfilematcher
+ setattr(logcmdutil, '_makenofollowfilematcher', overridemakefilematcher)
try:
return orig(ui, repo, *pats, **opts)
finally:
restorematchandpatsfn()
- setattr(cmdutil, '_makenofollowlogfilematcher', oldmakelogfilematcher)
+ setattr(logcmdutil, '_makenofollowfilematcher', oldmakefilematcher)
def overrideverify(orig, ui, repo, *pats, **opts):
large = opts.pop(r'large', False)
@@ -597,7 +598,7 @@
try:
result = orig(ui, repo, pats, opts, rename)
except error.Abort as e:
- if str(e) != _('no files to copy'):
+ if pycompat.bytestr(e) != _('no files to copy'):
raise e
else:
nonormalfiles = True
@@ -704,7 +705,7 @@
lfdirstate.add(destlfile)
lfdirstate.write()
except error.Abort as e:
- if str(e) != _('no files to copy'):
+ if pycompat.bytestr(e) != _('no files to copy'):
raise e
else:
nolfiles = True
@@ -811,7 +812,7 @@
repo.firstpulled = revsprepull # for pulled() revset expression
try:
for rev in scmutil.revrange(repo, lfrevs):
- ui.note(_('pulling largefiles for revision %s\n') % rev)
+ ui.note(_('pulling largefiles for revision %d\n') % rev)
(cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
numcached += len(cached)
finally:
@@ -823,7 +824,7 @@
"""Override push command and store --lfrev parameters in opargs"""
lfrevs = kwargs.pop(r'lfrev', None)
if lfrevs:
- opargs = kwargs.setdefault('opargs', {})
+ opargs = kwargs.setdefault(r'opargs', {})
opargs['lfrevs'] = scmutil.revrange(repo, lfrevs)
return orig(ui, repo, *args, **kwargs)
@@ -931,11 +932,11 @@
finally:
repo.unfiltered().lfstatus = False
-def hgwebarchive(orig, web, req, tmpl):
+def hgwebarchive(orig, web):
web.repo.lfstatus = True
try:
- return orig(web, req, tmpl)
+ return orig(web)
finally:
web.repo.lfstatus = False
@@ -1076,9 +1077,9 @@
finally:
repo.lfstatus = False
-def cmdutilforget(orig, ui, repo, match, prefix, explicitonly):
+def cmdutilforget(orig, ui, repo, match, prefix, explicitonly, dryrun):
normalmatcher = composenormalfilematcher(match, repo[None].manifest())
- bad, forgot = orig(ui, repo, normalmatcher, prefix, explicitonly)
+ bad, forgot = orig(ui, repo, normalmatcher, prefix, explicitonly, dryrun)
m = composelargefilematcher(match, repo[None].manifest())
try:
@@ -1237,10 +1238,11 @@
matchfn = m.matchfn
m.matchfn = lambda f: f in s.deleted and matchfn(f)
- removelargefiles(repo.ui, repo, True, m, **opts)
+ removelargefiles(repo.ui, repo, True, m, **pycompat.strkwargs(opts))
# Call into the normal add code, and any files that *should* be added as
# largefiles will be
- added, bad = addlargefiles(repo.ui, repo, True, matcher, **opts)
+ added, bad = addlargefiles(repo.ui, repo, True, matcher,
+ **pycompat.strkwargs(opts))
# Now that we've handled largefiles, hand off to the original addremove
# function to take care of the rest. Make sure it doesn't do anything with
# largefiles by passing a matcher that will ignore them.
@@ -1358,8 +1360,7 @@
m.visitdir = lfvisitdirfn
for f in ctx.walk(m):
- with cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
- pathname=f) as fp:
+ with cmdutil.makefileobj(ctx, opts.get('output'), pathname=f) as fp:
lf = lfutil.splitstandin(f)
if lf is None or origmatchfn(f):
# duplicating unreachable code from commands.cat
--- a/hgext/largefiles/proto.py Thu Mar 15 22:35:07 2018 -0700
+++ b/hgext/largefiles/proto.py Mon Mar 19 08:07:18 2018 -0700
@@ -14,6 +14,7 @@
httppeer,
util,
wireproto,
+ wireprototypes,
)
from . import (
@@ -34,27 +35,26 @@
def putlfile(repo, proto, sha):
'''Server command for putting a largefile into a repository's local store
and into the user cache.'''
- proto.redirect()
-
- path = lfutil.storepath(repo, sha)
- util.makedirs(os.path.dirname(path))
- tmpfp = util.atomictempfile(path, createmode=repo.store.createmode)
+ with proto.mayberedirectstdio() as output:
+ path = lfutil.storepath(repo, sha)
+ util.makedirs(os.path.dirname(path))
+ tmpfp = util.atomictempfile(path, createmode=repo.store.createmode)
- try:
- proto.getfile(tmpfp)
- tmpfp._fp.seek(0)
- if sha != lfutil.hexsha1(tmpfp._fp):
- raise IOError(0, _('largefile contents do not match hash'))
- tmpfp.close()
- lfutil.linktousercache(repo, sha)
- except IOError as e:
- repo.ui.warn(_('largefiles: failed to put %s into store: %s\n') %
- (sha, e.strerror))
- return wireproto.pushres(1)
- finally:
- tmpfp.discard()
+ try:
+ proto.forwardpayload(tmpfp)
+ tmpfp._fp.seek(0)
+ if sha != lfutil.hexsha1(tmpfp._fp):
+ raise IOError(0, _('largefile contents do not match hash'))
+ tmpfp.close()
+ lfutil.linktousercache(repo, sha)
+ except IOError as e:
+ repo.ui.warn(_('largefiles: failed to put %s into store: %s\n') %
+ (sha, e.strerror))
+ return wireproto.pushres(1, output.getvalue() if output else '')
+ finally:
+ tmpfp.discard()
- return wireproto.pushres(0)
+ return wireproto.pushres(0, output.getvalue() if output else '')
def getlfile(repo, proto, sha):
'''Server command for retrieving a largefile from the repository-local
@@ -86,8 +86,8 @@
server side.'''
filename = lfutil.findfile(repo, sha)
if not filename:
- return '2\n'
- return '0\n'
+ return wireprototypes.bytesresponse('2\n')
+ return wireprototypes.bytesresponse('0\n')
def wirereposetup(ui, repo):
class lfileswirerepository(repo.__class__):
@@ -97,7 +97,7 @@
# it ...
if issubclass(self.__class__, httppeer.httppeer):
res = self._call('putlfile', data=fd, sha=sha,
- headers={'content-type':'application/mercurial-0.1'})
+ headers={r'content-type': r'application/mercurial-0.1'})
try:
d, output = res.split('\n', 1)
for l in output.splitlines(True):
@@ -180,7 +180,7 @@
args[r'cmds'] = args[r'cmds'].replace('heads ', 'lheads ')
return ssholdcallstream(self, cmd, **args)
-headsre = re.compile(r'(^|;)heads\b')
+headsre = re.compile(br'(^|;)heads\b')
def httprepocallstream(self, cmd, **args):
if cmd == 'heads' and self.capable('largefiles'):
--- a/hgext/largefiles/remotestore.py Thu Mar 15 22:35:07 2018 -0700
+++ b/hgext/largefiles/remotestore.py Mon Mar 19 08:07:18 2018 -0700
@@ -52,7 +52,7 @@
except IOError as e:
raise error.Abort(
_('remotestore: could not open file %s: %s')
- % (filename, str(e)))
+ % (filename, util.forcebytestr(e)))
def _getfile(self, tmpfile, filename, hash):
try:
@@ -60,7 +60,8 @@
except urlerr.httperror as e:
# 401s get converted to error.Aborts; everything else is fine being
# turned into a StoreError
- raise basestore.StoreError(filename, hash, self.url, str(e))
+ raise basestore.StoreError(filename, hash, self.url,
+ util.forcebytestr(e))
except urlerr.urlerror as e:
# This usually indicates a connection problem, so don't
# keep trying with the other files... they will probably
@@ -68,7 +69,8 @@
raise error.Abort('%s: %s' %
(util.hidepassword(self.url), e.reason))
except IOError as e:
- raise basestore.StoreError(filename, hash, self.url, str(e))
+ raise basestore.StoreError(filename, hash, self.url,
+ util.forcebytestr(e))
return lfutil.copyandhash(chunks, tmpfile)
--- a/hgext/largefiles/storefactory.py Thu Mar 15 22:35:07 2018 -0700
+++ b/hgext/largefiles/storefactory.py Mon Mar 19 08:07:18 2018 -0700
@@ -80,7 +80,7 @@
'ssh': [wirestore.wirestore],
}
-_scheme_re = re.compile(r'^([a-zA-Z0-9+-.]+)://')
+_scheme_re = re.compile(br'^([a-zA-Z0-9+-.]+)://')
def getlfile(ui, hash):
return util.chunkbuffer(openstore(ui=ui)._get(hash))
--- a/hgext/largefiles/uisetup.py Thu Mar 15 22:35:07 2018 -0700
+++ b/hgext/largefiles/uisetup.py Mon Mar 19 08:07:18 2018 -0700
@@ -164,20 +164,17 @@
overrides.openlargefile)
# create the new wireproto commands ...
- wireproto.commands['putlfile'] = (proto.putlfile, 'sha')
- wireproto.commands['getlfile'] = (proto.getlfile, 'sha')
- wireproto.commands['statlfile'] = (proto.statlfile, 'sha')
+ wireproto.wireprotocommand('putlfile', 'sha', permission='push')(
+ proto.putlfile)
+ wireproto.wireprotocommand('getlfile', 'sha', permission='pull')(
+ proto.getlfile)
+ wireproto.wireprotocommand('statlfile', 'sha', permission='pull')(
+ proto.statlfile)
+ wireproto.wireprotocommand('lheads', '', permission='pull')(
+ wireproto.heads)
# ... and wrap some existing ones
- wireproto.commands['heads'] = (proto.heads, '')
- wireproto.commands['lheads'] = (wireproto.heads, '')
-
- # make putlfile behave the same as push and {get,stat}lfile behave
- # the same as pull w.r.t. permissions checks
- wireproto.permissions['putlfile'] = 'push'
- wireproto.permissions['getlfile'] = 'pull'
- wireproto.permissions['statlfile'] = 'pull'
- wireproto.permissions['lheads'] = 'pull'
+ wireproto.commands['heads'].func = proto.heads
extensions.wrapfunction(webcommands, 'decodepath', overrides.decodepath)
@@ -185,9 +182,9 @@
# can't do this in reposetup because it needs to have happened before
# wirerepo.__init__ is called
- proto.ssholdcallstream = sshpeer.sshpeer._callstream
+ proto.ssholdcallstream = sshpeer.sshv1peer._callstream
proto.httpoldcallstream = httppeer.httppeer._callstream
- sshpeer.sshpeer._callstream = proto.sshrepocallstream
+ sshpeer.sshv1peer._callstream = proto.sshrepocallstream
httppeer.httppeer._callstream = proto.httprepocallstream
# override some extensions' stuff as well
--- a/hgext/lfs/__init__.py Thu Mar 15 22:35:07 2018 -0700
+++ b/hgext/lfs/__init__.py Mon Mar 19 08:07:18 2018 -0700
@@ -143,7 +143,7 @@
registrar,
revlog,
scmutil,
- templatekw,
+ templateutil,
upgrade,
util,
vfs as vfsmod,
@@ -192,6 +192,7 @@
command = registrar.command(cmdtable)
templatekeyword = registrar.templatekeyword()
+filesetpredicate = registrar.filesetpredicate()
def featuresetup(ui, supported):
# don't die on seeing a repo with the lfs requirement
@@ -211,7 +212,7 @@
class lfsrepo(repo.__class__):
@localrepo.unfilteredmethod
def commitctx(self, ctx, error=False):
- repo.svfs.options['lfstrack'] = _trackedmatcher(self, ctx)
+ repo.svfs.options['lfstrack'] = _trackedmatcher(self)
return super(lfsrepo, self).commitctx(ctx, error)
repo.__class__ = lfsrepo
@@ -219,12 +220,12 @@
if 'lfs' not in repo.requirements:
def checkrequireslfs(ui, repo, **kwargs):
if 'lfs' not in repo.requirements:
- last = kwargs.get('node_last')
+ last = kwargs.get(r'node_last')
_bin = node.bin
if last:
- s = repo.set('%n:%n', _bin(kwargs['node']), _bin(last))
+ s = repo.set('%n:%n', _bin(kwargs[r'node']), _bin(last))
else:
- s = repo.set('%n', _bin(kwargs['node']))
+ s = repo.set('%n', _bin(kwargs[r'node']))
for ctx in s:
# TODO: is there a way to just walk the files in the commit?
if any(ctx[f].islfs() for f in ctx.files() if f in ctx):
@@ -238,7 +239,7 @@
else:
repo.prepushoutgoinghooks.add('lfs', wrapper.prepush)
-def _trackedmatcher(repo, ctx):
+def _trackedmatcher(repo):
"""Return a function (path, size) -> bool indicating whether or not to
track a given file with lfs."""
if not repo.wvfs.exists('.hglfs'):
@@ -331,6 +332,8 @@
wrapfunction(hg, 'clone', wrapper.hgclone)
wrapfunction(hg, 'postshare', wrapper.hgpostshare)
+ scmutil.fileprefetchhooks.add('lfs', wrapper._prefetchfiles)
+
# Make bundle choose changegroup3 instead of changegroup2. This affects
# "hg bundle" command. Note: it does not cover all bundle formats like
# "packed1". Using "packed1" with lfs will likely cause trouble.
@@ -345,12 +348,22 @@
# when writing a bundle via "hg bundle" command, upload related LFS blobs
wrapfunction(bundle2, 'writenewbundle', wrapper.writenewbundle)
-@templatekeyword('lfs_files')
-def lfsfiles(repo, ctx, **args):
- """List of strings. LFS files added or modified by the changeset."""
- args = pycompat.byteskwargs(args)
+@filesetpredicate('lfs()', callstatus=True)
+def lfsfileset(mctx, x):
+ """File that uses LFS storage."""
+ # i18n: "lfs" is a keyword
+ fileset.getargs(x, 0, 0, _("lfs takes no arguments"))
+ return [f for f in mctx.subset
+ if wrapper.pointerfromctx(mctx.ctx, f, removed=True) is not None]
- pointers = wrapper.pointersfromctx(ctx) # {path: pointer}
+@templatekeyword('lfs_files', requires={'ctx', 'templ'})
+def lfsfiles(context, mapping):
+ """List of strings. All files modified, added, or removed by this
+ changeset."""
+ ctx = context.resource(mapping, 'ctx')
+ templ = context.resource(mapping, 'templ')
+
+ pointers = wrapper.pointersfromctx(ctx, removed=True) # {path: pointer}
files = sorted(pointers.keys())
def pointer(v):
@@ -361,18 +374,18 @@
makemap = lambda v: {
'file': v,
- 'lfsoid': pointers[v].oid(),
- 'lfspointer': templatekw.hybriddict(pointer(v)),
+ 'lfsoid': pointers[v].oid() if pointers[v] else None,
+ 'lfspointer': templateutil.hybriddict(pointer(v)),
}
# TODO: make the separator ', '?
- f = templatekw._showlist('lfs_file', files, args)
- return templatekw._hybrid(f, files, makemap, pycompat.identity)
+ f = templateutil._showlist('lfs_file', files, templ, mapping)
+ return templateutil.hybrid(f, files, makemap, pycompat.identity)
@command('debuglfsupload',
[('r', 'rev', [], _('upload large files introduced by REV'))])
def debuglfsupload(ui, repo, **opts):
"""upload lfs blobs added by the working copy parent or given revisions"""
- revs = opts.get('rev', [])
+ revs = opts.get(r'rev', [])
pointers = wrapper.extractpointers(repo, scmutil.revrange(repo, revs))
wrapper.uploadblobs(repo, pointers)
--- a/hgext/lfs/blobstore.py Thu Mar 15 22:35:07 2018 -0700
+++ b/hgext/lfs/blobstore.py Mon Mar 19 08:07:18 2018 -0700
@@ -18,6 +18,7 @@
from mercurial import (
error,
pathutil,
+ pycompat,
url as urlmod,
util,
vfs as vfsmod,
@@ -27,7 +28,7 @@
from ..largefiles import lfutil
# 64 bytes for SHA256
-_lfsre = re.compile(r'\A[a-f0-9]{64}\Z')
+_lfsre = re.compile(br'\A[a-f0-9]{64}\Z')
class lfsvfs(vfsmod.vfs):
def join(self, path):
@@ -194,11 +195,11 @@
def writebatch(self, pointers, fromstore):
"""Batch upload from local to remote blobstore."""
- self._batch(pointers, fromstore, 'upload')
+ self._batch(_deduplicate(pointers), fromstore, 'upload')
def readbatch(self, pointers, tostore):
"""Batch download from remote to local blostore."""
- self._batch(pointers, tostore, 'download')
+ self._batch(_deduplicate(pointers), tostore, 'download')
def _batchrequest(self, pointers, action):
"""Get metadata about objects pointed by pointers for given action
@@ -216,7 +217,8 @@
batchreq.add_header('Accept', 'application/vnd.git-lfs+json')
batchreq.add_header('Content-Type', 'application/vnd.git-lfs+json')
try:
- rawjson = self.urlopener.open(batchreq).read()
+ rsp = self.urlopener.open(batchreq)
+ rawjson = rsp.read()
except util.urlerr.httperror as ex:
raise LfsRemoteError(_('LFS HTTP error: %s (action=%s)')
% (ex, action))
@@ -225,6 +227,19 @@
except ValueError:
raise LfsRemoteError(_('LFS server returns invalid JSON: %s')
% rawjson)
+
+ if self.ui.debugflag:
+ self.ui.debug('Status: %d\n' % rsp.status)
+ # lfs-test-server and hg serve return headers in different order
+ self.ui.debug('%s\n'
+ % '\n'.join(sorted(str(rsp.info()).splitlines())))
+
+ if 'objects' in response:
+ response['objects'] = sorted(response['objects'],
+ key=lambda p: p['oid'])
+ self.ui.debug('%s\n'
+ % json.dumps(response, indent=2, sort_keys=True))
+
return response
def _checkforservererror(self, pointers, responses, action):
@@ -281,9 +296,9 @@
See https://github.com/git-lfs/git-lfs/blob/master/docs/api/\
basic-transfers.md
"""
- oid = str(obj['oid'])
+ oid = pycompat.bytestr(obj['oid'])
- href = str(obj['actions'][action].get('href'))
+ href = pycompat.bytestr(obj['actions'][action].get('href'))
headers = obj['actions'][action].get('header', {}).items()
request = util.urlreq.request(href)
@@ -300,6 +315,13 @@
response = b''
try:
req = self.urlopener.open(request)
+
+ if self.ui.debugflag:
+ self.ui.debug('Status: %d\n' % req.status)
+ # lfs-test-server and hg serve return headers in different order
+ self.ui.debug('%s\n'
+ % '\n'.join(sorted(str(req.info()).splitlines())))
+
if action == 'download':
# If downloading blobs, store downloaded data to local blobstore
localstore.download(oid, req)
@@ -366,12 +388,23 @@
oids = transfer(sorted(objects, key=lambda o: o.get('oid')))
processed = 0
+ blobs = 0
for _one, oid in oids:
processed += sizes[oid]
+ blobs += 1
self.ui.progress(topic, processed, total=total)
self.ui.note(_('lfs: processed: %s\n') % oid)
self.ui.progress(topic, pos=None, total=total)
+ if blobs > 0:
+ if action == 'upload':
+ self.ui.status(_('lfs: uploaded %d files (%s)\n')
+ % (blobs, util.bytecount(processed)))
+ # TODO: coalesce the download requests, and comment this in
+ #elif action == 'download':
+ # self.ui.status(_('lfs: downloaded %d files (%s)\n')
+ # % (blobs, util.bytecount(processed)))
+
def __del__(self):
# copied from mercurial/httppeer.py
urlopener = getattr(self, 'urlopener', None)
@@ -388,13 +421,13 @@
self.vfs = lfsvfs(fullpath)
def writebatch(self, pointers, fromstore):
- for p in pointers:
+ for p in _deduplicate(pointers):
content = fromstore.read(p.oid(), verify=True)
with self.vfs(p.oid(), 'wb', atomictemp=True) as fp:
fp.write(content)
def readbatch(self, pointers, tostore):
- for p in pointers:
+ for p in _deduplicate(pointers):
with self.vfs(p.oid(), 'rb') as fp:
tostore.download(p.oid(), fp)
@@ -433,6 +466,13 @@
None: _promptremote,
}
+def _deduplicate(pointers):
+ """Remove any duplicate oids that exist in the list"""
+ reduced = util.sortdict()
+ for p in pointers:
+ reduced[p.oid()] = p
+ return reduced.values()
+
def _verify(oid, content):
realoid = hashlib.sha256(content).hexdigest()
if realoid != oid:
--- a/hgext/lfs/pointer.py Thu Mar 15 22:35:07 2018 -0700
+++ b/hgext/lfs/pointer.py Mon Mar 19 08:07:18 2018 -0700
@@ -13,6 +13,7 @@
from mercurial import (
error,
+ pycompat,
)
class InvalidPointer(error.RevlogError):
@@ -23,7 +24,8 @@
def __init__(self, *args, **kwargs):
self['version'] = self.VERSION
- super(gitlfspointer, self).__init__(*args, **kwargs)
+ super(gitlfspointer, self).__init__(*args)
+ self.update(pycompat.byteskwargs(kwargs))
@classmethod
def deserialize(cls, text):
@@ -45,12 +47,12 @@
# regular expressions used by _validate
# see https://github.com/git-lfs/git-lfs/blob/master/docs/spec.md
- _keyre = re.compile(r'\A[a-z0-9.-]+\Z')
- _valuere = re.compile(r'\A[^\n]*\Z')
+ _keyre = re.compile(br'\A[a-z0-9.-]+\Z')
+ _valuere = re.compile(br'\A[^\n]*\Z')
_requiredre = {
- 'size': re.compile(r'\A[0-9]+\Z'),
- 'oid': re.compile(r'\Asha256:[0-9a-f]{64}\Z'),
- 'version': re.compile(r'\A%s\Z' % re.escape(VERSION)),
+ 'size': re.compile(br'\A[0-9]+\Z'),
+ 'oid': re.compile(br'\Asha256:[0-9a-f]{64}\Z'),
+ 'version': re.compile(br'\A%s\Z' % re.escape(VERSION)),
}
def validate(self):
--- a/hgext/lfs/wrapper.py Thu Mar 15 22:35:07 2018 -0700
+++ b/hgext/lfs/wrapper.py Mon Mar 19 08:07:18 2018 -0700
@@ -10,7 +10,7 @@
import hashlib
from mercurial.i18n import _
-from mercurial.node import bin, nullid, short
+from mercurial.node import bin, hex, nullid, short
from mercurial import (
error,
@@ -85,12 +85,12 @@
text = text[offset:]
# git-lfs only supports sha256
- oid = hashlib.sha256(text).hexdigest()
+ oid = hex(hashlib.sha256(text).digest())
self.opener.lfslocalblobstore.write(oid, text)
# replace contents with metadata
longoid = 'sha256:%s' % oid
- metadata = pointer.gitlfspointer(oid=longoid, size=str(len(text)))
+ metadata = pointer.gitlfspointer(oid=longoid, size='%d' % len(text))
# by default, we expect the content to be binary. however, LFS could also
# be used for non-binary content. add a special entry for non-binary data.
@@ -249,6 +249,21 @@
if 'lfs' in destrepo.requirements:
destrepo.vfs.append('hgrc', util.tonativeeol('\n[extensions]\nlfs=\n'))
+def _prefetchfiles(repo, ctx, files):
+ """Ensure that required LFS blobs are present, fetching them as a group if
+ needed."""
+ pointers = []
+ localstore = repo.svfs.lfslocalblobstore
+
+ for f in files:
+ p = pointerfromctx(ctx, f)
+ if p and not localstore.has(p.oid()):
+ p.filename = f
+ pointers.append(p)
+
+ if pointers:
+ repo.svfs.lfsremoteblobstore.readbatch(pointers, localstore)
+
def _canskipupload(repo):
# if remotestore is a null store, upload is a no-op and can be skipped
return isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote)
@@ -307,20 +322,47 @@
pointers[p.oid()] = p
return sorted(pointers.values())
-def pointersfromctx(ctx):
- """return a dict {path: pointer} for given single changectx"""
+def pointerfromctx(ctx, f, removed=False):
+ """return a pointer for the named file from the given changectx, or None if
+ the file isn't LFS.
+
+ Optionally, the pointer for a file deleted from the context can be returned.
+ Since no such pointer is actually stored, and to distinguish from a non LFS
+ file, this pointer is represented by an empty dict.
+ """
+ _ctx = ctx
+ if f not in ctx:
+ if not removed:
+ return None
+ if f in ctx.p1():
+ _ctx = ctx.p1()
+ elif f in ctx.p2():
+ _ctx = ctx.p2()
+ else:
+ return None
+ fctx = _ctx[f]
+ if not _islfs(fctx.filelog(), fctx.filenode()):
+ return None
+ try:
+ p = pointer.deserialize(fctx.rawdata())
+ if ctx == _ctx:
+ return p
+ return {}
+ except pointer.InvalidPointer as ex:
+ raise error.Abort(_('lfs: corrupted pointer (%s@%s): %s\n')
+ % (f, short(_ctx.node()), ex))
+
+def pointersfromctx(ctx, removed=False):
+ """return a dict {path: pointer} for given single changectx.
+
+ If ``removed`` == True and the LFS file was removed from ``ctx``, the value
+ stored for the path is an empty dict.
+ """
result = {}
for f in ctx.files():
- if f not in ctx:
- continue
- fctx = ctx[f]
- if not _islfs(fctx.filelog(), fctx.filenode()):
- continue
- try:
- result[f] = pointer.deserialize(fctx.rawdata())
- except pointer.InvalidPointer as ex:
- raise error.Abort(_('lfs: corrupted pointer (%s@%s): %s\n')
- % (f, short(ctx.node()), ex))
+ p = pointerfromctx(ctx, f, removed=removed)
+ if p is not None:
+ result[f] = p
return result
def uploadblobs(repo, pointers):
--- a/hgext/mq.py Thu Mar 15 22:35:07 2018 -0700
+++ b/hgext/mq.py Mon Mar 19 08:07:18 2018 -0700
@@ -86,6 +86,7 @@
hg,
localrepo,
lock as lockmod,
+ logcmdutil,
patch as patchmod,
phases,
pycompat,
@@ -93,10 +94,11 @@
revsetlang,
scmutil,
smartset,
- subrepo,
+ subrepoutil,
util,
vfs as vfsmod,
)
+from mercurial.utils import dateutil
release = lockmod.release
seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
@@ -148,9 +150,13 @@
class statusentry(object):
def __init__(self, node, name):
self.node, self.name = node, name
- def __repr__(self):
+
+ def __bytes__(self):
return hex(self.node) + ':' + self.name
+ __str__ = encoding.strmethod(__bytes__)
+ __repr__ = encoding.strmethod(__bytes__)
+
# The order of the headers in 'hg export' HG patches:
HGHEADERS = [
# '# HG changeset patch',
@@ -276,7 +282,7 @@
nodeid = None
diffstart = 0
- for line in file(pf):
+ for line in open(pf, 'rb'):
line = line.rstrip()
if (line.startswith('diff --git')
or (diffstart and line.startswith('+++ '))):
@@ -391,12 +397,14 @@
self.comments.append('')
self.comments.append(message)
- def __str__(self):
+ def __bytes__(self):
s = '\n'.join(self.comments).rstrip()
if not s:
return ''
return s + '\n\n'
+ __str__ = encoding.strmethod(__bytes__)
+
def _delmsg(self):
'''Remove existing message, keeping the rest of the comments fields.
If comments contains 'subject: ', message will prepend
@@ -438,9 +446,9 @@
def __init__(self, ui, baseui, path, patchdir=None):
self.basepath = path
try:
- fh = open(os.path.join(path, 'patches.queue'))
- cur = fh.read().rstrip()
- fh.close()
+ with open(os.path.join(path, 'patches.queue'), r'rb') as fh:
+ cur = fh.read().rstrip()
+
if not cur:
curpath = os.path.join(path, 'patches')
else:
@@ -546,10 +554,8 @@
for patchfn in patches:
patchf = self.opener(patchfn, 'r')
# if the patch was a git patch, refresh it as a git patch
- for line in patchf:
- if line.startswith('diff --git'):
- diffopts.git = True
- break
+ diffopts.git = any(line.startswith('diff --git')
+ for line in patchf)
patchf.close()
return diffopts
@@ -643,7 +649,7 @@
self.seriesdirty = True
def pushable(self, idx):
- if isinstance(idx, str):
+ if isinstance(idx, bytes):
idx = self.series.index(idx)
patchguards = self.seriesguards[idx]
if not patchguards:
@@ -691,12 +697,12 @@
def savedirty(self):
def writelist(items, path):
- fp = self.opener(path, 'w')
+ fp = self.opener(path, 'wb')
for i in items:
fp.write("%s\n" % i)
fp.close()
if self.applieddirty:
- writelist(map(str, self.applied), self.statuspath)
+ writelist(map(bytes, self.applied), self.statuspath)
self.applieddirty = False
if self.seriesdirty:
writelist(self.fullseries, self.seriespath)
@@ -717,7 +723,8 @@
try:
os.unlink(undo)
except OSError as inst:
- self.ui.warn(_('error removing undo: %s\n') % str(inst))
+ self.ui.warn(_('error removing undo: %s\n') %
+ util.forcebytestr(inst))
def backup(self, repo, files, copy=False):
# backup local changes in --force case
@@ -739,8 +746,8 @@
opts = {}
stat = opts.get('stat')
m = scmutil.match(repo[node1], files, opts)
- cmdutil.diffordiffstat(self.ui, repo, diffopts, node1, node2, m,
- changes, stat, fp)
+ logcmdutil.diffordiffstat(self.ui, repo, diffopts, node1, node2, m,
+ changes, stat, fp)
def mergeone(self, repo, mergeq, head, patch, rev, diffopts):
# first try just applying the patch
@@ -773,7 +780,7 @@
diffopts = self.patchopts(diffopts, patch)
patchf = self.opener(patch, "w")
- comments = str(ph)
+ comments = bytes(ph)
if comments:
patchf.write(comments)
self.printdiff(repo, diffopts, head, n, fp=patchf)
@@ -850,7 +857,7 @@
files=files, eolmode=None)
return (True, list(files), fuzz)
except Exception as inst:
- self.ui.note(str(inst) + '\n')
+ self.ui.note(util.forcebytestr(inst) + '\n')
if not self.ui.verbose:
self.ui.warn(_("patch failed, unable to continue (try -v)\n"))
self.ui.traceback()
@@ -963,8 +970,8 @@
wctx = repo[None]
pctx = repo['.']
overwrite = False
- mergedsubstate = subrepo.submerge(repo, pctx, wctx, wctx,
- overwrite)
+ mergedsubstate = subrepoutil.submerge(repo, pctx, wctx, wctx,
+ overwrite)
files += mergedsubstate.keys()
match = scmutil.matchfiles(repo, files or [])
@@ -1178,7 +1185,7 @@
except error.Abort:
pass
i += 1
- name = '%s__%s' % (namebase, i)
+ name = '%s__%d' % (namebase, i)
return name
def checkkeepchanges(self, keepchanges, force):
@@ -1189,13 +1196,14 @@
"""options:
msg: a string or a no-argument function returning a string
"""
+ opts = pycompat.byteskwargs(opts)
msg = opts.get('msg')
edit = opts.get('edit')
editform = opts.get('editform', 'mq.qnew')
user = opts.get('user')
date = opts.get('date')
if date:
- date = util.parsedate(date)
+ date = dateutil.parsedate(date)
diffopts = self.diffopts({'git': opts.get('git')}, plain=True)
if opts.get('checkname', True):
self.checkpatchname(patchfn)
@@ -1259,13 +1267,13 @@
if user:
ph.setuser(user)
if date:
- ph.setdate('%s %s' % date)
+ ph.setdate('%d %d' % date)
ph.setparent(hex(nctx.p1().node()))
msg = nctx.description().strip()
if msg == defaultmsg.strip():
msg = ''
ph.setmessage(msg)
- p.write(str(ph))
+ p.write(bytes(ph))
if commitfiles:
parent = self.qparents(repo, n)
if inclsubs:
@@ -1550,12 +1558,8 @@
update = True
else:
parents = [p.node() for p in repo[None].parents()]
- needupdate = False
- for entry in self.applied[start:]:
- if entry.node in parents:
- needupdate = True
- break
- update = needupdate
+ update = any(entry.node in parents
+ for entry in self.applied[start:])
tobackup = set()
if update:
@@ -1632,6 +1636,7 @@
self.printdiff(repo, diffopts, node1, node2, files=pats, opts=opts)
def refresh(self, repo, pats=None, **opts):
+ opts = pycompat.byteskwargs(opts)
if not self.applied:
self.ui.write(_("no patches applied\n"))
return 1
@@ -1641,7 +1646,7 @@
newuser = opts.get('user')
newdate = opts.get('date')
if newdate:
- newdate = '%d %d' % util.parsedate(newdate)
+ newdate = '%d %d' % dateutil.parsedate(newdate)
wlock = repo.wlock()
try:
@@ -1846,7 +1851,7 @@
self.putsubstate2changes(substatestate, c)
chunks = patchmod.diff(repo, patchparent,
changes=c, opts=diffopts)
- comments = str(ph)
+ comments = bytes(ph)
if comments:
patchf.write(comments)
for chunk in chunks:
@@ -1927,7 +1932,7 @@
length = len(self.series) - start
if not missing:
if self.ui.verbose:
- idxwidth = len(str(start + length - 1))
+ idxwidth = len("%d" % (start + length - 1))
for i in xrange(start, start + length):
patch = self.series[i]
if patch in applied:
@@ -2093,7 +2098,7 @@
if not self.ui.verbose:
p = pname
else:
- p = str(self.series.index(pname)) + " " + pname
+ p = ("%d" % self.series.index(pname)) + " " + pname
return p
def qimport(self, repo, files, patchname=None, rev=None, existing=None,
@@ -2260,7 +2265,7 @@
To stop managing a patch and move it into permanent history,
use the :hg:`qfinish` command."""
q = repo.mq
- q.delete(repo, patches, opts)
+ q.delete(repo, patches, pycompat.byteskwargs(opts))
q.savedirty()
return 0
@@ -2593,7 +2598,7 @@
if not opts.get('user') and opts.get('currentuser'):
opts['user'] = ui.username()
if not opts.get('date') and opts.get('currentdate'):
- opts['date'] = "%d %d" % util.makedate()
+ opts['date'] = "%d %d" % dateutil.makedate()
@command("^qnew",
[('e', 'edit', None, _('invoke editor on commit messages')),
@@ -3189,7 +3194,7 @@
guards[g] += 1
if ui.verbose:
guards['NONE'] = noguards
- guards = guards.items()
+ guards = list(guards.items())
guards.sort(key=lambda x: x[0][1:])
if guards:
ui.note(_('guards in series file:\n'))
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/narrow/TODO.rst Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,37 @@
+Integration with the share extension needs improvement. Right now
+we've seen some odd bugs, and the way we modify the contents of the
+.hg/shared file is unfortunate. See wrappostshare() and unsharenarrowspec().
+
+Resolve commentary on narrowrepo.wraprepo.narrowrepository.status
+about the filtering of status being done at an awkward layer. This
+came up during the import to hgext, but nobody had concrete improvement
+ideas at the time.
+
+Fold most (or preferably all) of narrowrevlog.py into core.
+
+Address commentary in narrowrevlog.excludedmanifestrevlog.add -
+specifically we should improve the collaboration with core so that
+add() never gets called on an excluded directory and we can improve
+the stand-in to raise a ProgrammingError.
+
+Figure out how to correctly produce narrowmanifestrevlog and
+narrowfilelog instances instead of monkeypatching regular revlogs at
+runtime to our subclass. Even better, merge the narrowing logic
+directly into core.
+
+Reason more completely about rename-filtering logic in
+narrowfilelog. There could be some surprises lurking there.
+
+Formally document the narrowspec format. Unify with sparse, if at all
+possible. For bonus points, unify with the server-specified narrowspec
+format.
+
+narrowrepo.setnarrowpats() or narrowspec.save() need to make sure
+they're holding the wlock.
+
+Implement a simple version of the expandnarrow wireproto command for
+core. Having configurable shorthands for narrowspecs has been useful
+at Google (and sparse has a similar feature from Facebook), so it
+probably makes sense to implement the feature in core. (Google's
+handler is entirely custom to Google, with a custom format related to
+bazel's build language, so it's not in the narrowhg distribution.)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/narrow/__init__.py Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,95 @@
+# __init__.py - narrowhg extension
+#
+# Copyright 2017 Google, Inc.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+'''create clones which fetch history data for subset of files (EXPERIMENTAL)'''
+
+from __future__ import absolute_import
+
+# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
+# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
+# be specifying the version(s) of Mercurial they are tested with, or
+# leave the attribute unspecified.
+testedwith = 'ships-with-hg-core'
+
+from mercurial import (
+ changegroup,
+ extensions,
+ hg,
+ localrepo,
+ registrar,
+ verify as verifymod,
+)
+
+from . import (
+ narrowbundle2,
+ narrowchangegroup,
+ narrowcommands,
+ narrowcopies,
+ narrowdirstate,
+ narrowmerge,
+ narrowpatch,
+ narrowrepo,
+ narrowrevlog,
+ narrowtemplates,
+ narrowwirepeer,
+)
+
+configtable = {}
+configitem = registrar.configitem(configtable)
+# Narrowhg *has* support for serving ellipsis nodes (which are used at
+# least by Google's internal server), but that support is pretty
+# fragile and has a lot of problems on real-world repositories that
+# have complex graph topologies. This could probably be corrected, but
+# absent someone needing the full support for ellipsis nodes in
+# repositories with merges, it's unlikely this work will get done. As
+# of this writing in late 2017, all repositories large enough for
+# ellipsis nodes to be a hard requirement also enforce strictly linear
+# history for other scaling reasons.
+configitem('experimental', 'narrowservebrokenellipses',
+ default=False,
+ alias=[('narrow', 'serveellipses')],
+)
+
+# Export the commands table for Mercurial to see.
+cmdtable = narrowcommands.table
+
+localrepo.localrepository._basesupported.add(changegroup.NARROW_REQUIREMENT)
+
+def uisetup(ui):
+ """Wraps user-facing mercurial commands with narrow-aware versions."""
+ narrowrevlog.setup()
+ narrowbundle2.setup()
+ narrowmerge.setup()
+ narrowcommands.setup()
+ narrowchangegroup.setup()
+ narrowwirepeer.uisetup()
+
+def reposetup(ui, repo):
+ """Wraps local repositories with narrow repo support."""
+ if not isinstance(repo, localrepo.localrepository):
+ return
+
+ narrowrepo.wraprepo(repo)
+ if changegroup.NARROW_REQUIREMENT in repo.requirements:
+ narrowcopies.setup(repo)
+ narrowdirstate.setup(repo)
+ narrowpatch.setup(repo)
+ narrowwirepeer.reposetup(repo)
+
+def _verifierinit(orig, self, repo, matcher=None):
+    # The verifier's matcher argument was designed for narrowhg, so it should
+ # be None from core. If another extension passes a matcher (unlikely),
+ # we'll have to fail until matchers can be composed more easily.
+ assert matcher is None
+ orig(self, repo, repo.narrowmatch())
+
+def extsetup(ui):
+ extensions.wrapfunction(verifymod.verifier, '__init__', _verifierinit)
+ extensions.wrapfunction(hg, 'postshare', narrowrepo.wrappostshare)
+ extensions.wrapfunction(hg, 'copystore', narrowrepo.unsharenarrowspec)
+
+templatekeyword = narrowtemplates.templatekeyword
+revsetpredicate = narrowtemplates.revsetpredicate
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/narrow/narrowbundle2.py Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,502 @@
+# narrowbundle2.py - bundle2 extensions for narrow repository support
+#
+# Copyright 2017 Google, Inc.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+import collections
+import errno
+import struct
+
+from mercurial.i18n import _
+from mercurial.node import (
+ bin,
+ nullid,
+ nullrev,
+)
+from mercurial import (
+ bundle2,
+ changegroup,
+ dagutil,
+ error,
+ exchange,
+ extensions,
+ narrowspec,
+ repair,
+ util,
+ wireproto,
+)
+
+NARROWCAP = 'narrow'
+_NARROWACL_SECTION = 'narrowhgacl'
+_CHANGESPECPART = NARROWCAP + ':changespec'
+_SPECPART = NARROWCAP + ':spec'
+_SPECPART_INCLUDE = 'include'
+_SPECPART_EXCLUDE = 'exclude'
+_KILLNODESIGNAL = 'KILL'
+_DONESIGNAL = 'DONE'
+_ELIDEDCSHEADER = '>20s20s20sl' # cset id, p1, p2, len(text)
+_ELIDEDMFHEADER = '>20s20s20s20sl' # manifest id, p1, p2, link id, len(text)
+_CSHEADERSIZE = struct.calcsize(_ELIDEDCSHEADER)
+_MFHEADERSIZE = struct.calcsize(_ELIDEDMFHEADER)
+
+# When advertising capabilities, always include narrow clone support.
+def getrepocaps_narrow(orig, repo, **kwargs):
+ caps = orig(repo, **kwargs)
+ caps[NARROWCAP] = ['v0']
+ return caps
+
+def _computeellipsis(repo, common, heads, known, match, depth=None):
+ """Compute the shape of a narrowed DAG.
+
+ Args:
+ repo: The repository we're transferring.
+ common: The roots of the DAG range we're transferring.
+ May be just [nullid], which means all ancestors of heads.
+ heads: The heads of the DAG range we're transferring.
+ match: The narrowmatcher that allows us to identify relevant changes.
+ depth: If not None, only consider nodes to be full nodes if they are at
+ most depth changesets away from one of heads.
+
+ Returns:
+ A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:
+
+ visitnodes: The list of nodes (either full or ellipsis) which
+ need to be sent to the client.
+ relevant_nodes: The set of changelog nodes which change a file inside
+ the narrowspec. The client needs these as non-ellipsis nodes.
+ ellipsisroots: A dict of {rev: parents} that is used in
+ narrowchangegroup to produce ellipsis nodes with the
+ correct parents.
+ """
+ cl = repo.changelog
+ mfl = repo.manifestlog
+
+ cldag = dagutil.revlogdag(cl)
+ # dagutil does not like nullid/nullrev
+ commonrevs = cldag.internalizeall(common - set([nullid])) | set([nullrev])
+ headsrevs = cldag.internalizeall(heads)
+ if depth:
+ revdepth = {h: 0 for h in headsrevs}
+
+ ellipsisheads = collections.defaultdict(set)
+ ellipsisroots = collections.defaultdict(set)
+
+ def addroot(head, curchange):
+ """Add a root to an ellipsis head, splitting heads with 3 roots."""
+ ellipsisroots[head].add(curchange)
+ # Recursively split ellipsis heads with 3 roots by finding the
+ # roots' youngest common descendant which is an elided merge commit.
+ # That descendant takes 2 of the 3 roots as its own, and becomes a
+ # root of the head.
+ while len(ellipsisroots[head]) > 2:
+ child, roots = splithead(head)
+ splitroots(head, child, roots)
+ head = child # Recurse in case we just added a 3rd root
+
+ def splitroots(head, child, roots):
+ ellipsisroots[head].difference_update(roots)
+ ellipsisroots[head].add(child)
+ ellipsisroots[child].update(roots)
+ ellipsisroots[child].discard(child)
+
+ def splithead(head):
+ r1, r2, r3 = sorted(ellipsisroots[head])
+ for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
+ mid = repo.revs('sort(merge() & %d::%d & %d::%d, -rev)',
+ nr1, head, nr2, head)
+ for j in mid:
+ if j == nr2:
+ return nr2, (nr1, nr2)
+ if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
+ return j, (nr1, nr2)
+ raise error.Abort('Failed to split up ellipsis node! head: %d, '
+ 'roots: %d %d %d' % (head, r1, r2, r3))
+
+ missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
+ visit = reversed(missing)
+ relevant_nodes = set()
+ visitnodes = [cl.node(m) for m in missing]
+ required = set(headsrevs) | known
+ for rev in visit:
+ clrev = cl.changelogrevision(rev)
+ ps = cldag.parents(rev)
+ if depth is not None:
+ curdepth = revdepth[rev]
+ for p in ps:
+ revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
+ needed = False
+ shallow_enough = depth is None or revdepth[rev] <= depth
+ if shallow_enough:
+ curmf = mfl[clrev.manifest].read()
+ if ps:
+ # We choose to not trust the changed files list in
+ # changesets because it's not always correct. TODO: could
+ # we trust it for the non-merge case?
+ p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
+ needed = bool(curmf.diff(p1mf, match))
+ if not needed and len(ps) > 1:
+ # For merge changes, the list of changed files is not
+ # helpful, since we need to emit the merge if a file
+ # in the narrow spec has changed on either side of the
+ # merge. As a result, we do a manifest diff to check.
+ p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
+ needed = bool(curmf.diff(p2mf, match))
+ else:
+ # For a root node, we need to include the node if any
+ # files in the node match the narrowspec.
+ needed = any(curmf.walk(match))
+
+ if needed:
+ for head in ellipsisheads[rev]:
+ addroot(head, rev)
+ for p in ps:
+ required.add(p)
+ relevant_nodes.add(cl.node(rev))
+ else:
+ if not ps:
+ ps = [nullrev]
+ if rev in required:
+ for head in ellipsisheads[rev]:
+ addroot(head, rev)
+ for p in ps:
+ ellipsisheads[p].add(rev)
+ else:
+ for p in ps:
+ ellipsisheads[p] |= ellipsisheads[rev]
+
+ # add common changesets as roots of their reachable ellipsis heads
+ for c in commonrevs:
+ for head in ellipsisheads[c]:
+ addroot(head, c)
+ return visitnodes, relevant_nodes, ellipsisroots
+
+def _packellipsischangegroup(repo, common, match, relevant_nodes,
+ ellipsisroots, visitnodes, depth, source, version):
+ if version in ('01', '02'):
+ raise error.Abort(
+ 'ellipsis nodes require at least cg3 on client and server, '
+ 'but negotiated version %s' % version)
+ # We wrap cg1packer.revchunk, using a side channel to pass
+ # relevant_nodes into that area. Then if linknode isn't in the
+ # set, we know we have an ellipsis node and we should defer
+ # sending that node's data. We override close() to detect
+ # pending ellipsis nodes and flush them.
+ packer = changegroup.getbundler(version, repo)
+ # Let the packer have access to the narrow matcher so it can
+ # omit filelogs and dirlogs as needed
+ packer._narrow_matcher = lambda : match
+ # Give the packer the list of nodes which should not be
+ # ellipsis nodes. We store this rather than the set of nodes
+ # that should be an ellipsis because for very large histories
+ # we expect this to be significantly smaller.
+ packer.full_nodes = relevant_nodes
+ # Maps ellipsis revs to their roots at the changelog level.
+ packer.precomputed_ellipsis = ellipsisroots
+ # Maps CL revs to per-revlog revisions. Cleared in close() at
+ # the end of each group.
+ packer.clrev_to_localrev = {}
+ packer.next_clrev_to_localrev = {}
+ # Maps changelog nodes to changelog revs. Filled in once
+ # during changelog stage and then left unmodified.
+ packer.clnode_to_rev = {}
+ packer.changelog_done = False
+ # If true, informs the packer that it is serving shallow content and might
+ # need to pack file contents not introduced by the changes being packed.
+ packer.is_shallow = depth is not None
+
+ return packer.generate(common, visitnodes, False, source)
+
+# Serve a changegroup for a client with a narrow clone.
+def getbundlechangegrouppart_narrow(bundler, repo, source,
+ bundlecaps=None, b2caps=None, heads=None,
+ common=None, **kwargs):
+ cgversions = b2caps.get('changegroup')
+ if cgversions: # 3.1 and 3.2 ship with an empty value
+ cgversions = [v for v in cgversions
+ if v in changegroup.supportedoutgoingversions(repo)]
+ if not cgversions:
+ raise ValueError(_('no common changegroup version'))
+ version = max(cgversions)
+ else:
+ raise ValueError(_("server does not advertise changegroup version,"
+ " can't negotiate support for ellipsis nodes"))
+
+ include = sorted(filter(bool, kwargs.get(r'includepats', [])))
+ exclude = sorted(filter(bool, kwargs.get(r'excludepats', [])))
+ newmatch = narrowspec.match(repo.root, include=include, exclude=exclude)
+ if not repo.ui.configbool("experimental", "narrowservebrokenellipses"):
+ outgoing = exchange._computeoutgoing(repo, heads, common)
+ if not outgoing.missing:
+ return
+ def wrappedgetbundler(orig, *args, **kwargs):
+ bundler = orig(*args, **kwargs)
+ bundler._narrow_matcher = lambda : newmatch
+ return bundler
+ with extensions.wrappedfunction(changegroup, 'getbundler',
+ wrappedgetbundler):
+ cg = changegroup.makestream(repo, outgoing, version, source)
+ part = bundler.newpart('changegroup', data=cg)
+ part.addparam('version', version)
+ if 'treemanifest' in repo.requirements:
+ part.addparam('treemanifest', '1')
+
+ if include or exclude:
+ narrowspecpart = bundler.newpart(_SPECPART)
+ if include:
+ narrowspecpart.addparam(
+ _SPECPART_INCLUDE, '\n'.join(include), mandatory=True)
+ if exclude:
+ narrowspecpart.addparam(
+ _SPECPART_EXCLUDE, '\n'.join(exclude), mandatory=True)
+
+ return
+
+ depth = kwargs.get(r'depth', None)
+ if depth is not None:
+ depth = int(depth)
+ if depth < 1:
+ raise error.Abort(_('depth must be positive, got %d') % depth)
+
+ heads = set(heads or repo.heads())
+ common = set(common or [nullid])
+ oldinclude = sorted(filter(bool, kwargs.get(r'oldincludepats', [])))
+ oldexclude = sorted(filter(bool, kwargs.get(r'oldexcludepats', [])))
+ known = {bin(n) for n in kwargs.get(r'known', [])}
+ if known and (oldinclude != include or oldexclude != exclude):
+ # Steps:
+ # 1. Send kill for "$known & ::common"
+ #
+ # 2. Send changegroup for ::common
+ #
+ # 3. Proceed.
+ #
+ # In the future, we can send kills for only the specific
+ # nodes we know should go away or change shape, and then
+ # send a data stream that tells the client something like this:
+ #
+ # a) apply this changegroup
+ # b) apply nodes XXX, YYY, ZZZ that you already have
+ # c) goto a
+ #
+ # until they've built up the full new state.
+ # Convert to revnums and intersect with "common". The client should
+ # have made it a subset of "common" already, but let's be safe.
+ known = set(repo.revs("%ln & ::%ln", known, common))
+ # TODO: we could send only roots() of this set, and the
+ # list of nodes in common, and the client could work out
+ # what to strip, instead of us explicitly sending every
+ # single node.
+ deadrevs = known
+ def genkills():
+ for r in deadrevs:
+ yield _KILLNODESIGNAL
+ yield repo.changelog.node(r)
+ yield _DONESIGNAL
+ bundler.newpart(_CHANGESPECPART, data=genkills())
+ newvisit, newfull, newellipsis = _computeellipsis(
+ repo, set(), common, known, newmatch)
+ if newvisit:
+ cg = _packellipsischangegroup(
+ repo, common, newmatch, newfull, newellipsis,
+ newvisit, depth, source, version)
+ part = bundler.newpart('changegroup', data=cg)
+ part.addparam('version', version)
+ if 'treemanifest' in repo.requirements:
+ part.addparam('treemanifest', '1')
+
+ visitnodes, relevant_nodes, ellipsisroots = _computeellipsis(
+ repo, common, heads, set(), newmatch, depth=depth)
+
+ repo.ui.debug('Found %d relevant revs\n' % len(relevant_nodes))
+ if visitnodes:
+ cg = _packellipsischangegroup(
+ repo, common, newmatch, relevant_nodes, ellipsisroots,
+ visitnodes, depth, source, version)
+ part = bundler.newpart('changegroup', data=cg)
+ part.addparam('version', version)
+ if 'treemanifest' in repo.requirements:
+ part.addparam('treemanifest', '1')
+
+def applyacl_narrow(repo, kwargs):
+ ui = repo.ui
+ username = ui.shortuser(ui.environ.get('REMOTE_USER') or ui.username())
+ user_includes = ui.configlist(
+ _NARROWACL_SECTION, username + '.includes',
+ ui.configlist(_NARROWACL_SECTION, 'default.includes'))
+ user_excludes = ui.configlist(
+ _NARROWACL_SECTION, username + '.excludes',
+ ui.configlist(_NARROWACL_SECTION, 'default.excludes'))
+ if not user_includes:
+ raise error.Abort(_("{} configuration for user {} is empty")
+ .format(_NARROWACL_SECTION, username))
+
+ user_includes = [
+ 'path:.' if p == '*' else 'path:' + p for p in user_includes]
+ user_excludes = [
+ 'path:.' if p == '*' else 'path:' + p for p in user_excludes]
+
+ req_includes = set(kwargs.get(r'includepats', []))
+ req_excludes = set(kwargs.get(r'excludepats', []))
+
+ req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
+ req_includes, req_excludes, user_includes, user_excludes)
+
+ if invalid_includes:
+ raise error.Abort(
+ _("The following includes are not accessible for {}: {}")
+ .format(username, invalid_includes))
+
+ new_args = {}
+ new_args.update(kwargs)
+ new_args['includepats'] = req_includes
+ if req_excludes:
+ new_args['excludepats'] = req_excludes
+ return new_args
+
+@bundle2.parthandler(_SPECPART, (_SPECPART_INCLUDE, _SPECPART_EXCLUDE))
+def _handlechangespec_2(op, inpart):
+ includepats = set(inpart.params.get(_SPECPART_INCLUDE, '').splitlines())
+ excludepats = set(inpart.params.get(_SPECPART_EXCLUDE, '').splitlines())
+ if not changegroup.NARROW_REQUIREMENT in op.repo.requirements:
+ op.repo.requirements.add(changegroup.NARROW_REQUIREMENT)
+ op.repo._writerequirements()
+ op.repo.setnarrowpats(includepats, excludepats)
+
+@bundle2.parthandler(_CHANGESPECPART)
+def _handlechangespec(op, inpart):
+ repo = op.repo
+ cl = repo.changelog
+
+ # changesets which need to be stripped entirely. either they're no longer
+ # needed in the new narrow spec, or the server is sending a replacement
+ # in the changegroup part.
+ clkills = set()
+
+ # A changespec part contains all the updates to ellipsis nodes
+ # that will happen as a result of widening or narrowing a
+ # repo. All the changes that this block encounters are ellipsis
+ # nodes or flags to kill an existing ellipsis.
+ chunksignal = changegroup.readexactly(inpart, 4)
+ while chunksignal != _DONESIGNAL:
+ if chunksignal == _KILLNODESIGNAL:
+ # a node used to be an ellipsis but isn't anymore
+ ck = changegroup.readexactly(inpart, 20)
+ if cl.hasnode(ck):
+ clkills.add(ck)
+ else:
+ raise error.Abort(
+ _('unexpected changespec node chunk type: %s') % chunksignal)
+ chunksignal = changegroup.readexactly(inpart, 4)
+
+ if clkills:
+ # preserve bookmarks that repair.strip() would otherwise strip
+ bmstore = repo._bookmarks
+ class dummybmstore(dict):
+ def applychanges(self, repo, tr, changes):
+ pass
+ def recordchange(self, tr): # legacy version
+ pass
+ repo._bookmarks = dummybmstore()
+ chgrpfile = repair.strip(op.ui, repo, list(clkills), backup=True,
+ topic='widen')
+ repo._bookmarks = bmstore
+ if chgrpfile:
+ # presence of _widen_bundle attribute activates widen handler later
+ op._widen_bundle = chgrpfile
+ # Set the new narrowspec if we're widening. The setnewnarrowpats() method
+ # will currently always be there when using the core+narrowhg server, but
+ # other servers may include a changespec part even when not widening (e.g.
+ # because we're deepening a shallow repo).
+ if util.safehasattr(repo, 'setnewnarrowpats'):
+ repo.setnewnarrowpats()
+
+def handlechangegroup_widen(op, inpart):
+ """Changegroup exchange handler which restores temporarily-stripped nodes"""
+ # We saved a bundle with stripped node data we must now restore.
+ # This approach is based on mercurial/repair.py@6ee26a53c111.
+ repo = op.repo
+ ui = op.ui
+
+ chgrpfile = op._widen_bundle
+ del op._widen_bundle
+ vfs = repo.vfs
+
+ ui.note(_("adding branch\n"))
+ f = vfs.open(chgrpfile, "rb")
+ try:
+ gen = exchange.readbundle(ui, f, chgrpfile, vfs)
+ if not ui.verbose:
+ # silence internal shuffling chatter
+ ui.pushbuffer()
+ if isinstance(gen, bundle2.unbundle20):
+ with repo.transaction('strip') as tr:
+ bundle2.processbundle(repo, gen, lambda: tr)
+ else:
+ gen.apply(repo, 'strip', 'bundle:' + vfs.join(chgrpfile), True)
+ if not ui.verbose:
+ ui.popbuffer()
+ finally:
+ f.close()
+
+ # remove undo files
+ for undovfs, undofile in repo.undofiles():
+ try:
+ undovfs.unlink(undofile)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ ui.warn(_('error removing %s: %s\n') %
+ (undovfs.join(undofile), util.forcebytestr(e)))
+
+ # Remove partial backup only if there were no exceptions
+ vfs.unlink(chgrpfile)
+
+def setup():
+ """Enable narrow repo support in bundle2-related extension points."""
+ extensions.wrapfunction(bundle2, 'getrepocaps', getrepocaps_narrow)
+
+ wireproto.gboptsmap['narrow'] = 'boolean'
+ wireproto.gboptsmap['depth'] = 'plain'
+ wireproto.gboptsmap['oldincludepats'] = 'csv'
+ wireproto.gboptsmap['oldexcludepats'] = 'csv'
+ wireproto.gboptsmap['includepats'] = 'csv'
+ wireproto.gboptsmap['excludepats'] = 'csv'
+ wireproto.gboptsmap['known'] = 'csv'
+
+ # Extend changegroup serving to handle requests from narrow clients.
+ origcgfn = exchange.getbundle2partsmapping['changegroup']
+ def wrappedcgfn(*args, **kwargs):
+ repo = args[1]
+ if repo.ui.has_section(_NARROWACL_SECTION):
+ getbundlechangegrouppart_narrow(
+ *args, **applyacl_narrow(repo, kwargs))
+ elif kwargs.get(r'narrow', False):
+ getbundlechangegrouppart_narrow(*args, **kwargs)
+ else:
+ origcgfn(*args, **kwargs)
+ exchange.getbundle2partsmapping['changegroup'] = wrappedcgfn
+
+ # disable rev branch cache exchange when serving a narrow bundle
+ # (currently incompatible with that part)
+ origrbcfn = exchange.getbundle2partsmapping['cache:rev-branch-cache']
+ def wrappedcgfn(*args, **kwargs):
+ repo = args[1]
+ if repo.ui.has_section(_NARROWACL_SECTION):
+ return
+ elif kwargs.get(r'narrow', False):
+ return
+ else:
+ origrbcfn(*args, **kwargs)
+ exchange.getbundle2partsmapping['cache:rev-branch-cache'] = wrappedcgfn
+
+ # Extend changegroup receiver so client can fixup after widen requests.
+ origcghandler = bundle2.parthandlermapping['changegroup']
+ def wrappedcghandler(op, inpart):
+ origcghandler(op, inpart)
+ if util.safehasattr(op, '_widen_bundle'):
+ handlechangegroup_widen(op, inpart)
+ wrappedcghandler.params = origcghandler.params
+ bundle2.parthandlermapping['changegroup'] = wrappedcghandler
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/narrow/narrowchangegroup.py Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,372 @@
+# narrowchangegroup.py - narrow clone changegroup creation and consumption
+#
+# Copyright 2017 Google, Inc.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+from mercurial.i18n import _
+from mercurial import (
+ changegroup,
+ error,
+ extensions,
+ manifest,
+ match as matchmod,
+ mdiff,
+ node,
+ revlog,
+ util,
+)
+
+def setup():
+
+ def _cgmatcher(cgpacker):
+ localmatcher = cgpacker._repo.narrowmatch()
+ remotematcher = getattr(cgpacker, '_narrow_matcher', lambda: None)()
+ if remotematcher:
+ return matchmod.intersectmatchers(localmatcher, remotematcher)
+ else:
+ return localmatcher
+
+ def prune(orig, self, revlog, missing, commonrevs):
+ if isinstance(revlog, manifest.manifestrevlog):
+ matcher = _cgmatcher(self)
+ if (matcher and
+ not matcher.visitdir(revlog._dir[:-1] or '.')):
+ return []
+ return orig(self, revlog, missing, commonrevs)
+
+ extensions.wrapfunction(changegroup.cg1packer, 'prune', prune)
+
+ def generatefiles(orig, self, changedfiles, linknodes, commonrevs,
+ source):
+ matcher = _cgmatcher(self)
+ if matcher:
+ changedfiles = list(filter(matcher, changedfiles))
+ if getattr(self, 'is_shallow', False):
+ # See comment in generate() for why this sadness is a thing.
+ mfdicts = self._mfdicts
+ del self._mfdicts
+ # In a shallow clone, the linknodes callback needs to also include
+ # those file nodes that are in the manifests we sent but weren't
+ # introduced by those manifests.
+ commonctxs = [self._repo[c] for c in commonrevs]
+ oldlinknodes = linknodes
+ clrev = self._repo.changelog.rev
+ def linknodes(flog, fname):
+ for c in commonctxs:
+ try:
+ fnode = c.filenode(fname)
+ self.clrev_to_localrev[c.rev()] = flog.rev(fnode)
+ except error.ManifestLookupError:
+ pass
+ links = oldlinknodes(flog, fname)
+ if len(links) != len(mfdicts):
+ for mf, lr in mfdicts:
+ fnode = mf.get(fname, None)
+ if fnode in links:
+ links[fnode] = min(links[fnode], lr, key=clrev)
+ elif fnode:
+ links[fnode] = lr
+ return links
+ return orig(self, changedfiles, linknodes, commonrevs, source)
+ extensions.wrapfunction(
+ changegroup.cg1packer, 'generatefiles', generatefiles)
+
+ def ellipsisdata(packer, rev, revlog_, p1, p2, data, linknode):
+ n = revlog_.node(rev)
+ p1n, p2n = revlog_.node(p1), revlog_.node(p2)
+ flags = revlog_.flags(rev)
+ flags |= revlog.REVIDX_ELLIPSIS
+ meta = packer.builddeltaheader(
+ n, p1n, p2n, node.nullid, linknode, flags)
+ # TODO: try and actually send deltas for ellipsis data blocks
+ diffheader = mdiff.trivialdiffheader(len(data))
+ l = len(meta) + len(diffheader) + len(data)
+ return ''.join((changegroup.chunkheader(l),
+ meta,
+ diffheader,
+ data))
+
+ def close(orig, self):
+ getattr(self, 'clrev_to_localrev', {}).clear()
+ if getattr(self, 'next_clrev_to_localrev', {}):
+ self.clrev_to_localrev = self.next_clrev_to_localrev
+ del self.next_clrev_to_localrev
+ self.changelog_done = True
+ return orig(self)
+ extensions.wrapfunction(changegroup.cg1packer, 'close', close)
+
+ # In a perfect world, we'd generate better ellipsis-ified graphs
+ # for non-changelog revlogs. In practice, we haven't started doing
+ # that yet, so the resulting DAGs for the manifestlog and filelogs
+ # are actually full of bogus parentage on all the ellipsis
+ # nodes. This has the side effect that, while the contents are
+ # correct, the individual DAGs might be completely out of whack in
+ # a case like 882681bc3166 and its ancestors (back about 10
+ # revisions or so) in the main hg repo.
+ #
+ # The one invariant we *know* holds is that the new (potentially
+ # bogus) DAG shape will be valid if we order the nodes in the
+ # order that they're introduced in dramatis personae by the
+ # changelog, so what we do is we sort the non-changelog histories
+ # by the order in which they are used by the changelog.
+ def _sortgroup(orig, self, revlog, nodelist, lookup):
+ if not util.safehasattr(self, 'full_nodes') or not self.clnode_to_rev:
+ return orig(self, revlog, nodelist, lookup)
+ key = lambda n: self.clnode_to_rev[lookup(n)]
+ return [revlog.rev(n) for n in sorted(nodelist, key=key)]
+
+ extensions.wrapfunction(changegroup.cg1packer, '_sortgroup', _sortgroup)
+
+ def generate(orig, self, commonrevs, clnodes, fastpathlinkrev, source):
+ '''yield a sequence of changegroup chunks (strings)'''
+ # Note: other than delegating to orig, the only deviation in
+ # logic from normal hg's generate is marked with BEGIN/END
+ # NARROW HACK.
+ if not util.safehasattr(self, 'full_nodes'):
+ # not sending a narrow bundle
+ for x in orig(self, commonrevs, clnodes, fastpathlinkrev, source):
+ yield x
+ return
+
+ repo = self._repo
+ cl = repo.changelog
+ mfl = repo.manifestlog
+ mfrevlog = mfl._revlog
+
+ clrevorder = {}
+ mfs = {} # needed manifests
+ fnodes = {} # needed file nodes
+ changedfiles = set()
+
+ # Callback for the changelog, used to collect changed files and manifest
+ # nodes.
+ # Returns the linkrev node (identity in the changelog case).
+ def lookupcl(x):
+ c = cl.read(x)
+ clrevorder[x] = len(clrevorder)
+ # BEGIN NARROW HACK
+ #
+ # Only update mfs if x is going to be sent. Otherwise we
+ # end up with bogus linkrevs specified for manifests and
+ # we skip some manifest nodes that we should otherwise
+ # have sent.
+ if x in self.full_nodes or cl.rev(x) in self.precomputed_ellipsis:
+ n = c[0]
+ # record the first changeset introducing this manifest version
+ mfs.setdefault(n, x)
+ # Set this narrow-specific dict so we have the lowest manifest
+ # revnum to look up for this cl revnum. (Part of mapping
+ # changelog ellipsis parents to manifest ellipsis parents)
+ self.next_clrev_to_localrev.setdefault(cl.rev(x),
+ mfrevlog.rev(n))
+ # We can't trust the changed files list in the changeset if the
+ # client requested a shallow clone.
+ if self.is_shallow:
+ changedfiles.update(mfl[c[0]].read().keys())
+ else:
+ changedfiles.update(c[3])
+ # END NARROW HACK
+ # Record a complete list of potentially-changed files in
+ # this manifest.
+ return x
+
+ self._verbosenote(_('uncompressed size of bundle content:\n'))
+ size = 0
+ for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
+ size += len(chunk)
+ yield chunk
+ self._verbosenote(_('%8.i (changelog)\n') % size)
+
+ # We need to make sure that the linkrev in the changegroup refers to
+ # the first changeset that introduced the manifest or file revision.
+ # The fastpath is usually safer than the slowpath, because the filelogs
+ # are walked in revlog order.
+ #
+ # When taking the slowpath with reorder=None and the manifest revlog
+ # uses generaldelta, the manifest may be walked in the "wrong" order.
+ # Without 'clrevorder', we would get an incorrect linkrev (see fix in
+ # cc0ff93d0c0c).
+ #
+ # When taking the fastpath, we are only vulnerable to reordering
+ # of the changelog itself. The changelog never uses generaldelta, so
+ # it is only reordered when reorder=True. To handle this case, we
+ # simply take the slowpath, which already has the 'clrevorder' logic.
+ # This was also fixed in cc0ff93d0c0c.
+ fastpathlinkrev = fastpathlinkrev and not self._reorder
+ # Treemanifests don't work correctly with fastpathlinkrev
+ # either, because we don't discover which directory nodes to
+ # send along with files. This could probably be fixed.
+ fastpathlinkrev = fastpathlinkrev and (
+ 'treemanifest' not in repo.requirements)
+ # Shallow clones also don't work correctly with fastpathlinkrev
+ # because file nodes may need to be sent for a manifest even if they
+ # weren't introduced by that manifest.
+ fastpathlinkrev = fastpathlinkrev and not self.is_shallow
+
+ for chunk in self.generatemanifests(commonrevs, clrevorder,
+ fastpathlinkrev, mfs, fnodes, source):
+ yield chunk
+ # BEGIN NARROW HACK
+ mfdicts = None
+ if self.is_shallow:
+ mfdicts = [(self._repo.manifestlog[n].read(), lr)
+ for (n, lr) in mfs.iteritems()]
+ # END NARROW HACK
+ mfs.clear()
+ clrevs = set(cl.rev(x) for x in clnodes)
+
+ if not fastpathlinkrev:
+ def linknodes(unused, fname):
+ return fnodes.get(fname, {})
+ else:
+ cln = cl.node
+ def linknodes(filerevlog, fname):
+ llr = filerevlog.linkrev
+ fln = filerevlog.node
+ revs = ((r, llr(r)) for r in filerevlog)
+ return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)
+
+ # BEGIN NARROW HACK
+ #
+ # We need to pass the mfdicts variable down into
+ # generatefiles(), but more than one command might have
+ # wrapped generatefiles so we can't modify the function
+ # signature. Instead, we pass the data to ourselves using an
+ # instance attribute. I'm sorry.
+ self._mfdicts = mfdicts
+ # END NARROW HACK
+ for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
+ source):
+ yield chunk
+
+ yield self.close()
+
+ if clnodes:
+ repo.hook('outgoing', node=node.hex(clnodes[0]), source=source)
+ extensions.wrapfunction(changegroup.cg1packer, 'generate', generate)
+
+ def revchunk(orig, self, revlog, rev, prev, linknode):
+ if not util.safehasattr(self, 'full_nodes'):
+ # not sending a narrow changegroup
+ for x in orig(self, revlog, rev, prev, linknode):
+ yield x
+ return
+ # build up some mapping information that's useful later. See
+ # the local() nested function below.
+ if not self.changelog_done:
+ self.clnode_to_rev[linknode] = rev
+ linkrev = rev
+ self.clrev_to_localrev[linkrev] = rev
+ else:
+ linkrev = self.clnode_to_rev[linknode]
+ self.clrev_to_localrev[linkrev] = rev
+ # This is a node to send in full, because the changeset it
+ # corresponds to was a full changeset.
+ if linknode in self.full_nodes:
+ for x in orig(self, revlog, rev, prev, linknode):
+ yield x
+ return
+ # At this point, a node can either be one we should skip or an
+ # ellipsis. If it's not an ellipsis, bail immediately.
+ if linkrev not in self.precomputed_ellipsis:
+ return
+ linkparents = self.precomputed_ellipsis[linkrev]
+ def local(clrev):
+ """Turn a changelog revnum into a local revnum.
+
+ The ellipsis dag is stored as revnums on the changelog,
+ but when we're producing ellipsis entries for
+ non-changelog revlogs, we need to turn those numbers into
+ something local. This does that for us, and during the
+ changelog sending phase will also expand the stored
+ mappings as needed.
+ """
+ if clrev == node.nullrev:
+ return node.nullrev
+ if not self.changelog_done:
+ # If we're doing the changelog, it's possible that we
+ # have a parent that is already on the client, and we
+ # need to store some extra mapping information so that
+ # our contained ellipsis nodes will be able to resolve
+ # their parents.
+ if clrev not in self.clrev_to_localrev:
+ clnode = revlog.node(clrev)
+ self.clnode_to_rev[clnode] = clrev
+ return clrev
+ # Walk the ellipsis-ized changelog breadth-first looking for a
+ # change that has been linked from the current revlog.
+ #
+ # For a flat manifest revlog only a single step should be necessary
+ # as all relevant changelog entries are relevant to the flat
+ # manifest.
+ #
+ # For a filelog or tree manifest dirlog however not every changelog
+ # entry will have been relevant, so we need to skip some changelog
+ # nodes even after ellipsis-izing.
+ walk = [clrev]
+ while walk:
+ p = walk[0]
+ walk = walk[1:]
+ if p in self.clrev_to_localrev:
+ return self.clrev_to_localrev[p]
+ elif p in self.full_nodes:
+ walk.extend([pp for pp in self._repo.changelog.parentrevs(p)
+ if pp != node.nullrev])
+ elif p in self.precomputed_ellipsis:
+ walk.extend([pp for pp in self.precomputed_ellipsis[p]
+ if pp != node.nullrev])
+ else:
+ # In this case, we've got an ellipsis with parents
+ # outside the current bundle (likely an
+ # incremental pull). We "know" that we can use the
+ # value of this same revlog at whatever revision
+ # is pointed to by linknode. "Know" is in scare
+ # quotes because I haven't done enough examination
+ # of edge cases to convince myself this is really
+ # a fact - it works for all the (admittedly
+ # thorough) cases in our testsuite, but I would be
+ # somewhat unsurprised to find a case in the wild
+ # where this breaks down a bit. That said, I don't
+ # know if it would hurt anything.
+ for i in xrange(rev, 0, -1):
+ if revlog.linkrev(i) == clrev:
+ return i
+ # We failed to resolve a parent for this node, so
+ # we crash the changegroup construction.
+ raise error.Abort(
+ 'unable to resolve parent while packing %r %r'
+ ' for changeset %r' % (revlog.indexfile, rev, clrev))
+ return node.nullrev
+
+ if not linkparents or (
+ revlog.parentrevs(rev) == (node.nullrev, node.nullrev)):
+ p1, p2 = node.nullrev, node.nullrev
+ elif len(linkparents) == 1:
+ p1, = sorted(local(p) for p in linkparents)
+ p2 = node.nullrev
+ else:
+ p1, p2 = sorted(local(p) for p in linkparents)
+ yield ellipsisdata(
+ self, rev, revlog, p1, p2, revlog.revision(rev), linknode)
+ extensions.wrapfunction(changegroup.cg1packer, 'revchunk', revchunk)
+
+ def deltaparent(orig, self, revlog, rev, p1, p2, prev):
+ if util.safehasattr(self, 'full_nodes'):
+ # TODO: send better deltas when in narrow mode.
+ #
+ # changegroup.group() loops over revisions to send,
+ # including revisions we'll skip. What this means is that
+ # `prev` will be a potentially useless delta base for all
+ # ellipsis nodes, as the client likely won't have it. In
+ # the future we should do bookkeeping about which nodes
+ # have been sent to the client, and try to be
+ # significantly smarter about delta bases. This is
+ # slightly tricky because this same code has to work for
+ # all revlogs, and we don't have the linkrev/linknode here.
+ return p1
+ return orig(self, revlog, rev, p1, p2, prev)
+ extensions.wrapfunction(changegroup.cg2packer, 'deltaparent', deltaparent)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/narrow/narrowcommands.py Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,401 @@
+# narrowcommands.py - command modifications for narrowhg extension
+#
+# Copyright 2017 Google, Inc.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+from __future__ import absolute_import
+
+import itertools
+
+from mercurial.i18n import _
+from mercurial import (
+ changegroup,
+ cmdutil,
+ commands,
+ discovery,
+ error,
+ exchange,
+ extensions,
+ hg,
+ merge,
+ narrowspec,
+ node,
+ pycompat,
+ registrar,
+ repair,
+ repoview,
+ util,
+)
+
+from . import (
+ narrowbundle2,
+)
+
+table = {}
+command = registrar.command(table)
+
+def setup():
+ """Wraps user-facing mercurial commands with narrow-aware versions."""
+
+ entry = extensions.wrapcommand(commands.table, 'clone', clonenarrowcmd)
+ entry[1].append(('', 'narrow', None,
+ _("create a narrow clone of select files")))
+ entry[1].append(('', 'depth', '',
+ _("limit the history fetched by distance from heads")))
+ # TODO(durin42): unify sparse/narrow --include/--exclude logic a bit
+ if 'sparse' not in extensions.enabled():
+ entry[1].append(('', 'include', [],
+ _("specifically fetch this file/directory")))
+ entry[1].append(
+ ('', 'exclude', [],
+ _("do not fetch this file/directory, even if included")))
+
+ entry = extensions.wrapcommand(commands.table, 'pull', pullnarrowcmd)
+ entry[1].append(('', 'depth', '',
+ _("limit the history fetched by distance from heads")))
+
+ extensions.wrapcommand(commands.table, 'archive', archivenarrowcmd)
+
+def expandpull(pullop, includepats, excludepats):
+ if not narrowspec.needsexpansion(includepats):
+ return includepats, excludepats
+
+ heads = pullop.heads or pullop.rheads
+ includepats, excludepats = pullop.remote.expandnarrow(
+ includepats, excludepats, heads)
+ pullop.repo.ui.debug('Expanded narrowspec to inc=%s, exc=%s\n' % (
+ includepats, excludepats))
+ return set(includepats), set(excludepats)
+
+def clonenarrowcmd(orig, ui, repo, *args, **opts):
+ """Wraps clone command, so 'hg clone' first wraps localrepo.clone()."""
+ opts = pycompat.byteskwargs(opts)
+ wrappedextraprepare = util.nullcontextmanager()
+ opts_narrow = opts['narrow']
+ if opts_narrow:
+ def pullbundle2extraprepare_widen(orig, pullop, kwargs):
+ # Create narrow spec patterns from clone flags
+ includepats = narrowspec.parsepatterns(opts['include'])
+ excludepats = narrowspec.parsepatterns(opts['exclude'])
+
+ # If necessary, ask the server to expand the narrowspec.
+ includepats, excludepats = expandpull(
+ pullop, includepats, excludepats)
+
+ if not includepats and excludepats:
+ # If nothing was included, we assume the user meant to include
+ # everything, except what they asked to exclude.
+ includepats = {'path:.'}
+
+ pullop.repo.setnarrowpats(includepats, excludepats)
+
+ # This will populate 'includepats' etc with the values from the
+ # narrowspec we just saved.
+ orig(pullop, kwargs)
+
+ if opts.get('depth'):
+ kwargs['depth'] = opts['depth']
+ wrappedextraprepare = extensions.wrappedfunction(exchange,
+ '_pullbundle2extraprepare', pullbundle2extraprepare_widen)
+
+ def pullnarrow(orig, repo, *args, **kwargs):
+ if opts_narrow:
+ repo.requirements.add(changegroup.NARROW_REQUIREMENT)
+ repo._writerequirements()
+
+ return orig(repo, *args, **kwargs)
+
+ wrappedpull = extensions.wrappedfunction(exchange, 'pull', pullnarrow)
+
+ with wrappedextraprepare, wrappedpull:
+ return orig(ui, repo, *args, **pycompat.strkwargs(opts))
+
+def pullnarrowcmd(orig, ui, repo, *args, **opts):
+ """Wraps pull command to allow modifying narrow spec."""
+ wrappedextraprepare = util.nullcontextmanager()
+ if changegroup.NARROW_REQUIREMENT in repo.requirements:
+
+ def pullbundle2extraprepare_widen(orig, pullop, kwargs):
+ orig(pullop, kwargs)
+ if opts.get(r'depth'):
+ kwargs['depth'] = opts[r'depth']
+ wrappedextraprepare = extensions.wrappedfunction(exchange,
+ '_pullbundle2extraprepare', pullbundle2extraprepare_widen)
+
+ with wrappedextraprepare:
+ return orig(ui, repo, *args, **opts)
+
+def archivenarrowcmd(orig, ui, repo, *args, **opts):
+ """Wraps archive command to narrow the default includes."""
+ if changegroup.NARROW_REQUIREMENT in repo.requirements:
+ repo_includes, repo_excludes = repo.narrowpats
+ includes = set(opts.get(r'include', []))
+ excludes = set(opts.get(r'exclude', []))
+ includes, excludes, unused_invalid = narrowspec.restrictpatterns(
+ includes, excludes, repo_includes, repo_excludes)
+ if includes:
+ opts[r'include'] = includes
+ if excludes:
+ opts[r'exclude'] = excludes
+ return orig(ui, repo, *args, **opts)
+
+def pullbundle2extraprepare(orig, pullop, kwargs):
+ repo = pullop.repo
+ if changegroup.NARROW_REQUIREMENT not in repo.requirements:
+ return orig(pullop, kwargs)
+
+ if narrowbundle2.NARROWCAP not in pullop.remotebundle2caps:
+ raise error.Abort(_("server doesn't support narrow clones"))
+ orig(pullop, kwargs)
+ kwargs['narrow'] = True
+ include, exclude = repo.narrowpats
+ kwargs['oldincludepats'] = include
+ kwargs['oldexcludepats'] = exclude
+ kwargs['includepats'] = include
+ kwargs['excludepats'] = exclude
+ kwargs['known'] = [node.hex(ctx.node()) for ctx in
+ repo.set('::%ln', pullop.common)
+ if ctx.node() != node.nullid]
+ if not kwargs['known']:
+ # Mercurial serialized an empty list as '' and deserializes it as
+ # [''], so delete it instead to avoid handling the empty string on the
+ # server.
+ del kwargs['known']
+
+extensions.wrapfunction(exchange,'_pullbundle2extraprepare',
+ pullbundle2extraprepare)
+
+def _narrow(ui, repo, remote, commoninc, oldincludes, oldexcludes,
+ newincludes, newexcludes, force):
+ oldmatch = narrowspec.match(repo.root, oldincludes, oldexcludes)
+ newmatch = narrowspec.match(repo.root, newincludes, newexcludes)
+
+ # This is essentially doing "hg outgoing" to find all local-only
+ # commits. We will then check that the local-only commits don't
+ # have any changes to files that will be untracked.
+ unfi = repo.unfiltered()
+ outgoing = discovery.findcommonoutgoing(unfi, remote,
+ commoninc=commoninc)
+ ui.status(_('looking for local changes to affected paths\n'))
+ localnodes = []
+ for n in itertools.chain(outgoing.missing, outgoing.excluded):
+ if any(oldmatch(f) and not newmatch(f) for f in unfi[n].files()):
+ localnodes.append(n)
+ revstostrip = unfi.revs('descendants(%ln)', localnodes)
+ hiddenrevs = repoview.filterrevs(repo, 'visible')
+ visibletostrip = list(repo.changelog.node(r)
+ for r in (revstostrip - hiddenrevs))
+ if visibletostrip:
+ ui.status(_('The following changeset(s) or their ancestors have '
+ 'local changes not on the remote:\n'))
+ maxnodes = 10
+ if ui.verbose or len(visibletostrip) <= maxnodes:
+ for n in visibletostrip:
+ ui.status('%s\n' % node.short(n))
+ else:
+ for n in visibletostrip[:maxnodes]:
+ ui.status('%s\n' % node.short(n))
+ ui.status(_('...and %d more, use --verbose to list all\n') %
+ (len(visibletostrip) - maxnodes))
+ if not force:
+ raise error.Abort(_('local changes found'),
+ hint=_('use --force-delete-local-changes to '
+ 'ignore'))
+
+ if revstostrip:
+ tostrip = [unfi.changelog.node(r) for r in revstostrip]
+ if repo['.'].node() in tostrip:
+ # stripping working copy, so move to a different commit first
+ urev = max(repo.revs('(::%n) - %ln + null',
+ repo['.'].node(), visibletostrip))
+ hg.clean(repo, urev)
+ repair.strip(ui, unfi, tostrip, topic='narrow')
+
+ todelete = []
+ for f, f2, size in repo.store.datafiles():
+ if f.startswith('data/'):
+ file = f[5:-2]
+ if not newmatch(file):
+ todelete.append(f)
+ elif f.startswith('meta/'):
+ dir = f[5:-13]
+ dirs = ['.'] + sorted(util.dirs({dir})) + [dir]
+ include = True
+ for d in dirs:
+ visit = newmatch.visitdir(d)
+ if not visit:
+ include = False
+ break
+ if visit == 'all':
+ break
+ if not include:
+ todelete.append(f)
+
+ repo.destroying()
+
+ with repo.transaction("narrowing"):
+ for f in todelete:
+ ui.status(_('deleting %s\n') % f)
+ util.unlinkpath(repo.svfs.join(f))
+ repo.store.markremoved(f)
+
+ for f in repo.dirstate:
+ if not newmatch(f):
+ repo.dirstate.drop(f)
+ repo.wvfs.unlinkpath(f)
+ repo.setnarrowpats(newincludes, newexcludes)
+
+ repo.destroyed()
+
+def _widen(ui, repo, remote, commoninc, newincludes, newexcludes):
+ newmatch = narrowspec.match(repo.root, newincludes, newexcludes)
+
+ # TODO(martinvonz): Get expansion working with widening/narrowing.
+ if narrowspec.needsexpansion(newincludes):
+ raise error.Abort('Expansion not yet supported on pull')
+
+ def pullbundle2extraprepare_widen(orig, pullop, kwargs):
+ orig(pullop, kwargs)
+ # The old{in,ex}cludepats have already been set by orig()
+ kwargs['includepats'] = newincludes
+ kwargs['excludepats'] = newexcludes
+ wrappedextraprepare = extensions.wrappedfunction(exchange,
+ '_pullbundle2extraprepare', pullbundle2extraprepare_widen)
+
+ # define a function that narrowbundle2 can call after creating the
+ # backup bundle, but before applying the bundle from the server
+ def setnewnarrowpats():
+ repo.setnarrowpats(newincludes, newexcludes)
+ repo.setnewnarrowpats = setnewnarrowpats
+
+ ds = repo.dirstate
+ p1, p2 = ds.p1(), ds.p2()
+ with ds.parentchange():
+ ds.setparents(node.nullid, node.nullid)
+ common = commoninc[0]
+ with wrappedextraprepare:
+ exchange.pull(repo, remote, heads=common)
+ with ds.parentchange():
+ ds.setparents(p1, p2)
+
+ actions = {k: [] for k in 'a am f g cd dc r dm dg m e k p pr'.split()}
+ addgaction = actions['g'].append
+
+ mf = repo['.'].manifest().matches(newmatch)
+ for f, fn in mf.iteritems():
+ if f not in repo.dirstate:
+ addgaction((f, (mf.flags(f), False),
+ "add from widened narrow clone"))
+
+ merge.applyupdates(repo, actions, wctx=repo[None],
+ mctx=repo['.'], overwrite=False)
+ merge.recordupdates(repo, actions, branchmerge=False)
+
+# TODO(rdamazio): Make new matcher format and update description
+@command('tracked',
+ [('', 'addinclude', [], _('new paths to include')),
+ ('', 'removeinclude', [], _('old paths to no longer include')),
+ ('', 'addexclude', [], _('new paths to exclude')),
+ ('', 'removeexclude', [], _('old paths to no longer exclude')),
+ ('', 'clear', False, _('whether to replace the existing narrowspec')),
+ ('', 'force-delete-local-changes', False,
+ _('forces deletion of local changes when narrowing')),
+ ] + commands.remoteopts,
+ _('[OPTIONS]... [REMOTE]'),
+ inferrepo=True)
+def trackedcmd(ui, repo, remotepath=None, *pats, **opts):
+ """show or change the current narrowspec
+
+ With no argument, shows the current narrowspec entries, one per line. Each
+ line will be prefixed with 'I' or 'X' for included or excluded patterns,
+ respectively.
+
+ The narrowspec is comprised of expressions to match remote files and/or
+ directories that should be pulled into your client.
+ The narrowspec has *include* and *exclude* expressions, with excludes always
+ trumping includes: that is, if a file matches an exclude expression, it will
+ be excluded even if it also matches an include expression.
+ Excluding files that were never included has no effect.
+
+ Each included or excluded entry is in the format described by
+ 'hg help patterns'.
+
+ The options allow you to add or remove included and excluded expressions.
+
+ If --clear is specified, then all previous includes and excludes are DROPPED
+ and replaced by the new ones specified to --addinclude and --addexclude.
+ If --clear is specified without any further options, the narrowspec will be
+ empty and will not match any files.
+ """
+ opts = pycompat.byteskwargs(opts)
+ if changegroup.NARROW_REQUIREMENT not in repo.requirements:
+        ui.warn(_('The narrow command is only supported on repositories cloned'
+ ' with --narrow.\n'))
+ return 1
+
+    # Before supporting, decide whether "hg tracked --clear" should mean
+ # tracking no paths or all paths.
+ if opts['clear']:
+ ui.warn(_('The --clear option is not yet supported.\n'))
+ return 1
+
+ if narrowspec.needsexpansion(opts['addinclude'] + opts['addexclude']):
+ raise error.Abort('Expansion not yet supported on widen/narrow')
+
+ addedincludes = narrowspec.parsepatterns(opts['addinclude'])
+ removedincludes = narrowspec.parsepatterns(opts['removeinclude'])
+ addedexcludes = narrowspec.parsepatterns(opts['addexclude'])
+ removedexcludes = narrowspec.parsepatterns(opts['removeexclude'])
+ widening = addedincludes or removedexcludes
+ narrowing = removedincludes or addedexcludes
+ only_show = not widening and not narrowing
+
+ # Only print the current narrowspec.
+ if only_show:
+ include, exclude = repo.narrowpats
+
+ ui.pager('tracked')
+ fm = ui.formatter('narrow', opts)
+ for i in sorted(include):
+ fm.startitem()
+ fm.write('status', '%s ', 'I', label='narrow.included')
+ fm.write('pat', '%s\n', i, label='narrow.included')
+ for i in sorted(exclude):
+ fm.startitem()
+ fm.write('status', '%s ', 'X', label='narrow.excluded')
+ fm.write('pat', '%s\n', i, label='narrow.excluded')
+ fm.end()
+ return 0
+
+ with repo.wlock(), repo.lock():
+ cmdutil.bailifchanged(repo)
+
+ # Find the revisions we have in common with the remote. These will
+ # be used for finding local-only changes for narrowing. They will
+ # also define the set of revisions to update for widening.
+ remotepath = ui.expandpath(remotepath or 'default')
+ url, branches = hg.parseurl(remotepath)
+ ui.status(_('comparing with %s\n') % util.hidepassword(url))
+ remote = hg.peer(repo, opts, url)
+ commoninc = discovery.findcommonincoming(repo, remote)
+
+ oldincludes, oldexcludes = repo.narrowpats
+ if narrowing:
+ newincludes = oldincludes - removedincludes
+ newexcludes = oldexcludes | addedexcludes
+ _narrow(ui, repo, remote, commoninc, oldincludes, oldexcludes,
+ newincludes, newexcludes,
+ opts['force_delete_local_changes'])
+ # _narrow() updated the narrowspec and _widen() below needs to
+ # use the updated values as its base (otherwise removed includes
+ # and addedexcludes will be lost in the resulting narrowspec)
+ oldincludes = newincludes
+ oldexcludes = newexcludes
+
+ if widening:
+ newincludes = oldincludes | addedincludes
+ newexcludes = oldexcludes - removedexcludes
+ _widen(ui, repo, remote, commoninc, newincludes, newexcludes)
+
+ return 0
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/narrow/narrowcopies.py Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,34 @@
+# narrowcopies.py - extensions to mercurial copies module to support narrow
+# clones
+#
+# Copyright 2017 Google, Inc.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+from mercurial import (
+ copies,
+ extensions,
+)
+
+def setup(repo):
+ def _computeforwardmissing(orig, a, b, match=None):
+ missing = orig(a, b, match)
+ narrowmatch = repo.narrowmatch()
+ if narrowmatch.always():
+ return missing
+ missing = [f for f in missing if narrowmatch(f)]
+ return missing
+
+ def _checkcopies(orig, srcctx, dstctx, f, base, tca, remotebase, limit,
+ data):
+ narrowmatch = repo.narrowmatch()
+ if not narrowmatch(f):
+ return
+ orig(srcctx, dstctx, f, base, tca, remotebase, limit, data)
+
+ extensions.wrapfunction(copies, '_computeforwardmissing',
+ _computeforwardmissing)
+ extensions.wrapfunction(copies, '_checkcopies', _checkcopies)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/narrow/narrowdirstate.py Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,82 @@
+# narrowdirstate.py - extensions to mercurial dirstate to support narrow clones
+#
+# Copyright 2017 Google, Inc.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+from mercurial.i18n import _
+from mercurial import (
+ dirstate,
+ error,
+ extensions,
+ match as matchmod,
+ narrowspec,
+ util as hgutil,
+)
+
+def setup(repo):
+ """Add narrow spec dirstate ignore, block changes outside narrow spec."""
+
+ def walk(orig, self, match, subrepos, unknown, ignored, full=True,
+ narrowonly=True):
+ if narrowonly:
+ # hack to not exclude explicitly-specified paths so that they can
+ # be warned later on e.g. dirstate.add()
+ em = matchmod.exact(match._root, match._cwd, match.files())
+ nm = matchmod.unionmatcher([repo.narrowmatch(), em])
+ match = matchmod.intersectmatchers(match, nm)
+ return orig(self, match, subrepos, unknown, ignored, full)
+
+ extensions.wrapfunction(dirstate.dirstate, 'walk', walk)
+
+ # Prevent adding files that are outside the sparse checkout
+ editfuncs = ['normal', 'add', 'normallookup', 'copy', 'remove', 'merge']
+ for func in editfuncs:
+ def _wrapper(orig, self, *args):
+ dirstate = repo.dirstate
+ narrowmatch = repo.narrowmatch()
+ for f in args:
+ if f is not None and not narrowmatch(f) and f not in dirstate:
+ raise error.Abort(_("cannot track '%s' - it is outside " +
+ "the narrow clone") % f)
+ return orig(self, *args)
+ extensions.wrapfunction(dirstate.dirstate, func, _wrapper)
+
+ def filterrebuild(orig, self, parent, allfiles, changedfiles=None):
+ if changedfiles is None:
+ # Rebuilding entire dirstate, let's filter allfiles to match the
+ # narrowspec.
+ allfiles = [f for f in allfiles if repo.narrowmatch()(f)]
+ orig(self, parent, allfiles, changedfiles)
+
+ extensions.wrapfunction(dirstate.dirstate, 'rebuild', filterrebuild)
+
+ def _narrowbackupname(backupname):
+ assert 'dirstate' in backupname
+ return backupname.replace('dirstate', narrowspec.FILENAME)
+
+ def restorebackup(orig, self, tr, backupname):
+ self._opener.rename(_narrowbackupname(backupname), narrowspec.FILENAME,
+ checkambig=True)
+ orig(self, tr, backupname)
+
+ extensions.wrapfunction(dirstate.dirstate, 'restorebackup', restorebackup)
+
+ def savebackup(orig, self, tr, backupname):
+ orig(self, tr, backupname)
+
+ narrowbackupname = _narrowbackupname(backupname)
+ self._opener.tryunlink(narrowbackupname)
+ hgutil.copyfile(self._opener.join(narrowspec.FILENAME),
+ self._opener.join(narrowbackupname), hardlink=True)
+
+ extensions.wrapfunction(dirstate.dirstate, 'savebackup', savebackup)
+
+ def clearbackup(orig, self, tr, backupname):
+ orig(self, tr, backupname)
+ self._opener.unlink(_narrowbackupname(backupname))
+
+ extensions.wrapfunction(dirstate.dirstate, 'clearbackup', clearbackup)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/narrow/narrowmerge.py Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,77 @@
+# narrowmerge.py - extensions to mercurial merge module to support narrow clones
+#
+# Copyright 2017 Google, Inc.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+from mercurial.i18n import _
+from mercurial import (
+ copies,
+ error,
+ extensions,
+ merge,
+)
+
+def setup():
+ def _manifestmerge(orig, repo, wctx, p2, pa, branchmerge, *args, **kwargs):
+ """Filter updates to only lay out files that match the narrow spec."""
+ actions, diverge, renamedelete = orig(
+ repo, wctx, p2, pa, branchmerge, *args, **kwargs)
+
+ narrowmatch = repo.narrowmatch()
+ if narrowmatch.always():
+ return actions, diverge, renamedelete
+
+ nooptypes = set(['k']) # TODO: handle with nonconflicttypes
+ nonconflicttypes = set('a am c cm f g r e'.split())
+ # We mutate the items in the dict during iteration, so iterate
+ # over a copy.
+ for f, action in list(actions.items()):
+ if narrowmatch(f):
+ pass
+ elif not branchmerge:
+ del actions[f] # just updating, ignore changes outside clone
+ elif action[0] in nooptypes:
+ del actions[f] # merge does not affect file
+ elif action[0] in nonconflicttypes:
+ raise error.Abort(_('merge affects file \'%s\' outside narrow, '
+ 'which is not yet supported') % f,
+ hint=_('merging in the other direction '
+ 'may work'))
+ else:
+ raise error.Abort(_('conflict in file \'%s\' is outside '
+ 'narrow clone') % f)
+
+ return actions, diverge, renamedelete
+
+ extensions.wrapfunction(merge, 'manifestmerge', _manifestmerge)
+
+ def _checkcollision(orig, repo, wmf, actions):
+ narrowmatch = repo.narrowmatch()
+ if not narrowmatch.always():
+ wmf = wmf.matches(narrowmatch)
+ if actions:
+ narrowactions = {}
+ for m, actionsfortype in actions.iteritems():
+ narrowactions[m] = []
+ for (f, args, msg) in actionsfortype:
+ if narrowmatch(f):
+ narrowactions[m].append((f, args, msg))
+ actions = narrowactions
+ return orig(repo, wmf, actions)
+
+ extensions.wrapfunction(merge, '_checkcollision', _checkcollision)
+
+ def _computenonoverlap(orig, repo, *args, **kwargs):
+ u1, u2 = orig(repo, *args, **kwargs)
+ narrowmatch = repo.narrowmatch()
+ if narrowmatch.always():
+ return u1, u2
+
+ u1 = [f for f in u1 if narrowmatch(f)]
+ u2 = [f for f in u2 if narrowmatch(f)]
+ return u1, u2
+ extensions.wrapfunction(copies, '_computenonoverlap', _computenonoverlap)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/narrow/narrowpatch.py Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,41 @@
+# narrowpatch.py - extensions to mercurial patch module to support narrow clones
+#
+# Copyright 2017 Google, Inc.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+from mercurial import (
+ extensions,
+ patch,
+)
+
+def setup(repo):
+ def _filepairs(orig, *args):
+ """Only includes files within the narrow spec in the diff."""
+ narrowmatch = repo.narrowmatch()
+ if not narrowmatch.always():
+ for x in orig(*args):
+ f1, f2, copyop = x
+ if ((not f1 or narrowmatch(f1)) and
+ (not f2 or narrowmatch(f2))):
+ yield x
+ else:
+ for x in orig(*args):
+ yield x
+
+ def trydiff(orig, repo, revs, ctx1, ctx2, modified, added, removed,
+ copy, getfilectx, *args, **kwargs):
+ narrowmatch = repo.narrowmatch()
+ if not narrowmatch.always():
+ modified = [f for f in modified if narrowmatch(f)]
+ added = [f for f in added if narrowmatch(f)]
+ removed = [f for f in removed if narrowmatch(f)]
+ copy = {k: v for k, v in copy.iteritems() if narrowmatch(k)}
+ return orig(repo, revs, ctx1, ctx2, modified, added, removed, copy,
+ getfilectx, *args, **kwargs)
+
+ extensions.wrapfunction(patch, '_filepairs', _filepairs)
+ extensions.wrapfunction(patch, 'trydiff', trydiff)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/narrow/narrowrepo.py Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,86 @@
+# narrowrepo.py - repository which supports narrow revlogs, lazy loading
+#
+# Copyright 2017 Google, Inc.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+from mercurial import (
+ bundlerepo,
+ changegroup,
+ hg,
+ localrepo,
+ narrowspec,
+ scmutil,
+)
+
+from . import (
+ narrowrevlog,
+)
+
+def wrappostshare(orig, sourcerepo, destrepo, **kwargs):
+ orig(sourcerepo, destrepo, **kwargs)
+ if changegroup.NARROW_REQUIREMENT in sourcerepo.requirements:
+ with destrepo.wlock():
+ with destrepo.vfs('shared', 'a') as fp:
+ fp.write(narrowspec.FILENAME + '\n')
+
+def unsharenarrowspec(orig, ui, repo, repopath):
+ if (changegroup.NARROW_REQUIREMENT in repo.requirements
+ and repo.path == repopath and repo.shared()):
+ srcrepo = hg.sharedreposource(repo)
+ with srcrepo.vfs(narrowspec.FILENAME) as f:
+ spec = f.read()
+ with repo.vfs(narrowspec.FILENAME, 'w') as f:
+ f.write(spec)
+ return orig(ui, repo, repopath)
+
+def wraprepo(repo):
+ """Enables narrow clone functionality on a single local repository."""
+
+ cacheprop = localrepo.storecache
+ if isinstance(repo, bundlerepo.bundlerepository):
+ # We have to use a different caching property decorator for
+ # bundlerepo because storecache blows up in strange ways on a
+ # bundlerepo. Fortunately, there's no risk of data changing in
+ # a bundlerepo.
+ cacheprop = lambda name: localrepo.unfilteredpropertycache
+
+ class narrowrepository(repo.__class__):
+
+ def _constructmanifest(self):
+ manifest = super(narrowrepository, self)._constructmanifest()
+ narrowrevlog.makenarrowmanifestrevlog(manifest, repo)
+ return manifest
+
+ @cacheprop('00manifest.i')
+ def manifestlog(self):
+ mfl = super(narrowrepository, self).manifestlog
+ narrowrevlog.makenarrowmanifestlog(mfl, self)
+ return mfl
+
+ def file(self, f):
+ fl = super(narrowrepository, self).file(f)
+ narrowrevlog.makenarrowfilelog(fl, self.narrowmatch())
+ return fl
+
+ # I'm not sure this is the right place to do this filter.
+ # context._manifestmatches() would probably be better, or perhaps
+ # move it to a later place, in case some of the callers do want to know
+ # which directories changed. This seems to work for now, though.
+ def status(self, *args, **kwargs):
+ s = super(narrowrepository, self).status(*args, **kwargs)
+ narrowmatch = self.narrowmatch()
+ modified = list(filter(narrowmatch, s.modified))
+ added = list(filter(narrowmatch, s.added))
+ removed = list(filter(narrowmatch, s.removed))
+ deleted = list(filter(narrowmatch, s.deleted))
+ unknown = list(filter(narrowmatch, s.unknown))
+ ignored = list(filter(narrowmatch, s.ignored))
+ clean = list(filter(narrowmatch, s.clean))
+ return scmutil.status(modified, added, removed, deleted, unknown,
+ ignored, clean)
+
+ repo.__class__ = narrowrepository
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/narrow/narrowrevlog.py Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,187 @@
+# narrowrevlog.py - revlog storing irrelevant nodes as "ellipsis" nodes
+#
+# Copyright 2017 Google, Inc.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+from mercurial import (
+ error,
+ manifest,
+ revlog,
+ util,
+)
+
+def readtransform(self, text):
+ return text, False
+
+def writetransform(self, text):
+ return text, False
+
+def rawtransform(self, text):
+ return False
+
+revlog.addflagprocessor(revlog.REVIDX_ELLIPSIS,
+ (readtransform, writetransform, rawtransform))
+
+def setup():
+ # We just wanted to add the flag processor, which is done at module
+ # load time.
+ pass
+
+class excludeddir(manifest.treemanifest):
+ """Stand-in for a directory that is excluded from the repository.
+
+ With narrowing active on a repository that uses treemanifests,
+ some of the directory revlogs will be excluded from the resulting
+ clone. This is a huge storage win for clients, but means we need
+ some sort of pseudo-manifest to surface to internals so we can
+ detect a merge conflict outside the narrowspec. That's what this
+ class is: it stands in for a directory whose node is known, but
+ whose contents are unknown.
+ """
+ def __init__(self, dir, node):
+ super(excludeddir, self).__init__(dir)
+ self._node = node
+ # Add an empty file, which will be included by iterators and such,
+ # appearing as the directory itself (i.e. something like "dir/")
+ self._files[''] = node
+ self._flags[''] = 't'
+
+ # Manifests outside the narrowspec should never be modified, so avoid
+ # copying. This makes a noticeable difference when there are very many
+ # directories outside the narrowspec. Also, it makes sense for the copy to
+ # be of the same type as the original, which would not happen with the
+ # super type's copy().
+ def copy(self):
+ return self
+
+class excludeddirmanifestctx(manifest.treemanifestctx):
+ """context wrapper for excludeddir - see that docstring for rationale"""
+ def __init__(self, dir, node):
+ self._dir = dir
+ self._node = node
+
+ def read(self):
+ return excludeddir(self._dir, self._node)
+
+ def write(self, *args):
+ raise error.ProgrammingError(
+ 'attempt to write manifest from excluded dir %s' % self._dir)
+
+class excludedmanifestrevlog(manifest.manifestrevlog):
+ """Stand-in for excluded treemanifest revlogs.
+
+ When narrowing is active on a treemanifest repository, we'll have
+ references to directories we can't see due to the revlog being
+ skipped. This class exists to conform to the manifestrevlog
+ interface for those directories and proactively prevent writes to
+ outside the narrowspec.
+ """
+
+ def __init__(self, dir):
+ self._dir = dir
+
+ def __len__(self):
+ raise error.ProgrammingError(
+ 'attempt to get length of excluded dir %s' % self._dir)
+
+ def rev(self, node):
+ raise error.ProgrammingError(
+ 'attempt to get rev from excluded dir %s' % self._dir)
+
+ def linkrev(self, node):
+ raise error.ProgrammingError(
+ 'attempt to get linkrev from excluded dir %s' % self._dir)
+
+ def node(self, rev):
+ raise error.ProgrammingError(
+ 'attempt to get node from excluded dir %s' % self._dir)
+
+ def add(self, *args, **kwargs):
+ # We should never write entries in dirlogs outside the narrow clone.
+ # However, the method still gets called from writesubtree() in
+ # _addtree(), so we need to handle it. We should possibly make that
+ # avoid calling add() with a clean manifest (_dirty is always False
+ # in excludeddir instances).
+ pass
+
+def makenarrowmanifestrevlog(mfrevlog, repo):
+ if util.safehasattr(mfrevlog, '_narrowed'):
+ return
+
+ class narrowmanifestrevlog(mfrevlog.__class__):
+ # This function is called via debug{revlog,index,data}, but also during
+ # at least some push operations. This will be used to wrap/exclude the
+ # child directories when using treemanifests.
+ def dirlog(self, d):
+ if d and not d.endswith('/'):
+ d = d + '/'
+ if not repo.narrowmatch().visitdir(d[:-1] or '.'):
+ return excludedmanifestrevlog(d)
+ result = super(narrowmanifestrevlog, self).dirlog(d)
+ makenarrowmanifestrevlog(result, repo)
+ return result
+
+ mfrevlog.__class__ = narrowmanifestrevlog
+ mfrevlog._narrowed = True
+
+def makenarrowmanifestlog(mfl, repo):
+ class narrowmanifestlog(mfl.__class__):
+ def get(self, dir, node, verify=True):
+ if not repo.narrowmatch().visitdir(dir[:-1] or '.'):
+ return excludeddirmanifestctx(dir, node)
+ return super(narrowmanifestlog, self).get(dir, node, verify=verify)
+ mfl.__class__ = narrowmanifestlog
+
+def makenarrowfilelog(fl, narrowmatch):
+ class narrowfilelog(fl.__class__):
+ def renamed(self, node):
+ # Renames that come from outside the narrowspec are
+ # problematic at least for git-diffs, because we lack the
+ # base text for the rename. This logic was introduced in
+ # 3cd72b1 of narrowhg (authored by martinvonz, reviewed by
+ # adgar), but that revision doesn't have any additional
+ # commentary on what problems we can encounter.
+ m = super(narrowfilelog, self).renamed(node)
+ if m and not narrowmatch(m[0]):
+ return None
+ return m
+
+ def size(self, rev):
+ # We take advantage of the fact that remotefilelog
+ # lacks a node() method to just skip the
+ # rename-checking logic when on remotefilelog. This
+ # might be incorrect on other non-revlog-based storage
+ # engines, but for now this seems to be fine.
+ #
+ # TODO: when remotefilelog is in core, improve this to
+ # explicitly look for remotefilelog instead of cheating
+ # with a hasattr check.
+ if util.safehasattr(self, 'node'):
+ node = self.node(rev)
+ # Because renamed() is overridden above to
+ # sometimes return None even if there is metadata
+ # in the revlog, size can be incorrect for
+ # copies/renames, so we need to make sure we call
+ # the super class's implementation of renamed()
+ # for the purpose of size calculation.
+ if super(narrowfilelog, self).renamed(node):
+ return len(self.read(node))
+ return super(narrowfilelog, self).size(rev)
+
+ def cmp(self, node, text):
+ different = super(narrowfilelog, self).cmp(node, text)
+ if different:
+ # Similar to size() above, if the file was copied from
+ # a file outside the narrowspec, the super class's
+ # would have returned True because we tricked it into
+ # thinking that the file was not renamed.
+ if super(narrowfilelog, self).renamed(node):
+ t2 = self.read(node)
+ return t2 != text
+ return different
+
+ fl.__class__ = narrowfilelog
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/narrow/narrowtemplates.py Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,48 @@
+# narrowtemplates.py - added template keywords for narrow clones
+#
+# Copyright 2017 Google, Inc.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+from mercurial import (
+ registrar,
+ revlog,
+)
+
+keywords = {}
+templatekeyword = registrar.templatekeyword(keywords)
+revsetpredicate = registrar.revsetpredicate()
+
+def _isellipsis(repo, rev):
+ if repo.changelog.flags(rev) & revlog.REVIDX_ELLIPSIS:
+ return True
+ return False
+
+@templatekeyword('ellipsis', requires={'repo', 'ctx'})
+def ellipsis(context, mapping):
+ """String. 'ellipsis' if the change is an ellipsis node, else ''."""
+ repo = context.resource(mapping, 'repo')
+ ctx = context.resource(mapping, 'ctx')
+ if _isellipsis(repo, ctx.rev()):
+ return 'ellipsis'
+ return ''
+
+@templatekeyword('outsidenarrow', requires={'repo', 'ctx'})
+def outsidenarrow(context, mapping):
+ """String. 'outsidenarrow' if the change affects no tracked files,
+ else ''."""
+ repo = context.resource(mapping, 'repo')
+ ctx = context.resource(mapping, 'ctx')
+ m = repo.narrowmatch()
+ if not m.always():
+ if not any(m(f) for f in ctx.files()):
+ return 'outsidenarrow'
+ return ''
+
+@revsetpredicate('ellipsis')
+def ellipsisrevset(repo, subset, x):
+ """Changesets that are ellipsis nodes."""
+ return subset.filter(lambda r: _isellipsis(repo, r))
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/narrow/narrowwirepeer.py Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,52 @@
+# narrowwirepeer.py - passes narrow spec with unbundle command
+#
+# Copyright 2017 Google, Inc.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+from mercurial.i18n import _
+from mercurial import (
+ error,
+ extensions,
+ hg,
+ narrowspec,
+ node,
+)
+
+def uisetup():
+ def peersetup(ui, peer):
+ # We must set up the expansion before reposetup below, since it's used
+ # at clone time before we have a repo.
+ class expandingpeer(peer.__class__):
+ def expandnarrow(self, narrow_include, narrow_exclude, nodes):
+ ui.status(_("expanding narrowspec\n"))
+ if not self.capable('exp-expandnarrow'):
+ raise error.Abort(
+ 'peer does not support expanding narrowspecs')
+
+ hex_nodes = (node.hex(n) for n in nodes)
+ new_narrowspec = self._call(
+ 'expandnarrow',
+ includepats=','.join(narrow_include),
+ excludepats=','.join(narrow_exclude),
+ nodes=','.join(hex_nodes))
+
+ return narrowspec.parseserverpatterns(new_narrowspec)
+ peer.__class__ = expandingpeer
+ hg.wirepeersetupfuncs.append(peersetup)
+
+def reposetup(repo):
+ def wirereposetup(ui, peer):
+ def wrapped(orig, cmd, *args, **kwargs):
+ if cmd == 'unbundle':
+ # TODO: don't blindly add include/exclude wireproto
+ # arguments to unbundle.
+ include, exclude = repo.narrowpats
+ kwargs[r"includepats"] = ','.join(include)
+ kwargs[r"excludepats"] = ','.join(exclude)
+ return orig(cmd, *args, **kwargs)
+ extensions.wrapfunction(peer, '_calltwowaystream', wrapped)
+ hg.wirepeersetupfuncs.append(wirereposetup)
--- a/hgext/notify.py Thu Mar 15 22:35:07 2018 -0700
+++ b/hgext/notify.py Mon Mar 19 08:07:18 2018 -0700
@@ -142,13 +142,14 @@
from mercurial.i18n import _
from mercurial import (
- cmdutil,
error,
+ logcmdutil,
mail,
patch,
registrar,
util,
)
+from mercurial.utils import dateutil
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
@@ -257,9 +258,8 @@
mapfile = self.ui.config('notify', 'style')
if not mapfile and not template:
template = deftemplates.get(hooktype) or single_template
- spec = cmdutil.logtemplatespec(template, mapfile)
- self.t = cmdutil.changeset_templater(self.ui, self.repo, spec,
- False, None, False)
+ spec = logcmdutil.templatespec(template, mapfile)
+ self.t = logcmdutil.changesettemplater(self.ui, self.repo, spec)
def strip(self, path):
'''strip leading slashes from local path, turn into web-safe path.'''
@@ -361,7 +361,7 @@
for k, v in headers:
msg[k] = v
- msg['Date'] = util.datestr(format="%a, %d %b %Y %H:%M:%S %1%2")
+ msg['Date'] = dateutil.datestr(format="%a, %d %b %Y %H:%M:%S %1%2")
# try to make subject line exist and be useful
if not subject:
--- a/hgext/patchbomb.py Thu Mar 15 22:35:07 2018 -0700
+++ b/hgext/patchbomb.py Mon Mar 19 08:07:18 2018 -0700
@@ -74,6 +74,8 @@
from __future__ import absolute_import
import email as emailmod
+import email.generator as emailgen
+import email.utils as eutil
import errno
import os
import socket
@@ -83,6 +85,7 @@
from mercurial import (
cmdutil,
commands,
+ encoding,
error,
formatter,
hg,
@@ -96,6 +99,7 @@
templater,
util,
)
+from mercurial.utils import dateutil
stringio = util.stringio
cmdtable = {}
@@ -208,7 +212,7 @@
if not numbered:
return '[PATCH%s]' % flag
else:
- tlen = len(str(total))
+ tlen = len("%d" % total)
return '[PATCH %0*d of %d%s]' % (tlen, idx, total, flag)
def makepatch(ui, repo, rev, patchlines, opts, _charsets, idx, total, numbered,
@@ -265,11 +269,10 @@
if patchtags:
patchname = patchtags[0]
elif total > 1:
- patchname = cmdutil.makefilename(repo, '%b-%n.patch',
- binnode, seqno=idx,
- total=total)
+ patchname = cmdutil.makefilename(repo[node], '%b-%n.patch',
+ seqno=idx, total=total)
else:
- patchname = cmdutil.makefilename(repo, '%b.patch', binnode)
+ patchname = cmdutil.makefilename(repo[node], '%b.patch')
disposition = 'inline'
if opts.get('attach'):
disposition = 'attachment'
@@ -627,7 +630,7 @@
if outgoing:
revs = _getoutgoing(repo, dest, revs)
if bundle:
- opts['revs'] = [str(r) for r in revs]
+ opts['revs'] = ["%d" % r for r in revs]
# check if revision exist on the public destination
publicurl = repo.ui.config('patchbomb', 'publicurl')
@@ -655,19 +658,21 @@
else:
msg = _('public url %s is missing %s')
msg %= (publicurl, missing[0])
+ missingrevs = [ctx.rev() for ctx in missing]
revhint = ' '.join('-r %s' % h
- for h in repo.set('heads(%ld)', missing))
+ for h in repo.set('heads(%ld)', missingrevs))
hint = _("use 'hg push %s %s'") % (publicurl, revhint)
raise error.Abort(msg, hint=hint)
# start
if date:
- start_time = util.parsedate(date)
+ start_time = dateutil.parsedate(date)
else:
- start_time = util.makedate()
+ start_time = dateutil.makedate()
def genmsgid(id):
- return '<%s.%s@%s>' % (id[:20], int(start_time[0]), socket.getfqdn())
+ return '<%s.%d@%s>' % (id[:20], int(start_time[0]),
+ encoding.strtolocal(socket.getfqdn()))
# deprecated config: patchbomb.from
sender = (opts.get('from') or ui.config('email', 'from') or
@@ -744,7 +749,7 @@
if not parent.endswith('>'):
parent += '>'
- sender_addr = emailmod.Utils.parseaddr(sender)[1]
+ sender_addr = eutil.parseaddr(encoding.strfromlocal(sender))[1]
sender = mail.addressencode(ui, sender, _charsets, opts.get('test'))
sendmail = None
firstpatch = None
@@ -763,7 +768,7 @@
parent = m['Message-Id']
m['User-Agent'] = 'Mercurial-patchbomb/%s' % util.version()
- m['Date'] = emailmod.Utils.formatdate(start_time[0], localtime=True)
+ m['Date'] = eutil.formatdate(start_time[0], localtime=True)
start_time = (start_time[0] + 1, start_time[1])
m['From'] = sender
@@ -777,7 +782,7 @@
if opts.get('test'):
ui.status(_('displaying '), subj, ' ...\n')
ui.pager('email')
- generator = emailmod.Generator.Generator(ui, mangle_from_=False)
+ generator = emailgen.Generator(ui, mangle_from_=False)
try:
generator.flatten(m, 0)
ui.write('\n')
@@ -794,7 +799,7 @@
# Exim does not remove the Bcc field
del m['Bcc']
fp = stringio()
- generator = emailmod.Generator.Generator(fp, mangle_from_=False)
+ generator = emailgen.Generator(fp, mangle_from_=False)
generator.flatten(m, 0)
sendmail(sender_addr, to + bcc + cc, fp.getvalue())
--- a/hgext/purge.py Thu Mar 15 22:35:07 2018 -0700
+++ b/hgext/purge.py Mon Mar 19 08:07:18 2018 -0700
@@ -31,6 +31,7 @@
from mercurial import (
cmdutil,
error,
+ pycompat,
registrar,
scmutil,
util,
@@ -84,6 +85,7 @@
list of files that this program would delete, use the --print
option.
'''
+ opts = pycompat.byteskwargs(opts)
act = not opts.get('print')
eol = '\n'
if opts.get('print0'):
--- a/hgext/rebase.py Thu Mar 15 22:35:07 2018 -0700
+++ b/hgext/rebase.py Mon Mar 19 08:07:18 2018 -0700
@@ -174,9 +174,6 @@
self.keepf = opts.get('keep', False)
self.keepbranchesf = opts.get('keepbranches', False)
- # keepopen is not meant for use on the command line, but by
- # other extensions
- self.keepopen = opts.get('keepopen', False)
self.obsoletenotrebased = {}
self.obsoletewithoutsuccessorindestination = set()
self.inmemory = inmemory
@@ -214,7 +211,7 @@
if v >= 0:
newrev = repo[v].hex()
else:
- newrev = v
+ newrev = "%d" % v
destnode = repo[destmap[d]].hex()
f.write("%s:%s:%s\n" % (oldrev, newrev, destnode))
repo.ui.debug('rebase status stored\n')
@@ -289,7 +286,7 @@
skipped.add(old)
seen.add(new)
repo.ui.debug('computed skipped revs: %s\n' %
- (' '.join(str(r) for r in sorted(skipped)) or None))
+ (' '.join('%d' % r for r in sorted(skipped)) or ''))
repo.ui.debug('rebase status resumed\n')
self.originalwd = originalwd
@@ -312,10 +309,13 @@
if not self.ui.configbool('experimental', 'rebaseskipobsolete'):
return
obsoleteset = set(obsoleterevs)
- self.obsoletenotrebased, self.obsoletewithoutsuccessorindestination = \
- _computeobsoletenotrebased(self.repo, obsoleteset, destmap)
+ (self.obsoletenotrebased,
+ self.obsoletewithoutsuccessorindestination,
+ obsoleteextinctsuccessors) = _computeobsoletenotrebased(
+ self.repo, obsoleteset, destmap)
skippedset = set(self.obsoletenotrebased)
skippedset.update(self.obsoletewithoutsuccessorindestination)
+ skippedset.update(obsoleteextinctsuccessors)
_checkobsrebase(self.repo, self.ui, obsoleteset, skippedset)
def _prepareabortorcontinue(self, isabort):
@@ -423,131 +423,130 @@
cands = [k for k, v in self.state.iteritems() if v == revtodo]
total = len(cands)
- pos = 0
+ posholder = [0]
+ def progress(ctx):
+ posholder[0] += 1
+ self.repo.ui.progress(_("rebasing"), posholder[0],
+ ("%d:%s" % (ctx.rev(), ctx)),
+ _('changesets'), total)
+ allowdivergence = self.ui.configbool(
+ 'experimental', 'evolution.allowdivergence')
for subset in sortsource(self.destmap):
- pos = self._performrebasesubset(tr, subset, pos, total)
+ sortedrevs = self.repo.revs('sort(%ld, -topo)', subset)
+ if not allowdivergence:
+ sortedrevs -= self.repo.revs(
+ 'descendants(%ld) and not %ld',
+ self.obsoletewithoutsuccessorindestination,
+ self.obsoletewithoutsuccessorindestination,
+ )
+ for rev in sortedrevs:
+ self._rebasenode(tr, rev, allowdivergence, progress)
ui.progress(_('rebasing'), None)
ui.note(_('rebase merging completed\n'))
- def _performrebasesubset(self, tr, subset, pos, total):
+ def _rebasenode(self, tr, rev, allowdivergence, progressfn):
repo, ui, opts = self.repo, self.ui, self.opts
- sortedrevs = repo.revs('sort(%ld, -topo)', subset)
- allowdivergence = self.ui.configbool(
- 'experimental', 'evolution.allowdivergence')
- if not allowdivergence:
- sortedrevs -= repo.revs(
- 'descendants(%ld) and not %ld',
- self.obsoletewithoutsuccessorindestination,
- self.obsoletewithoutsuccessorindestination,
- )
- for rev in sortedrevs:
- dest = self.destmap[rev]
- ctx = repo[rev]
- desc = _ctxdesc(ctx)
- if self.state[rev] == rev:
- ui.status(_('already rebased %s\n') % desc)
- elif (not allowdivergence
- and rev in self.obsoletewithoutsuccessorindestination):
- msg = _('note: not rebasing %s and its descendants as '
- 'this would cause divergence\n') % desc
- repo.ui.status(msg)
- self.skipped.add(rev)
- elif rev in self.obsoletenotrebased:
- succ = self.obsoletenotrebased[rev]
- if succ is None:
- msg = _('note: not rebasing %s, it has no '
- 'successor\n') % desc
- else:
- succdesc = _ctxdesc(repo[succ])
- msg = (_('note: not rebasing %s, already in '
- 'destination as %s\n') % (desc, succdesc))
- repo.ui.status(msg)
- # Make clearrebased aware state[rev] is not a true successor
- self.skipped.add(rev)
- # Record rev as moved to its desired destination in self.state.
- # This helps bookmark and working parent movement.
- dest = max(adjustdest(repo, rev, self.destmap, self.state,
- self.skipped))
- self.state[rev] = dest
- elif self.state[rev] == revtodo:
- pos += 1
- ui.status(_('rebasing %s\n') % desc)
- ui.progress(_("rebasing"), pos, ("%d:%s" % (rev, ctx)),
- _('changesets'), total)
- p1, p2, base = defineparents(repo, rev, self.destmap,
- self.state, self.skipped,
- self.obsoletenotrebased)
- self.storestatus(tr=tr)
- storecollapsemsg(repo, self.collapsemsg)
- if len(repo[None].parents()) == 2:
- repo.ui.debug('resuming interrupted rebase\n')
+ dest = self.destmap[rev]
+ ctx = repo[rev]
+ desc = _ctxdesc(ctx)
+ if self.state[rev] == rev:
+ ui.status(_('already rebased %s\n') % desc)
+ elif (not allowdivergence
+ and rev in self.obsoletewithoutsuccessorindestination):
+ msg = _('note: not rebasing %s and its descendants as '
+ 'this would cause divergence\n') % desc
+ repo.ui.status(msg)
+ self.skipped.add(rev)
+ elif rev in self.obsoletenotrebased:
+ succ = self.obsoletenotrebased[rev]
+ if succ is None:
+ msg = _('note: not rebasing %s, it has no '
+ 'successor\n') % desc
+ else:
+ succdesc = _ctxdesc(repo[succ])
+ msg = (_('note: not rebasing %s, already in '
+ 'destination as %s\n') % (desc, succdesc))
+ repo.ui.status(msg)
+ # Make clearrebased aware state[rev] is not a true successor
+ self.skipped.add(rev)
+ # Record rev as moved to its desired destination in self.state.
+ # This helps bookmark and working parent movement.
+ dest = max(adjustdest(repo, rev, self.destmap, self.state,
+ self.skipped))
+ self.state[rev] = dest
+ elif self.state[rev] == revtodo:
+ ui.status(_('rebasing %s\n') % desc)
+ progressfn(ctx)
+ p1, p2, base = defineparents(repo, rev, self.destmap,
+ self.state, self.skipped,
+ self.obsoletenotrebased)
+ self.storestatus(tr=tr)
+ if len(repo[None].parents()) == 2:
+ repo.ui.debug('resuming interrupted rebase\n')
+ else:
+ overrides = {('ui', 'forcemerge'): opts.get('tool', '')}
+ with ui.configoverride(overrides, 'rebase'):
+ stats = rebasenode(repo, rev, p1, base, self.collapsef,
+ dest, wctx=self.wctx)
+ if stats[3] > 0:
+ if self.wctx.isinmemory():
+ raise error.InMemoryMergeConflictsError()
+ else:
+ raise error.InterventionRequired(
+ _('unresolved conflicts (see hg '
+ 'resolve, then hg rebase --continue)'))
+ if not self.collapsef:
+ merging = p2 != nullrev
+ editform = cmdutil.mergeeditform(merging, 'rebase')
+ editor = cmdutil.getcommiteditor(editform=editform,
+ **pycompat.strkwargs(opts))
+ if self.wctx.isinmemory():
+ newnode = concludememorynode(repo, rev, p1, p2,
+ wctx=self.wctx,
+ extrafn=_makeextrafn(self.extrafns),
+ editor=editor,
+ keepbranches=self.keepbranchesf,
+ date=self.date)
+ mergemod.mergestate.clean(repo)
else:
- try:
- ui.setconfig('ui', 'forcemerge', opts.get('tool', ''),
- 'rebase')
- stats = rebasenode(repo, rev, p1, base, self.state,
- self.collapsef, dest, wctx=self.wctx)
- if stats and stats[3] > 0:
- if self.wctx.isinmemory():
- raise error.InMemoryMergeConflictsError()
- else:
- raise error.InterventionRequired(
- _('unresolved conflicts (see hg '
- 'resolve, then hg rebase --continue)'))
- finally:
- ui.setconfig('ui', 'forcemerge', '', 'rebase')
+ newnode = concludenode(repo, rev, p1, p2,
+ extrafn=_makeextrafn(self.extrafns),
+ editor=editor,
+ keepbranches=self.keepbranchesf,
+ date=self.date)
+
+ if newnode is None:
+ # If it ended up being a no-op commit, then the normal
+ # merge state clean-up path doesn't happen, so do it
+ # here. Fix issue5494
+ mergemod.mergestate.clean(repo)
+ else:
+ # Skip commit if we are collapsing
+ if self.wctx.isinmemory():
+ self.wctx.setbase(repo[p1])
+ else:
+ repo.setparents(repo[p1].node())
+ newnode = None
+ # Update the state
+ if newnode is not None:
+ self.state[rev] = repo[newnode].rev()
+ ui.debug('rebased as %s\n' % short(newnode))
+ else:
if not self.collapsef:
- merging = p2 != nullrev
- editform = cmdutil.mergeeditform(merging, 'rebase')
- editor = cmdutil.getcommiteditor(editform=editform, **opts)
- if self.wctx.isinmemory():
- newnode = concludememorynode(repo, rev, p1, p2,
- wctx=self.wctx,
- extrafn=_makeextrafn(self.extrafns),
- editor=editor,
- keepbranches=self.keepbranchesf,
- date=self.date)
- mergemod.mergestate.clean(repo)
- else:
- newnode = concludenode(repo, rev, p1, p2,
- extrafn=_makeextrafn(self.extrafns),
- editor=editor,
- keepbranches=self.keepbranchesf,
- date=self.date)
-
- if newnode is None:
- # If it ended up being a no-op commit, then the normal
- # merge state clean-up path doesn't happen, so do it
- # here. Fix issue5494
- mergemod.mergestate.clean(repo)
- else:
- # Skip commit if we are collapsing
- if self.wctx.isinmemory():
- self.wctx.setbase(repo[p1])
- else:
- repo.setparents(repo[p1].node())
- newnode = None
- # Update the state
- if newnode is not None:
- self.state[rev] = repo[newnode].rev()
- ui.debug('rebased as %s\n' % short(newnode))
- else:
- if not self.collapsef:
- ui.warn(_('note: rebase of %d:%s created no changes '
- 'to commit\n') % (rev, ctx))
- self.skipped.add(rev)
- self.state[rev] = p1
- ui.debug('next revision set to %s\n' % p1)
- else:
- ui.status(_('already rebased %s as %s\n') %
- (desc, repo[self.state[rev]]))
- return pos
+ ui.warn(_('note: rebase of %d:%s created no changes '
+ 'to commit\n') % (rev, ctx))
+ self.skipped.add(rev)
+ self.state[rev] = p1
+ ui.debug('next revision set to %d\n' % p1)
+ else:
+ ui.status(_('already rebased %s as %s\n') %
+ (desc, repo[self.state[rev]]))
def _finishrebase(self):
repo, ui, opts = self.repo, self.ui, self.opts
fm = ui.formatter('rebase', opts)
fm.startitem()
- if self.collapsef and not self.keepopen:
+ if self.collapsef:
p1, p2, _base = defineparents(repo, min(self.state), self.destmap,
self.state, self.skipped,
self.obsoletenotrebased)
@@ -564,7 +563,6 @@
editor = cmdutil.getcommiteditor(edit=editopt, editform=editform)
revtoreuse = max(self.state)
- dsguard = None
if self.inmemory:
newnode = concludememorynode(repo, revtoreuse, p1,
self.external,
@@ -574,22 +572,26 @@
keepbranches=self.keepbranchesf,
date=self.date, wctx=self.wctx)
else:
- if ui.configbool('rebase', 'singletransaction'):
- dsguard = dirstateguard.dirstateguard(repo, 'rebase')
- with util.acceptintervention(dsguard):
- newnode = concludenode(repo, revtoreuse, p1, self.external,
- commitmsg=commitmsg,
- extrafn=_makeextrafn(self.extrafns),
- editor=editor,
- keepbranches=self.keepbranchesf,
- date=self.date)
+ newnode = concludenode(repo, revtoreuse, p1, self.external,
+ commitmsg=commitmsg,
+ extrafn=_makeextrafn(self.extrafns),
+ editor=editor,
+ keepbranches=self.keepbranchesf,
+ date=self.date)
+
+ if newnode is None:
+ # If it ended up being a no-op commit, then the normal
+ # merge state clean-up path doesn't happen, so do it
+ # here. Fix issue5494
+ mergemod.mergestate.clean(repo)
if newnode is not None:
newrev = repo[newnode].rev()
- for oldrev in self.state.iterkeys():
+ for oldrev in self.state:
self.state[oldrev] = newrev
if 'qtip' in repo.tags():
- updatemq(repo, self.state, self.skipped, **opts)
+ updatemq(repo, self.state, self.skipped,
+ **pycompat.strkwargs(opts))
# restore original working directory
# (we do this before stripping)
@@ -597,15 +599,13 @@
if newwd < 0:
# original directory is a parent of rebase set root or ignored
newwd = self.originalwd
- if (newwd not in [c.rev() for c in repo[None].parents()] and
- not self.inmemory):
+ if newwd not in [c.rev() for c in repo[None].parents()]:
ui.note(_("update back to initial working directory parent\n"))
hg.updaterepo(repo, newwd, False)
collapsedas = None
- if not self.keepf:
- if self.collapsef:
- collapsedas = newnode
+ if self.collapsef and not self.keepf:
+ collapsedas = newnode
clearrebased(ui, repo, self.destmap, self.state, self.skipped,
collapsedas, self.keepf, fm=fm)
@@ -845,9 +845,9 @@
retcode = rbsrt._preparenewrebase(destmap)
if retcode is not None:
return retcode
+ storecollapsemsg(repo, rbsrt.collapsemsg)
tr = None
- dsguard = None
singletr = ui.configbool('rebase', 'singletransaction')
if singletr:
@@ -859,12 +859,12 @@
with util.acceptintervention(tr):
# Same logic for the dirstate guard, except we don't create one when
# rebasing in-memory (it's not needed).
+ dsguard = None
if singletr and not inmemory:
dsguard = dirstateguard.dirstateguard(repo, 'rebase')
with util.acceptintervention(dsguard):
rbsrt._performrebase(tr)
-
- rbsrt._finishrebase()
+ rbsrt._finishrebase()
def _definedestmap(ui, repo, rbsrt, destf=None, srcf=None, basef=None,
revf=None, destspace=None):
@@ -914,12 +914,12 @@
dest = scmutil.revsingle(repo, destf)
else:
dest = repo[_destrebase(repo, base, destspace=destspace)]
- destf = str(dest)
+ destf = bytes(dest)
roots = [] # selected children of branching points
bpbase = {} # {branchingpoint: [origbase]}
for b in base: # group bases by branching points
- bp = repo.revs('ancestor(%d, %d)', b, dest).first()
+ bp = repo.revs('ancestor(%d, %d)', b, dest.rev()).first()
bpbase[bp] = bpbase.get(bp, []) + [b]
if None in bpbase:
# emulate the old behavior, showing "nothing to rebase" (a better
@@ -941,12 +941,12 @@
else:
ui.status(_('nothing to rebase - working directory '
'parent is also destination\n'))
- elif not repo.revs('%ld - ::%d', base, dest):
+ elif not repo.revs('%ld - ::%d', base, dest.rev()):
if basef:
ui.status(_('nothing to rebase - "base" %s is '
'already an ancestor of destination '
'%s\n') %
- ('+'.join(str(repo[r]) for r in base),
+ ('+'.join(bytes(repo[r]) for r in base),
dest))
else:
ui.status(_('nothing to rebase - working '
@@ -954,29 +954,19 @@
'ancestor of destination %s\n') % dest)
else: # can it happen?
ui.status(_('nothing to rebase from %s to %s\n') %
- ('+'.join(str(repo[r]) for r in base), dest))
+ ('+'.join(bytes(repo[r]) for r in base), dest))
return None
- # If rebasing the working copy parent, force in-memory merge to be off.
- #
- # This is because the extra work of checking out the newly rebased commit
- # outweights the benefits of rebasing in-memory, and executing an extra
- # update command adds a bit of overhead, so better to just do it on disk. In
- # all other cases leave it on.
- #
- # Note that there are cases where this isn't true -- e.g., rebasing large
- # stacks that include the WCP. However, I'm not yet sure where the cutoff
- # is.
+
rebasingwcp = repo['.'].rev() in rebaseset
ui.log("rebase", "", rebase_rebasing_wcp=rebasingwcp)
if rbsrt.inmemory and rebasingwcp:
- rbsrt.inmemory = False
# Check these since we did not before.
cmdutil.checkunfinished(repo)
cmdutil.bailifchanged(repo)
if not destf:
dest = repo[_destrebase(repo, rebaseset, destspace=destspace)]
- destf = str(dest)
+ destf = bytes(dest)
allsrc = revsetlang.formatspec('%ld', rebaseset)
alias = {'ALLSRC': allsrc}
@@ -1031,10 +1021,10 @@
return nullrev
if len(parents) == 1:
return parents.pop()
- raise error.Abort(_('unable to collapse on top of %s, there is more '
+ raise error.Abort(_('unable to collapse on top of %d, there is more '
'than one external parent: %s') %
(max(destancestors),
- ', '.join(str(p) for p in sorted(parents))))
+ ', '.join("%d" % p for p in sorted(parents))))
def concludememorynode(repo, rev, p1, p2, wctx=None,
commitmsg=None, editor=None, extrafn=None,
@@ -1052,9 +1042,9 @@
destphase = max(ctx.phase(), phases.draft)
overrides = {('phases', 'new-commit'): destphase}
+ if keepbranch:
+ overrides[('ui', 'allowemptycommit')] = True
with repo.ui.configoverride(overrides, 'rebase'):
- if keepbranch:
- repo.ui.setconfig('ui', 'allowemptycommit', True)
# Replicates the empty check in ``repo.commit``.
if wctx.isempty() and not repo.ui.configbool('ui', 'allowemptycommit'):
return None
@@ -1094,9 +1084,9 @@
destphase = max(ctx.phase(), phases.draft)
overrides = {('phases', 'new-commit'): destphase}
+ if keepbranch:
+ overrides[('ui', 'allowemptycommit')] = True
with repo.ui.configoverride(overrides, 'rebase'):
- if keepbranch:
- repo.ui.setconfig('ui', 'allowemptycommit', True)
# Commit might fail if unresolved files exist
if date is None:
date = ctx.date()
@@ -1106,7 +1096,7 @@
repo.dirstate.setbranch(repo[newnode].branch())
return newnode
-def rebasenode(repo, rev, p1, base, state, collapse, dest, wctx):
+def rebasenode(repo, rev, p1, base, collapse, dest, wctx):
'Rebase a single revision rev on top of p1 using base as merge ancestor'
# Merge phase
# Update to destination and merge it with local
@@ -1220,7 +1210,7 @@
`rebaseobsrevs`: set of obsolete revision in source
`rebaseobsskipped`: set of revisions from source skipped because they have
- successors in destination
+ successors in destination or no non-obsolete successor.
"""
# Obsolete node with successors not in dest leads to divergence
divergenceok = ui.configbool('experimental',
@@ -1228,7 +1218,7 @@
divergencebasecandidates = rebaseobsrevs - rebaseobsskipped
if divergencebasecandidates and not divergenceok:
- divhashes = (str(repo[r])
+ divhashes = (bytes(repo[r])
for r in divergencebasecandidates)
msg = _("this rebase will cause "
"divergences from: %s")
@@ -1436,7 +1426,7 @@
def isagitpatch(repo, patchname):
'Return true if the given patch is in git format'
mqpatch = os.path.join(repo.mq.path, patchname)
- for line in patch.linereader(file(mqpatch, 'rb')):
+ for line in patch.linereader(open(mqpatch, 'rb')):
if line.startswith('diff --git'):
return True
return False
@@ -1465,10 +1455,10 @@
for rev in sorted(mqrebase, reverse=True):
if rev not in skipped:
name, isgit = mqrebase[rev]
- repo.ui.note(_('updating mq patch %s to %s:%s\n') %
+ repo.ui.note(_('updating mq patch %s to %d:%s\n') %
(name, state[rev], repo[state[rev]]))
mq.qimport(repo, (), patchname=name, git=isgit,
- rev=[str(state[rev])])
+ rev=["%d" % state[rev]])
else:
# Rebased and skipped
skippedpatches.add(mqrebase[rev][0])
@@ -1550,7 +1540,7 @@
cleanup = True
if immutable:
repo.ui.warn(_("warning: can't clean up public changesets %s\n")
- % ', '.join(str(repo[r]) for r in immutable),
+ % ', '.join(bytes(repo[r]) for r in immutable),
hint=_("see 'hg help phases' for details"))
cleanup = False
@@ -1645,7 +1635,9 @@
roots = list(repo.set('roots(%ld)', sortedsrc[0]))
if not roots:
raise error.Abort(_('no matching revisions'))
- roots.sort()
+ def revof(r):
+ return r.rev()
+ roots = sorted(roots, key=revof)
state = dict.fromkeys(rebaseset, revtodo)
emptyrebase = (len(sortedsrc) == 1)
for root in roots:
@@ -1784,25 +1776,34 @@
`obsoletewithoutsuccessorindestination` is a set with obsolete revisions
without a successor in destination.
+
+ `obsoleteextinctsuccessors` is a set of obsolete revisions with only
+ obsolete successors.
"""
obsoletenotrebased = {}
obsoletewithoutsuccessorindestination = set([])
+ obsoleteextinctsuccessors = set([])
assert repo.filtername is None
cl = repo.changelog
nodemap = cl.nodemap
+ extinctnodes = set(cl.node(r) for r in repo.revs('extinct()'))
for srcrev in rebaseobsrevs:
srcnode = cl.node(srcrev)
destnode = cl.node(destmap[srcrev])
# XXX: more advanced APIs are required to handle split correctly
- successors = list(obsutil.allsuccessors(repo.obsstore, [srcnode]))
- if len(successors) == 1:
- # obsutil.allsuccessors includes node itself. When the list only
- # contains one element, it means there are no successors.
+ successors = set(obsutil.allsuccessors(repo.obsstore, [srcnode]))
+ # obsutil.allsuccessors includes node itself
+ successors.remove(srcnode)
+ if successors.issubset(extinctnodes):
+ # all successors are extinct
+ obsoleteextinctsuccessors.add(srcrev)
+ if not successors:
+ # no successor
obsoletenotrebased[srcrev] = None
else:
for succnode in successors:
- if succnode == srcnode or succnode not in nodemap:
+ if succnode not in nodemap:
continue
if cl.isancestor(succnode, destnode):
obsoletenotrebased[srcrev] = nodemap[succnode]
@@ -1811,11 +1812,14 @@
# If 'srcrev' has a successor in rebase set but none in
# destination (which would be catched above), we shall skip it
# and its descendants to avoid divergence.
- if any(nodemap[s] in destmap
- for s in successors if s != srcnode):
+ if any(nodemap[s] in destmap for s in successors):
obsoletewithoutsuccessorindestination.add(srcrev)
- return obsoletenotrebased, obsoletewithoutsuccessorindestination
+ return (
+ obsoletenotrebased,
+ obsoletewithoutsuccessorindestination,
+ obsoleteextinctsuccessors,
+ )
def summaryhook(ui, repo):
if not repo.vfs.exists('rebasestate'):
--- a/hgext/releasenotes.py Thu Mar 15 22:35:07 2018 -0700
+++ b/hgext/releasenotes.py Mon Mar 19 08:07:18 2018 -0700
@@ -311,8 +311,8 @@
title = block['lines'][0].strip() if block['lines'] else None
if i + 1 == len(blocks):
- raise error.Abort(_('release notes directive %s lacks content')
- % directive)
+ raise error.Abort(_('changeset %s: release notes directive %s '
+ 'lacks content') % (ctx, directive))
# Now search ahead and find all paragraphs attached to this
# admonition.
@@ -324,9 +324,12 @@
if pblock['type'] == 'margin':
continue
+ if pblock['type'] == 'admonition':
+ break
+
if pblock['type'] != 'paragraph':
- raise error.Abort(_('unexpected block in release notes '
- 'directive %s') % directive)
+ repo.ui.warn(_('changeset %s: unexpected block in release '
+ 'notes directive %s\n') % (ctx, directive))
if pblock['indent'] > 0:
paragraphs.append(pblock['lines'])
--- a/hgext/relink.py Thu Mar 15 22:35:07 2018 -0700
+++ b/hgext/relink.py Mon Mar 19 08:07:18 2018 -0700
@@ -168,8 +168,8 @@
source = os.path.join(src, f)
tgt = os.path.join(dst, f)
# Binary mode, so that read() works correctly, especially on Windows
- sfp = file(source, 'rb')
- dfp = file(tgt, 'rb')
+ sfp = open(source, 'rb')
+ dfp = open(tgt, 'rb')
sin = sfp.read(CHUNKLEN)
while sin:
din = dfp.read(CHUNKLEN)
@@ -187,7 +187,7 @@
relinked += 1
savedbytes += sz
except OSError as inst:
- ui.warn('%s: %s\n' % (tgt, str(inst)))
+ ui.warn('%s: %s\n' % (tgt, util.forcebytestr(inst)))
ui.progress(_('relinking'), None)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/remotenames.py Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,305 @@
+# remotenames.py - extension to display remotenames
+#
+# Copyright 2017 Augie Fackler <raf@durin42.com>
+# Copyright 2017 Sean Farley <sean@farley.io>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+""" showing remotebookmarks and remotebranches in UI
+
+By default both remotebookmarks and remotebranches are turned on. Config knobs
+to control them individually are as follows.
+
+Config options to tweak the default behaviour:
+
+remotenames.bookmarks
+ Boolean value to enable or disable showing of remotebookmarks
+
+remotenames.branches
+ Boolean value to enable or disable showing of remotebranches
+"""
+
+from __future__ import absolute_import
+
+from mercurial.i18n import _
+
+from mercurial.node import (
+ bin,
+)
+from mercurial import (
+ logexchange,
+ namespaces,
+ pycompat,
+ registrar,
+ revsetlang,
+ smartset,
+ templateutil,
+)
+
+if pycompat.ispy3:
+ import collections.abc
+ mutablemapping = collections.abc.MutableMapping
+else:
+ import collections
+ mutablemapping = collections.MutableMapping
+
+# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
+# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
+# be specifying the version(s) of Mercurial they are tested with, or
+# leave the attribute unspecified.
+testedwith = 'ships-with-hg-core'
+
+configtable = {}
+configitem = registrar.configitem(configtable)
+templatekeyword = registrar.templatekeyword()
+revsetpredicate = registrar.revsetpredicate()
+
+configitem('remotenames', 'bookmarks',
+ default=True,
+)
+configitem('remotenames', 'branches',
+ default=True,
+)
+
+class lazyremotenamedict(mutablemapping):
+ """
+ Read-only dict-like Class to lazily resolve remotename entries
+
+    We are doing that because remotenames startup was slow.
+    We lazily read the remotenames file once to figure out the potential entries
+    and store them in self.potentialentries. Then, when asked to resolve an
+    entry, if it is not in self.potentialentries, it isn't there; if it
+    is in self.potentialentries, we resolve it and store the result in
+    self.cache. The one case we cannot be lazy is when asked for all entries (keys).
+ """
+ def __init__(self, kind, repo):
+ self.cache = {}
+ self.potentialentries = {}
+ self._kind = kind # bookmarks or branches
+ self._repo = repo
+ self.loaded = False
+
+ def _load(self):
+ """ Read the remotenames file, store entries matching selected kind """
+ self.loaded = True
+ repo = self._repo
+ for node, rpath, rname in logexchange.readremotenamefile(repo,
+ self._kind):
+ name = rpath + '/' + rname
+ self.potentialentries[name] = (node, rpath, name)
+
+ def _resolvedata(self, potentialentry):
+ """ Check that the node for potentialentry exists and return it """
+ if not potentialentry in self.potentialentries:
+ return None
+ node, remote, name = self.potentialentries[potentialentry]
+ repo = self._repo
+ binnode = bin(node)
+ # if the node doesn't exist, skip it
+ try:
+ repo.changelog.rev(binnode)
+ except LookupError:
+ return None
+ # Skip closed branches
+ if (self._kind == 'branches' and repo[binnode].closesbranch()):
+ return None
+ return [binnode]
+
+ def __getitem__(self, key):
+ if not self.loaded:
+ self._load()
+ val = self._fetchandcache(key)
+ if val is not None:
+ return val
+ else:
+ raise KeyError()
+
+ def __iter__(self):
+ return iter(self.potentialentries)
+
+ def __len__(self):
+ return len(self.potentialentries)
+
+ def __setitem__(self):
+ raise NotImplementedError
+
+ def __delitem__(self):
+ raise NotImplementedError
+
+ def _fetchandcache(self, key):
+ if key in self.cache:
+ return self.cache[key]
+ val = self._resolvedata(key)
+ if val is not None:
+ self.cache[key] = val
+ return val
+ else:
+ return None
+
+ def keys(self):
+ """ Get a list of bookmark or branch names """
+ if not self.loaded:
+ self._load()
+ return self.potentialentries.keys()
+
+ def iteritems(self):
+ """ Iterate over (name, node) tuples """
+
+ if not self.loaded:
+ self._load()
+
+ for k, vtup in self.potentialentries.iteritems():
+ yield (k, [bin(vtup[0])])
+
+class remotenames(object):
+ """
+ This class encapsulates all the remotenames state. It also contains
+ methods to access that state in convenient ways. Remotenames are lazy
+ loaded. Whenever client code needs to ensure the freshest copy of
+ remotenames, use the `clearnames` method to force an eventual load.
+ """
+
+ def __init__(self, repo, *args):
+ self._repo = repo
+ self.clearnames()
+
+ def clearnames(self):
+ """ Clear all remote names state """
+ self.bookmarks = lazyremotenamedict("bookmarks", self._repo)
+ self.branches = lazyremotenamedict("branches", self._repo)
+ self._invalidatecache()
+
+ def _invalidatecache(self):
+ self._nodetobmarks = None
+ self._nodetobranch = None
+
+ def bmarktonodes(self):
+ return self.bookmarks
+
+ def nodetobmarks(self):
+ if not self._nodetobmarks:
+ bmarktonodes = self.bmarktonodes()
+ self._nodetobmarks = {}
+ for name, node in bmarktonodes.iteritems():
+ self._nodetobmarks.setdefault(node[0], []).append(name)
+ return self._nodetobmarks
+
+ def branchtonodes(self):
+ return self.branches
+
+ def nodetobranch(self):
+ if not self._nodetobranch:
+ branchtonodes = self.branchtonodes()
+ self._nodetobranch = {}
+ for name, nodes in branchtonodes.iteritems():
+ for node in nodes:
+ self._nodetobranch.setdefault(node, []).append(name)
+ return self._nodetobranch
+
+def reposetup(ui, repo):
+ if not repo.local():
+ return
+
+ repo._remotenames = remotenames(repo)
+ ns = namespaces.namespace
+
+ if ui.configbool('remotenames', 'bookmarks'):
+ remotebookmarkns = ns(
+ 'remotebookmarks',
+ templatename='remotebookmarks',
+ colorname='remotebookmark',
+ logfmt='remote bookmark: %s\n',
+ listnames=lambda repo: repo._remotenames.bmarktonodes().keys(),
+ namemap=lambda repo, name:
+ repo._remotenames.bmarktonodes().get(name, []),
+ nodemap=lambda repo, node:
+ repo._remotenames.nodetobmarks().get(node, []))
+ repo.names.addnamespace(remotebookmarkns)
+
+ if ui.configbool('remotenames', 'branches'):
+ remotebranchns = ns(
+ 'remotebranches',
+ templatename='remotebranches',
+ colorname='remotebranch',
+ logfmt='remote branch: %s\n',
+ listnames = lambda repo: repo._remotenames.branchtonodes().keys(),
+ namemap = lambda repo, name:
+ repo._remotenames.branchtonodes().get(name, []),
+ nodemap = lambda repo, node:
+ repo._remotenames.nodetobranch().get(node, []))
+ repo.names.addnamespace(remotebranchns)
+
+@templatekeyword('remotenames', requires={'repo', 'ctx', 'templ'})
+def remotenameskw(context, mapping):
+ """List of strings. Remote names associated with the changeset."""
+ repo = context.resource(mapping, 'repo')
+ ctx = context.resource(mapping, 'ctx')
+
+ remotenames = []
+ if 'remotebookmarks' in repo.names:
+ remotenames = repo.names['remotebookmarks'].names(repo, ctx.node())
+
+ if 'remotebranches' in repo.names:
+ remotenames += repo.names['remotebranches'].names(repo, ctx.node())
+
+ return templateutil.compatlist(context, mapping, 'remotename', remotenames,
+ plural='remotenames')
+
+@templatekeyword('remotebookmarks', requires={'repo', 'ctx', 'templ'})
+def remotebookmarkskw(context, mapping):
+ """List of strings. Remote bookmarks associated with the changeset."""
+ repo = context.resource(mapping, 'repo')
+ ctx = context.resource(mapping, 'ctx')
+
+ remotebmarks = []
+ if 'remotebookmarks' in repo.names:
+ remotebmarks = repo.names['remotebookmarks'].names(repo, ctx.node())
+
+ return templateutil.compatlist(context, mapping, 'remotebookmark',
+ remotebmarks, plural='remotebookmarks')
+
+@templatekeyword('remotebranches', requires={'repo', 'ctx', 'templ'})
+def remotebrancheskw(context, mapping):
+ """List of strings. Remote branches associated with the changeset."""
+ repo = context.resource(mapping, 'repo')
+ ctx = context.resource(mapping, 'ctx')
+
+ remotebranches = []
+ if 'remotebranches' in repo.names:
+ remotebranches = repo.names['remotebranches'].names(repo, ctx.node())
+
+ return templateutil.compatlist(context, mapping, 'remotebranch',
+ remotebranches, plural='remotebranches')
+
+def _revsetutil(repo, subset, x, rtypes):
+ """utility function to return a set of revs based on the rtypes"""
+
+ revs = set()
+ cl = repo.changelog
+ for rtype in rtypes:
+ if rtype in repo.names:
+ ns = repo.names[rtype]
+ for name in ns.listnames(repo):
+ revs.update(ns.nodes(repo, name))
+
+ results = (cl.rev(n) for n in revs if cl.hasnode(n))
+ return subset & smartset.baseset(sorted(results))
+
+@revsetpredicate('remotenames()')
+def remotenamesrevset(repo, subset, x):
+ """All changesets which have a remotename on them."""
+ revsetlang.getargs(x, 0, 0, _("remotenames takes no arguments"))
+ return _revsetutil(repo, subset, x, ('remotebookmarks', 'remotebranches'))
+
+@revsetpredicate('remotebranches()')
+def remotebranchesrevset(repo, subset, x):
+ """All changesets which are branch heads on remotes."""
+ revsetlang.getargs(x, 0, 0, _("remotebranches takes no arguments"))
+ return _revsetutil(repo, subset, x, ('remotebranches',))
+
+@revsetpredicate('remotebookmarks()')
+def remotebmarksrevset(repo, subset, x):
+ """All changesets which have bookmarks on remotes."""
+ revsetlang.getargs(x, 0, 0, _("remotebookmarks takes no arguments"))
+ return _revsetutil(repo, subset, x, ('remotebookmarks',))
--- a/hgext/schemes.py Thu Mar 15 22:35:07 2018 -0700
+++ b/hgext/schemes.py Mon Mar 19 08:07:18 2018 -0700
@@ -94,7 +94,7 @@
parts = parts[:-1]
else:
tail = ''
- context = dict((str(i + 1), v) for i, v in enumerate(parts))
+ context = dict(('%d' % (i + 1), v) for i, v in enumerate(parts))
return ''.join(self.templater.process(self.url, context)) + tail
def hasdriveletter(orig, path):
--- a/hgext/share.py Thu Mar 15 22:35:07 2018 -0700
+++ b/hgext/share.py Mon Mar 19 08:07:18 2018 -0700
@@ -52,9 +52,6 @@
util,
)
-repository = hg.repository
-parseurl = hg.parseurl
-
cmdtable = {}
command = registrar.command(cmdtable)
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
@@ -135,27 +132,9 @@
return False
return hg.sharedbookmarks in shared
-def _getsrcrepo(repo):
- """
- Returns the source repository object for a given shared repository.
- If repo is not a shared repository, return None.
- """
- if repo.sharedpath == repo.path:
- return None
-
- if util.safehasattr(repo, 'srcrepo') and repo.srcrepo:
- return repo.srcrepo
-
- # the sharedpath always ends in the .hg; we want the path to the repo
- source = repo.vfs.split(repo.sharedpath)[0]
- srcurl, branches = parseurl(source)
- srcrepo = repository(repo.ui, srcurl)
- repo.srcrepo = srcrepo
- return srcrepo
-
def getbkfile(orig, repo):
if _hassharedbookmarks(repo):
- srcrepo = _getsrcrepo(repo)
+ srcrepo = hg.sharedreposource(repo)
if srcrepo is not None:
# just orig(srcrepo) doesn't work as expected, because
# HG_PENDING refers repo.root.
@@ -186,7 +165,7 @@
orig(self, tr)
if _hassharedbookmarks(self._repo):
- srcrepo = _getsrcrepo(self._repo)
+ srcrepo = hg.sharedreposource(self._repo)
if srcrepo is not None:
category = 'share-bookmarks'
tr.addpostclose(category, lambda tr: self._writerepo(srcrepo))
@@ -196,6 +175,6 @@
orig(self, repo)
if _hassharedbookmarks(self._repo):
- srcrepo = _getsrcrepo(self._repo)
+ srcrepo = hg.sharedreposource(self._repo)
if srcrepo is not None:
orig(self, srcrepo)
--- a/hgext/shelve.py Thu Mar 15 22:35:07 2018 -0700
+++ b/hgext/shelve.py Mon Mar 19 08:07:18 2018 -0700
@@ -25,6 +25,7 @@
import collections
import errno
import itertools
+import stat
from mercurial.i18n import _
from mercurial import (
@@ -55,6 +56,7 @@
from . import (
rebase,
)
+from mercurial.utils import dateutil
cmdtable = {}
command = registrar.command(cmdtable)
@@ -192,7 +194,7 @@
d['nodestoremove'] = [nodemod.bin(h)
for h in d['nodestoremove'].split(' ')]
except (ValueError, TypeError, KeyError) as err:
- raise error.CorruptedState(str(err))
+ raise error.CorruptedState(pycompat.bytestr(err))
@classmethod
def _getversion(cls, repo):
@@ -201,7 +203,7 @@
try:
version = int(fp.readline().strip())
except ValueError as err:
- raise error.CorruptedState(str(err))
+ raise error.CorruptedState(pycompat.bytestr(err))
finally:
fp.close()
return version
@@ -251,7 +253,7 @@
if d.get('activebook', '') != cls._noactivebook:
obj.activebookmark = d.get('activebook', '')
except (error.RepoLookupError, KeyError) as err:
- raise error.CorruptedState(str(err))
+ raise error.CorruptedState(pycompat.bytestr(err))
return obj
@@ -271,7 +273,7 @@
"activebook": activebook or cls._noactivebook
}
scmutil.simplekeyvaluefile(repo.vfs, cls._filename)\
- .write(info, firstline=str(cls._version))
+ .write(info, firstline=("%d" % cls._version))
@classmethod
def clear(cls, repo):
@@ -282,7 +284,7 @@
maxbackups = repo.ui.configint('shelve', 'maxbackups')
hgfiles = [f for f in vfs.listdir()
if f.endswith('.' + patchextension)]
- hgfiles = sorted([(vfs.stat(f).st_mtime, f) for f in hgfiles])
+ hgfiles = sorted([(vfs.stat(f)[stat.ST_MTIME], f) for f in hgfiles])
if 0 < maxbackups and maxbackups < len(hgfiles):
bordermtime = hgfiles[-maxbackups][0]
else:
@@ -541,7 +543,7 @@
if not pfx or sfx != patchextension:
continue
st = shelvedfile(repo, name).stat()
- info.append((st.st_mtime, shelvedfile(repo, pfx).filename()))
+ info.append((st[stat.ST_MTIME], shelvedfile(repo, pfx).filename()))
return sorted(info, reverse=True)
def listcmd(ui, repo, pats, opts):
@@ -563,7 +565,8 @@
continue
ui.write(' ' * (16 - len(sname)))
used = 16
- age = '(%s)' % templatefilters.age(util.makedate(mtime), abbrev=True)
+ date = dateutil.makedate(mtime)
+ age = '(%s)' % templatefilters.age(date, abbrev=True)
ui.write(age, label='shelve.age')
ui.write(' ' * (12 - len(age)))
used += 12
@@ -619,7 +622,7 @@
repo.vfs.rename('unshelverebasestate', 'rebasestate')
try:
rebase.rebase(ui, repo, **{
- 'abort' : True
+ r'abort' : True
})
except Exception:
repo.vfs.rename('rebasestate', 'unshelverebasestate')
@@ -648,7 +651,7 @@
ui.pushbuffer(True)
cmdutil.revert(ui, repo, shelvectx, repo.dirstate.parents(),
*pathtofiles(repo, files),
- **{'no_backup': True})
+ **{r'no_backup': True})
ui.popbuffer()
def restorebranch(ui, repo, branchtorestore):
@@ -681,7 +684,7 @@
repo.vfs.rename('unshelverebasestate', 'rebasestate')
try:
rebase.rebase(ui, repo, **{
- 'continue' : True
+ r'continue' : True
})
except Exception:
repo.vfs.rename('rebasestate', 'unshelverebasestate')
@@ -744,10 +747,10 @@
ui.status(_('rebasing shelved changes\n'))
try:
rebase.rebase(ui, repo, **{
- 'rev': [shelvectx.rev()],
- 'dest': str(tmpwctx.rev()),
- 'keep': True,
- 'tool': opts.get('tool', ''),
+ r'rev': [shelvectx.rev()],
+ r'dest': "%d" % tmpwctx.rev(),
+ r'keep': True,
+ r'tool': opts.get('tool', ''),
})
except error.InterventionRequired:
tr.close()
@@ -881,7 +884,7 @@
raise
cmdutil.wrongtooltocontinue(repo, _('unshelve'))
except error.CorruptedState as err:
- ui.debug(str(err) + '\n')
+ ui.debug(pycompat.bytestr(err) + '\n')
if continuef:
msg = _('corrupted shelved state file')
hint = _('please run hg unshelve --abort to abort unshelve '
--- a/hgext/show.py Thu Mar 15 22:35:07 2018 -0700
+++ b/hgext/show.py Mon Mar 19 08:07:18 2018 -0700
@@ -39,6 +39,7 @@
error,
formatter,
graphmod,
+ logcmdutil,
phases,
pycompat,
registrar,
@@ -125,7 +126,7 @@
ui.write('\n')
for name, func in sorted(views.items()):
- ui.write(('%s\n') % func.__doc__)
+ ui.write(('%s\n') % pycompat.sysbytes(func.__doc__))
ui.write('\n')
raise error.Abort(_('no view requested'),
@@ -148,7 +149,7 @@
elif fn._csettopic:
ref = 'show%s' % fn._csettopic
spec = formatter.lookuptemplate(ui, ref, template)
- displayer = cmdutil.changeset_templater(ui, repo, spec, buffered=True)
+ displayer = logcmdutil.changesettemplater(ui, repo, spec, buffered=True)
return fn(ui, repo, displayer)
else:
return fn(ui, repo)
@@ -259,7 +260,7 @@
shortesttmpl = formatter.maketemplater(ui, '{shortest(node, %d)}' % nodelen,
resources=tres)
def shortest(ctx):
- return shortesttmpl.render({'ctx': ctx, 'node': ctx.hex()})
+ return shortesttmpl.renderdefault({'ctx': ctx, 'node': ctx.hex()})
# We write out new heads to aid in DAG awareness and to help with decision
# making on how the stack should be reconciled with commits made since the
@@ -409,8 +410,8 @@
revdag = graphmod.dagwalker(repo, revs)
ui.setconfig('experimental', 'graphshorten', True)
- cmdutil.displaygraph(ui, repo, revdag, displayer, graphmod.asciiedges,
- props={'nodelen': nodelen})
+ logcmdutil.displaygraph(ui, repo, revdag, displayer, graphmod.asciiedges,
+ props={'nodelen': nodelen})
def extsetup(ui):
# Alias `hg <prefix><view>` to `hg show <view>`.
--- a/hgext/sparse.py Thu Mar 15 22:35:07 2018 -0700
+++ b/hgext/sparse.py Mon Mar 19 08:07:18 2018 -0700
@@ -75,12 +75,12 @@
from mercurial.i18n import _
from mercurial import (
- cmdutil,
commands,
dirstate,
error,
extensions,
hg,
+ logcmdutil,
match as matchmod,
pycompat,
registrar,
@@ -126,7 +126,7 @@
entry[1].append(('', 'sparse', None,
"limit to changesets affecting the sparse checkout"))
- def _logrevs(orig, repo, opts):
+ def _initialrevs(orig, repo, opts):
revs = orig(repo, opts)
if opts.get('sparse'):
sparsematch = sparse.matcher(repo)
@@ -135,7 +135,7 @@
return any(f for f in ctx.files() if sparsematch(f))
revs = revs.filter(ctxmatch)
return revs
- extensions.wrapfunction(cmdutil, '_logrevs', _logrevs)
+ extensions.wrapfunction(logcmdutil, '_initialrevs', _initialrevs)
def _clonesparsecmd(orig, ui, repo, *args, **opts):
include_pat = opts.get('include')
@@ -194,7 +194,11 @@
"""
def walk(orig, self, match, subrepos, unknown, ignored, full=True):
- match = matchmod.intersectmatchers(match, self._sparsematcher)
+ # hack to not exclude explicitly-specified paths so that they can
+ # be warned later on e.g. dirstate.add()
+ em = matchmod.exact(match._root, match._cwd, match.files())
+ sm = matchmod.unionmatcher([self._sparsematcher, em])
+ match = matchmod.intersectmatchers(match, sm)
return orig(self, match, subrepos, unknown, ignored, full)
extensions.wrapfunction(dirstate.dirstate, 'walk', walk)
--- a/hgext/split.py Thu Mar 15 22:35:07 2018 -0700
+++ b/hgext/split.py Mon Mar 19 08:07:18 2018 -0700
@@ -24,6 +24,7 @@
hg,
obsolete,
phases,
+ pycompat,
registrar,
revsetlang,
scmutil,
@@ -160,7 +161,7 @@
'interactive': True,
'message': header + ctx.description(),
})
- commands.commit(ui, repo, **opts)
+ commands.commit(ui, repo, **pycompat.strkwargs(opts))
newctx = repo['.']
committed.append(newctx)
@@ -172,6 +173,6 @@
return committed[-1]
-def dorebase(ui, repo, src, dest):
+def dorebase(ui, repo, src, destctx):
rebase.rebase(ui, repo, rev=[revsetlang.formatspec('%ld', src)],
- dest=revsetlang.formatspec('%d', dest))
+ dest=revsetlang.formatspec('%d', destctx.rev()))
--- a/hgext/strip.py Thu Mar 15 22:35:07 2018 -0700
+++ b/hgext/strip.py Mon Mar 19 08:07:18 2018 -0700
@@ -181,13 +181,10 @@
strippedrevs = revs.union(descendants)
roots = revs.difference(descendants)
- update = False
# if one of the wdir parent is stripped we'll need
# to update away to an earlier revision
- for p in repo.dirstate.parents():
- if p != nullid and cl.rev(p) in strippedrevs:
- update = True
- break
+ update = any(p != nullid and cl.rev(p) in strippedrevs
+ for p in repo.dirstate.parents())
rootnodes = set(cl.node(r) for r in roots)
@@ -215,7 +212,7 @@
# only reset the dirstate for files that would actually change
# between the working context and uctx
- descendantrevs = repo.revs("%s::." % uctx.rev())
+ descendantrevs = repo.revs(b"%d::.", uctx.rev())
changedfiles = []
for rev in descendantrevs:
# blindly reset the files, regardless of what actually changed
--- a/hgext/transplant.py Thu Mar 15 22:35:07 2018 -0700
+++ b/hgext/transplant.py Mon Mar 19 08:07:18 2018 -0700
@@ -24,6 +24,7 @@
error,
exchange,
hg,
+ logcmdutil,
match,
merge,
node as nodemod,
@@ -119,7 +120,8 @@
opener=self.opener)
def getcommiteditor():
editform = cmdutil.mergeeditform(repo[None], 'transplant')
- return cmdutil.getcommiteditor(editform=editform, **opts)
+ return cmdutil.getcommiteditor(editform=editform,
+ **pycompat.strkwargs(opts))
self.getcommiteditor = getcommiteditor
def applied(self, repo, node, parent):
@@ -160,7 +162,7 @@
tr = repo.transaction('transplant')
for rev in revs:
node = revmap[rev]
- revstr = '%s:%s' % (rev, nodemod.short(node))
+ revstr = '%d:%s' % (rev, nodemod.short(node))
if self.applied(repo, node, p1):
self.ui.warn(_('skipping already applied revision %s\n') %
@@ -194,7 +196,7 @@
skipmerge = False
if parents[1] != revlog.nullid:
if not opts.get('parent'):
- self.ui.note(_('skipping merge changeset %s:%s\n')
+ self.ui.note(_('skipping merge changeset %d:%s\n')
% (rev, nodemod.short(node)))
skipmerge = True
else:
@@ -210,7 +212,7 @@
patchfile = None
else:
fd, patchfile = tempfile.mkstemp(prefix='hg-transplant-')
- fp = os.fdopen(fd, pycompat.sysstr('w'))
+ fp = os.fdopen(fd, r'wb')
gen = patch.diff(source, parent, node, opts=diffopts)
for chunk in gen:
fp.write(chunk)
@@ -258,7 +260,7 @@
self.ui.status(_('filtering %s\n') % patchfile)
user, date, msg = (changelog[1], changelog[2], changelog[4])
fd, headerfile = tempfile.mkstemp(prefix='hg-transplant-')
- fp = os.fdopen(fd, pycompat.sysstr('w'))
+ fp = os.fdopen(fd, r'wb')
fp.write("# HG changeset patch\n")
fp.write("# User %s\n" % user)
fp.write("# Date %d %d\n" % date)
@@ -273,7 +275,7 @@
},
onerr=error.Abort, errprefix=_('filter failed'),
blockedtag='transplant_filter')
- user, date, msg = self.parselog(file(headerfile))[1:4]
+ user, date, msg = self.parselog(open(headerfile, 'rb'))[1:4]
finally:
os.unlink(headerfile)
@@ -309,7 +311,7 @@
p1 = repo.dirstate.p1()
p2 = node
self.log(user, date, message, p1, p2, merge=merge)
- self.ui.write(str(inst) + '\n')
+ self.ui.write(util.forcebytestr(inst) + '\n')
raise TransplantError(_('fix up the working directory and run '
'hg transplant --continue'))
else:
@@ -501,7 +503,7 @@
def browserevs(ui, repo, nodes, opts):
'''interactively transplant changesets'''
- displayer = cmdutil.show_changeset(ui, repo, opts)
+ displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
transplants = []
merges = []
prompt = _('apply changeset? [ynmpcq?]:'
@@ -646,6 +648,7 @@
raise error.Abort(_('--all is incompatible with a '
'revision list'))
+ opts = pycompat.byteskwargs(opts)
checkopts(opts, revs)
if not opts.get('log'):
@@ -741,10 +744,11 @@
templatekeyword = registrar.templatekeyword()
-@templatekeyword('transplanted')
-def kwtransplanted(repo, ctx, **args):
+@templatekeyword('transplanted', requires={'ctx'})
+def kwtransplanted(context, mapping):
"""String. The node identifier of the transplanted
changeset if any."""
+ ctx = context.resource(mapping, 'ctx')
n = ctx.extra().get('transplant_source')
return n and nodemod.hex(n) or ''
--- a/hgext/uncommit.py Thu Mar 15 22:35:07 2018 -0700
+++ b/hgext/uncommit.py Mon Mar 19 08:07:18 2018 -0700
@@ -51,7 +51,7 @@
# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'
-def _commitfiltered(repo, ctx, match, allowempty):
+def _commitfiltered(repo, ctx, match, keepcommit):
"""Recommit ctx with changed files not in match. Return the new
node identifier, or None if nothing changed.
"""
@@ -66,7 +66,7 @@
files = (initialfiles - exclude)
# return the p1 so that we don't create an obsmarker later
- if not files and not allowempty:
+ if not keepcommit:
return ctx.parents()[0].node()
# Filter copies
@@ -151,13 +151,16 @@
files to their uncommitted state. This means that files modified or
deleted in the changeset will be left unchanged, and so will remain
modified in the working directory.
+
+ If no files are specified, the commit will be pruned, unless --keep is
+ given.
"""
opts = pycompat.byteskwargs(opts)
with repo.wlock(), repo.lock():
if not pats and not repo.ui.configbool('experimental',
- 'uncommitondirtywdir'):
+ 'uncommitondirtywdir'):
cmdutil.bailifchanged(repo)
old = repo['.']
rewriteutil.precheck(repo, [old.rev()], 'uncommit')
@@ -166,7 +169,8 @@
with repo.transaction('uncommit'):
match = scmutil.match(old, pats, opts)
- newid = _commitfiltered(repo, old, match, opts.get('keep'))
+ keepcommit = opts.get('keep') or pats
+ newid = _commitfiltered(repo, old, match, keepcommit)
if newid is None:
ui.status(_("nothing to uncommit\n"))
return 1
--- a/i18n/hggettext Thu Mar 15 22:35:07 2018 -0700
+++ b/i18n/hggettext Mon Mar 19 08:07:18 2018 -0700
@@ -104,7 +104,8 @@
"""
mod = importpath(path)
if not path.startswith('mercurial/') and mod.__doc__:
- src = open(path).read()
+ with open(path) as fobj:
+ src = fobj.read()
lineno = 1 + offset(src, mod.__doc__, path, 7)
print(poentry(path, lineno, mod.__doc__))
@@ -143,7 +144,8 @@
def rawtext(path):
- src = open(path).read()
+ with open(path) as f:
+ src = f.read()
print(poentry(path, 1, src))
--- a/mercurial/__init__.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/__init__.py Mon Mar 19 08:07:18 2018 -0700
@@ -31,6 +31,9 @@
# Only handle Mercurial-related modules.
if not fullname.startswith(('mercurial.', 'hgext.', 'hgext3rd.')):
return None
+ # don't try to parse binary
+ if fullname.startswith('mercurial.cext.'):
+ return None
# third-party packages are expected to be dual-version clean
if fullname.startswith('mercurial.thirdparty'):
return None
--- a/mercurial/archival.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/archival.py Mon Mar 19 08:07:18 2018 -0700
@@ -21,6 +21,8 @@
error,
formatter,
match as matchmod,
+ pycompat,
+ scmutil,
util,
vfs as vfsmod,
)
@@ -37,7 +39,7 @@
if prefix:
prefix = util.normpath(prefix)
else:
- if not isinstance(dest, str):
+ if not isinstance(dest, bytes):
raise ValueError('dest must be string if no prefix')
prefix = os.path.basename(dest)
lower = prefix.lower()
@@ -76,29 +78,27 @@
return repo[rev]
return repo['null']
+# {tags} on ctx includes local tags and 'tip', with no current way to limit
+# that to global tags. Therefore, use {latesttag} as a substitute when
+# the distance is 0, since that will be the list of global tags on ctx.
+_defaultmetatemplate = br'''
+repo: {root}
+node: {ifcontains(rev, revset("wdir()"), "{p1node}{dirty}", "{node}")}
+branch: {branch|utf8}
+{ifeq(latesttagdistance, 0, join(latesttag % "tag: {tag}", "\n"),
+ separate("\n",
+ join(latesttag % "latesttag: {tag}", "\n"),
+ "latesttagdistance: {latesttagdistance}",
+ "changessincelatesttag: {changessincelatesttag}"))}
+'''[1:] # drop leading '\n'
+
def buildmetadata(ctx):
'''build content of .hg_archival.txt'''
repo = ctx.repo()
- default = (
- r'repo: {root}\n'
- r'node: {ifcontains(rev, revset("wdir()"),'
- r'"{p1node}{dirty}", "{node}")}\n'
- r'branch: {branch|utf8}\n'
-
- # {tags} on ctx includes local tags and 'tip', with no current way to
- # limit that to global tags. Therefore, use {latesttag} as a substitute
- # when the distance is 0, since that will be the list of global tags on
- # ctx.
- r'{ifeq(latesttagdistance, 0, latesttag % "tag: {tag}\n",'
- r'"{latesttag % "latesttag: {tag}\n"}'
- r'latesttagdistance: {latesttagdistance}\n'
- r'changessincelatesttag: {changessincelatesttag}\n")}'
- )
-
opts = {
'template': repo.ui.config('experimental', 'archivemetatemplate',
- default)
+ _defaultmetatemplate)
}
out = util.stringio()
@@ -125,7 +125,7 @@
def __init__(self, *args, **kw):
timestamp = None
- if 'timestamp' in kw:
+ if r'timestamp' in kw:
timestamp = kw.pop(r'timestamp')
if timestamp is None:
self.timestamp = time.time()
@@ -142,8 +142,8 @@
flags = 0
if fname:
flags = gzip.FNAME
- self.fileobj.write(chr(flags))
- gzip.write32u(self.fileobj, long(self.timestamp))
+ self.fileobj.write(pycompat.bytechr(flags))
+ gzip.write32u(self.fileobj, int(self.timestamp))
self.fileobj.write('\002')
self.fileobj.write('\377')
if fname:
@@ -155,30 +155,34 @@
def taropen(mode, name='', fileobj=None):
if kind == 'gz':
- mode = mode[0]
+ mode = mode[0:1]
if not fileobj:
fileobj = open(name, mode + 'b')
- gzfileobj = self.GzipFileWithTime(name, mode + 'b',
+ gzfileobj = self.GzipFileWithTime(name,
+ pycompat.sysstr(mode + 'b'),
zlib.Z_BEST_COMPRESSION,
fileobj, timestamp=mtime)
self.fileobj = gzfileobj
- return tarfile.TarFile.taropen(name, mode, gzfileobj)
+ return tarfile.TarFile.taropen(
+ name, pycompat.sysstr(mode), gzfileobj)
else:
- return tarfile.open(name, mode + kind, fileobj)
+ return tarfile.open(
+ name, pycompat.sysstr(mode + kind), fileobj)
- if isinstance(dest, str):
+ if isinstance(dest, bytes):
self.z = taropen('w:', name=dest)
else:
self.z = taropen('w|', fileobj=dest)
def addfile(self, name, mode, islink, data):
+ name = pycompat.fsdecode(name)
i = tarfile.TarInfo(name)
i.mtime = self.mtime
i.size = len(data)
if islink:
i.type = tarfile.SYMTYPE
i.mode = 0o777
- i.linkname = data
+ i.linkname = pycompat.fsdecode(data)
data = None
i.size = 0
else:
@@ -191,35 +195,12 @@
if self.fileobj:
self.fileobj.close()
-class tellable(object):
- '''provide tell method for zipfile.ZipFile when writing to http
- response file object.'''
-
- def __init__(self, fp):
- self.fp = fp
- self.offset = 0
-
- def __getattr__(self, key):
- return getattr(self.fp, key)
-
- def write(self, s):
- self.fp.write(s)
- self.offset += len(s)
-
- def tell(self):
- return self.offset
-
class zipit(object):
'''write archive to zip file or stream. can write uncompressed,
or compressed with deflate.'''
def __init__(self, dest, mtime, compress=True):
- if not isinstance(dest, str):
- try:
- dest.tell()
- except (AttributeError, IOError):
- dest = tellable(dest)
- self.z = zipfile.ZipFile(dest, 'w',
+ self.z = zipfile.ZipFile(pycompat.fsdecode(dest), r'w',
compress and zipfile.ZIP_DEFLATED or
zipfile.ZIP_STORED)
@@ -233,7 +214,7 @@
self.date_time = time.gmtime(mtime)[:6]
def addfile(self, name, mode, islink, data):
- i = zipfile.ZipInfo(name, self.date_time)
+ i = zipfile.ZipInfo(pycompat.fsdecode(name), self.date_time)
i.compress_type = self.z.compression
# unzip will not honor unix file modes unless file creator is
# set to unix (id 3).
@@ -268,7 +249,7 @@
if islink:
self.opener.symlink(data, name)
return
- f = self.opener(name, "w", atomictemp=True)
+ f = self.opener(name, "w", atomictemp=False)
f.write(data)
f.close()
destfile = os.path.join(self.basedir, name)
@@ -339,6 +320,7 @@
total = len(files)
if total:
files.sort()
+ scmutil.fileprefetchhooks(repo, ctx, files)
repo.ui.progress(_('archiving'), 0, unit=_('files'), total=total)
for i, f in enumerate(files):
ff = ctx.flags(f)
--- a/mercurial/bookmarks.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/bookmarks.py Mon Mar 19 08:07:18 2018 -0700
@@ -84,7 +84,7 @@
# - node in nm, for non-20-bytes entry
# - split(...), for string without ' '
repo.ui.warn(_('malformed line in .hg/bookmarks: %r\n')
- % line)
+ % pycompat.bytestr(line))
except IOError as inst:
if inst.errno != errno.ENOENT:
raise
@@ -103,30 +103,21 @@
self._aclean = False
def __setitem__(self, *args, **kwargs):
- msg = ("'bookmarks[name] = node' is deprecated, "
- "use 'bookmarks.applychanges'")
- self._repo.ui.deprecwarn(msg, '4.3')
- self._set(*args, **kwargs)
+ raise error.ProgrammingError("use 'bookmarks.applychanges' instead")
def _set(self, key, value):
self._clean = False
return dict.__setitem__(self, key, value)
def __delitem__(self, key):
- msg = ("'del bookmarks[name]' is deprecated, "
- "use 'bookmarks.applychanges'")
- self._repo.ui.deprecwarn(msg, '4.3')
- self._del(key)
+ raise error.ProgrammingError("use 'bookmarks.applychanges' instead")
def _del(self, key):
self._clean = False
return dict.__delitem__(self, key)
def update(self, *others):
- msg = ("bookmarks.update(...)' is deprecated, "
- "use 'bookmarks.applychanges'")
- self._repo.ui.deprecwarn(msg, '4.5')
- return dict.update(self, *others)
+ raise error.ProgrammingError("use 'bookmarks.applychanges' instead")
def applychanges(self, repo, tr, changes):
"""Apply a list of changes to bookmarks
@@ -146,12 +137,6 @@
bmchanges[name] = (old, node)
self._recordchange(tr)
- def recordchange(self, tr):
- msg = ("'bookmarks.recorchange' is deprecated, "
- "use 'bookmarks.applychanges'")
- self._repo.ui.deprecwarn(msg, '4.3')
- return self._recordchange(tr)
-
def _recordchange(self, tr):
"""record that bookmarks have been changed in a transaction
@@ -194,7 +179,7 @@
self._aclean = True
def _write(self, fp):
- for name, node in self.iteritems():
+ for name, node in sorted(self.iteritems()):
fp.write("%s %s\n" % (hex(node), encoding.fromlocal(name)))
self._clean = True
self._repo.invalidatevolatilesets()
--- a/mercurial/branchmap.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/branchmap.py Mon Mar 19 08:07:18 2018 -0700
@@ -18,6 +18,7 @@
from . import (
encoding,
error,
+ pycompat,
scmutil,
util,
)
@@ -52,18 +53,19 @@
filteredhash=filteredhash)
if not partial.validfor(repo):
# invalidate the cache
- raise ValueError('tip differs')
+ raise ValueError(r'tip differs')
cl = repo.changelog
for l in lines:
if not l:
continue
node, state, label = l.split(" ", 2)
if state not in 'oc':
- raise ValueError('invalid branch state')
+ raise ValueError(r'invalid branch state')
label = encoding.tolocal(label.strip())
node = bin(node)
if not cl.hasnode(node):
- raise ValueError('node %s does not exist' % hex(node))
+ raise ValueError(
+ r'node %s does not exist' % pycompat.sysstr(hex(node)))
partial.setdefault(label, []).append(node)
if state == 'c':
partial._closednodes.add(node)
@@ -73,7 +75,7 @@
if repo.filtername is not None:
msg += ' (%s)' % repo.filtername
msg += ': %s\n'
- repo.ui.debug(msg % inst)
+ repo.ui.debug(msg % pycompat.bytestr(inst))
partial = None
return partial
@@ -253,7 +255,8 @@
repo.filtername, len(self), nodecount)
except (IOError, OSError, error.Abort) as inst:
# Abort may be raised by read only opener, so log and continue
- repo.ui.debug("couldn't write branch cache: %s\n" % inst)
+ repo.ui.debug("couldn't write branch cache: %s\n" %
+ util.forcebytestr(inst))
def update(self, repo, revgen):
"""Given a branchhead cache, self, that may have extra nodes or be
@@ -375,7 +378,7 @@
self._rbcrevs[:] = data
except (IOError, OSError) as inst:
repo.ui.debug("couldn't read revision branch cache: %s\n" %
- inst)
+ util.forcebytestr(inst))
# remember number of good records on disk
self._rbcrevslen = min(len(self._rbcrevs) // _rbcrecsize,
len(repo.changelog))
@@ -451,6 +454,26 @@
self._setcachedata(rev, reponode, branchidx)
return b, close
+ def setdata(self, branch, rev, node, close):
+ """add new data information to the cache"""
+ if branch in self._namesreverse:
+ branchidx = self._namesreverse[branch]
+ else:
+ branchidx = len(self._names)
+ self._names.append(branch)
+ self._namesreverse[branch] = branchidx
+ if close:
+ branchidx |= _rbccloseflag
+ self._setcachedata(rev, node, branchidx)
+ # If no cache data were readable (none exists, bad permission, etc)
+ # the cache was bypassing itself by setting:
+ #
+ # self.branchinfo = self._branchinfo
+ #
+ # Since we now have data in the cache, we need to drop this bypassing.
+ if 'branchinfo' in vars(self):
+ del self.branchinfo
+
def _setcachedata(self, rev, node, branchidx):
"""Writes the node's branch data to the in-memory cache data."""
if rev == nullrev:
@@ -517,7 +540,7 @@
self._rbcrevslen = revs
except (IOError, OSError, error.Abort, error.LockError) as inst:
repo.ui.debug("couldn't write revision branch cache%s: %s\n"
- % (step, inst))
+ % (step, util.forcebytestr(inst)))
finally:
if wlock is not None:
wlock.release()
--- a/mercurial/bundle2.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/bundle2.py Mon Mar 19 08:07:18 2018 -0700
@@ -147,6 +147,7 @@
from __future__ import absolute_import, division
+import collections
import errno
import os
import re
@@ -158,6 +159,7 @@
from . import (
bookmarks,
changegroup,
+ encoding,
error,
node as nodemod,
obsolete,
@@ -1490,6 +1492,7 @@
'digests': tuple(sorted(util.DIGESTS.keys())),
'remote-changegroup': ('http', 'https'),
'hgtagsfnodes': (),
+ 'rev-branch-cache': (),
'phases': ('heads',),
'stream': ('v2',),
}
@@ -1574,21 +1577,24 @@
# different right now. So we keep them separated for now for the sake of
# simplicity.
- # we always want a changegroup in such bundle
- cgversion = opts.get('cg.version')
- if cgversion is None:
- cgversion = changegroup.safeversion(repo)
- cg = changegroup.makechangegroup(repo, outgoing, cgversion, source)
- part = bundler.newpart('changegroup', data=cg.getchunks())
- part.addparam('version', cg.version)
- if 'clcount' in cg.extras:
- part.addparam('nbchanges', '%d' % cg.extras['clcount'],
- mandatory=False)
- if opts.get('phases') and repo.revs('%ln and secret()',
- outgoing.missingheads):
- part.addparam('targetphase', '%d' % phases.secret, mandatory=False)
+ # we might not always want a changegroup in such bundle, for example in
+ # stream bundles
+ if opts.get('changegroup', True):
+ cgversion = opts.get('cg.version')
+ if cgversion is None:
+ cgversion = changegroup.safeversion(repo)
+ cg = changegroup.makechangegroup(repo, outgoing, cgversion, source)
+ part = bundler.newpart('changegroup', data=cg.getchunks())
+ part.addparam('version', cg.version)
+ if 'clcount' in cg.extras:
+ part.addparam('nbchanges', '%d' % cg.extras['clcount'],
+ mandatory=False)
+ if opts.get('phases') and repo.revs('%ln and secret()',
+ outgoing.missingheads):
+ part.addparam('targetphase', '%d' % phases.secret, mandatory=False)
addparttagsfnodescache(repo, bundler, outgoing)
+ addpartrevbranchcache(repo, bundler, outgoing)
if opts.get('obsolescence', False):
obsmarkers = repo.obsstore.relevantmarkers(outgoing.missing)
@@ -1623,6 +1629,28 @@
if chunks:
bundler.newpart('hgtagsfnodes', data=''.join(chunks))
+def addpartrevbranchcache(repo, bundler, outgoing):
+ # we include the rev branch cache for the bundle changeset
+ # (as an optional part)
+ cache = repo.revbranchcache()
+ cl = repo.unfiltered().changelog
+ branchesdata = collections.defaultdict(lambda: (set(), set()))
+ for node in outgoing.missing:
+ branch, close = cache.branchinfo(cl.rev(node))
+ branchesdata[branch][close].add(node)
+
+ def generate():
+ for branch, (nodes, closed) in sorted(branchesdata.items()):
+ utf8branch = encoding.fromlocal(branch)
+ yield rbcstruct.pack(len(utf8branch), len(nodes), len(closed))
+ yield utf8branch
+ for n in sorted(nodes):
+ yield n
+ for n in sorted(closed):
+ yield n
+
+ bundler.newpart('cache:rev-branch-cache', data=generate())
+
def buildobsmarkerspart(bundler, markers):
"""add an obsmarker part to the bundler with <markers>
@@ -1729,7 +1757,7 @@
extrakwargs = {}
targetphase = inpart.params.get('targetphase')
if targetphase is not None:
- extrakwargs['targetphase'] = int(targetphase)
+ extrakwargs[r'targetphase'] = int(targetphase)
ret = _processchangegroup(op, cg, tr, 'bundle2', 'bundle2',
expectedtotal=nbchangesets, **extrakwargs)
if op.reply is not None:
@@ -1946,7 +1974,8 @@
value = inpart.params.get(name)
if value is not None:
kwargs[name] = value
- raise error.PushkeyFailed(inpart.params['in-reply-to'], **kwargs)
+ raise error.PushkeyFailed(inpart.params['in-reply-to'],
+ **pycompat.strkwargs(kwargs))
@parthandler('error:unsupportedcontent', ('parttype', 'params'))
def handleerrorunsupportedcontent(op, inpart):
@@ -1959,7 +1988,7 @@
if params is not None:
kwargs['params'] = params.split('\0')
- raise error.BundleUnknownFeatureError(**kwargs)
+ raise error.BundleUnknownFeatureError(**pycompat.strkwargs(kwargs))
@parthandler('error:pushraced', ('message',))
def handleerrorpushraced(op, inpart):
@@ -2001,7 +2030,8 @@
for key in ('namespace', 'key', 'new', 'old', 'ret'):
if key in inpart.params:
kwargs[key] = inpart.params[key]
- raise error.PushkeyFailed(partid=str(inpart.id), **kwargs)
+ raise error.PushkeyFailed(partid='%d' % inpart.id,
+ **pycompat.strkwargs(kwargs))
@parthandler('bookmarks')
def handlebookmark(op, inpart):
@@ -2040,14 +2070,15 @@
allhooks.append(hookargs)
for hookargs in allhooks:
- op.repo.hook('prepushkey', throw=True, **hookargs)
+ op.repo.hook('prepushkey', throw=True,
+ **pycompat.strkwargs(hookargs))
bookstore.applychanges(op.repo, op.gettransaction(), changes)
if pushkeycompat:
def runhook():
for hookargs in allhooks:
- op.repo.hook('pushkey', **hookargs)
+ op.repo.hook('pushkey', **pycompat.strkwargs(hookargs))
op.repo._afterlock(runhook)
elif bookmarksmode == 'records':
@@ -2126,6 +2157,40 @@
cache.write()
op.ui.debug('applied %i hgtags fnodes cache entries\n' % count)
+rbcstruct = struct.Struct('>III')
+
+@parthandler('cache:rev-branch-cache')
+def handlerbc(op, inpart):
+ """receive a rev-branch-cache payload and update the local cache
+
+ The payload is a series of data related to each branch
+
+ 1) branch name length
+ 2) number of open heads
+ 3) number of closed heads
+ 4) open heads nodes
+ 5) closed heads nodes
+ """
+ total = 0
+ rawheader = inpart.read(rbcstruct.size)
+ cache = op.repo.revbranchcache()
+ cl = op.repo.unfiltered().changelog
+ while rawheader:
+ header = rbcstruct.unpack(rawheader)
+ total += header[1] + header[2]
+ utf8branch = inpart.read(header[0])
+ branch = encoding.tolocal(utf8branch)
+ for x in xrange(header[1]):
+ node = inpart.read(20)
+ rev = cl.rev(node)
+ cache.setdata(branch, rev, node, False)
+ for x in xrange(header[2]):
+ node = inpart.read(20)
+ rev = cl.rev(node)
+ cache.setdata(branch, rev, node, True)
+ rawheader = inpart.read(rbcstruct.size)
+ cache.write()
+
@parthandler('pushvars')
def bundle2getvars(op, part):
'''unbundle a bundle2 containing shellvars on the server'''
--- a/mercurial/bundlerepo.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/bundlerepo.py Mon Mar 19 08:07:18 2018 -0700
@@ -349,7 +349,7 @@
suffix=suffix)
self.tempfile = temp
- with os.fdopen(fdtemp, pycompat.sysstr('wb')) as fptemp:
+ with os.fdopen(fdtemp, r'wb') as fptemp:
fptemp.write(header)
while True:
chunk = readfn(2**18)
@@ -402,7 +402,7 @@
# manifestlog implementation did not consume the manifests from the
# changegroup (ex: it might be consuming trees from a separate bundle2
# part instead). So we need to manually consume it.
- if 'filestart' not in self.__dict__:
+ if r'filestart' not in self.__dict__:
self._consumemanifest()
return self.filestart
--- a/mercurial/byterange.py Thu Mar 15 22:35:07 2018 -0700
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,472 +0,0 @@
-# This library is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2.1 of the License, or (at your option) any later version.
-#
-# This library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this library; if not, see
-# <http://www.gnu.org/licenses/>.
-
-# This file is part of urlgrabber, a high-level cross-protocol url-grabber
-# Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko
-
-# $Id: byterange.py,v 1.9 2005/02/14 21:55:07 mstenner Exp $
-
-from __future__ import absolute_import
-
-import email
-import ftplib
-import mimetypes
-import os
-import re
-import socket
-import stat
-
-from . import (
- urllibcompat,
- util,
-)
-
-urlerr = util.urlerr
-urlreq = util.urlreq
-
-addclosehook = urlreq.addclosehook
-addinfourl = urlreq.addinfourl
-splitattr = urlreq.splitattr
-splitpasswd = urlreq.splitpasswd
-splitport = urlreq.splitport
-splituser = urlreq.splituser
-unquote = urlreq.unquote
-
-class RangeError(IOError):
- """Error raised when an unsatisfiable range is requested."""
-
-class HTTPRangeHandler(urlreq.basehandler):
- """Handler that enables HTTP Range headers.
-
- This was extremely simple. The Range header is a HTTP feature to
- begin with so all this class does is tell urllib2 that the
- "206 Partial Content" response from the HTTP server is what we
- expected.
-
- Example:
- import urllib2
- import byterange
-
- range_handler = range.HTTPRangeHandler()
- opener = urlreq.buildopener(range_handler)
-
- # install it
- urlreq.installopener(opener)
-
- # create Request and set Range header
- req = urlreq.request('http://www.python.org/')
- req.header['Range'] = 'bytes=30-50'
- f = urlreq.urlopen(req)
- """
-
- def http_error_206(self, req, fp, code, msg, hdrs):
- # 206 Partial Content Response
- r = urlreq.addinfourl(fp, hdrs, req.get_full_url())
- r.code = code
- r.msg = msg
- return r
-
- def http_error_416(self, req, fp, code, msg, hdrs):
- # HTTP's Range Not Satisfiable error
- raise RangeError('Requested Range Not Satisfiable')
-
-class RangeableFileObject(object):
- """File object wrapper to enable raw range handling.
- This was implemented primarily for handling range
- specifications for file:// urls. This object effectively makes
- a file object look like it consists only of a range of bytes in
- the stream.
-
- Examples:
- # expose 10 bytes, starting at byte position 20, from
- # /etc/aliases.
- >>> fo = RangeableFileObject(file(b'/etc/passwd', b'r'), (20,30))
- # seek seeks within the range (to position 23 in this case)
- >>> fo.seek(3)
- # tell tells where your at _within the range_ (position 3 in
- # this case)
- >>> fo.tell()
- # read EOFs if an attempt is made to read past the last
- # byte in the range. the following will return only 7 bytes.
- >>> fo.read(30)
- """
-
- def __init__(self, fo, rangetup):
- """Create a RangeableFileObject.
- fo -- a file like object. only the read() method need be
- supported but supporting an optimized seek() is
- preferable.
- rangetup -- a (firstbyte,lastbyte) tuple specifying the range
- to work over.
- The file object provided is assumed to be at byte offset 0.
- """
- self.fo = fo
- (self.firstbyte, self.lastbyte) = range_tuple_normalize(rangetup)
- self.realpos = 0
- self._do_seek(self.firstbyte)
-
- def __getattr__(self, name):
- """This effectively allows us to wrap at the instance level.
- Any attribute not found in _this_ object will be searched for
- in self.fo. This includes methods."""
- return getattr(self.fo, name)
-
- def tell(self):
- """Return the position within the range.
- This is different from fo.seek in that position 0 is the
- first byte position of the range tuple. For example, if
- this object was created with a range tuple of (500,899),
- tell() will return 0 when at byte position 500 of the file.
- """
- return (self.realpos - self.firstbyte)
-
- def seek(self, offset, whence=0):
- """Seek within the byte range.
- Positioning is identical to that described under tell().
- """
- assert whence in (0, 1, 2)
- if whence == 0: # absolute seek
- realoffset = self.firstbyte + offset
- elif whence == 1: # relative seek
- realoffset = self.realpos + offset
- elif whence == 2: # absolute from end of file
- # XXX: are we raising the right Error here?
- raise IOError('seek from end of file not supported.')
-
- # do not allow seek past lastbyte in range
- if self.lastbyte and (realoffset >= self.lastbyte):
- realoffset = self.lastbyte
-
- self._do_seek(realoffset - self.realpos)
-
- def read(self, size=-1):
- """Read within the range.
- This method will limit the size read based on the range.
- """
- size = self._calc_read_size(size)
- rslt = self.fo.read(size)
- self.realpos += len(rslt)
- return rslt
-
- def readline(self, size=-1):
- """Read lines within the range.
- This method will limit the size read based on the range.
- """
- size = self._calc_read_size(size)
- rslt = self.fo.readline(size)
- self.realpos += len(rslt)
- return rslt
-
- def _calc_read_size(self, size):
- """Handles calculating the amount of data to read based on
- the range.
- """
- if self.lastbyte:
- if size > -1:
- if ((self.realpos + size) >= self.lastbyte):
- size = (self.lastbyte - self.realpos)
- else:
- size = (self.lastbyte - self.realpos)
- return size
-
- def _do_seek(self, offset):
- """Seek based on whether wrapped object supports seek().
- offset is relative to the current position (self.realpos).
- """
- assert offset >= 0
- seek = getattr(self.fo, 'seek', self._poor_mans_seek)
- seek(self.realpos + offset)
- self.realpos += offset
-
- def _poor_mans_seek(self, offset):
- """Seek by calling the wrapped file objects read() method.
- This is used for file like objects that do not have native
- seek support. The wrapped objects read() method is called
- to manually seek to the desired position.
- offset -- read this number of bytes from the wrapped
- file object.
- raise RangeError if we encounter EOF before reaching the
- specified offset.
- """
- pos = 0
- bufsize = 1024
- while pos < offset:
- if (pos + bufsize) > offset:
- bufsize = offset - pos
- buf = self.fo.read(bufsize)
- if len(buf) != bufsize:
- raise RangeError('Requested Range Not Satisfiable')
- pos += bufsize
-
-class FileRangeHandler(urlreq.filehandler):
- """FileHandler subclass that adds Range support.
- This class handles Range headers exactly like an HTTP
- server would.
- """
- def open_local_file(self, req):
- host = urllibcompat.gethost(req)
- file = urllibcompat.getselector(req)
- localfile = urlreq.url2pathname(file)
- stats = os.stat(localfile)
- size = stats[stat.ST_SIZE]
- modified = email.Utils.formatdate(stats[stat.ST_MTIME])
- mtype = mimetypes.guess_type(file)[0]
- if host:
- host, port = urlreq.splitport(host)
- if port or socket.gethostbyname(host) not in self.get_names():
- raise urlerr.urlerror('file not on local host')
- fo = open(localfile,'rb')
- brange = req.headers.get('Range', None)
- brange = range_header_to_tuple(brange)
- assert brange != ()
- if brange:
- (fb, lb) = brange
- if lb == '':
- lb = size
- if fb < 0 or fb > size or lb > size:
- raise RangeError('Requested Range Not Satisfiable')
- size = (lb - fb)
- fo = RangeableFileObject(fo, (fb, lb))
- headers = email.message_from_string(
- 'Content-Type: %s\nContent-Length: %d\nLast-Modified: %s\n' %
- (mtype or 'text/plain', size, modified))
- return urlreq.addinfourl(fo, headers, 'file:'+file)
-
-
-# FTP Range Support
-# Unfortunately, a large amount of base FTP code had to be copied
-# from urllib and urllib2 in order to insert the FTP REST command.
-# Code modifications for range support have been commented as
-# follows:
-# -- range support modifications start/end here
-
-class FTPRangeHandler(urlreq.ftphandler):
- def ftp_open(self, req):
- host = urllibcompat.gethost(req)
- if not host:
- raise IOError('ftp error', 'no host given')
- host, port = splitport(host)
- if port is None:
- port = ftplib.FTP_PORT
- else:
- port = int(port)
-
- # username/password handling
- user, host = splituser(host)
- if user:
- user, passwd = splitpasswd(user)
- else:
- passwd = None
- host = unquote(host)
- user = unquote(user or '')
- passwd = unquote(passwd or '')
-
- try:
- host = socket.gethostbyname(host)
- except socket.error as msg:
- raise urlerr.urlerror(msg)
- path, attrs = splitattr(req.get_selector())
- dirs = path.split('/')
- dirs = map(unquote, dirs)
- dirs, file = dirs[:-1], dirs[-1]
- if dirs and not dirs[0]:
- dirs = dirs[1:]
- try:
- fw = self.connect_ftp(user, passwd, host, port, dirs)
- if file:
- type = 'I'
- else:
- type = 'D'
-
- for attr in attrs:
- attr, value = splitattr(attr)
- if attr.lower() == 'type' and \
- value in ('a', 'A', 'i', 'I', 'd', 'D'):
- type = value.upper()
-
- # -- range support modifications start here
- rest = None
- range_tup = range_header_to_tuple(req.headers.get('Range', None))
- assert range_tup != ()
- if range_tup:
- (fb, lb) = range_tup
- if fb > 0:
- rest = fb
- # -- range support modifications end here
-
- fp, retrlen = fw.retrfile(file, type, rest)
-
- # -- range support modifications start here
- if range_tup:
- (fb, lb) = range_tup
- if lb == '':
- if retrlen is None or retrlen == 0:
- raise RangeError('Requested Range Not Satisfiable due'
- ' to unobtainable file length.')
- lb = retrlen
- retrlen = lb - fb
- if retrlen < 0:
- # beginning of range is larger than file
- raise RangeError('Requested Range Not Satisfiable')
- else:
- retrlen = lb - fb
- fp = RangeableFileObject(fp, (0, retrlen))
- # -- range support modifications end here
-
- headers = ""
- mtype = mimetypes.guess_type(req.get_full_url())[0]
- if mtype:
- headers += "Content-Type: %s\n" % mtype
- if retrlen is not None and retrlen >= 0:
- headers += "Content-Length: %d\n" % retrlen
- headers = email.message_from_string(headers)
- return addinfourl(fp, headers, req.get_full_url())
- except ftplib.all_errors as msg:
- raise IOError('ftp error', msg)
-
- def connect_ftp(self, user, passwd, host, port, dirs):
- fw = ftpwrapper(user, passwd, host, port, dirs)
- return fw
-
-class ftpwrapper(urlreq.ftpwrapper):
- # range support note:
- # this ftpwrapper code is copied directly from
- # urllib. The only enhancement is to add the rest
- # argument and pass it on to ftp.ntransfercmd
- def retrfile(self, file, type, rest=None):
- self.endtransfer()
- if type in ('d', 'D'):
- cmd = 'TYPE A'
- isdir = 1
- else:
- cmd = 'TYPE ' + type
- isdir = 0
- try:
- self.ftp.voidcmd(cmd)
- except ftplib.all_errors:
- self.init()
- self.ftp.voidcmd(cmd)
- conn = None
- if file and not isdir:
- # Use nlst to see if the file exists at all
- try:
- self.ftp.nlst(file)
- except ftplib.error_perm as reason:
- raise IOError('ftp error', reason)
- # Restore the transfer mode!
- self.ftp.voidcmd(cmd)
- # Try to retrieve as a file
- try:
- cmd = 'RETR ' + file
- conn = self.ftp.ntransfercmd(cmd, rest)
- except ftplib.error_perm as reason:
- if str(reason).startswith('501'):
- # workaround for REST not supported error
- fp, retrlen = self.retrfile(file, type)
- fp = RangeableFileObject(fp, (rest,''))
- return (fp, retrlen)
- elif not str(reason).startswith('550'):
- raise IOError('ftp error', reason)
- if not conn:
- # Set transfer mode to ASCII!
- self.ftp.voidcmd('TYPE A')
- # Try a directory listing
- if file:
- cmd = 'LIST ' + file
- else:
- cmd = 'LIST'
- conn = self.ftp.ntransfercmd(cmd)
- self.busy = 1
- # Pass back both a suitably decorated object and a retrieval length
- return (addclosehook(conn[0].makefile('rb'),
- self.endtransfer), conn[1])
-
-
-####################################################################
-# Range Tuple Functions
-# XXX: These range tuple functions might go better in a class.
-
-_rangere = None
-def range_header_to_tuple(range_header):
- """Get a (firstbyte,lastbyte) tuple from a Range header value.
-
- Range headers have the form "bytes=<firstbyte>-<lastbyte>". This
- function pulls the firstbyte and lastbyte values and returns
- a (firstbyte,lastbyte) tuple. If lastbyte is not specified in
- the header value, it is returned as an empty string in the
- tuple.
-
- Return None if range_header is None
- Return () if range_header does not conform to the range spec
- pattern.
-
- """
- global _rangere
- if range_header is None:
- return None
- if _rangere is None:
- _rangere = re.compile(br'^bytes=(\d{1,})-(\d*)')
- match = _rangere.match(range_header)
- if match:
- tup = range_tuple_normalize(match.group(1, 2))
- if tup and tup[1]:
- tup = (tup[0], tup[1]+1)
- return tup
- return ()
-
-def range_tuple_to_header(range_tup):
- """Convert a range tuple to a Range header value.
- Return a string of the form "bytes=<firstbyte>-<lastbyte>" or None
- if no range is needed.
- """
- if range_tup is None:
- return None
- range_tup = range_tuple_normalize(range_tup)
- if range_tup:
- if range_tup[1]:
- range_tup = (range_tup[0], range_tup[1] - 1)
- return 'bytes=%s-%s' % range_tup
-
-def range_tuple_normalize(range_tup):
- """Normalize a (first_byte,last_byte) range tuple.
- Return a tuple whose first element is guaranteed to be an int
- and whose second element will be '' (meaning: the last byte) or
- an int. Finally, return None if the normalized tuple == (0,'')
- as that is equivalent to retrieving the entire file.
- """
- if range_tup is None:
- return None
- # handle first byte
- fb = range_tup[0]
- if fb in (None, ''):
- fb = 0
- else:
- fb = int(fb)
- # handle last byte
- try:
- lb = range_tup[1]
- except IndexError:
- lb = ''
- else:
- if lb is None:
- lb = ''
- elif lb != '':
- lb = int(lb)
- # check if range is over the entire file
- if (fb, lb) == (0, ''):
- return None
- # check that the range is valid
- if lb < fb:
- raise RangeError('Invalid byte range: %s-%s' % (fb, lb))
- return (fb, lb)
--- a/mercurial/cext/base85.c Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/cext/base85.c Mon Mar 19 08:07:18 2018 -0700
@@ -14,8 +14,9 @@
#include "util.h"
-static const char b85chars[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
- "abcdefghijklmnopqrstuvwxyz!#$%&()*+-;<=>?@^_`{|}~";
+static const char b85chars[] =
+ "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ "abcdefghijklmnopqrstuvwxyz!#$%&()*+-;<=>?@^_`{|}~";
static char b85dec[256];
static void b85prep(void)
@@ -36,7 +37,7 @@
unsigned int acc, val, ch;
int pad = 0;
- if (!PyArg_ParseTuple(args, "s#|i", &text, &len, &pad))
+ if (!PyArg_ParseTuple(args, PY23("s#|i", "y#|i"), &text, &len, &pad))
return NULL;
if (pad)
@@ -83,7 +84,7 @@
int c;
unsigned int acc;
- if (!PyArg_ParseTuple(args, "s#", &text, &len))
+ if (!PyArg_ParseTuple(args, PY23("s#", "y#"), &text, &len))
return NULL;
olen = len / 5 * 4;
@@ -105,25 +106,25 @@
c = b85dec[(int)*text++] - 1;
if (c < 0)
return PyErr_Format(
- PyExc_ValueError,
- "bad base85 character at position %d",
- (int)i);
+ PyExc_ValueError,
+ "bad base85 character at position %d",
+ (int)i);
acc = acc * 85 + c;
}
if (i++ < len) {
c = b85dec[(int)*text++] - 1;
if (c < 0)
return PyErr_Format(
- PyExc_ValueError,
- "bad base85 character at position %d",
- (int)i);
+ PyExc_ValueError,
+ "bad base85 character at position %d",
+ (int)i);
/* overflow detection: 0xffffffff == "|NsC0",
* "|NsC" == 0x03030303 */
if (acc > 0x03030303 || (acc *= 85) > 0xffffffff - c)
return PyErr_Format(
- PyExc_ValueError,
- "bad base85 sequence at position %d",
- (int)i);
+ PyExc_ValueError,
+ "bad base85 sequence at position %d",
+ (int)i);
acc += c;
}
@@ -145,23 +146,19 @@
static char base85_doc[] = "Base85 Data Encoding";
static PyMethodDef methods[] = {
- {"b85encode", b85encode, METH_VARARGS,
- "Encode text in base85.\n\n"
- "If the second parameter is true, pad the result to a multiple of "
- "five characters.\n"},
- {"b85decode", b85decode, METH_VARARGS, "Decode base85 text.\n"},
- {NULL, NULL}
+ {"b85encode", b85encode, METH_VARARGS,
+ "Encode text in base85.\n\n"
+ "If the second parameter is true, pad the result to a multiple of "
+ "five characters.\n"},
+ {"b85decode", b85decode, METH_VARARGS, "Decode base85 text.\n"},
+ {NULL, NULL},
};
static const int version = 1;
#ifdef IS_PY3K
static struct PyModuleDef base85_module = {
- PyModuleDef_HEAD_INIT,
- "base85",
- base85_doc,
- -1,
- methods
+ PyModuleDef_HEAD_INIT, "base85", base85_doc, -1, methods,
};
PyMODINIT_FUNC PyInit_base85(void)
--- a/mercurial/cext/bdiff.c Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/cext/bdiff.c Mon Mar 19 08:07:18 2018 -0700
@@ -17,9 +17,9 @@
#include "bdiff.h"
#include "bitmanipulation.h"
+#include "thirdparty/xdiff/xdiff.h"
#include "util.h"
-
static PyObject *blocks(PyObject *self, PyObject *args)
{
PyObject *sa, *sb, *rl = NULL, *m;
@@ -61,42 +61,60 @@
static PyObject *bdiff(PyObject *self, PyObject *args)
{
- char *sa, *sb, *rb, *ia, *ib;
+ Py_buffer ba, bb;
+ char *rb, *ia, *ib;
PyObject *result = NULL;
- struct bdiff_line *al, *bl;
+ struct bdiff_line *al = NULL, *bl = NULL;
struct bdiff_hunk l, *h;
int an, bn, count;
Py_ssize_t len = 0, la, lb, li = 0, lcommon = 0, lmax;
- PyThreadState *_save;
+ PyThreadState *_save = NULL;
l.next = NULL;
- if (!PyArg_ParseTuple(args, "s#s#:bdiff", &sa, &la, &sb, &lb))
+ if (!PyArg_ParseTuple(args, PY23("s*s*:bdiff", "y*y*:bdiff"), &ba, &bb))
return NULL;
+ if (!PyBuffer_IsContiguous(&ba, 'C') || ba.ndim > 1) {
+ PyErr_SetString(PyExc_ValueError, "bdiff input not contiguous");
+ goto cleanup;
+ }
+
+ if (!PyBuffer_IsContiguous(&bb, 'C') || bb.ndim > 1) {
+ PyErr_SetString(PyExc_ValueError, "bdiff input not contiguous");
+ goto cleanup;
+ }
+
+ la = ba.len;
+ lb = bb.len;
+
if (la > UINT_MAX || lb > UINT_MAX) {
PyErr_SetString(PyExc_ValueError, "bdiff inputs too large");
- return NULL;
+ goto cleanup;
}
_save = PyEval_SaveThread();
lmax = la > lb ? lb : la;
- for (ia = sa, ib = sb;
- li < lmax && *ia == *ib;
- ++li, ++ia, ++ib)
+ for (ia = ba.buf, ib = bb.buf; li < lmax && *ia == *ib;
+ ++li, ++ia, ++ib) {
if (*ia == '\n')
lcommon = li + 1;
+ }
/* we can almost add: if (li == lmax) lcommon = li; */
- an = bdiff_splitlines(sa + lcommon, la - lcommon, &al);
- bn = bdiff_splitlines(sb + lcommon, lb - lcommon, &bl);
- if (!al || !bl)
- goto nomem;
+ an = bdiff_splitlines((char *)ba.buf + lcommon, la - lcommon, &al);
+ bn = bdiff_splitlines((char *)bb.buf + lcommon, lb - lcommon, &bl);
+ if (!al || !bl) {
+ PyErr_NoMemory();
+ goto cleanup;
+ }
count = bdiff_diff(al, an, bl, bn, &l);
- if (count < 0)
- goto nomem;
+ if (count < 0) {
+ PyErr_NoMemory();
+ goto cleanup;
+ }
/* calculate length of output */
la = lb = 0;
@@ -112,7 +130,7 @@
result = PyBytes_FromStringAndSize(NULL, len);
if (!result)
- goto nomem;
+ goto cleanup;
/* build binary patch */
rb = PyBytes_AsString(result);
@@ -122,7 +140,8 @@
if (h->a1 != la || h->b1 != lb) {
len = bl[h->b1].l - bl[lb].l;
putbe32((uint32_t)(al[la].l + lcommon - al->l), rb);
- putbe32((uint32_t)(al[h->a1].l + lcommon - al->l), rb + 4);
+ putbe32((uint32_t)(al[h->a1].l + lcommon - al->l),
+ rb + 4);
putbe32((uint32_t)len, rb + 8);
memcpy(rb + 12, bl[lb].l, len);
rb += 12 + len;
@@ -131,13 +150,21 @@
lb = h->b2;
}
-nomem:
+cleanup:
if (_save)
PyEval_RestoreThread(_save);
- free(al);
- free(bl);
- bdiff_freehunks(l.next);
- return result ? result : PyErr_NoMemory();
+ PyBuffer_Release(&ba);
+ PyBuffer_Release(&bb);
+ if (al) {
+ free(al);
+ }
+ if (bl) {
+ free(bl);
+ }
+ if (l.next) {
+ bdiff_freehunks(l.next);
+ }
+ return result;
}
/*
@@ -167,8 +194,8 @@
if (c == ' ' || c == '\t' || c == '\r') {
if (!allws && (wlen == 0 || w[wlen - 1] != ' '))
w[wlen++] = ' ';
- } else if (c == '\n' && !allws
- && wlen > 0 && w[wlen - 1] == ' ') {
+ } else if (c == '\n' && !allws && wlen > 0 &&
+ w[wlen - 1] == ' ') {
w[wlen - 1] = '\n';
} else {
w[wlen++] = c;
@@ -182,25 +209,124 @@
return result ? result : PyErr_NoMemory();
}
+static bool sliceintolist(PyObject *list, Py_ssize_t destidx,
+ const char *source, Py_ssize_t len)
+{
+ PyObject *sliced = PyBytes_FromStringAndSize(source, len);
+ if (sliced == NULL)
+ return false;
+ PyList_SET_ITEM(list, destidx, sliced);
+ return true;
+}
+
+static PyObject *splitnewlines(PyObject *self, PyObject *args)
+{
+ const char *text;
+ Py_ssize_t nelts = 0, size, i, start = 0;
+ PyObject *result = NULL;
+
+ if (!PyArg_ParseTuple(args, PY23("s#", "y#"), &text, &size)) {
+ goto abort;
+ }
+ if (!size) {
+ return PyList_New(0);
+ }
+ /* This loops to size-1 because if the last byte is a newline,
+ * we don't want to perform a split there. */
+ for (i = 0; i < size - 1; ++i) {
+ if (text[i] == '\n') {
+ ++nelts;
+ }
+ }
+ if ((result = PyList_New(nelts + 1)) == NULL)
+ goto abort;
+ nelts = 0;
+ for (i = 0; i < size - 1; ++i) {
+ if (text[i] == '\n') {
+ if (!sliceintolist(result, nelts++, text + start,
+ i - start + 1))
+ goto abort;
+ start = i + 1;
+ }
+ }
+ if (!sliceintolist(result, nelts++, text + start, size - start))
+ goto abort;
+ return result;
+abort:
+ Py_XDECREF(result);
+ return NULL;
+}
+
+static int hunk_consumer(int64_t a1, int64_t a2, int64_t b1, int64_t b2,
+ void *priv)
+{
+ PyObject *rl = (PyObject *)priv;
+ PyObject *m = Py_BuildValue("llll", a1, a2, b1, b2);
+ if (!m)
+ return -1;
+ if (PyList_Append(rl, m) != 0) {
+ Py_DECREF(m);
+ return -1;
+ }
+ return 0;
+}
+
+static PyObject *xdiffblocks(PyObject *self, PyObject *args)
+{
+ Py_ssize_t la, lb;
+ mmfile_t a, b;
+ PyObject *rl;
+
+ xpparam_t xpp = {
+ XDF_INDENT_HEURISTIC, /* flags */
+ };
+ xdemitconf_t xecfg = {
+ XDL_EMIT_BDIFFHUNK, /* flags */
+ hunk_consumer, /* hunk_consume_func */
+ };
+ xdemitcb_t ecb = {
+ NULL, /* priv */
+ };
+
+ if (!PyArg_ParseTuple(args, PY23("s#s#", "y#y#"), &a.ptr, &la, &b.ptr,
+ &lb))
+ return NULL;
+
+ a.size = la;
+ b.size = lb;
+
+ rl = PyList_New(0);
+ if (!rl)
+ return PyErr_NoMemory();
+
+ ecb.priv = rl;
+
+ if (xdl_diff(&a, &b, &xpp, &xecfg, &ecb) != 0) {
+ Py_DECREF(rl);
+ return PyErr_NoMemory();
+ }
+
+ return rl;
+}
static char mdiff_doc[] = "Efficient binary diff.";
static PyMethodDef methods[] = {
- {"bdiff", bdiff, METH_VARARGS, "calculate a binary diff\n"},
- {"blocks", blocks, METH_VARARGS, "find a list of matching lines\n"},
- {"fixws", fixws, METH_VARARGS, "normalize diff whitespaces\n"},
- {NULL, NULL}
+ {"bdiff", bdiff, METH_VARARGS, "calculate a binary diff\n"},
+ {"blocks", blocks, METH_VARARGS, "find a list of matching lines\n"},
+ {"fixws", fixws, METH_VARARGS, "normalize diff whitespaces\n"},
+ {"splitnewlines", splitnewlines, METH_VARARGS,
+ "like str.splitlines, but only split on newlines\n"},
+ {"xdiffblocks", xdiffblocks, METH_VARARGS,
+ "find a list of matching lines using xdiff algorithm\n"},
+ {NULL, NULL},
};
-static const int version = 1;
+static const int version = 3;
#ifdef IS_PY3K
static struct PyModuleDef bdiff_module = {
- PyModuleDef_HEAD_INIT,
- "bdiff",
- mdiff_doc,
- -1,
- methods
+ PyModuleDef_HEAD_INIT, "bdiff", mdiff_doc, -1, methods,
};
PyMODINIT_FUNC PyInit_bdiff(void)
--- a/mercurial/cext/charencode.c Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/cext/charencode.c Mon Mar 19 08:07:18 2018 -0700
@@ -65,7 +65,6 @@
'\x58', '\x59', '\x5a', /* x-z */
'\x7b', '\x7c', '\x7d', '\x7e', '\x7f'
};
-/* clang-format on */
/* 1: no escape, 2: \<c>, 6: \u<x> */
static const uint8_t jsonlentable[256] = {
@@ -102,6 +101,7 @@
'0', '1', '2', '3', '4', '5', '6', '7',
'8', '9', 'a', 'b', 'c', 'd', 'e', 'f',
};
+/* clang-format on */
/*
* Turn a hex-encoded string into binary.
@@ -132,7 +132,8 @@
{
const char *buf;
Py_ssize_t i, len;
- if (!PyArg_ParseTuple(args, "s#:isasciistr", &buf, &len))
+ if (!PyArg_ParseTuple(args, PY23("s#:isasciistr", "y#:isasciistr"),
+ &buf, &len))
return NULL;
i = 0;
/* char array in PyStringObject should be at least 4-byte aligned */
@@ -151,9 +152,8 @@
Py_RETURN_TRUE;
}
-static inline PyObject *_asciitransform(PyObject *str_obj,
- const char table[128],
- PyObject *fallback_fn)
+static inline PyObject *
+_asciitransform(PyObject *str_obj, const char table[128], PyObject *fallback_fn)
{
char *str, *newstr;
Py_ssize_t i, len;
@@ -173,12 +173,12 @@
char c = str[i];
if (c & 0x80) {
if (fallback_fn != NULL) {
- ret = PyObject_CallFunctionObjArgs(fallback_fn,
- str_obj, NULL);
+ ret = PyObject_CallFunctionObjArgs(
+ fallback_fn, str_obj, NULL);
} else {
PyObject *err = PyUnicodeDecodeError_Create(
- "ascii", str, len, i, (i + 1),
- "unexpected code byte");
+ "ascii", str, len, i, (i + 1),
+ "unexpected code byte");
PyErr_SetObject(PyExc_UnicodeDecodeError, err);
Py_XDECREF(err);
}
@@ -220,10 +220,9 @@
Py_ssize_t pos = 0;
const char *table;
- if (!PyArg_ParseTuple(args, "O!O!O!:make_file_foldmap",
- &PyDict_Type, &dmap,
- &PyInt_Type, &spec_obj,
- &PyFunction_Type, &normcase_fallback))
+ if (!PyArg_ParseTuple(args, "O!O!O!:make_file_foldmap", &PyDict_Type,
+ &dmap, &PyInt_Type, &spec_obj, &PyFunction_Type,
+ &normcase_fallback))
goto quit;
spec = (int)PyInt_AS_LONG(spec_obj);
@@ -251,7 +250,7 @@
while (PyDict_Next(dmap, &pos, &k, &v)) {
if (!dirstate_tuple_check(v)) {
PyErr_SetString(PyExc_TypeError,
- "expected a dirstate tuple");
+ "expected a dirstate tuple");
goto quit;
}
@@ -260,10 +259,10 @@
PyObject *normed;
if (table != NULL) {
normed = _asciitransform(k, table,
- normcase_fallback);
+ normcase_fallback);
} else {
normed = PyObject_CallFunctionObjArgs(
- normcase_fallback, k, NULL);
+ normcase_fallback, k, NULL);
}
if (normed == NULL)
@@ -292,13 +291,13 @@
char c = buf[i];
if (c & 0x80) {
PyErr_SetString(PyExc_ValueError,
- "cannot process non-ascii str");
+ "cannot process non-ascii str");
return -1;
}
esclen += jsonparanoidlentable[(unsigned char)c];
if (esclen < 0) {
PyErr_SetString(PyExc_MemoryError,
- "overflow in jsonescapelen");
+ "overflow in jsonescapelen");
return -1;
}
}
@@ -308,7 +307,7 @@
esclen += jsonlentable[(unsigned char)c];
if (esclen < 0) {
PyErr_SetString(PyExc_MemoryError,
- "overflow in jsonescapelen");
+ "overflow in jsonescapelen");
return -1;
}
}
@@ -336,17 +335,17 @@
case '\\':
return '\\';
}
- return '\0'; /* should not happen */
+ return '\0'; /* should not happen */
}
/* convert 'origbuf' to JSON-escaped form 'escbuf'; 'origbuf' should only
include characters mappable by json(paranoid)lentable */
static void encodejsonescape(char *escbuf, Py_ssize_t esclen,
- const char *origbuf, Py_ssize_t origlen,
- bool paranoid)
+ const char *origbuf, Py_ssize_t origlen,
+ bool paranoid)
{
const uint8_t *lentable =
- (paranoid) ? jsonparanoidlentable : jsonlentable;
+ (paranoid) ? jsonparanoidlentable : jsonlentable;
Py_ssize_t i, j;
for (i = 0, j = 0; i < origlen; i++) {
@@ -377,15 +376,15 @@
const char *origbuf;
Py_ssize_t origlen, esclen;
int paranoid;
- if (!PyArg_ParseTuple(args, "O!i:jsonescapeu8fast",
- &PyBytes_Type, &origstr, ¶noid))
+ if (!PyArg_ParseTuple(args, "O!i:jsonescapeu8fast", &PyBytes_Type,
+ &origstr, ¶noid))
return NULL;
origbuf = PyBytes_AS_STRING(origstr);
origlen = PyBytes_GET_SIZE(origstr);
esclen = jsonescapelen(origbuf, origlen, paranoid);
if (esclen < 0)
- return NULL; /* unsupported char found or overflow */
+ return NULL; /* unsupported char found or overflow */
if (origlen == esclen) {
Py_INCREF(origstr);
return origstr;
@@ -395,7 +394,7 @@
if (!escstr)
return NULL;
encodejsonescape(PyBytes_AS_STRING(escstr), esclen, origbuf, origlen,
- paranoid);
+ paranoid);
return escstr;
}
--- a/mercurial/cext/charencode.h Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/cext/charencode.h Mon Mar 19 08:07:18 2018 -0700
@@ -25,6 +25,7 @@
PyObject *make_file_foldmap(PyObject *self, PyObject *args);
PyObject *jsonescapeu8fast(PyObject *self, PyObject *args);
+/* clang-format off */
static const int8_t hextable[256] = {
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
@@ -43,6 +44,7 @@
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
};
+/* clang-format on */
static inline int hexdigit(const char *p, Py_ssize_t off)
{
--- a/mercurial/cext/diffhelpers.c Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/cext/diffhelpers.c Mon Mar 19 08:07:18 2018 -0700
@@ -16,12 +16,11 @@
static char diffhelpers_doc[] = "Efficient diff parsing";
static PyObject *diffhelpers_Error;
-
/* fixup the last lines of a and b when the patch has no newline at eof */
static void _fix_newline(PyObject *hunk, PyObject *a, PyObject *b)
{
Py_ssize_t hunksz = PyList_Size(hunk);
- PyObject *s = PyList_GET_ITEM(hunk, hunksz-1);
+ PyObject *s = PyList_GET_ITEM(hunk, hunksz - 1);
char *l = PyBytes_AsString(s);
Py_ssize_t alen = PyList_Size(a);
Py_ssize_t blen = PyList_Size(b);
@@ -29,29 +28,28 @@
PyObject *hline;
Py_ssize_t sz = PyBytes_GET_SIZE(s);
- if (sz > 1 && l[sz-2] == '\r')
+ if (sz > 1 && l[sz - 2] == '\r')
/* tolerate CRLF in last line */
sz -= 1;
- hline = PyBytes_FromStringAndSize(l, sz-1);
+ hline = PyBytes_FromStringAndSize(l, sz - 1);
if (!hline) {
return;
}
if (c == ' ' || c == '+') {
PyObject *rline = PyBytes_FromStringAndSize(l + 1, sz - 2);
- PyList_SetItem(b, blen-1, rline);
+ PyList_SetItem(b, blen - 1, rline);
}
if (c == ' ' || c == '-') {
Py_INCREF(hline);
- PyList_SetItem(a, alen-1, hline);
+ PyList_SetItem(a, alen - 1, hline);
}
- PyList_SetItem(hunk, hunksz-1, hline);
+ PyList_SetItem(hunk, hunksz - 1, hline);
}
/* python callable form of _fix_newline */
-static PyObject *
-fix_newline(PyObject *self, PyObject *args)
+static PyObject *fix_newline(PyObject *self, PyObject *args)
{
PyObject *hunk, *a, *b;
if (!PyArg_ParseTuple(args, "OOO", &hunk, &a, &b))
@@ -72,8 +70,7 @@
* The control char from the hunk is saved when inserting into a, but not b
* (for performance while deleting files)
*/
-static PyObject *
-addlines(PyObject *self, PyObject *args)
+static PyObject *addlines(PyObject *self, PyObject *args)
{
PyObject *fp, *hunk, *a, *b, *x;
@@ -83,8 +80,8 @@
Py_ssize_t todoa, todob;
char *s, c;
PyObject *l;
- if (!PyArg_ParseTuple(args, addlines_format,
- &fp, &hunk, &lena, &lenb, &a, &b))
+ if (!PyArg_ParseTuple(args, addlines_format, &fp, &hunk, &lena, &lenb,
+ &a, &b))
return NULL;
while (1) {
@@ -92,7 +89,7 @@
todob = lenb - PyList_Size(b);
num = todoa > todob ? todoa : todob;
if (num == 0)
- break;
+ break;
for (i = 0; i < num; i++) {
x = PyFile_GetLine(fp, 0);
s = PyBytes_AsString(x);
@@ -131,8 +128,7 @@
* a control char at the start of each line, this char is ignored in the
* compare
*/
-static PyObject *
-testhunk(PyObject *self, PyObject *args)
+static PyObject *testhunk(PyObject *self, PyObject *args)
{
PyObject *a, *b;
@@ -158,21 +154,16 @@
}
static PyMethodDef methods[] = {
- {"addlines", addlines, METH_VARARGS, "add lines to a hunk\n"},
- {"fix_newline", fix_newline, METH_VARARGS, "fixup newline counters\n"},
- {"testhunk", testhunk, METH_VARARGS, "test lines in a hunk\n"},
- {NULL, NULL}
-};
+ {"addlines", addlines, METH_VARARGS, "add lines to a hunk\n"},
+ {"fix_newline", fix_newline, METH_VARARGS, "fixup newline counters\n"},
+ {"testhunk", testhunk, METH_VARARGS, "test lines in a hunk\n"},
+ {NULL, NULL}};
static const int version = 1;
#ifdef IS_PY3K
static struct PyModuleDef diffhelpers_module = {
- PyModuleDef_HEAD_INIT,
- "diffhelpers",
- diffhelpers_doc,
- -1,
- methods
+ PyModuleDef_HEAD_INIT, "diffhelpers", diffhelpers_doc, -1, methods,
};
PyMODINIT_FUNC PyInit_diffhelpers(void)
@@ -183,8 +174,8 @@
if (m == NULL)
return NULL;
- diffhelpers_Error = PyErr_NewException("diffhelpers.diffhelpersError",
- NULL, NULL);
+ diffhelpers_Error =
+ PyErr_NewException("diffhelpers.diffhelpersError", NULL, NULL);
Py_INCREF(diffhelpers_Error);
PyModule_AddObject(m, "diffhelpersError", diffhelpers_Error);
PyModule_AddIntConstant(m, "version", version);
@@ -192,13 +183,12 @@
return m;
}
#else
-PyMODINIT_FUNC
-initdiffhelpers(void)
+PyMODINIT_FUNC initdiffhelpers(void)
{
PyObject *m;
m = Py_InitModule3("diffhelpers", methods, diffhelpers_doc);
- diffhelpers_Error = PyErr_NewException("diffhelpers.diffhelpersError",
- NULL, NULL);
+ diffhelpers_Error =
+ PyErr_NewException("diffhelpers.diffhelpersError", NULL, NULL);
PyModule_AddIntConstant(m, "version", version);
}
#endif
--- a/mercurial/cext/manifest.c Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/cext/manifest.c Mon Mar 19 08:07:18 2018 -0700
@@ -718,7 +718,8 @@
Py_INCREF(self->pydata);
for (i = 0; i < self->numlines; i++) {
PyObject *arglist = NULL, *result = NULL;
- arglist = Py_BuildValue("(s)", self->lines[i].start);
+ arglist = Py_BuildValue(PY23("(s)", "(y)"),
+ self->lines[i].start);
if (!arglist) {
return NULL;
}
--- a/mercurial/cext/mpatch.c Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/cext/mpatch.c Mon Mar 19 08:07:18 2018 -0700
@@ -55,10 +55,10 @@
ssize_t blen;
int r;
- PyObject *tmp = PyList_GetItem((PyObject*)bins, pos);
+ PyObject *tmp = PyList_GetItem((PyObject *)bins, pos);
if (!tmp)
return NULL;
- if (PyObject_AsCharBuffer(tmp, &buffer, (Py_ssize_t*)&blen))
+ if (PyObject_AsCharBuffer(tmp, &buffer, (Py_ssize_t *)&blen))
return NULL;
if ((r = mpatch_decode(buffer, blen, &res)) < 0) {
if (!PyErr_Occurred())
@@ -68,8 +68,7 @@
return res;
}
-static PyObject *
-patches(PyObject *self, PyObject *args)
+static PyObject *patches(PyObject *self, PyObject *args)
{
PyObject *text, *bins, *result;
struct mpatch_flist *patch;
@@ -110,7 +109,14 @@
goto cleanup;
}
out = PyBytes_AsString(result);
- if ((r = mpatch_apply(out, in, inlen, patch)) < 0) {
+ /* clang-format off */
+ {
+ Py_BEGIN_ALLOW_THREADS
+ r = mpatch_apply(out, in, inlen, patch);
+ Py_END_ALLOW_THREADS
+ }
+ /* clang-format on */
+ if (r < 0) {
Py_DECREF(result);
result = NULL;
}
@@ -122,14 +128,13 @@
}
/* calculate size of a patched file directly */
-static PyObject *
-patchedsize(PyObject *self, PyObject *args)
+static PyObject *patchedsize(PyObject *self, PyObject *args)
{
long orig, start, end, len, outlen = 0, last = 0, pos = 0;
Py_ssize_t patchlen;
char *bin;
- if (!PyArg_ParseTuple(args, "ls#", &orig, &bin, &patchlen))
+ if (!PyArg_ParseTuple(args, PY23("ls#", "ly#"), &orig, &bin, &patchlen))
return NULL;
while (pos >= 0 && pos < patchlen) {
@@ -146,7 +151,8 @@
if (pos != patchlen) {
if (!PyErr_Occurred())
- PyErr_SetString(mpatch_Error, "patch cannot be decoded");
+ PyErr_SetString(mpatch_Error,
+ "patch cannot be decoded");
return NULL;
}
@@ -155,20 +161,16 @@
}
static PyMethodDef methods[] = {
- {"patches", patches, METH_VARARGS, "apply a series of patches\n"},
- {"patchedsize", patchedsize, METH_VARARGS, "calculed patched size\n"},
- {NULL, NULL}
+ {"patches", patches, METH_VARARGS, "apply a series of patches\n"},
+ {"patchedsize", patchedsize, METH_VARARGS, "calculed patched size\n"},
+ {NULL, NULL},
};
static const int version = 1;
#ifdef IS_PY3K
static struct PyModuleDef mpatch_module = {
- PyModuleDef_HEAD_INIT,
- "mpatch",
- mpatch_doc,
- -1,
- methods
+ PyModuleDef_HEAD_INIT, "mpatch", mpatch_doc, -1, methods,
};
PyMODINIT_FUNC PyInit_mpatch(void)
@@ -179,8 +181,8 @@
if (m == NULL)
return NULL;
- mpatch_Error = PyErr_NewException("mercurial.cext.mpatch.mpatchError",
- NULL, NULL);
+ mpatch_Error =
+ PyErr_NewException("mercurial.cext.mpatch.mpatchError", NULL, NULL);
Py_INCREF(mpatch_Error);
PyModule_AddObject(m, "mpatchError", mpatch_Error);
PyModule_AddIntConstant(m, "version", version);
@@ -188,13 +190,12 @@
return m;
}
#else
-PyMODINIT_FUNC
-initmpatch(void)
+PyMODINIT_FUNC initmpatch(void)
{
PyObject *m;
m = Py_InitModule3("mpatch", methods, mpatch_doc);
- mpatch_Error = PyErr_NewException("mercurial.cext.mpatch.mpatchError",
- NULL, NULL);
+ mpatch_Error =
+ PyErr_NewException("mercurial.cext.mpatch.mpatchError", NULL, NULL);
PyModule_AddIntConstant(m, "version", version);
}
#endif
--- a/mercurial/cext/osutil.c Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/cext/osutil.c Mon Mar 19 08:07:18 2018 -0700
@@ -121,6 +121,27 @@
o->ob_type->tp_free(o);
}
+static PyObject *listdir_stat_getitem(PyObject *self, PyObject *key)
+{
+ long index = PyLong_AsLong(key);
+ if (index == -1 && PyErr_Occurred()) {
+ return NULL;
+ }
+ if (index != 8) {
+ PyErr_Format(PyExc_IndexError, "osutil.stat objects only "
+ "support stat.ST_MTIME in "
+ "__getitem__");
+ return NULL;
+ }
+ return listdir_stat_st_mtime(self, NULL);
+}
+
+static PyMappingMethods listdir_stat_type_mapping_methods = {
+ (lenfunc)NULL, /* mp_length */
+ (binaryfunc)listdir_stat_getitem, /* mp_subscript */
+ (objobjargproc)NULL, /* mp_ass_subscript */
+};
+
static PyTypeObject listdir_stat_type = {
PyVarObject_HEAD_INIT(NULL, 0) /* header */
"osutil.stat", /*tp_name*/
@@ -134,7 +155,7 @@
0, /*tp_repr*/
0, /*tp_as_number*/
0, /*tp_as_sequence*/
- 0, /*tp_as_mapping*/
+ &listdir_stat_type_mapping_methods, /*tp_as_mapping*/
0, /*tp_hash */
0, /*tp_call*/
0, /*tp_str*/
@@ -184,7 +205,7 @@
? _S_IFDIR : _S_IFREG;
if (!wantstat)
- return Py_BuildValue("si", fd->cFileName, kind);
+ return Py_BuildValue(PY23("si", "yi"), fd->cFileName, kind);
py_st = PyObject_CallObject((PyObject *)&listdir_stat_type, NULL);
if (!py_st)
@@ -202,7 +223,7 @@
if (kind == _S_IFREG)
stp->st_size = ((__int64)fd->nFileSizeHigh << 32)
+ fd->nFileSizeLow;
- return Py_BuildValue("siN", fd->cFileName,
+ return Py_BuildValue(PY23("siN", "yiN"), fd->cFileName,
kind, py_st);
}
@@ -390,9 +411,11 @@
stat = makestat(&st);
if (!stat)
goto error;
- elem = Py_BuildValue("siN", ent->d_name, kind, stat);
+ elem = Py_BuildValue(PY23("siN", "yiN"), ent->d_name,
+ kind, stat);
} else
- elem = Py_BuildValue("si", ent->d_name, kind);
+ elem = Py_BuildValue(PY23("si", "yi"), ent->d_name,
+ kind);
if (!elem)
goto error;
stat = NULL;
@@ -570,9 +593,11 @@
stat = makestat(&st);
if (!stat)
goto error;
- elem = Py_BuildValue("siN", filename, kind, stat);
+ elem = Py_BuildValue(PY23("siN", "yiN"),
+ filename, kind, stat);
} else
- elem = Py_BuildValue("si", filename, kind);
+ elem = Py_BuildValue(PY23("si", "yi"),
+ filename, kind);
if (!elem)
goto error;
stat = NULL;
@@ -754,7 +779,7 @@
static PyObject *setprocname(PyObject *self, PyObject *args)
{
const char *name = NULL;
- if (!PyArg_ParseTuple(args, "s", &name))
+ if (!PyArg_ParseTuple(args, PY23("s", "y"), &name))
return NULL;
#if defined(SETPROCNAME_USE_SETPROCTITLE)
@@ -1101,14 +1126,14 @@
const char *path = NULL;
struct statfs buf;
int r;
- if (!PyArg_ParseTuple(args, "s", &path))
+ if (!PyArg_ParseTuple(args, PY23("s", "y"), &path))
return NULL;
memset(&buf, 0, sizeof(buf));
r = statfs(path, &buf);
if (r != 0)
return PyErr_SetFromErrno(PyExc_OSError);
- return Py_BuildValue("s", describefstype(&buf));
+ return Py_BuildValue(PY23("s", "y"), describefstype(&buf));
}
#endif /* defined(HAVE_LINUX_STATFS) || defined(HAVE_BSD_STATFS) */
@@ -1119,14 +1144,14 @@
const char *path = NULL;
struct statfs buf;
int r;
- if (!PyArg_ParseTuple(args, "s", &path))
+ if (!PyArg_ParseTuple(args, PY23("s", "y"), &path))
return NULL;
memset(&buf, 0, sizeof(buf));
r = statfs(path, &buf);
if (r != 0)
return PyErr_SetFromErrno(PyExc_OSError);
- return Py_BuildValue("s", buf.f_mntonname);
+ return Py_BuildValue(PY23("s", "y"), buf.f_mntonname);
}
#endif /* defined(HAVE_BSD_STATFS) */
@@ -1160,7 +1185,8 @@
static char *kwlist[] = {"path", "stat", "skip", NULL};
- if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s#|OO:listdir",
+ if (!PyArg_ParseTupleAndKeywords(args, kwargs, PY23("s#|OO:listdir",
+ "y#|OO:listdir"),
kwlist, &path, &plen, &statobj, &skipobj))
return NULL;
@@ -1193,7 +1219,9 @@
int plus;
FILE *fp;
- if (!PyArg_ParseTupleAndKeywords(args, kwds, "et|si:posixfile", kwlist,
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, PY23("et|si:posixfile",
+ "et|yi:posixfile"),
+ kwlist,
Py_FileSystemDefaultEncoding,
&name, &mode, &bufsize))
return NULL;
@@ -1345,7 +1373,7 @@
{NULL, NULL}
};
-static const int version = 3;
+static const int version = 4;
#ifdef IS_PY3K
static struct PyModuleDef osutil_module = {
--- a/mercurial/cext/parsers.c Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/cext/parsers.c Mon Mar 19 08:07:18 2018 -0700
@@ -48,8 +48,9 @@
char *str, *start, *end;
int len;
- if (!PyArg_ParseTuple(args, "O!O!s#:parse_manifest", &PyDict_Type,
- &mfdict, &PyDict_Type, &fdict, &str, &len))
+ if (!PyArg_ParseTuple(
+ args, PY23("O!O!s#:parse_manifest", "O!O!y#:parse_manifest"),
+ &PyDict_Type, &mfdict, &PyDict_Type, &fdict, &str, &len))
goto quit;
start = str;
@@ -241,8 +242,9 @@
unsigned int flen, len, pos = 40;
int readlen;
- if (!PyArg_ParseTuple(args, "O!O!s#:parse_dirstate", &PyDict_Type,
- &dmap, &PyDict_Type, &cmap, &str, &readlen))
+ if (!PyArg_ParseTuple(
+ args, PY23("O!O!s#:parse_dirstate", "O!O!y#:parse_dirstate"),
+ &PyDict_Type, &dmap, &PyDict_Type, &cmap, &str, &readlen))
goto quit;
len = readlen;
@@ -254,7 +256,7 @@
goto quit;
}
- parents = Py_BuildValue("s#s#", str, 20, str + 20, 20);
+ parents = Py_BuildValue(PY23("s#s#", "y#y#"), str, 20, str + 20, 20);
if (!parents)
goto quit;
@@ -645,7 +647,8 @@
Py_ssize_t offset, stop;
PyObject *markers = NULL;
- if (!PyArg_ParseTuple(args, "s#nn", &data, &datalen, &offset, &stop)) {
+ if (!PyArg_ParseTuple(args, PY23("s#nn", "y#nn"), &data, &datalen,
+ &offset, &stop)) {
return NULL;
}
dataend = data + datalen;
--- a/mercurial/cext/pathencode.c Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/cext/pathencode.c Mon Mar 19 08:07:18 2018 -0700
@@ -26,26 +26,26 @@
/* state machine for the fast path */
enum path_state {
- START, /* first byte of a path component */
- A, /* "AUX" */
+ START, /* first byte of a path component */
+ A, /* "AUX" */
AU,
- THIRD, /* third of a 3-byte sequence, e.g. "AUX", "NUL" */
- C, /* "CON" or "COMn" */
+ THIRD, /* third of a 3-byte sequence, e.g. "AUX", "NUL" */
+ C, /* "CON" or "COMn" */
CO,
- COMLPT, /* "COM" or "LPT" */
+ COMLPT, /* "COM" or "LPT" */
COMLPTn,
L,
LP,
N,
NU,
- P, /* "PRN" */
+ P, /* "PRN" */
PR,
- LDOT, /* leading '.' */
- DOT, /* '.' in a non-leading position */
- H, /* ".h" */
- HGDI, /* ".hg", ".d", or ".i" */
+ LDOT, /* leading '.' */
+ DOT, /* '.' in a non-leading position */
+ H, /* ".h" */
+ HGDI, /* ".hg", ".d", or ".i" */
SPACE,
- DEFAULT /* byte of a path component after the first */
+ DEFAULT, /* byte of a path component after the first */
};
/* state machine for dir-encoding */
@@ -53,7 +53,7 @@
DDOT,
DH,
DHGDI,
- DDEFAULT
+ DDEFAULT,
};
static inline int inset(const uint32_t bitset[], char c)
@@ -82,7 +82,7 @@
}
static inline void hexencode(char *dest, Py_ssize_t *destlen, size_t destsize,
- uint8_t c)
+ uint8_t c)
{
static const char hexdigit[] = "0123456789abcdef";
@@ -92,14 +92,14 @@
/* 3-byte escape: tilde followed by two hex digits */
static inline void escape3(char *dest, Py_ssize_t *destlen, size_t destsize,
- char c)
+ char c)
{
charcopy(dest, destlen, destsize, '~');
hexencode(dest, destlen, destsize, c);
}
-static Py_ssize_t _encodedir(char *dest, size_t destsize,
- const char *src, Py_ssize_t len)
+static Py_ssize_t _encodedir(char *dest, size_t destsize, const char *src,
+ Py_ssize_t len)
{
enum dir_state state = DDEFAULT;
Py_ssize_t i = 0, destlen = 0;
@@ -126,8 +126,8 @@
if (src[i] == 'g') {
state = DHGDI;
charcopy(dest, &destlen, destsize, src[i++]);
- }
- else state = DDEFAULT;
+ } else
+ state = DDEFAULT;
break;
case DHGDI:
if (src[i] == '/') {
@@ -173,17 +173,15 @@
if (newobj) {
assert(PyBytes_Check(newobj));
Py_SIZE(newobj)--;
- _encodedir(PyBytes_AS_STRING(newobj), newlen, path,
- len + 1);
+ _encodedir(PyBytes_AS_STRING(newobj), newlen, path, len + 1);
}
return newobj;
}
static Py_ssize_t _encode(const uint32_t twobytes[8], const uint32_t onebyte[8],
- char *dest, Py_ssize_t destlen, size_t destsize,
- const char *src, Py_ssize_t len,
- int encodedir)
+ char *dest, Py_ssize_t destlen, size_t destsize,
+ const char *src, Py_ssize_t len, int encodedir)
{
enum path_state state = START;
Py_ssize_t i = 0;
@@ -237,15 +235,15 @@
if (src[i] == 'u') {
state = AU;
charcopy(dest, &destlen, destsize, src[i++]);
- }
- else state = DEFAULT;
+ } else
+ state = DEFAULT;
break;
case AU:
if (src[i] == 'x') {
state = THIRD;
i++;
- }
- else state = DEFAULT;
+ } else
+ state = DEFAULT;
break;
case THIRD:
state = DEFAULT;
@@ -264,24 +262,30 @@
if (src[i] == 'o') {
state = CO;
charcopy(dest, &destlen, destsize, src[i++]);
- }
- else state = DEFAULT;
+ } else
+ state = DEFAULT;
break;
case CO:
if (src[i] == 'm') {
state = COMLPT;
i++;
- }
- else if (src[i] == 'n') {
+ } else if (src[i] == 'n') {
state = THIRD;
i++;
- }
- else state = DEFAULT;
+ } else
+ state = DEFAULT;
break;
case COMLPT:
switch (src[i]) {
- case '1': case '2': case '3': case '4': case '5':
- case '6': case '7': case '8': case '9':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
state = COMLPTn;
i++;
break;
@@ -301,8 +305,8 @@
charcopy(dest, &destlen, destsize, src[i - 1]);
break;
default:
- memcopy(dest, &destlen, destsize,
- &src[i - 2], 2);
+ memcopy(dest, &destlen, destsize, &src[i - 2],
+ 2);
break;
}
break;
@@ -310,43 +314,43 @@
if (src[i] == 'p') {
state = LP;
charcopy(dest, &destlen, destsize, src[i++]);
- }
- else state = DEFAULT;
+ } else
+ state = DEFAULT;
break;
case LP:
if (src[i] == 't') {
state = COMLPT;
i++;
- }
- else state = DEFAULT;
+ } else
+ state = DEFAULT;
break;
case N:
if (src[i] == 'u') {
state = NU;
charcopy(dest, &destlen, destsize, src[i++]);
- }
- else state = DEFAULT;
+ } else
+ state = DEFAULT;
break;
case NU:
if (src[i] == 'l') {
state = THIRD;
i++;
- }
- else state = DEFAULT;
+ } else
+ state = DEFAULT;
break;
case P:
if (src[i] == 'r') {
state = PR;
charcopy(dest, &destlen, destsize, src[i++]);
- }
- else state = DEFAULT;
+ } else
+ state = DEFAULT;
break;
case PR:
if (src[i] == 'n') {
state = THIRD;
i++;
- }
- else state = DEFAULT;
+ } else
+ state = DEFAULT;
break;
case LDOT:
switch (src[i]) {
@@ -393,18 +397,18 @@
if (src[i] == 'g') {
state = HGDI;
charcopy(dest, &destlen, destsize, src[i++]);
- }
- else state = DEFAULT;
+ } else
+ state = DEFAULT;
break;
case HGDI:
if (src[i] == '/') {
state = START;
if (encodedir)
memcopy(dest, &destlen, destsize, ".hg",
- 3);
+ 3);
charcopy(dest, &destlen, destsize, src[i++]);
- }
- else state = DEFAULT;
+ } else
+ state = DEFAULT;
break;
case SPACE:
switch (src[i]) {
@@ -444,19 +448,17 @@
if (inset(onebyte, src[i])) {
do {
charcopy(dest, &destlen,
- destsize, src[i++]);
+ destsize, src[i++]);
} while (i < len &&
- inset(onebyte, src[i]));
- }
- else if (inset(twobytes, src[i])) {
+ inset(onebyte, src[i]));
+ } else if (inset(twobytes, src[i])) {
char c = src[i++];
charcopy(dest, &destlen, destsize, '_');
charcopy(dest, &destlen, destsize,
- c == '_' ? '_' : c + 32);
- }
- else
+ c == '_' ? '_' : c + 32);
+ } else
escape3(dest, &destlen, destsize,
- src[i++]);
+ src[i++]);
break;
}
break;
@@ -466,31 +468,29 @@
return destlen;
}
-static Py_ssize_t basicencode(char *dest, size_t destsize,
- const char *src, Py_ssize_t len)
+static Py_ssize_t basicencode(char *dest, size_t destsize, const char *src,
+ Py_ssize_t len)
{
- static const uint32_t twobytes[8] = { 0, 0, 0x87fffffe };
+ static const uint32_t twobytes[8] = {0, 0, 0x87fffffe};
static const uint32_t onebyte[8] = {
- 1, 0x2bff3bfa, 0x68000001, 0x2fffffff,
+ 1, 0x2bff3bfa, 0x68000001, 0x2fffffff,
};
Py_ssize_t destlen = 0;
- return _encode(twobytes, onebyte, dest, destlen, destsize,
- src, len, 1);
+ return _encode(twobytes, onebyte, dest, destlen, destsize, src, len, 1);
}
static const Py_ssize_t maxstorepathlen = 120;
-static Py_ssize_t _lowerencode(char *dest, size_t destsize,
- const char *src, Py_ssize_t len)
+static Py_ssize_t _lowerencode(char *dest, size_t destsize, const char *src,
+ Py_ssize_t len)
{
- static const uint32_t onebyte[8] = {
- 1, 0x2bfffbfb, 0xe8000001, 0x2fffffff
- };
+ static const uint32_t onebyte[8] = {1, 0x2bfffbfb, 0xe8000001,
+ 0x2fffffff};
- static const uint32_t lower[8] = { 0, 0, 0x7fffffe };
+ static const uint32_t lower[8] = {0, 0, 0x7fffffe};
Py_ssize_t i, destlen = 0;
@@ -512,7 +512,8 @@
Py_ssize_t len, newlen;
PyObject *ret;
- if (!PyArg_ParseTuple(args, "s#:lowerencode", &path, &len))
+ if (!PyArg_ParseTuple(args, PY23("s#:lowerencode", "y#:lowerencode"),
+ &path, &len))
return NULL;
newlen = _lowerencode(NULL, 0, path, len);
@@ -524,13 +525,13 @@
}
/* See store.py:_auxencode for a description. */
-static Py_ssize_t auxencode(char *dest, size_t destsize,
- const char *src, Py_ssize_t len)
+static Py_ssize_t auxencode(char *dest, size_t destsize, const char *src,
+ Py_ssize_t len)
{
static const uint32_t twobytes[8];
static const uint32_t onebyte[8] = {
- ~0U, 0xffff3ffe, ~0U, ~0U, ~0U, ~0U, ~0U, ~0U,
+ ~0U, 0xffff3ffe, ~0U, ~0U, ~0U, ~0U, ~0U, ~0U,
};
return _encode(twobytes, onebyte, dest, 0, destsize, src, len, 0);
@@ -590,8 +591,7 @@
break;
charcopy(dest, &destlen, destsize, src[i]);
p = -1;
- }
- else if (p < dirprefixlen)
+ } else if (p < dirprefixlen)
charcopy(dest, &destlen, destsize, src[i]);
}
@@ -622,13 +622,13 @@
slop = maxstorepathlen - used;
if (slop > 0) {
Py_ssize_t basenamelen =
- lastslash >= 0 ? len - lastslash - 2 : len - 1;
+ lastslash >= 0 ? len - lastslash - 2 : len - 1;
if (basenamelen > slop)
basenamelen = slop;
if (basenamelen > 0)
memcopy(dest, &destlen, destsize, &src[lastslash + 1],
- basenamelen);
+ basenamelen);
}
/* Add hash and suffix. */
@@ -637,7 +637,7 @@
if (lastdot >= 0)
memcopy(dest, &destlen, destsize, &src[lastdot],
- len - lastdot - 1);
+ len - lastdot - 1);
assert(PyBytes_Check(ret));
Py_SIZE(ret) = destlen;
@@ -672,8 +672,8 @@
if (shafunc == NULL) {
PyErr_SetString(PyExc_AttributeError,
- "module 'hashlib' has no "
- "attribute 'sha1'");
+ "module 'hashlib' has no "
+ "attribute 'sha1'");
return -1;
}
}
@@ -690,7 +690,7 @@
if (!PyBytes_Check(hashobj) || PyBytes_GET_SIZE(hashobj) != 20) {
PyErr_SetString(PyExc_TypeError,
- "result of digest is not a 20-byte hash");
+ "result of digest is not a 20-byte hash");
Py_DECREF(hashobj);
return -1;
}
@@ -755,10 +755,9 @@
assert(PyBytes_Check(newobj));
Py_SIZE(newobj)--;
basicencode(PyBytes_AS_STRING(newobj), newlen, path,
- len + 1);
+ len + 1);
}
- }
- else
+ } else
newobj = hashencode(path, len + 1);
return newobj;
--- a/mercurial/cext/revlog.c Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/cext/revlog.c Mon Mar 19 08:07:18 2018 -0700
@@ -87,9 +87,9 @@
static Py_ssize_t inline_scan(indexObject *self, const char **offsets);
#if LONG_MAX == 0x7fffffffL
-static char *tuple_format = "Kiiiiiis#";
+static const char *const tuple_format = PY23("Kiiiiiis#", "Kiiiiiiy#");
#else
-static char *tuple_format = "kiiiiiis#";
+static const char *const tuple_format = PY23("kiiiiiis#", "kiiiiiiy#");
#endif
/* A RevlogNG v1 index entry is 64 bytes long. */
@@ -643,8 +643,10 @@
if (!PyArg_ParseTuple(args, "O", &roots))
goto done;
- if (roots == NULL || !PyList_Check(roots))
+ if (roots == NULL || !PyList_Check(roots)) {
+ PyErr_SetString(PyExc_TypeError, "roots must be a list");
goto done;
+ }
phases = calloc(len, 1); /* phase per rev: {0: public, 1: draft, 2: secret} */
if (phases == NULL) {
@@ -667,8 +669,11 @@
if (phaseset == NULL)
goto release;
PyList_SET_ITEM(phasessetlist, i+1, phaseset);
- if (!PyList_Check(phaseroots))
+ if (!PyList_Check(phaseroots)) {
+ PyErr_SetString(PyExc_TypeError,
+ "roots item must be a list");
goto release;
+ }
minrevphase = add_roots_get_min(self, phaseroots, i+1, phases);
if (minrevphase == -2) /* Error from add_roots_get_min */
goto release;
@@ -1243,7 +1248,7 @@
char *node;
int rev, i;
- if (!PyArg_ParseTuple(args, "s#", &node, &nodelen))
+ if (!PyArg_ParseTuple(args, PY23("s#", "y#"), &node, &nodelen))
return NULL;
if (nodelen < 4) {
@@ -2077,7 +2082,7 @@
Py_INCREF(&indexType);
PyModule_AddObject(mod, "index", (PyObject *)&indexType);
- nullentry = Py_BuildValue("iiiiiiis#", 0, 0, 0,
+ nullentry = Py_BuildValue(PY23("iiiiiiis#", "iiiiiiiy#"), 0, 0, 0,
-1, -1, -1, -1, nullid, 20);
if (nullentry)
PyObject_GC_UnTrack(nullentry);
--- a/mercurial/cext/util.h Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/cext/util.h Mon Mar 19 08:07:18 2018 -0700
@@ -14,6 +14,13 @@
#define IS_PY3K
#endif
+/* helper to switch things like string literal depending on Python version */
+#ifdef IS_PY3K
+#define PY23(py2, py3) py3
+#else
+#define PY23(py2, py3) py2
+#endif
+
/* clang-format off */
typedef struct {
PyObject_HEAD
--- a/mercurial/cffi/bdiffbuild.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/cffi/bdiffbuild.py Mon Mar 19 08:07:18 2018 -0700
@@ -4,9 +4,10 @@
import os
ffi = cffi.FFI()
-ffi.set_source("mercurial.cffi._bdiff",
- open(os.path.join(os.path.join(os.path.dirname(__file__), '..'),
- 'bdiff.c')).read(), include_dirs=['mercurial'])
+with open(os.path.join(os.path.join(os.path.dirname(__file__), '..'),
+ 'bdiff.c')) as f:
+ ffi.set_source("mercurial.cffi._bdiff",
+ f.read(), include_dirs=['mercurial'])
ffi.cdef("""
struct bdiff_line {
int hash, n, e;
--- a/mercurial/cffi/mpatchbuild.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/cffi/mpatchbuild.py Mon Mar 19 08:07:18 2018 -0700
@@ -6,8 +6,9 @@
ffi = cffi.FFI()
mpatch_c = os.path.join(os.path.join(os.path.dirname(__file__), '..',
'mpatch.c'))
-ffi.set_source("mercurial.cffi._mpatch", open(mpatch_c).read(),
- include_dirs=["mercurial"])
+with open(mpatch_c) as f:
+ ffi.set_source("mercurial.cffi._mpatch", f.read(),
+ include_dirs=["mercurial"])
ffi.cdef("""
struct mpatch_frag {
--- a/mercurial/changegroup.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/changegroup.py Mon Mar 19 08:07:18 2018 -0700
@@ -32,6 +32,10 @@
_CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
_CHANGEGROUPV3_DELTA_HEADER = ">20s20s20s20s20sH"
+# When narrowing is finalized and no longer subject to format changes,
+# we should move this to just "narrow" or similar.
+NARROW_REQUIREMENT = 'narrowhg-experimental'
+
readexactly = util.readexactly
def getchunk(stream):
@@ -71,7 +75,7 @@
fh = open(filename, "wb", 131072)
else:
fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
- fh = os.fdopen(fd, pycompat.sysstr("wb"))
+ fh = os.fdopen(fd, r"wb")
cleanup = filename
for c in chunks:
fh.write(c)
@@ -407,7 +411,7 @@
newheads = [h for h in repo.heads()
if h not in oldheads]
repo.ui.log("incoming",
- "%s incoming changes - new heads: %s\n",
+ "%d incoming changes - new heads: %s\n",
len(added),
', '.join([hex(c[:6]) for c in newheads]))
@@ -899,6 +903,11 @@
# support versions 01 and 02.
versions.discard('01')
versions.discard('02')
+ if NARROW_REQUIREMENT in repo.requirements:
+ # Versions 01 and 02 don't support revlog flags, and we need to
+ # support that for stripping and unbundling to work.
+ versions.discard('01')
+ versions.discard('02')
return versions
def localversion(repo):
--- a/mercurial/changelog.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/changelog.py Mon Mar 19 08:07:18 2018 -0700
@@ -20,9 +20,11 @@
from . import (
encoding,
error,
+ pycompat,
revlog,
util,
)
+from .utils import dateutil
_defaultextra = {'branch': 'default'}
@@ -90,6 +92,11 @@
return self.offset
def flush(self):
pass
+
+ @property
+ def closed(self):
+ return self.fp.closed
+
def close(self):
self.fp.close()
@@ -127,6 +134,13 @@
self.offset += len(s)
self._end += len(s)
+ def __enter__(self):
+ self.fp.__enter__()
+ return self
+
+ def __exit__(self, *args):
+ return self.fp.__exit__(*args)
+
def _divertopener(opener, target):
"""build an opener that writes in 'target.a' instead of 'target'"""
def _divert(name, mode='r', checkambig=False):
@@ -420,7 +434,7 @@
self._delaybuf = None
self._divert = False
# split when we're done
- self.checkinlinesize(tr)
+ self._enforceinlinesize(tr)
def _writepending(self, tr):
"create a file containing the unfinalized state for pretxnchangegroup"
@@ -446,9 +460,9 @@
return False
- def checkinlinesize(self, tr, fp=None):
+ def _enforceinlinesize(self, tr, fp=None):
if not self._delayed:
- revlog.revlog.checkinlinesize(self, tr, fp)
+ revlog.revlog._enforceinlinesize(self, tr, fp)
def read(self, node):
"""Obtain data from a parsed changelog revision.
@@ -505,15 +519,15 @@
if not user:
raise error.RevlogError(_("empty username"))
if "\n" in user:
- raise error.RevlogError(_("username %s contains a newline")
- % repr(user))
+ raise error.RevlogError(_("username %r contains a newline")
+ % pycompat.bytestr(user))
desc = stripdesc(desc)
if date:
- parseddate = "%d %d" % util.parsedate(date)
+ parseddate = "%d %d" % dateutil.parsedate(date)
else:
- parseddate = "%d %d" % util.makedate()
+ parseddate = "%d %d" % dateutil.makedate()
if extra:
branch = extra.get("branch")
if branch in ("default", ""):
--- a/mercurial/chgserver.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/chgserver.py Mon Mar 19 08:07:18 2018 -0700
@@ -45,6 +45,7 @@
import os
import re
import socket
+import stat
import struct
import time
@@ -161,7 +162,7 @@
def trystat(path):
try:
st = os.stat(path)
- return (st.st_mtime, st.st_size)
+ return (st[stat.ST_MTIME], st.st_size)
except OSError:
# could be ENOENT, EPERM etc. not fatal in any case
pass
@@ -295,9 +296,9 @@
_iochannels = [
# server.ch, ui.fp, mode
- ('cin', 'fin', pycompat.sysstr('rb')),
- ('cout', 'fout', pycompat.sysstr('wb')),
- ('cerr', 'ferr', pycompat.sysstr('wb')),
+ ('cin', 'fin', r'rb'),
+ ('cout', 'fout', r'wb'),
+ ('cerr', 'ferr', r'wb'),
]
class chgcmdserver(commandserver.server):
@@ -546,9 +547,9 @@
def _issocketowner(self):
try:
- stat = os.stat(self._realaddress)
- return (stat.st_ino == self._socketstat.st_ino and
- stat.st_mtime == self._socketstat.st_mtime)
+ st = os.stat(self._realaddress)
+ return (st.st_ino == self._socketstat.st_ino and
+ st[stat.ST_MTIME] == self._socketstat[stat.ST_MTIME])
except OSError:
return False
--- a/mercurial/cmdutil.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/cmdutil.py Mon Mar 19 08:07:18 2018 -0700
@@ -8,7 +8,6 @@
from __future__ import absolute_import
import errno
-import itertools
import os
import re
import tempfile
@@ -26,14 +25,13 @@
changelog,
copies,
crecord as crecordmod,
- dagop,
dirstateguard,
encoding,
error,
formatter,
- graphmod,
+ logcmdutil,
match as matchmod,
- mdiff,
+ merge as mergemod,
mergeutil,
obsolete,
patch,
@@ -41,16 +39,16 @@
pycompat,
registrar,
revlog,
- revset,
- revsetlang,
rewriteutil,
scmutil,
smartset,
+ subrepoutil,
templatekw,
templater,
util,
vfs as vfsmod,
)
+from .utils import dateutil
stringio = util.stringio
# templates of common command options
@@ -226,7 +224,6 @@
def dorecord(ui, repo, commitfunc, cmdsuggest, backupall,
filterfn, *pats, **opts):
- from . import merge as mergemod
opts = pycompat.byteskwargs(opts)
if not ui.interactive():
if cmdsuggest:
@@ -366,7 +363,7 @@
ui.debug(fp.getvalue())
patch.internalpatch(ui, repo, fp, 1, eolmode=None)
except error.PatchError as err:
- raise error.Abort(str(err))
+ raise error.Abort(pycompat.bytestr(err))
del fp
# 4. We prepared working directory according to filtered
@@ -563,8 +560,6 @@
return '\n'.join(commentedlines) + '\n'
def _conflictsmsg(repo):
- # avoid merge cycle
- from . import merge as mergemod
mergestate = mergemod.mergestate.read(repo)
if not mergestate.active():
return
@@ -899,65 +894,97 @@
else:
return commiteditor
-def loglimit(opts):
- """get the log limit according to option -l/--limit"""
- limit = opts.get('limit')
- if limit:
- try:
- limit = int(limit)
- except ValueError:
- raise error.Abort(_('limit must be a positive integer'))
- if limit <= 0:
- raise error.Abort(_('limit must be positive'))
- else:
- limit = None
- return limit
-
-def makefilename(repo, pat, node, desc=None,
- total=None, seqno=None, revwidth=None, pathname=None):
- node_expander = {
- 'H': lambda: hex(node),
- 'R': lambda: '%d' % repo.changelog.rev(node),
- 'h': lambda: short(node),
- 'm': lambda: re.sub('[^\w]', '_', desc or '')
- }
+def rendertemplate(ctx, tmpl, props=None):
+ """Expand a literal template 'tmpl' byte-string against one changeset
+
+ Each props item must be a stringify-able value or a callable returning
+ such value, i.e. no bare list nor dict should be passed.
+ """
+ repo = ctx.repo()
+ tres = formatter.templateresources(repo.ui, repo)
+ t = formatter.maketemplater(repo.ui, tmpl, defaults=templatekw.keywords,
+ resources=tres)
+ mapping = {'ctx': ctx, 'revcache': {}}
+ if props:
+ mapping.update(props)
+ return t.renderdefault(mapping)
+
+def _buildfntemplate(pat, total=None, seqno=None, revwidth=None, pathname=None):
+ r"""Convert old-style filename format string to template string
+
+ >>> _buildfntemplate(b'foo-%b-%n.patch', seqno=0)
+ 'foo-{reporoot|basename}-{seqno}.patch'
+ >>> _buildfntemplate(b'%R{tags % "{tag}"}%H')
+ '{rev}{tags % "{tag}"}{node}'
+
+ '\' in outermost strings has to be escaped because it is a directory
+ separator on Windows:
+
+ >>> _buildfntemplate(b'c:\\tmp\\%R\\%n.patch', seqno=0)
+ 'c:\\\\tmp\\\\{rev}\\\\{seqno}.patch'
+ >>> _buildfntemplate(b'\\\\foo\\bar.patch')
+ '\\\\\\\\foo\\\\bar.patch'
+ >>> _buildfntemplate(b'\\{tags % "{tag}"}')
+ '\\\\{tags % "{tag}"}'
+
+ but inner strings follow the template rules (i.e. '\' is taken as an
+ escape character):
+
+ >>> _buildfntemplate(br'{"c:\tmp"}', seqno=0)
+ '{"c:\\tmp"}'
+ """
expander = {
- '%': lambda: '%',
- 'b': lambda: os.path.basename(repo.root),
- }
-
- try:
- if node:
- expander.update(node_expander)
- if node:
- expander['r'] = (lambda:
- ('%d' % repo.changelog.rev(node)).zfill(revwidth or 0))
- if total is not None:
- expander['N'] = lambda: '%d' % total
- if seqno is not None:
- expander['n'] = lambda: '%d' % seqno
- if total is not None and seqno is not None:
- expander['n'] = (lambda: ('%d' % seqno).zfill(len('%d' % total)))
- if pathname is not None:
- expander['s'] = lambda: os.path.basename(pathname)
- expander['d'] = lambda: os.path.dirname(pathname) or '.'
- expander['p'] = lambda: pathname
-
- newname = []
- patlen = len(pat)
- i = 0
- while i < patlen:
- c = pat[i:i + 1]
- if c == '%':
- i += 1
- c = pat[i:i + 1]
- c = expander[c]()
- newname.append(c)
- i += 1
- return ''.join(newname)
- except KeyError as inst:
- raise error.Abort(_("invalid format spec '%%%s' in output filename") %
- inst.args[0])
+ b'H': b'{node}',
+ b'R': b'{rev}',
+ b'h': b'{node|short}',
+ b'm': br'{sub(r"[^\w]", "_", desc|firstline)}',
+ b'r': b'{if(revwidth, pad(rev, revwidth, "0", left=True), rev)}',
+ b'%': b'%',
+ b'b': b'{reporoot|basename}',
+ }
+ if total is not None:
+ expander[b'N'] = b'{total}'
+ if seqno is not None:
+ expander[b'n'] = b'{seqno}'
+ if total is not None and seqno is not None:
+ expander[b'n'] = b'{pad(seqno, total|stringify|count, "0", left=True)}'
+ if pathname is not None:
+ expander[b's'] = b'{pathname|basename}'
+ expander[b'd'] = b'{if(pathname|dirname, pathname|dirname, ".")}'
+ expander[b'p'] = b'{pathname}'
+
+ newname = []
+ for typ, start, end in templater.scantemplate(pat, raw=True):
+ if typ != b'string':
+ newname.append(pat[start:end])
+ continue
+ i = start
+ while i < end:
+ n = pat.find(b'%', i, end)
+ if n < 0:
+ newname.append(util.escapestr(pat[i:end]))
+ break
+ newname.append(util.escapestr(pat[i:n]))
+ if n + 2 > end:
+ raise error.Abort(_("incomplete format spec in output "
+ "filename"))
+ c = pat[n + 1:n + 2]
+ i = n + 2
+ try:
+ newname.append(expander[c])
+ except KeyError:
+ raise error.Abort(_("invalid format spec '%%%s' in output "
+ "filename") % c)
+ return ''.join(newname)
+
+def makefilename(ctx, pat, **props):
+ if not pat:
+ return pat
+ tmpl = _buildfntemplate(pat, **props)
+ # BUG: alias expansion shouldn't be made against template fragments
+ # rewritten from %-format strings, but we have no easy way to partially
+ # disable the expansion.
+ return rendertemplate(ctx, tmpl, pycompat.byteskwargs(props))
def isstdiofilename(pat):
"""True if the given pat looks like a filename denoting stdin/stdout"""
@@ -982,19 +1009,17 @@
def __exit__(self, exc_type, exc_value, exc_tb):
pass
-def makefileobj(repo, pat, node=None, desc=None, total=None,
- seqno=None, revwidth=None, mode='wb', modemap=None,
- pathname=None):
-
+def makefileobj(ctx, pat, mode='wb', modemap=None, **props):
writable = mode not in ('r', 'rb')
if isstdiofilename(pat):
+ repo = ctx.repo()
if writable:
fp = repo.ui.fout
else:
fp = repo.ui.fin
return _unclosablefile(fp)
- fn = makefilename(repo, pat, node, desc, total, seqno, revwidth, pathname)
+ fn = makefilename(ctx, pat, **props)
if modemap is not None:
mode = modemap.get(fn, mode)
if mode == 'wb':
@@ -1408,7 +1433,7 @@
files=files, eolmode=None, similarity=sim / 100.0)
except error.PatchError as e:
if not partial:
- raise error.Abort(str(e))
+ raise error.Abort(pycompat.bytestr(e))
if partial:
rejects = True
@@ -1454,7 +1479,7 @@
patch.patchrepo(ui, repo, p1, store, tmpname, strip, prefix,
files, eolmode=None)
except error.PatchError as e:
- raise error.Abort(str(e))
+ raise error.Abort(util.forcebytestr(e))
if opts.get('exact'):
editor = None
else:
@@ -1507,7 +1532,7 @@
write("# HG changeset patch\n")
write("# User %s\n" % ctx.user())
write("# Date %d %d\n" % ctx.date())
- write("# %s\n" % util.datestr(ctx.date()))
+ write("# %s\n" % dateutil.datestr(ctx.date()))
if branch and branch != 'default':
write("# Branch %s\n" % branch)
write("# Node ID %s\n" % hex(node))
@@ -1569,11 +1594,8 @@
ctx = repo[rev]
fo = None
if not fp and fntemplate:
- desc_lines = ctx.description().rstrip().split('\n')
- desc = desc_lines[0] #Commit always has a first line.
- fo = makefileobj(repo, fntemplate, ctx.node(), desc=desc,
- total=total, seqno=seqno, revwidth=revwidth,
- mode='wb', modemap=filemode)
+ fo = makefileobj(ctx, fntemplate, mode='wb', modemap=filemode,
+ total=total, seqno=seqno, revwidth=revwidth)
dest = fo.name
def write(s, **kw):
fo.write(s)
@@ -1584,500 +1606,6 @@
if fo is not None:
fo.close()
-def diffordiffstat(ui, repo, diffopts, node1, node2, match,
- changes=None, stat=False, fp=None, prefix='',
- root='', listsubrepos=False, hunksfilterfn=None):
- '''show diff or diffstat.'''
- if fp is None:
- write = ui.write
- else:
- def write(s, **kw):
- fp.write(s)
-
- if root:
- relroot = pathutil.canonpath(repo.root, repo.getcwd(), root)
- else:
- relroot = ''
- if relroot != '':
- # XXX relative roots currently don't work if the root is within a
- # subrepo
- uirelroot = match.uipath(relroot)
- relroot += '/'
- for matchroot in match.files():
- if not matchroot.startswith(relroot):
- ui.warn(_('warning: %s not inside relative root %s\n') % (
- match.uipath(matchroot), uirelroot))
-
- if stat:
- diffopts = diffopts.copy(context=0, noprefix=False)
- width = 80
- if not ui.plain():
- width = ui.termwidth()
- chunks = patch.diff(repo, node1, node2, match, changes, opts=diffopts,
- prefix=prefix, relroot=relroot,
- hunksfilterfn=hunksfilterfn)
- for chunk, label in patch.diffstatui(util.iterlines(chunks),
- width=width):
- write(chunk, label=label)
- else:
- for chunk, label in patch.diffui(repo, node1, node2, match,
- changes, opts=diffopts, prefix=prefix,
- relroot=relroot,
- hunksfilterfn=hunksfilterfn):
- write(chunk, label=label)
-
- if listsubrepos:
- ctx1 = repo[node1]
- ctx2 = repo[node2]
- for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
- tempnode2 = node2
- try:
- if node2 is not None:
- tempnode2 = ctx2.substate[subpath][1]
- except KeyError:
- # A subrepo that existed in node1 was deleted between node1 and
- # node2 (inclusive). Thus, ctx2's substate won't contain that
- # subpath. The best we can do is to ignore it.
- tempnode2 = None
- submatch = matchmod.subdirmatcher(subpath, match)
- sub.diff(ui, diffopts, tempnode2, submatch, changes=changes,
- stat=stat, fp=fp, prefix=prefix)
-
-def _changesetlabels(ctx):
- labels = ['log.changeset', 'changeset.%s' % ctx.phasestr()]
- if ctx.obsolete():
- labels.append('changeset.obsolete')
- if ctx.isunstable():
- labels.append('changeset.unstable')
- for instability in ctx.instabilities():
- labels.append('instability.%s' % instability)
- return ' '.join(labels)
-
-class changeset_printer(object):
- '''show changeset information when templating not requested.'''
-
- def __init__(self, ui, repo, matchfn, diffopts, buffered):
- self.ui = ui
- self.repo = repo
- self.buffered = buffered
- self.matchfn = matchfn
- self.diffopts = diffopts
- self.header = {}
- self.hunk = {}
- self.lastheader = None
- self.footer = None
- self._columns = templatekw.getlogcolumns()
-
- def flush(self, ctx):
- rev = ctx.rev()
- if rev in self.header:
- h = self.header[rev]
- if h != self.lastheader:
- self.lastheader = h
- self.ui.write(h)
- del self.header[rev]
- if rev in self.hunk:
- self.ui.write(self.hunk[rev])
- del self.hunk[rev]
-
- def close(self):
- if self.footer:
- self.ui.write(self.footer)
-
- def show(self, ctx, copies=None, matchfn=None, hunksfilterfn=None,
- **props):
- props = pycompat.byteskwargs(props)
- if self.buffered:
- self.ui.pushbuffer(labeled=True)
- self._show(ctx, copies, matchfn, hunksfilterfn, props)
- self.hunk[ctx.rev()] = self.ui.popbuffer()
- else:
- self._show(ctx, copies, matchfn, hunksfilterfn, props)
-
- def _show(self, ctx, copies, matchfn, hunksfilterfn, props):
- '''show a single changeset or file revision'''
- changenode = ctx.node()
- rev = ctx.rev()
-
- if self.ui.quiet:
- self.ui.write("%s\n" % scmutil.formatchangeid(ctx),
- label='log.node')
- return
-
- columns = self._columns
- self.ui.write(columns['changeset'] % scmutil.formatchangeid(ctx),
- label=_changesetlabels(ctx))
-
- # branches are shown first before any other names due to backwards
- # compatibility
- branch = ctx.branch()
- # don't show the default branch name
- if branch != 'default':
- self.ui.write(columns['branch'] % branch, label='log.branch')
-
- for nsname, ns in self.repo.names.iteritems():
- # branches has special logic already handled above, so here we just
- # skip it
- if nsname == 'branches':
- continue
- # we will use the templatename as the color name since those two
- # should be the same
- for name in ns.names(self.repo, changenode):
- self.ui.write(ns.logfmt % name,
- label='log.%s' % ns.colorname)
- if self.ui.debugflag:
- self.ui.write(columns['phase'] % ctx.phasestr(), label='log.phase')
- for pctx in scmutil.meaningfulparents(self.repo, ctx):
- label = 'log.parent changeset.%s' % pctx.phasestr()
- self.ui.write(columns['parent'] % scmutil.formatchangeid(pctx),
- label=label)
-
- if self.ui.debugflag and rev is not None:
- mnode = ctx.manifestnode()
- mrev = self.repo.manifestlog._revlog.rev(mnode)
- self.ui.write(columns['manifest']
- % scmutil.formatrevnode(self.ui, mrev, mnode),
- label='ui.debug log.manifest')
- self.ui.write(columns['user'] % ctx.user(), label='log.user')
- self.ui.write(columns['date'] % util.datestr(ctx.date()),
- label='log.date')
-
- if ctx.isunstable():
- instabilities = ctx.instabilities()
- self.ui.write(columns['instability'] % ', '.join(instabilities),
- label='log.instability')
-
- elif ctx.obsolete():
- self._showobsfate(ctx)
-
- self._exthook(ctx)
-
- if self.ui.debugflag:
- files = ctx.p1().status(ctx)[:3]
- for key, value in zip(['files', 'files+', 'files-'], files):
- if value:
- self.ui.write(columns[key] % " ".join(value),
- label='ui.debug log.files')
- elif ctx.files() and self.ui.verbose:
- self.ui.write(columns['files'] % " ".join(ctx.files()),
- label='ui.note log.files')
- if copies and self.ui.verbose:
- copies = ['%s (%s)' % c for c in copies]
- self.ui.write(columns['copies'] % ' '.join(copies),
- label='ui.note log.copies')
-
- extra = ctx.extra()
- if extra and self.ui.debugflag:
- for key, value in sorted(extra.items()):
- self.ui.write(columns['extra'] % (key, util.escapestr(value)),
- label='ui.debug log.extra')
-
- description = ctx.description().strip()
- if description:
- if self.ui.verbose:
- self.ui.write(_("description:\n"),
- label='ui.note log.description')
- self.ui.write(description,
- label='ui.note log.description')
- self.ui.write("\n\n")
- else:
- self.ui.write(columns['summary'] % description.splitlines()[0],
- label='log.summary')
- self.ui.write("\n")
-
- self.showpatch(ctx, matchfn, hunksfilterfn=hunksfilterfn)
-
- def _showobsfate(self, ctx):
- obsfate = templatekw.showobsfate(repo=self.repo, ctx=ctx, ui=self.ui)
-
- if obsfate:
- for obsfateline in obsfate:
- self.ui.write(self._columns['obsolete'] % obsfateline,
- label='log.obsfate')
-
- def _exthook(self, ctx):
- '''empty method used by extension as a hook point
- '''
-
- def showpatch(self, ctx, matchfn, hunksfilterfn=None):
- if not matchfn:
- matchfn = self.matchfn
- if matchfn:
- stat = self.diffopts.get('stat')
- diff = self.diffopts.get('patch')
- diffopts = patch.diffallopts(self.ui, self.diffopts)
- node = ctx.node()
- prev = ctx.p1().node()
- if stat:
- diffordiffstat(self.ui, self.repo, diffopts, prev, node,
- match=matchfn, stat=True,
- hunksfilterfn=hunksfilterfn)
- if diff:
- if stat:
- self.ui.write("\n")
- diffordiffstat(self.ui, self.repo, diffopts, prev, node,
- match=matchfn, stat=False,
- hunksfilterfn=hunksfilterfn)
- if stat or diff:
- self.ui.write("\n")
-
-class jsonchangeset(changeset_printer):
- '''format changeset information.'''
-
- def __init__(self, ui, repo, matchfn, diffopts, buffered):
- changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
- self.cache = {}
- self._first = True
-
- def close(self):
- if not self._first:
- self.ui.write("\n]\n")
- else:
- self.ui.write("[]\n")
-
- def _show(self, ctx, copies, matchfn, hunksfilterfn, props):
- '''show a single changeset or file revision'''
- rev = ctx.rev()
- if rev is None:
- jrev = jnode = 'null'
- else:
- jrev = '%d' % rev
- jnode = '"%s"' % hex(ctx.node())
- j = encoding.jsonescape
-
- if self._first:
- self.ui.write("[\n {")
- self._first = False
- else:
- self.ui.write(",\n {")
-
- if self.ui.quiet:
- self.ui.write(('\n "rev": %s') % jrev)
- self.ui.write((',\n "node": %s') % jnode)
- self.ui.write('\n }')
- return
-
- self.ui.write(('\n "rev": %s') % jrev)
- self.ui.write((',\n "node": %s') % jnode)
- self.ui.write((',\n "branch": "%s"') % j(ctx.branch()))
- self.ui.write((',\n "phase": "%s"') % ctx.phasestr())
- self.ui.write((',\n "user": "%s"') % j(ctx.user()))
- self.ui.write((',\n "date": [%d, %d]') % ctx.date())
- self.ui.write((',\n "desc": "%s"') % j(ctx.description()))
-
- self.ui.write((',\n "bookmarks": [%s]') %
- ", ".join('"%s"' % j(b) for b in ctx.bookmarks()))
- self.ui.write((',\n "tags": [%s]') %
- ", ".join('"%s"' % j(t) for t in ctx.tags()))
- self.ui.write((',\n "parents": [%s]') %
- ", ".join('"%s"' % c.hex() for c in ctx.parents()))
-
- if self.ui.debugflag:
- if rev is None:
- jmanifestnode = 'null'
- else:
- jmanifestnode = '"%s"' % hex(ctx.manifestnode())
- self.ui.write((',\n "manifest": %s') % jmanifestnode)
-
- self.ui.write((',\n "extra": {%s}') %
- ", ".join('"%s": "%s"' % (j(k), j(v))
- for k, v in ctx.extra().items()))
-
- files = ctx.p1().status(ctx)
- self.ui.write((',\n "modified": [%s]') %
- ", ".join('"%s"' % j(f) for f in files[0]))
- self.ui.write((',\n "added": [%s]') %
- ", ".join('"%s"' % j(f) for f in files[1]))
- self.ui.write((',\n "removed": [%s]') %
- ", ".join('"%s"' % j(f) for f in files[2]))
-
- elif self.ui.verbose:
- self.ui.write((',\n "files": [%s]') %
- ", ".join('"%s"' % j(f) for f in ctx.files()))
-
- if copies:
- self.ui.write((',\n "copies": {%s}') %
- ", ".join('"%s": "%s"' % (j(k), j(v))
- for k, v in copies))
-
- matchfn = self.matchfn
- if matchfn:
- stat = self.diffopts.get('stat')
- diff = self.diffopts.get('patch')
- diffopts = patch.difffeatureopts(self.ui, self.diffopts, git=True)
- node, prev = ctx.node(), ctx.p1().node()
- if stat:
- self.ui.pushbuffer()
- diffordiffstat(self.ui, self.repo, diffopts, prev, node,
- match=matchfn, stat=True)
- self.ui.write((',\n "diffstat": "%s"')
- % j(self.ui.popbuffer()))
- if diff:
- self.ui.pushbuffer()
- diffordiffstat(self.ui, self.repo, diffopts, prev, node,
- match=matchfn, stat=False)
- self.ui.write((',\n "diff": "%s"') % j(self.ui.popbuffer()))
-
- self.ui.write("\n }")
-
-class changeset_templater(changeset_printer):
- '''format changeset information.
-
- Note: there are a variety of convenience functions to build a
- changeset_templater for common cases. See functions such as:
- makelogtemplater, show_changeset, buildcommittemplate, or other
- functions that use changesest_templater.
- '''
-
- # Arguments before "buffered" used to be positional. Consider not
- # adding/removing arguments before "buffered" to not break callers.
- def __init__(self, ui, repo, tmplspec, matchfn=None, diffopts=None,
- buffered=False):
- diffopts = diffopts or {}
-
- changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
- tres = formatter.templateresources(ui, repo)
- self.t = formatter.loadtemplater(ui, tmplspec,
- defaults=templatekw.keywords,
- resources=tres,
- cache=templatekw.defaulttempl)
- self._counter = itertools.count()
- self.cache = tres['cache'] # shared with _graphnodeformatter()
-
- self._tref = tmplspec.ref
- self._parts = {'header': '', 'footer': '',
- tmplspec.ref: tmplspec.ref,
- 'docheader': '', 'docfooter': '',
- 'separator': ''}
- if tmplspec.mapfile:
- # find correct templates for current mode, for backward
- # compatibility with 'log -v/-q/--debug' using a mapfile
- tmplmodes = [
- (True, ''),
- (self.ui.verbose, '_verbose'),
- (self.ui.quiet, '_quiet'),
- (self.ui.debugflag, '_debug'),
- ]
- for mode, postfix in tmplmodes:
- for t in self._parts:
- cur = t + postfix
- if mode and cur in self.t:
- self._parts[t] = cur
- else:
- partnames = [p for p in self._parts.keys() if p != tmplspec.ref]
- m = formatter.templatepartsmap(tmplspec, self.t, partnames)
- self._parts.update(m)
-
- if self._parts['docheader']:
- self.ui.write(templater.stringify(self.t(self._parts['docheader'])))
-
- def close(self):
- if self._parts['docfooter']:
- if not self.footer:
- self.footer = ""
- self.footer += templater.stringify(self.t(self._parts['docfooter']))
- return super(changeset_templater, self).close()
-
- def _show(self, ctx, copies, matchfn, hunksfilterfn, props):
- '''show a single changeset or file revision'''
- props = props.copy()
- props['ctx'] = ctx
- props['index'] = index = next(self._counter)
- props['revcache'] = {'copies': copies}
- props = pycompat.strkwargs(props)
-
- # write separator, which wouldn't work well with the header part below
- # since there's inherently a conflict between header (across items) and
- # separator (per item)
- if self._parts['separator'] and index > 0:
- self.ui.write(templater.stringify(self.t(self._parts['separator'])))
-
- # write header
- if self._parts['header']:
- h = templater.stringify(self.t(self._parts['header'], **props))
- if self.buffered:
- self.header[ctx.rev()] = h
- else:
- if self.lastheader != h:
- self.lastheader = h
- self.ui.write(h)
-
- # write changeset metadata, then patch if requested
- key = self._parts[self._tref]
- self.ui.write(templater.stringify(self.t(key, **props)))
- self.showpatch(ctx, matchfn, hunksfilterfn=hunksfilterfn)
-
- if self._parts['footer']:
- if not self.footer:
- self.footer = templater.stringify(
- self.t(self._parts['footer'], **props))
-
-def logtemplatespec(tmpl, mapfile):
- if mapfile:
- return formatter.templatespec('changeset', tmpl, mapfile)
- else:
- return formatter.templatespec('', tmpl, None)
-
-def _lookuplogtemplate(ui, tmpl, style):
- """Find the template matching the given template spec or style
-
- See formatter.lookuptemplate() for details.
- """
-
- # ui settings
- if not tmpl and not style: # template are stronger than style
- tmpl = ui.config('ui', 'logtemplate')
- if tmpl:
- return logtemplatespec(templater.unquotestring(tmpl), None)
- else:
- style = util.expandpath(ui.config('ui', 'style'))
-
- if not tmpl and style:
- mapfile = style
- if not os.path.split(mapfile)[0]:
- mapname = (templater.templatepath('map-cmdline.' + mapfile)
- or templater.templatepath(mapfile))
- if mapname:
- mapfile = mapname
- return logtemplatespec(None, mapfile)
-
- if not tmpl:
- return logtemplatespec(None, None)
-
- return formatter.lookuptemplate(ui, 'changeset', tmpl)
-
-def makelogtemplater(ui, repo, tmpl, buffered=False):
- """Create a changeset_templater from a literal template 'tmpl'
- byte-string."""
- spec = logtemplatespec(tmpl, None)
- return changeset_templater(ui, repo, spec, buffered=buffered)
-
-def show_changeset(ui, repo, opts, buffered=False):
- """show one changeset using template or regular display.
-
- Display format will be the first non-empty hit of:
- 1. option 'template'
- 2. option 'style'
- 3. [ui] setting 'logtemplate'
- 4. [ui] setting 'style'
- If all of these values are either the unset or the empty string,
- regular display via changeset_printer() is done.
- """
- # options
- match = None
- if opts.get('patch') or opts.get('stat'):
- match = scmutil.matchall(repo)
-
- if opts.get('template') == 'json':
- return jsonchangeset(ui, repo, match, opts, buffered)
-
- spec = _lookuplogtemplate(ui, opts.get('template'), opts.get('style'))
-
- if not spec.ref and not spec.tmpl and not spec.mapfile:
- return changeset_printer(ui, repo, match, opts, buffered)
-
- return changeset_templater(ui, repo, spec, match, opts, buffered)
-
def showmarker(fm, marker, index=None):
"""utility function to display obsolescence marker in a readable way
@@ -2096,13 +1624,14 @@
fm.write('date', '(%s) ', fm.formatdate(marker.date()))
meta = marker.metadata().copy()
meta.pop('date', None)
- fm.write('metadata', '{%s}', fm.formatdict(meta, fmt='%r: %r', sep=', '))
+ smeta = util.rapply(pycompat.maybebytestr, meta)
+ fm.write('metadata', '{%s}', fm.formatdict(smeta, fmt='%r: %r', sep=', '))
fm.plain('\n')
def finddate(ui, repo, date):
"""Find the tipmost changeset that matches the given date spec"""
- df = util.matchdate(date)
+ df = dateutil.matchdate(date)
m = scmutil.matchall(repo)
results = {}
@@ -2115,7 +1644,7 @@
rev = ctx.rev()
if rev in results:
ui.status(_("found revision %s from %s\n") %
- (rev, util.datestr(results[rev])))
+ (rev, dateutil.datestr(results[rev])))
return '%d' % rev
raise error.Abort(_("revision matching date not found"))
@@ -2353,7 +1882,7 @@
else:
self.revs.discard(value)
ctx = change(value)
- matches = filter(match, ctx.files())
+ matches = [f for f in ctx.files() if match(f)]
if matches:
fncache[value] = matches
self.set.add(value)
@@ -2416,394 +1945,6 @@
return iterate()
-def _makelogmatcher(repo, revs, pats, opts):
- """Build matcher and expanded patterns from log options
-
- If --follow, revs are the revisions to follow from.
-
- Returns (match, pats, slowpath) where
- - match: a matcher built from the given pats and -I/-X opts
- - pats: patterns used (globs are expanded on Windows)
- - slowpath: True if patterns aren't as simple as scanning filelogs
- """
- # pats/include/exclude are passed to match.match() directly in
- # _matchfiles() revset but walkchangerevs() builds its matcher with
- # scmutil.match(). The difference is input pats are globbed on
- # platforms without shell expansion (windows).
- wctx = repo[None]
- match, pats = scmutil.matchandpats(wctx, pats, opts)
- slowpath = match.anypats() or (not match.always() and opts.get('removed'))
- if not slowpath:
- follow = opts.get('follow') or opts.get('follow_first')
- startctxs = []
- if follow and opts.get('rev'):
- startctxs = [repo[r] for r in revs]
- for f in match.files():
- if follow and startctxs:
- # No idea if the path was a directory at that revision, so
- # take the slow path.
- if any(f not in c for c in startctxs):
- slowpath = True
- continue
- elif follow and f not in wctx:
- # If the file exists, it may be a directory, so let it
- # take the slow path.
- if os.path.exists(repo.wjoin(f)):
- slowpath = True
- continue
- else:
- raise error.Abort(_('cannot follow file not in parent '
- 'revision: "%s"') % f)
- filelog = repo.file(f)
- if not filelog:
- # A zero count may be a directory or deleted file, so
- # try to find matching entries on the slow path.
- if follow:
- raise error.Abort(
- _('cannot follow nonexistent file: "%s"') % f)
- slowpath = True
-
- # We decided to fall back to the slowpath because at least one
- # of the paths was not a file. Check to see if at least one of them
- # existed in history - in that case, we'll continue down the
- # slowpath; otherwise, we can turn off the slowpath
- if slowpath:
- for path in match.files():
- if path == '.' or path in repo.store:
- break
- else:
- slowpath = False
-
- return match, pats, slowpath
-
-def _fileancestors(repo, revs, match, followfirst):
- fctxs = []
- for r in revs:
- ctx = repo[r]
- fctxs.extend(ctx[f].introfilectx() for f in ctx.walk(match))
-
- # When displaying a revision with --patch --follow FILE, we have
- # to know which file of the revision must be diffed. With
- # --follow, we want the names of the ancestors of FILE in the
- # revision, stored in "fcache". "fcache" is populated as a side effect
- # of the graph traversal.
- fcache = {}
- def filematcher(rev):
- return scmutil.matchfiles(repo, fcache.get(rev, []))
-
- def revgen():
- for rev, cs in dagop.filectxancestors(fctxs, followfirst=followfirst):
- fcache[rev] = [c.path() for c in cs]
- yield rev
- return smartset.generatorset(revgen(), iterasc=False), filematcher
-
-def _makenofollowlogfilematcher(repo, pats, opts):
- '''hook for extensions to override the filematcher for non-follow cases'''
- return None
-
-_opt2logrevset = {
- 'no_merges': ('not merge()', None),
- 'only_merges': ('merge()', None),
- '_matchfiles': (None, '_matchfiles(%ps)'),
- 'date': ('date(%s)', None),
- 'branch': ('branch(%s)', '%lr'),
- '_patslog': ('filelog(%s)', '%lr'),
- 'keyword': ('keyword(%s)', '%lr'),
- 'prune': ('ancestors(%s)', 'not %lr'),
- 'user': ('user(%s)', '%lr'),
-}
-
-def _makelogrevset(repo, match, pats, slowpath, opts):
- """Return a revset string built from log options and file patterns"""
- opts = dict(opts)
- # follow or not follow?
- follow = opts.get('follow') or opts.get('follow_first')
-
- # branch and only_branch are really aliases and must be handled at
- # the same time
- opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
- opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
-
- if slowpath:
- # See walkchangerevs() slow path.
- #
- # pats/include/exclude cannot be represented as separate
- # revset expressions as their filtering logic applies at file
- # level. For instance "-I a -X b" matches a revision touching
- # "a" and "b" while "file(a) and not file(b)" does
- # not. Besides, filesets are evaluated against the working
- # directory.
- matchargs = ['r:', 'd:relpath']
- for p in pats:
- matchargs.append('p:' + p)
- for p in opts.get('include', []):
- matchargs.append('i:' + p)
- for p in opts.get('exclude', []):
- matchargs.append('x:' + p)
- opts['_matchfiles'] = matchargs
- elif not follow:
- opts['_patslog'] = list(pats)
-
- expr = []
- for op, val in sorted(opts.iteritems()):
- if not val:
- continue
- if op not in _opt2logrevset:
- continue
- revop, listop = _opt2logrevset[op]
- if revop and '%' not in revop:
- expr.append(revop)
- elif not listop:
- expr.append(revsetlang.formatspec(revop, val))
- else:
- if revop:
- val = [revsetlang.formatspec(revop, v) for v in val]
- expr.append(revsetlang.formatspec(listop, val))
-
- if expr:
- expr = '(' + ' and '.join(expr) + ')'
- else:
- expr = None
- return expr
-
-def _logrevs(repo, opts):
- """Return the initial set of revisions to be filtered or followed"""
- follow = opts.get('follow') or opts.get('follow_first')
- if opts.get('rev'):
- revs = scmutil.revrange(repo, opts['rev'])
- elif follow and repo.dirstate.p1() == nullid:
- revs = smartset.baseset()
- elif follow:
- revs = repo.revs('.')
- else:
- revs = smartset.spanset(repo)
- revs.reverse()
- return revs
-
-def getlogrevs(repo, pats, opts):
- """Return (revs, filematcher) where revs is a smartset
-
- filematcher is a callable taking a revision number and returning a match
- objects filtering the files to be detailed when displaying the revision.
- """
- follow = opts.get('follow') or opts.get('follow_first')
- followfirst = opts.get('follow_first')
- limit = loglimit(opts)
- revs = _logrevs(repo, opts)
- if not revs:
- return smartset.baseset(), None
- match, pats, slowpath = _makelogmatcher(repo, revs, pats, opts)
- filematcher = None
- if follow:
- if slowpath or match.always():
- revs = dagop.revancestors(repo, revs, followfirst=followfirst)
- else:
- revs, filematcher = _fileancestors(repo, revs, match, followfirst)
- revs.reverse()
- if filematcher is None:
- filematcher = _makenofollowlogfilematcher(repo, pats, opts)
- if filematcher is None:
- def filematcher(rev):
- return match
-
- expr = _makelogrevset(repo, match, pats, slowpath, opts)
- if opts.get('graph') and opts.get('rev'):
- # User-specified revs might be unsorted, but don't sort before
- # _makelogrevset because it might depend on the order of revs
- if not (revs.isdescending() or revs.istopo()):
- revs.sort(reverse=True)
- if expr:
- matcher = revset.match(None, expr)
- revs = matcher(repo, revs)
- if limit is not None:
- revs = revs.slice(0, limit)
- return revs, filematcher
-
-def _parselinerangelogopt(repo, opts):
- """Parse --line-range log option and return a list of tuples (filename,
- (fromline, toline)).
- """
- linerangebyfname = []
- for pat in opts.get('line_range', []):
- try:
- pat, linerange = pat.rsplit(',', 1)
- except ValueError:
- raise error.Abort(_('malformatted line-range pattern %s') % pat)
- try:
- fromline, toline = map(int, linerange.split(':'))
- except ValueError:
- raise error.Abort(_("invalid line range for %s") % pat)
- msg = _("line range pattern '%s' must match exactly one file") % pat
- fname = scmutil.parsefollowlinespattern(repo, None, pat, msg)
- linerangebyfname.append(
- (fname, util.processlinerange(fromline, toline)))
- return linerangebyfname
-
-def getloglinerangerevs(repo, userrevs, opts):
- """Return (revs, filematcher, hunksfilter).
-
- "revs" are revisions obtained by processing "line-range" log options and
- walking block ancestors of each specified file/line-range.
-
- "filematcher(rev) -> match" is a factory function returning a match object
- for a given revision for file patterns specified in --line-range option.
- If neither --stat nor --patch options are passed, "filematcher" is None.
-
- "hunksfilter(rev) -> filterfn(fctx, hunks)" is a factory function
- returning a hunks filtering function.
- If neither --stat nor --patch options are passed, "filterhunks" is None.
- """
- wctx = repo[None]
-
- # Two-levels map of "rev -> file ctx -> [line range]".
- linerangesbyrev = {}
- for fname, (fromline, toline) in _parselinerangelogopt(repo, opts):
- if fname not in wctx:
- raise error.Abort(_('cannot follow file not in parent '
- 'revision: "%s"') % fname)
- fctx = wctx.filectx(fname)
- for fctx, linerange in dagop.blockancestors(fctx, fromline, toline):
- rev = fctx.introrev()
- if rev not in userrevs:
- continue
- linerangesbyrev.setdefault(
- rev, {}).setdefault(
- fctx.path(), []).append(linerange)
-
- filematcher = None
- hunksfilter = None
- if opts.get('patch') or opts.get('stat'):
-
- def nofilterhunksfn(fctx, hunks):
- return hunks
-
- def hunksfilter(rev):
- fctxlineranges = linerangesbyrev.get(rev)
- if fctxlineranges is None:
- return nofilterhunksfn
-
- def filterfn(fctx, hunks):
- lineranges = fctxlineranges.get(fctx.path())
- if lineranges is not None:
- for hr, lines in hunks:
- if hr is None: # binary
- yield hr, lines
- continue
- if any(mdiff.hunkinrange(hr[2:], lr)
- for lr in lineranges):
- yield hr, lines
- else:
- for hunk in hunks:
- yield hunk
-
- return filterfn
-
- def filematcher(rev):
- files = list(linerangesbyrev.get(rev, []))
- return scmutil.matchfiles(repo, files)
-
- revs = sorted(linerangesbyrev, reverse=True)
-
- return revs, filematcher, hunksfilter
-
-def _graphnodeformatter(ui, displayer):
- spec = ui.config('ui', 'graphnodetemplate')
- if not spec:
- return templatekw.showgraphnode # fast path for "{graphnode}"
-
- spec = templater.unquotestring(spec)
- tres = formatter.templateresources(ui)
- if isinstance(displayer, changeset_templater):
- tres['cache'] = displayer.cache # reuse cache of slow templates
- templ = formatter.maketemplater(ui, spec, defaults=templatekw.keywords,
- resources=tres)
- def formatnode(repo, ctx):
- props = {'ctx': ctx, 'repo': repo, 'revcache': {}}
- return templ.render(props)
- return formatnode
-
-def displaygraph(ui, repo, dag, displayer, edgefn, getrenamed=None,
- filematcher=None, props=None):
- props = props or {}
- formatnode = _graphnodeformatter(ui, displayer)
- state = graphmod.asciistate()
- styles = state['styles']
-
- # only set graph styling if HGPLAIN is not set.
- if ui.plain('graph'):
- # set all edge styles to |, the default pre-3.8 behaviour
- styles.update(dict.fromkeys(styles, '|'))
- else:
- edgetypes = {
- 'parent': graphmod.PARENT,
- 'grandparent': graphmod.GRANDPARENT,
- 'missing': graphmod.MISSINGPARENT
- }
- for name, key in edgetypes.items():
- # experimental config: experimental.graphstyle.*
- styles[key] = ui.config('experimental', 'graphstyle.%s' % name,
- styles[key])
- if not styles[key]:
- styles[key] = None
-
- # experimental config: experimental.graphshorten
- state['graphshorten'] = ui.configbool('experimental', 'graphshorten')
-
- for rev, type, ctx, parents in dag:
- char = formatnode(repo, ctx)
- copies = None
- if getrenamed and ctx.rev():
- copies = []
- for fn in ctx.files():
- rename = getrenamed(fn, ctx.rev())
- if rename:
- copies.append((fn, rename[0]))
- revmatchfn = None
- if filematcher is not None:
- revmatchfn = filematcher(ctx.rev())
- edges = edgefn(type, char, state, rev, parents)
- firstedge = next(edges)
- width = firstedge[2]
- displayer.show(ctx, copies=copies, matchfn=revmatchfn,
- _graphwidth=width, **pycompat.strkwargs(props))
- lines = displayer.hunk.pop(rev).split('\n')
- if not lines[-1]:
- del lines[-1]
- displayer.flush(ctx)
- for type, char, width, coldata in itertools.chain([firstedge], edges):
- graphmod.ascii(ui, state, type, char, lines, coldata)
- lines = []
- displayer.close()
-
-def graphlog(ui, repo, revs, filematcher, opts):
- # Parameters are identical to log command ones
- revdag = graphmod.dagwalker(repo, revs)
-
- getrenamed = None
- if opts.get('copies'):
- endrev = None
- if opts.get('rev'):
- endrev = scmutil.revrange(repo, opts.get('rev')).max() + 1
- getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)
-
- ui.pager('log')
- displayer = show_changeset(ui, repo, opts, buffered=True)
- displaygraph(ui, repo, revdag, displayer, graphmod.asciiedges, getrenamed,
- filematcher)
-
-def checkunsupportedgraphflags(pats, opts):
- for op in ["newest_first"]:
- if op in opts and opts[op]:
- raise error.Abort(_("-G/--graph option is incompatible with --%s")
- % op.replace("_", "-"))
-
-def graphrevs(repo, nodes, opts):
- limit = loglimit(opts)
- nodes.reverse()
- if limit is not None:
- nodes = nodes[:limit]
- return graphmod.nodes(repo, nodes)
-
def add(ui, repo, match, prefix, explicitonly, **opts):
join = lambda f: os.path.join(prefix, f)
bad = []
@@ -2856,7 +1997,7 @@
for subpath in ctx.substate:
ctx.sub(subpath).addwebdirpath(serverpath, webconf)
-def forget(ui, repo, match, prefix, explicitonly):
+def forget(ui, repo, match, prefix, explicitonly, dryrun):
join = lambda f: os.path.join(prefix, f)
bad = []
badfn = lambda x, y: bad.append(x) or match.bad(x, y)
@@ -2872,7 +2013,7 @@
sub = wctx.sub(subpath)
try:
submatch = matchmod.subdirmatcher(subpath, match)
- subbad, subforgot = sub.forget(submatch, prefix)
+ subbad, subforgot = sub.forget(submatch, prefix, dryrun=dryrun)
bad.extend([subpath + '/' + f for f in subbad])
forgot.extend([subpath + '/' + f for f in subforgot])
except error.LookupError:
@@ -2899,9 +2040,10 @@
if ui.verbose or not match.exact(f):
ui.status(_('removing %s\n') % match.rel(f))
- rejected = wctx.forget(forget, prefix)
- bad.extend(f for f in rejected if f in match.files())
- forgot.extend(f for f in forget if f not in rejected)
+ if not dryrun:
+ rejected = wctx.forget(forget, prefix)
+ bad.extend(f for f in rejected if f in match.files())
+ forgot.extend(f for f in forget if f not in rejected)
return bad, forgot
def files(ui, ctx, m, fm, fmt, subrepos):
@@ -3072,7 +2214,7 @@
def write(path):
filename = None
if fntemplate:
- filename = makefilename(repo, fntemplate, ctx.node(),
+ filename = makefilename(ctx, fntemplate,
pathname=os.path.join(prefix, path))
# attempt to create the directory if it does not already exist
try:
@@ -3090,12 +2232,16 @@
mfnode = ctx.manifestnode()
try:
if mfnode and mfl[mfnode].find(file)[0]:
+ scmutil.fileprefetchhooks(repo, ctx, [file])
write(file)
return 0
except KeyError:
pass
- for abs in ctx.walk(matcher):
+ files = [f for f in ctx.walk(matcher)]
+ scmutil.fileprefetchhooks(repo, ctx, files)
+
+ for abs in files:
write(abs)
err = 0
@@ -3118,7 +2264,7 @@
'''commit the specified files or all outstanding changes'''
date = opts.get('date')
if date:
- opts['date'] = util.parsedate(date)
+ opts['date'] = dateutil.parsedate(date)
message = logmessage(ui, opts)
matcher = scmutil.match(repo[None], pats, opts)
@@ -3183,7 +2329,7 @@
date = opts.get('date') or old.date()
# Parse the date to allow comparison between date and old.date()
- date = util.parsedate(date)
+ date = dateutil.parsedate(date)
if len(old.parents()) > 1:
# ctx.files() isn't reliable for merges, so fall back to the
@@ -3205,16 +2351,13 @@
# subrepo.precommit(). To minimize the risk of this hack, we do
# nothing if .hgsub does not exist.
if '.hgsub' in wctx or '.hgsub' in old:
- from . import subrepo # avoid cycle: cmdutil -> subrepo -> cmdutil
- subs, commitsubs, newsubstate = subrepo.precommit(
+ subs, commitsubs, newsubstate = subrepoutil.precommit(
ui, wctx, wctx._status, matcher)
# amend should abort if commitsubrepos is enabled
assert not commitsubs
if subs:
- subrepo.writestate(repo, newsubstate)
-
- # avoid cycle (TODO: should be removed in default branch)
- from . import merge as mergemod
+ subrepoutil.writestate(repo, newsubstate)
+
ms = mergemod.mergestate.read(repo)
mergeutil.checkunresolved(ms)
@@ -3404,7 +2547,7 @@
def buildcommittemplate(repo, ctx, subs, extramsg, ref):
ui = repo.ui
spec = formatter.templatespec(ref, None, None)
- t = changeset_templater(ui, repo, spec, None, {}, False)
+ t = logcmdutil.changesettemplater(ui, repo, spec)
t.t.cache.update((k, templater.unquotestring(v))
for k, v in repo.ui.configitems('committemplate'))
@@ -3487,12 +2630,12 @@
if not opts.get('close_branch'):
for r in parents:
if r.closesbranch() and r.branch() == branch:
- repo.ui.status(_('reopening closed branch head %d\n') % r)
+ repo.ui.status(_('reopening closed branch head %d\n') % r.rev())
if repo.ui.debugflag:
- repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx.hex()))
+ repo.ui.write(_('committed changeset %d:%s\n') % (ctx.rev(), ctx.hex()))
elif repo.ui.verbose:
- repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx))
+ repo.ui.write(_('committed changeset %d:%s\n') % (ctx.rev(), ctx))
def postcommitstatus(repo, pats, opts):
return repo.status(match=scmutil.match(repo[None], pats, opts))
@@ -3769,7 +2912,15 @@
if not opts.get('dry_run'):
needdata = ('revert', 'add', 'undelete')
- _revertprefetch(repo, ctx, *[actions[name][0] for name in needdata])
+ if _revertprefetch is not _revertprefetchstub:
+ ui.deprecwarn("'cmdutil._revertprefetch' is deprecated, "
+ "add a callback to 'scmutil.fileprefetchhooks'",
+ '4.6', stacklevel=1)
+ _revertprefetch(repo, ctx,
+ *[actions[name][0] for name in needdata])
+ oplist = [actions[name][0] for name in needdata]
+ prefetch = scmutil.fileprefetchhooks
+ prefetch(repo, ctx, [f for sublist in oplist for f in sublist])
_performrevert(repo, parents, ctx, actions, interactive, tobackup)
if targetsubs:
@@ -3782,8 +2933,11 @@
raise error.Abort("subrepository '%s' does not exist in %s!"
% (sub, short(ctx.node())))
-def _revertprefetch(repo, ctx, *files):
- """Let extension changing the storage layer prefetch content"""
+def _revertprefetchstub(repo, ctx, *files):
+ """Stub method for detecting extension wrapping of _revertprefetch(), to
+ issue a deprecation warning."""
+
+_revertprefetch = _revertprefetchstub
def _performrevert(repo, parents, ctx, actions, interactive=False,
tobackup=None):
@@ -3797,7 +2951,6 @@
parent, p2 = parents
node = ctx.node()
excluded_files = []
- matcher_opts = {"exclude": excluded_files}
def checkout(f):
fc = ctx[f]
@@ -3818,7 +2971,7 @@
if choice == 0:
repo.dirstate.drop(f)
else:
- excluded_files.append(repo.wjoin(f))
+ excluded_files.append(f)
else:
repo.dirstate.drop(f)
for f in actions['remove'][0]:
@@ -3829,7 +2982,7 @@
if choice == 0:
doremove(f)
else:
- excluded_files.append(repo.wjoin(f))
+ excluded_files.append(f)
else:
doremove(f)
for f in actions['drop'][0]:
@@ -3849,8 +3002,8 @@
newlyaddedandmodifiedfiles = set()
if interactive:
# Prompt the user for changes to revert
- torevert = [repo.wjoin(f) for f in actions['revert'][0]]
- m = scmutil.match(ctx, torevert, matcher_opts)
+ torevert = [f for f in actions['revert'][0] if f not in excluded_files]
+ m = scmutil.matchfiles(repo, torevert)
diffopts = patch.difffeatureopts(repo.ui, whitespace=True)
diffopts.nodates = True
diffopts.git = True
@@ -3895,7 +3048,7 @@
try:
patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
except error.PatchError as err:
- raise error.Abort(str(err))
+ raise error.Abort(pycompat.bytestr(err))
del fp
else:
for f in actions['revert'][0]:
@@ -4031,3 +3184,23 @@
if after[1]:
hint = after[0]
raise error.Abort(_('no %s in progress') % task, hint=hint)
+
+class changeset_printer(logcmdutil.changesetprinter):
+
+ def __init__(self, ui, *args, **kwargs):
+ msg = ("'cmdutil.changeset_printer' is deprecated, "
+ "use 'logcmdutil.logcmdutil'")
+ ui.deprecwarn(msg, "4.6")
+ super(changeset_printer, self).__init__(ui, *args, **kwargs)
+
+def displaygraph(ui, *args, **kwargs):
+ msg = ("'cmdutil.displaygraph' is deprecated, "
+ "use 'logcmdutil.displaygraph'")
+ ui.deprecwarn(msg, "4.6")
+ return logcmdutil.displaygraph(ui, *args, **kwargs)
+
+def show_changeset(ui, *args, **kwargs):
+ msg = ("'cmdutil.show_changeset' is deprecated, "
+ "use 'logcmdutil.changesetdisplayer'")
+ ui.deprecwarn(msg, "4.6")
+ return logcmdutil.changesetdisplayer(ui, *args, **kwargs)
--- a/mercurial/color.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/color.py Mon Mar 19 08:07:18 2018 -0700
@@ -371,7 +371,7 @@
"""add color control code according to the mode"""
if ui._colormode == 'debug':
if label and msg:
- if msg[-1] == '\n':
+ if msg.endswith('\n'):
msg = "[%s|%s]\n" % (label, msg[:-1])
else:
msg = "[%s|%s]" % (label, msg)
--- a/mercurial/commands.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/commands.py Mon Mar 19 08:07:18 2018 -0700
@@ -41,6 +41,7 @@
help,
hg,
lock as lockmod,
+ logcmdutil,
merge as mergemod,
obsolete,
obsutil,
@@ -53,13 +54,14 @@
rewriteutil,
scmutil,
server,
- sshserver,
streamclone,
tags as tagsmod,
templatekw,
ui as uimod,
util,
+ wireprotoserver,
)
+from .utils import dateutil
release = lockmod.release
@@ -301,9 +303,9 @@
rootfm = ui.formatter('annotate', opts)
if ui.quiet:
- datefunc = util.shortdate
+ datefunc = dateutil.shortdate
else:
- datefunc = util.datestr
+ datefunc = dateutil.datestr
if ctx.rev() is None:
def hexfn(node):
if node is None:
@@ -336,8 +338,8 @@
('number', ' ', lambda x: x.fctx.rev(), formatrev),
('changeset', ' ', lambda x: hexfn(x.fctx.node()), formathex),
('date', ' ', lambda x: x.fctx.date(), util.cachefunc(datefunc)),
- ('file', ' ', lambda x: x.fctx.path(), str),
- ('line_number', ':', lambda x: x.lineno, str),
+ ('file', ' ', lambda x: x.fctx.path(), pycompat.bytestr),
+ ('line_number', ':', lambda x: x.lineno, pycompat.bytestr),
]
fieldnamemap = {'number': 'rev', 'changeset': 'node'}
@@ -403,14 +405,15 @@
formats.append(['%s' for x in l])
pieces.append(l)
- for f, p, l in zip(zip(*formats), zip(*pieces), lines):
+ for f, p, (n, l) in zip(zip(*formats), zip(*pieces), lines):
fm.startitem()
+ fm.context(fctx=n.fctx)
fm.write(fields, "".join(f), *p)
- if l[0].skip:
+ if n.skip:
fmt = "* %s"
else:
fmt = ": %s"
- fm.write('line', fmt, l[1])
+ fm.write('line', fmt, l)
if not lines[-1][1].endswith('\n'):
fm.plain('\n')
@@ -475,7 +478,7 @@
if not ctx:
raise error.Abort(_('no working directory: please specify a revision'))
node = ctx.node()
- dest = cmdutil.makefilename(repo, dest, node)
+ dest = cmdutil.makefilename(ctx, dest)
if os.path.realpath(dest) == repo.root:
raise error.Abort(_('repository root cannot be destination'))
@@ -485,11 +488,11 @@
if dest == '-':
if kind == 'files':
raise error.Abort(_('cannot archive plain files to stdout'))
- dest = cmdutil.makefileobj(repo, dest)
+ dest = cmdutil.makefileobj(ctx, dest)
if not prefix:
prefix = os.path.basename(repo.root) + '-%h'
- prefix = cmdutil.makefilename(repo, prefix, node)
+ prefix = cmdutil.makefilename(ctx, prefix)
match = scmutil.match(ctx, [], opts)
archival.archive(repo, dest, node, kind, not opts.get('no_decode'),
match, prefix, subrepos=opts.get('subrepos'))
@@ -583,7 +586,7 @@
date = opts.get('date')
if date:
- opts['date'] = util.parsedate(date)
+ opts['date'] = dateutil.parsedate(date)
cmdutil.checkunfinished(repo)
cmdutil.bailifchanged(repo)
@@ -823,7 +826,7 @@
cmdutil.bailifchanged(repo)
return hg.clean(repo, node, show_stats=show_stats)
- displayer = cmdutil.show_changeset(ui, repo, {})
+ displayer = logcmdutil.changesetdisplayer(ui, repo, {})
if command:
changesets = 1
@@ -859,7 +862,8 @@
transition = "bad"
state[transition].append(node)
ctx = repo[node]
- ui.status(_('changeset %d:%s: %s\n') % (ctx, ctx, transition))
+ ui.status(_('changeset %d:%s: %s\n') % (ctx.rev(), ctx,
+ transition))
hbisect.checkstate(state)
# bisect
nodes, changesets, bgood = hbisect.bisect(repo, state)
@@ -1129,7 +1133,7 @@
fm.startitem()
fm.write('branch', '%s', tag, label=label)
rev = ctx.rev()
- padsize = max(31 - len(str(rev)) - encoding.colwidth(tag), 0)
+ padsize = max(31 - len("%d" % rev) - encoding.colwidth(tag), 0)
fmt = ' ' * padsize + ' %d:%s'
fm.condwrite(not ui.quiet, 'rev node', fmt, rev, hexfunc(ctx.node()),
label='log.changeset changeset.%s' % ctx.phasestr())
@@ -1156,13 +1160,15 @@
def bundle(ui, repo, fname, dest=None, **opts):
"""create a bundle file
- Generate a bundle file containing data to be added to a repository.
+ Generate a bundle file containing data to be transferred to another
+ repository.
To create a bundle containing all changesets, use -a/--all
(or --base null). Otherwise, hg assumes the destination will have
all the nodes you specify with --base parameters. Otherwise, hg
will assume the repository has all the nodes in destination, or
- default-push/default if no destination is specified.
+ default-push/default if no destination is specified, where destination
+ is the repository you provide through DEST option.
You can change bundle format with the -t/--type option. See
:hg:`help bundlespec` for documentation on this format. By default,
@@ -1192,7 +1198,7 @@
bcompression, cgversion, params = exchange.parsebundlespec(
repo, bundletype, strict=False)
except error.UnsupportedBundleSpecification as e:
- raise error.Abort(str(e),
+ raise error.Abort(pycompat.bytestr(e),
hint=_("see 'hg help bundlespec' for supported "
"values for --type"))
@@ -1219,7 +1225,7 @@
raise error.Abort(_("--base is incompatible with specifying "
"a destination"))
common = [repo.lookup(rev) for rev in base]
- heads = revs and map(repo.lookup, revs) or None
+ heads = [repo.lookup(r) for r in revs] if revs else None
outgoing = discovery.outgoing(repo, common, heads)
else:
dest = ui.expandpath(dest or 'default-push', dest or 'default')
@@ -1257,7 +1263,7 @@
compopts['level'] = complevel
- contentopts = {'cg.version': cgversion}
+ contentopts = {'cg.version': cgversion, 'changegroup': True}
if repo.ui.configbool('experimental', 'evolution.bundle-obsmarker'):
contentopts['obsolescence'] = True
if repo.ui.configbool('experimental', 'bundle-phases'):
@@ -1281,7 +1287,9 @@
no revision is given, the parent of the working directory is used.
Output may be to a file, in which case the name of the file is
- given using a format string. The formatting rules as follows:
+ given using a template string. See :hg:`help templates`. In addition
+ to the common template keywords, the following formatting rules are
+ supported:
:``%%``: literal "%" character
:``%s``: basename of file being printed
@@ -1292,6 +1300,7 @@
:``%h``: short-form changeset hash (12 hexadecimal digits)
:``%r``: zero-padded changeset revision number
:``%b``: basename of the exporting repository
+ :``\\``: literal "\\" character
Returns 0 on success.
"""
@@ -1319,8 +1328,10 @@
'directory (only a repository)')),
('u', 'updaterev', '', _('revision, tag, or branch to check out'),
_('REV')),
- ('r', 'rev', [], _('include the specified changeset'), _('REV')),
- ('b', 'branch', [], _('clone only the specified branch'), _('BRANCH')),
+ ('r', 'rev', [], _('do not clone everything, but include this changeset'
+ ' and its ancestors'), _('REV')),
+ ('b', 'branch', [], _('do not clone everything, but include this branch\'s'
+ ' changesets and their ancestors'), _('BRANCH')),
('', 'pull', None, _('use pull protocol to copy metadata')),
('', 'uncompressed', None,
_('an alias to --stream (DEPRECATED)')),
@@ -1550,7 +1561,7 @@
extra = {}
if opts.get('close_branch'):
- extra['close'] = 1
+ extra['close'] = '1'
if not bheads:
raise error.Abort(_('can only close branch heads'))
@@ -1628,7 +1639,7 @@
of that config item.
With multiple arguments, print names and values of all config
- items with matching section names.
+ items with matching section names or section.names.
With --edit, start an editor on the user-level config file. With
--global, edit the system-wide config file. With --local, edit the
@@ -1689,11 +1700,15 @@
else:
raise error.ProgrammingError('unknown rctype: %s' % t)
untrusted = bool(opts.get('untrusted'))
+
+ selsections = selentries = []
if values:
- sections = [v for v in values if '.' not in v]
- items = [v for v in values if '.' in v]
- if len(items) > 1 or items and sections:
- raise error.Abort(_('only one config item permitted'))
+ selsections = [v for v in values if '.' not in v]
+ selentries = [v for v in values if '.' in v]
+ uniquesel = (len(selentries) == 1 and not selsections)
+ selsections = set(selsections)
+ selentries = set(selentries)
+
matched = False
for section, name, value in ui.walkconfig(untrusted=untrusted):
source = ui.configsource(section, name, untrusted)
@@ -1702,24 +1717,16 @@
source = source or 'none'
value = value.replace('\n', '\\n')
entryname = section + '.' + name
- if values:
- for v in values:
- if v == section:
- fm.startitem()
- fm.condwrite(ui.debugflag, 'source', '%s: ', source)
- fm.write('name value', '%s=%s\n', entryname, value)
- matched = True
- elif v == entryname:
- fm.startitem()
- fm.condwrite(ui.debugflag, 'source', '%s: ', source)
- fm.write('value', '%s\n', value)
- fm.data(name=entryname)
- matched = True
+ if values and not (section in selsections or entryname in selentries):
+ continue
+ fm.startitem()
+ fm.condwrite(ui.debugflag, 'source', '%s: ', source)
+ if uniquesel:
+ fm.data(name=entryname)
+ fm.write('value', '%s\n', value)
else:
- fm.startitem()
- fm.condwrite(ui.debugflag, 'source', '%s: ', source)
fm.write('name value', '%s=%s\n', entryname, value)
- matched = True
+ matched = True
fm.end()
if matched:
return 0
@@ -1873,9 +1880,9 @@
diffopts = patch.diffallopts(ui, opts)
m = scmutil.match(repo[node2], pats, opts)
ui.pager('diff')
- cmdutil.diffordiffstat(ui, repo, diffopts, node1, node2, m, stat=stat,
- listsubrepos=opts.get('subrepos'),
- root=opts.get('root'))
+ logcmdutil.diffordiffstat(ui, repo, diffopts, node1, node2, m, stat=stat,
+ listsubrepos=opts.get('subrepos'),
+ root=opts.get('root'))
@command('^export',
[('o', 'output', '',
@@ -1901,7 +1908,9 @@
first parent only.
Output may be to a file, in which case the name of the file is
- given using a format string. The formatting rules are as follows:
+ given using a template string. See :hg:`help templates`. In addition
+ to the common template keywords, the following formatting rules are
+ supported:
:``%%``: literal "%" character
:``%H``: changeset hash (40 hexadecimal digits)
@@ -1912,6 +1921,7 @@
:``%m``: first line of the commit message (only alphanumeric characters)
:``%n``: zero-padded sequence number, starting at 1
:``%r``: zero-padded changeset revision number
+ :``\\``: literal "\\" character
Without the -a/--text option, export will avoid generating diffs
of files it detects as binary. With -a, export will generate a
@@ -2027,7 +2037,10 @@
with ui.formatter('files', opts) as fm:
return cmdutil.files(ui, ctx, m, fm, fmt, opts.get('subrepos'))
-@command('^forget', walkopts, _('[OPTION]... FILE...'), inferrepo=True)
+@command(
+ '^forget',
+ walkopts + dryrunopts,
+ _('[OPTION]... FILE...'), inferrepo=True)
def forget(ui, repo, *pats, **opts):
"""forget the specified files on the next commit
@@ -2062,7 +2075,9 @@
raise error.Abort(_('no files specified'))
m = scmutil.match(repo[None], pats, opts)
- rejected = cmdutil.forget(ui, repo, m, prefix="", explicitonly=False)[0]
+ dryrun = opts.get(r'dry_run')
+ rejected = cmdutil.forget(ui, repo, m, prefix="",
+ explicitonly=False, dryrun=dryrun)[0]
return rejected and 1 or 0
@command(
@@ -2153,7 +2168,7 @@
if not opts.get('user') and opts.get('currentuser'):
opts['user'] = ui.username()
if not opts.get('date') and opts.get('currentdate'):
- opts['date'] = "%d %d" % util.makedate()
+ opts['date'] = "%d %d" % dateutil.makedate()
editor = cmdutil.getcommiteditor(editform='graft',
**pycompat.strkwargs(opts))
@@ -2172,10 +2187,10 @@
raise
cmdutil.wrongtooltocontinue(repo, _('graft'))
else:
+ if not revs:
+ raise error.Abort(_('no revisions specified'))
cmdutil.checkunfinished(repo)
cmdutil.bailifchanged(repo)
- if not revs:
- raise error.Abort(_('no revisions specified'))
revs = scmutil.revrange(repo, revs)
skipped = set()
@@ -2292,7 +2307,7 @@
finally:
repo.ui.setconfig('ui', 'forcemerge', '', 'graft')
# report any conflicts
- if stats and stats[3] > 0:
+ if stats[3] > 0:
# write out state for --continue
nodelines = [repo[rev].hex() + "\n" for rev in revs[pos:]]
repo.vfs.write('graftstate', ''.join(nodelines))
@@ -2370,7 +2385,7 @@
try:
regexp = util.re.compile(pattern, reflags)
except re.error as inst:
- ui.warn(_("grep: invalid match pattern: %s\n") % inst)
+ ui.warn(_("grep: invalid match pattern: %s\n") % pycompat.bytestr(inst))
return 1
sep, eol = ':', '\n'
if opts.get('print0'):
@@ -2647,7 +2662,7 @@
ui.pager('heads')
heads = sorted(heads, key=lambda x: -x.rev())
- displayer = cmdutil.show_changeset(ui, repo, opts)
+ displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
for ctx in heads:
displayer.show(ctx)
displayer.close()
@@ -3003,7 +3018,7 @@
date = opts.get('date')
if date:
- opts['date'] = util.parsedate(date)
+ opts['date'] = dateutil.parsedate(date)
exact = opts.get('exact')
update = not opts.get('bypass')
@@ -3155,11 +3170,11 @@
"""
opts = pycompat.byteskwargs(opts)
if opts.get('graph'):
- cmdutil.checkunsupportedgraphflags([], opts)
+ logcmdutil.checkunsupportedgraphflags([], opts)
def display(other, chlist, displayer):
- revdag = cmdutil.graphrevs(other, chlist, opts)
- cmdutil.displaygraph(ui, repo, revdag, displayer,
- graphmod.asciiedges)
+ revdag = logcmdutil.graphrevs(other, chlist, opts)
+ logcmdutil.displaygraph(ui, repo, revdag, displayer,
+ graphmod.asciiedges)
hg._incoming(display, lambda: 1, ui, repo, source, opts, buffered=True)
return 0
@@ -3414,33 +3429,17 @@
raise error.Abort(_('--line-range requires --follow'))
if linerange and pats:
+ # TODO: take pats as patterns with no line-range filter
raise error.Abort(
_('FILE arguments are not compatible with --line-range option')
)
repo = scmutil.unhidehashlikerevs(repo, opts.get('rev'), 'nowarn')
- revs, filematcher = cmdutil.getlogrevs(repo, pats, opts)
- hunksfilter = None
-
- if opts.get('graph'):
- if linerange:
- raise error.Abort(_('graph not supported with line range patterns'))
- return cmdutil.graphlog(ui, repo, revs, filematcher, opts)
-
+ revs, differ = logcmdutil.getrevs(repo, pats, opts)
if linerange:
- revs, lrfilematcher, hunksfilter = cmdutil.getloglinerangerevs(
- repo, revs, opts)
-
- if filematcher is not None and lrfilematcher is not None:
- basefilematcher = filematcher
-
- def filematcher(rev):
- files = (basefilematcher(rev).files()
- + lrfilematcher(rev).files())
- return scmutil.matchfiles(repo, files)
-
- elif filematcher is None:
- filematcher = lrfilematcher
+ # TODO: should follow file history from logcmdutil._initialrevs(),
+ # then filter the result by logcmdutil._makerevset() and --limit
+ revs, differ = logcmdutil.getlinerangerevs(repo, revs, opts)
getrenamed = None
if opts.get('copies'):
@@ -3450,29 +3449,13 @@
getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)
ui.pager('log')
- displayer = cmdutil.show_changeset(ui, repo, opts, buffered=True)
- for rev in revs:
- ctx = repo[rev]
- copies = None
- if getrenamed is not None and rev:
- copies = []
- for fn in ctx.files():
- rename = getrenamed(fn, rev)
- if rename:
- copies.append((fn, rename[0]))
- if filematcher:
- revmatchfn = filematcher(ctx.rev())
- else:
- revmatchfn = None
- if hunksfilter:
- revhunksfilter = hunksfilter(rev)
- else:
- revhunksfilter = None
- displayer.show(ctx, copies=copies, matchfn=revmatchfn,
- hunksfilterfn=revhunksfilter)
- displayer.flush(ctx)
-
- displayer.close()
+ displayer = logcmdutil.changesetdisplayer(ui, repo, opts, differ,
+ buffered=True)
+ if opts.get('graph'):
+ displayfn = logcmdutil.displaygraphrevs
+ else:
+ displayfn = logcmdutil.displayrevs
+ displayfn(ui, repo, revs, displayer, getrenamed)
@command('manifest',
[('r', 'rev', '', _('revision to display'), _('REV')),
@@ -3523,8 +3506,8 @@
if not node:
node = rev
- char = {'l': '@', 'x': '*', '': ''}
- mode = {'l': '644', 'x': '755', '': '644'}
+ char = {'l': '@', 'x': '*', '': '', 't': 'd'}
+ mode = {'l': '644', 'x': '755', '': '644', 't': '755'}
if node:
repo = scmutil.unhidehashlikerevs(repo, [node], 'nowarn')
ctx = scmutil.revsingle(repo, node)
@@ -3604,7 +3587,7 @@
p2 = repo.lookup(node)
nodes = repo.changelog.findmissing(common=[p1], heads=[p2])
- displayer = cmdutil.show_changeset(ui, repo, opts)
+ displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
for node in nodes:
displayer.show(repo[node])
displayer.close()
@@ -3668,16 +3651,17 @@
"""
opts = pycompat.byteskwargs(opts)
if opts.get('graph'):
- cmdutil.checkunsupportedgraphflags([], opts)
+ logcmdutil.checkunsupportedgraphflags([], opts)
o, other = hg._outgoing(ui, repo, dest, opts)
if not o:
cmdutil.outgoinghooks(ui, repo, other, opts, o)
return
- revdag = cmdutil.graphrevs(repo, o, opts)
+ revdag = logcmdutil.graphrevs(repo, o, opts)
ui.pager('outgoing')
- displayer = cmdutil.show_changeset(ui, repo, opts, buffered=True)
- cmdutil.displaygraph(ui, repo, revdag, displayer, graphmod.asciiedges)
+ displayer = logcmdutil.changesetdisplayer(ui, repo, opts, buffered=True)
+ logcmdutil.displaygraph(ui, repo, revdag, displayer,
+ graphmod.asciiedges)
cmdutil.outgoinghooks(ui, repo, other, opts, o)
return 0
@@ -3752,7 +3736,7 @@
else:
p = [cp.node() for cp in ctx.parents()]
- displayer = cmdutil.show_changeset(ui, repo, opts)
+ displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
for n in p:
if n != nullid:
displayer.show(repo[n])
@@ -3804,7 +3788,7 @@
if fm.isplain():
hidepassword = util.hidepassword
else:
- hidepassword = str
+ hidepassword = bytes
if ui.quiet:
namefmt = '%s\n'
else:
@@ -3930,7 +3914,7 @@
try:
return hg.updatetotally(ui, repo, checkout, brev)
except error.UpdateAbort as inst:
- msg = _("not updating: %s") % str(inst)
+ msg = _("not updating: %s") % util.forcebytestr(inst)
hint = inst.hint
raise error.UpdateAbort(msg, hint=hint)
if modheads > 1:
@@ -4043,7 +4027,7 @@
brev = None
if checkout:
- checkout = str(repo.changelog.rev(checkout))
+ checkout = "%d" % repo.changelog.rev(checkout)
# order below depends on implementation of
# hg.addbranchrevs(). opts['bookmark'] is ignored,
@@ -4513,7 +4497,7 @@
for f in ms:
if not m(f):
continue
- flags = ''.join(['-%s ' % o[0] for o in flaglist
+ flags = ''.join(['-%s ' % o[0:1] for o in flaglist
if opts.get(o)])
hint = _("(try: hg resolve %s%s)\n") % (
flags,
@@ -4757,7 +4741,7 @@
if repo is None:
raise error.RepoError(_("there is no Mercurial repository here"
" (.hg not found)"))
- s = sshserver.sshserver(ui, repo)
+ s = wireprotoserver.sshserver(ui, repo)
s.serve_forever()
service = server.createservice(ui, repo, opts)
@@ -4984,7 +4968,7 @@
# shows a working directory parent *changeset*:
# i18n: column positioning for "hg summary"
ui.write(_('parent: %d:%s ') % (p.rev(), p),
- label=cmdutil._changesetlabels(p))
+ label=logcmdutil.changesetlabels(p))
ui.write(' '.join(p.tags()), label='log.tag')
if p.bookmarks():
marks.extend(p.bookmarks())
@@ -5330,7 +5314,7 @@
date = opts.get('date')
if date:
- date = util.parsedate(date)
+ date = dateutil.parsedate(date)
if opts.get('remove'):
editform = 'tag.remove'
@@ -5406,7 +5390,7 @@
Returns 0 on success.
"""
opts = pycompat.byteskwargs(opts)
- displayer = cmdutil.show_changeset(ui, repo, opts)
+ displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
displayer.show(repo['tip'])
displayer.close()
--- a/mercurial/commandserver.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/commandserver.py Mon Mar 19 08:07:18 2018 -0700
@@ -16,8 +16,13 @@
import struct
import traceback
+try:
+ import selectors
+ selectors.BaseSelector
+except ImportError:
+ from .thirdparty import selectors2 as selectors
+
from .i18n import _
-from .thirdparty import selectors2
from . import (
encoding,
error,
@@ -303,8 +308,8 @@
ui.flush()
newfiles = []
nullfd = os.open(os.devnull, os.O_RDWR)
- for f, sysf, mode in [(ui.fin, util.stdin, pycompat.sysstr('rb')),
- (ui.fout, util.stdout, pycompat.sysstr('wb'))]:
+ for f, sysf, mode in [(ui.fin, util.stdin, r'rb'),
+ (ui.fout, util.stdout, r'wb')]:
if f is sysf:
newfd = os.dup(f.fileno())
os.dup2(nullfd, f.fileno())
@@ -476,8 +481,8 @@
def _mainloop(self):
exiting = False
h = self._servicehandler
- selector = selectors2.DefaultSelector()
- selector.register(self._sock, selectors2.EVENT_READ)
+ selector = selectors.DefaultSelector()
+ selector.register(self._sock, selectors.EVENT_READ)
while True:
if not exiting and h.shouldexit():
# clients can no longer connect() to the domain socket, so
--- a/mercurial/config.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/config.py Mon Mar 19 08:07:18 2018 -0700
@@ -154,7 +154,7 @@
if inst.errno != errno.ENOENT:
raise error.ParseError(_("cannot include %s (%s)")
% (inc, inst.strerror),
- "%s:%s" % (src, line))
+ "%s:%d" % (src, line))
continue
if emptyre.match(l):
continue
@@ -185,7 +185,7 @@
self._unset.append((section, name))
continue
- raise error.ParseError(l.rstrip(), ("%s:%s" % (src, line)))
+ raise error.ParseError(l.rstrip(), ("%s:%d" % (src, line)))
def read(self, path, fp=None, sections=None, remap=None):
if not fp:
--- a/mercurial/configitems.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/configitems.py Mon Mar 19 08:07:18 2018 -0700
@@ -502,6 +502,9 @@
coreconfigitem('experimental', 'maxdeltachainspan',
default=-1,
)
+coreconfigitem('experimental', 'mergetempdirprefix',
+ default=None,
+)
coreconfigitem('experimental', 'mmapindexthreshold',
default=None,
)
@@ -538,9 +541,6 @@
coreconfigitem('experimental', 'httppostargs',
default=False,
)
-coreconfigitem('experimental', 'manifestv2',
- default=False,
-)
coreconfigitem('experimental', 'mergedriver',
default=None,
)
@@ -556,6 +556,9 @@
coreconfigitem('experimental', 'single-head-per-branch',
default=False,
)
+coreconfigitem('experimental', 'sshserver.support-v2',
+ default=False,
+)
coreconfigitem('experimental', 'spacemovesdown',
default=False,
)
@@ -574,6 +577,12 @@
coreconfigitem('experimental', 'update.atomic-file',
default=False,
)
+coreconfigitem('experimental', 'sshpeer.advertise-v2',
+ default=False,
+)
+coreconfigitem('experimental', 'xdiff',
+ default=False,
+)
coreconfigitem('extensions', '.*',
default=None,
generic=True,
@@ -743,6 +752,16 @@
generic=True,
priority=-1,
)
+coreconfigitem('merge-tools', br'.*\.mergemarkers$',
+ default='basic',
+ generic=True,
+ priority=-1,
+)
+coreconfigitem('merge-tools', br'.*\.mergemarkertemplate$',
+ default=dynamicdefault, # take from ui.mergemarkertemplate
+ generic=True,
+ priority=-1,
+)
coreconfigitem('merge-tools', br'.*\.priority$',
default=0,
generic=True,
@@ -1013,9 +1032,6 @@
coreconfigitem('ui', 'graphnodetemplate',
default=None,
)
-coreconfigitem('ui', 'http2debuglevel',
- default=None,
-)
coreconfigitem('ui', 'interactive',
default=None,
)
@@ -1114,9 +1130,6 @@
coreconfigitem('ui', 'tweakdefaults',
default=False,
)
-coreconfigitem('ui', 'usehttp2',
- default=False,
-)
coreconfigitem('ui', 'username',
alias=[('ui', 'user')]
)
@@ -1242,6 +1255,9 @@
coreconfigitem('web', 'refreshinterval',
default=20,
)
+coreconfigitem('web', 'server-header',
+ default=None,
+)
coreconfigitem('web', 'staticurl',
default=None,
)
--- a/mercurial/context.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/context.py Mon Mar 19 08:07:18 2018 -0700
@@ -26,15 +26,12 @@
wdirnodes,
wdirrev,
)
-from .thirdparty import (
- attr,
-)
from . import (
+ dagop,
encoding,
error,
fileset,
match as matchmod,
- mdiff,
obsolete as obsmod,
obsutil,
patch,
@@ -46,12 +43,14 @@
scmutil,
sparse,
subrepo,
+ subrepoutil,
util,
)
+from .utils import dateutil
propertycache = util.propertycache
-nonascii = re.compile(r'[^\x21-\x7f]').search
+nonascii = re.compile(br'[^\x21-\x7f]').search
class basectx(object):
"""A basectx object represents the common logic for its children:
@@ -77,9 +76,6 @@
__str__ = encoding.strmethod(__bytes__)
- def __int__(self):
- return self.rev()
-
def __repr__(self):
return r"<%s %s>" % (type(self).__name__, str(self))
@@ -173,7 +169,7 @@
@propertycache
def substate(self):
- return subrepo.state(self, self._repo.ui)
+ return subrepoutil.state(self, self._repo.ui)
def subrev(self, subpath):
return self.substate[subpath][1]
@@ -206,22 +202,10 @@
"""True if the changeset is extinct"""
return self.rev() in obsmod.getrevs(self._repo, 'extinct')
- def unstable(self):
- msg = ("'context.unstable' is deprecated, "
- "use 'context.orphan'")
- self._repo.ui.deprecwarn(msg, '4.4')
- return self.orphan()
-
def orphan(self):
"""True if the changeset is not obsolete but it's ancestor are"""
return self.rev() in obsmod.getrevs(self._repo, 'orphan')
- def bumped(self):
- msg = ("'context.bumped' is deprecated, "
- "use 'context.phasedivergent'")
- self._repo.ui.deprecwarn(msg, '4.4')
- return self.phasedivergent()
-
def phasedivergent(self):
"""True if the changeset try to be a successor of a public changeset
@@ -229,12 +213,6 @@
"""
return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')
- def divergent(self):
- msg = ("'context.divergent' is deprecated, "
- "use 'context.contentdivergent'")
- self._repo.ui.deprecwarn(msg, '4.4')
- return self.contentdivergent()
-
def contentdivergent(self):
"""Is a successors of a changeset with multiple possible successors set
@@ -242,33 +220,10 @@
"""
return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')
- def troubled(self):
- msg = ("'context.troubled' is deprecated, "
- "use 'context.isunstable'")
- self._repo.ui.deprecwarn(msg, '4.4')
- return self.isunstable()
-
def isunstable(self):
"""True if the changeset is either unstable, bumped or divergent"""
return self.orphan() or self.phasedivergent() or self.contentdivergent()
- def troubles(self):
- """Keep the old version around in order to avoid breaking extensions
- about different return values.
- """
- msg = ("'context.troubles' is deprecated, "
- "use 'context.instabilities'")
- self._repo.ui.deprecwarn(msg, '4.4')
-
- troubles = []
- if self.orphan():
- troubles.append('orphan')
- if self.phasedivergent():
- troubles.append('bumped')
- if self.contentdivergent():
- troubles.append('divergent')
- return troubles
-
def instabilities(self):
"""return the list of instabilities affecting this changeset.
@@ -475,7 +430,7 @@
self._rev = changeid
return
if not pycompat.ispy3 and isinstance(changeid, long):
- changeid = str(changeid)
+ changeid = "%d" % changeid
if changeid == 'null':
self._node = nullid
self._rev = nullrev
@@ -790,7 +745,7 @@
__str__ = encoding.strmethod(__bytes__)
def __repr__(self):
- return "<%s %s>" % (type(self).__name__, str(self))
+ return r"<%s %s>" % (type(self).__name__, str(self))
def __hash__(self):
try:
@@ -954,7 +909,7 @@
"""
lkr = self.linkrev()
attrs = vars(self)
- noctx = not ('_changeid' in attrs or '_changectx' in attrs)
+ noctx = not (r'_changeid' in attrs or r'_changectx' in attrs)
if noctx or self.rev() == lkr:
return self.linkrev()
return self._adjustlinkrev(self.rev(), inclusive=True)
@@ -970,14 +925,14 @@
def _parentfilectx(self, path, fileid, filelog):
"""create parent filectx keeping ancestry info for _adjustlinkrev()"""
fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
- if '_changeid' in vars(self) or '_changectx' in vars(self):
+ if r'_changeid' in vars(self) or r'_changectx' in vars(self):
# If self is associated with a changeset (probably explicitly
# fed), ensure the created filectx is associated with a
# changeset that is an ancestor of self.changectx.
# This lets us later use _adjustlinkrev to get a correct link.
fctx._descendantrev = self.rev()
fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
- elif '_descendantrev' in vars(self):
+ elif r'_descendantrev' in vars(self):
# Otherwise propagate _descendantrev if we have one associated.
fctx._descendantrev = self._descendantrev
fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
@@ -1020,20 +975,6 @@
the line number at the first appearance in the managed file, otherwise,
number has a fixed value of False.
'''
-
- def lines(text):
- if text.endswith("\n"):
- return text.count("\n")
- return text.count("\n") + int(bool(text))
-
- if linenumber:
- def decorate(text, rev):
- return ([annotateline(fctx=rev, lineno=i)
- for i in xrange(1, lines(text) + 1)], text)
- else:
- def decorate(text, rev):
- return ([annotateline(fctx=rev)] * lines(text), text)
-
getlog = util.lrucachefunc(lambda x: self._repo.file(x))
def parents(f):
@@ -1051,7 +992,7 @@
# renamed filectx won't have a filelog yet, so set it
# from the cache to save time
for p in pl:
- if not '_filelog' in p.__dict__:
+ if not r'_filelog' in p.__dict__:
p._filelog = getlog(p.path())
return pl
@@ -1069,60 +1010,8 @@
ac = cl.ancestors([base.rev()], inclusive=True)
base._ancestrycontext = ac
- # This algorithm would prefer to be recursive, but Python is a
- # bit recursion-hostile. Instead we do an iterative
- # depth-first search.
-
- # 1st DFS pre-calculates pcache and needed
- visit = [base]
- pcache = {}
- needed = {base: 1}
- while visit:
- f = visit.pop()
- if f in pcache:
- continue
- pl = parents(f)
- pcache[f] = pl
- for p in pl:
- needed[p] = needed.get(p, 0) + 1
- if p not in pcache:
- visit.append(p)
-
- # 2nd DFS does the actual annotate
- visit[:] = [base]
- hist = {}
- while visit:
- f = visit[-1]
- if f in hist:
- visit.pop()
- continue
-
- ready = True
- pl = pcache[f]
- for p in pl:
- if p not in hist:
- ready = False
- visit.append(p)
- if ready:
- visit.pop()
- curr = decorate(f.data(), f)
- skipchild = False
- if skiprevs is not None:
- skipchild = f._changeid in skiprevs
- curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
- diffopts)
- for p in pl:
- if needed[p] == 1:
- del hist[p]
- del needed[p]
- else:
- needed[p] -= 1
-
- hist[f] = curr
- del pcache[f]
-
- lineattrs, text = hist[base]
- return pycompat.ziplist(lineattrs, mdiff.splitnewlines(text))
+ return dagop.annotate(base, parents, linenumber=linenumber,
+ skiprevs=skiprevs, diffopts=diffopts)
def ancestors(self, followfirst=False):
visit = {}
@@ -1147,74 +1036,6 @@
"""
return self._repo.wwritedata(self.path(), self.data())
-@attr.s(slots=True, frozen=True)
-class annotateline(object):
- fctx = attr.ib()
- lineno = attr.ib(default=False)
- # Whether this annotation was the result of a skip-annotate.
- skip = attr.ib(default=False)
-
-def _annotatepair(parents, childfctx, child, skipchild, diffopts):
- r'''
- Given parent and child fctxes and annotate data for parents, for all lines
- in either parent that match the child, annotate the child with the parent's
- data.
-
- Additionally, if `skipchild` is True, replace all other lines with parent
- annotate data as well such that child is never blamed for any lines.
-
- See test-annotate.py for unit tests.
- '''
- pblocks = [(parent, mdiff.allblocks(parent[1], child[1], opts=diffopts))
- for parent in parents]
-
- if skipchild:
- # Need to iterate over the blocks twice -- make it a list
- pblocks = [(p, list(blocks)) for (p, blocks) in pblocks]
- # Mercurial currently prefers p2 over p1 for annotate.
- # TODO: change this?
- for parent, blocks in pblocks:
- for (a1, a2, b1, b2), t in blocks:
- # Changed blocks ('!') or blocks made only of blank lines ('~')
- # belong to the child.
- if t == '=':
- child[0][b1:b2] = parent[0][a1:a2]
-
- if skipchild:
- # Now try and match up anything that couldn't be matched,
- # Reversing pblocks maintains bias towards p2, matching above
- # behavior.
- pblocks.reverse()
-
- # The heuristics are:
- # * Work on blocks of changed lines (effectively diff hunks with -U0).
- # This could potentially be smarter but works well enough.
- # * For a non-matching section, do a best-effort fit. Match lines in
- # diff hunks 1:1, dropping lines as necessary.
- # * Repeat the last line as a last resort.
-
- # First, replace as much as possible without repeating the last line.
- remaining = [(parent, []) for parent, _blocks in pblocks]
- for idx, (parent, blocks) in enumerate(pblocks):
- for (a1, a2, b1, b2), _t in blocks:
- if a2 - a1 >= b2 - b1:
- for bk in xrange(b1, b2):
- if child[0][bk].fctx == childfctx:
- ak = min(a1 + (bk - b1), a2 - 1)
- child[0][bk] = attr.evolve(parent[0][ak], skip=True)
- else:
- remaining[idx][1].append((a1, a2, b1, b2))
-
- # Then, look at anything left, which might involve repeating the last
- # line.
- for parent, blocks in remaining:
- for a1, a2, b1, b2 in blocks:
- for bk in xrange(b1, b2):
- if child[0][bk].fctx == childfctx:
- ak = min(a1 + (bk - b1), a2 - 1)
- child[0][bk] = attr.evolve(parent[0][ak], skip=True)
- return child
-
class filectx(basefilectx):
"""A filecontext object makes access to data related to a particular
filerevision convenient."""
@@ -1331,7 +1152,7 @@
self._node = None
self._text = text
if date:
- self._date = util.parsedate(date)
+ self._date = dateutil.parsedate(date)
if user:
self._user = user
if changes:
@@ -1408,7 +1229,7 @@
ui = self._repo.ui
date = ui.configdate('devel', 'default-date')
if date is None:
- date = util.makedate()
+ date = dateutil.makedate()
return date
def subrev(self, subpath):
@@ -1935,7 +1756,7 @@
def date(self):
t, tz = self._changectx.date()
try:
- return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
+ return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
except OSError as err:
if err.errno != errno.ENOENT:
raise
@@ -2155,11 +1976,11 @@
if data is None:
raise error.ProgrammingError("data must be non-None")
self._auditconflicts(path)
- self._markdirty(path, exists=True, data=data, date=util.makedate(),
+ self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
flags=flags)
def setflags(self, path, l, x):
- self._markdirty(path, exists=True, date=util.makedate(),
+ self._markdirty(path, exists=True, date=dateutil.makedate(),
flags=(l and 'l' or '') + (x and 'x' or ''))
def remove(self, path):
@@ -2448,7 +2269,7 @@
user receives the committer name and defaults to current
repository username, date is the commit date in any format
- supported by util.parsedate() and defaults to current date, extra
+ supported by dateutil.parsedate() and defaults to current date, extra
is a dictionary of metadata or is left empty.
"""
@@ -2663,7 +2484,7 @@
user receives the committer name and defaults to current repository
username, date is the commit date in any format supported by
- util.parsedate() and defaults to current date, extra is a dictionary of
+ dateutil.parsedate() and defaults to current date, extra is a dictionary of
metadata or is left empty.
"""
def __new__(cls, repo, originalctx, *args, **kwargs):
--- a/mercurial/copies.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/copies.py Mon Mar 19 08:07:18 2018 -0700
@@ -123,7 +123,7 @@
t[k] = v
# remove criss-crossed copies
- for k, v in t.items():
+ for k, v in list(t.items()):
if k in src and v in dst:
del t[k]
@@ -685,8 +685,8 @@
# the base and present in the source.
# Presence in the base is important to exclude added files, presence in the
# source is important to exclude removed files.
- missingfiles = filter(lambda f: f not in m1 and f in base and f in c2,
- changedfiles)
+ filt = lambda f: f not in m1 and f in base and f in c2
+ missingfiles = [f for f in changedfiles if filt(f)]
if missingfiles:
basenametofilename = collections.defaultdict(list)
--- a/mercurial/crecord.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/crecord.py Mon Mar 19 08:07:18 2018 -0700
@@ -547,7 +547,7 @@
chunkselector = curseschunkselector(headerlist, ui, operation)
if testfn and os.path.exists(testfn):
testf = open(testfn)
- testcommands = map(lambda x: x.rstrip('\n'), testf.readlines())
+ testcommands = [x.rstrip('\n') for x in testf.readlines()]
testf.close()
while True:
if chunkselector.handlekeypressed(testcommands.pop(0), test=True):
@@ -950,7 +950,7 @@
# preprocess the text, converting tabs to spaces
text = text.expandtabs(4)
# strip \n, and convert control characters to ^[char] representation
- text = re.sub(r'[\x00-\x08\x0a-\x1f]',
+ text = re.sub(br'[\x00-\x08\x0a-\x1f]',
lambda m:'^' + chr(ord(m.group()) + 64), text.strip('\n'))
if pair is not None:
@@ -1335,7 +1335,7 @@
# temporarily disable printing to windows by printstring
patchdisplaystring = self.printitem(item, ignorefolding,
recursechildren, towin=False)
- numlines = len(patchdisplaystring) / self.xscreensize
+ numlines = len(patchdisplaystring) // self.xscreensize
return numlines
def sigwinchhandler(self, n, frame):
--- a/mercurial/dagop.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/dagop.py Mon Mar 19 08:07:18 2018 -0700
@@ -9,11 +9,15 @@
import heapq
+from .thirdparty import (
+ attr,
+)
from . import (
error,
mdiff,
node,
patch,
+ pycompat,
smartset,
)
@@ -358,6 +362,148 @@
if inrange:
yield c, linerange1
+@attr.s(slots=True, frozen=True)
+class annotateline(object):
+ fctx = attr.ib()
+ lineno = attr.ib(default=False)
+ # Whether this annotation was the result of a skip-annotate.
+ skip = attr.ib(default=False)
+
+def _countlines(text):
+ if text.endswith("\n"):
+ return text.count("\n")
+ return text.count("\n") + int(bool(text))
+
+def _annotatepair(parents, childfctx, child, skipchild, diffopts):
+ r'''
+ Given parent and child fctxes and annotate data for parents, for all lines
+ in either parent that match the child, annotate the child with the parent's
+ data.
+
+ Additionally, if `skipchild` is True, replace all other lines with parent
+ annotate data as well such that child is never blamed for any lines.
+
+ See test-annotate.py for unit tests.
+ '''
+ pblocks = [(parent, mdiff.allblocks(parent[1], child[1], opts=diffopts))
+ for parent in parents]
+
+ if skipchild:
+ # Need to iterate over the blocks twice -- make it a list
+ pblocks = [(p, list(blocks)) for (p, blocks) in pblocks]
+ # Mercurial currently prefers p2 over p1 for annotate.
+ # TODO: change this?
+ for parent, blocks in pblocks:
+ for (a1, a2, b1, b2), t in blocks:
+ # Changed blocks ('!') or blocks made only of blank lines ('~')
+ # belong to the child.
+ if t == '=':
+ child[0][b1:b2] = parent[0][a1:a2]
+
+ if skipchild:
+ # Now try and match up anything that couldn't be matched,
+ # Reversing pblocks maintains bias towards p2, matching above
+ # behavior.
+ pblocks.reverse()
+
+ # The heuristics are:
+ # * Work on blocks of changed lines (effectively diff hunks with -U0).
+ # This could potentially be smarter but works well enough.
+ # * For a non-matching section, do a best-effort fit. Match lines in
+ # diff hunks 1:1, dropping lines as necessary.
+ # * Repeat the last line as a last resort.
+
+ # First, replace as much as possible without repeating the last line.
+ remaining = [(parent, []) for parent, _blocks in pblocks]
+ for idx, (parent, blocks) in enumerate(pblocks):
+ for (a1, a2, b1, b2), _t in blocks:
+ if a2 - a1 >= b2 - b1:
+ for bk in xrange(b1, b2):
+ if child[0][bk].fctx == childfctx:
+ ak = min(a1 + (bk - b1), a2 - 1)
+ child[0][bk] = attr.evolve(parent[0][ak], skip=True)
+ else:
+ remaining[idx][1].append((a1, a2, b1, b2))
+
+ # Then, look at anything left, which might involve repeating the last
+ # line.
+ for parent, blocks in remaining:
+ for a1, a2, b1, b2 in blocks:
+ for bk in xrange(b1, b2):
+ if child[0][bk].fctx == childfctx:
+ ak = min(a1 + (bk - b1), a2 - 1)
+ child[0][bk] = attr.evolve(parent[0][ak], skip=True)
+ return child
+
+def annotate(base, parents, linenumber=False, skiprevs=None, diffopts=None):
+ """Core algorithm for filectx.annotate()
+
+ `parents(fctx)` is a function returning a list of parent filectxs.
+ """
+
+ if linenumber:
+ def decorate(text, fctx):
+ return ([annotateline(fctx=fctx, lineno=i)
+ for i in xrange(1, _countlines(text) + 1)], text)
+ else:
+ def decorate(text, fctx):
+ return ([annotateline(fctx=fctx)] * _countlines(text), text)
+
+ # This algorithm would prefer to be recursive, but Python is a
+ # bit recursion-hostile. Instead we do an iterative
+ # depth-first search.
+
+ # 1st DFS pre-calculates pcache and needed
+ visit = [base]
+ pcache = {}
+ needed = {base: 1}
+ while visit:
+ f = visit.pop()
+ if f in pcache:
+ continue
+ pl = parents(f)
+ pcache[f] = pl
+ for p in pl:
+ needed[p] = needed.get(p, 0) + 1
+ if p not in pcache:
+ visit.append(p)
+
+ # 2nd DFS does the actual annotate
+ visit[:] = [base]
+ hist = {}
+ while visit:
+ f = visit[-1]
+ if f in hist:
+ visit.pop()
+ continue
+
+ ready = True
+ pl = pcache[f]
+ for p in pl:
+ if p not in hist:
+ ready = False
+ visit.append(p)
+ if ready:
+ visit.pop()
+ curr = decorate(f.data(), f)
+ skipchild = False
+ if skiprevs is not None:
+ skipchild = f._changeid in skiprevs
+ curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
+ diffopts)
+ for p in pl:
+ if needed[p] == 1:
+ del hist[p]
+ del needed[p]
+ else:
+ needed[p] -= 1
+
+ hist[f] = curr
+ del pcache[f]
+
+ lineattrs, text = hist[base]
+ return pycompat.ziplist(lineattrs, mdiff.splitnewlines(text))
+
def toposort(revs, parentsfunc, firstbranch=()):
"""Yield revisions from heads to roots one (topo) branch at a time.
--- a/mercurial/debugcommands.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/debugcommands.py Mon Mar 19 08:07:18 2018 -0700
@@ -14,9 +14,12 @@
import operator
import os
import random
+import re
import socket
import ssl
+import stat
import string
+import subprocess
import sys
import tempfile
import time
@@ -46,8 +49,10 @@
fileset,
formatter,
hg,
+ httppeer,
localrepo,
lock as lockmod,
+ logcmdutil,
merge as mergemod,
obsolete,
obsutil,
@@ -64,6 +69,7 @@
setdiscovery,
simplemerge,
smartset,
+ sshpeer,
sslutil,
streamclone,
templater,
@@ -72,7 +78,9 @@
url as urlmod,
util,
vfs as vfsmod,
+ wireprotoserver,
)
+from .utils import dateutil
release = lockmod.release
@@ -162,7 +170,7 @@
if mergeable_file:
linesperrev = 2
# make a file with k lines per rev
- initialmergedlines = [str(i) for i in xrange(0, total * linesperrev)]
+ initialmergedlines = ['%d' % i for i in xrange(0, total * linesperrev)]
initialmergedlines.append("")
tags = []
@@ -269,7 +277,7 @@
ui.write("\n%s%s\n" % (indent_string, named))
for deltadata in gen.deltaiter():
node, p1, p2, cs, deltabase, delta, flags = deltadata
- ui.write("%s%s %s %s %s %s %s\n" %
+ ui.write("%s%s %s %s %s %s %d\n" %
(indent_string, hex(node), hex(p1), hex(p2),
hex(cs), hex(deltabase), len(delta)))
@@ -339,11 +347,14 @@
if part.type == 'changegroup':
version = part.params.get('version', '01')
cg = changegroup.getunbundler(version, part, 'UN')
- _debugchangegroup(ui, cg, all=all, indent=4, **opts)
+ if not ui.quiet:
+ _debugchangegroup(ui, cg, all=all, indent=4, **opts)
if part.type == 'obsmarkers':
- _debugobsmarkers(ui, part, indent=4, **opts)
+ if not ui.quiet:
+ _debugobsmarkers(ui, part, indent=4, **opts)
if part.type == 'phase-heads':
- _debugphaseheads(ui, part, indent=4)
+ if not ui.quiet:
+ _debugphaseheads(ui, part, indent=4)
@command('debugbundle',
[('a', 'all', None, _('show all details')),
@@ -556,13 +567,13 @@
def debugdate(ui, date, range=None, **opts):
"""parse and display a date"""
if opts[r"extended"]:
- d = util.parsedate(date, util.extendeddateformats)
+ d = dateutil.parsedate(date, util.extendeddateformats)
else:
- d = util.parsedate(date)
- ui.write(("internal: %s %s\n") % d)
- ui.write(("standard: %s\n") % util.datestr(d))
+ d = dateutil.parsedate(date)
+ ui.write(("internal: %d %d\n") % d)
+ ui.write(("standard: %s\n") % dateutil.datestr(d))
if range:
- m = util.matchdate(range)
+ m = dateutil.matchdate(range)
ui.write(("match: %s\n") % m(d[0]))
@command('debugdeltachain',
@@ -1001,7 +1012,7 @@
ignore = repo.dirstate._ignore
if not files:
# Show all the patterns
- ui.write("%s\n" % repr(ignore))
+ ui.write("%s\n" % pycompat.byterepr(ignore))
else:
m = scmutil.match(repo[None], pats=files)
for f in m.files():
@@ -1113,7 +1124,7 @@
def writetemp(contents):
(fd, name) = tempfile.mkstemp(prefix="hg-debuginstall-")
- f = os.fdopen(fd, pycompat.sysstr("wb"))
+ f = os.fdopen(fd, r"wb")
f.write(contents)
f.close()
return name
@@ -1239,16 +1250,17 @@
# editor
editor = ui.geteditor()
editor = util.expandpath(editor)
- fm.write('editor', _("checking commit editor... (%s)\n"), editor)
- cmdpath = util.findexe(pycompat.shlexsplit(editor)[0])
+ editorbin = util.shellsplit(editor)[0]
+ fm.write('editor', _("checking commit editor... (%s)\n"), editorbin)
+ cmdpath = util.findexe(editorbin)
fm.condwrite(not cmdpath and editor == 'vi', 'vinotfound',
_(" No commit editor set and can't find %s in PATH\n"
" (specify a commit editor in your configuration"
- " file)\n"), not cmdpath and editor == 'vi' and editor)
+ " file)\n"), not cmdpath and editor == 'vi' and editorbin)
fm.condwrite(not cmdpath and editor != 'vi', 'editornotfound',
_(" Can't find editor '%s' in PATH\n"
" (specify a commit editor in your configuration"
- " file)\n"), not cmdpath and editor)
+ " file)\n"), not cmdpath and editorbin)
if not cmdpath and editor != 'vi':
problems += 1
@@ -1367,9 +1379,9 @@
l.release()
else:
try:
- stat = vfs.lstat(name)
- age = now - stat.st_mtime
- user = util.username(stat.st_uid)
+ st = vfs.lstat(name)
+ age = now - st[stat.ST_MTIME]
+ user = util.username(st.st_uid)
locker = vfs.readlock(name)
if ":" in locker:
host, pid = locker.split(':')
@@ -1405,7 +1417,7 @@
return h
def printrecords(version):
- ui.write(('* version %s records\n') % version)
+ ui.write(('* version %d records\n') % version)
if version == 1:
records = v1records
else:
@@ -1573,7 +1585,7 @@
try:
date = opts.get('date')
if date:
- date = util.parsedate(date)
+ date = dateutil.parsedate(date)
else:
date = None
prec = parsenodeid(precursor)
@@ -1589,7 +1601,8 @@
metadata=metadata, ui=ui)
tr.close()
except ValueError as exc:
- raise error.Abort(_('bad obsmarker input: %s') % exc)
+ raise error.Abort(_('bad obsmarker input: %s') %
+ pycompat.bytestr(exc))
finally:
tr.release()
finally:
@@ -1692,6 +1705,25 @@
ui.write('\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
ui.write('\n')
+@command('debugpeer', [], _('PATH'), norepo=True)
+def debugpeer(ui, path):
+ """establish a connection to a peer repository"""
+ # Always enable peer request logging. Requires --debug to display
+ # though.
+ overrides = {
+ ('devel', 'debug.peer-request'): True,
+ }
+
+ with ui.configoverride(overrides):
+ peer = hg.peer(ui, {}, path)
+
+ local = peer.local() is not None
+ canpush = peer.canpush()
+
+ ui.write(_('url: %s\n') % peer.url())
+ ui.write(_('local: %s\n') % (_('yes') if local else _('no')))
+ ui.write(_('pushable: %s\n') % (_('yes') if canpush else _('no')))
+
@command('debugpickmergetool',
[('r', 'rev', '', _('check for files in this revision'), _('REV')),
('', 'changedelete', None, _('emulate merging change and delete')),
@@ -1744,15 +1776,15 @@
overrides = {}
if opts['tool']:
overrides[('ui', 'forcemerge')] = opts['tool']
- ui.note(('with --tool %r\n') % (opts['tool']))
+ ui.note(('with --tool %r\n') % (pycompat.bytestr(opts['tool'])))
with ui.configoverride(overrides, 'debugmergepatterns'):
hgmerge = encoding.environ.get("HGMERGE")
if hgmerge is not None:
- ui.note(('with HGMERGE=%r\n') % (hgmerge))
+ ui.note(('with HGMERGE=%r\n') % (pycompat.bytestr(hgmerge)))
uimerge = ui.config("ui", "merge")
if uimerge:
- ui.note(('with ui.merge=%r\n') % (uimerge))
+ ui.note(('with ui.merge=%r\n') % (pycompat.bytestr(uimerge)))
ctx = scmutil.revsingle(repo, opts.get('rev'))
m = scmutil.match(ctx, pats, opts)
@@ -1785,7 +1817,7 @@
if keyinfo:
key, old, new = keyinfo
r = target.pushkey(namespace, key, old, new)
- ui.status(str(r) + '\n')
+ ui.status(pycompat.bytestr(r) + '\n')
return not r
else:
for k, v in sorted(target.listkeys(namespace).iteritems()):
@@ -2206,7 +2238,38 @@
if not opts['show_revs']:
return
for c in revs:
- ui.write("%s\n" % c)
+ ui.write("%d\n" % c)
+
+@command('debugserve', [
+ ('', 'sshstdio', False, _('run an SSH server bound to process handles')),
+ ('', 'logiofd', '', _('file descriptor to log server I/O to')),
+ ('', 'logiofile', '', _('file to log server I/O to')),
+], '')
+def debugserve(ui, repo, **opts):
+ """run a server with advanced settings
+
+ This command is similar to :hg:`serve`. It exists partially as a
+ workaround to the fact that ``hg serve --stdio`` must have specific
+ arguments for security reasons.
+ """
+ opts = pycompat.byteskwargs(opts)
+
+ if not opts['sshstdio']:
+ raise error.Abort(_('only --sshstdio is currently supported'))
+
+ logfh = None
+
+ if opts['logiofd'] and opts['logiofile']:
+ raise error.Abort(_('cannot use both --logiofd and --logiofile'))
+
+ if opts['logiofd']:
+ # Line buffered because output is line based.
+ logfh = os.fdopen(int(opts['logiofd']), r'ab', 1)
+ elif opts['logiofile']:
+ logfh = open(opts['logiofile'], 'ab', 1)
+
+ s = wireprotoserver.sshserver(ui, repo, logfh=logfh)
+ s.serve_forever()
@command('debugsetparents', [], _('REV1 [REV2]'))
def debugsetparents(ui, repo, rev1, rev2=None):
@@ -2336,7 +2399,7 @@
"""
# passed to successorssets caching computation from one call to another
cache = {}
- ctx2str = str
+ ctx2str = bytes
node2str = short
for rev in scmutil.revrange(repo, revs):
ctx = repo[rev]
@@ -2394,18 +2457,34 @@
if revs is None:
tres = formatter.templateresources(ui, repo)
t = formatter.maketemplater(ui, tmpl, resources=tres)
- ui.write(t.render(props))
+ ui.write(t.renderdefault(props))
else:
- displayer = cmdutil.makelogtemplater(ui, repo, tmpl)
+ displayer = logcmdutil.maketemplater(ui, repo, tmpl)
for r in revs:
displayer.show(repo[r], **pycompat.strkwargs(props))
displayer.close()
+@command('debuguigetpass', [
+    ('p', 'prompt', '', _('prompt text'), _('TEXT')),
+], _('[-p TEXT]'), norepo=True)
+def debuguigetpass(ui, prompt=''):
+    """show prompt to type password"""
+    r = ui.getpass(prompt)
+    ui.write(('response: %s\n') % r)  # was 'respose' — match debuguiprompt's output label
+
+@command('debuguiprompt', [
+ ('p', 'prompt', '', _('prompt text'), _('TEXT')),
+], _('[-p TEXT]'), norepo=True)
+def debuguiprompt(ui, prompt=''):
+ """show plain prompt"""
+ r = ui.prompt(prompt)
+ ui.write(('response: %s\n') % r)
+
@command('debugupdatecaches', [])
def debugupdatecaches(ui, repo, *pats, **opts):
"""warm all known caches in the repository"""
with repo.wlock(), repo.lock():
- repo.updatecaches()
+ repo.updatecaches(full=True)
@command('debugupgraderepo', [
('o', 'optimize', [], _('extra optimization to perform'), _('NAME')),
@@ -2452,6 +2531,17 @@
line = fmt % (abs, f(m.rel(abs)), m.exact(abs) and 'exact' or '')
ui.write("%s\n" % line.rstrip())
+@command('debugwhyunstable', [], _('REV'))
+def debugwhyunstable(ui, repo, rev):
+ """explain instabilities of a changeset"""
+ for entry in obsutil.whyunstable(repo, repo[rev]):
+ dnodes = ''
+ if entry.get('divergentnodes'):
+ dnodes = ' '.join('%s (%s)' % (ctx.hex(), ctx.phasestr())
+ for ctx in entry['divergentnodes']) + ' '
+ ui.write('%s: %s%s %s\n' % (entry['instability'], dnodes,
+ entry['reason'], entry['node']))
+
@command('debugwireargs',
[('', 'three', '', 'three'),
('', 'four', '', 'four'),
@@ -2475,3 +2565,442 @@
ui.write("%s\n" % res1)
if res1 != res2:
ui.warn("%s\n" % res2)
+
+def _parsewirelangblocks(fh):
+ activeaction = None
+ blocklines = []
+
+ for line in fh:
+ line = line.rstrip()
+ if not line:
+ continue
+
+ if line.startswith(b'#'):
+ continue
+
+ if not line.startswith(' '):
+ # New block. Flush previous one.
+ if activeaction:
+ yield activeaction, blocklines
+
+ activeaction = line
+ blocklines = []
+ continue
+
+ # Else we start with an indent.
+
+ if not activeaction:
+ raise error.Abort(_('indented line outside of block'))
+
+ blocklines.append(line)
+
+ # Flush last block.
+ if activeaction:
+ yield activeaction, blocklines
+
+@command('debugwireproto',
+ [
+ ('', 'localssh', False, _('start an SSH server for this repo')),
+ ('', 'peer', '', _('construct a specific version of the peer')),
+ ('', 'noreadstderr', False, _('do not read from stderr of the remote')),
+ ] + cmdutil.remoteopts,
+ _('[PATH]'),
+ optionalrepo=True)
+def debugwireproto(ui, repo, path=None, **opts):
+ """send wire protocol commands to a server
+
+ This command can be used to issue wire protocol commands to remote
+ peers and to debug the raw data being exchanged.
+
+ ``--localssh`` will start an SSH server against the current repository
+ and connect to that. By default, the connection will perform a handshake
+ and establish an appropriate peer instance.
+
+ ``--peer`` can be used to bypass the handshake protocol and construct a
+ peer instance using the specified class type. Valid values are ``raw``,
+ ``ssh1``, and ``ssh2``. ``raw`` instances only allow sending raw data
+ payloads and don't support higher-level command actions.
+
+ ``--noreadstderr`` can be used to disable automatic reading from stderr
+ of the peer (for SSH connections only). Disabling automatic reading of
+ stderr is useful for making output more deterministic.
+
+ Commands are issued via a mini language which is specified via stdin.
+ The language consists of individual actions to perform. An action is
+ defined by a block. A block is defined as a line with no leading
+ space followed by 0 or more lines with leading space. Blocks are
+ effectively a high-level command with additional metadata.
+
+ Lines beginning with ``#`` are ignored.
+
+ The following sections denote available actions.
+
+ raw
+ ---
+
+ Send raw data to the server.
+
+ The block payload contains the raw data to send as one atomic send
+ operation. The data may not actually be delivered in a single system
+ call: it depends on the abilities of the transport being used.
+
+ Each line in the block is de-indented and concatenated. Then, that
+ value is evaluated as a Python b'' literal. This allows the use of
+ backslash escaping, etc.
+
+ raw+
+ ----
+
+ Behaves like ``raw`` except flushes output afterwards.
+
+ command <X>
+ -----------
+
+ Send a request to run a named command, whose name follows the ``command``
+ string.
+
+ Arguments to the command are defined as lines in this block. The format of
+ each line is ``<key> <value>``. e.g.::
+
+ command listkeys
+ namespace bookmarks
+
+ Values are interpreted as Python b'' literals. This allows encoding
+ special byte sequences via backslash escaping.
+
+ The following arguments have special meaning:
+
+ ``PUSHFILE``
+ When defined, the *push* mechanism of the peer will be used instead
+ of the static request-response mechanism and the content of the
+ file specified in the value of this argument will be sent as the
+ command payload.
+
+ This can be used to submit a local bundle file to the remote.
+
+ batchbegin
+ ----------
+
+ Instruct the peer to begin a batched send.
+
+ All ``command`` blocks are queued for execution until the next
+ ``batchsubmit`` block.
+
+ batchsubmit
+ -----------
+
+ Submit previously queued ``command`` blocks as a batch request.
+
+ This action MUST be paired with a ``batchbegin`` action.
+
+ httprequest <method> <path>
+ ---------------------------
+
+ (HTTP peer only)
+
+ Send an HTTP request to the peer.
+
+ The HTTP request line follows the ``httprequest`` action. e.g. ``GET /foo``.
+
+ Arguments of the form ``<key>: <value>`` are interpreted as HTTP request
+ headers to add to the request. e.g. ``Accept: foo``.
+
+ The following arguments are special:
+
+ ``BODYFILE``
+ The content of the file defined as the value to this argument will be
+ transferred verbatim as the HTTP request body.
+
+ close
+ -----
+
+ Close the connection to the server.
+
+ flush
+ -----
+
+ Flush data written to the server.
+
+ readavailable
+ -------------
+
+ Close the write end of the connection and read all available data from
+ the server.
+
+ If the connection to the server encompasses multiple pipes, we poll both
+ pipes and read available data.
+
+ readline
+ --------
+
+ Read a line of output from the server. If there are multiple output
+ pipes, reads only the main pipe.
+
+ ereadline
+ ---------
+
+ Like ``readline``, but read from the stderr pipe, if available.
+
+ read <X>
+ --------
+
+ ``read()`` N bytes from the server's main output pipe.
+
+ eread <X>
+ ---------
+
+ ``read()`` N bytes from the server's stderr pipe, if available.
+ """
+ opts = pycompat.byteskwargs(opts)
+
+ if opts['localssh'] and not repo:
+ raise error.Abort(_('--localssh requires a repository'))
+
+ if opts['peer'] and opts['peer'] not in ('raw', 'ssh1', 'ssh2'):
+ raise error.Abort(_('invalid value for --peer'),
+ hint=_('valid values are "raw", "ssh1", and "ssh2"'))
+
+ if path and opts['localssh']:
+ raise error.Abort(_('cannot specify --localssh with an explicit '
+ 'path'))
+
+ if ui.interactive():
+ ui.write(_('(waiting for commands on stdin)\n'))
+
+ blocks = list(_parsewirelangblocks(ui.fin))
+
+ proc = None
+ stdin = None
+ stdout = None
+ stderr = None
+ opener = None
+
+ if opts['localssh']:
+ # We start the SSH server in its own process so there is process
+ # separation. This prevents a whole class of potential bugs around
+ # shared state from interfering with server operation.
+ args = util.hgcmd() + [
+ '-R', repo.root,
+ 'debugserve', '--sshstdio',
+ ]
+ proc = subprocess.Popen(args, stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+ bufsize=0)
+
+ stdin = proc.stdin
+ stdout = proc.stdout
+ stderr = proc.stderr
+
+ # We turn the pipes into observers so we can log I/O.
+ if ui.verbose or opts['peer'] == 'raw':
+ stdin = util.makeloggingfileobject(ui, proc.stdin, b'i',
+ logdata=True)
+ stdout = util.makeloggingfileobject(ui, proc.stdout, b'o',
+ logdata=True)
+ stderr = util.makeloggingfileobject(ui, proc.stderr, b'e',
+ logdata=True)
+
+ # --localssh also implies the peer connection settings.
+
+ url = 'ssh://localserver'
+ autoreadstderr = not opts['noreadstderr']
+
+ if opts['peer'] == 'ssh1':
+ ui.write(_('creating ssh peer for wire protocol version 1\n'))
+ peer = sshpeer.sshv1peer(ui, url, proc, stdin, stdout, stderr,
+ None, autoreadstderr=autoreadstderr)
+ elif opts['peer'] == 'ssh2':
+ ui.write(_('creating ssh peer for wire protocol version 2\n'))
+ peer = sshpeer.sshv2peer(ui, url, proc, stdin, stdout, stderr,
+ None, autoreadstderr=autoreadstderr)
+ elif opts['peer'] == 'raw':
+ ui.write(_('using raw connection to peer\n'))
+ peer = None
+ else:
+ ui.write(_('creating ssh peer from handshake results\n'))
+ peer = sshpeer.makepeer(ui, url, proc, stdin, stdout, stderr,
+ autoreadstderr=autoreadstderr)
+
+ elif path:
+ # We bypass hg.peer() so we can proxy the sockets.
+ # TODO consider not doing this because we skip
+ # ``hg.wirepeersetupfuncs`` and potentially other useful functionality.
+ u = util.url(path)
+ if u.scheme != 'http':
+ raise error.Abort(_('only http:// paths are currently supported'))
+
+ url, authinfo = u.authinfo()
+ openerargs = {}
+
+ # Turn pipes/sockets into observers so we can log I/O.
+ if ui.verbose:
+ openerargs = {
+ r'loggingfh': ui,
+ r'loggingname': b's',
+ r'loggingopts': {
+ r'logdata': True,
+ },
+ }
+
+ opener = urlmod.opener(ui, authinfo, **openerargs)
+
+ if opts['peer'] == 'raw':
+ ui.write(_('using raw connection to peer\n'))
+ peer = None
+ elif opts['peer']:
+ raise error.Abort(_('--peer %s not supported with HTTP peers') %
+ opts['peer'])
+ else:
+ peer = httppeer.httppeer(ui, path, url, opener)
+ peer._fetchcaps()
+
+ # We /could/ populate stdin/stdout with sock.makefile()...
+ else:
+ raise error.Abort(_('unsupported connection configuration'))
+
+ batchedcommands = None
+
+ # Now perform actions based on the parsed wire language instructions.
+ for action, lines in blocks:
+ if action in ('raw', 'raw+'):
+ if not stdin:
+ raise error.Abort(_('cannot call raw/raw+ on this peer'))
+
+ # Concatenate the data together.
+ data = ''.join(l.lstrip() for l in lines)
+ data = util.unescapestr(data)
+ stdin.write(data)
+
+ if action == 'raw+':
+ stdin.flush()
+ elif action == 'flush':
+ if not stdin:
+ raise error.Abort(_('cannot call flush on this peer'))
+ stdin.flush()
+ elif action.startswith('command'):
+ if not peer:
+ raise error.Abort(_('cannot send commands unless peer instance '
+ 'is available'))
+
+ command = action.split(' ', 1)[1]
+
+ args = {}
+ for line in lines:
+ # We need to allow empty values.
+ fields = line.lstrip().split(' ', 1)
+ if len(fields) == 1:
+ key = fields[0]
+ value = ''
+ else:
+ key, value = fields
+
+ args[key] = util.unescapestr(value)
+
+ if batchedcommands is not None:
+ batchedcommands.append((command, args))
+ continue
+
+ ui.status(_('sending %s command\n') % command)
+
+ if 'PUSHFILE' in args:
+ with open(args['PUSHFILE'], r'rb') as fh:
+ del args['PUSHFILE']
+ res, output = peer._callpush(command, fh,
+ **pycompat.strkwargs(args))
+ ui.status(_('result: %s\n') % util.escapedata(res))
+ ui.status(_('remote output: %s\n') %
+ util.escapedata(output))
+ else:
+ res = peer._call(command, **pycompat.strkwargs(args))
+ ui.status(_('response: %s\n') % util.escapedata(res))
+
+ elif action == 'batchbegin':
+ if batchedcommands is not None:
+ raise error.Abort(_('nested batchbegin not allowed'))
+
+ batchedcommands = []
+ elif action == 'batchsubmit':
+ # There is a batching API we could go through. But it would be
+ # difficult to normalize requests into function calls. It is easier
+ # to bypass this layer and normalize to commands + args.
+ ui.status(_('sending batch with %d sub-commands\n') %
+ len(batchedcommands))
+ for i, chunk in enumerate(peer._submitbatch(batchedcommands)):
+ ui.status(_('response #%d: %s\n') % (i, util.escapedata(chunk)))
+
+ batchedcommands = None
+
+        elif action.startswith('httprequest '):
+            if not opener:
+                raise error.Abort(_('cannot use httprequest without an HTTP '
+                                    'peer'))
+
+            request = action.split(' ', 2)
+            if len(request) != 3:
+                raise error.Abort(_('invalid httprequest: expected format is '
+                                    '"httprequest <method> <path>'))
+
+            method, httppath = request[1:]
+            headers = {}
+            body = None
+            for line in lines:
+                line = line.lstrip()
+                m = re.match(b'^([a-zA-Z0-9_-]+): (.*)$', line)
+                if m:
+                    headers[m.group(1)] = m.group(2)
+                    continue
+
+                if line.startswith(b'BODYFILE '):
+                    # split() returns a list; the filename is the 2nd field.
+                    with open(line.split(b' ', 1)[1], 'rb') as fh:
+                        body = fh.read()
+                else:
+                    raise error.Abort(_('unknown argument to httprequest: %s') %
+                                      line)
+
+            url = path + httppath
+            req = urlmod.urlreq.request(pycompat.strurl(url), body, headers)
+
+            try:
+                opener.open(req).read()
+            except util.urlerr.urlerror as e:
+                e.read()
+
+ elif action == 'close':
+ peer.close()
+ elif action == 'readavailable':
+ if not stdout or not stderr:
+ raise error.Abort(_('readavailable not available on this peer'))
+
+ stdin.close()
+ stdout.read()
+ stderr.read()
+
+ elif action == 'readline':
+ if not stdout:
+ raise error.Abort(_('readline not available on this peer'))
+ stdout.readline()
+ elif action == 'ereadline':
+ if not stderr:
+ raise error.Abort(_('ereadline not available on this peer'))
+ stderr.readline()
+ elif action.startswith('read '):
+ count = int(action.split(' ', 1)[1])
+ if not stdout:
+ raise error.Abort(_('read not available on this peer'))
+ stdout.read(count)
+ elif action.startswith('eread '):
+ count = int(action.split(' ', 1)[1])
+ if not stderr:
+ raise error.Abort(_('eread not available on this peer'))
+ stderr.read(count)
+ else:
+ raise error.Abort(_('unknown action: %s') % action)
+
+ if batchedcommands is not None:
+ raise error.Abort(_('unclosed "batchbegin" request'))
+
+ if peer:
+ peer.close()
+
+ if proc:
+ proc.kill()
--- a/mercurial/default.d/mergetools.rc Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/default.d/mergetools.rc Mon Mar 19 08:07:18 2018 -0700
@@ -1,7 +1,7 @@
# Some default global settings for common merge tools
[merge-tools]
-kdiff3.args=--auto --L1 base --L2 local --L3 other $base $local $other -o $output
+kdiff3.args=--auto --L1 $labelbase --L2 $labellocal --L3 $labelother $base $local $other -o $output
kdiff3.regkey=Software\KDiff3
kdiff3.regkeyalt=Software\Wow6432Node\KDiff3
kdiff3.regappend=\kdiff3.exe
@@ -26,7 +26,7 @@
gpyfm.gui=True
meld.gui=True
-meld.args=--label='local' $local --label='merged' $base --label='other' $other -o $output
+meld.args=--label=$labellocal $local --label='merged' $base --label=$labelother $other -o $output
meld.check=changed
meld.diffargs=-a --label=$plabel1 $parent --label=$clabel $child
@@ -35,7 +35,7 @@
tkdiff.priority=-8
tkdiff.diffargs=-L $plabel1 $parent -L $clabel $child
-xxdiff.args=--show-merged-pane --exit-with-merge-status --title1 local --title2 base --title3 other --merged-filename $output --merge $local $base $other
+xxdiff.args=--show-merged-pane --exit-with-merge-status --title1 $labellocal --title2 $labelbase --title3 $labelother --merged-filename $output --merge $local $base $other
xxdiff.gui=True
xxdiff.priority=-8
xxdiff.diffargs=--title1 $plabel1 $parent --title2 $clabel $child
@@ -44,7 +44,7 @@
diffmerge.regkeyalt=Software\Wow6432Node\SourceGear\SourceGear DiffMerge\
diffmerge.regname=Location
diffmerge.priority=-7
-diffmerge.args=-nosplash -merge -title1=local -title2=merged -title3=other $local $base $other -result=$output
+diffmerge.args=-nosplash -merge -title1=$labellocal -title2=merged -title3=$labelother $local $base $other -result=$output
diffmerge.check=changed
diffmerge.gui=True
diffmerge.diffargs=--nosplash --title1=$plabel1 --title2=$clabel $parent $child
@@ -72,7 +72,7 @@
tortoisemerge.priority=-8
tortoisemerge.diffargs=/base:$parent /mine:$child /basename:$plabel1 /minename:$clabel
-ecmerge.args=$base $local $other --mode=merge3 --title0=base --title1=local --title2=other --to=$output
+ecmerge.args=$base $local $other --mode=merge3 --title0=$labelbase --title1=$labellocal --title2=$labelother --to=$output
ecmerge.regkey=Software\Elli\xc3\xa9 Computing\Merge
ecmerge.regkeyalt=Software\Wow6432Node\Elli\xc3\xa9 Computing\Merge
ecmerge.gui=True
@@ -93,7 +93,7 @@
filemergexcode.gui=True
; Windows version of Beyond Compare
-beyondcompare3.args=$local $other $base $output /ro /lefttitle=local /centertitle=base /righttitle=other /automerge /reviewconflicts /solo
+beyondcompare3.args=$local $other $base $output /ro /lefttitle=$labellocal /centertitle=$labelbase /righttitle=$labelother /automerge /reviewconflicts /solo
beyondcompare3.regkey=Software\Scooter Software\Beyond Compare 3
beyondcompare3.regname=ExePath
beyondcompare3.gui=True
@@ -113,7 +113,7 @@
bcomposx.priority=-1
bcomposx.diffargs=-lro -lefttitle=$plabel1 -righttitle=$clabel -solo -expandall $parent $child
-winmerge.args=/e /x /wl /ub /dl other /dr local $other $local $output
+winmerge.args=/e /x /wl /ub /dl $labelother /dr $labellocal $other $local $output
winmerge.regkey=Software\Thingamahoochie\WinMerge
winmerge.regkeyalt=Software\Wow6432Node\Thingamahoochie\WinMerge\
winmerge.regname=Executable
--- a/mercurial/destutil.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/destutil.py Mon Mar 19 08:07:18 2018 -0700
@@ -13,6 +13,7 @@
error,
obsutil,
scmutil,
+ stack
)
def _destupdateobs(repo, clean):
@@ -339,26 +340,26 @@
onheadcheck=onheadcheck, destspace=destspace)
return repo[node].rev()
-histeditdefaultrevset = 'reverse(only(.) and not public() and not ::merge())'
-
def desthistedit(ui, repo):
"""Default base revision to edit for `hg histedit`."""
- default = ui.config('histedit', 'defaultrev', histeditdefaultrevset)
- if default:
+ default = ui.config('histedit', 'defaultrev')
+
+ if default is None:
+ revs = stack.getstack(repo)
+ elif default:
revs = scmutil.revrange(repo, [default])
- if revs:
- # The revset supplied by the user may not be in ascending order nor
- # take the first revision. So do this manually.
- revs.sort()
- return revs.first()
+
+ if revs:
+ # The revset supplied by the user may not be in ascending order nor
+ # take the first revision. So do this manually.
+ revs.sort()
+ return revs.first()
return None
def stackbase(ui, repo):
- # The histedit default base stops at public changesets, branchpoints,
- # and merges, which is exactly what we want for a stack.
- revs = scmutil.revrange(repo, [histeditdefaultrevset])
- return revs.last() if revs else None
+ revs = stack.getstack(repo)
+ return revs.first() if revs else None
def _statusotherbook(ui, repo):
bmheads = bookmarks.headsforactive(repo)
--- a/mercurial/dirstate.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/dirstate.py Mon Mar 19 08:07:18 2018 -0700
@@ -49,7 +49,7 @@
'''Get "now" timestamp on filesystem'''
tmpfd, tmpname = vfs.mkstemp()
try:
- return os.fstat(tmpfd).st_mtime
+ return os.fstat(tmpfd)[stat.ST_MTIME]
finally:
os.close(tmpfd)
vfs.unlink(tmpname)
@@ -99,27 +99,6 @@
# normally, so we don't have a try/finally here on purpose.
self._parentwriters -= 1
- def beginparentchange(self):
- '''Marks the beginning of a set of changes that involve changing
- the dirstate parents. If there is an exception during this time,
- the dirstate will not be written when the wlock is released. This
- prevents writing an incoherent dirstate where the parent doesn't
- match the contents.
- '''
- self._ui.deprecwarn('beginparentchange is obsoleted by the '
- 'parentchange context manager.', '4.3')
- self._parentwriters += 1
-
- def endparentchange(self):
- '''Marks the end of a set of changes that involve changing the
- dirstate parents. Once all parent changes have been marked done,
- the wlock will be free to write the dirstate on release.
- '''
- self._ui.deprecwarn('endparentchange is obsoleted by the '
- 'parentchange context manager.', '4.3')
- if self._parentwriters > 0:
- self._parentwriters -= 1
-
def pendingparentchange(self):
'''Returns true if the dirstate is in the middle of a set of changes
that modify the dirstate parent.
@@ -360,7 +339,7 @@
rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
check whether the dirstate has changed before rereading it.'''
- for a in ("_map", "_branch", "_ignore"):
+ for a in (r"_map", r"_branch", r"_ignore"):
if a in self.__dict__:
delattr(self, a)
self._lastnormaltime = 0
@@ -392,7 +371,8 @@
if state == 'a' or oldstate == 'r':
scmutil.checkfilename(f)
if self._map.hastrackeddir(f):
- raise error.Abort(_('directory %r already in dirstate') % f)
+ raise error.Abort(_('directory %r already in dirstate') %
+ pycompat.bytestr(f))
# shadows
for d in util.finddirs(f):
if self._map.hastrackeddir(d):
@@ -400,7 +380,8 @@
entry = self._map.get(d)
if entry is not None and entry[0] != 'r':
raise error.Abort(
- _('file %r in dirstate clashes with %r') % (d, f))
+ _('file %r in dirstate clashes with %r') %
+ (pycompat.bytestr(d), pycompat.bytestr(f)))
self._dirty = True
self._updatedfiles.add(f)
self._map.addfile(f, oldstate, state, mode, size, mtime)
@@ -408,7 +389,7 @@
def normal(self, f):
'''Mark a file normal and clean.'''
s = os.lstat(self._join(f))
- mtime = s.st_mtime
+ mtime = s[stat.ST_MTIME]
self._addpath(f, 'n', s.st_mode,
s.st_size & _rangemask, mtime & _rangemask)
self._map.copymap.pop(f, None)
@@ -647,7 +628,7 @@
self._origpl = None
# use the modification time of the newly created temporary file as the
# filesystem's notion of 'now'
- now = util.fstat(st).st_mtime & _rangemask
+ now = util.fstat(st)[stat.ST_MTIME] & _rangemask
# enough 'delaywrite' prevents 'pack_dirstate' from dropping
# timestamp of each entries in dirstate, because of 'now > mtime'
@@ -808,6 +789,17 @@
else:
badfn(ff, encoding.strtolocal(inst.strerror))
+ # match.files() may contain explicitly-specified paths that shouldn't
+ # be taken; drop them from the list of files found. dirsfound/notfound
+ # aren't filtered here because they will be tested later.
+ if match.anypats():
+ for f in list(results):
+ if f == '.hg' or f in subrepos:
+ # keep sentinel to disable further out-of-repo walks
+ continue
+ if not match(f):
+ del results[f]
+
# Case insensitive filesystems cannot rely on lstat() failing to detect
# a case-only rename. Prune the stat object for any file that does not
# match the case in the filesystem, if there are multiple files that
@@ -1078,9 +1070,10 @@
or size == -2 # other parent
or fn in copymap):
madd(fn)
- elif time != st.st_mtime and time != st.st_mtime & _rangemask:
+ elif (time != st[stat.ST_MTIME]
+ and time != st[stat.ST_MTIME] & _rangemask):
ladd(fn)
- elif st.st_mtime == lastnormaltime:
+ elif st[stat.ST_MTIME] == lastnormaltime:
# fn may have just been marked as normal and it may have
# changed in the same second without changing its size.
# This can happen if we quickly do multiple commits.
@@ -1237,9 +1230,12 @@
util.clearcachedproperty(self, "nonnormalset")
util.clearcachedproperty(self, "otherparentset")
- def iteritems(self):
+ def items(self):
return self._map.iteritems()
+ # forward for python2,3 compat
+ iteritems = items
+
def __len__(self):
return len(self._map)
@@ -1264,9 +1260,9 @@
def addfile(self, f, oldstate, state, mode, size, mtime):
"""Add a tracked file to the dirstate."""
- if oldstate in "?r" and "_dirs" in self.__dict__:
+ if oldstate in "?r" and r"_dirs" in self.__dict__:
self._dirs.addpath(f)
- if oldstate == "?" and "_alldirs" in self.__dict__:
+ if oldstate == "?" and r"_alldirs" in self.__dict__:
self._alldirs.addpath(f)
self._map[f] = dirstatetuple(state, mode, size, mtime)
if state != 'n' or mtime == -1:
@@ -1282,11 +1278,11 @@
the file's previous state. In the future, we should refactor this
to be more explicit about what that state is.
"""
- if oldstate not in "?r" and "_dirs" in self.__dict__:
+ if oldstate not in "?r" and r"_dirs" in self.__dict__:
self._dirs.delpath(f)
- if oldstate == "?" and "_alldirs" in self.__dict__:
+ if oldstate == "?" and r"_alldirs" in self.__dict__:
self._alldirs.addpath(f)
- if "filefoldmap" in self.__dict__:
+ if r"filefoldmap" in self.__dict__:
normed = util.normcase(f)
self.filefoldmap.pop(normed, None)
self._map[f] = dirstatetuple('r', 0, size, 0)
@@ -1299,11 +1295,11 @@
"""
exists = self._map.pop(f, None) is not None
if exists:
- if oldstate != "r" and "_dirs" in self.__dict__:
+ if oldstate != "r" and r"_dirs" in self.__dict__:
self._dirs.delpath(f)
- if "_alldirs" in self.__dict__:
+ if r"_alldirs" in self.__dict__:
self._alldirs.delpath(f)
- if "filefoldmap" in self.__dict__:
+ if r"filefoldmap" in self.__dict__:
normed = util.normcase(f)
self.filefoldmap.pop(normed, None)
self.nonnormalset.discard(f)
@@ -1438,7 +1434,7 @@
# This heuristic is imperfect in many ways, so in a future dirstate
# format update it makes sense to just record the number of entries
# on write.
- self._map = parsers.dict_new_presized(len(st) / 71)
+ self._map = parsers.dict_new_presized(len(st) // 71)
# Python's garbage collector triggers a GC each time a certain number
# of container objects (the number being defined by
--- a/mercurial/discovery.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/discovery.py Mon Mar 19 08:07:18 2018 -0700
@@ -53,13 +53,8 @@
return treediscovery.findcommonincoming(repo, remote, heads, force)
if heads:
- allknown = True
knownnode = repo.changelog.hasnode # no nodemap until it is filtered
- for h in heads:
- if not knownnode(h):
- allknown = False
- break
- if allknown:
+ if all(knownnode(h) for h in heads):
return (heads, False, heads)
res = setdiscovery.findcommonheads(repo.ui, repo, remote,
--- a/mercurial/dispatch.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/dispatch.py Mon Mar 19 08:07:18 2018 -0700
@@ -85,7 +85,7 @@
req = request(pycompat.sysargv[1:])
err = None
try:
- status = (dispatch(req) or 0) & 255
+ status = (dispatch(req) or 0)
except error.StdioError as e:
err = e
status = -1
@@ -106,11 +106,36 @@
except IOError:
status = -1
+ _silencestdio()
sys.exit(status & 255)
-def _initstdio():
- for fp in (sys.stdin, sys.stdout, sys.stderr):
- util.setbinary(fp)
+if pycompat.ispy3:
+ def _initstdio():
+ pass
+
+ def _silencestdio():
+ for fp in (sys.stdout, sys.stderr):
+ # Check if the file is okay
+ try:
+ fp.flush()
+ continue
+ except IOError:
+ pass
+ # Otherwise mark it as closed to silence "Exception ignored in"
+ # message emitted by the interpreter finalizer. Be careful to
+ # not close util.stdout, which may be a fdopen-ed file object and
+ # its close() actually closes the underlying file descriptor.
+ try:
+ fp.close()
+ except IOError:
+ pass
+else:
+ def _initstdio():
+ for fp in (sys.stdin, sys.stdout, sys.stderr):
+ util.setbinary(fp)
+
+ def _silencestdio():
+ pass
def _getsimilar(symbols, value):
sim = lambda x: difflib.SequenceMatcher(None, value, x).ratio()
@@ -132,8 +157,8 @@
similar = _getsimilar(inst.symbols, inst.function)
if len(inst.args) > 1:
write(_("hg: parse error at %s: %s\n") %
- (inst.args[1], inst.args[0]))
- if (inst.args[0][0] == ' '):
+ (pycompat.bytestr(inst.args[1]), inst.args[0]))
+ if inst.args[0].startswith(' '):
write(_("unexpected leading whitespace\n"))
else:
write(_("hg: parse error: %s\n") % inst.args[0])
@@ -471,13 +496,14 @@
args = pycompat.shlexsplit(self.definition)
except ValueError as inst:
self.badalias = (_("error in definition for alias '%s': %s")
- % (self.name, inst))
+ % (self.name, util.forcebytestr(inst)))
return
earlyopts, args = _earlysplitopts(args)
if earlyopts:
self.badalias = (_("error in definition for alias '%s': %s may "
"only be given on the command line")
- % (self.name, '/'.join(zip(*earlyopts)[0])))
+ % (self.name, '/'.join(pycompat.ziplist(*earlyopts)
+ [0])))
return
self.cmdname = cmd = args.pop(0)
self.givenargs = args
@@ -597,7 +623,7 @@
try:
args = fancyopts.fancyopts(args, commands.globalopts, options)
except getopt.GetoptError as inst:
- raise error.CommandError(None, inst)
+ raise error.CommandError(None, util.forcebytestr(inst))
if args:
cmd, args = args[0], args[1:]
@@ -621,7 +647,7 @@
try:
args = fancyopts.fancyopts(args, c, cmdoptions, gnu=True)
except getopt.GetoptError as inst:
- raise error.CommandError(cmd, inst)
+ raise error.CommandError(cmd, util.forcebytestr(inst))
# separate global options back out
for o in commands.globalopts:
@@ -646,7 +672,8 @@
configs.append((section, name, value))
except (IndexError, ValueError):
raise error.Abort(_('malformed --config option: %r '
- '(use --config section.name=value)') % cfg)
+ '(use --config section.name=value)')
+ % pycompat.bytestr(cfg))
return configs
@@ -821,9 +848,7 @@
if options['verbose'] or options['debug'] or options['quiet']:
for opt in ('verbose', 'debug', 'quiet'):
- val = str(bool(options[opt]))
- if pycompat.ispy3:
- val = val.encode('ascii')
+ val = pycompat.bytestr(bool(options[opt]))
for ui_ in uis:
ui_.setconfig('ui', opt, val, '--' + opt)
@@ -941,9 +966,9 @@
worst = None, ct, ''
if ui.config('ui', 'supportcontact') is None:
for name, mod in extensions.extensions():
- testedwith = getattr(mod, 'testedwith', '')
- if pycompat.ispy3 and isinstance(testedwith, str):
- testedwith = testedwith.encode(u'utf-8')
+ # 'testedwith' should be bytes, but not all extensions are ported
+ # to py3 and we don't want UnicodeException because of that.
+ testedwith = util.forcebytestr(getattr(mod, 'testedwith', ''))
report = getattr(mod, 'buglink', _('the extension author.'))
if not testedwith.strip():
# We found an untested extension. It's likely the culprit.
@@ -965,7 +990,7 @@
if worst[0] is not None:
name, testedwith, report = worst
if not isinstance(testedwith, (bytes, str)):
- testedwith = '.'.join([str(c) for c in testedwith])
+ testedwith = '.'.join([util.forcebytestr(c) for c in testedwith])
warning = (_('** Unknown exception encountered with '
'possibly-broken third-party extension %s\n'
'** which supports versions %s of Mercurial.\n'
@@ -978,11 +1003,7 @@
bugtracker = _("https://mercurial-scm.org/wiki/BugTracker")
warning = (_("** unknown exception encountered, "
"please report by visiting\n** ") + bugtracker + '\n')
- if pycompat.ispy3:
- sysversion = sys.version.encode(u'utf-8')
- else:
- sysversion = sys.version
- sysversion = sysversion.replace('\n', '')
+ sysversion = pycompat.sysbytes(sys.version).replace('\n', '')
warning += ((_("** Python %s\n") % sysversion) +
(_("** Mercurial Distributed SCM (version %s)\n") %
util.version()) +
@@ -997,6 +1018,7 @@
this function returns False, ignored otherwise.
"""
warning = _exceptionwarning(ui)
- ui.log("commandexception", "%s\n%s\n", warning, traceback.format_exc())
+ ui.log("commandexception", "%s\n%s\n", warning,
+ pycompat.sysbytes(traceback.format_exc()))
ui.warn(warning)
return False # re-raise the exception
--- a/mercurial/encoding.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/encoding.py Mon Mar 19 08:07:18 2018 -0700
@@ -7,7 +7,6 @@
from __future__ import absolute_import, print_function
-import io
import locale
import os
import unicodedata
@@ -181,7 +180,8 @@
return u.encode("utf-8")
except UnicodeDecodeError as inst:
sub = s[max(0, inst.start - 10):inst.start + 10]
- raise error.Abort("decoding near '%s': %s!" % (sub, inst))
+ raise error.Abort("decoding near '%s': %s!"
+ % (sub, pycompat.bytestr(inst)))
except LookupError as k:
raise error.Abort(k, hint="please check your locale settings")
@@ -580,18 +580,3 @@
c = pycompat.bytechr(ord(c.decode("utf-8", _utf8strict)) & 0xff)
r += c
return r
-
-if pycompat.ispy3:
- class strio(io.TextIOWrapper):
- """Wrapper around TextIOWrapper that respects hg's encoding assumptions.
-
- Also works around Python closing streams.
- """
-
- def __init__(self, buffer):
- super(strio, self).__init__(buffer, encoding=_sysstr(encoding))
-
- def __del__(self):
- """Override __del__ so it doesn't close the underlying stream."""
-else:
- strio = pycompat.identity
--- a/mercurial/error.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/error.py Mon Mar 19 08:07:18 2018 -0700
@@ -47,7 +47,7 @@
# this can't be called 'message' because at least some installs of
# Python 2.6+ complain about the 'message' property being deprecated
self.lookupmessage = message
- if isinstance(name, str) and len(name) == 20:
+ if isinstance(name, bytes) and len(name) == 20:
from .node import short
name = short(name)
RevlogError.__init__(self, '%s@%s: %s' % (index, name, message))
--- a/mercurial/exchange.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/exchange.py Mon Mar 19 08:07:18 2018 -0700
@@ -283,7 +283,6 @@
This function is used to allow testing of the older bundle version"""
ui = op.repo.ui
- forcebundle1 = False
# The goal is this config is to allow developer to choose the bundle
# version used during exchanged. This is especially handy during test.
# Value is a list of bundle version to be picked from, highest version
@@ -621,16 +620,25 @@
return hex(x)
def hexifycompbookmarks(bookmarks):
- for b, scid, dcid in bookmarks:
- yield b, safehex(scid), safehex(dcid)
+ return [(b, safehex(scid), safehex(dcid))
+ for (b, scid, dcid) in bookmarks]
comp = [hexifycompbookmarks(marks) for marks in comp]
+ return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)
+
+def _processcompared(pushop, pushed, explicit, remotebms, comp):
+ """take decision on bookmark to pull from the remote bookmark
+
+ Exist to help extensions who want to alter this behavior.
+ """
addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
+ repo = pushop.repo
+
for b, scid, dcid in advsrc:
if b in explicit:
explicit.remove(b)
- if not ancestors or repo[scid].rev() in ancestors:
+ if not pushed or repo[scid].rev() in pushed:
pushop.outbookmarks.append((b, dcid, scid))
# search added bookmark
for b, scid, dcid in addsrc:
@@ -656,8 +664,8 @@
if explicit:
explicit = sorted(explicit)
# we should probably list all of them
- ui.warn(_('bookmark %s does not exist on the local '
- 'or remote repository!\n') % explicit[0])
+ pushop.ui.warn(_('bookmark %s does not exist on the local '
+ 'or remote repository!\n') % explicit[0])
pushop.bkresult = 2
pushop.outbookmarks.sort()
@@ -1151,8 +1159,8 @@
for newremotehead in outdated:
r = pushop.remote.pushkey('phases',
newremotehead.hex(),
- str(phases.draft),
- str(phases.public))
+ ('%d' % phases.draft),
+ ('%d' % phases.public))
if not r:
pushop.ui.warn(_('updating %s to public failed!\n')
% newremotehead)
@@ -1931,6 +1939,28 @@
outgoing = _computeoutgoing(repo, heads, common)
bundle2.addparttagsfnodescache(repo, bundler, outgoing)
+@getbundle2partsgenerator('cache:rev-branch-cache')
+def _getbundlerevbranchcache(bundler, repo, source, bundlecaps=None,
+ b2caps=None, heads=None, common=None,
+ **kwargs):
+ """Transfer the rev-branch-cache mapping
+
+ The payload is a series of data related to each branch
+
+ 1) branch name length
+ 2) number of open heads
+ 3) number of closed heads
+ 4) open heads nodes
+ 5) closed heads nodes
+ """
+ # Don't send unless:
+ # - changeset are being exchanged,
+ # - the client supports it.
+ if not (kwargs.get(r'cg', True)) or 'rev-branch-cache' not in b2caps:
+ return
+ outgoing = _computeoutgoing(repo, heads, common)
+ bundle2.addpartrevbranchcache(repo, bundler, outgoing)
+
def check_heads(repo, their_heads, context):
"""check if the heads of a repo have been modified
@@ -2149,7 +2179,8 @@
continue
except error.UnsupportedBundleSpecification as e:
repo.ui.debug('filtering %s because unsupported bundle '
- 'spec: %s\n' % (entry['URL'], str(e)))
+ 'spec: %s\n' % (
+ entry['URL'], util.forcebytestr(e)))
continue
# If we don't have a spec and requested a stream clone, we don't know
# what the entry is so don't attempt to apply it.
@@ -2254,8 +2285,10 @@
bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
return True
except urlerr.httperror as e:
- ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
+ ui.warn(_('HTTP error fetching bundle: %s\n') %
+ util.forcebytestr(e))
except urlerr.urlerror as e:
- ui.warn(_('error fetching bundle: %s\n') % e.reason)
+ ui.warn(_('error fetching bundle: %s\n') %
+ util.forcebytestr(e.reason))
return False
--- a/mercurial/extensions.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/extensions.py Mon Mar 19 08:07:18 2018 -0700
@@ -122,6 +122,18 @@
if ui.debugflag:
ui.traceback()
+def _rejectunicode(name, xs):
+ if isinstance(xs, (list, set, tuple)):
+ for x in xs:
+ _rejectunicode(name, x)
+ elif isinstance(xs, dict):
+ for k, v in xs.items():
+ _rejectunicode(name, k)
+ _rejectunicode(b'%s.%s' % (name, util.forcebytestr(k)), v)
+ elif isinstance(xs, type(u'')):
+ raise error.ProgrammingError(b"unicode %r found in %s" % (xs, name),
+ hint="use b'' to make it byte string")
+
# attributes set by registrar.command
_cmdfuncattrs = ('norepo', 'optionalrepo', 'inferrepo')
@@ -134,19 +146,22 @@
"registrar.command to register '%s'" % c, '4.6')
missing = [a for a in _cmdfuncattrs if not util.safehasattr(f, a)]
if not missing:
- for option in e[1]:
- default = option[2]
- if isinstance(default, type(u'')):
- raise error.ProgrammingError(
- "option '%s.%s' has a unicode default value"
- % (c, option[1]),
- hint=("change the %s.%s default value to a "
- "non-unicode string" % (c, option[1])))
continue
raise error.ProgrammingError(
'missing attributes: %s' % ', '.join(missing),
hint="use @command decorator to register '%s'" % c)
+def _validatetables(ui, mod):
+ """Sanity check for loadable tables provided by extension module"""
+ for t in ['cmdtable', 'colortable', 'configtable']:
+ _rejectunicode(t, getattr(mod, t, {}))
+ for t in ['filesetpredicate', 'internalmerge', 'revsetpredicate',
+ 'templatefilter', 'templatefunc', 'templatekeyword']:
+ o = getattr(mod, t, None)
+ if o:
+ _rejectunicode(t, o._table)
+ _validatecmdtable(ui, getattr(mod, 'cmdtable', {}))
+
def load(ui, name, path):
if name.startswith('hgext.') or name.startswith('hgext/'):
shortname = name[6:]
@@ -168,7 +183,7 @@
ui.warn(_('(third party extension %s requires version %s or newer '
'of Mercurial; disabling)\n') % (shortname, minver))
return
- _validatecmdtable(ui, getattr(mod, 'cmdtable', {}))
+ _validatetables(ui, mod)
_extensions[shortname] = mod
_order.append(shortname)
@@ -195,11 +210,7 @@
try:
extsetup(ui)
except TypeError:
- # Try to use getfullargspec (Python 3) first, and fall
- # back to getargspec only if it doesn't exist so as to
- # avoid warnings.
- if getattr(inspect, 'getfullargspec',
- getattr(inspect, 'getargspec'))(extsetup).args:
+ if pycompat.getargspec(extsetup).args:
raise
extsetup() # old extsetup with no ui argument
except Exception as inst:
@@ -279,8 +290,8 @@
fileset,
revset,
templatefilters,
+ templatefuncs,
templatekw,
- templater,
)
# list of (objname, loadermod, loadername) tuple:
@@ -296,7 +307,7 @@
('internalmerge', filemerge, 'loadinternalmerge'),
('revsetpredicate', revset, 'loadpredicate'),
('templatefilter', templatefilters, 'loadfilter'),
- ('templatefunc', templater, 'loadfunction'),
+ ('templatefunc', templatefuncs, 'loadfunction'),
('templatekeyword', templatekw, 'loadkeyword'),
]
_loadextra(ui, newindex, extraloaders)
--- a/mercurial/fancyopts.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/fancyopts.py Mon Mar 19 08:07:18 2018 -0700
@@ -7,6 +7,7 @@
from __future__ import absolute_import
+import abc
import functools
from .i18n import _
@@ -201,6 +202,64 @@
parsedargs.extend(args[pos:])
return parsedopts, parsedargs
+class customopt(object):
+ """Manage defaults and mutations for any type of opt."""
+
+ __metaclass__ = abc.ABCMeta
+
+ def __init__(self, defaultvalue):
+ self.defaultvalue = defaultvalue
+
+ def _isboolopt(self):
+ return False
+
+ @abc.abstractmethod
+ def newstate(self, oldstate, newparam, abort):
+ """Adds newparam to oldstate and returns the new state.
+
+ On failure, abort can be called with a string error message."""
+
+class _simpleopt(customopt):
+ def _isboolopt(self):
+ return isinstance(self.defaultvalue, (bool, type(None)))
+
+ def newstate(self, oldstate, newparam, abort):
+ return newparam
+
+class _callableopt(customopt):
+ def __init__(self, callablefn):
+ self.callablefn = callablefn
+ super(_callableopt, self).__init__(None)
+
+ def newstate(self, oldstate, newparam, abort):
+ return self.callablefn(newparam)
+
+class _listopt(customopt):
+ def newstate(self, oldstate, newparam, abort):
+ oldstate.append(newparam)
+ return oldstate
+
+class _intopt(customopt):
+ def newstate(self, oldstate, newparam, abort):
+ try:
+ return int(newparam)
+ except ValueError:
+ abort(_('expected int'))
+
+def _defaultopt(default):
+ """Returns a default opt implementation, given a default value."""
+
+ if isinstance(default, customopt):
+ return default
+ elif callable(default):
+ return _callableopt(default)
+ elif isinstance(default, list):
+ return _listopt(default[:])
+ elif type(default) is type(1):
+ return _intopt(default)
+ else:
+ return _simpleopt(default)
+
def fancyopts(args, options, state, gnu=False, early=False, optaliases=None):
"""
read args, parse options, and store options in state
@@ -220,6 +279,7 @@
list - parameter string is added to a list
integer - parameter strings is stored as int
function - call function with parameter
+ customopt - subclass of 'customopt'
optaliases is a mapping from a canonical option name to a list of
additional long options. This exists for preserving backward compatibility
@@ -250,18 +310,13 @@
argmap['-' + short] = name
for n in onames:
argmap['--' + n] = name
- defmap[name] = default
+ defmap[name] = _defaultopt(default)
# copy defaults to state
- if isinstance(default, list):
- state[name] = default[:]
- elif callable(default):
- state[name] = None
- else:
- state[name] = default
+ state[name] = defmap[name].defaultvalue
# does it take a parameter?
- if not (default is None or default is True or default is False):
+ if not defmap[name]._isboolopt():
if short:
short += ':'
onames = [n + '=' for n in onames]
@@ -301,21 +356,13 @@
boolval = False
name = argmap[opt]
obj = defmap[name]
- t = type(obj)
- if callable(obj):
- state[name] = defmap[name](val)
- elif t is type(1):
- try:
- state[name] = int(val)
- except ValueError:
- raise error.Abort(_('invalid value %r for option %s, '
- 'expected int') % (val, opt))
- elif t is type(''):
- state[name] = val
- elif t is type([]):
- state[name].append(val)
- elif t is type(None) or t is type(False):
+ if obj._isboolopt():
state[name] = boolval
+ else:
+ def abort(s):
+ raise error.Abort(
+ _('invalid value %r for option %s, %s') % (val, opt, s))
+ state[name] = defmap[name].newstate(state[name], val, abort)
# return unparsed args
return args
--- a/mercurial/filemerge.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/filemerge.py Mon Mar 19 08:07:18 2018 -0700
@@ -7,8 +7,10 @@
from __future__ import absolute_import
+import contextlib
import os
import re
+import shutil
import tempfile
from .i18n import _
@@ -509,27 +511,37 @@
'for %s\n') % (tool, fcd.path()))
return False, 1, None
unused, unused, unused, back = files
- a = _workingpath(repo, fcd)
- b, c = _maketempfiles(repo, fco, fca)
- try:
- out = ""
+ localpath = _workingpath(repo, fcd)
+ with _maketempfiles(repo, fco, fca) as temppaths:
+ basepath, otherpath = temppaths
+ outpath = ""
+ mylabel, otherlabel = labels[:2]
+ if len(labels) >= 3:
+ baselabel = labels[2]
+ else:
+ baselabel = 'base'
env = {'HG_FILE': fcd.path(),
'HG_MY_NODE': short(mynode),
- 'HG_OTHER_NODE': str(fco.changectx()),
- 'HG_BASE_NODE': str(fca.changectx()),
+ 'HG_OTHER_NODE': short(fco.changectx().node()),
+ 'HG_BASE_NODE': short(fca.changectx().node()),
'HG_MY_ISLINK': 'l' in fcd.flags(),
'HG_OTHER_ISLINK': 'l' in fco.flags(),
'HG_BASE_ISLINK': 'l' in fca.flags(),
+ 'HG_MY_LABEL': mylabel,
+ 'HG_OTHER_LABEL': otherlabel,
+ 'HG_BASE_LABEL': baselabel,
}
ui = repo.ui
args = _toolstr(ui, tool, "args")
if "$output" in args:
# read input from backup, write to original
- out = a
- a = repo.wvfs.join(back.path())
- replace = {'local': a, 'base': b, 'other': c, 'output': out}
- args = util.interpolate(r'\$', replace, args,
+ outpath = localpath
+ localpath = repo.wvfs.join(back.path())
+ replace = {'local': localpath, 'base': basepath, 'other': otherpath,
+ 'output': outpath, 'labellocal': mylabel,
+ 'labelother': otherlabel, 'labelbase': baselabel}
+ args = util.interpolate(br'\$', replace, args,
lambda s: util.shellquote(util.localpath(s)))
cmd = toolpath + ' ' + args
if _toolbool(ui, tool, "gui"):
@@ -539,9 +551,6 @@
r = ui.system(cmd, cwd=repo.root, environ=env, blockedtag='mergetool')
repo.ui.debug('merge tool returned: %d\n' % r)
return True, r, False
- finally:
- util.unlink(b)
- util.unlink(c)
def _formatconflictmarker(ctx, template, label, pad):
"""Applies the given template to the ctx, prefixed by the label.
@@ -553,7 +562,7 @@
ctx = ctx.p1()
props = {'ctx': ctx}
- templateresult = template.render(props)
+ templateresult = template.renderdefault(props)
label = ('%s:' % label).ljust(pad + 1)
mark = '%s %s' % (label, templateresult)
@@ -566,7 +575,7 @@
_defaultconflictlabels = ['local', 'other']
-def _formatlabels(repo, fcd, fco, fca, labels):
+def _formatlabels(repo, fcd, fco, fca, labels, tool=None):
"""Formats the given labels using the conflict marker template.
Returns a list of formatted labels.
@@ -577,6 +586,8 @@
ui = repo.ui
template = ui.config('ui', 'mergemarkertemplate')
+ if tool is not None:
+ template = _toolstr(ui, tool, 'mergemarkertemplate', template)
template = templater.unquotestring(template)
tres = formatter.templateresources(ui, repo)
tmpl = formatter.maketemplater(ui, template, defaults=templatekw.keywords,
@@ -653,24 +664,42 @@
# the backup context regardless of where it lives.
return context.arbitraryfilectx(back, repo=repo)
+@contextlib.contextmanager
def _maketempfiles(repo, fco, fca):
"""Writes out `fco` and `fca` as temporary files, so an external merge
tool may use them.
"""
+ tmproot = None
+ tmprootprefix = repo.ui.config('experimental', 'mergetempdirprefix')
+ if tmprootprefix:
+ tmproot = tempfile.mkdtemp(prefix=tmprootprefix)
+
def temp(prefix, ctx):
fullbase, ext = os.path.splitext(ctx.path())
- pre = "%s~%s." % (os.path.basename(fullbase), prefix)
- (fd, name) = tempfile.mkstemp(prefix=pre, suffix=ext)
+ pre = "%s~%s" % (os.path.basename(fullbase), prefix)
+ if tmproot:
+ name = os.path.join(tmproot, pre)
+ if ext:
+ name += ext
+ f = open(name, r"wb")
+ else:
+ (fd, name) = tempfile.mkstemp(prefix=pre + '.', suffix=ext)
+ f = os.fdopen(fd, r"wb")
data = repo.wwritedata(ctx.path(), ctx.data())
- f = os.fdopen(fd, pycompat.sysstr("wb"))
f.write(data)
f.close()
return name
b = temp("base", fca)
c = temp("other", fco)
-
- return b, c
+ try:
+ yield b, c
+ finally:
+ if tmproot:
+ shutil.rmtree(tmproot)
+ else:
+ util.unlink(b)
+ util.unlink(c)
def _filemerge(premerge, repo, wctx, mynode, orig, fcd, fco, fca, labels=None):
"""perform a 3-way merge in the working directory
@@ -706,6 +735,7 @@
mergetype = func.mergetype
onfailure = func.onfailure
precheck = func.precheck
+ isexternal = False
else:
if wctx.isinmemory():
func = _xmergeimm
@@ -714,6 +744,7 @@
mergetype = fullmerge
onfailure = _("merging %s failed!\n")
precheck = None
+ isexternal = True
toolconf = tool, toolpath, binary, symlink
@@ -743,19 +774,42 @@
files = (None, None, None, back)
r = 1
try:
- markerstyle = ui.config('ui', 'mergemarkers')
+ internalmarkerstyle = ui.config('ui', 'mergemarkers')
+ if isexternal:
+ markerstyle = _toolstr(ui, tool, 'mergemarkers')
+ else:
+ markerstyle = internalmarkerstyle
+
if not labels:
labels = _defaultconflictlabels
+ formattedlabels = labels
if markerstyle != 'basic':
- labels = _formatlabels(repo, fcd, fco, fca, labels)
+ formattedlabels = _formatlabels(repo, fcd, fco, fca, labels,
+ tool=tool)
if premerge and mergetype == fullmerge:
- r = _premerge(repo, fcd, fco, fca, toolconf, files, labels=labels)
+ # conflict markers generated by premerge will use 'detailed'
+ # settings if either ui.mergemarkers or the tool's mergemarkers
+ # setting is 'detailed'. This way tools can have basic labels in
+ # space-constrained areas of the UI, but still get full information
+ # in conflict markers if premerge is 'keep' or 'keep-merge3'.
+ premergelabels = labels
+ labeltool = None
+ if markerstyle != 'basic':
+ # respect 'tool's mergemarkertemplate (which defaults to
+ # ui.mergemarkertemplate)
+ labeltool = tool
+ if internalmarkerstyle != 'basic' or markerstyle != 'basic':
+ premergelabels = _formatlabels(repo, fcd, fco, fca,
+ premergelabels, tool=labeltool)
+
+ r = _premerge(repo, fcd, fco, fca, toolconf, files,
+ labels=premergelabels)
# complete if premerge successful (r is 0)
return not r, r, False
needcheck, r, deleted = func(repo, mynode, orig, fcd, fco, fca,
- toolconf, files, labels=labels)
+ toolconf, files, labels=formattedlabels)
if needcheck:
r = _check(repo, r, ui, tool, fcd, files)
--- a/mercurial/fileset.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/fileset.py Mon Mar 19 08:07:18 2018 -0700
@@ -392,11 +392,10 @@
elif expr.startswith(">"):
a = util.sizetoint(expr[1:])
return lambda x: x > a
- elif expr[0].isdigit or expr[0] == '.':
+ else:
a = util.sizetoint(expr)
b = _sizetomax(expr)
return lambda x: x >= a and x <= b
- raise error.ParseError(_("couldn't parse size: %s") % expr)
@predicate('size(expression)', callexisting=True)
def size(mctx, x):
--- a/mercurial/formatter.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/formatter.py Mon Mar 19 08:07:18 2018 -0700
@@ -124,8 +124,10 @@
templatefilters,
templatekw,
templater,
+ templateutil,
util,
)
+from .utils import dateutil
pickle = util.pickle
@@ -175,10 +177,10 @@
def formatdate(self, date, fmt='%a %b %d %H:%M:%S %Y %1%2'):
'''convert date tuple to appropriate format'''
return self._converter.formatdate(date, fmt)
- def formatdict(self, data, key='key', value='value', fmt='%s=%s', sep=' '):
+ def formatdict(self, data, key='key', value='value', fmt=None, sep=' '):
'''convert dict or key-value pairs to appropriate dict format'''
return self._converter.formatdict(data, key, value, fmt, sep)
- def formatlist(self, data, name, fmt='%s', sep=' '):
+ def formatlist(self, data, name, fmt=None, sep=' '):
'''convert iterable to appropriate list format'''
# name is mandatory argument for now, but it could be optional if
# we have default template keyword, e.g. {item}
@@ -186,7 +188,7 @@
def context(self, **ctxs):
'''insert context objects to be used to render template keywords'''
ctxs = pycompat.byteskwargs(ctxs)
- assert all(k == 'ctx' for k in ctxs)
+ assert all(k in {'ctx', 'fctx'} for k in ctxs)
if self._converter.storecontext:
self._item.update(ctxs)
def data(self, **data):
@@ -243,15 +245,24 @@
@staticmethod
def formatdate(date, fmt):
'''stringify date tuple in the given format'''
- return util.datestr(date, fmt)
+ return dateutil.datestr(date, fmt)
@staticmethod
def formatdict(data, key, value, fmt, sep):
'''stringify key-value pairs separated by sep'''
- return sep.join(fmt % (k, v) for k, v in _iteritems(data))
+ prefmt = pycompat.identity
+ if fmt is None:
+ fmt = '%s=%s'
+ prefmt = pycompat.bytestr
+ return sep.join(fmt % (prefmt(k), prefmt(v))
+ for k, v in _iteritems(data))
@staticmethod
def formatlist(data, name, fmt, sep):
'''stringify iterable separated by sep'''
- return sep.join(fmt % e for e in data)
+ prefmt = pycompat.identity
+ if fmt is None:
+ fmt = '%s'
+ prefmt = pycompat.bytestr
+ return sep.join(fmt % prefmt(e) for e in data)
class plainformatter(baseformatter):
'''the default text output scheme'''
@@ -291,7 +302,7 @@
self._out = out
self._out.write("%s = [\n" % self._topic)
def _showitem(self):
- self._out.write(" " + repr(self._item) + ",\n")
+ self._out.write(' %s,\n' % pycompat.byterepr(self._item))
def end(self):
baseformatter.end(self)
self._out.write("]\n")
@@ -348,14 +359,15 @@
data = util.sortdict(_iteritems(data))
def f():
yield _plainconverter.formatdict(data, key, value, fmt, sep)
- return templatekw.hybriddict(data, key=key, value=value, fmt=fmt, gen=f)
+ return templateutil.hybriddict(data, key=key, value=value, fmt=fmt,
+ gen=f)
@staticmethod
def formatlist(data, name, fmt, sep):
'''build object that can be evaluated as either plain string or list'''
data = list(data)
def f():
yield _plainconverter.formatlist(data, name, fmt, sep)
- return templatekw.hybridlist(data, name=name, fmt=fmt, gen=f)
+ return templateutil.hybridlist(data, name=name, fmt=fmt, gen=f)
class templateformatter(baseformatter):
def __init__(self, ui, out, topic, opts):
@@ -383,19 +395,13 @@
return
ref = self._parts[part]
- # TODO: add support for filectx. probably each template keyword or
- # function will have to declare dependent resources. e.g.
- # @templatekeyword(..., requires=('ctx',))
props = {}
# explicitly-defined fields precede templatekw
props.update(item)
- if 'ctx' in item:
+ if 'ctx' in item or 'fctx' in item:
# but template resources must be always available
- props['repo'] = props['ctx'].repo()
props['revcache'] = {}
- props = pycompat.strkwargs(props)
- g = self._t(ref, **props)
- self._out.write(templater.stringify(g))
+ self._out.write(self._t.render(ref, props))
def end(self):
baseformatter.end(self)
@@ -491,14 +497,41 @@
def templateresources(ui, repo=None):
"""Create a dict of template resources designed for the default templatekw
and function"""
- return {
+ resmap = {
'cache': {}, # for templatekw/funcs to store reusable data
- 'ctx': None,
'repo': repo,
- 'revcache': None, # per-ctx cache; set later
'ui': ui,
}
+ def getsome(context, mapping, key):
+ v = mapping.get(key)
+ if v is not None:
+ return v
+ return resmap.get(key)
+
+ def getctx(context, mapping, key):
+ ctx = mapping.get('ctx')
+ if ctx is not None:
+ return ctx
+ fctx = mapping.get('fctx')
+ if fctx is not None:
+ return fctx.changectx()
+
+ def getrepo(context, mapping, key):
+ ctx = getctx(context, mapping, 'ctx')
+ if ctx is not None:
+ return ctx.repo()
+ return getsome(context, mapping, key)
+
+ return {
+ 'cache': getsome,
+ 'ctx': getctx,
+ 'fctx': getsome,
+ 'repo': getrepo,
+ 'revcache': getsome, # per-ctx cache; set later
+ 'ui': getsome,
+ }
+
def formatter(ui, out, topic, opts):
template = opts.get("template", "")
if template == "json":
--- a/mercurial/graphmod.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/graphmod.py Mon Mar 19 08:07:18 2018 -0700
@@ -454,7 +454,7 @@
if any(len(char) > 1 for char in edgemap.values()):
# limit drawing an edge to the first or last N lines of the current
# section the rest of the edge is drawn like a parent line.
- parent = state['styles'][PARENT][-1]
+ parent = state['styles'][PARENT][-1:]
def _drawgp(char, i):
# should a grandparent character be drawn for this line?
if len(char) < 2:
@@ -463,7 +463,7 @@
# either skip first num lines or take last num lines, based on sign
return -num <= i if num < 0 else (len(lines) - i) <= num
for i, line in enumerate(lines):
- line[:] = [c[-1] if _drawgp(c, i) else parent for c in line]
+ line[:] = [c[-1:] if _drawgp(c, i) else parent for c in line]
edgemap.update(
(e, (c if len(c) < 2 else parent)) for e, c in edgemap.items())
--- a/mercurial/hbisect.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/hbisect.py Mon Mar 19 08:07:18 2018 -0700
@@ -55,7 +55,7 @@
if (len(state['bad']) == 1 and len(state['good']) == 1 and
state['bad'] != state['good']):
raise error.Abort(_("starting revisions are not directly related"))
- raise error.Abort(_("inconsistent state, %s:%s is good and bad")
+ raise error.Abort(_("inconsistent state, %d:%s is good and bad")
% (badrev, short(bad)))
# build children dict
@@ -267,12 +267,6 @@
return None
-def shortlabel(label):
- if label:
- return label[0].upper()
-
- return None
-
def printresult(ui, repo, state, displayer, nodes, good):
if len(nodes) == 1:
# narrowed it down to a single revision
--- a/mercurial/help.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/help.py Mon Mar 19 08:07:18 2018 -0700
@@ -26,8 +26,8 @@
pycompat,
revset,
templatefilters,
+ templatefuncs,
templatekw,
- templater,
util,
)
from .hgweb import (
@@ -62,7 +62,8 @@
rst = loaddoc('extensions')(ui).splitlines(True)
rst.extend(listexts(
_('enabled extensions:'), extensions.enabled(), showdeprecated=True))
- rst.extend(listexts(_('disabled extensions:'), extensions.disabled()))
+ rst.extend(listexts(_('disabled extensions:'), extensions.disabled(),
+ showdeprecated=ui.verbose))
doc = ''.join(rst)
return doc
@@ -149,7 +150,7 @@
doclines = docs.splitlines()
if doclines:
summary = doclines[0]
- cmdname = cmd.partition('|')[0].lstrip('^')
+ cmdname = cmdutil.parsealiases(cmd)[0]
if filtercmd(ui, cmdname, kw, docs):
continue
results['commands'].append((cmdname, summary))
@@ -169,7 +170,7 @@
continue
for cmd, entry in getattr(mod, 'cmdtable', {}).iteritems():
if kw in cmd or (len(entry) > 2 and lowercontains(entry[2])):
- cmdname = cmd.partition('|')[0].lstrip('^')
+ cmdname = cmdutil.parsealiases(cmd)[0]
cmddoc = pycompat.getdoc(entry[0])
if cmddoc:
cmddoc = gettext(cmddoc).splitlines()[0]
@@ -196,6 +197,8 @@
return loader
internalstable = sorted([
+ (['bundle2'], _('Bundle2'),
+ loaddoc('bundle2', subdir='internals')),
(['bundles'], _('Bundles'),
loaddoc('bundles', subdir='internals')),
(['censor'], _('Censor'),
@@ -306,7 +309,7 @@
addtopicsymbols('revisions', '.. predicatesmarker', revset.symbols)
addtopicsymbols('templates', '.. keywordsmarker', templatekw.keywords)
addtopicsymbols('templates', '.. filtersmarker', templatefilters.filters)
-addtopicsymbols('templates', '.. functionsmarker', templater.funcs)
+addtopicsymbols('templates', '.. functionsmarker', templatefuncs.funcs)
addtopicsymbols('hgweb', '.. webcommandsmarker', webcommands.commands,
dedent=True)
@@ -327,7 +330,7 @@
# py3k fix: except vars can't be used outside the scope of the
# except block, nor can be used inside a lambda. python issue4617
prefix = inst.args[0]
- select = lambda c: c.lstrip('^').startswith(prefix)
+ select = lambda c: cmdutil.parsealiases(c)[0].startswith(prefix)
rst = helplist(select)
return rst
@@ -418,15 +421,18 @@
h = {}
cmds = {}
for c, e in commands.table.iteritems():
- f = c.partition("|")[0]
- if select and not select(f):
+ fs = cmdutil.parsealiases(c)
+ f = fs[0]
+ p = ''
+ if c.startswith("^"):
+ p = '^'
+ if select and not select(p + f):
continue
if (not select and name != 'shortlist' and
e[0].__module__ != commands.__name__):
continue
- if name == "shortlist" and not f.startswith("^"):
+ if name == "shortlist" and not p:
continue
- f = f.lstrip("^")
doc = pycompat.getdoc(e[0])
if filtercmd(ui, f, name, doc):
continue
@@ -434,7 +440,7 @@
if not doc:
doc = _("(no help text available)")
h[f] = doc.splitlines()[0].rstrip()
- cmds[f] = c.lstrip("^")
+ cmds[f] = '|'.join(fs)
rst = []
if not h:
--- a/mercurial/help/config.txt Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/help/config.txt Mon Mar 19 08:07:18 2018 -0700
@@ -1363,13 +1363,18 @@
``args``
The arguments to pass to the tool executable. You can refer to the
files being merged as well as the output file through these
- variables: ``$base``, ``$local``, ``$other``, ``$output``. The meaning
- of ``$local`` and ``$other`` can vary depending on which action is being
- performed. During and update or merge, ``$local`` represents the original
- state of the file, while ``$other`` represents the commit you are updating
- to or the commit you are merging with. During a rebase ``$local``
- represents the destination of the rebase, and ``$other`` represents the
- commit being rebased.
+ variables: ``$base``, ``$local``, ``$other``, ``$output``.
+
+ The meaning of ``$local`` and ``$other`` can vary depending on which action is
+ being performed. During an update or merge, ``$local`` represents the original
+ state of the file, while ``$other`` represents the commit you are updating to or
+ the commit you are merging with. During a rebase, ``$local`` represents the
+ destination of the rebase, and ``$other`` represents the commit being rebased.
+
+ Some operations define custom labels to assist with identifying the revisions,
+ accessible via ``$labellocal``, ``$labelother``, and ``$labelbase``. If custom
+ labels are not available, these will be ``local``, ``other``, and ``base``,
+ respectively.
(default: ``$local $base $other``)
``premerge``
@@ -1405,6 +1410,21 @@
``gui``
This tool requires a graphical interface to run. (default: False)
+``mergemarkers``
+ Controls whether the labels passed via ``$labellocal``, ``$labelother``, and
+ ``$labelbase`` are ``detailed`` (respecting ``mergemarkertemplate``) or
+ ``basic``. If ``premerge`` is ``keep`` or ``keep-merge3``, the conflict
+ markers generated during premerge will be ``detailed`` if either this option or
+ the corresponding option in the ``[ui]`` section is ``detailed``.
+ (default: ``basic``)
+
+``mergemarkertemplate``
+ This setting can be used to override ``mergemarkertemplate`` from the ``[ui]``
+ section on a per-tool basis; this applies to the ``$label``-prefixed variables
+ and to the conflict markers that are generated if ``premerge`` is ``keep`` or
+ ``keep-merge3``. See the corresponding variable in ``[ui]`` for more
+ information.
+
.. container:: windows
``regkey``
@@ -1564,8 +1584,7 @@
In this section description, 'profiling data' stands for the raw data
collected during profiling, while 'profiling report' stands for a
-statistical text report generated from the profiling data. The
-profiling is done using lsprof.
+statistical text report generated from the profiling data.
``enabled``
Enable the profiler.
@@ -1637,7 +1656,7 @@
Show at most this number of lines of drill-down info after each main entry.
This can help explain the difference between Total and Inline.
Specific to the ``ls`` instrumenting profiler.
- (default: 5)
+ (default: 0)
``showmin``
Minimum fraction of samples an entry must have for it to be displayed.
@@ -2120,6 +2139,8 @@
markers is different from the encoding of the merged files,
serious problems may occur.
+ Can be overridden per-merge-tool, see the ``[merge-tools]`` section.
+
``origbackuppath``
The path to a directory used to store generated .orig files. If the path is
not a directory, one will be created. If set, files stored in this
@@ -2506,6 +2527,9 @@
Values less than or equal to 0 always refresh.
(default: 20)
+``server-header``
+ Value for HTTP ``Server`` response header.
+
``staticurl``
Base URL to use for static files. If unset, static files (e.g. the
hgicon.png favicon) will be served by the CGI script itself. Use
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/help/internals/bundle2.txt Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,677 @@
+Bundle2 refers to a data format that is used for both on-disk storage
+and over-the-wire transfer of repository data and state.
+
+The data format allows the capture of multiple components of
+repository data. Contrast with the initial bundle format, which
+only captured *changegroup* data (and couldn't store bookmarks,
+phases, etc).
+
+Bundle2 is used for:
+
+* Transferring data from a repository (e.g. as part of an ``hg clone``
+ or ``hg pull`` operation).
+* Transferring data to a repository (e.g. as part of an ``hg push``
+ operation).
+* Storing data on disk (e.g. the result of an ``hg bundle``
+ operation).
+* Transferring the results of a repository operation (e.g. the
+ reply to an ``hg push`` operation).
+
+At its highest level, a bundle2 payload is a stream that begins
+with some metadata and consists of a series of *parts*, with each
+part describing repository data or state or the result of an
+operation. New bundle2 parts are introduced over time when there is
+a need to capture a new form of data. A *capabilities* mechanism
+exists to allow peers to understand which bundle2 parts the other
+understands.
+
+Stream Format
+=============
+
+A bundle2 payload consists of a magic string (``HG20``) followed by
+stream level parameters, followed by any number of payload *parts*.
+
+It may help to think of the stream level parameters as *headers* and the
+payload parts as the *body*.
+
+Stream Level Parameters
+-----------------------
+
+Following the magic string is data that defines parameters applicable to the
+entire payload.
+
+Stream level parameters begin with a 32-bit unsigned big-endian integer.
+The value of this integer defines the number of bytes of stream level
+parameters that follow.
+
+The *N* bytes of raw data contains a space separated list of parameters.
+Each parameter consists of a required name and an optional value.
+
+Parameters have the form ``<name>`` or ``<name>=<value>``.
+
+Both the parameter name and value are URL quoted.
+
+Names MUST start with a letter. If the first letter is lower case, the
+parameter is advisory and can safely be ignored. If the first letter
+is upper case, the parameter is mandatory and the handler MUST stop if
+it is unable to process it.
+
+Stream level parameters apply to the entire bundle2 payload. Lower-level
+options should go into a bundle2 part instead.
+
+The following stream level parameters are defined:
+
+compression
+ Compression format of payload data. ``GZ`` denotes zlib. ``BZ``
+ denotes bzip2. ``ZS`` denotes zstandard.
+
+ When defined, all bytes after the stream level parameters are
+ compressed using the compression format defined by this parameter.
+
+ If this parameter isn't present, data is raw/uncompressed.
+
+ This parameter MUST be mandatory because attempting to consume
+ streams without knowing how to decode the underlying bytes will
+ result in errors.
+
+Payload Part
+------------
+
+Following the stream level parameters are 0 or more payload parts. Each
+payload part consists of a header and a body.
+
+The payload part header consists of a 32-bit unsigned big-endian integer
+defining the number of bytes in the header that follow. The special
+value ``0`` indicates the end of the bundle2 stream.
+
+The binary format of the part header is as follows:
+
+* 8-bit unsigned size of the part name
+* N-bytes alphanumeric part name
+* 32-bit unsigned big-endian part ID
+* N bytes part parameter data
+
+The *part name* identifies the type of the part. A part name with an
+UPPERCASE letter is mandatory. Otherwise, the part is advisory. A
+consumer should abort if it encounters a mandatory part it doesn't know
+how to process. See the sections below for each defined part type.
+
+The *part ID* is a unique identifier within the bundle used to refer to a
+specific part. It should be unique within the bundle2 payload.
+
+Part parameter data consists of:
+
+* 1 byte number of mandatory parameters
+* 1 byte number of advisory parameters
+* 2 * N bytes of sizes of parameter key and values
+* N * M blobs of values for parameter key and values
+
+Following the 2 bytes of mandatory and advisory parameter counts are
+2-tuples of bytes of the sizes of each parameter. e.g.
+(<key size>, <value size>).
+
+Following that are the raw values, without padding. Mandatory parameters
+come first, followed by advisory parameters.
+
+Each parameter's key MUST be unique within the part.
+
+Following the part parameter data is the part payload. The part payload
+consists of a series of framed chunks. The frame header is a 32-bit
+big-endian integer defining the size of the chunk. The N bytes of raw
+payload data follows.
+
+The part payload consists of 0 or more chunks.
+
+A chunk with size ``0`` denotes the end of the part payload. Therefore,
+there will always be at least 1 32-bit integer following the payload
+part header.
+
+A chunk size of ``-1`` is used to signal an *interrupt*. If such a chunk
+size is seen, the stream processor should process the next bytes as a new
+payload part. After this payload part, processing of the original,
+interrupted part should resume.
+
+Capabilities
+============
+
+Bundle2 is a dynamic format that can evolve over time. For example,
+when a new repository data concept is invented, a new bundle2 part
+is typically invented to hold that data. In addition, parts performing
+similar functionality may come into existence if there is a better
+mechanism for performing certain functionality.
+
+Because the bundle2 format evolves over time, peers need to understand
+what bundle2 features the other can understand. The *capabilities*
+mechanism is how those features are expressed.
+
+Bundle2 capabilities are logically expressed as a dictionary of
+string key-value pairs where the keys are strings and the values
+are lists of strings.
+
+Capabilities are encoded for exchange between peers. The encoded
+capabilities blob consists of a newline (``\n``) delimited list of
+entries. Each entry has the form ``<key>`` or ``<key>=<value>``,
+depending if the capability has a value.
+
+The capability name is URL quoted (``%XX`` encoding of URL unsafe
+characters).
+
+The value, if present, is formed by URL quoting each value in
+the capability list and concatenating the result with a comma (``,``).
+
+For example, the capabilities ``novaluekey`` and ``listvaluekey``
+with values ``value 1`` and ``value 2``. This would be encoded as:
+
+ listvaluekey=value%201,value%202\nnovaluekey
+
+The sections below detail the defined bundle2 capabilities.
+
+HG20
+----
+
+Denotes that the peer supports the bundle2 data format.
+
+bookmarks
+---------
+
+Denotes that the peer supports the ``bookmarks`` part.
+
+Peers should not issue mandatory ``bookmarks`` parts unless this
+capability is present.
+
+changegroup
+-----------
+
+Denotes which versions of the *changegroup* format the peer can
+receive. Values include ``01``, ``02``, and ``03``.
+
+The peer should not generate changegroup data for a version not
+specified by this capability.
+
+checkheads
+----------
+
+Denotes which forms of heads checking the peer supports.
+
+If ``related`` is in the value, then the peer supports the ``check:heads``
+part and the peer is capable of detecting race conditions when applying
+changelog data.
+
+digests
+-------
+
+Denotes which hashing formats the peer supports.
+
+Values are names of hashing functions. Values include ``md5``, ``sha1``,
+and ``sha512``.
+
+error
+-----
+
+Denotes which ``error:`` parts the peer supports.
+
+Value is a list of strings of ``error:`` part names. Valid values
+include ``abort``, ``unsupportedcontent``, ``pushraced``, and ``pushkey``.
+
+Peers should not issue an ``error:`` part unless the type of that
+part is listed as supported by this capability.
+
+listkeys
+--------
+
+Denotes that the peer supports the ``listkeys`` part.
+
+hgtagsfnodes
+------------
+
+Denotes that the peer supports the ``hgtagsfnodes`` part.
+
+obsmarkers
+----------
+
+Denotes that the peer supports the ``obsmarker`` part and which versions
+of the obsolescence data format it can receive. Values are strings like
+``V<N>``. e.g. ``V1``.
+
+phases
+------
+
+Denotes that the peer supports the ``phases`` part.
+
+pushback
+--------
+
+Denotes that the peer supports sending/receiving bundle2 data in response
+to a bundle2 request.
+
+This capability is typically used by servers that employ server-side
+rewriting of pushed repository data. For example, a server may wish to
+automatically rebase pushed changesets. When this capability is present,
+the server can send a bundle2 response containing the rewritten changeset
+data and the client will apply it.
+
+pushkey
+-------
+
+Denotes that the peer supports the ``pushkey`` part.
+
+remote-changegroup
+------------------
+
+Denotes that the peer supports the ``remote-changegroup`` part and
+which protocols it can use to fetch remote changegroup data.
+
+Values are protocol names. e.g. ``http`` and ``https``.
+
+stream
+------
+
+Denotes that the peer supports ``stream*`` parts in order to support
+*stream clone*.
+
+Values are which ``stream*`` parts the peer supports. ``v2`` denotes
+support for the ``stream2`` part.
+
+Bundle2 Part Types
+==================
+
+The sections below detail the various bundle2 part types.
+
+bookmarks
+---------
+
+The ``bookmarks`` part holds bookmarks information.
+
+This part has no parameters.
+
+The payload consists of entries defining bookmarks. Each entry consists of:
+
+* 20 bytes binary changeset node.
+* 2 bytes big endian short defining bookmark name length.
+* N bytes defining bookmark name.
+
+Receivers typically update bookmarks to match the state specified in
+this part.
+
+changegroup
+-----------
+
+The ``changegroup`` part contains *changegroup* data (changelog, manifestlog,
+and filelog revision data).
+
+The following part parameters are defined for this part.
+
+version
+ Changegroup version string. e.g. ``01``, ``02``, and ``03``. This parameter
+ determines how to interpret the changegroup data within the part.
+
+nbchanges
+ The number of changesets in this changegroup. This parameter can be used
+ to aid in the display of progress bars, etc during part application.
+
+treemanifest
+ Whether the changegroup contains tree manifests.
+
+targetphase
+ The target phase of changesets in this part. Value is an integer of
+ the target phase.
+
+The payload of this part is raw changegroup data. See
+:hg:`help internals.changegroups` for the format of changegroup data.
+
+check:bookmarks
+---------------
+
+The ``check:bookmarks`` part is inserted into a bundle as a means for the
+receiver to validate that the sender's known state of bookmarks matches
+the receiver's.
+
+This part has no parameters.
+
+The payload is a binary stream of bookmark data. Each entry in the stream
+consists of:
+
+* 20 bytes binary node that bookmark is associated with
+* 2 bytes unsigned short defining length of bookmark name
+* N bytes containing the bookmark name
+
+If all bits in the node value are ``1``, then this signifies a missing
+bookmark.
+
+When the receiver encounters this part, for each bookmark in the part
+payload, it should validate that the current bookmark state matches
+the specified state. If it doesn't, then the receiver should take
+appropriate action. (In the case of pushes, this mismatch signifies
+a race condition and the receiver should consider rejecting the push.)
+
+check:heads
+-----------
+
+The ``check:heads`` part is a means to validate that the sender's state
+of DAG heads matches the receiver's.
+
+This part has no parameters.
+
+The body of this part is an array of 20 byte binary nodes representing
+changeset heads.
+
+Receivers should compare the set of heads defined in this part to the
+current set of repo heads and take action if there is a mismatch in that
+set.
+
+Note that this part applies to *all* heads in the repo.
+
+check:phases
+------------
+
+The ``check:phases`` part validates that the sender's state of phase
+boundaries matches the receiver's.
+
+This part has no parameters.
+
+The payload consists of an array of 24 byte entries. Each entry is
+a big endian 32-bit integer defining the phase integer and 20 byte
+binary node value.
+
+For each changeset defined in this part, the receiver should validate
+that its current phase matches the phase defined in this part. The
+receiver should take appropriate action if a mismatch occurs.
+
+check:updated-heads
+-------------------
+
+The ``check:updated-heads`` part validates that the sender's state of
+DAG heads updated by this bundle matches the receiver's.
+
+This type is nearly identical to ``check:heads`` except the heads
+in the payload are only a subset of heads in the repository. The
+receiver should validate that all nodes specified by the sender are
+branch heads and take appropriate action if not.
+
+error:abort
+-----------
+
+The ``error:abort`` part conveys a fatal error.
+
+The following part parameters are defined:
+
+message
+ The string content of the error message.
+
+hint
+ Supplemental string giving a hint on how to fix the problem.
+
+error:pushkey
+-------------
+
+The ``error:pushkey`` part conveys an error in the *pushkey* protocol.
+
+The following part parameters are defined:
+
+namespace
+ The pushkey domain that exhibited the error.
+
+key
+ The key whose update failed.
+
+new
+ The value we tried to set the key to.
+
+old
+ The old value of the key (as supplied by the client).
+
+ret
+ The integer result code for the pushkey request.
+
+in-reply-to
+ Part ID that triggered this error.
+
+This part is generated if there was an error applying *pushkey* data.
+Pushkey data includes bookmarks, phases, and obsolescence markers.
+
+error:pushraced
+---------------
+
+The ``error:pushraced`` part conveys that an error occurred and
+the likely cause is losing a race with another pusher.
+
+The following part parameters are defined:
+
+message
+ String error message.
+
+This part is typically emitted when a receiver examining ``check:*``
+parts encountered inconsistency between incoming state and local state.
+The likely cause of that inconsistency is another repository change
+operation (often another client performing an ``hg push``).
+
+error:unsupportedcontent
+------------------------
+
+The ``error:unsupportedcontent`` part conveys that a bundle2 receiver
+encountered a part or content it was not able to handle.
+
+The following part parameters are defined:
+
+parttype
+ The name of the part that triggered this error.
+
+params
+ ``\0`` delimited list of parameters.
+
+hgtagsfnodes
+------------
+
+The ``hgtagsfnodes`` type defines file nodes for the ``.hgtags`` file
+for various changesets.
+
+This part has no parameters.
+
+The payload is an array of pairs of 20 byte binary nodes. The first node
+is a changeset node. The second node is the ``.hgtags`` file node.
+
+Resolving tags requires resolving the ``.hgtags`` file node for changesets.
+On large repositories, this can be expensive. Repositories cache the
+mapping of changeset to ``.hgtags`` file node on disk as a performance
+optimization. This part allows that cached data to be transferred alongside
+changeset data.
+
+Receivers should update their ``.hgtags`` cache file node mappings with
+the incoming data.
+
+listkeys
+--------
+
+The ``listkeys`` part holds content for a *pushkey* namespace.
+
+The following part parameters are defined:
+
+namespace
+ The pushkey domain this data belongs to.
+
+The part payload contains a newline (``\n``) delimited list of
+tab (``\t``) delimited key-value pairs defining entries in this pushkey
+namespace.
+
+obsmarkers
+----------
+
+The ``obsmarkers`` part defines obsolescence markers.
+
+This part has no parameters.
+
+The payload consists of obsolescence markers using the on-disk markers
+format. The first byte defines the version format.
+
+The receiver should apply the obsolescence markers defined in this
+part. A ``reply:obsmarkers`` part should be sent to the sender, if possible.
+
+output
+------
+
+The ``output`` part is used to display output on the receiver.
+
+This part has no parameters.
+
+The payload consists of raw data to be printed on the receiver.
+
+phase-heads
+-----------
+
+The ``phase-heads`` part defines phase boundaries.
+
+This part has no parameters.
+
+The payload consists of an array of 24 byte entries. Each entry is
+a big endian 32-bit integer defining the phase integer and 20 byte
+binary node value.
+
+pushkey
+-------
+
+The ``pushkey`` part communicates an intent to perform a ``pushkey``
+request.
+
+The following part parameters are defined:
+
+namespace
+ The pushkey domain to operate on.
+
+key
+ The key within the pushkey namespace that is being changed.
+
+old
+ The old value for the key being changed.
+
+new
+ The new value for the key being changed.
+
+This part has no payload.
+
+The receiver should perform a pushkey operation as described by this
+part's parameters.
+
+If the pushkey operation fails, a ``reply:pushkey`` part should be sent
+back to the sender, if possible. The ``in-reply-to`` part parameter
+should reference the source part.
+
+pushvars
+--------
+
+The ``pushvars`` part defines environment variables that should be
+set when processing this bundle2 payload.
+
+The part's advisory parameters define environment variables.
+
+There is no part payload.
+
+When received, part parameters are prefixed with ``USERVAR_`` and the
+resulting variables are defined in the hooks context for the current
+bundle2 application. This part provides a mechanism for senders to
+inject extra state into the hook execution environment on the receiver.
+
+remote-changegroup
+------------------
+
+The ``remote-changegroup`` part defines an external location of a bundle
+to apply. This part can be used by servers to serve pre-generated bundles
+hosted at arbitrary URLs.
+
+The following part parameters are defined:
+
+url
+ The URL of the remote bundle.
+
+size
+ The size in bytes of the remote bundle.
+
+digests
+ A space separated list of the digest types provided in additional
+ part parameters.
+
+digest:<type>
+ The hexadecimal representation of the digest (hash) of the remote bundle.
+
+There is no payload for this part type.
+
+When encountered, clients should attempt to fetch the URL being advertised
+and read and apply it as a bundle.
+
+The ``size`` and ``digest:<type>`` parameters should be used to validate
+that the downloaded bundle matches what was advertised. If a mismatch occurs,
+the client should abort.
+
+reply:changegroup
+-----------------
+
+The ``reply:changegroup`` part conveys the results of application of a
+``changegroup`` part.
+
+The following part parameters are defined:
+
+return
+ Integer return code from changegroup application.
+
+in-reply-to
+ Part ID of part this reply is in response to.
+
+reply:obsmarkers
+----------------
+
+The ``reply:obsmarkers`` part conveys the results of applying an
+``obsmarkers`` part.
+
+The following part parameters are defined:
+
+new
+ The integer number of new markers that were applied.
+
+in-reply-to
+ The part ID that this part is in reply to.
+
+reply:pushkey
+-------------
+
+The ``reply:pushkey`` part conveys the result of a *pushkey* operation.
+
+The following part parameters are defined:
+
+return
+ Integer result code from pushkey operation.
+
+in-reply-to
+ Part ID that triggered this pushkey operation.
+
+This part has no payload.
+
+replycaps
+---------
+
+The ``replycaps`` part notifies the receiver that a reply bundle should
+be created.
+
+This part has no parameters.
+
+The payload consists of a bundle2 capabilities blob.
+
+stream2
+-------
+
+The ``stream2`` part contains *streaming clone* version 2 data.
+
+The following part parameters are defined:
+
+requirements
+ URL quoted repository requirements string. Requirements are delimited by a
+ comma (``,``).
+
+filecount
+ The total number of files being transferred in the payload.
+
+bytecount
+ The total size of file content being transferred in the payload.
+
+The payload consists of raw stream clone version 2 data.
+
+The ``filecount`` and ``bytecount`` parameters can be used for progress and
+reporting purposes. The values may not be exact.
--- a/mercurial/help/internals/bundles.txt Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/help/internals/bundles.txt Mon Mar 19 08:07:18 2018 -0700
@@ -63,8 +63,7 @@
``HG20`` is currently the only defined bundle2 version.
-The ``HG20`` format is not yet documented here. See the inline comments
-in ``mercurial/exchange.py`` for now.
+The ``HG20`` format is documented at :hg:`help internals.bundle2`.
Initial ``HG20`` support was added in Mercurial 3.0 (released May
2014). However, bundle2 bundles were hidden behind an experimental flag
--- a/mercurial/help/internals/requirements.txt Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/help/internals/requirements.txt Mon Mar 19 08:07:18 2018 -0700
@@ -1,4 +1,3 @@
-
Repositories contain a file (``.hg/requires``) containing a list of
features/capabilities that are *required* for clients to interface
with the repository. This file has been present in Mercurial since
@@ -105,8 +104,10 @@
Denotes that version 2 of manifests are being used.
Support for this requirement was added in Mercurial 3.4 (released
-May 2015). The requirement is currently experimental and is disabled
-by default.
+May 2015). The new format failed to meet expectations and support
+for the format and requirement were removed in Mercurial 4.6
+(released May 2018) since the feature never graduated from experimental
+status.
treemanifest
============
--- a/mercurial/help/internals/wireprotocol.txt Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/help/internals/wireprotocol.txt Mon Mar 19 08:07:18 2018 -0700
@@ -10,11 +10,43 @@
The protocol is synchronous and does not support multiplexing (concurrent
commands).
-Transport Protocols
-===================
+Handshake
+=========
+
+It is required or common for clients to perform a *handshake* when connecting
+to a server. The handshake serves the following purposes:
+
+* Negotiating protocol/transport level options
+* Allows the client to learn about server capabilities to influence
+ future requests
+* Ensures the underlying transport channel is in a *clean* state
-HTTP Transport
---------------
+An important goal of the handshake is to allow clients to use more modern
+wire protocol features. By default, clients must assume they are talking
+to an old version of Mercurial server (possibly even the very first
+implementation). So, clients should not attempt to call or utilize modern
+wire protocol features until they have confirmation that the server
+supports them. The handshake implementation is designed to allow both
+ends to utilize the latest set of features and capabilities with as
+few round trips as possible.
+
+The handshake mechanism varies by transport and protocol and is documented
+in the sections below.
+
+HTTP Protocol
+=============
+
+Handshake
+---------
+
+The client sends a ``capabilities`` command request (``?cmd=capabilities``)
+as soon as HTTP requests may be issued.
+
+The server responds with a capabilities string, which the client parses to
+learn about the server's abilities.
+
+HTTP Version 1 Transport
+------------------------
Commands are issued as HTTP/1.0 or HTTP/1.1 requests. Commands are
sent to the base URL of the repository with the command name sent in
@@ -112,11 +144,175 @@
``application/mercurial-0.*`` media type and the HTTP response is typically
using *chunked transfer* (``Transfer-Encoding: chunked``).
-SSH Transport
-=============
+SSH Protocol
+============
+
+Handshake
+---------
+
+For all clients, the handshake consists of the client sending 1 or more
+commands to the server using version 1 of the transport. Servers respond
+to commands they know how to respond to and send an empty response (``0\n``)
+for unknown commands (per standard behavior of version 1 of the transport).
+Clients then typically look for a response to the newest sent command to
+determine which transport version to use and what the available features for
+the connection and server are.
+
+Preceding any response from client-issued commands, the server may print
+non-protocol output. It is common for SSH servers to print banners, message
+of the day announcements, etc. when clients connect. It is assumed that any
+such *banner* output will precede any Mercurial server output. So clients
+must be prepared to handle server output on initial connect that isn't
+in response to any client-issued command and doesn't conform to Mercurial's
+wire protocol. This *banner* output should only be on stdout. However,
+some servers may send output on stderr.
+
+Pre 0.9.1 clients issue a ``between`` command with the ``pairs`` argument
+having the value
+``0000000000000000000000000000000000000000-0000000000000000000000000000000000000000``.
+
+The ``between`` command has been supported since the original Mercurial
+SSH server. Requesting the empty range will return a ``\n`` string response,
+which will be encoded as ``1\n\n`` (value length of ``1`` followed by a newline
+followed by the value, which happens to be a newline).
+
+For pre 0.9.1 clients and all servers, the exchange looks like::
+
+ c: between\n
+ c: pairs 81\n
+ c: 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ s: 1\n
+ s: \n
+
+0.9.1+ clients send a ``hello`` command (with no arguments) before the
+``between`` command. The response to this command allows clients to
+discover server capabilities and settings.
+
+An example exchange between 0.9.1+ clients and a ``hello`` aware server looks
+like::
+
+ c: hello\n
+ c: between\n
+ c: pairs 81\n
+ c: 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ s: 324\n
+ s: capabilities: lookup changegroupsubset branchmap pushkey known getbundle ...\n
+ s: 1\n
+ s: \n
+
+And a similar scenario but with servers sending a banner on connect::
+
+ c: hello\n
+ c: between\n
+ c: pairs 81\n
+ c: 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ s: welcome to the server\n
+ s: if you find any issues, email someone@somewhere.com\n
+ s: 324\n
+ s: capabilities: lookup changegroupsubset branchmap pushkey known getbundle ...\n
+ s: 1\n
+ s: \n
+
+Note that output from the ``hello`` command is terminated by a ``\n``. This is
+part of the response payload and not part of the wire protocol adding a newline
+after responses. In other words, the length of the response contains the
+trailing ``\n``.
+
+Clients supporting version 2 of the SSH transport send a line beginning
+with ``upgrade`` before the ``hello`` and ``between`` commands. The line
+(which isn't a well-formed command line because it doesn't consist of a
+single command name) serves to both communicate the client's intent to
+switch to transport version 2 (transports are version 1 by default) as
+well as to advertise the client's transport-level capabilities so the
+server may satisfy that request immediately.
+
+The upgrade line has the form:
-The SSH transport is a custom text-based protocol suitable for use over any
-bi-directional stream transport. It is most commonly used with SSH.
+ upgrade <token> <transport capabilities>
+
+That is the literal string ``upgrade`` followed by a space, followed by
+a randomly generated string, followed by a space, followed by a string
+denoting the client's transport capabilities.
+
+The token can be anything. However, a random UUID is recommended. (Use
+of version 4 UUIDs is recommended because version 1 UUIDs can leak the
+client's MAC address.)
+
+The transport capabilities string is a URL/percent encoded string
+containing key-value pairs defining the client's transport-level
+capabilities. The following capabilities are defined:
+
+proto
+ A comma-delimited list of transport protocol versions the client
+ supports. e.g. ``ssh-v2``.
+
+If the server does not recognize the ``upgrade`` line, it should issue
+an empty response and continue processing the ``hello`` and ``between``
+commands. Here is an example handshake between a version 2 aware client
+and a non version 2 aware server:
+
+ c: upgrade 2e82ab3f-9ce3-4b4e-8f8c-6fd1c0e9e23a proto=ssh-v2
+ c: hello\n
+ c: between\n
+ c: pairs 81\n
+ c: 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ s: 0\n
+ s: 324\n
+ s: capabilities: lookup changegroupsubset branchmap pushkey known getbundle ...\n
+ s: 1\n
+ s: \n
+
+(The initial ``0\n`` line from the server indicates an empty response to
+the unknown ``upgrade ...`` command/line.)
+
+If the server recognizes the ``upgrade`` line and is willing to satisfy that
+upgrade request, it replies with a payload of the following form:
+
+ upgraded <token> <transport name>\n
+
+This line is the literal string ``upgraded``, a space, the token that was
+specified by the client in its ``upgrade ...`` request line, a space, and the
+name of the transport protocol that was chosen by the server. The transport
+name MUST match one of the names the client specified in the ``proto`` field
+of its ``upgrade ...`` request line.
+
+If a server issues an ``upgraded`` response, it MUST also read and ignore
+the lines associated with the ``hello`` and ``between`` command requests
+that were issued by the client. It is assumed that the negotiated transport
+will respond with equivalent requested information following the transport
+handshake.
+
+All data following the ``\n`` terminating the ``upgraded`` line is the
+domain of the negotiated transport. It is common for the data immediately
+following to contain additional metadata about the state of the transport and
+the server. However, this isn't strictly speaking part of the transport
+handshake and isn't covered by this section.
+
+Here is an example handshake between a version 2 aware client and a version
+2 aware server:
+
+ c: upgrade 2e82ab3f-9ce3-4b4e-8f8c-6fd1c0e9e23a proto=ssh-v2
+ c: hello\n
+ c: between\n
+ c: pairs 81\n
+ c: 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ s: upgraded 2e82ab3f-9ce3-4b4e-8f8c-6fd1c0e9e23a ssh-v2\n
+ s: <additional transport specific data>
+
+The client-issued token that is echoed in the response provides a more
+resilient mechanism for differentiating *banner* output from Mercurial
+output. In version 1, properly formatted banner output could get confused
+for Mercurial server output. By submitting a randomly generated token
+that is then present in the response, the client can look for that token
+in response lines and have reasonable certainty that the line did not
+originate from a *banner* message.
+
+SSH Version 1 Transport
+-----------------------
+
+The SSH transport (version 1) is a custom text-based protocol suitable for
+use over any bi-directional stream transport. It is most commonly used with
+SSH.
A SSH transport server can be started with ``hg serve --stdio``. The stdin,
stderr, and stdout file descriptors of the started process are used to exchange
@@ -174,6 +370,31 @@
The server terminates if it receives an empty command (a ``\n`` character).
+SSH Version 2 Transport
+-----------------------
+
+**Experimental**
+
+Version 2 of the SSH transport behaves identically to version 1 of the SSH
+transport with the exception of handshake semantics. See above for how
+version 2 of the SSH transport is negotiated.
+
+Immediately following the ``upgraded`` line signaling a switch to version
+2 of the SSH protocol, the server automatically sends additional details
+about the capabilities of the remote server. This has the form:
+
+ <integer length of value>\n
+ capabilities: ...\n
+
+e.g.
+
+ s: upgraded 2e82ab3f-9ce3-4b4e-8f8c-6fd1c0e9e23a ssh-v2\n
+ s: 240\n
+ s: capabilities: known getbundle batch ...\n
+
+Following capabilities advertisement, the peers communicate using version
+1 of the SSH transport.
+
Capabilities
============
@@ -463,53 +684,6 @@
reflects the priority/preference of that type, where the first value is the
most preferred type.
-Handshake Protocol
-==================
-
-While not explicitly required, it is common for clients to perform a
-*handshake* when connecting to a server. The handshake accomplishes 2 things:
-
-* Obtaining capabilities and other server features
-* Flushing extra server output (e.g. SSH servers may print extra text
- when connecting that may confuse the wire protocol)
-
-This isn't a traditional *handshake* as far as network protocols go because
-there is no persistent state as a result of the handshake: the handshake is
-simply the issuing of commands and commands are stateless.
-
-The canonical clients perform a capabilities lookup at connection establishment
-time. This is because clients must assume a server only supports the features
-of the original Mercurial server implementation until proven otherwise (from
-advertised capabilities). Nearly every server running today supports features
-that weren't present in the original Mercurial server implementation. Rather
-than wait for a client to perform functionality that needs to consult
-capabilities, it issues the lookup at connection start to avoid any delay later.
-
-For HTTP servers, the client sends a ``capabilities`` command request as
-soon as the connection is established. The server responds with a capabilities
-string, which the client parses.
-
-For SSH servers, the client sends the ``hello`` command (no arguments)
-and a ``between`` command with the ``pairs`` argument having the value
-``0000000000000000000000000000000000000000-0000000000000000000000000000000000000000``.
-
-The ``between`` command has been supported since the original Mercurial
-server. Requesting the empty range will return a ``\n`` string response,
-which will be encoded as ``1\n\n`` (value length of ``1`` followed by a newline
-followed by the value, which happens to be a newline).
-
-The ``hello`` command was later introduced. Servers supporting it will issue
-a response to that command before sending the ``1\n\n`` response to the
-``between`` command. Servers not supporting ``hello`` will send an empty
-response (``0\n``).
-
-In addition to the expected output from the ``hello`` and ``between`` commands,
-servers may also send other output, such as *message of the day (MOTD)*
-announcements. Clients assume servers will send this output before the
-Mercurial server replies to the client-issued commands. So any server output
-not conforming to the expected command responses is assumed to be not related
-to Mercurial and can be ignored.
-
Content Negotiation
===================
@@ -519,8 +693,8 @@
well-defined response type and only certain commands needed to support
functionality like compression.
-Currently, only the HTTP transport supports content negotiation at the protocol
-layer.
+Currently, only the HTTP version 1 transport supports content negotiation
+at the protocol layer.
HTTP requests advertise supported response formats via the ``X-HgProto-<N>``
request header, where ``<N>`` is an integer starting at 1 allowing the logical
@@ -662,6 +836,8 @@
This command does not accept any arguments. Return type is a ``string``.
+This command was introduced in Mercurial 0.9.1 (released July 2006).
+
changegroup
-----------
@@ -737,7 +913,7 @@
Boolean indicating whether phases data is requested.
The return type on success is a ``stream`` where the value is bundle.
-On the HTTP transport, the response is zlib compressed.
+On the HTTP version 1 transport, the response is zlib compressed.
If an error occurs, a generic error response can be sent.
@@ -779,6 +955,8 @@
This command does not accept any arguments. The return type is a ``string``.
+This command was introduced in Mercurial 0.9.1 (released July 2006).
+
listkeys
--------
@@ -838,13 +1016,14 @@
The return type is a ``string``. The value depends on the transport protocol.
-The SSH transport sends a string encoded integer followed by a newline
-(``\n``) which indicates operation result. The server may send additional
-output on the ``stderr`` stream that should be displayed to the user.
+The SSH version 1 transport sends a string encoded integer followed by a
+newline (``\n``) which indicates operation result. The server may send
+additional output on the ``stderr`` stream that should be displayed to the
+user.
-The HTTP transport sends a string encoded integer followed by a newline
-followed by additional server output that should be displayed to the user.
-This may include output from hooks, etc.
+The HTTP version 1 transport sends a string encoded integer followed by a
+newline followed by additional server output that should be displayed to
+the user. This may include output from hooks, etc.
The integer result varies by namespace. ``0`` means an error has occurred
and there should be additional output to display to the user.
@@ -908,18 +1087,18 @@
The encoding of the ``push response`` type varies by transport.
-For the SSH transport, this type is composed of 2 ``string`` responses: an
-empty response (``0\n``) followed by the integer result value. e.g.
-``1\n2``. So the full response might be ``0\n1\n2``.
+For the SSH version 1 transport, this type is composed of 2 ``string``
+responses: an empty response (``0\n``) followed by the integer result value.
+e.g. ``1\n2``. So the full response might be ``0\n1\n2``.
-For the HTTP transport, the response is a ``string`` type composed of an
-integer result value followed by a newline (``\n``) followed by string
+For the HTTP version 1 transport, the response is a ``string`` type composed
+of an integer result value followed by a newline (``\n``) followed by string
content holding server output that should be displayed on the client (output
hooks, etc).
In some cases, the server may respond with a ``bundle2`` bundle. In this
-case, the response type is ``stream``. For the HTTP transport, the response
-is zlib compressed.
+case, the response type is ``stream``. For the HTTP version 1 transport, the
+response is zlib compressed.
The server may also respond with a generic error type, which contains a string
indicating the failure.
--- a/mercurial/hg.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/hg.py Mon Mar 19 08:07:18 2018 -0700
@@ -12,6 +12,7 @@
import hashlib
import os
import shutil
+import stat
from .i18n import _
from .node import (
@@ -31,6 +32,7 @@
httppeer,
localrepo,
lock,
+ logcmdutil,
logexchange,
merge as mergemod,
node,
@@ -201,6 +203,24 @@
return ''
return os.path.basename(os.path.normpath(path))
+def sharedreposource(repo):
+ """Returns repository object for source repository of a shared repo.
+
+ If repo is not a shared repository, returns None.
+ """
+ if repo.sharedpath == repo.path:
+ return None
+
+ if util.safehasattr(repo, 'srcrepo') and repo.srcrepo:
+ return repo.srcrepo
+
+ # the sharedpath always ends in the .hg; we want the path to the repo
+ source = repo.vfs.split(repo.sharedpath)[0]
+ srcurl, branches = parseurl(source)
+ srcrepo = repository(repo.ui, srcurl)
+ repo.srcrepo = srcrepo
+ return srcrepo
+
def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None,
relative=False):
'''create a shared repository'''
@@ -213,7 +233,7 @@
else:
dest = ui.expandpath(dest)
- if isinstance(source, str):
+ if isinstance(source, bytes):
origsource = ui.expandpath(source)
source, branches = parseurl(origsource)
srcrepo = repository(ui, source)
@@ -250,7 +270,7 @@
# ValueError is raised on Windows if the drive letters differ on
# each path
raise error.Abort(_('cannot calculate relative path'),
- hint=str(e))
+ hint=util.forcebytestr(e))
else:
requirements += 'shared\n'
@@ -885,7 +905,8 @@
ui.status(_("no changes found\n"))
return subreporecurse()
ui.pager('incoming')
- displayer = cmdutil.show_changeset(ui, other, opts, buffered)
+ displayer = logcmdutil.changesetdisplayer(ui, other, opts,
+ buffered=buffered)
displaychlist(other, chlist, displayer)
displayer.close()
finally:
@@ -904,7 +925,7 @@
return ret
def display(other, chlist, displayer):
- limit = cmdutil.loglimit(opts)
+ limit = logcmdutil.getlimit(opts)
if opts.get('newest_first'):
chlist.reverse()
count = 0
@@ -949,7 +970,7 @@
ret = min(ret, sub.outgoing(ui, dest, opts))
return ret
- limit = cmdutil.loglimit(opts)
+ limit = logcmdutil.getlimit(opts)
o, other = _outgoing(ui, repo, dest, opts)
if not o:
cmdutil.outgoinghooks(ui, repo, other, opts, o)
@@ -958,7 +979,7 @@
if opts.get('newest_first'):
o.reverse()
ui.pager('outgoing')
- displayer = cmdutil.show_changeset(ui, repo, opts)
+ displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
count = 0
for n in o:
if limit is not None and count >= limit:
@@ -1093,8 +1114,8 @@
st = os.stat(p)
except OSError:
st = os.stat(prefix)
- state.append((st.st_mtime, st.st_size))
- maxmtime = max(maxmtime, st.st_mtime)
+ state.append((st[stat.ST_MTIME], st.st_size))
+ maxmtime = max(maxmtime, st[stat.ST_MTIME])
return tuple(state), maxmtime
--- a/mercurial/hgweb/common.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/hgweb/common.py Mon Mar 19 08:07:18 2018 -0700
@@ -12,6 +12,7 @@
import errno
import mimetypes
import os
+import stat
from .. import (
encoding,
@@ -45,7 +46,7 @@
authentication info). Return if op allowed, else raise an ErrorResponse
exception.'''
- user = req.env.get('REMOTE_USER')
+ user = req.remoteuser
deny_read = hgweb.configlist('web', 'deny_read')
if deny_read and (not user or ismember(hgweb.repo.ui, user, deny_read)):
@@ -61,14 +62,13 @@
return
# enforce that you can only push using POST requests
- if req.env['REQUEST_METHOD'] != 'POST':
+ if req.method != 'POST':
msg = 'push requires POST request'
raise ErrorResponse(HTTP_METHOD_NOT_ALLOWED, msg)
# require ssl by default for pushing, auth info cannot be sniffed
# and replayed
- scheme = req.env.get('wsgi.url_scheme')
- if hgweb.configbool('web', 'push_ssl') and scheme != 'https':
+ if hgweb.configbool('web', 'push_ssl') and req.urlscheme != 'https':
raise ErrorResponse(HTTP_FORBIDDEN, 'ssl required')
deny = hgweb.configlist('web', 'deny_push')
@@ -93,13 +93,20 @@
def __init__(self, code, message=None, headers=None):
if message is None:
message = _statusmessage(code)
- Exception.__init__(self, message)
+ Exception.__init__(self, pycompat.sysstr(message))
self.code = code
if headers is None:
headers = []
self.headers = headers
class continuereader(object):
+ """File object wrapper to handle HTTP 100-continue.
+
+ This is used by servers so they automatically handle Expect: 100-continue
+ request headers. On first read of the request body, the 100 Continue
+ response is sent. This should trigger the client into actually sending
+ the request body.
+ """
def __init__(self, f, write):
self.f = f
self._write = write
@@ -132,20 +139,20 @@
return os.stat(spath)
def get_mtime(spath):
- return get_stat(spath, "00changelog.i").st_mtime
+ return get_stat(spath, "00changelog.i")[stat.ST_MTIME]
def ispathsafe(path):
"""Determine if a path is safe to use for filesystem access."""
parts = path.split('/')
for part in parts:
- if (part in ('', os.curdir, os.pardir) or
+ if (part in ('', pycompat.oscurdir, pycompat.ospardir) or
pycompat.ossep in part or
pycompat.osaltsep is not None and pycompat.osaltsep in part):
return False
return True
-def staticfile(directory, fname, req):
+def staticfile(directory, fname, res):
"""return a file inside directory with guessed Content-Type header
fname always uses '/' as directory separator and isn't allowed to
@@ -170,7 +177,9 @@
with open(path, 'rb') as fh:
data = fh.read()
- req.respond(HTTP_OK, ct, body=data)
+ res.headers['Content-Type'] = ct
+ res.setbodybytes(data)
+ return res
except TypeError:
raise ErrorResponse(HTTP_SERVER_ERROR, 'illegal filename')
except OSError as err:
@@ -185,7 +194,7 @@
if stripecount and offset:
# account for offset, e.g. due to building the list in reverse
count = (stripecount + offset) % stripecount
- parity = (stripecount + offset) / stripecount & 1
+ parity = (stripecount + offset) // stripecount & 1
else:
count = 0
parity = 0
@@ -206,12 +215,6 @@
config("ui", "username") or
encoding.environ.get("EMAIL") or "")
-def caching(web, req):
- tag = r'W/"%d"' % web.mtime
- if req.env.get('HTTP_IF_NONE_MATCH') == tag:
- raise ErrorResponse(HTTP_NOT_MODIFIED)
- req.headers.append(('ETag', tag))
-
def cspvalues(ui):
"""Obtain the Content-Security-Policy header and nonce value.
--- a/mercurial/hgweb/hgweb_mod.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/hgweb/hgweb_mod.py Mon Mar 19 08:07:18 2018 -0700
@@ -14,19 +14,15 @@
from .common import (
ErrorResponse,
HTTP_BAD_REQUEST,
- HTTP_NOT_FOUND,
- HTTP_NOT_MODIFIED,
- HTTP_OK,
- HTTP_SERVER_ERROR,
- caching,
cspvalues,
permhooks,
+ statusmessage,
)
-from .request import wsgirequest
from .. import (
encoding,
error,
+ formatter,
hg,
hook,
profiling,
@@ -36,19 +32,16 @@
templater,
ui as uimod,
util,
- wireproto,
+ wireprotoserver,
)
from . import (
- protocol,
+ request as requestmod,
webcommands,
webutil,
wsgicgi,
)
-# Aliased for API compatibility.
-perms = wireproto.permissions
-
archivespecs = util.sortdict((
('zip', ('application/zip', 'zip', '.zip', None)),
('gz', ('application/x-gzip', 'tgz', '.tar.gz', None)),
@@ -56,11 +49,8 @@
))
def getstyle(req, configfn, templatepath):
- fromreq = req.form.get('style', [None])[0]
- if fromreq is not None:
- fromreq = pycompat.sysbytes(fromreq)
styles = (
- fromreq,
+ req.qsparams.get('style', None),
configfn('web', 'style'),
'paper',
)
@@ -98,9 +88,11 @@
is prone to race conditions. Instances of this class exist to hold
mutable and race-free state for requests.
"""
- def __init__(self, app, repo):
+ def __init__(self, app, repo, req, res):
self.repo = repo
self.reponame = app.reponame
+ self.req = req
+ self.res = res
self.archivespecs = archivespecs
@@ -150,21 +142,10 @@
def templater(self, req):
# determine scheme, port and server name
# this is needed to create absolute urls
-
- proto = req.env.get('wsgi.url_scheme')
- if proto == 'https':
- proto = 'https'
- default_port = '443'
- else:
- proto = 'http'
- default_port = '80'
-
- port = req.env[r'SERVER_PORT']
- port = port != default_port and (r':' + port) or r''
- urlbase = r'%s://%s%s' % (proto, req.env[r'SERVER_NAME'], port)
logourl = self.config('web', 'logourl')
logoimg = self.config('web', 'logoimg')
- staticurl = self.config('web', 'staticurl') or req.url + 'static/'
+ staticurl = (self.config('web', 'staticurl')
+ or req.apppath + '/static/')
if not staticurl.endswith('/'):
staticurl += '/'
@@ -181,38 +162,45 @@
if style == styles[0]:
vars['style'] = style
- start = '&' if req.url[-1] == r'?' else '?'
- sessionvars = webutil.sessionvars(vars, start)
+ sessionvars = webutil.sessionvars(vars, '?')
if not self.reponame:
self.reponame = (self.config('web', 'name', '')
- or req.env.get('REPO_NAME')
- or req.url.strip('/') or self.repo.root)
+ or req.reponame
+ or req.apppath
+ or self.repo.root)
def websubfilter(text):
return templatefilters.websub(text, self.websubtable)
# create the templater
-
+ # TODO: export all keywords: defaults = templatekw.keywords.copy()
defaults = {
- 'url': req.url,
+ 'url': req.apppath + '/',
'logourl': logourl,
'logoimg': logoimg,
'staticurl': staticurl,
- 'urlbase': urlbase,
+ 'urlbase': req.advertisedbaseurl,
'repo': self.reponame,
'encoding': encoding.encoding,
'motd': motd,
'sessionvars': sessionvars,
- 'pathdef': makebreadcrumb(req.url),
+ 'pathdef': makebreadcrumb(req.apppath),
'style': style,
'nonce': self.nonce,
}
+ tres = formatter.templateresources(self.repo.ui, self.repo)
tmpl = templater.templater.frommapfile(mapfile,
filters={'websub': websubfilter},
- defaults=defaults)
+ defaults=defaults,
+ resources=tres)
return tmpl
+ def sendtemplate(self, name, **kwargs):
+ """Helper function to send a response generated from a template."""
+ kwargs = pycompat.byteskwargs(kwargs)
+ self.res.setbodygen(self.tmpl.generate(name, kwargs))
+ return self.res.sendresponse()
class hgweb(object):
"""HTTP server for individual repositories.
@@ -303,10 +291,12 @@
This may be called by multiple threads.
"""
- req = wsgirequest(env, respond)
- return self.run_wsgi(req)
+ req = requestmod.parserequestfromenv(env)
+ res = requestmod.wsgiresponse(req, respond)
- def run_wsgi(self, req):
+ return self.run_wsgi(req, res)
+
+ def run_wsgi(self, req, res):
"""Internal method to run the WSGI application.
This is typically only called by Mercurial. External consumers
@@ -315,155 +305,129 @@
with self._obtainrepo() as repo:
profile = repo.ui.configbool('profiling', 'enabled')
with profiling.profile(repo.ui, enabled=profile):
- for r in self._runwsgi(req, repo):
+ for r in self._runwsgi(req, res, repo):
yield r
- def _runwsgi(self, req, repo):
- rctx = requestcontext(self, repo)
+ def _runwsgi(self, req, res, repo):
+ rctx = requestcontext(self, repo, req, res)
# This state is global across all threads.
encoding.encoding = rctx.config('web', 'encoding')
- rctx.repo.ui.environ = req.env
+ rctx.repo.ui.environ = req.rawenv
if rctx.csp:
# hgwebdir may have added CSP header. Since we generate our own,
# replace it.
- req.headers = [h for h in req.headers
- if h[0] != 'Content-Security-Policy']
- req.headers.append(('Content-Security-Policy', rctx.csp))
-
- # work with CGI variables to create coherent structure
- # use SCRIPT_NAME, PATH_INFO and QUERY_STRING as well as our REPO_NAME
-
- req.url = req.env[r'SCRIPT_NAME']
- if not req.url.endswith('/'):
- req.url += '/'
- if req.env.get('REPO_NAME'):
- req.url += req.env[r'REPO_NAME'] + r'/'
+ res.headers['Content-Security-Policy'] = rctx.csp
- if r'PATH_INFO' in req.env:
- parts = req.env[r'PATH_INFO'].strip('/').split('/')
- repo_parts = req.env.get(r'REPO_NAME', r'').split(r'/')
- if parts[:len(repo_parts)] == repo_parts:
- parts = parts[len(repo_parts):]
- query = '/'.join(parts)
- else:
- query = req.env[r'QUERY_STRING'].partition(r'&')[0]
- query = query.partition(r';')[0]
-
- # process this if it's a protocol request
- # protocol bits don't need to create any URLs
- # and the clients always use the old URL structure
+ handled = wireprotoserver.handlewsgirequest(
+ rctx, req, res, self.check_perm)
+ if handled:
+ return res.sendresponse()
- cmd = pycompat.sysbytes(req.form.get(r'cmd', [r''])[0])
- if protocol.iscmd(cmd):
- try:
- if query:
- raise ErrorResponse(HTTP_NOT_FOUND)
-
- req.checkperm = lambda op: self.check_perm(rctx, req, op)
- # Assume commands with no defined permissions are writes /
- # for pushes. This is the safest from a security perspective
- # because it doesn't allow commands with undefined semantics
- # from bypassing permissions checks.
- req.checkperm(perms.get(cmd, 'push'))
- return protocol.call(rctx.repo, req, cmd)
- except ErrorResponse as inst:
- # A client that sends unbundle without 100-continue will
- # break if we respond early.
- if (cmd == 'unbundle' and
- (req.env.get('HTTP_EXPECT',
- '').lower() != '100-continue') or
- req.env.get('X-HgHttp2', '')):
- req.drain()
- else:
- req.headers.append((r'Connection', r'Close'))
- req.respond(inst, protocol.HGTYPE,
- body='0\n%s\n' % inst)
- return ''
+ # Old implementations of hgweb supported dispatching the request via
+ # the initial query string parameter instead of using PATH_INFO.
+ # If PATH_INFO is present (signaled by ``req.dispatchpath`` having
+ # a value), we use it. Otherwise fall back to the query string.
+ if req.dispatchpath is not None:
+ query = req.dispatchpath
+ else:
+ query = req.querystring.partition('&')[0].partition(';')[0]
# translate user-visible url structure to internal structure
args = query.split('/', 2)
- if r'cmd' not in req.form and args and args[0]:
+ if 'cmd' not in req.qsparams and args and args[0]:
cmd = args.pop(0)
style = cmd.rfind('-')
if style != -1:
- req.form['style'] = [cmd[:style]]
+ req.qsparams['style'] = cmd[:style]
cmd = cmd[style + 1:]
# avoid accepting e.g. style parameter as command
if util.safehasattr(webcommands, cmd):
- req.form[r'cmd'] = [cmd]
+ req.qsparams['cmd'] = cmd
if cmd == 'static':
- req.form['file'] = ['/'.join(args)]
+ req.qsparams['file'] = '/'.join(args)
else:
if args and args[0]:
node = args.pop(0).replace('%2F', '/')
- req.form['node'] = [node]
+ req.qsparams['node'] = node
if args:
- req.form['file'] = args
+ if 'file' in req.qsparams:
+ del req.qsparams['file']
+ for a in args:
+ req.qsparams.add('file', a)
- ua = req.env.get('HTTP_USER_AGENT', '')
+ ua = req.headers.get('User-Agent', '')
if cmd == 'rev' and 'mercurial' in ua:
- req.form['style'] = ['raw']
+ req.qsparams['style'] = 'raw'
if cmd == 'archive':
- fn = req.form['node'][0]
+ fn = req.qsparams['node']
for type_, spec in rctx.archivespecs.iteritems():
ext = spec[2]
if fn.endswith(ext):
- req.form['node'] = [fn[:-len(ext)]]
- req.form['type'] = [type_]
+ req.qsparams['node'] = fn[:-len(ext)]
+ req.qsparams['type'] = type_
+ else:
+ cmd = req.qsparams.get('cmd', '')
# process the web interface request
try:
- tmpl = rctx.templater(req)
- ctype = tmpl('mimetype', encoding=encoding.encoding)
- ctype = templater.stringify(ctype)
+ rctx.tmpl = rctx.templater(req)
+ ctype = rctx.tmpl.render('mimetype',
+ {'encoding': encoding.encoding})
# check read permissions non-static content
if cmd != 'static':
self.check_perm(rctx, req, None)
if cmd == '':
- req.form[r'cmd'] = [tmpl.cache['default']]
- cmd = req.form[r'cmd'][0]
+ req.qsparams['cmd'] = rctx.tmpl.render('default', {})
+ cmd = req.qsparams['cmd']
# Don't enable caching if using a CSP nonce because then it wouldn't
# be a nonce.
if rctx.configbool('web', 'cache') and not rctx.nonce:
- caching(self, req) # sets ETag header or raises NOT_MODIFIED
+ tag = 'W/"%d"' % self.mtime
+ if req.headers.get('If-None-Match') == tag:
+ res.status = '304 Not Modified'
+ # Response body not allowed on 304.
+ res.setbodybytes('')
+ return res.sendresponse()
+
+ res.headers['ETag'] = tag
+
if cmd not in webcommands.__all__:
msg = 'no such method: %s' % cmd
raise ErrorResponse(HTTP_BAD_REQUEST, msg)
- elif cmd == 'file' and r'raw' in req.form.get(r'style', []):
- rctx.ctype = ctype
- content = webcommands.rawfile(rctx, req, tmpl)
else:
- content = getattr(webcommands, cmd)(rctx, req, tmpl)
- req.respond(HTTP_OK, ctype)
-
- return content
+ # Set some globals appropriate for web handlers. Commands can
+ # override easily enough.
+ res.status = '200 Script output follows'
+ res.headers['Content-Type'] = ctype
+ return getattr(webcommands, cmd)(rctx)
except (error.LookupError, error.RepoLookupError) as err:
- req.respond(HTTP_NOT_FOUND, ctype)
- msg = str(err)
+ msg = pycompat.bytestr(err)
if (util.safehasattr(err, 'name') and
not isinstance(err, error.ManifestLookupError)):
msg = 'revision not found: %s' % err.name
- return tmpl('error', error=msg)
- except (error.RepoError, error.RevlogError) as inst:
- req.respond(HTTP_SERVER_ERROR, ctype)
- return tmpl('error', error=str(inst))
- except ErrorResponse as inst:
- req.respond(inst, ctype)
- if inst.code == HTTP_NOT_MODIFIED:
- # Not allowed to return a body on a 304
- return ['']
- return tmpl('error', error=str(inst))
+
+ res.status = '404 Not Found'
+ res.headers['Content-Type'] = ctype
+ return rctx.sendtemplate('error', error=msg)
+ except (error.RepoError, error.RevlogError) as e:
+ res.status = '500 Internal Server Error'
+ res.headers['Content-Type'] = ctype
+ return rctx.sendtemplate('error', error=pycompat.bytestr(e))
+ except ErrorResponse as e:
+ res.status = statusmessage(e.code, pycompat.bytestr(e))
+ res.headers['Content-Type'] = ctype
+ return rctx.sendtemplate('error', error=pycompat.bytestr(e))
def check_perm(self, rctx, req, op):
for permhook in permhooks:
--- a/mercurial/hgweb/hgwebdir_mod.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/hgweb/hgwebdir_mod.py Mon Mar 19 08:07:18 2018 -0700
@@ -10,15 +10,12 @@
import gc
import os
-import re
import time
from ..i18n import _
from .common import (
ErrorResponse,
- HTTP_NOT_FOUND,
- HTTP_OK,
HTTP_SERVER_ERROR,
cspvalues,
get_contact,
@@ -26,8 +23,8 @@
ismember,
paritygen,
staticfile,
+ statusmessage,
)
-from .request import wsgirequest
from .. import (
configitems,
@@ -44,9 +41,11 @@
from . import (
hgweb_mod,
+ request as requestmod,
webutil,
wsgicgi,
)
+from ..utils import dateutil
def cleannames(items):
return [(util.pconvert(name).strip('/'), path) for name, path in items]
@@ -83,32 +82,185 @@
yield (prefix + '/' +
util.pconvert(path[len(roothead):]).lstrip('/')).strip('/'), path
-def geturlcgivars(baseurl, port):
- """
- Extract CGI variables from baseurl
+def readallowed(ui, req):
+ """Check allow_read and deny_read config options of a repo's ui object
+ to determine user permissions. By default, with neither option set (or
+ both empty), allow all users to read the repo. There are two ways a
+ user can be denied read access: (1) deny_read is not empty, and the
+ user is unauthenticated or deny_read contains user (or *), and (2)
+ allow_read is not empty and the user is not in allow_read. Return True
+ if user is allowed to read the repo, else return False."""
+
+ user = req.remoteuser
+
+ deny_read = ui.configlist('web', 'deny_read', untrusted=True)
+ if deny_read and (not user or ismember(ui, user, deny_read)):
+ return False
+
+ allow_read = ui.configlist('web', 'allow_read', untrusted=True)
+ # by default, allow reading if no allow_read option has been set
+ if not allow_read or ismember(ui, user, allow_read):
+ return True
+
+ return False
+
+def archivelist(ui, nodeid, url):
+ allowed = ui.configlist('web', 'allow_archive', untrusted=True)
+ archives = []
+
+ for typ, spec in hgweb_mod.archivespecs.iteritems():
+ if typ in allowed or ui.configbool('web', 'allow' + typ,
+ untrusted=True):
+ archives.append({
+ 'type': typ,
+ 'extension': spec[2],
+ 'node': nodeid,
+ 'url': url,
+ })
+
+ return archives
+
+def rawindexentries(ui, repos, req, subdir=''):
+ descend = ui.configbool('web', 'descend')
+ collapse = ui.configbool('web', 'collapse')
+ seenrepos = set()
+ seendirs = set()
+ for name, path in repos:
+
+ if not name.startswith(subdir):
+ continue
+ name = name[len(subdir):]
+ directory = False
+
+ if '/' in name:
+ if not descend:
+ continue
+
+ nameparts = name.split('/')
+ rootname = nameparts[0]
+
+ if not collapse:
+ pass
+ elif rootname in seendirs:
+ continue
+ elif rootname in seenrepos:
+ pass
+ else:
+ directory = True
+ name = rootname
+
+ # redefine the path to refer to the directory
+ discarded = '/'.join(nameparts[1:])
+
+ # remove name parts plus accompanying slash
+ path = path[:-len(discarded) - 1]
+
+ try:
+ r = hg.repository(ui, path)
+ directory = False
+ except (IOError, error.RepoError):
+ pass
+
+ parts = [
+ req.apppath.strip('/'),
+ subdir.strip('/'),
+ name.strip('/'),
+ ]
+ url = '/' + '/'.join(p for p in parts if p) + '/'
- >>> geturlcgivars(b"http://host.org/base", b"80")
- ('host.org', '80', '/base')
- >>> geturlcgivars(b"http://host.org:8000/base", b"80")
- ('host.org', '8000', '/base')
- >>> geturlcgivars(b'/base', 8000)
- ('', '8000', '/base')
- >>> geturlcgivars(b"base", b'8000')
- ('', '8000', '/base')
- >>> geturlcgivars(b"http://host", b'8000')
- ('host', '8000', '/')
- >>> geturlcgivars(b"http://host/", b'8000')
- ('host', '8000', '/')
- """
- u = util.url(baseurl)
- name = u.host or ''
- if u.port:
- port = u.port
- path = u.path or ""
- if not path.startswith('/'):
- path = '/' + path
+ # show either a directory entry or a repository
+ if directory:
+ # get the directory's time information
+ try:
+ d = (get_mtime(path), dateutil.makedate()[1])
+ except OSError:
+ continue
+
+ # add '/' to the name to make it obvious that
+ # the entry is a directory, not a regular repository
+ row = {'contact': "",
+ 'contact_sort': "",
+ 'name': name + '/',
+ 'name_sort': name,
+ 'url': url,
+ 'description': "",
+ 'description_sort': "",
+ 'lastchange': d,
+ 'lastchange_sort': d[1] - d[0],
+ 'archives': [],
+ 'isdirectory': True,
+ 'labels': [],
+ }
+
+ seendirs.add(name)
+ yield row
+ continue
+
+ u = ui.copy()
+ try:
+ u.readconfig(os.path.join(path, '.hg', 'hgrc'))
+ except Exception as e:
+ u.warn(_('error reading %s/.hg/hgrc: %s\n') % (path, e))
+ continue
+
+ def get(section, name, default=uimod._unset):
+ return u.config(section, name, default, untrusted=True)
+
+ if u.configbool("web", "hidden", untrusted=True):
+ continue
+
+ if not readallowed(u, req):
+ continue
- return name, pycompat.bytestr(port), path
+ # update time with local timezone
+ try:
+ r = hg.repository(ui, path)
+ except IOError:
+ u.warn(_('error accessing repository at %s\n') % path)
+ continue
+ except error.RepoError:
+ u.warn(_('error accessing repository at %s\n') % path)
+ continue
+ try:
+ d = (get_mtime(r.spath), dateutil.makedate()[1])
+ except OSError:
+ continue
+
+ contact = get_contact(get)
+ description = get("web", "description")
+ seenrepos.add(name)
+ name = get("web", "name", name)
+ row = {'contact': contact or "unknown",
+ 'contact_sort': contact.upper() or "unknown",
+ 'name': name,
+ 'name_sort': name,
+ 'url': url,
+ 'description': description or "unknown",
+ 'description_sort': description.upper() or "unknown",
+ 'lastchange': d,
+ 'lastchange_sort': d[1] - d[0],
+ 'archives': archivelist(u, "tip", url),
+ 'isdirectory': None,
+ 'labels': u.configlist('web', 'labels', untrusted=True),
+ }
+
+ yield row
+
+def indexentries(ui, repos, req, stripecount, sortcolumn='',
+ descending=False, subdir=''):
+
+ rows = rawindexentries(ui, repos, req, subdir=subdir)
+
+ sortdefault = None, False
+
+ if sortcolumn and sortdefault != (sortcolumn, descending):
+ sortkey = '%s_sort' % sortcolumn
+ rows = sorted(rows, key=lambda x: x[sortkey],
+ reverse=descending)
+
+ for row, parity in zip(rows, paritygen(stripecount)):
+ row['parity'] = parity
+ yield row
class hgwebdir(object):
"""HTTP server for multiple repositories.
@@ -180,7 +332,6 @@
self.stripecount = self.ui.config('web', 'stripes')
if self.stripecount:
self.stripecount = int(self.stripecount)
- self._baseurl = self.ui.config('web', 'baseurl')
prefix = self.ui.config('web', 'prefix')
if prefix.startswith('/'):
prefix = prefix[1:]
@@ -197,36 +348,17 @@
wsgicgi.launch(self)
def __call__(self, env, respond):
- req = wsgirequest(env, respond)
- return self.run_wsgi(req)
-
- def read_allowed(self, ui, req):
- """Check allow_read and deny_read config options of a repo's ui object
- to determine user permissions. By default, with neither option set (or
- both empty), allow all users to read the repo. There are two ways a
- user can be denied read access: (1) deny_read is not empty, and the
- user is unauthenticated or deny_read contains user (or *), and (2)
- allow_read is not empty and the user is not in allow_read. Return True
- if user is allowed to read the repo, else return False."""
+ baseurl = self.ui.config('web', 'baseurl')
+ req = requestmod.parserequestfromenv(env, altbaseurl=baseurl)
+ res = requestmod.wsgiresponse(req, respond)
- user = req.env.get('REMOTE_USER')
-
- deny_read = ui.configlist('web', 'deny_read', untrusted=True)
- if deny_read and (not user or ismember(ui, user, deny_read)):
- return False
+ return self.run_wsgi(req, res)
- allow_read = ui.configlist('web', 'allow_read', untrusted=True)
- # by default, allow reading if no allow_read option has been set
- if (not allow_read) or ismember(ui, user, allow_read):
- return True
-
- return False
-
- def run_wsgi(self, req):
+ def run_wsgi(self, req, res):
profile = self.ui.configbool('profiling', 'enabled')
with profiling.profile(self.ui, enabled=profile):
try:
- for r in self._runwsgi(req):
+ for r in self._runwsgi(req, res):
yield r
finally:
# There are known cycles in localrepository that prevent
@@ -238,25 +370,28 @@
# instances instead of every request.
gc.collect()
- def _runwsgi(self, req):
+ def _runwsgi(self, req, res):
try:
self.refresh()
csp, nonce = cspvalues(self.ui)
if csp:
- req.headers.append(('Content-Security-Policy', csp))
+ res.headers['Content-Security-Policy'] = csp
- virtual = req.env.get("PATH_INFO", "").strip('/')
+ virtual = req.dispatchpath.strip('/')
tmpl = self.templater(req, nonce)
- ctype = tmpl('mimetype', encoding=encoding.encoding)
- ctype = templater.stringify(ctype)
+ ctype = tmpl.render('mimetype', {'encoding': encoding.encoding})
+
+ # Global defaults. These can be overridden by any handler.
+ res.status = '200 Script output follows'
+ res.headers['Content-Type'] = ctype
# a static file
- if virtual.startswith('static/') or 'static' in req.form:
+ if virtual.startswith('static/') or 'static' in req.qsparams:
if virtual.startswith('static/'):
fname = virtual[7:]
else:
- fname = req.form['static'][0]
+ fname = req.qsparams['static']
static = self.ui.config("web", "static", None,
untrusted=False)
if not static:
@@ -264,24 +399,23 @@
if isinstance(tp, str):
tp = [tp]
static = [os.path.join(p, 'static') for p in tp]
- staticfile(static, fname, req)
- return []
+
+ staticfile(static, fname, res)
+ return res.sendresponse()
# top-level index
repos = dict(self.repos)
if (not virtual or virtual == 'index') and virtual not in repos:
- req.respond(HTTP_OK, ctype)
- return self.makeindex(req, tmpl)
+ return self.makeindex(req, res, tmpl)
# nested indexes and hgwebs
if virtual.endswith('/index') and virtual not in repos:
subdir = virtual[:-len('index')]
if any(r.startswith(subdir) for r in repos):
- req.respond(HTTP_OK, ctype)
- return self.makeindex(req, tmpl, subdir)
+ return self.makeindex(req, res, tmpl, subdir)
def _virtualdirs():
# Check the full virtual path, each parent, and the root ('')
@@ -296,11 +430,15 @@
for virtualrepo in _virtualdirs():
real = repos.get(virtualrepo)
if real:
- req.env['REPO_NAME'] = virtualrepo
+ # Re-parse the WSGI environment to take into account our
+ # repository path component.
+ req = requestmod.parserequestfromenv(
+ req.rawenv, reponame=virtualrepo,
+ altbaseurl=self.ui.config('web', 'baseurl'))
try:
# ensure caller gets private copy of ui
repo = hg.repository(self.ui.copy(), real)
- return hgweb_mod.hgweb(repo).run_wsgi(req)
+ return hgweb_mod.hgweb(repo).run_wsgi(req, res)
except IOError as inst:
msg = encoding.strtolocal(inst.strerror)
raise ErrorResponse(HTTP_SERVER_ERROR, msg)
@@ -310,173 +448,26 @@
# browse subdirectories
subdir = virtual + '/'
if [r for r in repos if r.startswith(subdir)]:
- req.respond(HTTP_OK, ctype)
- return self.makeindex(req, tmpl, subdir)
+ return self.makeindex(req, res, tmpl, subdir)
# prefixes not found
- req.respond(HTTP_NOT_FOUND, ctype)
- return tmpl("notfound", repo=virtual)
+ res.status = '404 Not Found'
+ res.setbodygen(tmpl.generate('notfound', {'repo': virtual}))
+ return res.sendresponse()
- except ErrorResponse as err:
- req.respond(err, ctype)
- return tmpl('error', error=err.message or '')
+ except ErrorResponse as e:
+ res.status = statusmessage(e.code, pycompat.bytestr(e))
+ res.setbodygen(tmpl.generate('error', {'error': e.message or ''}))
+ return res.sendresponse()
finally:
tmpl = None
- def makeindex(self, req, tmpl, subdir=""):
-
- def archivelist(ui, nodeid, url):
- allowed = ui.configlist("web", "allow_archive", untrusted=True)
- archives = []
- for typ, spec in hgweb_mod.archivespecs.iteritems():
- if typ in allowed or ui.configbool("web", "allow" + typ,
- untrusted=True):
- archives.append({"type": typ, "extension": spec[2],
- "node": nodeid, "url": url})
- return archives
-
- def rawentries(subdir="", **map):
-
- descend = self.ui.configbool('web', 'descend')
- collapse = self.ui.configbool('web', 'collapse')
- seenrepos = set()
- seendirs = set()
- for name, path in self.repos:
-
- if not name.startswith(subdir):
- continue
- name = name[len(subdir):]
- directory = False
-
- if '/' in name:
- if not descend:
- continue
-
- nameparts = name.split('/')
- rootname = nameparts[0]
-
- if not collapse:
- pass
- elif rootname in seendirs:
- continue
- elif rootname in seenrepos:
- pass
- else:
- directory = True
- name = rootname
-
- # redefine the path to refer to the directory
- discarded = '/'.join(nameparts[1:])
-
- # remove name parts plus accompanying slash
- path = path[:-len(discarded) - 1]
-
- try:
- r = hg.repository(self.ui, path)
- directory = False
- except (IOError, error.RepoError):
- pass
-
- parts = [name]
- parts.insert(0, '/' + subdir.rstrip('/'))
- if req.env['SCRIPT_NAME']:
- parts.insert(0, req.env['SCRIPT_NAME'])
- url = re.sub(r'/+', '/', '/'.join(parts) + '/')
-
- # show either a directory entry or a repository
- if directory:
- # get the directory's time information
- try:
- d = (get_mtime(path), util.makedate()[1])
- except OSError:
- continue
-
- # add '/' to the name to make it obvious that
- # the entry is a directory, not a regular repository
- row = {'contact': "",
- 'contact_sort': "",
- 'name': name + '/',
- 'name_sort': name,
- 'url': url,
- 'description': "",
- 'description_sort': "",
- 'lastchange': d,
- 'lastchange_sort': d[1]-d[0],
- 'archives': [],
- 'isdirectory': True,
- 'labels': [],
- }
-
- seendirs.add(name)
- yield row
- continue
-
- u = self.ui.copy()
- try:
- u.readconfig(os.path.join(path, '.hg', 'hgrc'))
- except Exception as e:
- u.warn(_('error reading %s/.hg/hgrc: %s\n') % (path, e))
- continue
- def get(section, name, default=uimod._unset):
- return u.config(section, name, default, untrusted=True)
-
- if u.configbool("web", "hidden", untrusted=True):
- continue
-
- if not self.read_allowed(u, req):
- continue
-
- # update time with local timezone
- try:
- r = hg.repository(self.ui, path)
- except IOError:
- u.warn(_('error accessing repository at %s\n') % path)
- continue
- except error.RepoError:
- u.warn(_('error accessing repository at %s\n') % path)
- continue
- try:
- d = (get_mtime(r.spath), util.makedate()[1])
- except OSError:
- continue
-
- contact = get_contact(get)
- description = get("web", "description")
- seenrepos.add(name)
- name = get("web", "name", name)
- row = {'contact': contact or "unknown",
- 'contact_sort': contact.upper() or "unknown",
- 'name': name,
- 'name_sort': name,
- 'url': url,
- 'description': description or "unknown",
- 'description_sort': description.upper() or "unknown",
- 'lastchange': d,
- 'lastchange_sort': d[1]-d[0],
- 'archives': archivelist(u, "tip", url),
- 'isdirectory': None,
- 'labels': u.configlist('web', 'labels', untrusted=True),
- }
-
- yield row
-
- sortdefault = None, False
- def entries(sortcolumn="", descending=False, subdir="", **map):
- rows = rawentries(subdir=subdir, **map)
-
- if sortcolumn and sortdefault != (sortcolumn, descending):
- sortkey = '%s_sort' % sortcolumn
- rows = sorted(rows, key=lambda x: x[sortkey],
- reverse=descending)
- for row, parity in zip(rows, paritygen(self.stripecount)):
- row['parity'] = parity
- yield row
-
+ def makeindex(self, req, res, tmpl, subdir=""):
self.refresh()
sortable = ["name", "description", "contact", "lastchange"]
- sortcolumn, descending = sortdefault
- if 'sort' in req.form:
- sortcolumn = req.form['sort'][0]
+ sortcolumn, descending = None, False
+ if 'sort' in req.qsparams:
+ sortcolumn = req.qsparams['sort']
descending = sortcolumn.startswith('-')
if descending:
sortcolumn = sortcolumn[1:]
@@ -489,12 +480,21 @@
for column in sortable]
self.refresh()
- self.updatereqenv(req.env)
+
+ entries = indexentries(self.ui, self.repos, req,
+ self.stripecount, sortcolumn=sortcolumn,
+ descending=descending, subdir=subdir)
- return tmpl("index", entries=entries, subdir=subdir,
- pathdef=hgweb_mod.makebreadcrumb('/' + subdir, self.prefix),
- sortcolumn=sortcolumn, descending=descending,
- **dict(sort))
+ mapping = {
+ 'entries': entries,
+ 'subdir': subdir,
+ 'pathdef': hgweb_mod.makebreadcrumb('/' + subdir, self.prefix),
+ 'sortcolumn': sortcolumn,
+ 'descending': descending,
+ }
+ mapping.update(sort)
+ res.setbodygen(tmpl.generate('index', mapping))
+ return res.sendresponse()
def templater(self, req, nonce):
@@ -507,30 +507,24 @@
def config(section, name, default=uimod._unset, untrusted=True):
return self.ui.config(section, name, default, untrusted)
- self.updatereqenv(req.env)
-
- url = req.env.get('SCRIPT_NAME', '')
- if not url.endswith('/'):
- url += '/'
-
vars = {}
styles, (style, mapfile) = hgweb_mod.getstyle(req, config,
self.templatepath)
if style == styles[0]:
vars['style'] = style
- start = r'&' if url[-1] == r'?' else r'?'
- sessionvars = webutil.sessionvars(vars, start)
+ sessionvars = webutil.sessionvars(vars, r'?')
logourl = config('web', 'logourl')
logoimg = config('web', 'logoimg')
- staticurl = config('web', 'staticurl') or url + 'static/'
+ staticurl = (config('web', 'staticurl')
+ or req.apppath + '/static/')
if not staticurl.endswith('/'):
staticurl += '/'
defaults = {
"encoding": encoding.encoding,
"motd": motd,
- "url": url,
+ "url": req.apppath + '/',
"logourl": logourl,
"logoimg": logoimg,
"staticurl": staticurl,
@@ -540,10 +534,3 @@
}
tmpl = templater.templater.frommapfile(mapfile, defaults=defaults)
return tmpl
-
- def updatereqenv(self, env):
- if self._baseurl is not None:
- name, port, path = geturlcgivars(self._baseurl, env['SERVER_PORT'])
- env['SERVER_NAME'] = name
- env['SERVER_PORT'] = port
- env['SCRIPT_NAME'] = path
--- a/mercurial/hgweb/protocol.py Thu Mar 15 22:35:07 2018 -0700
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,202 +0,0 @@
-#
-# Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-from __future__ import absolute_import
-
-import cgi
-import struct
-
-from .common import (
- HTTP_OK,
-)
-
-from .. import (
- error,
- pycompat,
- util,
- wireproto,
-)
-stringio = util.stringio
-
-urlerr = util.urlerr
-urlreq = util.urlreq
-
-HGTYPE = 'application/mercurial-0.1'
-HGTYPE2 = 'application/mercurial-0.2'
-HGERRTYPE = 'application/hg-error'
-
-def decodevaluefromheaders(req, headerprefix):
- """Decode a long value from multiple HTTP request headers.
-
- Returns the value as a bytes, not a str.
- """
- chunks = []
- i = 1
- prefix = headerprefix.upper().replace(r'-', r'_')
- while True:
- v = req.env.get(r'HTTP_%s_%d' % (prefix, i))
- if v is None:
- break
- chunks.append(pycompat.bytesurl(v))
- i += 1
-
- return ''.join(chunks)
-
-class webproto(wireproto.abstractserverproto):
- def __init__(self, req, ui):
- self.req = req
- self.response = ''
- self.ui = ui
- self.name = 'http'
- self.checkperm = req.checkperm
-
- def getargs(self, args):
- knownargs = self._args()
- data = {}
- keys = args.split()
- for k in keys:
- if k == '*':
- star = {}
- for key in knownargs.keys():
- if key != 'cmd' and key not in keys:
- star[key] = knownargs[key][0]
- data['*'] = star
- else:
- data[k] = knownargs[k][0]
- return [data[k] for k in keys]
- def _args(self):
- args = self.req.form.copy()
- if pycompat.ispy3:
- args = {k.encode('ascii'): [v.encode('ascii') for v in vs]
- for k, vs in args.items()}
- postlen = int(self.req.env.get(r'HTTP_X_HGARGS_POST', 0))
- if postlen:
- args.update(cgi.parse_qs(
- self.req.read(postlen), keep_blank_values=True))
- return args
-
- argvalue = decodevaluefromheaders(self.req, r'X-HgArg')
- args.update(cgi.parse_qs(argvalue, keep_blank_values=True))
- return args
- def getfile(self, fp):
- length = int(self.req.env[r'CONTENT_LENGTH'])
- # If httppostargs is used, we need to read Content-Length
- # minus the amount that was consumed by args.
- length -= int(self.req.env.get(r'HTTP_X_HGARGS_POST', 0))
- for s in util.filechunkiter(self.req, limit=length):
- fp.write(s)
- def redirect(self):
- self.oldio = self.ui.fout, self.ui.ferr
- self.ui.ferr = self.ui.fout = stringio()
- def restore(self):
- val = self.ui.fout.getvalue()
- self.ui.ferr, self.ui.fout = self.oldio
- return val
-
- def _client(self):
- return 'remote:%s:%s:%s' % (
- self.req.env.get('wsgi.url_scheme') or 'http',
- urlreq.quote(self.req.env.get('REMOTE_HOST', '')),
- urlreq.quote(self.req.env.get('REMOTE_USER', '')))
-
- def responsetype(self, prefer_uncompressed):
- """Determine the appropriate response type and compression settings.
-
- Returns a tuple of (mediatype, compengine, engineopts).
- """
- # Determine the response media type and compression engine based
- # on the request parameters.
- protocaps = decodevaluefromheaders(self.req, r'X-HgProto').split(' ')
-
- if '0.2' in protocaps:
- # All clients are expected to support uncompressed data.
- if prefer_uncompressed:
- return HGTYPE2, util._noopengine(), {}
-
- # Default as defined by wire protocol spec.
- compformats = ['zlib', 'none']
- for cap in protocaps:
- if cap.startswith('comp='):
- compformats = cap[5:].split(',')
- break
-
- # Now find an agreed upon compression format.
- for engine in wireproto.supportedcompengines(self.ui, self,
- util.SERVERROLE):
- if engine.wireprotosupport().name in compformats:
- opts = {}
- level = self.ui.configint('server',
- '%slevel' % engine.name())
- if level is not None:
- opts['level'] = level
-
- return HGTYPE2, engine, opts
-
- # No mutually supported compression format. Fall back to the
- # legacy protocol.
-
- # Don't allow untrusted settings because disabling compression or
- # setting a very high compression level could lead to flooding
- # the server's network or CPU.
- opts = {'level': self.ui.configint('server', 'zliblevel')}
- return HGTYPE, util.compengines['zlib'], opts
-
-def iscmd(cmd):
- return cmd in wireproto.commands
-
-def call(repo, req, cmd):
- p = webproto(req, repo.ui)
-
- def genversion2(gen, engine, engineopts):
- # application/mercurial-0.2 always sends a payload header
- # identifying the compression engine.
- name = engine.wireprotosupport().name
- assert 0 < len(name) < 256
- yield struct.pack('B', len(name))
- yield name
-
- for chunk in gen:
- yield chunk
-
- rsp = wireproto.dispatch(repo, p, cmd)
- if isinstance(rsp, bytes):
- req.respond(HTTP_OK, HGTYPE, body=rsp)
- return []
- elif isinstance(rsp, wireproto.streamres_legacy):
- gen = rsp.gen
- req.respond(HTTP_OK, HGTYPE)
- return gen
- elif isinstance(rsp, wireproto.streamres):
- gen = rsp.gen
-
- # This code for compression should not be streamres specific. It
- # is here because we only compress streamres at the moment.
- mediatype, engine, engineopts = p.responsetype(rsp.prefer_uncompressed)
- gen = engine.compressstream(gen, engineopts)
-
- if mediatype == HGTYPE2:
- gen = genversion2(gen, engine, engineopts)
-
- req.respond(HTTP_OK, mediatype)
- return gen
- elif isinstance(rsp, wireproto.pushres):
- val = p.restore()
- rsp = '%d\n%s' % (rsp.res, val)
- req.respond(HTTP_OK, HGTYPE, body=rsp)
- return []
- elif isinstance(rsp, wireproto.pusherr):
- # drain the incoming bundle
- req.drain()
- p.restore()
- rsp = '0\n%s\n' % rsp.res
- req.respond(HTTP_OK, HGTYPE, body=rsp)
- return []
- elif isinstance(rsp, wireproto.ooberror):
- rsp = rsp.message
- req.respond(HTTP_OK, HGERRTYPE, body=rsp)
- return []
- raise error.ProgrammingError('hgweb.protocol internal failure', rsp)
--- a/mercurial/hgweb/request.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/hgweb/request.py Mon Mar 19 08:07:18 2018 -0700
@@ -8,143 +8,546 @@
from __future__ import absolute_import
-import cgi
-import errno
-import socket
+import wsgiref.headers as wsgiheaders
+#import wsgiref.validate
-from .common import (
- ErrorResponse,
- HTTP_NOT_MODIFIED,
- statusmessage,
+from ..thirdparty import (
+ attr,
)
-
from .. import (
+ error,
pycompat,
util,
)
-shortcuts = {
- 'cl': [('cmd', ['changelog']), ('rev', None)],
- 'sl': [('cmd', ['shortlog']), ('rev', None)],
- 'cs': [('cmd', ['changeset']), ('node', None)],
- 'f': [('cmd', ['file']), ('filenode', None)],
- 'fl': [('cmd', ['filelog']), ('filenode', None)],
- 'fd': [('cmd', ['filediff']), ('node', None)],
- 'fa': [('cmd', ['annotate']), ('filenode', None)],
- 'mf': [('cmd', ['manifest']), ('manifest', None)],
- 'ca': [('cmd', ['archive']), ('node', None)],
- 'tags': [('cmd', ['tags'])],
- 'tip': [('cmd', ['changeset']), ('node', ['tip'])],
- 'static': [('cmd', ['static']), ('file', None)]
-}
+class multidict(object):
+ """A dict like object that can store multiple values for a key.
+
+ Used to store parsed request parameters.
+
+ This is inspired by WebOb's class of the same name.
+ """
+ def __init__(self):
+ self._items = {}
+
+ def __getitem__(self, key):
+ """Returns the last set value for a key."""
+ return self._items[key][-1]
+
+ def __setitem__(self, key, value):
+ """Replace a values for a key with a new value."""
+ self._items[key] = [value]
+
+ def __delitem__(self, key):
+ """Delete all values for a key."""
+ del self._items[key]
+
+ def __contains__(self, key):
+ return key in self._items
+
+ def __len__(self):
+ return len(self._items)
+
+ def get(self, key, default=None):
+ try:
+ return self.__getitem__(key)
+ except KeyError:
+ return default
+
+ def add(self, key, value):
+ """Add a new value for a key. Does not replace existing values."""
+ self._items.setdefault(key, []).append(value)
+
+ def getall(self, key):
+ """Obtains all values for a key."""
+ return self._items.get(key, [])
+
+ def getone(self, key):
+ """Obtain a single value for a key.
+
+ Raises KeyError if key not defined or it has multiple values set.
+ """
+ vals = self._items[key]
+
+ if len(vals) > 1:
+ raise KeyError('multiple values for %r' % key)
+
+ return vals[0]
+
+ def asdictoflists(self):
+ return {k: list(v) for k, v in self._items.iteritems()}
+
+@attr.s(frozen=True)
+class parsedrequest(object):
+ """Represents a parsed WSGI request.
+
+ Contains both parsed parameters as well as a handle on the input stream.
+ """
-def normalize(form):
- # first expand the shortcuts
- for k in shortcuts:
- if k in form:
- for name, value in shortcuts[k]:
- if value is None:
- value = form[k]
- form[name] = value
- del form[k]
- # And strip the values
- for k, v in form.iteritems():
- form[k] = [i.strip() for i in v]
- return form
+ # Request method.
+ method = attr.ib()
+ # Full URL for this request.
+ url = attr.ib()
+ # URL without any path components. Just <proto>://<host><port>.
+ baseurl = attr.ib()
+ # Advertised URL. Like ``url`` and ``baseurl`` but uses SERVER_NAME instead
+ # of the HTTP Host header for hostname. This is likely what clients used.
+ advertisedurl = attr.ib()
+ advertisedbaseurl = attr.ib()
+ # URL scheme (part before ``://``). e.g. ``http`` or ``https``.
+ urlscheme = attr.ib()
+ # Value of REMOTE_USER, if set, or None.
+ remoteuser = attr.ib()
+ # Value of REMOTE_HOST, if set, or None.
+ remotehost = attr.ib()
+ # Relative WSGI application path. If defined, will begin with a
+ # ``/``.
+ apppath = attr.ib()
+ # List of path parts to be used for dispatch.
+ dispatchparts = attr.ib()
+ # URL path component (no query string) used for dispatch. Can be
+ # ``None`` to signal no path component given to the request, an
+ # empty string to signal a request to the application's root URL,
+ # or a string not beginning with ``/`` containing the requested
+ # path under the application.
+ dispatchpath = attr.ib()
+ # The name of the repository being accessed.
+ reponame = attr.ib()
+ # Raw query string (part after "?" in URL).
+ querystring = attr.ib()
+ # multidict of query string parameters.
+ qsparams = attr.ib()
+ # wsgiref.headers.Headers instance. Operates like a dict with case
+ # insensitive keys.
+ headers = attr.ib()
+ # Request body input stream.
+ bodyfh = attr.ib()
+ # WSGI environment dict, unmodified.
+ rawenv = attr.ib()
-class wsgirequest(object):
- """Higher-level API for a WSGI request.
+def parserequestfromenv(env, reponame=None, altbaseurl=None):
+ """Parse URL components from environment variables.
+
+ WSGI defines request attributes via environment variables. This function
+ parses the environment variables into a data structure.
- WSGI applications are invoked with 2 arguments. They are used to
- instantiate instances of this class, which provides higher-level APIs
- for obtaining request parameters, writing HTTP output, etc.
+ If ``reponame`` is defined, the leading path components matching that
+ string are effectively shifted from ``PATH_INFO`` to ``SCRIPT_NAME``.
+ This simulates the world view of a WSGI application that processes
+ requests from the base URL of a repo.
+
+ If ``altbaseurl`` (typically comes from ``web.baseurl`` config option)
+ is defined, it is used - instead of the WSGI environment variables - for
+ constructing URL components up to and including the WSGI application path.
+ For example, if the current WSGI application is at ``/repo`` and a request
+ is made to ``/rev/@`` with this argument set to
+ ``http://myserver:9000/prefix``, the URL and path components will resolve as
+ if the request were to ``http://myserver:9000/prefix/rev/@``. In other
+ words, ``wsgi.url_scheme``, ``SERVER_NAME``, ``SERVER_PORT``, and
+ ``SCRIPT_NAME`` are all effectively replaced by components from this URL.
"""
- def __init__(self, wsgienv, start_response):
- version = wsgienv[r'wsgi.version']
- if (version < (1, 0)) or (version >= (2, 0)):
- raise RuntimeError("Unknown and unsupported WSGI version %d.%d"
- % version)
- self.inp = wsgienv[r'wsgi.input']
- self.err = wsgienv[r'wsgi.errors']
- self.threaded = wsgienv[r'wsgi.multithread']
- self.multiprocess = wsgienv[r'wsgi.multiprocess']
- self.run_once = wsgienv[r'wsgi.run_once']
- self.env = wsgienv
- self.form = normalize(cgi.parse(self.inp,
- self.env,
- keep_blank_values=1))
- self._start_response = start_response
- self.server_write = None
- self.headers = []
+ # PEP 3333 defines the WSGI spec and is a useful reference for this code.
+
+ # We first validate that the incoming object conforms with the WSGI spec.
+ # We only want to be dealing with spec-conforming WSGI implementations.
+ # TODO enable this once we fix internal violations.
+ #wsgiref.validate.check_environ(env)
- def __iter__(self):
- return iter([])
+ # PEP-0333 states that environment keys and values are native strings
+ # (bytes on Python 2 and str on Python 3). The code points for the Unicode
+ # strings on Python 3 must be between \00000-\000FF. We deal with bytes
+ # in Mercurial, so mass convert string keys and values to bytes.
+ if pycompat.ispy3:
+ env = {k.encode('latin-1'): v for k, v in env.iteritems()}
+ env = {k: v.encode('latin-1') if isinstance(v, str) else v
+ for k, v in env.iteritems()}
+
+ if altbaseurl:
+ altbaseurl = util.url(altbaseurl)
+
+ # https://www.python.org/dev/peps/pep-0333/#environ-variables defines
+ # the environment variables.
+ # https://www.python.org/dev/peps/pep-0333/#url-reconstruction defines
+ # how URLs are reconstructed.
+ fullurl = env['wsgi.url_scheme'] + '://'
+
+ if altbaseurl and altbaseurl.scheme:
+ advertisedfullurl = altbaseurl.scheme + '://'
+ else:
+ advertisedfullurl = fullurl
- def read(self, count=-1):
- return self.inp.read(count)
+ def addport(s, port):
+ if s.startswith('https://'):
+ if port != '443':
+ s += ':' + port
+ else:
+ if port != '80':
+ s += ':' + port
+
+ return s
+
+ if env.get('HTTP_HOST'):
+ fullurl += env['HTTP_HOST']
+ else:
+ fullurl += env['SERVER_NAME']
+ fullurl = addport(fullurl, env['SERVER_PORT'])
+
+ if altbaseurl and altbaseurl.host:
+ advertisedfullurl += altbaseurl.host
- def drain(self):
- '''need to read all data from request, httplib is half-duplex'''
- length = int(self.env.get('CONTENT_LENGTH') or 0)
- for s in util.filechunkiter(self.inp, limit=length):
- pass
+ if altbaseurl.port:
+ port = altbaseurl.port
+ elif altbaseurl.scheme == 'http' and not altbaseurl.port:
+ port = '80'
+ elif altbaseurl.scheme == 'https' and not altbaseurl.port:
+ port = '443'
+ else:
+ port = env['SERVER_PORT']
+
+ advertisedfullurl = addport(advertisedfullurl, port)
+ else:
+ advertisedfullurl += env['SERVER_NAME']
+ advertisedfullurl = addport(advertisedfullurl, env['SERVER_PORT'])
+
+ baseurl = fullurl
+ advertisedbaseurl = advertisedfullurl
+
+ fullurl += util.urlreq.quote(env.get('SCRIPT_NAME', ''))
+ fullurl += util.urlreq.quote(env.get('PATH_INFO', ''))
- def respond(self, status, type, filename=None, body=None):
- if not isinstance(type, str):
- type = pycompat.sysstr(type)
- if self._start_response is not None:
- self.headers.append((r'Content-Type', type))
- if filename:
- filename = (filename.rpartition('/')[-1]
- .replace('\\', '\\\\').replace('"', '\\"'))
- self.headers.append(('Content-Disposition',
- 'inline; filename="%s"' % filename))
- if body is not None:
- self.headers.append((r'Content-Length', str(len(body))))
+ if altbaseurl:
+ path = altbaseurl.path or ''
+ if path and not path.startswith('/'):
+ path = '/' + path
+ advertisedfullurl += util.urlreq.quote(path)
+ else:
+ advertisedfullurl += util.urlreq.quote(env.get('SCRIPT_NAME', ''))
+
+ advertisedfullurl += util.urlreq.quote(env.get('PATH_INFO', ''))
+
+ if env.get('QUERY_STRING'):
+ fullurl += '?' + env['QUERY_STRING']
+ advertisedfullurl += '?' + env['QUERY_STRING']
+
+ # If ``reponame`` is defined, that must be a prefix on PATH_INFO
+ # that represents the repository being dispatched to. When computing
+ # the dispatch info, we ignore these leading path components.
- for k, v in self.headers:
- if not isinstance(v, str):
- raise TypeError('header value must be string: %r' % (v,))
+ if altbaseurl:
+ apppath = altbaseurl.path or ''
+ if apppath and not apppath.startswith('/'):
+ apppath = '/' + apppath
+ else:
+ apppath = env.get('SCRIPT_NAME', '')
+
+ if reponame:
+ repoprefix = '/' + reponame.strip('/')
+
+ if not env.get('PATH_INFO'):
+ raise error.ProgrammingError('reponame requires PATH_INFO')
+
+ if not env['PATH_INFO'].startswith(repoprefix):
+ raise error.ProgrammingError('PATH_INFO does not begin with repo '
+ 'name: %s (%s)' % (env['PATH_INFO'],
+ reponame))
+
+ dispatchpath = env['PATH_INFO'][len(repoprefix):]
+
+ if dispatchpath and not dispatchpath.startswith('/'):
+ raise error.ProgrammingError('reponame prefix of PATH_INFO does '
+ 'not end at path delimiter: %s (%s)' %
+ (env['PATH_INFO'], reponame))
- if isinstance(status, ErrorResponse):
- self.headers.extend(status.headers)
- if status.code == HTTP_NOT_MODIFIED:
- # RFC 2616 Section 10.3.5: 304 Not Modified has cases where
- # it MUST NOT include any headers other than these and no
- # body
- self.headers = [(k, v) for (k, v) in self.headers if
- k in ('Date', 'ETag', 'Expires',
- 'Cache-Control', 'Vary')]
- status = statusmessage(status.code, str(status))
- elif status == 200:
- status = '200 Script output follows'
- elif isinstance(status, int):
- status = statusmessage(status)
+ apppath = apppath.rstrip('/') + repoprefix
+ dispatchparts = dispatchpath.strip('/').split('/')
+ dispatchpath = '/'.join(dispatchparts)
+
+ elif 'PATH_INFO' in env:
+ if env['PATH_INFO'].strip('/'):
+ dispatchparts = env['PATH_INFO'].strip('/').split('/')
+ dispatchpath = '/'.join(dispatchparts)
+ else:
+ dispatchparts = []
+ dispatchpath = ''
+ else:
+ dispatchparts = []
+ dispatchpath = None
+
+ querystring = env.get('QUERY_STRING', '')
+
+ # We store as a list so we have ordering information. We also store as
+ # a dict to facilitate fast lookup.
+ qsparams = multidict()
+ for k, v in util.urlreq.parseqsl(querystring, keep_blank_values=True):
+ qsparams.add(k, v)
+
+ # HTTP_* keys contain HTTP request headers. The Headers structure should
+ # perform case normalization for us. We just rewrite underscore to dash
+ # so keys match what likely went over the wire.
+ headers = []
+ for k, v in env.iteritems():
+ if k.startswith('HTTP_'):
+ headers.append((k[len('HTTP_'):].replace('_', '-'), v))
+
+ headers = wsgiheaders.Headers(headers)
+
+ # This is kind of a lie because the HTTP header wasn't explicitly
+ # sent. But for all intents and purposes it should be OK to lie about
+ # this, since a consumer will use either value to determine how many
+ # bytes are available to read.
+ if 'CONTENT_LENGTH' in env and 'HTTP_CONTENT_LENGTH' not in env:
+ headers['Content-Length'] = env['CONTENT_LENGTH']
- self.server_write = self._start_response(status, self.headers)
- self._start_response = None
- self.headers = []
- if body is not None:
- self.write(body)
- self.server_write = None
+ bodyfh = env['wsgi.input']
+ if 'Content-Length' in headers:
+ bodyfh = util.cappedreader(bodyfh, int(headers['Content-Length']))
+
+ return parsedrequest(method=env['REQUEST_METHOD'],
+ url=fullurl, baseurl=baseurl,
+ advertisedurl=advertisedfullurl,
+ advertisedbaseurl=advertisedbaseurl,
+ urlscheme=env['wsgi.url_scheme'],
+ remoteuser=env.get('REMOTE_USER'),
+ remotehost=env.get('REMOTE_HOST'),
+ apppath=apppath,
+ dispatchparts=dispatchparts, dispatchpath=dispatchpath,
+ reponame=reponame,
+ querystring=querystring,
+ qsparams=qsparams,
+ headers=headers,
+ bodyfh=bodyfh,
+ rawenv=env)
- def write(self, thing):
- if thing:
- try:
- self.server_write(thing)
- except socket.error as inst:
- if inst[0] != errno.ECONNRESET:
- raise
+class offsettrackingwriter(object):
+ """A file object like object that is append only and tracks write count.
+
+ Instances are bound to a callable. This callable is called with data
+ whenever a ``write()`` is attempted.
+
+ Instances track the amount of written data so they can answer ``tell()``
+ requests.
- def writelines(self, lines):
- for line in lines:
- self.write(line)
+ The intent of this class is to wrap the ``write()`` function returned by
+ a WSGI ``start_response()`` function. Since ``write()`` is a callable and
+ not a file object, it doesn't implement other file object methods.
+ """
+ def __init__(self, writefn):
+ self._write = writefn
+ self._offset = 0
+
+ def write(self, s):
+ res = self._write(s)
+ # Some Python objects don't report the number of bytes written.
+ if res is None:
+ self._offset += len(s)
+ else:
+ self._offset += res
def flush(self):
- return None
+ pass
+
+ def tell(self):
+ return self._offset
+
+class wsgiresponse(object):
+ """Represents a response to a WSGI request.
+
+ A response consists of a status line, headers, and a body.
+
+ Consumers must populate the ``status`` and ``headers`` fields and
+ make a call to a ``setbody*()`` method before the response can be
+ issued.
+
+ When it is time to start sending the response over the wire,
+ ``sendresponse()`` is called. It handles emitting the header portion
+ of the response message. It then yields chunks of body data to be
+ written to the peer. Typically, the WSGI application itself calls
+ and returns the value from ``sendresponse()``.
+ """
+
+ def __init__(self, req, startresponse):
+ """Create an empty response tied to a specific request.
+
+ ``req`` is a ``parsedrequest``. ``startresponse`` is the
+ ``start_response`` function passed to the WSGI application.
+ """
+ self._req = req
+ self._startresponse = startresponse
+
+ self.status = None
+ self.headers = wsgiheaders.Headers([])
+
+ self._bodybytes = None
+ self._bodygen = None
+ self._bodywillwrite = False
+ self._started = False
+ self._bodywritefn = None
+
+ def _verifybody(self):
+ if (self._bodybytes is not None or self._bodygen is not None
+ or self._bodywillwrite):
+ raise error.ProgrammingError('cannot define body multiple times')
+
+ def setbodybytes(self, b):
+ """Define the response body as static bytes.
+
+ The empty string signals that there is no response body.
+ """
+ self._verifybody()
+ self._bodybytes = b
+ self.headers['Content-Length'] = '%d' % len(b)
+
+ def setbodygen(self, gen):
+ """Define the response body as a generator of bytes."""
+ self._verifybody()
+ self._bodygen = gen
+
+ def setbodywillwrite(self):
+ """Signal an intent to use write() to emit the response body.
+
+ **This is the least preferred way to send a body.**
+
+ It is preferred for WSGI applications to emit a generator of chunks
+ constituting the response body. However, some consumers can't emit
+ data this way. So, WSGI provides a way to obtain a ``write(data)``
+ function that can be used to synchronously perform an unbuffered
+ write.
+
+ Calling this function signals an intent to produce the body in this
+ manner.
+ """
+ self._verifybody()
+ self._bodywillwrite = True
+
+ def sendresponse(self):
+ """Send the generated response to the client.
+
+ Before this is called, ``status`` must be set and one of the
+ ``setbody*()`` methods must be called.
+
+ Calling this method multiple times is not allowed.
+ """
+ if self._started:
+ raise error.ProgrammingError('sendresponse() called multiple times')
+
+ self._started = True
+
+ if not self.status:
+ raise error.ProgrammingError('status line not defined')
+
+ if (self._bodybytes is None and self._bodygen is None
+ and not self._bodywillwrite):
+ raise error.ProgrammingError('response body not defined')
- def close(self):
- return None
+ # RFC 7232 Section 4.1 states that a 304 MUST generate one of
+ # {Cache-Control, Content-Location, Date, ETag, Expires, Vary}
+ # and SHOULD NOT generate other headers unless they could be used
+ # to guide cache updates. Furthermore, RFC 7230 Section 3.3.2
+ # states that no response body can be issued. Content-Length can
+ # be sent. But if it is present, it should be the size of the response
+ # that wasn't transferred.
+ if self.status.startswith('304 '):
+ # setbodybytes('') will set C-L to 0. This doesn't conform with the
+ # spec. So remove it.
+ if self.headers.get('Content-Length') == '0':
+ del self.headers['Content-Length']
+
+ # Strictly speaking, this is too strict. But until it causes
+ # problems, let's be strict.
+ badheaders = {k for k in self.headers.keys()
+ if k.lower() not in ('date', 'etag', 'expires',
+ 'cache-control',
+ 'content-location',
+ 'vary')}
+ if badheaders:
+ raise error.ProgrammingError(
+ 'illegal header on 304 response: %s' %
+ ', '.join(sorted(badheaders)))
+
+ if self._bodygen is not None or self._bodywillwrite:
+ raise error.ProgrammingError("must use setbodybytes('') with "
+ "304 responses")
+
+ # Various HTTP clients (notably httplib) won't read the HTTP response
+ # until the HTTP request has been sent in full. If servers (us) send a
+ # response before the HTTP request has been fully sent, the connection
+ # may deadlock because neither end is reading.
+ #
+ # We work around this by "draining" the request data before
+ # sending any response in some conditions.
+ drain = False
+ close = False
+
+ # If the client sent Expect: 100-continue, we assume it is smart enough
+ # to deal with the server sending a response before reading the request.
+ # (httplib doesn't do this.)
+ if self._req.headers.get('Expect', '').lower() == '100-continue':
+ pass
+ # Only tend to request methods that have bodies. Strictly speaking,
+ # we should sniff for a body. But this is fine for our existing
+ # WSGI applications.
+ elif self._req.method not in ('POST', 'PUT'):
+ pass
+ else:
+ # If we don't know how much data to read, there's no guarantee
+ # that we can drain the request responsibly. The WSGI
+ # specification only says that servers *should* ensure the
+ # input stream doesn't overrun the actual request. So there's
+ # no guarantee that reading until EOF won't corrupt the stream
+ # state.
+ if not isinstance(self._req.bodyfh, util.cappedreader):
+ close = True
+ else:
+ # We /could/ only drain certain HTTP response codes. But 200 and
+ # non-200 wire protocol responses both require draining. Since
+ # we have a capped reader in place for all situations where we
+ # drain, it is safe to read from that stream. We'll either do
+ # a drain or no-op if we're already at EOF.
+ drain = True
+
+ if close:
+ self.headers['Connection'] = 'Close'
+
+ if drain:
+ assert isinstance(self._req.bodyfh, util.cappedreader)
+ while True:
+ chunk = self._req.bodyfh.read(32768)
+ if not chunk:
+ break
+
+ write = self._startresponse(pycompat.sysstr(self.status),
+ self.headers.items())
+
+ if self._bodybytes:
+ yield self._bodybytes
+ elif self._bodygen:
+ for chunk in self._bodygen:
+ yield chunk
+ elif self._bodywillwrite:
+ self._bodywritefn = write
+ else:
+ raise error.ProgrammingError('do not know how to send body')
+
+ def getbodyfile(self):
+ """Obtain a file object like object representing the response body.
+
+ For this to work, you must call ``setbodywillwrite()`` and then
+ ``sendresponse()`` first. ``sendresponse()`` is a generator and the
+ function won't run to completion unless the generator is advanced. The
+ generator yields no items. The easiest way to consume it is with
+ ``list(res.sendresponse())``, which should resolve to an empty list -
+ ``[]``.
+ """
+ if not self._bodywillwrite:
+ raise error.ProgrammingError('must call setbodywillwrite() first')
+
+ if not self._started:
+ raise error.ProgrammingError('must call sendresponse() first; did '
+ 'you remember to consume it since it '
+ 'is a generator?')
+
+ assert self._bodywritefn
+ return offsettrackingwriter(self._bodywritefn)
def wsgiapplication(app_maker):
'''For compatibility with old CGI scripts. A plain hgweb() or hgwebdir()
--- a/mercurial/hgweb/server.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/hgweb/server.py Mon Mar 19 08:07:18 2018 -0700
@@ -13,6 +13,7 @@
import socket
import sys
import traceback
+import wsgiref.validate
from ..i18n import _
@@ -124,12 +125,11 @@
env[r'SERVER_NAME'] = self.server.server_name
env[r'SERVER_PORT'] = str(self.server.server_port)
env[r'REQUEST_URI'] = self.path
- env[r'SCRIPT_NAME'] = self.server.prefix
- env[r'PATH_INFO'] = path[len(self.server.prefix):]
+ env[r'SCRIPT_NAME'] = pycompat.sysstr(self.server.prefix)
+ env[r'PATH_INFO'] = pycompat.sysstr(path[len(self.server.prefix):])
env[r'REMOTE_HOST'] = self.client_address[0]
env[r'REMOTE_ADDR'] = self.client_address[0]
- if query:
- env[r'QUERY_STRING'] = query
+ env[r'QUERY_STRING'] = query or r''
if pycompat.ispy3:
if self.headers.get_content_type() is None:
@@ -154,7 +154,7 @@
env[hkey] = hval
env[r'SERVER_PROTOCOL'] = self.request_version
env[r'wsgi.version'] = (1, 0)
- env[r'wsgi.url_scheme'] = self.url_scheme
+ env[r'wsgi.url_scheme'] = pycompat.sysstr(self.url_scheme)
if env.get(r'HTTP_EXPECT', '').lower() == '100-continue':
self.rfile = common.continuereader(self.rfile, self.wfile.write)
@@ -166,6 +166,8 @@
socketserver.ForkingMixIn)
env[r'wsgi.run_once'] = 0
+ wsgiref.validate.check_environ(env)
+
self.saved_status = None
self.saved_headers = []
self.length = None
@@ -229,6 +231,11 @@
self.wfile.write('0\r\n\r\n')
self.wfile.flush()
+ def version_string(self):
+ if self.server.serverheader:
+ return self.server.serverheader
+ return httpservermod.basehttprequesthandler.version_string(self)
+
class _httprequesthandlerssl(_httprequesthandler):
"""HTTPS handler based on Python's ssl module"""
@@ -257,8 +264,8 @@
def setup(self):
self.connection = self.request
- self.rfile = socket._fileobject(self.request, "rb", self.rbufsize)
- self.wfile = socket._fileobject(self.request, "wb", self.wbufsize)
+ self.rfile = self.request.makefile(r"rb", self.rbufsize)
+ self.wfile = self.request.makefile(r"wb", self.wbufsize)
try:
import threading
@@ -273,7 +280,7 @@
def openlog(opt, default):
if opt and opt != '-':
- return open(opt, 'a')
+ return open(opt, 'ab')
return default
class MercurialHTTPServer(_mixin, httpservermod.httpserver, object):
@@ -302,6 +309,8 @@
self.addr, self.port = self.socket.getsockname()[0:2]
self.fqaddr = socket.getfqdn(addr[0])
+ self.serverheader = ui.config('web', 'server-header')
+
class IPv6HTTPServer(MercurialHTTPServer):
address_family = getattr(socket, 'AF_INET6', None)
def __init__(self, *args, **kwargs):
--- a/mercurial/hgweb/webcommands.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/hgweb/webcommands.py Mon Mar 19 08:07:18 2018 -0700
@@ -19,7 +19,6 @@
ErrorResponse,
HTTP_FORBIDDEN,
HTTP_NOT_FOUND,
- HTTP_OK,
get_contact,
paritygen,
staticfile,
@@ -53,10 +52,20 @@
The decorator takes as its positional arguments the name/path the
command should be accessible under.
+ When called, functions receive as arguments a ``requestcontext``,
+ ``wsgirequest``, and a templater instance for generating output.
+ The functions should populate the ``rctx.res`` object with details
+ about the HTTP response.
+
+ The function returns a generator to be consumed by the WSGI application.
+ For most commands, this should be the result from
+ ``web.res.sendresponse()``. Many commands will call ``web.sendtemplate()``
+ to render a template.
+
Usage:
@webcommand('mycommand')
- def mycommand(web, req, tmpl):
+ def mycommand(web):
pass
"""
@@ -69,7 +78,7 @@
return func
@webcommand('log')
-def log(web, req, tmpl):
+def log(web):
"""
/log[/{revision}[/{path}]]
--------------------------
@@ -85,28 +94,24 @@
file will be shown. This form is equivalent to the ``filelog`` handler.
"""
- if 'file' in req.form and req.form['file'][0]:
- return filelog(web, req, tmpl)
+ if web.req.qsparams.get('file'):
+ return filelog(web)
else:
- return changelog(web, req, tmpl)
+ return changelog(web)
@webcommand('rawfile')
-def rawfile(web, req, tmpl):
+def rawfile(web):
guessmime = web.configbool('web', 'guessmime')
- path = webutil.cleanpath(web.repo, req.form.get('file', [''])[0])
+ path = webutil.cleanpath(web.repo, web.req.qsparams.get('file', ''))
if not path:
- content = manifest(web, req, tmpl)
- req.respond(HTTP_OK, web.ctype)
- return content
+ return manifest(web)
try:
- fctx = webutil.filectx(web.repo, req)
+ fctx = webutil.filectx(web.repo, web.req)
except error.LookupError as inst:
try:
- content = manifest(web, req, tmpl)
- req.respond(HTTP_OK, web.ctype)
- return content
+ return manifest(web)
except ErrorResponse:
raise inst
@@ -123,10 +128,14 @@
if mt.startswith('text/'):
mt += '; charset="%s"' % encoding.encoding
- req.respond(HTTP_OK, mt, path, body=text)
- return []
+ web.res.headers['Content-Type'] = mt
+ filename = (path.rpartition('/')[-1]
+ .replace('\\', '\\\\').replace('"', '\\"'))
+ web.res.headers['Content-Disposition'] = 'inline; filename="%s"' % filename
+ web.res.setbodybytes(text)
+ return web.res.sendresponse()
-def _filerevision(web, req, tmpl, fctx):
+def _filerevision(web, fctx):
f = fctx.path()
text = fctx.data()
parity = paritygen(web.stripecount)
@@ -143,18 +152,19 @@
"linenumber": "% 6d" % (lineno + 1),
"parity": next(parity)}
- return tmpl("filerevision",
- file=f,
- path=webutil.up(f),
- text=lines(),
- symrev=webutil.symrevorshortnode(req, fctx),
- rename=webutil.renamelink(fctx),
- permissions=fctx.manifest().flags(f),
- ishead=int(ishead),
- **webutil.commonentry(web.repo, fctx))
+ return web.sendtemplate(
+ 'filerevision',
+ file=f,
+ path=webutil.up(f),
+ text=lines(),
+ symrev=webutil.symrevorshortnode(web.req, fctx),
+ rename=webutil.renamelink(fctx),
+ permissions=fctx.manifest().flags(f),
+ ishead=int(ishead),
+ **pycompat.strkwargs(webutil.commonentry(web.repo, fctx)))
@webcommand('file')
-def file(web, req, tmpl):
+def file(web):
"""
/file/{revision}[/{path}]
-------------------------
@@ -173,18 +183,21 @@
If ``path`` is not defined, information about the root directory will
be rendered.
"""
- path = webutil.cleanpath(web.repo, req.form.get('file', [''])[0])
+ if web.req.qsparams.get('style') == 'raw':
+ return rawfile(web)
+
+ path = webutil.cleanpath(web.repo, web.req.qsparams.get('file', ''))
if not path:
- return manifest(web, req, tmpl)
+ return manifest(web)
try:
- return _filerevision(web, req, tmpl, webutil.filectx(web.repo, req))
+ return _filerevision(web, webutil.filectx(web.repo, web.req))
except error.LookupError as inst:
try:
- return manifest(web, req, tmpl)
+ return manifest(web)
except ErrorResponse:
raise inst
-def _search(web, req, tmpl):
+def _search(web):
MODE_REVISION = 'rev'
MODE_KEYWORD = 'keyword'
MODE_REVSET = 'revset'
@@ -277,38 +290,41 @@
for ctx in searchfunc[0](funcarg):
count += 1
n = ctx.node()
- showtags = webutil.showtag(web.repo, tmpl, 'changelogtag', n)
- files = webutil.listfilediffs(tmpl, ctx.files(), n, web.maxfiles)
+ showtags = webutil.showtag(web.repo, web.tmpl, 'changelogtag', n)
+ files = webutil.listfilediffs(web.tmpl, ctx.files(), n,
+ web.maxfiles)
- yield tmpl('searchentry',
- parity=next(parity),
- changelogtag=showtags,
- files=files,
- **webutil.commonentry(web.repo, ctx))
+ lm = webutil.commonentry(web.repo, ctx)
+ lm.update({
+ 'parity': next(parity),
+ 'changelogtag': showtags,
+ 'files': files,
+ })
+ yield web.tmpl.generate('searchentry', lm)
if count >= revcount:
break
- query = req.form['rev'][0]
+ query = web.req.qsparams['rev']
revcount = web.maxchanges
- if 'revcount' in req.form:
+ if 'revcount' in web.req.qsparams:
try:
- revcount = int(req.form.get('revcount', [revcount])[0])
+ revcount = int(web.req.qsparams.get('revcount', revcount))
revcount = max(revcount, 1)
- tmpl.defaults['sessionvars']['revcount'] = revcount
+ web.tmpl.defaults['sessionvars']['revcount'] = revcount
except ValueError:
pass
- lessvars = copy.copy(tmpl.defaults['sessionvars'])
- lessvars['revcount'] = max(revcount / 2, 1)
+ lessvars = copy.copy(web.tmpl.defaults['sessionvars'])
+ lessvars['revcount'] = max(revcount // 2, 1)
lessvars['rev'] = query
- morevars = copy.copy(tmpl.defaults['sessionvars'])
+ morevars = copy.copy(web.tmpl.defaults['sessionvars'])
morevars['revcount'] = revcount * 2
morevars['rev'] = query
mode, funcarg = getsearchmode(query)
- if 'forcekw' in req.form:
+ if 'forcekw' in web.req.qsparams:
showforcekw = ''
showunforcekw = searchfuncs[mode][1]
mode = MODE_KEYWORD
@@ -325,14 +341,21 @@
tip = web.repo['tip']
parity = paritygen(web.stripecount)
- return tmpl('search', query=query, node=tip.hex(), symrev='tip',
- entries=changelist, archives=web.archivelist("tip"),
- morevars=morevars, lessvars=lessvars,
- modedesc=searchfunc[1],
- showforcekw=showforcekw, showunforcekw=showunforcekw)
+ return web.sendtemplate(
+ 'search',
+ query=query,
+ node=tip.hex(),
+ symrev='tip',
+ entries=changelist,
+ archives=web.archivelist('tip'),
+ morevars=morevars,
+ lessvars=lessvars,
+ modedesc=searchfunc[1],
+ showforcekw=showforcekw,
+ showunforcekw=showunforcekw)
@webcommand('changelog')
-def changelog(web, req, tmpl, shortlog=False):
+def changelog(web, shortlog=False):
"""
/changelog[/{revision}]
-----------------------
@@ -358,11 +381,11 @@
"""
query = ''
- if 'node' in req.form:
- ctx = webutil.changectx(web.repo, req)
- symrev = webutil.symrevorshortnode(req, ctx)
- elif 'rev' in req.form:
- return _search(web, req, tmpl)
+ if 'node' in web.req.qsparams:
+ ctx = webutil.changectx(web.repo, web.req)
+ symrev = webutil.symrevorshortnode(web.req, ctx)
+ elif 'rev' in web.req.qsparams:
+ return _search(web)
else:
ctx = web.repo['tip']
symrev = 'tip'
@@ -377,7 +400,7 @@
if curcount > revcount + 1:
break
- entry = webutil.changelistentry(web, web.repo[rev], tmpl)
+ entry = webutil.changelistentry(web, web.repo[rev])
entry['parity'] = next(parity)
yield entry
@@ -386,17 +409,17 @@
else:
revcount = web.maxchanges
- if 'revcount' in req.form:
+ if 'revcount' in web.req.qsparams:
try:
- revcount = int(req.form.get('revcount', [revcount])[0])
+ revcount = int(web.req.qsparams.get('revcount', revcount))
revcount = max(revcount, 1)
- tmpl.defaults['sessionvars']['revcount'] = revcount
+ web.tmpl.defaults['sessionvars']['revcount'] = revcount
except ValueError:
pass
- lessvars = copy.copy(tmpl.defaults['sessionvars'])
- lessvars['revcount'] = max(revcount / 2, 1)
- morevars = copy.copy(tmpl.defaults['sessionvars'])
+ lessvars = copy.copy(web.tmpl.defaults['sessionvars'])
+ lessvars['revcount'] = max(revcount // 2, 1)
+ morevars = copy.copy(web.tmpl.defaults['sessionvars'])
morevars['revcount'] = revcount * 2
count = len(web.repo)
@@ -413,15 +436,24 @@
else:
nextentry = []
- return tmpl('shortlog' if shortlog else 'changelog', changenav=changenav,
- node=ctx.hex(), rev=pos, symrev=symrev, changesets=count,
- entries=entries,
- latestentry=latestentry, nextentry=nextentry,
- archives=web.archivelist("tip"), revcount=revcount,
- morevars=morevars, lessvars=lessvars, query=query)
+ return web.sendtemplate(
+ 'shortlog' if shortlog else 'changelog',
+ changenav=changenav,
+ node=ctx.hex(),
+ rev=pos,
+ symrev=symrev,
+ changesets=count,
+ entries=entries,
+ latestentry=latestentry,
+ nextentry=nextentry,
+ archives=web.archivelist('tip'),
+ revcount=revcount,
+ morevars=morevars,
+ lessvars=lessvars,
+ query=query)
@webcommand('shortlog')
-def shortlog(web, req, tmpl):
+def shortlog(web):
"""
/shortlog
---------
@@ -432,10 +464,10 @@
difference is the ``shortlog`` template will be rendered instead of the
``changelog`` template.
"""
- return changelog(web, req, tmpl, shortlog=True)
+ return changelog(web, shortlog=True)
@webcommand('changeset')
-def changeset(web, req, tmpl):
+def changeset(web):
"""
/changeset[/{revision}]
-----------------------
@@ -450,9 +482,11 @@
``changesetbookmark``, ``filenodelink``, ``filenolink``, and the many
templates related to diffs may all be used to produce the output.
"""
- ctx = webutil.changectx(web.repo, req)
+ ctx = webutil.changectx(web.repo, web.req)
- return tmpl('changeset', **webutil.changesetentry(web, req, tmpl, ctx))
+ return web.sendtemplate(
+ 'changeset',
+ **webutil.changesetentry(web, ctx))
rev = webcommand('rev')(changeset)
@@ -465,7 +499,7 @@
return path
@webcommand('manifest')
-def manifest(web, req, tmpl):
+def manifest(web):
"""
/manifest[/{revision}[/{path}]]
-------------------------------
@@ -481,13 +515,13 @@
The ``manifest`` template will be rendered for this handler.
"""
- if 'node' in req.form:
- ctx = webutil.changectx(web.repo, req)
- symrev = webutil.symrevorshortnode(req, ctx)
+ if 'node' in web.req.qsparams:
+ ctx = webutil.changectx(web.repo, web.req)
+ symrev = webutil.symrevorshortnode(web.req, ctx)
else:
ctx = web.repo['tip']
symrev = 'tip'
- path = webutil.cleanpath(web.repo, req.form.get('file', [''])[0])
+ path = webutil.cleanpath(web.repo, web.req.qsparams.get('file', ''))
mf = ctx.manifest()
node = ctx.node()
@@ -495,7 +529,7 @@
dirs = {}
parity = paritygen(web.stripecount)
- if path and path[-1] != "/":
+ if path and path[-1:] != "/":
path += "/"
l = len(path)
abspath = "/" + path
@@ -542,7 +576,7 @@
emptydirs = []
h = dirs[d]
while isinstance(h, dict) and len(h) == 1:
- k, v = h.items()[0]
+ k, v = next(iter(h.items()))
if v:
emptydirs.append(k)
h = v
@@ -553,18 +587,19 @@
"emptydirs": "/".join(emptydirs),
"basename": d}
- return tmpl("manifest",
- symrev=symrev,
- path=abspath,
- up=webutil.up(abspath),
- upparity=next(parity),
- fentries=filelist,
- dentries=dirlist,
- archives=web.archivelist(hex(node)),
- **webutil.commonentry(web.repo, ctx))
+ return web.sendtemplate(
+ 'manifest',
+ symrev=symrev,
+ path=abspath,
+ up=webutil.up(abspath),
+ upparity=next(parity),
+ fentries=filelist,
+ dentries=dirlist,
+ archives=web.archivelist(hex(node)),
+ **pycompat.strkwargs(webutil.commonentry(web.repo, ctx)))
@webcommand('tags')
-def tags(web, req, tmpl):
+def tags(web):
"""
/tags
-----
@@ -590,14 +625,15 @@
"date": web.repo[n].date(),
"node": hex(n)}
- return tmpl("tags",
- node=hex(web.repo.changelog.tip()),
- entries=lambda **x: entries(False, False, **x),
- entriesnotip=lambda **x: entries(True, False, **x),
- latestentry=lambda **x: entries(True, True, **x))
+ return web.sendtemplate(
+ 'tags',
+ node=hex(web.repo.changelog.tip()),
+ entries=lambda **x: entries(False, False, **x),
+ entriesnotip=lambda **x: entries(True, False, **x),
+ latestentry=lambda **x: entries(True, True, **x))
@webcommand('bookmarks')
-def bookmarks(web, req, tmpl):
+def bookmarks(web):
"""
/bookmarks
----------
@@ -628,14 +664,15 @@
else:
latestrev = -1
- return tmpl("bookmarks",
- node=hex(web.repo.changelog.tip()),
- lastchange=[{"date": web.repo[latestrev].date()}],
- entries=lambda **x: entries(latestonly=False, **x),
- latestentry=lambda **x: entries(latestonly=True, **x))
+ return web.sendtemplate(
+ 'bookmarks',
+ node=hex(web.repo.changelog.tip()),
+ lastchange=[{'date': web.repo[latestrev].date()}],
+ entries=lambda **x: entries(latestonly=False, **x),
+ latestentry=lambda **x: entries(latestonly=True, **x))
@webcommand('branches')
-def branches(web, req, tmpl):
+def branches(web):
"""
/branches
---------
@@ -650,11 +687,15 @@
"""
entries = webutil.branchentries(web.repo, web.stripecount)
latestentry = webutil.branchentries(web.repo, web.stripecount, 1)
- return tmpl('branches', node=hex(web.repo.changelog.tip()),
- entries=entries, latestentry=latestentry)
+
+ return web.sendtemplate(
+ 'branches',
+ node=hex(web.repo.changelog.tip()),
+ entries=entries,
+ latestentry=latestentry)
@webcommand('summary')
-def summary(web, req, tmpl):
+def summary(web):
"""
/summary
--------
@@ -679,11 +720,12 @@
if count > 10: # limit to 10 tags
break
- yield tmpl("tagentry",
- parity=next(parity),
- tag=k,
- node=hex(n),
- date=web.repo[n].date())
+ yield web.tmpl.generate('tagentry', {
+ 'parity': next(parity),
+ 'tag': k,
+ 'node': hex(n),
+ 'date': web.repo[n].date(),
+ })
def bookmarks(**map):
parity = paritygen(web.stripecount)
@@ -704,11 +746,9 @@
revs = web.repo.changelog.revs(start, end - 1)
for i in revs:
ctx = web.repo[i]
-
- l.append(tmpl(
- 'shortlogentry',
- parity=next(parity),
- **webutil.commonentry(web.repo, ctx)))
+ lm = webutil.commonentry(web.repo, ctx)
+ lm['parity'] = next(parity)
+ l.append(web.tmpl.generate('shortlogentry', lm))
for entry in reversed(l):
yield entry
@@ -721,21 +761,23 @@
desc = web.config("web", "description")
if not desc:
desc = 'unknown'
- return tmpl("summary",
- desc=desc,
- owner=get_contact(web.config) or "unknown",
- lastchange=tip.date(),
- tags=tagentries,
- bookmarks=bookmarks,
- branches=webutil.branchentries(web.repo, web.stripecount, 10),
- shortlog=changelist,
- node=tip.hex(),
- symrev='tip',
- archives=web.archivelist("tip"),
- labels=web.configlist('web', 'labels'))
+
+ return web.sendtemplate(
+ 'summary',
+ desc=desc,
+ owner=get_contact(web.config) or 'unknown',
+ lastchange=tip.date(),
+ tags=tagentries,
+ bookmarks=bookmarks,
+ branches=webutil.branchentries(web.repo, web.stripecount, 10),
+ shortlog=changelist,
+ node=tip.hex(),
+ symrev='tip',
+ archives=web.archivelist('tip'),
+ labels=web.configlist('web', 'labels'))
@webcommand('filediff')
-def filediff(web, req, tmpl):
+def filediff(web):
"""
/diff/{revision}/{path}
-----------------------
@@ -749,10 +791,10 @@
"""
fctx, ctx = None, None
try:
- fctx = webutil.filectx(web.repo, req)
+ fctx = webutil.filectx(web.repo, web.req)
except LookupError:
- ctx = webutil.changectx(web.repo, req)
- path = webutil.cleanpath(web.repo, req.form['file'][0])
+ ctx = webutil.changectx(web.repo, web.req)
+ path = webutil.cleanpath(web.repo, web.req.qsparams['file'])
if path not in ctx.files():
raise
@@ -762,27 +804,29 @@
basectx = ctx.p1()
style = web.config('web', 'style')
- if 'style' in req.form:
- style = req.form['style'][0]
+ if 'style' in web.req.qsparams:
+ style = web.req.qsparams['style']
- diffs = webutil.diffs(web, tmpl, ctx, basectx, [path], style)
+ diffs = webutil.diffs(web, ctx, basectx, [path], style)
if fctx is not None:
rename = webutil.renamelink(fctx)
ctx = fctx
else:
rename = []
ctx = ctx
- return tmpl("filediff",
- file=path,
- symrev=webutil.symrevorshortnode(req, ctx),
- rename=rename,
- diff=diffs,
- **webutil.commonentry(web.repo, ctx))
+
+ return web.sendtemplate(
+ 'filediff',
+ file=path,
+ symrev=webutil.symrevorshortnode(web.req, ctx),
+ rename=rename,
+ diff=diffs,
+ **pycompat.strkwargs(webutil.commonentry(web.repo, ctx)))
diff = webcommand('diff')(filediff)
@webcommand('comparison')
-def comparison(web, req, tmpl):
+def comparison(web):
"""
/comparison/{revision}/{path}
-----------------------------
@@ -798,14 +842,14 @@
The ``filecomparison`` template is rendered.
"""
- ctx = webutil.changectx(web.repo, req)
- if 'file' not in req.form:
+ ctx = webutil.changectx(web.repo, web.req)
+ if 'file' not in web.req.qsparams:
raise ErrorResponse(HTTP_NOT_FOUND, 'file not given')
- path = webutil.cleanpath(web.repo, req.form['file'][0])
+ path = webutil.cleanpath(web.repo, web.req.qsparams['file'])
parsecontext = lambda v: v == 'full' and -1 or int(v)
- if 'context' in req.form:
- context = parsecontext(req.form['context'][0])
+ if 'context' in web.req.qsparams:
+ context = parsecontext(web.req.qsparams['context'])
else:
context = parsecontext(web.config('web', 'comparisoncontext', '5'))
@@ -836,26 +880,28 @@
pfctx = ctx.parents()[0][path]
leftlines = filelines(pfctx)
- comparison = webutil.compare(tmpl, context, leftlines, rightlines)
+ comparison = webutil.compare(web.tmpl, context, leftlines, rightlines)
if fctx is not None:
rename = webutil.renamelink(fctx)
ctx = fctx
else:
rename = []
ctx = ctx
- return tmpl('filecomparison',
- file=path,
- symrev=webutil.symrevorshortnode(req, ctx),
- rename=rename,
- leftrev=leftrev,
- leftnode=hex(leftnode),
- rightrev=rightrev,
- rightnode=hex(rightnode),
- comparison=comparison,
- **webutil.commonentry(web.repo, ctx))
+
+ return web.sendtemplate(
+ 'filecomparison',
+ file=path,
+ symrev=webutil.symrevorshortnode(web.req, ctx),
+ rename=rename,
+ leftrev=leftrev,
+ leftnode=hex(leftnode),
+ rightrev=rightrev,
+ rightnode=hex(rightnode),
+ comparison=comparison,
+ **pycompat.strkwargs(webutil.commonentry(web.repo, ctx)))
@webcommand('annotate')
-def annotate(web, req, tmpl):
+def annotate(web):
"""
/annotate/{revision}/{path}
---------------------------
@@ -871,7 +917,7 @@
The ``fileannotate`` template is rendered.
"""
- fctx = webutil.filectx(web.repo, req)
+ fctx = webutil.filectx(web.repo, web.req)
f = fctx.path()
parity = paritygen(web.stripecount)
ishead = fctx.filerev() in fctx.filelog().headrevs()
@@ -901,7 +947,7 @@
or 'application/octet-stream')
lines = [((fctx.filectx(fctx.filerev()), 1), '(binary:%s)' % mt)]
else:
- lines = webutil.annotate(req, fctx, web.repo.ui)
+ lines = webutil.annotate(web.req, fctx, web.repo.ui)
previousrev = None
blockparitygen = paritygen(1)
@@ -931,22 +977,23 @@
"linenumber": "% 6d" % (lineno + 1),
"revdate": f.date()}
- diffopts = webutil.difffeatureopts(req, web.repo.ui, 'annotate')
+ diffopts = webutil.difffeatureopts(web.req, web.repo.ui, 'annotate')
diffopts = {k: getattr(diffopts, k) for k in diffopts.defaults}
- return tmpl("fileannotate",
- file=f,
- annotate=annotate,
- path=webutil.up(f),
- symrev=webutil.symrevorshortnode(req, fctx),
- rename=webutil.renamelink(fctx),
- permissions=fctx.manifest().flags(f),
- ishead=int(ishead),
- diffopts=diffopts,
- **webutil.commonentry(web.repo, fctx))
+ return web.sendtemplate(
+ 'fileannotate',
+ file=f,
+ annotate=annotate,
+ path=webutil.up(f),
+ symrev=webutil.symrevorshortnode(web.req, fctx),
+ rename=webutil.renamelink(fctx),
+ permissions=fctx.manifest().flags(f),
+ ishead=int(ishead),
+ diffopts=diffopts,
+ **pycompat.strkwargs(webutil.commonentry(web.repo, fctx)))
@webcommand('filelog')
-def filelog(web, req, tmpl):
+def filelog(web):
"""
/filelog/{revision}/{path}
--------------------------
@@ -960,16 +1007,16 @@
"""
try:
- fctx = webutil.filectx(web.repo, req)
+ fctx = webutil.filectx(web.repo, web.req)
f = fctx.path()
fl = fctx.filelog()
except error.LookupError:
- f = webutil.cleanpath(web.repo, req.form['file'][0])
+ f = webutil.cleanpath(web.repo, web.req.qsparams['file'])
fl = web.repo.file(f)
numrevs = len(fl)
if not numrevs: # file doesn't exist at all
raise
- rev = webutil.changectx(web.repo, req).rev()
+ rev = webutil.changectx(web.repo, web.req).rev()
first = fl.linkrev(0)
if rev < first: # current rev is from before file existed
raise
@@ -979,27 +1026,27 @@
fctx = web.repo.filectx(f, fl.linkrev(frev))
revcount = web.maxshortchanges
- if 'revcount' in req.form:
+ if 'revcount' in web.req.qsparams:
try:
- revcount = int(req.form.get('revcount', [revcount])[0])
+ revcount = int(web.req.qsparams.get('revcount', revcount))
revcount = max(revcount, 1)
- tmpl.defaults['sessionvars']['revcount'] = revcount
+ web.tmpl.defaults['sessionvars']['revcount'] = revcount
except ValueError:
pass
- lrange = webutil.linerange(req)
+ lrange = webutil.linerange(web.req)
- lessvars = copy.copy(tmpl.defaults['sessionvars'])
- lessvars['revcount'] = max(revcount / 2, 1)
- morevars = copy.copy(tmpl.defaults['sessionvars'])
+ lessvars = copy.copy(web.tmpl.defaults['sessionvars'])
+ lessvars['revcount'] = max(revcount // 2, 1)
+ morevars = copy.copy(web.tmpl.defaults['sessionvars'])
morevars['revcount'] = revcount * 2
- patch = 'patch' in req.form
+ patch = 'patch' in web.req.qsparams
if patch:
- lessvars['patch'] = morevars['patch'] = req.form['patch'][0]
- descend = 'descend' in req.form
+ lessvars['patch'] = morevars['patch'] = web.req.qsparams['patch']
+ descend = 'descend' in web.req.qsparams
if descend:
- lessvars['descend'] = morevars['descend'] = req.form['descend'][0]
+ lessvars['descend'] = morevars['descend'] = web.req.qsparams['descend']
count = fctx.filerev() + 1
start = max(0, count - revcount) # first rev on this page
@@ -1011,14 +1058,14 @@
entries = []
diffstyle = web.config('web', 'style')
- if 'style' in req.form:
- diffstyle = req.form['style'][0]
+ if 'style' in web.req.qsparams:
+ diffstyle = web.req.qsparams['style']
def diff(fctx, linerange=None):
ctx = fctx.changectx()
basectx = ctx.p1()
path = fctx.path()
- return webutil.diffs(web, tmpl, ctx, basectx, [path], diffstyle,
+ return webutil.diffs(web, ctx, basectx, [path], diffstyle,
linerange=linerange,
lineidprefix='%s-' % ctx.hex()[:12])
@@ -1044,7 +1091,7 @@
file=path,
diff=diffs,
linerange=webutil.formatlinerange(*lr),
- **webutil.commonentry(repo, c)))
+ **pycompat.strkwargs(webutil.commonentry(repo, c))))
if i == revcount:
break
lessvars['linerange'] = webutil.formatlinerange(*lrange)
@@ -1061,29 +1108,30 @@
file=f,
diff=diffs,
rename=webutil.renamelink(iterfctx),
- **webutil.commonentry(repo, iterfctx)))
+ **pycompat.strkwargs(webutil.commonentry(repo, iterfctx))))
entries.reverse()
revnav = webutil.filerevnav(web.repo, fctx.path())
nav = revnav.gen(end - 1, revcount, count)
latestentry = entries[:1]
- return tmpl("filelog",
- file=f,
- nav=nav,
- symrev=webutil.symrevorshortnode(req, fctx),
- entries=entries,
- descend=descend,
- patch=patch,
- latestentry=latestentry,
- linerange=linerange,
- revcount=revcount,
- morevars=morevars,
- lessvars=lessvars,
- **webutil.commonentry(web.repo, fctx))
+ return web.sendtemplate(
+ 'filelog',
+ file=f,
+ nav=nav,
+ symrev=webutil.symrevorshortnode(web.req, fctx),
+ entries=entries,
+ descend=descend,
+ patch=patch,
+ latestentry=latestentry,
+ linerange=linerange,
+ revcount=revcount,
+ morevars=morevars,
+ lessvars=lessvars,
+ **pycompat.strkwargs(webutil.commonentry(web.repo, fctx)))
@webcommand('archive')
-def archive(web, req, tmpl):
+def archive(web):
"""
/archive/{revision}.{format}[/{path}]
-------------------------------------
@@ -1103,9 +1151,9 @@
No template is used for this handler. Raw, binary content is generated.
"""
- type_ = req.form.get('type', [None])[0]
+ type_ = web.req.qsparams.get('type')
allowed = web.configlist("web", "allow_archive")
- key = req.form['node'][0]
+ key = web.req.qsparams['node']
if type_ not in web.archivespecs:
msg = 'Unsupported archive type: %s' % type_
@@ -1116,44 +1164,51 @@
msg = 'Archive type not allowed: %s' % type_
raise ErrorResponse(HTTP_FORBIDDEN, msg)
- reponame = re.sub(r"\W+", "-", os.path.basename(web.reponame))
+ reponame = re.sub(br"\W+", "-", os.path.basename(web.reponame))
cnode = web.repo.lookup(key)
arch_version = key
if cnode == key or key == 'tip':
arch_version = short(cnode)
name = "%s-%s" % (reponame, arch_version)
- ctx = webutil.changectx(web.repo, req)
+ ctx = webutil.changectx(web.repo, web.req)
pats = []
match = scmutil.match(ctx, [])
- file = req.form.get('file', None)
+ file = web.req.qsparams.get('file')
if file:
- pats = ['path:' + file[0]]
+ pats = ['path:' + file]
match = scmutil.match(ctx, pats, default='path')
if pats:
files = [f for f in ctx.manifest().keys() if match(f)]
if not files:
raise ErrorResponse(HTTP_NOT_FOUND,
- 'file(s) not found: %s' % file[0])
+ 'file(s) not found: %s' % file)
mimetype, artype, extension, encoding = web.archivespecs[type_]
- headers = [
- ('Content-Disposition', 'attachment; filename=%s%s' % (name, extension))
- ]
+
+ web.res.headers['Content-Type'] = mimetype
+ web.res.headers['Content-Disposition'] = 'attachment; filename=%s%s' % (
+ name, extension)
+
if encoding:
- headers.append(('Content-Encoding', encoding))
- req.headers.extend(headers)
- req.respond(HTTP_OK, mimetype)
+ web.res.headers['Content-Encoding'] = encoding
- archival.archive(web.repo, req, cnode, artype, prefix=name,
+ web.res.setbodywillwrite()
+ if list(web.res.sendresponse()):
+ raise error.ProgrammingError('sendresponse() should not emit data '
+ 'if writing later')
+
+ bodyfh = web.res.getbodyfile()
+
+ archival.archive(web.repo, bodyfh, cnode, artype, prefix=name,
matchfn=match,
subrepos=web.configbool("web", "archivesubrepos"))
+
return []
-
@webcommand('static')
-def static(web, req, tmpl):
- fname = req.form['file'][0]
+def static(web):
+ fname = web.req.qsparams['file']
# a repo owner may set web.static in .hg/hgrc to get any file
# readable by the user running the CGI script
static = web.config("web", "static", None, untrusted=False)
@@ -1162,11 +1217,12 @@
if isinstance(tp, str):
tp = [tp]
static = [os.path.join(p, 'static') for p in tp]
- staticfile(static, fname, req)
- return []
+
+ staticfile(static, fname, web.res)
+ return web.res.sendresponse()
@webcommand('graph')
-def graph(web, req, tmpl):
+def graph(web):
"""
/graph[/{revision}]
-------------------
@@ -1189,9 +1245,9 @@
This handler will render the ``graph`` template.
"""
- if 'node' in req.form:
- ctx = webutil.changectx(web.repo, req)
- symrev = webutil.symrevorshortnode(req, ctx)
+ if 'node' in web.req.qsparams:
+ ctx = webutil.changectx(web.repo, web.req)
+ symrev = webutil.symrevorshortnode(web.req, ctx)
else:
ctx = web.repo['tip']
symrev = 'tip'
@@ -1199,21 +1255,21 @@
bg_height = 39
revcount = web.maxshortchanges
- if 'revcount' in req.form:
+ if 'revcount' in web.req.qsparams:
try:
- revcount = int(req.form.get('revcount', [revcount])[0])
+ revcount = int(web.req.qsparams.get('revcount', revcount))
revcount = max(revcount, 1)
- tmpl.defaults['sessionvars']['revcount'] = revcount
+ web.tmpl.defaults['sessionvars']['revcount'] = revcount
except ValueError:
pass
- lessvars = copy.copy(tmpl.defaults['sessionvars'])
- lessvars['revcount'] = max(revcount / 2, 1)
- morevars = copy.copy(tmpl.defaults['sessionvars'])
+ lessvars = copy.copy(web.tmpl.defaults['sessionvars'])
+ lessvars['revcount'] = max(revcount // 2, 1)
+ morevars = copy.copy(web.tmpl.defaults['sessionvars'])
morevars['revcount'] = revcount * 2
- graphtop = req.form.get('graphtop', [ctx.hex()])[0]
- graphvars = copy.copy(tmpl.defaults['sessionvars'])
+ graphtop = web.req.qsparams.get('graphtop', ctx.hex())
+ graphvars = copy.copy(web.tmpl.defaults['sessionvars'])
graphvars['graphtop'] = graphtop
count = len(web.repo)
@@ -1305,17 +1361,24 @@
rows = len(tree)
- return tmpl('graph', rev=rev, symrev=symrev, revcount=revcount,
- uprev=uprev,
- lessvars=lessvars, morevars=morevars, downrev=downrev,
- graphvars=graphvars,
- rows=rows,
- bg_height=bg_height,
- changesets=count,
- nextentry=nextentry,
- jsdata=lambda **x: jsdata(),
- nodes=lambda **x: nodes(),
- node=ctx.hex(), changenav=changenav)
+ return web.sendtemplate(
+ 'graph',
+ rev=rev,
+ symrev=symrev,
+ revcount=revcount,
+ uprev=uprev,
+ lessvars=lessvars,
+ morevars=morevars,
+ downrev=downrev,
+ graphvars=graphvars,
+ rows=rows,
+ bg_height=bg_height,
+ changesets=count,
+ nextentry=nextentry,
+ jsdata=lambda **x: jsdata(),
+ nodes=lambda **x: nodes(),
+ node=ctx.hex(),
+ changenav=changenav)
def _getdoc(e):
doc = e[0].__doc__
@@ -1326,7 +1389,7 @@
return doc
@webcommand('help')
-def help(web, req, tmpl):
+def help(web):
"""
/help[/{topic}]
---------------
@@ -1342,7 +1405,7 @@
"""
from .. import commands, help as helpmod # avoid cycle
- topicname = req.form.get('node', [None])[0]
+ topicname = web.req.qsparams.get('node')
if not topicname:
def topics(**map):
for entries, summary, _doc in helpmod.helptable:
@@ -1371,8 +1434,12 @@
for c, doc in other:
yield {'topic': c, 'summary': doc}
- return tmpl('helptopics', topics=topics, earlycommands=earlycommands,
- othercommands=othercommands, title='Index')
+ return web.sendtemplate(
+ 'helptopics',
+ topics=topics,
+ earlycommands=earlycommands,
+ othercommands=othercommands,
+ title='Index')
# Render an index of sub-topics.
if topicname in helpmod.subtopics:
@@ -1384,8 +1451,11 @@
'summary': summary,
})
- return tmpl('helptopics', topics=topics, title=topicname,
- subindex=True)
+ return web.sendtemplate(
+ 'helptopics',
+ topics=topics,
+ title=topicname,
+ subindex=True)
u = webutil.wsgiui.load()
u.verbose = True
@@ -1403,9 +1473,13 @@
try:
doc = helpmod.help_(u, commands, topic, subtopic=subtopic)
- except error.UnknownCommand:
+ except error.Abort:
raise ErrorResponse(HTTP_NOT_FOUND)
- return tmpl('help', topic=topicname, doc=doc)
+
+ return web.sendtemplate(
+ 'help',
+ topic=topicname,
+ doc=doc)
# tell hggettext to extract docstrings from these functions:
i18nfunctions = commands.values()
--- a/mercurial/hgweb/webutil.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/hgweb/webutil.py Mon Mar 19 08:07:18 2018 -0700
@@ -28,6 +28,7 @@
error,
match,
mdiff,
+ obsutil,
patch,
pathutil,
pycompat,
@@ -38,9 +39,9 @@
)
def up(p):
- if p[0] != "/":
+ if p[0:1] != "/":
p = "/" + p
- if p[-1] == "/":
+ if p[-1:] == "/":
p = p[:-1]
up = os.path.dirname(p)
if up == "/":
@@ -177,7 +178,7 @@
section=section, whitespace=True)
for k in ('ignorews', 'ignorewsamount', 'ignorewseol', 'ignoreblanklines'):
- v = req.form.get(k, [None])[0]
+ v = req.qsparams.get(k)
if v is not None:
v = util.parsebool(v)
setattr(diffopts, k, v if v is not None else True)
@@ -242,12 +243,18 @@
return branches
def showtag(repo, tmpl, t1, node=nullid, **args):
+ args = pycompat.byteskwargs(args)
for t in repo.nodetags(node):
- yield tmpl(t1, tag=t, **args)
+ lm = args.copy()
+ lm['tag'] = t
+ yield tmpl.generate(t1, lm)
def showbookmark(repo, tmpl, t1, node=nullid, **args):
+ args = pycompat.byteskwargs(args)
for t in repo.nodebookmarks(node):
- yield tmpl(t1, bookmark=t, **args)
+ lm = args.copy()
+ lm['bookmark'] = t
+ yield tmpl.generate(t1, lm)
def branchentries(repo, stripecount, limit=0):
tips = []
@@ -295,19 +302,19 @@
def changectx(repo, req):
changeid = "tip"
- if 'node' in req.form:
- changeid = req.form['node'][0]
+ if 'node' in req.qsparams:
+ changeid = req.qsparams['node']
ipos = changeid.find(':')
if ipos != -1:
changeid = changeid[(ipos + 1):]
- elif 'manifest' in req.form:
- changeid = req.form['manifest'][0]
+ elif 'manifest' in req.qsparams:
+ changeid = req.qsparams['manifest']
return changeidctx(repo, changeid)
def basechangectx(repo, req):
- if 'node' in req.form:
- changeid = req.form['node'][0]
+ if 'node' in req.qsparams:
+ changeid = req.qsparams['node']
ipos = changeid.find(':')
if ipos != -1:
changeid = changeid[:ipos]
@@ -316,13 +323,13 @@
return None
def filectx(repo, req):
- if 'file' not in req.form:
+ if 'file' not in req.qsparams:
raise ErrorResponse(HTTP_NOT_FOUND, 'file not given')
- path = cleanpath(repo, req.form['file'][0])
- if 'node' in req.form:
- changeid = req.form['node'][0]
- elif 'filenode' in req.form:
- changeid = req.form['filenode'][0]
+ path = cleanpath(repo, req.qsparams['file'])
+ if 'node' in req.qsparams:
+ changeid = req.qsparams['node']
+ elif 'filenode' in req.qsparams:
+ changeid = req.qsparams['filenode']
else:
raise ErrorResponse(HTTP_NOT_FOUND, 'node or filenode not given')
try:
@@ -333,8 +340,8 @@
return fctx
def linerange(req):
- linerange = req.form.get('linerange')
- if linerange is None:
+ linerange = req.qsparams.getall('linerange')
+ if not linerange:
return None
if len(linerange) > 1:
raise ErrorResponse(HTTP_BAD_REQUEST,
@@ -347,20 +354,41 @@
try:
return util.processlinerange(fromline, toline)
except error.ParseError as exc:
- raise ErrorResponse(HTTP_BAD_REQUEST, str(exc))
+ raise ErrorResponse(HTTP_BAD_REQUEST, pycompat.bytestr(exc))
def formatlinerange(fromline, toline):
return '%d:%d' % (fromline + 1, toline)
-def succsandmarkers(repo, ctx):
- for item in templatekw.showsuccsandmarkers(repo, ctx):
+def succsandmarkers(context, mapping):
+ repo = context.resource(mapping, 'repo')
+ for item in templatekw.showsuccsandmarkers(context, mapping):
item['successors'] = _siblings(repo[successor]
for successor in item['successors'])
yield item
+# teach templater succsandmarkers is switched to (context, mapping) API
+succsandmarkers._requires = {'repo', 'ctx', 'templ'}
+
+def whyunstable(context, mapping):
+ repo = context.resource(mapping, 'repo')
+ ctx = context.resource(mapping, 'ctx')
+
+ entries = obsutil.whyunstable(repo, ctx)
+ for entry in entries:
+ if entry.get('divergentnodes'):
+ entry['divergentnodes'] = _siblings(entry['divergentnodes'])
+ yield entry
+
+whyunstable._requires = {'repo', 'ctx', 'templ'}
+
def commonentry(repo, ctx):
node = ctx.node()
return {
+ # TODO: perhaps ctx.changectx() should be assigned if ctx is a
+ # filectx, but I'm not pretty sure if that would always work because
+ # fctx.parents() != fctx.changectx.parents() for example.
+ 'ctx': ctx,
+ 'revcache': {},
'rev': ctx.rev(),
'node': hex(node),
'author': ctx.user(),
@@ -369,8 +397,9 @@
'extra': ctx.extra(),
'phase': ctx.phasestr(),
'obsolete': ctx.obsolete(),
- 'succsandmarkers': lambda **x: succsandmarkers(repo, ctx),
+ 'succsandmarkers': succsandmarkers,
'instabilities': [{"instability": i} for i in ctx.instabilities()],
+ 'whyunstable': whyunstable,
'branch': nodebranchnodefault(ctx),
'inbranch': nodeinbranch(repo, ctx),
'branches': nodebranchdict(repo, ctx),
@@ -380,7 +409,7 @@
'child': lambda **x: children(ctx),
}
-def changelistentry(web, ctx, tmpl):
+def changelistentry(web, ctx):
'''Obtain a dictionary to be used for entries in a changelist.
This function is called when producing items for the "entries" list passed
@@ -389,8 +418,8 @@
repo = web.repo
rev = ctx.rev()
n = ctx.node()
- showtags = showtag(repo, tmpl, 'changelogtag', n)
- files = listfilediffs(tmpl, ctx.files(), n, web.maxfiles)
+ showtags = showtag(repo, web.tmpl, 'changelogtag', n)
+ files = listfilediffs(web.tmpl, ctx.files(), n, web.maxfiles)
entry = commonentry(repo, ctx)
entry.update(
@@ -403,16 +432,16 @@
return entry
def symrevorshortnode(req, ctx):
- if 'node' in req.form:
- return templatefilters.revescape(req.form['node'][0])
+ if 'node' in req.qsparams:
+ return templatefilters.revescape(req.qsparams['node'])
else:
return short(ctx.node())
-def changesetentry(web, req, tmpl, ctx):
+def changesetentry(web, ctx):
'''Obtain a dictionary to be used to render the "changeset" template.'''
- showtags = showtag(web.repo, tmpl, 'changesettag', ctx.node())
- showbookmarks = showbookmark(web.repo, tmpl, 'changesetbookmark',
+ showtags = showtag(web.repo, web.tmpl, 'changesettag', ctx.node())
+ showbookmarks = showbookmark(web.repo, web.tmpl, 'changesetbookmark',
ctx.node())
showbranch = nodebranchnodefault(ctx)
@@ -420,27 +449,30 @@
parity = paritygen(web.stripecount)
for blockno, f in enumerate(ctx.files()):
template = 'filenodelink' if f in ctx else 'filenolink'
- files.append(tmpl(template,
- node=ctx.hex(), file=f, blockno=blockno + 1,
- parity=next(parity)))
+ files.append(web.tmpl.generate(template, {
+ 'node': ctx.hex(),
+ 'file': f,
+ 'blockno': blockno + 1,
+ 'parity': next(parity),
+ }))
- basectx = basechangectx(web.repo, req)
+ basectx = basechangectx(web.repo, web.req)
if basectx is None:
basectx = ctx.p1()
style = web.config('web', 'style')
- if 'style' in req.form:
- style = req.form['style'][0]
+ if 'style' in web.req.qsparams:
+ style = web.req.qsparams['style']
- diff = diffs(web, tmpl, ctx, basectx, None, style)
+ diff = diffs(web, ctx, basectx, None, style)
parity = paritygen(web.stripecount)
diffstatsgen = diffstatgen(ctx, basectx)
- diffstats = diffstat(tmpl, ctx, diffstatsgen, parity)
+ diffstats = diffstat(web.tmpl, ctx, diffstatsgen, parity)
return dict(
diff=diff,
- symrev=symrevorshortnode(req, ctx),
+ symrev=symrevorshortnode(web.req, ctx),
basenode=basectx.hex(),
changesettag=showtags,
changesetbookmark=showbookmarks,
@@ -449,15 +481,15 @@
diffsummary=lambda **x: diffsummary(diffstatsgen),
diffstat=diffstats,
archives=web.archivelist(ctx.hex()),
- **commonentry(web.repo, ctx))
+ **pycompat.strkwargs(commonentry(web.repo, ctx)))
def listfilediffs(tmpl, files, node, max):
for f in files[:max]:
- yield tmpl('filedifflink', node=hex(node), file=f)
+ yield tmpl.generate('filedifflink', {'node': hex(node), 'file': f})
if len(files) > max:
- yield tmpl('fileellipses')
+ yield tmpl.generate('fileellipses', {})
-def diffs(web, tmpl, ctx, basectx, files, style, linerange=None,
+def diffs(web, ctx, basectx, files, style, linerange=None,
lineidprefix=''):
def prettyprintlines(lines, blockno):
@@ -471,11 +503,12 @@
ltype = "difflineat"
else:
ltype = "diffline"
- yield tmpl(ltype,
- line=l,
- lineno=lineno,
- lineid=lineidprefix + "l%s" % difflineno,
- linenumber="% 8s" % difflineno)
+ yield web.tmpl.generate(ltype, {
+ 'line': l,
+ 'lineno': lineno,
+ 'lineid': lineidprefix + "l%s" % difflineno,
+ 'linenumber': "% 8s" % difflineno,
+ })
repo = web.repo
if files:
@@ -500,24 +533,30 @@
continue
lines.extend(hunklines)
if lines:
- yield tmpl('diffblock', parity=next(parity), blockno=blockno,
- lines=prettyprintlines(lines, blockno))
+ yield web.tmpl.generate('diffblock', {
+ 'parity': next(parity),
+ 'blockno': blockno,
+ 'lines': prettyprintlines(lines, blockno),
+ })
def compare(tmpl, context, leftlines, rightlines):
'''Generator function that provides side-by-side comparison data.'''
def compline(type, leftlineno, leftline, rightlineno, rightline):
- lineid = leftlineno and ("l%s" % leftlineno) or ''
- lineid += rightlineno and ("r%s" % rightlineno) or ''
- return tmpl('comparisonline',
- type=type,
- lineid=lineid,
- leftlineno=leftlineno,
- leftlinenumber="% 6s" % (leftlineno or ''),
- leftline=leftline or '',
- rightlineno=rightlineno,
- rightlinenumber="% 6s" % (rightlineno or ''),
- rightline=rightline or '')
+ lineid = leftlineno and ("l%d" % leftlineno) or ''
+ lineid += rightlineno and ("r%d" % rightlineno) or ''
+ llno = '%d' % leftlineno if leftlineno else ''
+ rlno = '%d' % rightlineno if rightlineno else ''
+ return tmpl.generate('comparisonline', {
+ 'type': type,
+ 'lineid': lineid,
+ 'leftlineno': leftlineno,
+ 'leftlinenumber': "% 6s" % llno,
+ 'leftline': leftline or '',
+ 'rightlineno': rightlineno,
+ 'rightlinenumber': "% 6s" % rlno,
+ 'rightline': rightline or '',
+ })
def getblock(opcodes):
for type, llo, lhi, rlo, rhi in opcodes:
@@ -547,10 +586,11 @@
s = difflib.SequenceMatcher(None, leftlines, rightlines)
if context < 0:
- yield tmpl('comparisonblock', lines=getblock(s.get_opcodes()))
+ yield tmpl.generate('comparisonblock',
+ {'lines': getblock(s.get_opcodes())})
else:
for oc in s.get_grouped_opcodes(n=context):
- yield tmpl('comparisonblock', lines=getblock(oc))
+ yield tmpl.generate('comparisonblock', {'lines': getblock(oc)})
def diffstatgen(ctx, basectx):
'''Generator function that provides the diffstat data.'''
@@ -584,9 +624,15 @@
template = 'diffstatlink' if filename in files else 'diffstatnolink'
total = adds + removes
fileno += 1
- yield tmpl(template, node=ctx.hex(), file=filename, fileno=fileno,
- total=total, addpct=pct(adds), removepct=pct(removes),
- parity=next(parity))
+ yield tmpl.generate(template, {
+ 'node': ctx.hex(),
+ 'file': filename,
+ 'fileno': fileno,
+ 'total': total,
+ 'addpct': pct(adds),
+ 'removepct': pct(removes),
+ 'parity': next(parity),
+ })
class sessionvars(object):
def __init__(self, vars, start='?'):
@@ -619,14 +665,14 @@
websubdefs += repo.ui.configitems('interhg')
for key, pattern in websubdefs:
# grab the delimiter from the character after the "s"
- unesc = pattern[1]
+ unesc = pattern[1:2]
delim = re.escape(unesc)
# identify portions of the pattern, taking care to avoid escaped
# delimiters. the replace format and flags are optional, but
# delimiters are required.
match = re.match(
- r'^s%s(.+)(?:(?<=\\\\)|(?<!\\))%s(.*)%s([ilmsux])*$'
+ br'^s%s(.+)(?:(?<=\\\\)|(?<!\\))%s(.*)%s([ilmsux])*$'
% (delim, delim, delim), pattern)
if not match:
repo.ui.warn(_("websub: invalid pattern for %s: %s\n")
@@ -634,7 +680,7 @@
continue
# we need to unescape the delimiter for regexp and format
- delim_re = re.compile(r'(?<!\\)\\%s' % delim)
+ delim_re = re.compile(br'(?<!\\)\\%s' % delim)
regexp = delim_re.sub(unesc, match.group(1))
format = delim_re.sub(unesc, match.group(2))
--- a/mercurial/hook.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/hook.py Mon Mar 19 08:07:18 2018 -0700
@@ -49,12 +49,12 @@
modname = modfile
with demandimport.deactivated():
try:
- obj = __import__(modname)
+ obj = __import__(pycompat.sysstr(modname))
except (ImportError, SyntaxError):
e1 = sys.exc_info()
try:
# extensions are loaded with hgext_ prefix
- obj = __import__("hgext_%s" % modname)
+ obj = __import__(r"hgext_%s" % pycompat.sysstr(modname))
except (ImportError, SyntaxError):
e2 = sys.exc_info()
if ui.tracebackflag:
@@ -265,12 +265,12 @@
raised = False
res[hname] = r, raised
+ finally:
+ # The stderr is fully buffered on Windows when connected to a pipe.
+ # A forcible flush is required to make small stderr data in the
+ # remote side available to the client immediately.
+ util.stderr.flush()
- # The stderr is fully buffered on Windows when connected to a pipe.
- # A forcible flush is required to make small stderr data in the
- # remote side available to the client immediately.
- util.stderr.flush()
- finally:
if _redirect and oldstdout >= 0:
util.stdout.flush() # write hook output to stderr fd
os.dup2(oldstdout, stdoutno)
--- a/mercurial/httpclient/__init__.py Thu Mar 15 22:35:07 2018 -0700
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,912 +0,0 @@
-# Copyright 2010, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-"""Improved HTTP/1.1 client library
-
-This library contains an HTTPConnection which is similar to the one in
-httplib, but has several additional features:
-
- * supports keepalives natively
- * uses select() to block for incoming data
- * notices when the server responds early to a request
- * implements ssl inline instead of in a different class
-"""
-from __future__ import absolute_import
-
-# Many functions in this file have too many arguments.
-# pylint: disable=R0913
-import email
-import email.message
-import errno
-import inspect
-import logging
-import select
-import socket
-import ssl
-import sys
-
-try:
- import cStringIO as io
- io.StringIO
-except ImportError:
- import io
-
-try:
- import httplib
- httplib.HTTPException
-except ImportError:
- import http.client as httplib
-
-from . import (
- _readers,
-)
-
-logger = logging.getLogger(__name__)
-
-__all__ = ['HTTPConnection', 'HTTPResponse']
-
-HTTP_VER_1_0 = b'HTTP/1.0'
-HTTP_VER_1_1 = b'HTTP/1.1'
-
-OUTGOING_BUFFER_SIZE = 1 << 15
-INCOMING_BUFFER_SIZE = 1 << 20
-
-HDR_ACCEPT_ENCODING = 'accept-encoding'
-HDR_CONNECTION_CTRL = 'connection'
-HDR_CONTENT_LENGTH = 'content-length'
-HDR_XFER_ENCODING = 'transfer-encoding'
-
-XFER_ENCODING_CHUNKED = 'chunked'
-
-CONNECTION_CLOSE = 'close'
-
-EOL = b'\r\n'
-_END_HEADERS = EOL * 2
-
-# Based on some searching around, 1 second seems like a reasonable
-# default here.
-TIMEOUT_ASSUME_CONTINUE = 1
-TIMEOUT_DEFAULT = None
-
-if sys.version_info > (3, 0):
- _unicode = str
-else:
- _unicode = unicode
-
-def _ensurebytes(data):
- if not isinstance(data, (_unicode, bytes)):
- data = str(data)
- if not isinstance(data, bytes):
- try:
- return data.encode('latin-1')
- except UnicodeEncodeError as err:
- raise UnicodeEncodeError(
- err.encoding,
- err.object,
- err.start,
- err.end,
- '%r is not valid Latin-1 Use .encode("utf-8") '
- 'if sending as utf-8 is desired.' % (
- data[err.start:err.end],))
- return data
-
-class _CompatMessage(email.message.Message):
- """Workaround for rfc822.Message and email.message.Message API diffs."""
-
- @classmethod
- def from_string(cls, s):
- if sys.version_info > (3, 0):
- # Python 3 can't decode headers from bytes, so we have to
- # trust RFC 2616 and decode the headers as iso-8859-1
- # bytes.
- s = s.decode('iso-8859-1')
- headers = email.message_from_string(s, _class=_CompatMessage)
- # Fix multi-line headers to match httplib's behavior from
- # Python 2.x, since email.message.Message handles them in
- # slightly different ways.
- if sys.version_info < (3, 0):
- new = []
- for h, v in headers._headers:
- if '\r\n' in v:
- v = '\n'.join([' ' + x.lstrip() for x in v.split('\r\n')])[1:]
- new.append((h, v))
- headers._headers = new
- return headers
-
- def getheaders(self, key):
- return self.get_all(key)
-
- def getheader(self, key, default=None):
- return self.get(key, failobj=default)
-
-
-class HTTPResponse(object):
- """Response from an HTTP server.
-
- The response will continue to load as available. If you need the
- complete response before continuing, check the .complete() method.
- """
- def __init__(self, sock, timeout, method):
- self.sock = sock
- self.method = method
- self.raw_response = b''
- self._headers_len = 0
- self.headers = None
- self.will_close = False
- self.status_line = b''
- self.status = None
- self.continued = False
- self.http_version = None
- self.reason = None
- self._reader = None
-
- self._read_location = 0
- self._eol = EOL
-
- self._timeout = timeout
-
- @property
- def _end_headers(self):
- return self._eol * 2
-
- def complete(self):
- """Returns true if this response is completely loaded.
-
- Note that if this is a connection where complete means the
- socket is closed, this will nearly always return False, even
- in cases where all the data has actually been loaded.
- """
- if self._reader:
- return self._reader.done()
-
- def _close(self):
- if self._reader is not None:
- # We're a friend of the reader class here.
- # pylint: disable=W0212
- self._reader._close()
-
- def getheader(self, header, default=None):
- return self.headers.getheader(header, default=default)
-
- def getheaders(self):
- if sys.version_info < (3, 0):
- return [(k.lower(), v) for k, v in self.headers.items()]
- # Starting in Python 3, headers aren't lowercased before being
- # returned here.
- return self.headers.items()
-
- def readline(self):
- """Read a single line from the response body.
-
- This may block until either a line ending is found or the
- response is complete.
- """
- blocks = []
- while True:
- self._reader.readto(b'\n', blocks)
-
- if blocks and blocks[-1][-1:] == b'\n' or self.complete():
- break
-
- self._select()
-
- return b''.join(blocks)
-
- def read(self, length=None):
- """Read data from the response body."""
- # if length is None, unbounded read
- while (not self.complete() # never select on a finished read
- and (not length # unbounded, so we wait for complete()
- or length > self._reader.available_data)):
- self._select()
- if not length:
- length = self._reader.available_data
- r = self._reader.read(length)
- if self.complete() and self.will_close:
- self.sock.close()
- return r
-
- def _select(self):
- r, unused_write, unused_err = select.select(
- [self.sock], [], [], self._timeout)
- if not r:
- # socket was not readable. If the response is not
- # complete, raise a timeout.
- if not self.complete():
- logger.info('timed out with timeout of %s', self._timeout)
- raise HTTPTimeoutException('timeout reading data')
- try:
- data = self.sock.recv(INCOMING_BUFFER_SIZE)
- except ssl.SSLError as e:
- if e.args[0] != ssl.SSL_ERROR_WANT_READ:
- raise
- logger.debug('SSL_ERROR_WANT_READ in _select, should retry later')
- return True
- logger.debug('response read %d data during _select', len(data))
- # If the socket was readable and no data was read, that means
- # the socket was closed. Inform the reader (if any) so it can
- # raise an exception if this is an invalid situation.
- if not data:
- if self._reader:
- # We're a friend of the reader class here.
- # pylint: disable=W0212
- self._reader._close()
- return False
- else:
- self._load_response(data)
- return True
-
- # This method gets replaced by _load later, which confuses pylint.
- def _load_response(self, data): # pylint: disable=E0202
- # Being here implies we're not at the end of the headers yet,
- # since at the end of this method if headers were completely
- # loaded we replace this method with the load() method of the
- # reader we created.
- self.raw_response += data
- # This is a bogus server with bad line endings
- if self._eol not in self.raw_response:
- for bad_eol in (b'\n', b'\r'):
- if (bad_eol in self.raw_response
- # verify that bad_eol is not the end of the incoming data
- # as this could be a response line that just got
- # split between \r and \n.
- and (self.raw_response.index(bad_eol) <
- (len(self.raw_response) - 1))):
- logger.info('bogus line endings detected, '
- 'using %r for EOL', bad_eol)
- self._eol = bad_eol
- break
- # exit early if not at end of headers
- if self._end_headers not in self.raw_response or self.headers:
- return
-
- # handle 100-continue response
- hdrs, body = self.raw_response.split(self._end_headers, 1)
- unused_http_ver, status = hdrs.split(b' ', 1)
- if status.startswith(b'100'):
- self.raw_response = body
- self.continued = True
- logger.debug('continue seen, setting body to %r', body)
- return
-
- # arriving here means we should parse response headers
- # as all headers have arrived completely
- hdrs, body = self.raw_response.split(self._end_headers, 1)
- del self.raw_response
- if self._eol in hdrs:
- self.status_line, hdrs = hdrs.split(self._eol, 1)
- else:
- self.status_line = hdrs
- hdrs = b''
- # TODO HTTP < 1.0 support
- (self.http_version, self.status,
- self.reason) = self.status_line.split(b' ', 2)
- self.status = int(self.status)
- if self._eol != EOL:
- hdrs = hdrs.replace(self._eol, b'\r\n')
- headers = _CompatMessage.from_string(hdrs)
- content_len = None
- if HDR_CONTENT_LENGTH in headers:
- content_len = int(headers[HDR_CONTENT_LENGTH])
- if self.http_version == HTTP_VER_1_0:
- self.will_close = True
- elif HDR_CONNECTION_CTRL in headers:
- self.will_close = (
- headers[HDR_CONNECTION_CTRL].lower() == CONNECTION_CLOSE)
- if (HDR_XFER_ENCODING in headers
- and headers[HDR_XFER_ENCODING].lower() == XFER_ENCODING_CHUNKED):
- self._reader = _readers.ChunkedReader(self._eol)
- logger.debug('using a chunked reader')
- else:
- # HEAD responses are forbidden from returning a body, and
- # it's implausible for a CONNECT response to use
- # close-is-end logic for an OK response.
- if (self.method == b'HEAD' or
- (self.method == b'CONNECT' and content_len is None)):
- content_len = 0
- if content_len is not None:
- logger.debug('using a content-length reader with length %d',
- content_len)
- self._reader = _readers.ContentLengthReader(content_len)
- else:
- # Response body had no length specified and is not
- # chunked, so the end of the body will only be
- # identifiable by the termination of the socket by the
- # server. My interpretation of the spec means that we
- # are correct in hitting this case if
- # transfer-encoding, content-length, and
- # connection-control were left unspecified.
- self._reader = _readers.CloseIsEndReader()
- logger.debug('using a close-is-end reader')
- self.will_close = True
-
- if body:
- # We're a friend of the reader class here.
- # pylint: disable=W0212
- self._reader._load(body)
- logger.debug('headers complete')
- self.headers = headers
- # We're a friend of the reader class here.
- # pylint: disable=W0212
- self._load_response = self._reader._load
-
-def _foldheaders(headers):
- """Given some headers, rework them so we can safely overwrite values.
-
- >>> _foldheaders({'Accept-Encoding': 'wat'})
- {'accept-encoding': ('Accept-Encoding', 'wat')}
- """
- return dict((k.lower(), (k, v)) for k, v in headers.items())
-
-try:
- inspect.signature
- def _handlesarg(func, arg):
- """ Try to determine if func accepts arg
-
- If it takes arg, return True
- If it happens to take **args, then it could do anything:
- * It could throw a different TypeError, just for fun
- * It could throw an ArgumentError or anything else
- * It could choose not to throw an Exception at all
- ... return 'unknown'
-
- Otherwise, return False
- """
- params = inspect.signature(func).parameters
- if arg in params:
- return True
- for p in params:
- if params[p].kind == inspect._ParameterKind.VAR_KEYWORD:
- return 'unknown'
- return False
-except AttributeError:
- def _handlesarg(func, arg):
- """ Try to determine if func accepts arg
-
- If it takes arg, return True
- If it happens to take **args, then it could do anything:
- * It could throw a different TypeError, just for fun
- * It could throw an ArgumentError or anything else
- * It could choose not to throw an Exception at all
- ... return 'unknown'
-
- Otherwise, return False
- """
- spec = inspect.getargspec(func)
- if arg in spec.args:
- return True
- if spec.keywords:
- return 'unknown'
- return False
-
-class HTTPConnection(object):
- """Connection to a single http server.
-
- Supports 100-continue and keepalives natively. Uses select() for
- non-blocking socket operations.
- """
- http_version = HTTP_VER_1_1
- response_class = HTTPResponse
-
- def __init__(self, host, port=None, use_ssl=None, ssl_validator=None,
- timeout=TIMEOUT_DEFAULT,
- continue_timeout=TIMEOUT_ASSUME_CONTINUE,
- proxy_hostport=None, proxy_headers=None,
- ssl_wrap_socket=None, **ssl_opts):
- """Create a new HTTPConnection.
-
- Args:
- host: The host to which we'll connect.
- port: Optional. The port over which we'll connect. Default 80 for
- non-ssl, 443 for ssl.
- use_ssl: Optional. Whether to use ssl. Defaults to False if port is
- not 443, true if port is 443.
- ssl_validator: a function(socket) to validate the ssl cert
- timeout: Optional. Connection timeout, default is TIMEOUT_DEFAULT.
- continue_timeout: Optional. Timeout for waiting on an expected
- "100 Continue" response. Default is TIMEOUT_ASSUME_CONTINUE.
- proxy_hostport: Optional. Tuple of (host, port) to use as an http
- proxy for the connection. Default is to not use a proxy.
- proxy_headers: Optional dict of header keys and values to send to
- a proxy when using CONNECT. For compatibility with
- httplib, the Proxy-Authorization header may be
- specified in headers for request(), which will clobber
- any such header specified here if specified. Providing
- this option and not proxy_hostport will raise an
- ValueError.
- ssl_wrap_socket: Optional function to use for wrapping
- sockets. If unspecified, the one from the ssl module will
- be used if available, or something that's compatible with
- it if on a Python older than 2.6.
-
- Any extra keyword arguments to this function will be provided
- to the ssl_wrap_socket method. If no ssl
- """
- host = _ensurebytes(host)
- if port is None and host.count(b':') == 1 or b']:' in host:
- host, port = host.rsplit(b':', 1)
- port = int(port)
- if b'[' in host:
- host = host[1:-1]
- if ssl_wrap_socket is not None:
- _wrap_socket = ssl_wrap_socket
- else:
- _wrap_socket = ssl.wrap_socket
- call_wrap_socket = None
- handlesubar = _handlesarg(_wrap_socket, 'server_hostname')
- if handlesubar is True:
- # supports server_hostname
- call_wrap_socket = _wrap_socket
- handlesnobar = _handlesarg(_wrap_socket, 'serverhostname')
- if handlesnobar is True and handlesubar is not True:
- # supports serverhostname
- def call_wrap_socket(sock, server_hostname=None, **ssl_opts):
- return _wrap_socket(sock, serverhostname=server_hostname,
- **ssl_opts)
- if handlesubar is False and handlesnobar is False:
- # does not support either
- def call_wrap_socket(sock, server_hostname=None, **ssl_opts):
- return _wrap_socket(sock, **ssl_opts)
- if call_wrap_socket is None:
- # we assume it takes **args
- def call_wrap_socket(sock, **ssl_opts):
- if 'server_hostname' in ssl_opts:
- ssl_opts['serverhostname'] = ssl_opts['server_hostname']
- return _wrap_socket(sock, **ssl_opts)
- self._ssl_wrap_socket = call_wrap_socket
- if use_ssl is None and port is None:
- use_ssl = False
- port = 80
- elif use_ssl is None:
- use_ssl = (port == 443)
- elif port is None:
- port = (use_ssl and 443 or 80)
- self.port = port
- self.ssl = use_ssl
- self.ssl_opts = ssl_opts
- self._ssl_validator = ssl_validator
- self.host = host
- self.sock = None
- self._current_response = None
- self._current_response_taken = False
- if proxy_hostport is None:
- self._proxy_host = self._proxy_port = None
- if proxy_headers:
- raise ValueError(
- 'proxy_headers may not be specified unless '
- 'proxy_hostport is also specified.')
- else:
- self._proxy_headers = {}
- else:
- self._proxy_host, self._proxy_port = proxy_hostport
- self._proxy_headers = _foldheaders(proxy_headers or {})
-
- self.timeout = timeout
- self.continue_timeout = continue_timeout
-
- def _connect(self, proxy_headers):
- """Connect to the host and port specified in __init__."""
- if self.sock:
- return
- if self._proxy_host is not None:
- logger.info('Connecting to http proxy %s:%s',
- self._proxy_host, self._proxy_port)
- sock = socket.create_connection((self._proxy_host,
- self._proxy_port))
- if self.ssl:
- data = self._buildheaders(b'CONNECT', b'%s:%d' % (self.host,
- self.port),
- proxy_headers, HTTP_VER_1_0)
- sock.send(data)
- sock.setblocking(0)
- r = self.response_class(sock, self.timeout, b'CONNECT')
- timeout_exc = HTTPTimeoutException(
- 'Timed out waiting for CONNECT response from proxy')
- while not r.complete():
- try:
- # We're a friend of the response class, so let
- # us use the private attribute.
- # pylint: disable=W0212
- if not r._select():
- if not r.complete():
- raise timeout_exc
- except HTTPTimeoutException:
- # This raise/except pattern looks goofy, but
- # _select can raise the timeout as well as the
- # loop body. I wish it wasn't this convoluted,
- # but I don't have a better solution
- # immediately handy.
- raise timeout_exc
- if r.status != 200:
- raise HTTPProxyConnectFailedException(
- 'Proxy connection failed: %d %s' % (r.status,
- r.read()))
- logger.info('CONNECT (for SSL) to %s:%s via proxy succeeded.',
- self.host, self.port)
- else:
- sock = socket.create_connection((self.host, self.port))
- if self.ssl:
- # This is the default, but in the case of proxied SSL
- # requests the proxy logic above will have cleared
- # blocking mode, so re-enable it just to be safe.
- sock.setblocking(1)
- logger.debug('wrapping socket for ssl with options %r',
- self.ssl_opts)
- sock = self._ssl_wrap_socket(sock, server_hostname=self.host,
- **self.ssl_opts)
- if self._ssl_validator:
- self._ssl_validator(sock)
- sock.setblocking(0)
- self.sock = sock
-
- def _buildheaders(self, method, path, headers, http_ver):
- if self.ssl and self.port == 443 or self.port == 80:
- # default port for protocol, so leave it out
- hdrhost = self.host
- else:
- # include nonstandard port in header
- if b':' in self.host: # must be IPv6
- hdrhost = b'[%s]:%d' % (self.host, self.port)
- else:
- hdrhost = b'%s:%d' % (self.host, self.port)
- if self._proxy_host and not self.ssl:
- # When talking to a regular http proxy we must send the
- # full URI, but in all other cases we must not (although
- # technically RFC 2616 says servers must accept our
- # request if we screw up, experimentally few do that
- # correctly.)
- assert path[0:1] == b'/', 'path must start with a /'
- path = b'http://%s%s' % (hdrhost, path)
- outgoing = [b'%s %s %s%s' % (method, path, http_ver, EOL)]
- headers[b'host'] = (b'Host', hdrhost)
- headers[HDR_ACCEPT_ENCODING] = (HDR_ACCEPT_ENCODING, 'identity')
- for hdr, val in sorted((_ensurebytes(h), _ensurebytes(v))
- for h, v in headers.values()):
- outgoing.append(b'%s: %s%s' % (hdr, val, EOL))
- outgoing.append(EOL)
- return b''.join(outgoing)
-
- def close(self):
- """Close the connection to the server.
-
- This is a no-op if the connection is already closed. The
- connection may automatically close if requested by the server
- or required by the nature of a response.
- """
- if self.sock is None:
- return
- self.sock.close()
- self.sock = None
- logger.info('closed connection to %s on %s', self.host, self.port)
-
- def busy(self):
- """Returns True if this connection object is currently in use.
-
- If a response is still pending, this will return True, even if
- the request has finished sending. In the future,
- HTTPConnection may transparently juggle multiple connections
- to the server, in which case this will be useful to detect if
- any of those connections is ready for use.
- """
- cr = self._current_response
- if cr is not None:
- if self._current_response_taken:
- if cr.will_close:
- self.sock = None
- self._current_response = None
- return False
- elif cr.complete():
- self._current_response = None
- return False
- return True
- return False
-
- def _reconnect(self, where, pheaders):
- logger.info('reconnecting during %s', where)
- self.close()
- self._connect(pheaders)
-
- def request(self, method, path, body=None, headers=None,
- expect_continue=False):
- """Send a request to the server.
-
- For increased flexibility, this does not return the response
- object. Future versions of HTTPConnection that juggle multiple
- sockets will be able to send (for example) 5 requests all at
- once, and then let the requests arrive as data is
- available. Use the `getresponse()` method to retrieve the
- response.
- """
- if headers is None:
- headers = {}
- method = _ensurebytes(method)
- path = _ensurebytes(path)
- if self.busy():
- raise httplib.CannotSendRequest(
- 'Can not send another request before '
- 'current response is read!')
- self._current_response_taken = False
-
- logger.info('sending %s request for %s to %s on port %s',
- method, path, self.host, self.port)
-
- hdrs = _foldheaders(headers)
- # Figure out headers that have to be computed from the request
- # body.
- chunked = False
- if body and HDR_CONTENT_LENGTH not in hdrs:
- if getattr(body, '__len__', False):
- hdrs[HDR_CONTENT_LENGTH] = (HDR_CONTENT_LENGTH,
- b'%d' % len(body))
- elif getattr(body, 'read', False):
- hdrs[HDR_XFER_ENCODING] = (HDR_XFER_ENCODING,
- XFER_ENCODING_CHUNKED)
- chunked = True
- else:
- raise BadRequestData('body has no __len__() nor read()')
- # Figure out expect-continue header
- if hdrs.get('expect', ('', ''))[1].lower() == b'100-continue':
- expect_continue = True
- elif expect_continue:
- hdrs['expect'] = (b'Expect', b'100-Continue')
- # httplib compatibility: if the user specified a
- # proxy-authorization header, that's actually intended for a
- # proxy CONNECT action, not the real request, but only if
- # we're going to use a proxy.
- pheaders = dict(self._proxy_headers)
- if self._proxy_host and self.ssl:
- pa = hdrs.pop('proxy-authorization', None)
- if pa is not None:
- pheaders['proxy-authorization'] = pa
- # Build header data
- outgoing_headers = self._buildheaders(
- method, path, hdrs, self.http_version)
-
- # If we're reusing the underlying socket, there are some
- # conditions where we'll want to retry, so make a note of the
- # state of self.sock
- fresh_socket = self.sock is None
- self._connect(pheaders)
- response = None
- first = True
-
- while ((outgoing_headers or body)
- and not (response and response.complete())):
- select_timeout = self.timeout
- out = outgoing_headers or body
- blocking_on_continue = False
- if expect_continue and not outgoing_headers and not (
- response and (response.headers or response.continued)):
- logger.info(
- 'waiting up to %s seconds for'
- ' continue response from server',
- self.continue_timeout)
- select_timeout = self.continue_timeout
- blocking_on_continue = True
- out = False
- if out:
- w = [self.sock]
- else:
- w = []
- r, w, x = select.select([self.sock], w, [], select_timeout)
- # if we were expecting a 100 continue and it's been long
- # enough, just go ahead and assume it's ok. This is the
- # recommended behavior from the RFC.
- if r == w == x == []:
- if blocking_on_continue:
- expect_continue = False
- logger.info('no response to continue expectation from '
- 'server, optimistically sending request body')
- else:
- raise HTTPTimeoutException('timeout sending data')
- was_first = first
-
- # incoming data
- if r:
- try:
- try:
- data = r[0].recv(INCOMING_BUFFER_SIZE)
- except ssl.SSLError as e:
- if e.args[0] != ssl.SSL_ERROR_WANT_READ:
- raise
- logger.debug('SSL_ERROR_WANT_READ while sending '
- 'data, retrying...')
- continue
- if not data:
- logger.info('socket appears closed in read')
- self.sock = None
- self._current_response = None
- if response is not None:
- # We're a friend of the response class, so let
- # us use the private attribute.
- # pylint: disable=W0212
- response._close()
- # This if/elif ladder is a bit subtle,
- # comments in each branch should help.
- if response is not None and response.complete():
- # Server responded completely and then
- # closed the socket. We should just shut
- # things down and let the caller get their
- # response.
- logger.info('Got an early response, '
- 'aborting remaining request.')
- break
- elif was_first and response is None:
- # Most likely a keepalive that got killed
- # on the server's end. Commonly happens
- # after getting a really large response
- # from the server.
- logger.info(
- 'Connection appeared closed in read on first'
- ' request loop iteration, will retry.')
- self._reconnect('read', pheaders)
- continue
- else:
- # We didn't just send the first data hunk,
- # and either have a partial response or no
- # response at all. There's really nothing
- # meaningful we can do here.
- raise HTTPStateError(
- 'Connection appears closed after '
- 'some request data was written, but the '
- 'response was missing or incomplete!')
- logger.debug('read %d bytes in request()', len(data))
- if response is None:
- response = self.response_class(
- r[0], self.timeout, method)
- # We're a friend of the response class, so let us
- # use the private attribute.
- # pylint: disable=W0212
- response._load_response(data)
- # Jump to the next select() call so we load more
- # data if the server is still sending us content.
- continue
- except socket.error as e:
- if e[0] != errno.EPIPE and not was_first:
- raise
-
- # outgoing data
- if w and out:
- try:
- if getattr(out, 'read', False):
- # pylint guesses the type of out incorrectly here
- # pylint: disable=E1103
- data = out.read(OUTGOING_BUFFER_SIZE)
- if not data:
- continue
- if len(data) < OUTGOING_BUFFER_SIZE:
- if chunked:
- body = b'0' + EOL + EOL
- else:
- body = None
- if chunked:
- # This encode is okay because we know
- # hex() is building us only 0-9 and a-f
- # digits.
- asciilen = hex(len(data))[2:].encode('ascii')
- out = asciilen + EOL + data + EOL
- else:
- out = data
- amt = w[0].send(out)
- except socket.error as e:
- if e[0] == ssl.SSL_ERROR_WANT_WRITE and self.ssl:
- # This means that SSL hasn't flushed its buffer into
- # the socket yet.
- # TODO: find a way to block on ssl flushing its buffer
- # similar to selecting on a raw socket.
- continue
- if e[0] == errno.EWOULDBLOCK or e[0] == errno.EAGAIN:
- continue
- elif (e[0] not in (errno.ECONNRESET, errno.EPIPE)
- and not first):
- raise
- self._reconnect('write', pheaders)
- amt = self.sock.send(out)
- logger.debug('sent %d', amt)
- first = False
- if out is body:
- body = out[amt:]
- else:
- outgoing_headers = out[amt:]
- # End of request-sending loop.
-
- # close if the server response said to or responded before eating
- # the whole request
- if response is None:
- response = self.response_class(self.sock, self.timeout, method)
- if not fresh_socket:
- if not response._select():
- # This means the response failed to get any response
- # data at all, and in all probability the socket was
- # closed before the server even saw our request. Try
- # the request again on a fresh socket.
- logger.debug('response._select() failed during request().'
- ' Assuming request needs to be retried.')
- self.sock = None
- # Call this method explicitly to re-try the
- # request. We don't use self.request() because
- # some tools (notably Mercurial) expect to be able
- # to subclass and redefine request(), and they
- # don't have the same argspec as we do.
- #
- # TODO restructure sending of requests to avoid
- # this recursion
- return HTTPConnection.request(
- self, method, path, body=body, headers=headers,
- expect_continue=expect_continue)
- data_left = bool(outgoing_headers or body)
- if data_left:
- logger.info('stopped sending request early, '
- 'will close the socket to be safe.')
- response.will_close = True
- if response.will_close:
- # The socket will be closed by the response, so we disown
- # the socket
- self.sock = None
- self._current_response = response
-
- def getresponse(self):
- """Returns the response to the most recent request."""
- if self._current_response is None:
- raise httplib.ResponseNotReady()
- r = self._current_response
- while r.headers is None:
- # We're a friend of the response class, so let us use the
- # private attribute.
- # pylint: disable=W0212
- if not r._select() and not r.complete():
- raise _readers.HTTPRemoteClosedError()
- if r.will_close:
- self.sock = None
- self._current_response = None
- elif r.complete():
- self._current_response = None
- else:
- self._current_response_taken = True
- return r
-
-
-class HTTPTimeoutException(httplib.HTTPException):
- """A timeout occurred while waiting on the server."""
-
-
-class BadRequestData(httplib.HTTPException):
- """Request body object has neither __len__ nor read."""
-
-
-class HTTPProxyConnectFailedException(httplib.HTTPException):
- """Connecting to the HTTP proxy failed."""
-
-
-class HTTPStateError(httplib.HTTPException):
- """Invalid internal state encountered."""
-
-# Forward this exception type from _readers since it needs to be part
-# of the public API.
-HTTPRemoteClosedError = _readers.HTTPRemoteClosedError
-# no-check-code
--- a/mercurial/httpclient/_readers.py Thu Mar 15 22:35:07 2018 -0700
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,239 +0,0 @@
-# Copyright 2011, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-"""Reader objects to abstract out different body response types.
-
-This module is package-private. It is not expected that these will
-have any clients outside of httpplus.
-"""
-from __future__ import absolute_import
-
-try:
- import httplib
- httplib.HTTPException
-except ImportError:
- import http.client as httplib
-
-import logging
-
-logger = logging.getLogger(__name__)
-
-
-class ReadNotReady(Exception):
- """Raised when read() is attempted but not enough data is loaded."""
-
-
-class HTTPRemoteClosedError(httplib.HTTPException):
- """The server closed the remote socket in the middle of a response."""
-
-
-class AbstractReader(object):
- """Abstract base class for response readers.
-
- Subclasses must implement _load, and should implement _close if
- it's not an error for the server to close their socket without
- some termination condition being detected during _load.
- """
- def __init__(self):
- self._finished = False
- self._done_chunks = []
- self.available_data = 0
-
- def _addchunk(self, data):
- self._done_chunks.append(data)
- self.available_data += len(data)
-
- def _pushchunk(self, data):
- self._done_chunks.insert(0, data)
- self.available_data += len(data)
-
- def _popchunk(self):
- b = self._done_chunks.pop(0)
- self.available_data -= len(b)
-
- return b
-
- def done(self):
- """Returns true if the response body is entirely read."""
- return self._finished
-
- def read(self, amt):
- """Read amt bytes from the response body."""
- if self.available_data < amt and not self._finished:
- raise ReadNotReady()
- blocks = []
- need = amt
- while self._done_chunks:
- b = self._popchunk()
- if len(b) > need:
- nb = b[:need]
- self._pushchunk(b[need:])
- b = nb
- blocks.append(b)
- need -= len(b)
- if need == 0:
- break
- result = b''.join(blocks)
- assert len(result) == amt or (self._finished and len(result) < amt)
-
- return result
-
- def readto(self, delimstr, blocks = None):
- """return available data chunks up to the first one in which
- delimstr occurs. No data will be returned after delimstr --
- the chunk in which it occurs will be split and the remainder
- pushed back onto the available data queue. If blocks is
- supplied chunks will be added to blocks, otherwise a new list
- will be allocated.
- """
- if blocks is None:
- blocks = []
-
- while self._done_chunks:
- b = self._popchunk()
- i = b.find(delimstr) + len(delimstr)
- if i:
- if i < len(b):
- self._pushchunk(b[i:])
- blocks.append(b[:i])
- break
- else:
- blocks.append(b)
-
- return blocks
-
- def _load(self, data): # pragma: no cover
- """Subclasses must implement this.
-
- As data is available to be read out of this object, it should
- be placed into the _done_chunks list. Subclasses should not
- rely on data remaining in _done_chunks forever, as it may be
- reaped if the client is parsing data as it comes in.
- """
- raise NotImplementedError
-
- def _close(self):
- """Default implementation of close.
-
- The default implementation assumes that the reader will mark
- the response as finished on the _finished attribute once the
- entire response body has been read. In the event that this is
- not true, the subclass should override the implementation of
- close (for example, close-is-end responses have to set
- self._finished in the close handler.)
- """
- if not self._finished:
- raise HTTPRemoteClosedError(
- 'server appears to have closed the socket mid-response')
-
-
-class AbstractSimpleReader(AbstractReader):
- """Abstract base class for simple readers that require no response decoding.
-
- Examples of such responses are Connection: Close (close-is-end)
- and responses that specify a content length.
- """
- def _load(self, data):
- if data:
- assert not self._finished, (
- 'tried to add data (%r) to a closed reader!' % data)
- logger.debug('%s read an additional %d data',
- self.name, len(data)) # pylint: disable=E1101
- self._addchunk(data)
-
-
-class CloseIsEndReader(AbstractSimpleReader):
- """Reader for responses that specify Connection: Close for length."""
- name = 'close-is-end'
-
- def _close(self):
- logger.info('Marking close-is-end reader as closed.')
- self._finished = True
-
-
-class ContentLengthReader(AbstractSimpleReader):
- """Reader for responses that specify an exact content length."""
- name = 'content-length'
-
- def __init__(self, amount):
- AbstractSimpleReader.__init__(self)
- self._amount = amount
- if amount == 0:
- self._finished = True
- self._amount_seen = 0
-
- def _load(self, data):
- AbstractSimpleReader._load(self, data)
- self._amount_seen += len(data)
- if self._amount_seen >= self._amount:
- self._finished = True
- logger.debug('content-length read complete')
-
-
-class ChunkedReader(AbstractReader):
- """Reader for chunked transfer encoding responses."""
- def __init__(self, eol):
- AbstractReader.__init__(self)
- self._eol = eol
- self._leftover_skip_amt = 0
- self._leftover_data = ''
-
- def _load(self, data):
- assert not self._finished, 'tried to add data to a closed reader!'
- logger.debug('chunked read an additional %d data', len(data))
- position = 0
- if self._leftover_data:
- logger.debug(
- 'chunked reader trying to finish block from leftover data')
- # TODO: avoid this string concatenation if possible
- data = self._leftover_data + data
- position = self._leftover_skip_amt
- self._leftover_data = ''
- self._leftover_skip_amt = 0
- datalen = len(data)
- while position < datalen:
- split = data.find(self._eol, position)
- if split == -1:
- self._leftover_data = data
- self._leftover_skip_amt = position
- return
- amt = int(data[position:split], base=16)
- block_start = split + len(self._eol)
- # If the whole data chunk plus the eol trailer hasn't
- # loaded, we'll wait for the next load.
- if block_start + amt + len(self._eol) > len(data):
- self._leftover_data = data
- self._leftover_skip_amt = position
- return
- if amt == 0:
- self._finished = True
- logger.debug('closing chunked reader due to chunk of length 0')
- return
- self._addchunk(data[block_start:block_start + amt])
- position = block_start + amt + len(self._eol)
-# no-check-code
--- a/mercurial/httpconnection.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/httpconnection.py Mon Mar 19 08:07:18 2018 -0700
@@ -10,15 +10,11 @@
from __future__ import absolute_import
-import logging
import os
-import socket
from .i18n import _
from . import (
- httpclient,
- sslutil,
- urllibcompat,
+ pycompat,
util,
)
@@ -67,6 +63,7 @@
# moved here from url.py to avoid a cycle
def readauthforuri(ui, uri, user):
+ uri = pycompat.bytesurl(uri)
# Read configuration
groups = {}
for key, val in ui.configitems('auth'):
@@ -110,190 +107,3 @@
if user and not bestuser:
auth['username'] = user
return bestauth
-
-# Mercurial (at least until we can remove the old codepath) requires
-# that the http response object be sufficiently file-like, so we
-# provide a close() method here.
-class HTTPResponse(httpclient.HTTPResponse):
- def close(self):
- pass
-
-class HTTPConnection(httpclient.HTTPConnection):
- response_class = HTTPResponse
- def request(self, method, uri, body=None, headers=None):
- if headers is None:
- headers = {}
- if isinstance(body, httpsendfile):
- body.seek(0)
- httpclient.HTTPConnection.request(self, method, uri, body=body,
- headers=headers)
-
-
-_configuredlogging = False
-LOGFMT = '%(levelname)s:%(name)s:%(lineno)d:%(message)s'
-# Subclass BOTH of these because otherwise urllib2 "helpfully"
-# reinserts them since it notices we don't include any subclasses of
-# them.
-class http2handler(urlreq.httphandler, urlreq.httpshandler):
- def __init__(self, ui, pwmgr):
- global _configuredlogging
- urlreq.abstracthttphandler.__init__(self)
- self.ui = ui
- self.pwmgr = pwmgr
- self._connections = {}
- # developer config: ui.http2debuglevel
- loglevel = ui.config('ui', 'http2debuglevel')
- if loglevel and not _configuredlogging:
- _configuredlogging = True
- logger = logging.getLogger('mercurial.httpclient')
- logger.setLevel(getattr(logging, loglevel.upper()))
- handler = logging.StreamHandler()
- handler.setFormatter(logging.Formatter(LOGFMT))
- logger.addHandler(handler)
-
- def close_all(self):
- """Close and remove all connection objects being kept for reuse."""
- for openconns in self._connections.values():
- for conn in openconns:
- conn.close()
- self._connections = {}
-
- # shamelessly borrowed from urllib2.AbstractHTTPHandler
- def do_open(self, http_class, req, use_ssl):
- """Return an addinfourl object for the request, using http_class.
-
- http_class must implement the HTTPConnection API from httplib.
- The addinfourl return value is a file-like object. It also
- has methods and attributes including:
- - info(): return a mimetools.Message object for the headers
- - geturl(): return the original request URL
- - code: HTTP status code
- """
- # If using a proxy, the host returned by get_host() is
- # actually the proxy. On Python 2.6.1, the real destination
- # hostname is encoded in the URI in the urllib2 request
- # object. On Python 2.6.5, it's stored in the _tunnel_host
- # attribute which has no accessor.
- tunhost = getattr(req, '_tunnel_host', None)
- host = urllibcompat.gethost(req)
- if tunhost:
- proxyhost = host
- host = tunhost
- elif req.has_proxy():
- proxyhost = urllibcompat.gethost(req)
- host = urllibcompat.getselector(
- req).split('://', 1)[1].split('/', 1)[0]
- else:
- proxyhost = None
-
- if proxyhost:
- if ':' in proxyhost:
- # Note: this means we'll explode if we try and use an
- # IPv6 http proxy. This isn't a regression, so we
- # won't worry about it for now.
- proxyhost, proxyport = proxyhost.rsplit(':', 1)
- else:
- proxyport = 3128 # squid default
- proxy = (proxyhost, proxyport)
- else:
- proxy = None
-
- if not host:
- raise urlerr.urlerror('no host given')
-
- connkey = use_ssl, host, proxy
- allconns = self._connections.get(connkey, [])
- conns = [c for c in allconns if not c.busy()]
- if conns:
- h = conns[0]
- else:
- if allconns:
- self.ui.debug('all connections for %s busy, making a new '
- 'one\n' % host)
- timeout = None
- if req.timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
- timeout = req.timeout
- h = http_class(host, timeout=timeout, proxy_hostport=proxy)
- self._connections.setdefault(connkey, []).append(h)
-
- headers = dict(req.headers)
- headers.update(req.unredirected_hdrs)
- headers = dict(
- (name.title(), val) for name, val in headers.items())
- try:
- path = urllibcompat.getselector(req)
- if '://' in path:
- path = path.split('://', 1)[1].split('/', 1)[1]
- if path[0] != '/':
- path = '/' + path
- h.request(req.get_method(), path, req.data, headers)
- r = h.getresponse()
- except socket.error as err: # XXX what error?
- raise urlerr.urlerror(err)
-
- # Pick apart the HTTPResponse object to get the addinfourl
- # object initialized properly.
- r.recv = r.read
-
- resp = urlreq.addinfourl(r, r.headers, urllibcompat.getfullurl(req))
- resp.code = r.status
- resp.msg = r.reason
- return resp
-
- # httplib always uses the given host/port as the socket connect
- # target, and then allows full URIs in the request path, which it
- # then observes and treats as a signal to do proxying instead.
- def http_open(self, req):
- if urllibcompat.getfullurl(req).startswith('https'):
- return self.https_open(req)
- def makehttpcon(*args, **kwargs):
- k2 = dict(kwargs)
- k2[r'use_ssl'] = False
- return HTTPConnection(*args, **k2)
- return self.do_open(makehttpcon, req, False)
-
- def https_open(self, req):
- # urllibcompat.getfullurl(req) does not contain credentials and we may
- # need them to match the certificates.
- url = urllibcompat.getfullurl(req)
- user, password = self.pwmgr.find_stored_password(url)
- res = readauthforuri(self.ui, url, user)
- if res:
- group, auth = res
- self.auth = auth
- self.ui.debug("using auth.%s.* for authentication\n" % group)
- else:
- self.auth = None
- return self.do_open(self._makesslconnection, req, True)
-
- def _makesslconnection(self, host, port=443, *args, **kwargs):
- keyfile = None
- certfile = None
-
- if args: # key_file
- keyfile = args.pop(0)
- if args: # cert_file
- certfile = args.pop(0)
-
- # if the user has specified different key/cert files in
- # hgrc, we prefer these
- if self.auth and 'key' in self.auth and 'cert' in self.auth:
- keyfile = self.auth['key']
- certfile = self.auth['cert']
-
- # let host port take precedence
- if ':' in host and '[' not in host or ']:' in host:
- host, port = host.rsplit(':', 1)
- port = int(port)
- if '[' in host:
- host = host[1:-1]
-
- kwargs[r'keyfile'] = keyfile
- kwargs[r'certfile'] = certfile
-
- con = HTTPConnection(host, port, use_ssl=True,
- ssl_wrap_socket=sslutil.wrapsocket,
- ssl_validator=sslutil.validatesocket,
- ui=self.ui,
- **kwargs)
- return con
--- a/mercurial/httppeer.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/httppeer.py Mon Mar 19 08:07:18 2018 -0700
@@ -16,14 +16,13 @@
import tempfile
from .i18n import _
-from .node import nullid
from . import (
bundle2,
error,
httpconnection,
pycompat,
statichttprepo,
- url,
+ url as urlmod,
util,
wireproto,
)
@@ -135,31 +134,20 @@
self._index = 0
class httppeer(wireproto.wirepeer):
- def __init__(self, ui, path):
+ def __init__(self, ui, path, url, opener):
+ self._ui = ui
self._path = path
+ self._url = url
self._caps = None
- self._urlopener = None
- self._requestbuilder = None
- u = util.url(path)
- if u.query or u.fragment:
- raise error.Abort(_('unsupported URL component: "%s"') %
- (u.query or u.fragment))
-
- # urllib cannot handle URLs with embedded user or passwd
- self._url, authinfo = u.authinfo()
-
- self._ui = ui
- ui.debug('using %s\n' % self._url)
-
- self._urlopener = url.opener(ui, authinfo)
+ self._urlopener = opener
+ # This is an its own attribute to facilitate extensions overriding
+ # the default type.
self._requestbuilder = urlreq.request
def __del__(self):
- urlopener = getattr(self, '_urlopener', None)
- if urlopener:
- for h in urlopener.handlers:
- h.close()
- getattr(h, "close_all", lambda: None)()
+ for h in self._urlopener.handlers:
+ h.close()
+ getattr(h, "close_all", lambda: None)()
def _openurl(self, req):
if (self._ui.debugflag
@@ -222,13 +210,9 @@
# Begin of _basewirepeer interface.
def capabilities(self):
- if self._caps is None:
- try:
- self._fetchcaps()
- except error.RepoError:
- self._caps = set()
- self.ui.debug('capabilities: %s\n' %
- (' '.join(self._caps or ['none'])))
+ # self._fetchcaps() should have been called as part of peer
+ # handshake. So self._caps should always be set.
+ assert self._caps is not None
return self._caps
# End of _basewirepeer interface.
@@ -253,6 +237,8 @@
# with infinite recursion when trying to look up capabilities
# for the first time.
postargsok = self._caps is not None and 'httppostargs' in self._caps
+
+ # Send arguments via POST.
if postargsok and args:
strargs = urlreq.urlencode(sorted(args.items()))
if not data:
@@ -266,11 +252,16 @@
argsio.length = len(strargs)
data = _multifile(argsio, data)
headers[r'X-HgArgs-Post'] = len(strargs)
- else:
- if len(args) > 0:
- httpheader = self.capable('httpheader')
- if httpheader:
- headersize = int(httpheader.split(',', 1)[0])
+ elif args:
+ # Calling self.capable() can infinite loop if we are calling
+ # "capabilities". But that command should never accept wire
+ # protocol arguments. So this should never happen.
+ assert cmd != 'capabilities'
+ httpheader = self.capable('httpheader')
+ if httpheader:
+ headersize = int(httpheader.split(',', 1)[0])
+
+ # Send arguments via HTTP headers.
if headersize > 0:
# The headers can typically carry more data than the URL.
encargs = urlreq.urlencode(sorted(args.items()))
@@ -278,8 +269,10 @@
headersize):
headers[header] = value
varyheaders.append(header)
+ # Send arguments via query string (Mercurial <1.9).
else:
q += sorted(args.items())
+
qs = '?%s' % urlreq.urlencode(q)
cu = "%s%s" % (self._url, qs)
size = 0
@@ -287,9 +280,6 @@
size = data.length
elif data is not None:
size = len(data)
- if size and self.ui.configbool('ui', 'usehttp2'):
- headers[r'Expect'] = r'100-Continue'
- headers[r'X-HgHttp2'] = r'1'
if data is not None and r'Content-Type' not in headers:
headers[r'Content-Type'] = r'application/mercurial-0.1'
@@ -330,8 +320,8 @@
req = self._requestbuilder(pycompat.strurl(cu), data, headers)
if data is not None:
- self.ui.debug("sending %s bytes\n" % size)
- req.add_unredirected_header('Content-Length', '%d' % size)
+ self.ui.debug("sending %d bytes\n" % size)
+ req.add_unredirected_header(r'Content-Length', r'%d' % size)
try:
resp = self._openurl(req)
except urlerr.httperror as inst:
@@ -430,7 +420,7 @@
tempname = bundle2.writebundle(self.ui, cg, None, type)
fp = httpconnection.httpsendfile(self.ui, tempname, "rb")
- headers = {'Content-Type': 'application/mercurial-0.1'}
+ headers = {r'Content-Type': r'application/mercurial-0.1'}
try:
r = self._call(cmd, data=fp, headers=headers, **args)
@@ -438,6 +428,11 @@
if len(vals) < 2:
raise error.ResponseError(_("unexpected response:"), r)
return vals
+ except urlerr.httperror:
+ # Catch and re-raise these so we don't try and treat them
+ # like generic socket errors. They lack any values in
+ # .args on Python 3 which breaks our socket.error block.
+ raise
except socket.error as err:
if err.args[0] in (errno.ECONNRESET, errno.EPIPE):
raise error.Abort(_('push failed: %s') % err.args[1])
@@ -453,7 +448,7 @@
try:
# dump bundle to disk
fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
- fh = os.fdopen(fd, pycompat.sysstr("wb"))
+ fh = os.fdopen(fd, r"wb")
d = fp.read(4096)
while d:
fh.write(d)
@@ -461,7 +456,7 @@
fh.close()
# start http push
fp_ = httpconnection.httpsendfile(self.ui, filename, "rb")
- headers = {'Content-Type': 'application/mercurial-0.1'}
+ headers = {r'Content-Type': r'application/mercurial-0.1'}
return self._callstream(cmd, data=fp_, headers=headers, **args)
finally:
if fp_ is not None:
@@ -476,28 +471,31 @@
def _abort(self, exception):
raise exception
-class httpspeer(httppeer):
- def __init__(self, ui, path):
- if not url.has_https:
- raise error.Abort(_('Python support for SSL and HTTPS '
- 'is not installed'))
- httppeer.__init__(self, ui, path)
+def makepeer(ui, path):
+ u = util.url(path)
+ if u.query or u.fragment:
+ raise error.Abort(_('unsupported URL component: "%s"') %
+ (u.query or u.fragment))
+
+ # urllib cannot handle URLs with embedded user or passwd.
+ url, authinfo = u.authinfo()
+ ui.debug('using %s\n' % url)
+
+ opener = urlmod.opener(ui, authinfo)
+
+ return httppeer(ui, path, url, opener)
def instance(ui, path, create):
if create:
raise error.Abort(_('cannot create new http repository'))
try:
- if path.startswith('https:'):
- inst = httpspeer(ui, path)
- else:
- inst = httppeer(ui, path)
- try:
- # Try to do useful work when checking compatibility.
- # Usually saves a roundtrip since we want the caps anyway.
- inst._fetchcaps()
- except error.RepoError:
- # No luck, try older compatibility check.
- inst.between([(nullid, nullid)])
+ if path.startswith('https:') and not urlmod.has_https:
+ raise error.Abort(_('Python support for SSL and HTTPS '
+ 'is not installed'))
+
+ inst = makepeer(ui, path)
+ inst._fetchcaps()
+
return inst
except error.RepoError as httpexception:
try:
--- a/mercurial/i18n.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/i18n.py Mon Mar 19 08:07:18 2018 -0700
@@ -50,8 +50,8 @@
def setdatapath(datapath):
datapath = pycompat.fsdecode(datapath)
- localedir = os.path.join(datapath, pycompat.sysstr('locale'))
- t = gettextmod.translation('hg', localedir, _languages, fallback=True)
+ localedir = os.path.join(datapath, r'locale')
+ t = gettextmod.translation(r'hg', localedir, _languages, fallback=True)
global _ugettext
try:
_ugettext = t.ugettext
--- a/mercurial/keepalive.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/keepalive.py Mon Mar 19 08:07:18 2018 -0700
@@ -324,11 +324,11 @@
h.putrequest(
req.get_method(), urllibcompat.getselector(req),
**pycompat.strkwargs(skipheaders))
- if 'content-type' not in headers:
- h.putheader('Content-type',
- 'application/x-www-form-urlencoded')
- if 'content-length' not in headers:
- h.putheader('Content-length', '%d' % len(data))
+ if r'content-type' not in headers:
+ h.putheader(r'Content-type',
+ r'application/x-www-form-urlencoded')
+ if r'content-length' not in headers:
+ h.putheader(r'Content-length', r'%d' % len(data))
else:
h.putrequest(
req.get_method(), urllibcompat.getselector(req),
--- a/mercurial/localrepo.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/localrepo.py Mon Mar 19 08:07:18 2018 -0700
@@ -9,7 +9,6 @@
import errno
import hashlib
-import inspect
import os
import random
import time
@@ -44,6 +43,7 @@
merge as mergemod,
mergeutil,
namespaces,
+ narrowspec,
obsolete,
pathutil,
peer,
@@ -57,7 +57,7 @@
scmutil,
sparse,
store,
- subrepo,
+ subrepoutil,
tags as tagsmod,
transaction,
txnutil,
@@ -191,7 +191,9 @@
def debugwireargs(self, one, two, three=None, four=None, five=None):
"""Used to test argument passing over the wire"""
- return "%s %s %s %s %s" % (one, two, three, four, five)
+ return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
+ pycompat.bytestr(four),
+ pycompat.bytestr(five))
def getbundle(self, source, heads=None, common=None, bundlecaps=None,
**kwargs):
@@ -260,7 +262,8 @@
bundle2.processbundle(self._repo, b)
raise
except error.PushRaced as exc:
- raise error.ResponseError(_('push failed:'), str(exc))
+ raise error.ResponseError(_('push failed:'),
+ util.forcebytestr(exc))
# End of _basewirecommands interface.
@@ -304,11 +307,15 @@
class localrepository(object):
+ # obsolete experimental requirements:
+ # - manifestv2: An experimental new manifest format that allowed
+ # for stem compression of long paths. Experiment ended up not
+ # being successful (repository sizes went up due to worse delta
+ # chains), and the code was deleted in 4.6.
supportedformats = {
'revlogv1',
'generaldelta',
'treemanifest',
- 'manifestv2',
REVLOGV2_REQUIREMENT,
}
_basesupported = supportedformats | {
@@ -323,7 +330,6 @@
'revlogv1',
'generaldelta',
'treemanifest',
- 'manifestv2',
}
# a list of (ui, featureset) functions.
@@ -733,6 +739,37 @@
" working parent %s!\n") % short(node))
return nullid
+ @repofilecache(narrowspec.FILENAME)
+ def narrowpats(self):
+ """matcher patterns for this repository's narrowspec
+
+ A tuple of (includes, excludes).
+ """
+ source = self
+ if self.shared():
+ from . import hg
+ source = hg.sharedreposource(self)
+ return narrowspec.load(source)
+
+ @repofilecache(narrowspec.FILENAME)
+ def _narrowmatch(self):
+ if changegroup.NARROW_REQUIREMENT not in self.requirements:
+ return matchmod.always(self.root, '')
+ include, exclude = self.narrowpats
+ return narrowspec.match(self.root, include=include, exclude=exclude)
+
+ # TODO(martinvonz): make this property-like instead?
+ def narrowmatch(self):
+ return self._narrowmatch
+
+ def setnarrowpats(self, newincludes, newexcludes):
+ target = self
+ if self.shared():
+ from . import hg
+ target = hg.sharedreposource(self)
+ narrowspec.save(target, newincludes, newexcludes)
+ self.invalidate(clearfilecache=True)
+
def __getitem__(self, changeid):
if changeid is None:
return context.workingctx(self)
@@ -1068,7 +1105,7 @@
if not fn:
fn = lambda s, c, **kwargs: util.filter(s, c)
# Wrap old filters not supporting keyword arguments
- if not inspect.getargspec(fn)[2]:
+ if not pycompat.getargspec(fn)[2]:
oldfn = fn
fn = lambda s, c, **kwargs: oldfn(s, c)
l.append((mf, fn, params))
@@ -1140,7 +1177,7 @@
raise error.ProgrammingError('transaction requires locking')
tr = self.currenttransaction()
if tr is not None:
- return tr.nest()
+ return tr.nest(name=desc)
# abort here if the journal already exists
if self.svfs.exists("journal"):
@@ -1279,7 +1316,8 @@
self.store.createmode,
validator=validate,
releasefn=releasefn,
- checkambigfiles=_cachedfiles)
+ checkambigfiles=_cachedfiles,
+ name=desc)
tr.changes['revs'] = xrange(0, 0)
tr.changes['obsmarkers'] = set()
tr.changes['phases'] = {}
@@ -1332,7 +1370,7 @@
"""To be run if transaction is aborted
"""
reporef().hook('txnabort', throw=False, txnname=desc,
- **tr2.hookargs)
+ **pycompat.strkwargs(tr2.hookargs))
tr.addabort('txnabort-hook', txnaborthook)
# avoid eager cache invalidation. in-memory data should be identical
# to stored data if transaction has no error.
@@ -1481,12 +1519,15 @@
return updater
@unfilteredmethod
- def updatecaches(self, tr=None):
+ def updatecaches(self, tr=None, full=False):
"""warm appropriate caches
If this function is called after a transaction closed. The transaction
will be available in the 'tr' argument. This can be used to selectively
update caches relevant to the changes in that transaction.
+
+ If 'full' is set, make sure all caches the function knows about have
+ up-to-date data. Even the ones usually loaded more lazily.
"""
if tr is not None and tr.hookargs.get('source') == 'strip':
# During strip, many caches are invalid but
@@ -1498,6 +1539,12 @@
self.ui.debug('updating the branch cache\n')
branchmap.updatecache(self.filtered('served'))
+ if full:
+ rbc = self.revbranchcache()
+ for r in self.changelog:
+ rbc.branchinfo(r)
+ rbc.write()
+
def invalidatecaches(self):
if '_tagscache' in vars(self):
@@ -1574,7 +1621,8 @@
def _refreshfilecachestats(self, tr):
"""Reload stats of cached files so that they are flagged as valid"""
for k, ce in self._filecache.items():
- if k == 'dirstate' or k not in self.__dict__:
+ k = pycompat.sysstr(k)
+ if k == r'dirstate' or k not in self.__dict__:
continue
ce.refresh()
@@ -1832,7 +1880,7 @@
status.modified.extend(status.clean) # mq may commit clean files
# check subrepos
- subs, commitsubs, newstate = subrepo.precommit(
+ subs, commitsubs, newstate = subrepoutil.precommit(
self.ui, wctx, status, match, force=force)
# make sure all explicit patterns are matched
@@ -1869,10 +1917,10 @@
for s in sorted(commitsubs):
sub = wctx.sub(s)
self.ui.status(_('committing subrepository %s\n') %
- subrepo.subrelpath(sub))
+ subrepoutil.subrelpath(sub))
sr = sub.commit(cctx._text, user, date)
newstate[s] = (newstate[s][0], sr)
- subrepo.writestate(self, newstate)
+ subrepoutil.writestate(self, newstate)
p1, p2 = self.dirstate.parents()
hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
@@ -1982,7 +2030,7 @@
self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
parent2=xp2)
# set the new commit is proper phase
- targetphase = subrepo.newcommitphase(self.ui, ctx)
+ targetphase = subrepoutil.newcommitphase(self.ui, ctx)
if targetphase:
# retract boundary do not alter parent changeset.
# if a parent have higher the resulting phase will
@@ -2047,15 +2095,6 @@
# tag cache retrieval" case to work.
self.invalidate()
- def walk(self, match, node=None):
- '''
- walk recursively through the directory tree or a given
- changeset, finding all files matched by the match
- function
- '''
- self.ui.deprecwarn('use repo[node].walk instead of repo.walk', '4.3')
- return self[node].walk(match)
-
def status(self, node1='.', node2=None, match=None,
ignored=False, clean=False, unknown=False,
listsubrepos=False):
@@ -2176,10 +2215,11 @@
hookargs = {}
if tr is not None:
hookargs.update(tr.hookargs)
- hookargs['namespace'] = namespace
- hookargs['key'] = key
- hookargs['old'] = old
- hookargs['new'] = new
+ hookargs = pycompat.strkwargs(hookargs)
+ hookargs[r'namespace'] = namespace
+ hookargs[r'key'] = key
+ hookargs[r'old'] = old
+ hookargs[r'new'] = new
self.hook('prepushkey', throw=True, **hookargs)
except error.HookAbort as exc:
self.ui.write_err(_("pushkey-abort: %s\n") % exc)
@@ -2203,7 +2243,9 @@
def debugwireargs(self, one, two, three=None, four=None, five=None):
'''used to test argument passing over the wire'''
- return "%s %s %s %s %s" % (one, two, three, four, five)
+ return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
+ pycompat.bytestr(four),
+ pycompat.bytestr(five))
def savecommitmessage(self, text):
fp = self.vfs('last-message.txt', 'wb')
@@ -2270,8 +2312,6 @@
requirements.add('generaldelta')
if ui.configbool('experimental', 'treemanifest'):
requirements.add('treemanifest')
- if ui.configbool('experimental', 'manifestv2'):
- requirements.add('manifestv2')
revlogv2 = ui.config('experimental', 'revlogv2')
if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
--- a/mercurial/lock.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/lock.py Mon Mar 19 08:07:18 2018 -0700
@@ -10,6 +10,7 @@
import contextlib
import errno
import os
+import signal
import socket
import time
import warnings
@@ -30,9 +31,7 @@
confidence. Typically it's just hostname. On modern linux, we include an
extra Linux-specific pid namespace identifier.
"""
- result = socket.gethostname()
- if pycompat.ispy3:
- result = result.encode(pycompat.sysstr(encoding.encoding), 'replace')
+ result = encoding.strtolocal(socket.gethostname())
if pycompat.sysplatform.startswith('linux'):
try:
result += '/%x' % os.stat('/proc/self/ns/pid').st_ino
@@ -41,6 +40,64 @@
raise
return result
+@contextlib.contextmanager
+def _delayedinterrupt():
+ """Block signal interrupt while doing something critical
+
+ This makes sure that the code block wrapped by this context manager won't
+ be interrupted.
+
+ For Windows developers: It appears not possible to guard time.sleep()
+ from CTRL_C_EVENT, so please don't use time.sleep() to test if this is
+ working.
+ """
+ assertedsigs = []
+ blocked = False
+ orighandlers = {}
+
+ def raiseinterrupt(num):
+ if (num == getattr(signal, 'SIGINT', None) or
+ num == getattr(signal, 'CTRL_C_EVENT', None)):
+ raise KeyboardInterrupt
+ else:
+ raise error.SignalInterrupt
+ def catchterm(num, frame):
+ if blocked:
+ assertedsigs.append(num)
+ else:
+ raiseinterrupt(num)
+
+ try:
+ # save handlers first so they can be restored even if a setup is
+ # interrupted between signal.signal() and orighandlers[] =.
+ for name in ['CTRL_C_EVENT', 'SIGINT', 'SIGBREAK', 'SIGHUP', 'SIGTERM']:
+ num = getattr(signal, name, None)
+ if num and num not in orighandlers:
+ orighandlers[num] = signal.getsignal(num)
+ try:
+ for num in orighandlers:
+ signal.signal(num, catchterm)
+ except ValueError:
+ pass # in a thread? no luck
+
+ blocked = True
+ yield
+ finally:
+ # no simple way to reliably restore all signal handlers because
+ # any loops, recursive function calls, except blocks, etc. can be
+ # interrupted. so instead, make catchterm() raise interrupt.
+ blocked = False
+ try:
+ for num, handler in orighandlers.items():
+ signal.signal(num, handler)
+ except ValueError:
+ pass # in a thread?
+
+ # re-raise interrupt exception if any, which may be shadowed by a new
+ # interrupt occurred while re-raising the first one
+ if assertedsigs:
+ raiseinterrupt(assertedsigs[0])
+
def trylock(ui, vfs, lockname, timeout, warntimeout, *args, **kwargs):
"""return an acquired lock or raise an a LockHeld exception
@@ -52,10 +109,12 @@
# show more details for new-style locks
if ':' in locker:
host, pid = locker.split(":", 1)
- msg = _("waiting for lock on %s held by process %r "
- "on host %r\n") % (l.desc, pid, host)
+ msg = (_("waiting for lock on %s held by process %r on host %r\n")
+ % (pycompat.bytestr(l.desc), pycompat.bytestr(pid),
+ pycompat.bytestr(host)))
else:
- msg = _("waiting for lock on %s held by %r\n") % (l.desc, locker)
+ msg = (_("waiting for lock on %s held by %r\n")
+ % (l.desc, pycompat.bytestr(locker)))
printer(msg)
l = lock(vfs, lockname, 0, *args, dolock=False, **kwargs)
@@ -86,9 +145,9 @@
l.delay = delay
if l.delay:
if 0 <= warningidx <= l.delay:
- ui.warn(_("got lock after %s seconds\n") % l.delay)
+ ui.warn(_("got lock after %d seconds\n") % l.delay)
else:
- ui.debug("got lock after %s seconds\n" % l.delay)
+ ui.debug("got lock after %d seconds\n" % l.delay)
if l.acquirefn:
l.acquirefn()
return l
@@ -182,8 +241,9 @@
while not self.held and retry:
retry -= 1
try:
- self.vfs.makelock(lockname, self.f)
- self.held = 1
+ with _delayedinterrupt():
+ self.vfs.makelock(lockname, self.f)
+ self.held = 1
except (OSError, IOError) as why:
if why.errno == errno.EEXIST:
locker = self._readlock()
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/logcmdutil.py Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,936 @@
+# logcmdutil.py - utility for log-like commands
+#
+# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+import itertools
+import os
+
+from .i18n import _
+from .node import (
+ hex,
+ nullid,
+)
+
+from . import (
+ dagop,
+ encoding,
+ error,
+ formatter,
+ graphmod,
+ match as matchmod,
+ mdiff,
+ patch,
+ pathutil,
+ pycompat,
+ revset,
+ revsetlang,
+ scmutil,
+ smartset,
+ templatekw,
+ templater,
+ util,
+)
+from .utils import dateutil
+
+def getlimit(opts):
+ """get the log limit according to option -l/--limit"""
+ limit = opts.get('limit')
+ if limit:
+ try:
+ limit = int(limit)
+ except ValueError:
+ raise error.Abort(_('limit must be a positive integer'))
+ if limit <= 0:
+ raise error.Abort(_('limit must be positive'))
+ else:
+ limit = None
+ return limit
+
+def diffordiffstat(ui, repo, diffopts, node1, node2, match,
+ changes=None, stat=False, fp=None, prefix='',
+ root='', listsubrepos=False, hunksfilterfn=None):
+ '''show diff or diffstat.'''
+ if root:
+ relroot = pathutil.canonpath(repo.root, repo.getcwd(), root)
+ else:
+ relroot = ''
+ if relroot != '':
+ # XXX relative roots currently don't work if the root is within a
+ # subrepo
+ uirelroot = match.uipath(relroot)
+ relroot += '/'
+ for matchroot in match.files():
+ if not matchroot.startswith(relroot):
+ ui.warn(_('warning: %s not inside relative root %s\n') % (
+ match.uipath(matchroot), uirelroot))
+
+ if stat:
+ diffopts = diffopts.copy(context=0, noprefix=False)
+ width = 80
+ if not ui.plain():
+ width = ui.termwidth()
+
+ chunks = patch.diff(repo, node1, node2, match, changes, opts=diffopts,
+ prefix=prefix, relroot=relroot,
+ hunksfilterfn=hunksfilterfn)
+
+ if fp is not None or ui.canwritewithoutlabels():
+ out = fp or ui
+ if stat:
+ chunks = [patch.diffstat(util.iterlines(chunks), width=width)]
+ for chunk in util.filechunkiter(util.chunkbuffer(chunks)):
+ out.write(chunk)
+ else:
+ if stat:
+ chunks = patch.diffstatui(util.iterlines(chunks), width=width)
+ else:
+ chunks = patch.difflabel(lambda chunks, **kwargs: chunks, chunks,
+ opts=diffopts)
+ if ui.canbatchlabeledwrites():
+ def gen():
+ for chunk, label in chunks:
+ yield ui.label(chunk, label=label)
+ for chunk in util.filechunkiter(util.chunkbuffer(gen())):
+ ui.write(chunk)
+ else:
+ for chunk, label in chunks:
+ ui.write(chunk, label=label)
+
+ if listsubrepos:
+ ctx1 = repo[node1]
+ ctx2 = repo[node2]
+ for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
+ tempnode2 = node2
+ try:
+ if node2 is not None:
+ tempnode2 = ctx2.substate[subpath][1]
+ except KeyError:
+ # A subrepo that existed in node1 was deleted between node1 and
+ # node2 (inclusive). Thus, ctx2's substate won't contain that
+ # subpath. The best we can do is to ignore it.
+ tempnode2 = None
+ submatch = matchmod.subdirmatcher(subpath, match)
+ sub.diff(ui, diffopts, tempnode2, submatch, changes=changes,
+ stat=stat, fp=fp, prefix=prefix)
+
+class changesetdiffer(object):
+ """Generate diff of changeset with pre-configured filtering functions"""
+
+ def _makefilematcher(self, ctx):
+ return scmutil.matchall(ctx.repo())
+
+ def _makehunksfilter(self, ctx):
+ return None
+
+ def showdiff(self, ui, ctx, diffopts, stat=False):
+ repo = ctx.repo()
+ node = ctx.node()
+ prev = ctx.p1().node()
+ diffordiffstat(ui, repo, diffopts, prev, node,
+ match=self._makefilematcher(ctx), stat=stat,
+ hunksfilterfn=self._makehunksfilter(ctx))
+
+def changesetlabels(ctx):
+ labels = ['log.changeset', 'changeset.%s' % ctx.phasestr()]
+ if ctx.obsolete():
+ labels.append('changeset.obsolete')
+ if ctx.isunstable():
+ labels.append('changeset.unstable')
+ for instability in ctx.instabilities():
+ labels.append('instability.%s' % instability)
+ return ' '.join(labels)
+
+class changesetprinter(object):
+ '''show changeset information when templating not requested.'''
+
+ def __init__(self, ui, repo, differ=None, diffopts=None, buffered=False):
+ self.ui = ui
+ self.repo = repo
+ self.buffered = buffered
+ self._differ = differ or changesetdiffer()
+ self.diffopts = diffopts or {}
+ self.header = {}
+ self.hunk = {}
+ self.lastheader = None
+ self.footer = None
+ self._columns = templatekw.getlogcolumns()
+
+ def flush(self, ctx):
+ rev = ctx.rev()
+ if rev in self.header:
+ h = self.header[rev]
+ if h != self.lastheader:
+ self.lastheader = h
+ self.ui.write(h)
+ del self.header[rev]
+ if rev in self.hunk:
+ self.ui.write(self.hunk[rev])
+ del self.hunk[rev]
+
+ def close(self):
+ if self.footer:
+ self.ui.write(self.footer)
+
+ def show(self, ctx, copies=None, **props):
+ props = pycompat.byteskwargs(props)
+ if self.buffered:
+ self.ui.pushbuffer(labeled=True)
+ self._show(ctx, copies, props)
+ self.hunk[ctx.rev()] = self.ui.popbuffer()
+ else:
+ self._show(ctx, copies, props)
+
+ def _show(self, ctx, copies, props):
+ '''show a single changeset or file revision'''
+ changenode = ctx.node()
+ rev = ctx.rev()
+
+ if self.ui.quiet:
+ self.ui.write("%s\n" % scmutil.formatchangeid(ctx),
+ label='log.node')
+ return
+
+ columns = self._columns
+ self.ui.write(columns['changeset'] % scmutil.formatchangeid(ctx),
+ label=changesetlabels(ctx))
+
+ # branches are shown first before any other names due to backwards
+ # compatibility
+ branch = ctx.branch()
+ # don't show the default branch name
+ if branch != 'default':
+ self.ui.write(columns['branch'] % branch, label='log.branch')
+
+ for nsname, ns in self.repo.names.iteritems():
+ # branches has special logic already handled above, so here we just
+ # skip it
+ if nsname == 'branches':
+ continue
+ # we will use the templatename as the color name since those two
+ # should be the same
+ for name in ns.names(self.repo, changenode):
+ self.ui.write(ns.logfmt % name,
+ label='log.%s' % ns.colorname)
+ if self.ui.debugflag:
+ self.ui.write(columns['phase'] % ctx.phasestr(), label='log.phase')
+ for pctx in scmutil.meaningfulparents(self.repo, ctx):
+ label = 'log.parent changeset.%s' % pctx.phasestr()
+ self.ui.write(columns['parent'] % scmutil.formatchangeid(pctx),
+ label=label)
+
+ if self.ui.debugflag and rev is not None:
+ mnode = ctx.manifestnode()
+ mrev = self.repo.manifestlog._revlog.rev(mnode)
+ self.ui.write(columns['manifest']
+ % scmutil.formatrevnode(self.ui, mrev, mnode),
+ label='ui.debug log.manifest')
+ self.ui.write(columns['user'] % ctx.user(), label='log.user')
+ self.ui.write(columns['date'] % dateutil.datestr(ctx.date()),
+ label='log.date')
+
+ if ctx.isunstable():
+ instabilities = ctx.instabilities()
+ self.ui.write(columns['instability'] % ', '.join(instabilities),
+ label='log.instability')
+
+ elif ctx.obsolete():
+ self._showobsfate(ctx)
+
+ self._exthook(ctx)
+
+ if self.ui.debugflag:
+ files = ctx.p1().status(ctx)[:3]
+ for key, value in zip(['files', 'files+', 'files-'], files):
+ if value:
+ self.ui.write(columns[key] % " ".join(value),
+ label='ui.debug log.files')
+ elif ctx.files() and self.ui.verbose:
+ self.ui.write(columns['files'] % " ".join(ctx.files()),
+ label='ui.note log.files')
+ if copies and self.ui.verbose:
+ copies = ['%s (%s)' % c for c in copies]
+ self.ui.write(columns['copies'] % ' '.join(copies),
+ label='ui.note log.copies')
+
+ extra = ctx.extra()
+ if extra and self.ui.debugflag:
+ for key, value in sorted(extra.items()):
+ self.ui.write(columns['extra'] % (key, util.escapestr(value)),
+ label='ui.debug log.extra')
+
+ description = ctx.description().strip()
+ if description:
+ if self.ui.verbose:
+ self.ui.write(_("description:\n"),
+ label='ui.note log.description')
+ self.ui.write(description,
+ label='ui.note log.description')
+ self.ui.write("\n\n")
+ else:
+ self.ui.write(columns['summary'] % description.splitlines()[0],
+ label='log.summary')
+ self.ui.write("\n")
+
+ self._showpatch(ctx)
+
+ def _showobsfate(self, ctx):
+ # TODO: do not depend on templater
+ tres = formatter.templateresources(self.repo.ui, self.repo)
+ t = formatter.maketemplater(self.repo.ui, '{join(obsfate, "\n")}',
+ defaults=templatekw.keywords,
+ resources=tres)
+ obsfate = t.renderdefault({'ctx': ctx, 'revcache': {}}).splitlines()
+
+ if obsfate:
+ for obsfateline in obsfate:
+ self.ui.write(self._columns['obsolete'] % obsfateline,
+ label='log.obsfate')
+
+ def _exthook(self, ctx):
+        '''empty method used by extensions as a hook point
+ '''
+
+ def _showpatch(self, ctx):
+ stat = self.diffopts.get('stat')
+ diff = self.diffopts.get('patch')
+ diffopts = patch.diffallopts(self.ui, self.diffopts)
+ if stat:
+ self._differ.showdiff(self.ui, ctx, diffopts, stat=True)
+ if stat and diff:
+ self.ui.write("\n")
+ if diff:
+ self._differ.showdiff(self.ui, ctx, diffopts, stat=False)
+ if stat or diff:
+ self.ui.write("\n")
+
+class jsonchangeset(changesetprinter):
+ '''format changeset information.'''
+
+ def __init__(self, ui, repo, differ=None, diffopts=None, buffered=False):
+ changesetprinter.__init__(self, ui, repo, differ, diffopts, buffered)
+ self.cache = {}
+ self._first = True
+
+ def close(self):
+ if not self._first:
+ self.ui.write("\n]\n")
+ else:
+ self.ui.write("[]\n")
+
+ def _show(self, ctx, copies, props):
+ '''show a single changeset or file revision'''
+ rev = ctx.rev()
+ if rev is None:
+ jrev = jnode = 'null'
+ else:
+ jrev = '%d' % rev
+ jnode = '"%s"' % hex(ctx.node())
+ j = encoding.jsonescape
+
+ if self._first:
+ self.ui.write("[\n {")
+ self._first = False
+ else:
+ self.ui.write(",\n {")
+
+ if self.ui.quiet:
+ self.ui.write(('\n "rev": %s') % jrev)
+ self.ui.write((',\n "node": %s') % jnode)
+ self.ui.write('\n }')
+ return
+
+ self.ui.write(('\n "rev": %s') % jrev)
+ self.ui.write((',\n "node": %s') % jnode)
+ self.ui.write((',\n "branch": "%s"') % j(ctx.branch()))
+ self.ui.write((',\n "phase": "%s"') % ctx.phasestr())
+ self.ui.write((',\n "user": "%s"') % j(ctx.user()))
+ self.ui.write((',\n "date": [%d, %d]') % ctx.date())
+ self.ui.write((',\n "desc": "%s"') % j(ctx.description()))
+
+ self.ui.write((',\n "bookmarks": [%s]') %
+ ", ".join('"%s"' % j(b) for b in ctx.bookmarks()))
+ self.ui.write((',\n "tags": [%s]') %
+ ", ".join('"%s"' % j(t) for t in ctx.tags()))
+ self.ui.write((',\n "parents": [%s]') %
+ ", ".join('"%s"' % c.hex() for c in ctx.parents()))
+
+ if self.ui.debugflag:
+ if rev is None:
+ jmanifestnode = 'null'
+ else:
+ jmanifestnode = '"%s"' % hex(ctx.manifestnode())
+ self.ui.write((',\n "manifest": %s') % jmanifestnode)
+
+ self.ui.write((',\n "extra": {%s}') %
+ ", ".join('"%s": "%s"' % (j(k), j(v))
+ for k, v in ctx.extra().items()))
+
+ files = ctx.p1().status(ctx)
+ self.ui.write((',\n "modified": [%s]') %
+ ", ".join('"%s"' % j(f) for f in files[0]))
+ self.ui.write((',\n "added": [%s]') %
+ ", ".join('"%s"' % j(f) for f in files[1]))
+ self.ui.write((',\n "removed": [%s]') %
+ ", ".join('"%s"' % j(f) for f in files[2]))
+
+ elif self.ui.verbose:
+ self.ui.write((',\n "files": [%s]') %
+ ", ".join('"%s"' % j(f) for f in ctx.files()))
+
+ if copies:
+ self.ui.write((',\n "copies": {%s}') %
+ ", ".join('"%s": "%s"' % (j(k), j(v))
+ for k, v in copies))
+
+ stat = self.diffopts.get('stat')
+ diff = self.diffopts.get('patch')
+ diffopts = patch.difffeatureopts(self.ui, self.diffopts, git=True)
+ if stat:
+ self.ui.pushbuffer()
+ self._differ.showdiff(self.ui, ctx, diffopts, stat=True)
+ self.ui.write((',\n "diffstat": "%s"')
+ % j(self.ui.popbuffer()))
+ if diff:
+ self.ui.pushbuffer()
+ self._differ.showdiff(self.ui, ctx, diffopts, stat=False)
+ self.ui.write((',\n "diff": "%s"') % j(self.ui.popbuffer()))
+
+ self.ui.write("\n }")
+
+class changesettemplater(changesetprinter):
+ '''format changeset information.
+
+ Note: there are a variety of convenience functions to build a
+ changesettemplater for common cases. See functions such as:
+ maketemplater, changesetdisplayer, buildcommittemplate, or other
+    functions that use changesettemplater.
+ '''
+
+ # Arguments before "buffered" used to be positional. Consider not
+ # adding/removing arguments before "buffered" to not break callers.
+ def __init__(self, ui, repo, tmplspec, differ=None, diffopts=None,
+ buffered=False):
+ changesetprinter.__init__(self, ui, repo, differ, diffopts, buffered)
+ tres = formatter.templateresources(ui, repo)
+ self.t = formatter.loadtemplater(ui, tmplspec,
+ defaults=templatekw.keywords,
+ resources=tres,
+ cache=templatekw.defaulttempl)
+ self._counter = itertools.count()
+ self._getcache = tres['cache'] # shared with _graphnodeformatter()
+
+ self._tref = tmplspec.ref
+ self._parts = {'header': '', 'footer': '',
+ tmplspec.ref: tmplspec.ref,
+ 'docheader': '', 'docfooter': '',
+ 'separator': ''}
+ if tmplspec.mapfile:
+ # find correct templates for current mode, for backward
+ # compatibility with 'log -v/-q/--debug' using a mapfile
+ tmplmodes = [
+ (True, ''),
+ (self.ui.verbose, '_verbose'),
+ (self.ui.quiet, '_quiet'),
+ (self.ui.debugflag, '_debug'),
+ ]
+ for mode, postfix in tmplmodes:
+ for t in self._parts:
+ cur = t + postfix
+ if mode and cur in self.t:
+ self._parts[t] = cur
+ else:
+ partnames = [p for p in self._parts.keys() if p != tmplspec.ref]
+ m = formatter.templatepartsmap(tmplspec, self.t, partnames)
+ self._parts.update(m)
+
+ if self._parts['docheader']:
+ self.ui.write(self.t.render(self._parts['docheader'], {}))
+
+ def close(self):
+ if self._parts['docfooter']:
+ if not self.footer:
+ self.footer = ""
+ self.footer += self.t.render(self._parts['docfooter'], {})
+ return super(changesettemplater, self).close()
+
+ def _show(self, ctx, copies, props):
+ '''show a single changeset or file revision'''
+ props = props.copy()
+ props['ctx'] = ctx
+ props['index'] = index = next(self._counter)
+ props['revcache'] = {'copies': copies}
+
+ # write separator, which wouldn't work well with the header part below
+ # since there's inherently a conflict between header (across items) and
+ # separator (per item)
+ if self._parts['separator'] and index > 0:
+ self.ui.write(self.t.render(self._parts['separator'], {}))
+
+ # write header
+ if self._parts['header']:
+ h = self.t.render(self._parts['header'], props)
+ if self.buffered:
+ self.header[ctx.rev()] = h
+ else:
+ if self.lastheader != h:
+ self.lastheader = h
+ self.ui.write(h)
+
+ # write changeset metadata, then patch if requested
+ key = self._parts[self._tref]
+ self.ui.write(self.t.render(key, props))
+ self._showpatch(ctx)
+
+ if self._parts['footer']:
+ if not self.footer:
+ self.footer = self.t.render(self._parts['footer'], props)
+
+def templatespec(tmpl, mapfile):
+ if mapfile:
+ return formatter.templatespec('changeset', tmpl, mapfile)
+ else:
+ return formatter.templatespec('', tmpl, None)
+
+def _lookuptemplate(ui, tmpl, style):
+ """Find the template matching the given template spec or style
+
+ See formatter.lookuptemplate() for details.
+ """
+
+ # ui settings
+    if not tmpl and not style: # templates are stronger than style
+ tmpl = ui.config('ui', 'logtemplate')
+ if tmpl:
+ return templatespec(templater.unquotestring(tmpl), None)
+ else:
+ style = util.expandpath(ui.config('ui', 'style'))
+
+ if not tmpl and style:
+ mapfile = style
+ if not os.path.split(mapfile)[0]:
+ mapname = (templater.templatepath('map-cmdline.' + mapfile)
+ or templater.templatepath(mapfile))
+ if mapname:
+ mapfile = mapname
+ return templatespec(None, mapfile)
+
+ if not tmpl:
+ return templatespec(None, None)
+
+ return formatter.lookuptemplate(ui, 'changeset', tmpl)
+
+def maketemplater(ui, repo, tmpl, buffered=False):
+ """Create a changesettemplater from a literal template 'tmpl'
+ byte-string."""
+ spec = templatespec(tmpl, None)
+ return changesettemplater(ui, repo, spec, buffered=buffered)
+
+def changesetdisplayer(ui, repo, opts, differ=None, buffered=False):
+ """show one changeset using template or regular display.
+
+ Display format will be the first non-empty hit of:
+ 1. option 'template'
+ 2. option 'style'
+ 3. [ui] setting 'logtemplate'
+ 4. [ui] setting 'style'
+ If all of these values are either the unset or the empty string,
+ regular display via changesetprinter() is done.
+ """
+ postargs = (differ, opts, buffered)
+ if opts.get('template') == 'json':
+ return jsonchangeset(ui, repo, *postargs)
+
+ spec = _lookuptemplate(ui, opts.get('template'), opts.get('style'))
+
+ if not spec.ref and not spec.tmpl and not spec.mapfile:
+ return changesetprinter(ui, repo, *postargs)
+
+ return changesettemplater(ui, repo, spec, *postargs)
+
+def _makematcher(repo, revs, pats, opts):
+ """Build matcher and expanded patterns from log options
+
+ If --follow, revs are the revisions to follow from.
+
+ Returns (match, pats, slowpath) where
+ - match: a matcher built from the given pats and -I/-X opts
+ - pats: patterns used (globs are expanded on Windows)
+ - slowpath: True if patterns aren't as simple as scanning filelogs
+ """
+ # pats/include/exclude are passed to match.match() directly in
+ # _matchfiles() revset but walkchangerevs() builds its matcher with
+ # scmutil.match(). The difference is input pats are globbed on
+ # platforms without shell expansion (windows).
+ wctx = repo[None]
+ match, pats = scmutil.matchandpats(wctx, pats, opts)
+ slowpath = match.anypats() or (not match.always() and opts.get('removed'))
+ if not slowpath:
+ follow = opts.get('follow') or opts.get('follow_first')
+ startctxs = []
+ if follow and opts.get('rev'):
+ startctxs = [repo[r] for r in revs]
+ for f in match.files():
+ if follow and startctxs:
+ # No idea if the path was a directory at that revision, so
+ # take the slow path.
+ if any(f not in c for c in startctxs):
+ slowpath = True
+ continue
+ elif follow and f not in wctx:
+ # If the file exists, it may be a directory, so let it
+ # take the slow path.
+ if os.path.exists(repo.wjoin(f)):
+ slowpath = True
+ continue
+ else:
+ raise error.Abort(_('cannot follow file not in parent '
+ 'revision: "%s"') % f)
+ filelog = repo.file(f)
+ if not filelog:
+ # A zero count may be a directory or deleted file, so
+ # try to find matching entries on the slow path.
+ if follow:
+ raise error.Abort(
+ _('cannot follow nonexistent file: "%s"') % f)
+ slowpath = True
+
+ # We decided to fall back to the slowpath because at least one
+ # of the paths was not a file. Check to see if at least one of them
+ # existed in history - in that case, we'll continue down the
+ # slowpath; otherwise, we can turn off the slowpath
+ if slowpath:
+ for path in match.files():
+ if path == '.' or path in repo.store:
+ break
+ else:
+ slowpath = False
+
+ return match, pats, slowpath
+
+def _fileancestors(repo, revs, match, followfirst):
+ fctxs = []
+ for r in revs:
+ ctx = repo[r]
+ fctxs.extend(ctx[f].introfilectx() for f in ctx.walk(match))
+
+ # When displaying a revision with --patch --follow FILE, we have
+ # to know which file of the revision must be diffed. With
+ # --follow, we want the names of the ancestors of FILE in the
+ # revision, stored in "fcache". "fcache" is populated as a side effect
+ # of the graph traversal.
+ fcache = {}
+ def filematcher(ctx):
+ return scmutil.matchfiles(repo, fcache.get(ctx.rev(), []))
+
+ def revgen():
+ for rev, cs in dagop.filectxancestors(fctxs, followfirst=followfirst):
+ fcache[rev] = [c.path() for c in cs]
+ yield rev
+ return smartset.generatorset(revgen(), iterasc=False), filematcher
+
+def _makenofollowfilematcher(repo, pats, opts):
+ '''hook for extensions to override the filematcher for non-follow cases'''
+ return None
+
+_opt2logrevset = {
+ 'no_merges': ('not merge()', None),
+ 'only_merges': ('merge()', None),
+ '_matchfiles': (None, '_matchfiles(%ps)'),
+ 'date': ('date(%s)', None),
+ 'branch': ('branch(%s)', '%lr'),
+ '_patslog': ('filelog(%s)', '%lr'),
+ 'keyword': ('keyword(%s)', '%lr'),
+ 'prune': ('ancestors(%s)', 'not %lr'),
+ 'user': ('user(%s)', '%lr'),
+}
+
+def _makerevset(repo, match, pats, slowpath, opts):
+ """Return a revset string built from log options and file patterns"""
+ opts = dict(opts)
+ # follow or not follow?
+ follow = opts.get('follow') or opts.get('follow_first')
+
+ # branch and only_branch are really aliases and must be handled at
+ # the same time
+ opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
+ opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
+
+ if slowpath:
+ # See walkchangerevs() slow path.
+ #
+ # pats/include/exclude cannot be represented as separate
+ # revset expressions as their filtering logic applies at file
+ # level. For instance "-I a -X b" matches a revision touching
+ # "a" and "b" while "file(a) and not file(b)" does
+ # not. Besides, filesets are evaluated against the working
+ # directory.
+ matchargs = ['r:', 'd:relpath']
+ for p in pats:
+ matchargs.append('p:' + p)
+ for p in opts.get('include', []):
+ matchargs.append('i:' + p)
+ for p in opts.get('exclude', []):
+ matchargs.append('x:' + p)
+ opts['_matchfiles'] = matchargs
+ elif not follow:
+ opts['_patslog'] = list(pats)
+
+ expr = []
+ for op, val in sorted(opts.iteritems()):
+ if not val:
+ continue
+ if op not in _opt2logrevset:
+ continue
+ revop, listop = _opt2logrevset[op]
+ if revop and '%' not in revop:
+ expr.append(revop)
+ elif not listop:
+ expr.append(revsetlang.formatspec(revop, val))
+ else:
+ if revop:
+ val = [revsetlang.formatspec(revop, v) for v in val]
+ expr.append(revsetlang.formatspec(listop, val))
+
+ if expr:
+ expr = '(' + ' and '.join(expr) + ')'
+ else:
+ expr = None
+ return expr
+
+def _initialrevs(repo, opts):
+ """Return the initial set of revisions to be filtered or followed"""
+ follow = opts.get('follow') or opts.get('follow_first')
+ if opts.get('rev'):
+ revs = scmutil.revrange(repo, opts['rev'])
+ elif follow and repo.dirstate.p1() == nullid:
+ revs = smartset.baseset()
+ elif follow:
+ revs = repo.revs('.')
+ else:
+ revs = smartset.spanset(repo)
+ revs.reverse()
+ return revs
+
+def getrevs(repo, pats, opts):
+ """Return (revs, differ) where revs is a smartset
+
+ differ is a changesetdiffer with pre-configured file matcher.
+ """
+ follow = opts.get('follow') or opts.get('follow_first')
+ followfirst = opts.get('follow_first')
+ limit = getlimit(opts)
+ revs = _initialrevs(repo, opts)
+ if not revs:
+ return smartset.baseset(), None
+ match, pats, slowpath = _makematcher(repo, revs, pats, opts)
+ filematcher = None
+ if follow:
+ if slowpath or match.always():
+ revs = dagop.revancestors(repo, revs, followfirst=followfirst)
+ else:
+ revs, filematcher = _fileancestors(repo, revs, match, followfirst)
+ revs.reverse()
+ if filematcher is None:
+ filematcher = _makenofollowfilematcher(repo, pats, opts)
+ if filematcher is None:
+ def filematcher(ctx):
+ return match
+
+ expr = _makerevset(repo, match, pats, slowpath, opts)
+ if opts.get('graph') and opts.get('rev'):
+ # User-specified revs might be unsorted, but don't sort before
+ # _makerevset because it might depend on the order of revs
+ if not (revs.isdescending() or revs.istopo()):
+ revs.sort(reverse=True)
+ if expr:
+ matcher = revset.match(None, expr)
+ revs = matcher(repo, revs)
+ if limit is not None:
+ revs = revs.slice(0, limit)
+
+ differ = changesetdiffer()
+ differ._makefilematcher = filematcher
+ return revs, differ
+
+def _parselinerangeopt(repo, opts):
+ """Parse --line-range log option and return a list of tuples (filename,
+ (fromline, toline)).
+ """
+ linerangebyfname = []
+ for pat in opts.get('line_range', []):
+ try:
+ pat, linerange = pat.rsplit(',', 1)
+ except ValueError:
+ raise error.Abort(_('malformatted line-range pattern %s') % pat)
+ try:
+ fromline, toline = map(int, linerange.split(':'))
+ except ValueError:
+ raise error.Abort(_("invalid line range for %s") % pat)
+ msg = _("line range pattern '%s' must match exactly one file") % pat
+ fname = scmutil.parsefollowlinespattern(repo, None, pat, msg)
+ linerangebyfname.append(
+ (fname, util.processlinerange(fromline, toline)))
+ return linerangebyfname
+
+def getlinerangerevs(repo, userrevs, opts):
+ """Return (revs, differ).
+
+ "revs" are revisions obtained by processing "line-range" log options and
+ walking block ancestors of each specified file/line-range.
+
+ "differ" is a changesetdiffer with pre-configured file matcher and hunks
+ filter.
+ """
+ wctx = repo[None]
+
+ # Two-levels map of "rev -> file ctx -> [line range]".
+ linerangesbyrev = {}
+ for fname, (fromline, toline) in _parselinerangeopt(repo, opts):
+ if fname not in wctx:
+ raise error.Abort(_('cannot follow file not in parent '
+ 'revision: "%s"') % fname)
+ fctx = wctx.filectx(fname)
+ for fctx, linerange in dagop.blockancestors(fctx, fromline, toline):
+ rev = fctx.introrev()
+ if rev not in userrevs:
+ continue
+ linerangesbyrev.setdefault(
+ rev, {}).setdefault(
+ fctx.path(), []).append(linerange)
+
+ def nofilterhunksfn(fctx, hunks):
+ return hunks
+
+ def hunksfilter(ctx):
+ fctxlineranges = linerangesbyrev.get(ctx.rev())
+ if fctxlineranges is None:
+ return nofilterhunksfn
+
+ def filterfn(fctx, hunks):
+ lineranges = fctxlineranges.get(fctx.path())
+ if lineranges is not None:
+ for hr, lines in hunks:
+ if hr is None: # binary
+ yield hr, lines
+ continue
+ if any(mdiff.hunkinrange(hr[2:], lr)
+ for lr in lineranges):
+ yield hr, lines
+ else:
+ for hunk in hunks:
+ yield hunk
+
+ return filterfn
+
+ def filematcher(ctx):
+ files = list(linerangesbyrev.get(ctx.rev(), []))
+ return scmutil.matchfiles(repo, files)
+
+ revs = sorted(linerangesbyrev, reverse=True)
+
+ differ = changesetdiffer()
+ differ._makefilematcher = filematcher
+ differ._makehunksfilter = hunksfilter
+ return revs, differ
+
+def _graphnodeformatter(ui, displayer):
+ spec = ui.config('ui', 'graphnodetemplate')
+ if not spec:
+ return templatekw.getgraphnode # fast path for "{graphnode}"
+
+ spec = templater.unquotestring(spec)
+ tres = formatter.templateresources(ui)
+ if isinstance(displayer, changesettemplater):
+ # reuse cache of slow templates
+ tres['cache'] = displayer._getcache
+ templ = formatter.maketemplater(ui, spec, defaults=templatekw.keywords,
+ resources=tres)
+ def formatnode(repo, ctx):
+ props = {'ctx': ctx, 'repo': repo, 'revcache': {}}
+ return templ.renderdefault(props)
+ return formatnode
+
+def displaygraph(ui, repo, dag, displayer, edgefn, getrenamed=None, props=None):
+ props = props or {}
+ formatnode = _graphnodeformatter(ui, displayer)
+ state = graphmod.asciistate()
+ styles = state['styles']
+
+ # only set graph styling if HGPLAIN is not set.
+ if ui.plain('graph'):
+ # set all edge styles to |, the default pre-3.8 behaviour
+ styles.update(dict.fromkeys(styles, '|'))
+ else:
+ edgetypes = {
+ 'parent': graphmod.PARENT,
+ 'grandparent': graphmod.GRANDPARENT,
+ 'missing': graphmod.MISSINGPARENT
+ }
+ for name, key in edgetypes.items():
+ # experimental config: experimental.graphstyle.*
+ styles[key] = ui.config('experimental', 'graphstyle.%s' % name,
+ styles[key])
+ if not styles[key]:
+ styles[key] = None
+
+ # experimental config: experimental.graphshorten
+ state['graphshorten'] = ui.configbool('experimental', 'graphshorten')
+
+ for rev, type, ctx, parents in dag:
+ char = formatnode(repo, ctx)
+ copies = None
+ if getrenamed and ctx.rev():
+ copies = []
+ for fn in ctx.files():
+ rename = getrenamed(fn, ctx.rev())
+ if rename:
+ copies.append((fn, rename[0]))
+ edges = edgefn(type, char, state, rev, parents)
+ firstedge = next(edges)
+ width = firstedge[2]
+ displayer.show(ctx, copies=copies,
+ graphwidth=width, **pycompat.strkwargs(props))
+ lines = displayer.hunk.pop(rev).split('\n')
+ if not lines[-1]:
+ del lines[-1]
+ displayer.flush(ctx)
+ for type, char, width, coldata in itertools.chain([firstedge], edges):
+ graphmod.ascii(ui, state, type, char, lines, coldata)
+ lines = []
+ displayer.close()
+
+def displaygraphrevs(ui, repo, revs, displayer, getrenamed):
+ revdag = graphmod.dagwalker(repo, revs)
+ displaygraph(ui, repo, revdag, displayer, graphmod.asciiedges, getrenamed)
+
+def displayrevs(ui, repo, revs, displayer, getrenamed):
+ for rev in revs:
+ ctx = repo[rev]
+ copies = None
+ if getrenamed is not None and rev:
+ copies = []
+ for fn in ctx.files():
+ rename = getrenamed(fn, rev)
+ if rename:
+ copies.append((fn, rename[0]))
+ displayer.show(ctx, copies=copies)
+ displayer.flush(ctx)
+ displayer.close()
+
+def checkunsupportedgraphflags(pats, opts):
+ for op in ["newest_first"]:
+ if op in opts and opts[op]:
+ raise error.Abort(_("-G/--graph option is incompatible with --%s")
+ % op.replace("_", "-"))
+
+def graphrevs(repo, nodes, opts):
+ limit = getlimit(opts)
+ nodes.reverse()
+ if limit is not None:
+ nodes = nodes[:limit]
+ return graphmod.nodes(repo, nodes)
--- a/mercurial/logexchange.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/logexchange.py Mon Mar 19 08:07:18 2018 -0700
@@ -11,6 +11,7 @@
from .node import hex
from . import (
+ util,
vfs as vfsmod,
)
@@ -94,6 +95,30 @@
finally:
wlock.release()
+def activepath(repo, remote):
+ """returns remote path"""
+ local = None
+ # is the remote a local peer
+ local = remote.local()
+
+ # determine the remote path from the repo, if possible; else just
+ # use the string given to us
+ rpath = remote
+ if local:
+ rpath = remote._repo.root
+ elif not isinstance(remote, str):
+ rpath = remote._url
+
+ # represent the remotepath with user defined path name if exists
+ for path, url in repo.ui.configitems('paths'):
+ # remove auth info from user defined url
+ url = util.removeauth(url)
+ if url == rpath:
+ rpath = path
+ break
+
+ return rpath
+
def pullremotenames(localrepo, remoterepo):
"""
pulls bookmarks and branches information of the remote repo during a
@@ -101,7 +126,7 @@
localrepo is our local repository
remoterepo is the peer instance
"""
- remotepath = remoterepo.url()
+ remotepath = activepath(localrepo, remoterepo)
bookmarks = remoterepo.listkeys('bookmarks')
# on a push, we don't want to keep obsolete heads since
# they won't show up as heads on the next pull, so we
--- a/mercurial/lsprof.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/lsprof.py Mon Mar 19 08:07:18 2018 -0700
@@ -27,7 +27,7 @@
def __init__(self, data):
self.data = data
- def sort(self, crit="inlinetime"):
+ def sort(self, crit=r"inlinetime"):
"""XXX docstring"""
# profiler_entries isn't defined when running under PyPy.
if profiler_entry:
--- a/mercurial/mail.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/mail.py Mon Mar 19 08:07:18 2018 -0700
@@ -20,6 +20,7 @@
from . import (
encoding,
error,
+ pycompat,
sslutil,
util,
)
@@ -186,7 +187,7 @@
def codec2iana(cs):
''''''
- cs = email.charset.Charset(cs).input_charset.lower()
+ cs = pycompat.sysbytes(email.charset.Charset(cs).input_charset.lower())
# "latin1" normalizes to "iso8859-1", standard calls for "iso-8859-1"
if cs.startswith("iso") and not cs.startswith("iso-"):
@@ -205,7 +206,7 @@
return mimetextqp(s, subtype, 'us-ascii')
for charset in cs:
try:
- s.decode(charset)
+ s.decode(pycompat.sysstr(charset))
return mimetextqp(s, subtype, codec2iana(charset))
except UnicodeDecodeError:
pass
@@ -218,7 +219,7 @@
'''
cs = email.charset.Charset(charset)
msg = email.message.Message()
- msg.set_type('text/' + subtype)
+ msg.set_type(pycompat.sysstr('text/' + subtype))
for line in body.splitlines():
if len(line) > 950:
@@ -287,13 +288,13 @@
addr = addr.encode('ascii')
except UnicodeDecodeError:
raise error.Abort(_('invalid local address: %s') % addr)
- return email.Utils.formataddr((name, addr))
+ return email.utils.formataddr((name, addr))
def addressencode(ui, address, charsets=None, display=False):
'''Turns address into RFC-2047 compliant header.'''
if display or not address:
return address or ''
- name, addr = email.Utils.parseaddr(address)
+ name, addr = email.utils.parseaddr(address)
return _addressencode(ui, name, addr, charsets)
def addrlistencode(ui, addrs, charsets=None, display=False):
@@ -304,7 +305,7 @@
return [a.strip() for a in addrs if a.strip()]
result = []
- for name, addr in email.Utils.getaddresses(addrs):
+ for name, addr in email.utils.getaddresses(addrs):
if name or addr:
result.append(_addressencode(ui, name, addr, charsets))
return result
--- a/mercurial/manifest.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/manifest.py Mon Mar 19 08:07:18 2018 -0700
@@ -9,7 +9,6 @@
import heapq
import itertools
-import os
import struct
from .i18n import _
@@ -28,7 +27,7 @@
parsers = policy.importmod(r'parsers')
propertycache = util.propertycache
-def _parsev1(data):
+def _parse(data):
# This method does a little bit of excessive-looking
# precondition checking. This is so that the behavior of this
# class exactly matches its C counterpart to try and help
@@ -47,43 +46,7 @@
else:
yield f, bin(n), ''
-def _parsev2(data):
- metadataend = data.find('\n')
- # Just ignore metadata for now
- pos = metadataend + 1
- prevf = ''
- while pos < len(data):
- end = data.find('\n', pos + 1) # +1 to skip stem length byte
- if end == -1:
- raise ValueError('Manifest ended with incomplete file entry.')
- stemlen = ord(data[pos:pos + 1])
- items = data[pos + 1:end].split('\0')
- f = prevf[:stemlen] + items[0]
- if prevf > f:
- raise ValueError('Manifest entries not in sorted order.')
- fl = items[1]
- # Just ignore metadata (items[2:] for now)
- n = data[end + 1:end + 21]
- yield f, n, fl
- pos = end + 22
- prevf = f
-
-def _parse(data):
- """Generates (path, node, flags) tuples from a manifest text"""
- if data.startswith('\0'):
- return iter(_parsev2(data))
- else:
- return iter(_parsev1(data))
-
-def _text(it, usemanifestv2):
- """Given an iterator over (path, node, flags) tuples, returns a manifest
- text"""
- if usemanifestv2:
- return _textv2(it)
- else:
- return _textv1(it)
-
-def _textv1(it):
+def _text(it):
files = []
lines = []
_hex = revlog.hex
@@ -96,19 +59,6 @@
_checkforbidden(files)
return ''.join(lines)
-def _textv2(it):
- files = []
- lines = ['\0\n']
- prevf = ''
- for f, n, fl in it:
- files.append(f)
- stem = os.path.commonprefix([prevf, f])
- stemlen = min(len(stem), 255)
- lines.append("%c%s\0%s\n%s\n" % (stemlen, f[stemlen:], fl, n))
- prevf = f
- _checkforbidden(files)
- return ''.join(lines)
-
class lazymanifestiter(object):
def __init__(self, lm):
self.pos = 0
@@ -414,13 +364,7 @@
class manifestdict(object):
def __init__(self, data=''):
- if data.startswith('\0'):
- #_lazymanifest can not parse v2
- self._lm = _lazymanifest('')
- for f, n, fl in _parsev2(data):
- self._lm[f] = n, fl
- else:
- self._lm = _lazymanifest(data)
+ self._lm = _lazymanifest(data)
def __getitem__(self, key):
return self._lm[key][0]
@@ -589,12 +533,9 @@
def iterentries(self):
return self._lm.iterentries()
- def text(self, usemanifestv2=False):
- if usemanifestv2:
- return _textv2(self._lm.iterentries())
- else:
- # use (probably) native version for v1
- return self._lm.text()
+ def text(self):
+ # most likely uses native version
+ return self._lm.text()
def fastdelta(self, base, changes):
"""Given a base manifest text as a bytearray and a list of changes
@@ -755,6 +696,12 @@
size += m.__len__()
return size
+ def __nonzero__(self):
+ # Faster than "__len() != 0" since it avoids loading sub-manifests
+ return not self._isempty()
+
+ __bool__ = __nonzero__
+
def _isempty(self):
self._load() # for consistency; already loaded by all callers
return (not self._files and (not self._dirs or
@@ -954,7 +901,7 @@
else:
files.update(m1.iterkeys())
- for fn in t1._files.iterkeys():
+ for fn in t1._files:
if fn not in t2._files:
files.add(t1._subpath(fn))
@@ -1013,7 +960,7 @@
# yield this dir's files and walk its submanifests
self._load()
- for p in sorted(self._dirs.keys() + self._files.keys()):
+ for p in sorted(list(self._dirs) + list(self._files)):
if p in self._files:
fullp = self._subpath(p)
if match(fullp):
@@ -1132,12 +1079,12 @@
if fl:
self._flags[f] = fl
- def text(self, usemanifestv2=False):
+ def text(self):
"""Get the full data of this manifest as a bytestring."""
self._load()
- return _text(self.iterentries(), usemanifestv2)
+ return _text(self.iterentries())
- def dirtext(self, usemanifestv2=False):
+ def dirtext(self):
"""Get the full data of this directory as a bytestring. Make sure that
any submanifests have been written first, so their nodeids are correct.
"""
@@ -1145,7 +1092,7 @@
flags = self.flags
dirs = [(d[:-1], self._dirs[d]._node, 't') for d in self._dirs]
files = [(f, self._files[f], flags(f)) for f in self._files]
- return _text(sorted(dirs + files), usemanifestv2)
+ return _text(sorted(dirs + files))
def read(self, gettext, readsubtree):
def _load_for_read(s):
@@ -1202,15 +1149,12 @@
# stacks of commits, the number can go up, hence the config knob below.
cachesize = 4
optiontreemanifest = False
- usemanifestv2 = False
opts = getattr(opener, 'options', None)
if opts is not None:
cachesize = opts.get('manifestcachesize', cachesize)
optiontreemanifest = opts.get('treemanifest', False)
- usemanifestv2 = opts.get('manifestv2', usemanifestv2)
self._treeondisk = optiontreemanifest or treemanifest
- self._usemanifestv2 = usemanifestv2
self._fulltextcache = util.lrucachedict(cachesize)
@@ -1245,19 +1189,18 @@
self._fulltextcache.clear()
self._dirlogcache = {'': self}
- def dirlog(self, dir):
- if dir:
+ def dirlog(self, d):
+ if d:
assert self._treeondisk
- if dir not in self._dirlogcache:
- mfrevlog = manifestrevlog(self.opener, dir,
+ if d not in self._dirlogcache:
+ mfrevlog = manifestrevlog(self.opener, d,
self._dirlogcache,
treemanifest=self._treeondisk)
- self._dirlogcache[dir] = mfrevlog
- return self._dirlogcache[dir]
+ self._dirlogcache[d] = mfrevlog
+ return self._dirlogcache[d]
def add(self, m, transaction, link, p1, p2, added, removed, readtree=None):
- if (p1 in self.fulltextcache and util.safehasattr(m, 'fastdelta')
- and not self._usemanifestv2):
+ if p1 in self.fulltextcache and util.safehasattr(m, 'fastdelta'):
# If our first parent is in the manifest cache, we can
# compute a delta here using properties we know about the
# manifest up-front, which may save time later for the
@@ -1284,7 +1227,7 @@
n = self._addtree(m, transaction, link, m1, m2, readtree)
arraytext = None
else:
- text = m.text(self._usemanifestv2)
+ text = m.text()
n = self.addrevision(text, transaction, link, p1, p2)
arraytext = bytearray(text)
@@ -1303,13 +1246,13 @@
sublog.add(subm, transaction, link, subp1, subp2, None, None,
readtree=readtree)
m.writesubtrees(m1, m2, writesubtree)
- text = m.dirtext(self._usemanifestv2)
+ text = m.dirtext()
n = None
if self._dir != '':
# Double-check whether contents are unchanged to one parent
- if text == m1.dirtext(self._usemanifestv2):
+ if text == m1.dirtext():
n = m1.node()
- elif text == m2.dirtext(self._usemanifestv2):
+ elif text == m2.dirtext():
n = m2.node()
if not n:
@@ -1487,19 +1430,6 @@
Changing the value of `shallow` has no effect on flat manifests.
'''
revlog = self._revlog()
- if revlog._usemanifestv2:
- # Need to perform a slow delta
- r0 = revlog.deltaparent(revlog.rev(self._node))
- m0 = self._manifestlog[revlog.node(r0)].read()
- m1 = self.read()
- md = manifestdict()
- for f, ((n0, fl0), (n1, fl1)) in m0.diff(m1).iteritems():
- if n1:
- md[f] = n1
- if fl1:
- md.setflag(f, fl1)
- return md
-
r = revlog.rev(self._node)
d = mdiff.patchtext(revlog.revdiff(revlog.deltaparent(r), r))
return manifestdict(d)
@@ -1602,7 +1532,7 @@
its 't' flag.
'''
revlog = self._revlog()
- if shallow and not revlog._usemanifestv2:
+ if shallow:
r = revlog.rev(self._node)
d = mdiff.patchtext(revlog.revdiff(revlog.deltaparent(r), r))
return manifestdict(d)
--- a/mercurial/match.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/match.py Mon Mar 19 08:07:18 2018 -0700
@@ -13,8 +13,10 @@
from .i18n import _
from . import (
+ encoding,
error,
pathutil,
+ pycompat,
util,
)
@@ -225,7 +227,7 @@
except IOError as inst:
if warn:
warn(_("skipping unreadable pattern file '%s': %s\n") %
- (pat, inst.strerror))
+ (pat, util.forcebytestr(inst.strerror)))
continue
# else: re or relre - which cannot be normalized
kindpats.append((kind, pat, ''))
@@ -345,7 +347,7 @@
return 'all'
def __repr__(self):
- return '<alwaysmatcher>'
+ return r'<alwaysmatcher>'
class nevermatcher(basematcher):
'''Matches nothing.'''
@@ -368,7 +370,7 @@
return False
def __repr__(self):
- return '<nevermatcher>'
+ return r'<nevermatcher>'
class patternmatcher(basematcher):
@@ -397,6 +399,7 @@
def prefix(self):
return self._prefix
+ @encoding.strmethod
def __repr__(self):
return ('<patternmatcher patterns=%r>' % self._pats)
@@ -424,8 +427,9 @@
any(parentdir in self._roots
for parentdir in util.finddirs(dir)))
+ @encoding.strmethod
def __repr__(self):
- return ('<includematcher includes=%r>' % self._pats)
+ return ('<includematcher includes=%r>' % pycompat.bytestr(self._pats))
class exactmatcher(basematcher):
'''Matches the input files exactly. They are interpreted as paths, not
@@ -452,6 +456,7 @@
def isexact(self):
return True
+ @encoding.strmethod
def __repr__(self):
return ('<exactmatcher files=%r>' % self._files)
@@ -492,6 +497,7 @@
def isexact(self):
return self._m1.isexact()
+ @encoding.strmethod
def __repr__(self):
return ('<differencematcher m1=%r, m2=%r>' % (self._m1, self._m2))
@@ -558,6 +564,7 @@
def isexact(self):
return self._m1.isexact() or self._m2.isexact()
+ @encoding.strmethod
def __repr__(self):
return ('<intersectionmatcher m1=%r, m2=%r>' % (self._m1, self._m2))
@@ -638,6 +645,7 @@
def prefix(self):
return self._matcher.prefix() and not self._always
+ @encoding.strmethod
def __repr__(self):
return ('<subdirmatcher path=%r, matcher=%r>' %
(self._path, self._matcher))
@@ -671,6 +679,7 @@
r |= v
return r
+ @encoding.strmethod
def __repr__(self):
return ('<unionmatcher matchers=%r>' % self._matchers)
--- a/mercurial/mdiff.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/mdiff.py Mon Mar 19 08:07:18 2018 -0700
@@ -13,11 +13,15 @@
from .i18n import _
from . import (
+ encoding,
error,
policy,
pycompat,
util,
)
+from .utils import dateutil
+
+_missing_newline_marker = "\\ No newline at end of file\n"
bdiff = policy.importmod(r'bdiff')
mpatch = policy.importmod(r'mpatch')
@@ -27,16 +31,7 @@
patches = mpatch.patches
patchedsize = mpatch.patchedsize
textdiff = bdiff.bdiff
-
-def splitnewlines(text):
- '''like str.splitlines, but only split on newlines.'''
- lines = [l + '\n' for l in text.split('\n')]
- if lines:
- if lines[-1] == '\n':
- lines.pop()
- else:
- lines[-1] = lines[-1][:-1]
- return lines
+splitnewlines = bdiff.splitnewlines
class diffopts(object):
'''context is the number of context lines
@@ -68,6 +63,7 @@
'upgrade': False,
'showsimilarity': False,
'worddiff': False,
+ 'xdiff': False,
}
def __init__(self, **opts):
@@ -193,6 +189,13 @@
raise error.Abort(_('line range exceeds file size'))
return filteredblocks, (lba, uba)
+def chooseblocksfunc(opts=None):
+ if (opts is None or not opts.xdiff
+ or not util.safehasattr(bdiff, 'xdiffblocks')):
+ return bdiff.blocks
+ else:
+ return bdiff.xdiffblocks
+
def allblocks(text1, text2, opts=None, lines1=None, lines2=None):
"""Return (block, type) tuples, where block is an mdiff.blocks
line entry. type is '=' for blocks matching exactly one another
@@ -206,7 +209,7 @@
if opts.ignorews or opts.ignorewsamount or opts.ignorewseol:
text1 = wsclean(opts, text1, False)
text2 = wsclean(opts, text2, False)
- diff = bdiff.blocks(text1, text2)
+ diff = chooseblocksfunc(opts)(text1, text2)
for i, s1 in enumerate(diff):
# The first match is special.
# we've either found a match starting at line 0 or a match later
@@ -234,13 +237,15 @@
yield s, type
yield s1, '='
-def unidiff(a, ad, b, bd, fn1, fn2, opts=defaultopts):
+def unidiff(a, ad, b, bd, fn1, fn2, binary, opts=defaultopts):
"""Return a unified diff as a (headers, hunks) tuple.
If the diff is not null, `headers` is a list with unified diff header
lines "--- <original>" and "+++ <new>" and `hunks` is a generator yielding
(hunkrange, hunklines) coming from _unidiff().
Otherwise, `headers` and `hunks` are empty.
+
+ Set binary=True if either a or b should be taken as a binary file.
"""
def datetag(date, fn=None):
if not opts.git and not opts.nodates:
@@ -259,23 +264,18 @@
aprefix = 'a/'
bprefix = 'b/'
- epoch = util.datestr((0, 0))
+ epoch = dateutil.datestr((0, 0))
fn1 = util.pconvert(fn1)
fn2 = util.pconvert(fn2)
- def checknonewline(lines):
- for text in lines:
- if text[-1:] != '\n':
- text += "\n\ No newline at end of file\n"
- yield text
-
- if not opts.text and (util.binary(a) or util.binary(b)):
+ if binary:
if a and b and len(a) == len(b) and a == b:
return sentinel
headerlines = []
hunks = (None, ['Binary file %s has changed\n' % fn1]),
elif not a:
+ without_newline = not b.endswith('\n')
b = splitnewlines(b)
if a is None:
l1 = '--- /dev/null%s' % datetag(epoch)
@@ -286,8 +286,12 @@
size = len(b)
hunkrange = (0, 0, 1, size)
hunklines = ["@@ -0,0 +1,%d @@\n" % size] + ["+" + e for e in b]
- hunks = (hunkrange, checknonewline(hunklines)),
+ if without_newline:
+ hunklines[-1] += '\n'
+ hunklines.append(_missing_newline_marker)
+ hunks = (hunkrange, hunklines),
elif not b:
+ without_newline = not a.endswith('\n')
a = splitnewlines(a)
l1 = "--- %s%s%s" % (aprefix, fn1, datetag(ad, fn1))
if b is None:
@@ -298,24 +302,19 @@
size = len(a)
hunkrange = (1, size, 0, 0)
hunklines = ["@@ -1,%d +0,0 @@\n" % size] + ["-" + e for e in a]
- hunks = (hunkrange, checknonewline(hunklines)),
+ if without_newline:
+ hunklines[-1] += '\n'
+ hunklines.append(_missing_newline_marker)
+ hunks = (hunkrange, hunklines),
else:
- diffhunks = _unidiff(a, b, opts=opts)
- try:
- hunkrange, hunklines = next(diffhunks)
- except StopIteration:
+ hunks = _unidiff(a, b, opts=opts)
+ if not next(hunks):
return sentinel
headerlines = [
"--- %s%s%s" % (aprefix, fn1, datetag(ad, fn1)),
"+++ %s%s%s" % (bprefix, fn2, datetag(bd, fn2)),
]
- def rewindhunks():
- yield hunkrange, checknonewline(hunklines)
- for hr, hl in diffhunks:
- yield hr, checknonewline(hl)
-
- hunks = rewindhunks()
return headerlines, hunks
@@ -327,6 +326,8 @@
form the '@@ -s1,l1 +s2,l2 @@' header and `hunklines` is a list of lines
of the hunk combining said header followed by line additions and
deletions.
+
+ The hunks are prefixed with a bool.
"""
l1 = splitnewlines(t1)
l2 = splitnewlines(t2)
@@ -357,7 +358,11 @@
# alphanumeric char.
for i in xrange(astart - 1, lastpos - 1, -1):
if l1[i][0:1].isalnum():
- func = ' ' + l1[i].rstrip()[:40]
+ func = b' ' + l1[i].rstrip()
+ # split long function name if ASCII. otherwise we have no
+ # idea where the multi-byte boundary is, so just leave it.
+ if encoding.isasciistr(func):
+ func = func[:41]
lastfunc[1] = func
break
# by recording this hunk's starting point as the next place to
@@ -377,6 +382,26 @@
+ delta
+ [' ' + l1[x] for x in xrange(a2, aend)]
)
+ # If either file ends without a newline and the last line of
+ # that file is part of a hunk, a marker is printed. If the
+ # last line of both files is identical and neither ends in
+ # a newline, print only one marker. That's the only case in
+ # which the hunk can end in a shared line without a newline.
+ skip = False
+ if not t1.endswith('\n') and astart + alen == len(l1) + 1:
+ for i in xrange(len(hunklines) - 1, -1, -1):
+ if hunklines[i].startswith(('-', ' ')):
+ if hunklines[i].startswith(' '):
+ skip = True
+ hunklines[i] += '\n'
+ hunklines.insert(i + 1, _missing_newline_marker)
+ break
+ if not skip and not t2.endswith('\n') and bstart + blen == len(l2) + 1:
+ for i in xrange(len(hunklines) - 1, -1, -1):
+ if hunklines[i].startswith('+'):
+ hunklines[i] += '\n'
+ hunklines.insert(i + 1, _missing_newline_marker)
+ break
yield hunkrange, hunklines
# bdiff.blocks gives us the matching sequences in the files. The loop
@@ -385,6 +410,7 @@
#
hunk = None
ignoredlines = 0
+ has_hunks = False
for s, stype in allblocks(t1, t2, opts, l1, l2):
a1, a2, b1, b2 = s
if stype != '!':
@@ -411,6 +437,9 @@
astart = hunk[1]
bstart = hunk[3]
else:
+ if not has_hunks:
+ has_hunks = True
+ yield True
for x in yieldhunk(hunk):
yield x
if prev:
@@ -427,17 +456,22 @@
delta[len(delta):] = ['+' + x for x in new]
if hunk:
+ if not has_hunks:
+ has_hunks = True
+ yield True
for x in yieldhunk(hunk):
yield x
+ elif not has_hunks:
+ yield False
def b85diff(to, tn):
'''print base85-encoded binary diff'''
def fmtline(line):
l = len(line)
if l <= 26:
- l = chr(ord('A') + l - 1)
+ l = pycompat.bytechr(ord('A') + l - 1)
else:
- l = chr(l - 26 + ord('a') - 1)
+ l = pycompat.bytechr(l - 26 + ord('a') - 1)
return '%c%s\n' % (l, util.b85encode(line, True))
def chunk(text, csize=52):
--- a/mercurial/merge.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/merge.py Mon Mar 19 08:07:18 2018 -0700
@@ -25,13 +25,12 @@
from . import (
copies,
error,
- extensions,
filemerge,
match as matchmod,
obsutil,
pycompat,
scmutil,
- subrepo,
+ subrepoutil,
util,
worker,
)
@@ -288,14 +287,14 @@
off = 0
end = len(data)
while off < end:
- rtype = data[off]
+ rtype = data[off:off + 1]
off += 1
length = _unpack('>I', data[off:(off + 4)])[0]
off += 4
record = data[off:(off + length)]
off += length
if rtype == 't':
- rtype, record = record[0], record[1:]
+ rtype, record = record[0:1], record[1:]
records.append((rtype, record))
f.close()
except IOError as err:
@@ -400,7 +399,7 @@
def _writerecordsv1(self, records):
"""Write current state on disk in a version 1 file"""
- f = self._repo.vfs(self.statepathv1, 'w')
+ f = self._repo.vfs(self.statepathv1, 'wb')
irecords = iter(records)
lrecords = next(irecords)
assert lrecords[0] == 'L'
@@ -416,7 +415,7 @@
See the docstring for _readrecordsv2 for why we use 't'."""
# these are the records that all version 2 clients can read
whitelist = 'LOF'
- f = self._repo.vfs(self.statepathv2, 'w')
+ f = self._repo.vfs(self.statepathv2, 'wb')
for key, data in records:
assert len(key) == 1
if key not in whitelist:
@@ -974,14 +973,14 @@
# Rename all local conflicting files that have not been deleted.
for p in localconflicts:
if p not in deletedfiles:
- ctxname = str(wctx).rstrip('+')
+ ctxname = bytes(wctx).rstrip('+')
pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
actions[pnew] = ('pr', (p,), "local path conflict")
actions[p] = ('p', (pnew, 'l'), "path conflict")
if remoteconflicts:
# Check if all files in the conflicting directories have been removed.
- ctxname = str(mctx).rstrip('+')
+ ctxname = bytes(mctx).rstrip('+')
for f, p in _filesindirs(repo, mf, remoteconflicts):
if f not in deletedfiles:
m, args, msg = actions[p]
@@ -1186,8 +1185,9 @@
def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
"""Resolves false conflicts where the nodeid changed but the content
remained the same."""
-
- for f, (m, args, msg) in actions.items():
+ # We force a copy of actions.items() because we're going to mutate
+ # actions as we resolve trivial conflicts.
+ for f, (m, args, msg) in list(actions.items()):
if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
# local did change but ended up with same content
actions[f] = 'r', None, "prompt same"
@@ -1386,6 +1386,16 @@
if i > 0:
yield i, f
+def _prefetchfiles(repo, ctx, actions):
+ """Invoke ``scmutil.fileprefetchhooks()`` for the files relevant to the dict
+ of merge actions. ``ctx`` is the context being merged in."""
+
+ # Skipping 'a', 'am', 'f', 'r', 'dm', 'e', 'k', 'p' and 'pr', because they
+ # don't touch the context to be merged in. 'cd' is skipped, because
+ # changed/deleted never resolves to something from the remote side.
+ oplist = [actions[a] for a in 'g dc dg m'.split()]
+ prefetch = scmutil.fileprefetchhooks
+ prefetch(repo, ctx, [f for sublist in oplist for f, args, msg in sublist])
def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
"""apply the merge action list to the working directory
@@ -1397,6 +1407,8 @@
describes how many files were affected by the update.
"""
+ _prefetchfiles(repo, mctx, actions)
+
updated, merged, removed = 0, 0, 0
ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
moves = []
@@ -1445,7 +1457,7 @@
z = 0
if [a for a in actions['r'] if a[0] == '.hgsubstate']:
- subrepo.submerge(repo, wctx, mctx, wctx, overwrite, labels)
+ subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
# record path conflicts
for f, args, msg in actions['p']:
@@ -1495,7 +1507,7 @@
updated = len(actions['g'])
if [a for a in actions['g'] if a[0] == '.hgsubstate']:
- subrepo.submerge(repo, wctx, mctx, wctx, overwrite, labels)
+ subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
# forget (manifest only, just log it) (must come first)
for f, args, msg in actions['f']:
@@ -1583,8 +1595,8 @@
z += 1
progress(_updating, z, item=f, total=numupdates, unit=_files)
if f == '.hgsubstate': # subrepo states need updating
- subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
- overwrite, labels)
+ subrepoutil.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
+ overwrite, labels)
continue
wctx[f].audit()
complete, r = ms.preresolve(f, wctx)
@@ -1835,7 +1847,7 @@
else:
pas = [p1.ancestor(p2, warn=branchmerge)]
- fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
+ fp1, fp2, xp1, xp2 = p1.node(), p2.node(), bytes(p1), bytes(p2)
### check phase
if not overwrite:
@@ -1913,7 +1925,7 @@
# Prompt and create actions. Most of this is in the resolve phase
# already, but we can't handle .hgsubstate in filemerge or
- # subrepo.submerge yet so we have to keep prompting for it.
+ # subrepoutil.submerge yet so we have to keep prompting for it.
if '.hgsubstate' in actionbyfile:
f = '.hgsubstate'
m, args, msg = actionbyfile[f]
@@ -1992,6 +2004,8 @@
fsmonitorthreshold = repo.ui.configint('fsmonitor',
'warn_update_file_count')
try:
+ # avoid cycle: extensions -> cmdutil -> merge
+ from . import extensions
extensions.find('fsmonitor')
fsmonitorenabled = repo.ui.config('fsmonitor', 'mode') != 'off'
# We intentionally don't look at whether fsmonitor has disabled
--- a/mercurial/namespaces.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/namespaces.py Mon Mar 19 08:07:18 2018 -0700
@@ -2,6 +2,7 @@
from .i18n import _
from . import (
+ registrar,
templatekw,
util,
)
@@ -87,10 +88,10 @@
# we only generate a template keyword if one does not already exist
if namespace.name not in templatekw.keywords:
- def generatekw(**args):
- return templatekw.shownames(namespace.name, **args)
-
- templatekw.keywords[namespace.name] = generatekw
+ templatekeyword = registrar.templatekeyword(templatekw.keywords)
+ @templatekeyword(namespace.name, requires={'repo', 'ctx', 'templ'})
+ def generatekw(context, mapping):
+ return templatekw.shownames(context, mapping, namespace.name)
def singlenode(self, repo, name):
"""
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/narrowspec.py Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,199 @@
+# narrowspec.py - methods for working with a narrow view of a repository
+#
+# Copyright 2017 Google, Inc.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+import errno
+
+from .i18n import _
+from . import (
+ error,
+ match as matchmod,
+ util,
+)
+
+FILENAME = 'narrowspec'
+
+def _parsestoredpatterns(text):
+ """Parses the narrowspec format that's stored on disk."""
+ patlist = None
+ includepats = []
+ excludepats = []
+ for l in text.splitlines():
+ if l == '[includes]':
+ if patlist is None:
+ patlist = includepats
+ else:
+ raise error.Abort(_('narrowspec includes section must appear '
+ 'at most once, before excludes'))
+ elif l == '[excludes]':
+ if patlist is not excludepats:
+ patlist = excludepats
+ else:
+ raise error.Abort(_('narrowspec excludes section must appear '
+ 'at most once'))
+ else:
+ patlist.append(l)
+
+ return set(includepats), set(excludepats)
+
+def parseserverpatterns(text):
+ """Parses the narrowspec format that's returned by the server."""
+ includepats = set()
+ excludepats = set()
+
+ # We get one entry per line, in the format "<key> <value>".
+ # It's OK for value to contain other spaces.
+ for kp in (l.split(' ', 1) for l in text.splitlines()):
+ if len(kp) != 2:
+ raise error.Abort(_('Invalid narrowspec pattern line: "%s"') % kp)
+ key = kp[0]
+ pat = kp[1]
+ if key == 'include':
+ includepats.add(pat)
+ elif key == 'exclude':
+ excludepats.add(pat)
+ else:
+ raise error.Abort(_('Invalid key "%s" in server response') % key)
+
+ return includepats, excludepats
+
+def normalizesplitpattern(kind, pat):
+ """Returns the normalized version of a pattern and kind.
+
+ Returns a tuple with the normalized kind and normalized pattern.
+ """
+ pat = pat.rstrip('/')
+ _validatepattern(pat)
+ return kind, pat
+
+def _numlines(s):
+ """Returns the number of lines in s, including ending empty lines."""
+ # We use splitlines because it is Unicode-friendly and thus Python 3
+ # compatible. However, it does not count empty lines at the end, so trick
+ # it by adding a character at the end.
+ return len((s + 'x').splitlines())
+
+def _validatepattern(pat):
+ """Validates the pattern and aborts if it is invalid.
+
+ Patterns are stored in the narrowspec as newline-separated
+ POSIX-style bytestring paths. There's no escaping.
+ """
+
+ # We use newlines as separators in the narrowspec file, so don't allow them
+ # in patterns.
+ if _numlines(pat) > 1:
+ raise error.Abort(_('newlines are not allowed in narrowspec paths'))
+
+ components = pat.split('/')
+ if '.' in components or '..' in components:
+ raise error.Abort(_('"." and ".." are not allowed in narrowspec paths'))
+
+def normalizepattern(pattern, defaultkind='path'):
+ """Returns the normalized version of a text-format pattern.
+
+ If the pattern has no kind, the default will be added.
+ """
+ kind, pat = matchmod._patsplit(pattern, defaultkind)
+ return '%s:%s' % normalizesplitpattern(kind, pat)
+
+def parsepatterns(pats):
+ """Parses a list of patterns into a typed pattern set."""
+ return set(normalizepattern(p) for p in pats)
+
+def format(includes, excludes):
+ output = '[includes]\n'
+ for i in sorted(includes - excludes):
+ output += i + '\n'
+ output += '[excludes]\n'
+ for e in sorted(excludes):
+ output += e + '\n'
+ return output
+
+def match(root, include=None, exclude=None):
+ if not include:
+ # Passing empty include and empty exclude to matchmod.match()
+ # gives a matcher that matches everything, so explicitly use
+ # the nevermatcher.
+ return matchmod.never(root, '')
+ return matchmod.match(root, '', [], include=include or [],
+ exclude=exclude or [])
+
+def needsexpansion(includes):
+ return [i for i in includes if i.startswith('include:')]
+
+def load(repo):
+ try:
+ spec = repo.vfs.read(FILENAME)
+ except IOError as e:
+ # Treat "narrowspec does not exist" the same as "narrowspec file exists
+ # and is empty".
+ if e.errno == errno.ENOENT:
+ # Without this the next call to load will use the cached
+ # non-existence of the file, which can cause some odd issues.
+ repo.invalidate(clearfilecache=True)
+ return set(), set()
+ raise
+ return _parsestoredpatterns(spec)
+
+def save(repo, includepats, excludepats):
+ spec = format(includepats, excludepats)
+ repo.vfs.write(FILENAME, spec)
+
+def restrictpatterns(req_includes, req_excludes, repo_includes, repo_excludes):
+ r""" Restricts the patterns according to repo settings,
+ results in a logical AND operation
+
+ :param req_includes: requested includes
+ :param req_excludes: requested excludes
+ :param repo_includes: repo includes
+ :param repo_excludes: repo excludes
+ :return: include patterns, exclude patterns, and invalid include patterns.
+
+ >>> restrictpatterns({'f1','f2'}, {}, ['f1'], [])
+ (set(['f1']), {}, [])
+ >>> restrictpatterns({'f1'}, {}, ['f1','f2'], [])
+ (set(['f1']), {}, [])
+ >>> restrictpatterns({'f1/fc1', 'f3/fc3'}, {}, ['f1','f2'], [])
+ (set(['f1/fc1']), {}, [])
+ >>> restrictpatterns({'f1_fc1'}, {}, ['f1','f2'], [])
+ ([], set(['path:.']), [])
+ >>> restrictpatterns({'f1/../f2/fc2'}, {}, ['f1','f2'], [])
+ (set(['f2/fc2']), {}, [])
+ >>> restrictpatterns({'f1/../f3/fc3'}, {}, ['f1','f2'], [])
+ ([], set(['path:.']), [])
+ >>> restrictpatterns({'f1/$non_exitent_var'}, {}, ['f1','f2'], [])
+ (set(['f1/$non_exitent_var']), {}, [])
+ """
+ res_excludes = set(req_excludes)
+ res_excludes.update(repo_excludes)
+ invalid_includes = []
+ if not req_includes:
+ res_includes = set(repo_includes)
+ elif 'path:.' not in repo_includes:
+ res_includes = []
+ for req_include in req_includes:
+ req_include = util.expandpath(util.normpath(req_include))
+ if req_include in repo_includes:
+ res_includes.append(req_include)
+ continue
+ valid = False
+ for repo_include in repo_includes:
+ if req_include.startswith(repo_include + '/'):
+ valid = True
+ res_includes.append(req_include)
+ break
+ if not valid:
+ invalid_includes.append(req_include)
+ if len(res_includes) == 0:
+ res_excludes = {'path:.'}
+ else:
+ res_includes = set(res_includes)
+ else:
+ res_includes = set(req_includes)
+ return res_includes, res_excludes, invalid_includes
--- a/mercurial/node.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/node.py Mon Mar 19 08:07:18 2018 -0700
@@ -11,7 +11,14 @@
# This ugly style has a noticeable effect in manifest parsing
hex = binascii.hexlify
-bin = binascii.unhexlify
+# Adapt to Python 3 API changes. If this ends up showing up in
+# profiles, we can use this version only on Python 3, and forward
+# binascii.unhexlify like we used to on Python 2.
+def bin(s):
+ try:
+ return binascii.unhexlify(s)
+ except binascii.Error as e:
+ raise TypeError(e)
nullrev = -1
nullid = b"\0" * 20
--- a/mercurial/obsolete.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/obsolete.py Mon Mar 19 08:07:18 2018 -0700
@@ -81,6 +81,7 @@
policy,
util,
)
+from .utils import dateutil
parsers = policy.importmod(r'parsers')
@@ -147,38 +148,10 @@
return _getoptionvalue(repo, option)
-### obsolescence marker flag
-
-## bumpedfix flag
-#
-# When a changeset A' succeed to a changeset A which became public, we call A'
-# "bumped" because it's a successors of a public changesets
-#
-# o A' (bumped)
-# |`:
-# | o A
-# |/
-# o Z
-#
-# The way to solve this situation is to create a new changeset Ad as children
-# of A. This changeset have the same content than A'. So the diff from A to A'
-# is the same than the diff from A to Ad. Ad is marked as a successors of A'
-#
-# o Ad
-# |`:
-# | x A'
-# |'|
-# o | A
-# |/
-# o Z
-#
-# But by transitivity Ad is also a successors of A. To avoid having Ad marked
-# as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
-# This flag mean that the successors express the changes between the public and
-# bumped version and fix the situation, breaking the transitivity of
-# "bumped" here.
-bumpedfix = 1
-usingsha256 = 2
+# Creating aliases for marker flags because evolve extension looks for
+# bumpedfix in obsolete.py
+bumpedfix = obsutil.bumpedfix
+usingsha256 = obsutil.usingsha256
## Parsing and writing of version "0"
#
@@ -506,13 +479,6 @@
for mark in markers:
successors.setdefault(mark[0], set()).add(mark)
-def _addprecursors(*args, **kwargs):
- msg = ("'obsolete._addprecursors' is deprecated, "
- "use 'obsolete._addpredecessors'")
- util.nouideprecwarn(msg, '4.4')
-
- return _addpredecessors(*args, **kwargs)
-
@util.nogc
def _addpredecessors(predecessors, markers):
for mark in markers:
@@ -570,7 +536,7 @@
return len(self._all)
def __nonzero__(self):
- if not self._cached('_all'):
+ if not self._cached(r'_all'):
try:
return self.svfs.stat('obsstore').st_size > 1
except OSError as inst:
@@ -608,13 +574,13 @@
if date is None:
if 'date' in metadata:
# as a courtesy for out-of-tree extensions
- date = util.parsedate(metadata.pop('date'))
+ date = dateutil.parsedate(metadata.pop('date'))
elif ui is not None:
date = ui.configdate('devel', 'default-date')
if date is None:
- date = util.makedate()
+ date = dateutil.makedate()
else:
- date = util.makedate()
+ date = dateutil.makedate()
if len(prec) != 20:
raise ValueError(prec)
for succ in succs:
@@ -663,7 +629,7 @@
self.caches.clear()
# records the number of new markers for the transaction hooks
previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
- transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
+ transaction.hookargs['new_obsmarkers'] = '%d' % (previous + len(new))
return len(new)
def mergemarkers(self, transaction, data):
@@ -700,14 +666,6 @@
_addsuccessors(successors, self._all)
return successors
- @property
- def precursors(self):
- msg = ("'obsstore.precursors' is deprecated, "
- "use 'obsstore.predecessors'")
- util.nouideprecwarn(msg, '4.4')
-
- return self.predecessors
-
@propertycache
def predecessors(self):
predecessors = {}
@@ -727,11 +685,11 @@
markers = list(markers) # to allow repeated iteration
self._data = self._data + rawdata
self._all.extend(markers)
- if self._cached('successors'):
+ if self._cached(r'successors'):
_addsuccessors(self.successors, markers)
- if self._cached('predecessors'):
+ if self._cached(r'predecessors'):
_addpredecessors(self.predecessors, markers)
- if self._cached('children'):
+ if self._cached(r'children'):
_addchildren(self.children, markers)
_checkinvalidmarkers(markers)
@@ -843,42 +801,6 @@
repo.invalidatevolatilesets()
return True
-# keep compatibility for the 4.3 cycle
-def allprecursors(obsstore, nodes, ignoreflags=0):
- movemsg = 'obsolete.allprecursors moved to obsutil.allprecursors'
- util.nouideprecwarn(movemsg, '4.3')
- return obsutil.allprecursors(obsstore, nodes, ignoreflags)
-
-def allsuccessors(obsstore, nodes, ignoreflags=0):
- movemsg = 'obsolete.allsuccessors moved to obsutil.allsuccessors'
- util.nouideprecwarn(movemsg, '4.3')
- return obsutil.allsuccessors(obsstore, nodes, ignoreflags)
-
-def marker(repo, data):
- movemsg = 'obsolete.marker moved to obsutil.marker'
- repo.ui.deprecwarn(movemsg, '4.3')
- return obsutil.marker(repo, data)
-
-def getmarkers(repo, nodes=None, exclusive=False):
- movemsg = 'obsolete.getmarkers moved to obsutil.getmarkers'
- repo.ui.deprecwarn(movemsg, '4.3')
- return obsutil.getmarkers(repo, nodes=nodes, exclusive=exclusive)
-
-def exclusivemarkers(repo, nodes):
- movemsg = 'obsolete.exclusivemarkers moved to obsutil.exclusivemarkers'
- repo.ui.deprecwarn(movemsg, '4.3')
- return obsutil.exclusivemarkers(repo, nodes)
-
-def foreground(repo, nodes):
- movemsg = 'obsolete.foreground moved to obsutil.foreground'
- repo.ui.deprecwarn(movemsg, '4.3')
- return obsutil.foreground(repo, nodes)
-
-def successorssets(repo, initialnode, cache=None):
- movemsg = 'obsolete.successorssets moved to obsutil.successorssets'
- repo.ui.deprecwarn(movemsg, '4.3')
- return obsutil.successorssets(repo, initialnode, cache=cache)
-
# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}
def cachefor(name):
@@ -933,14 +855,6 @@
obs = set(r for r in notpublic if isobs(getnode(r)))
return obs
-@cachefor('unstable')
-def _computeunstableset(repo):
- msg = ("'unstable' volatile set is deprecated, "
- "use 'orphan'")
- repo.ui.deprecwarn(msg, '4.4')
-
- return _computeorphanset(repo)
-
@cachefor('orphan')
def _computeorphanset(repo):
"""the set of non obsolete revisions with obsolete parents"""
@@ -969,14 +883,6 @@
"""the set of obsolete parents without non obsolete descendants"""
return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')
-@cachefor('bumped')
-def _computebumpedset(repo):
- msg = ("'bumped' volatile set is deprecated, "
- "use 'phasedivergent'")
- repo.ui.deprecwarn(msg, '4.4')
-
- return _computephasedivergentset(repo)
-
@cachefor('phasedivergent')
def _computephasedivergentset(repo):
"""the set of revs trying to obsolete public revisions"""
@@ -1000,14 +906,6 @@
break # Next draft!
return bumped
-@cachefor('divergent')
-def _computedivergentset(repo):
- msg = ("'divergent' volatile set is deprecated, "
- "use 'contentdivergent'")
- repo.ui.deprecwarn(msg, '4.4')
-
- return _computecontentdivergentset(repo)
-
@cachefor('contentdivergent')
def _computecontentdivergentset(repo):
"""the set of rev that compete to be the final successors of some revision.
--- a/mercurial/obsutil.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/obsutil.py Mon Mar 19 08:07:18 2018 -0700
@@ -15,6 +15,40 @@
phases,
util,
)
+from .utils import dateutil
+
+### obsolescence marker flag
+
+## bumpedfix flag
+#
+# When a changeset A' succeed to a changeset A which became public, we call A'
+# "bumped" because it's a successors of a public changesets
+#
+# o A' (bumped)
+# |`:
+# | o A
+# |/
+# o Z
+#
+# The way to solve this situation is to create a new changeset Ad as children
+# of A. This changeset have the same content than A'. So the diff from A to A'
+# is the same than the diff from A to Ad. Ad is marked as a successors of A'
+#
+# o Ad
+# |`:
+# | x A'
+# |'|
+# o | A
+# |/
+# o Z
+#
+# But by transitivity Ad is also a successors of A. To avoid having Ad marked
+# as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
+# This flag mean that the successors express the changes between the public and
+# bumped version and fix the situation, breaking the transitivity of
+# "bumped" here.
+bumpedfix = 1
+usingsha256 = 2
class marker(object):
"""Wrap obsolete marker raw data"""
@@ -33,12 +67,6 @@
return False
return self._data == other._data
- def precnode(self):
- msg = ("'marker.precnode' is deprecated, "
- "use 'marker.prednode'")
- util.nouideprecwarn(msg, '4.4')
- return self.prednode()
-
def prednode(self):
"""Predecessor changeset node identifier"""
return self._data[0]
@@ -106,15 +134,6 @@
else:
stack.append(precnodeid)
-def allprecursors(*args, **kwargs):
- """ (DEPRECATED)
- """
- msg = ("'obsutil.allprecursors' is deprecated, "
- "use 'obsutil.allpredecessors'")
- util.nouideprecwarn(msg, '4.4')
-
- return allpredecessors(*args, **kwargs)
-
def allpredecessors(obsstore, nodes, ignoreflags=0):
"""Yield node for every precursors of <nodes>.
@@ -421,10 +440,10 @@
# Check if other meta has changed
changeextra = changectx.extra().items()
- ctxmeta = filter(metanotblacklisted, changeextra)
+ ctxmeta = list(filter(metanotblacklisted, changeextra))
sourceextra = source.extra().items()
- srcmeta = filter(metanotblacklisted, sourceextra)
+ srcmeta = list(filter(metanotblacklisted, sourceextra))
if ctxmeta != srcmeta:
effects |= METACHANGED
@@ -856,11 +875,11 @@
max_date = max(dates)
if min_date == max_date:
- fmtmin_date = util.datestr(min_date, '%Y-%m-%d %H:%M %1%2')
+ fmtmin_date = dateutil.datestr(min_date, '%Y-%m-%d %H:%M %1%2')
line.append(" (at %s)" % fmtmin_date)
else:
- fmtmin_date = util.datestr(min_date, '%Y-%m-%d %H:%M %1%2')
- fmtmax_date = util.datestr(max_date, '%Y-%m-%d %H:%M %1%2')
+ fmtmin_date = dateutil.datestr(min_date, '%Y-%m-%d %H:%M %1%2')
+ fmtmax_date = dateutil.datestr(max_date, '%Y-%m-%d %H:%M %1%2')
line.append(" (between %s and %s)" % (fmtmin_date, fmtmax_date))
return "".join(line)
@@ -904,3 +923,55 @@
args = (changeid, firstsuccessors, remainingnumber)
return filteredmsgtable['superseded_split_several'] % args
+
+def divergentsets(repo, ctx):
+ """Compute sets of commits divergent with a given one"""
+ cache = {}
+ base = {}
+ for n in allpredecessors(repo.obsstore, [ctx.node()]):
+ if n == ctx.node():
+ # a node can't be a base for divergence with itself
+ continue
+ nsuccsets = successorssets(repo, n, cache)
+ for nsuccset in nsuccsets:
+ if ctx.node() in nsuccset:
+ # we are only interested in *other* successor sets
+ continue
+ if tuple(nsuccset) in base:
+ # we already know the latest base for this divergency
+ continue
+ base[tuple(nsuccset)] = n
+ return [{'divergentnodes': divset, 'commonpredecessor': b}
+ for divset, b in base.iteritems()]
+
+def whyunstable(repo, ctx):
+ result = []
+ if ctx.orphan():
+ for parent in ctx.parents():
+ kind = None
+ if parent.orphan():
+ kind = 'orphan'
+ elif parent.obsolete():
+ kind = 'obsolete'
+ if kind is not None:
+ result.append({'instability': 'orphan',
+ 'reason': '%s parent' % kind,
+ 'node': parent.hex()})
+ if ctx.phasedivergent():
+ predecessors = allpredecessors(repo.obsstore, [ctx.node()],
+ ignoreflags=bumpedfix)
+ immutable = [repo[p] for p in predecessors
+ if p in repo and not repo[p].mutable()]
+ for predecessor in immutable:
+ result.append({'instability': 'phase-divergent',
+ 'reason': 'immutable predecessor',
+ 'node': predecessor.hex()})
+ if ctx.contentdivergent():
+ dsets = divergentsets(repo, ctx)
+ for dset in dsets:
+ divnodes = [repo[n] for n in dset['divergentnodes']]
+ result.append({'instability': 'content-divergent',
+ 'divergentnodes': divnodes,
+ 'reason': 'predecessor',
+ 'node': nodemod.hex(dset['commonpredecessor'])})
+ return result
--- a/mercurial/parser.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/parser.py Mon Mar 19 08:07:18 2018 -0700
@@ -22,6 +22,7 @@
from . import (
encoding,
error,
+ pycompat,
util,
)
@@ -192,7 +193,7 @@
return util.unescapestr(s)
except ValueError as e:
# mangle Python's exception into our format
- raise error.ParseError(str(e).lower())
+ raise error.ParseError(pycompat.bytestr(e).lower())
def _brepr(obj):
if isinstance(obj, bytes):
--- a/mercurial/patch.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/patch.py Mon Mar 19 08:07:18 2018 -0700
@@ -12,7 +12,6 @@
import copy
import difflib
import email
-import email.parser as emailparser
import errno
import hashlib
import os
@@ -41,6 +40,7 @@
util,
vfs as vfsmod,
)
+from .utils import dateutil
diffhelpers = policy.importmod(r'diffhelpers')
stringio = util.stringio
@@ -109,7 +109,7 @@
cur.append(line)
c = chunk(cur)
- m = emailparser.Parser().parse(c)
+ m = pycompat.emailparser().parse(c)
if not m.is_multipart():
yield msgfp(m)
else:
@@ -216,9 +216,9 @@
data = {}
fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
- tmpfp = os.fdopen(fd, pycompat.sysstr('w'))
+ tmpfp = os.fdopen(fd, r'wb')
try:
- msg = emailparser.Parser().parse(fileobj)
+ msg = pycompat.emailparser().parse(fileobj)
subject = msg['Subject'] and mail.headdecode(msg['Subject'])
data['user'] = msg['From'] and mail.headdecode(msg['From'])
@@ -242,7 +242,7 @@
ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
message = ''
for part in msg.walk():
- content_type = part.get_content_type()
+ content_type = pycompat.bytestr(part.get_content_type())
ui.debug('Content-Type: %s\n' % content_type)
if content_type not in ok_types:
continue
@@ -567,7 +567,7 @@
root = tempfile.mkdtemp(prefix='hg-patch-')
self.opener = vfsmod.vfs(root)
# Avoid filename issues with these simple names
- fn = str(self.created)
+ fn = '%d' % self.created
self.opener.write(fn, data)
self.created += 1
self.files[fname] = (fn, mode, copied)
@@ -1102,11 +1102,11 @@
the hunk is left unchanged.
""")
(patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
- suffix=".diff", text=True)
+ suffix=".diff")
ncpatchfp = None
try:
# Write the initial patch
- f = os.fdopen(patchfd, pycompat.sysstr("w"))
+ f = util.nativeeolwriter(os.fdopen(patchfd, r'wb'))
chunk.header.write(f)
chunk.write(f)
f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
@@ -1120,9 +1120,10 @@
ui.warn(_("editor exited with exit code %d\n") % ret)
continue
# Remove comment lines
- patchfp = open(patchfn)
+ patchfp = open(patchfn, r'rb')
ncpatchfp = stringio()
for line in util.iterfile(patchfp):
+ line = util.fromnativeeol(line)
if not line.startswith('#'):
ncpatchfp.write(line)
patchfp.close()
@@ -1451,7 +1452,7 @@
dec = []
line = getline(lr, self.hunk)
while len(line) > 1:
- l = line[0]
+ l = line[0:1]
if l <= 'Z' and l >= 'A':
l = ord(l) - ord('A') + 1
else:
@@ -1460,7 +1461,7 @@
dec.append(util.b85decode(line[1:])[:l])
except ValueError as e:
raise PatchError(_('could not decode "%s" binary patch: %s')
- % (self._fname, str(e)))
+ % (self._fname, util.forcebytestr(e)))
line = getline(lr, self.hunk)
text = zlib.decompress(''.join(dec))
if len(text) != size:
@@ -1852,7 +1853,7 @@
for x in iter(lr.readline, ''):
if state == BFILE and (
- (not context and x[0] == '@')
+ (not context and x.startswith('@'))
or (context is not False and x.startswith('***************'))
or x.startswith('GIT binary patch')):
gp = None
@@ -2256,6 +2257,7 @@
'context': get('unified', getter=ui.config),
}
buildopts['worddiff'] = ui.configbool('experimental', 'worddiff')
+ buildopts['xdiff'] = ui.configbool('experimental', 'xdiff')
if git:
buildopts['git'] = get('git')
@@ -2342,7 +2344,7 @@
if hunksfilterfn is not None:
# If the file has been removed, fctx2 is None; but this should
# not occur here since we catch removed files early in
- # cmdutil.getloglinerangerevs() for 'hg log -L'.
+ # logcmdutil.getlinerangerevs() for 'hg log -L'.
assert fctx2 is not None, \
'fctx2 unexpectly None in diff hunks filtering'
hunks = hunksfilterfn(fctx2, hunks)
@@ -2519,7 +2521,7 @@
yield (t, l)
else:
for token in tabsplitter.findall(stripline):
- if '\t' == token[0]:
+ if token.startswith('\t'):
yield (token, 'diff.tab')
else:
yield (token, label)
@@ -2670,8 +2672,8 @@
def isempty(fctx):
return fctx is None or fctx.size() == 0
- date1 = util.datestr(ctx1.date())
- date2 = util.datestr(ctx2.date())
+ date1 = dateutil.datestr(ctx1.date())
+ date2 = dateutil.datestr(ctx2.date())
gitmode = {'l': '120000', 'x': '100755', '': '100644'}
@@ -2698,8 +2700,10 @@
if opts.git or losedatafn:
flag2 = ctx2.flags(f2)
# if binary is True, output "summary" or "base85", but not "text diff"
- binary = not opts.text and any(f.isbinary()
- for f in [fctx1, fctx2] if f is not None)
+ if opts.text:
+ binary = False
+ else:
+ binary = any(f.isbinary() for f in [fctx1, fctx2] if f is not None)
if losedatafn and not opts.git:
if (binary or
@@ -2789,7 +2793,8 @@
uheaders, hunks = mdiff.unidiff(content1, date1,
content2, date2,
- path1, path2, opts=opts)
+ path1, path2,
+ binary=binary, opts=opts)
header.extend(uheaders)
yield fctx1, fctx2, header, hunks
--- a/mercurial/pathutil.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/pathutil.py Mon Mar 19 08:07:18 2018 -0700
@@ -65,7 +65,7 @@
parts = util.splitpath(path)
if (os.path.splitdrive(path)[0]
or _lowerclean(parts[0]) in ('.hg', '.hg.', '')
- or os.pardir in parts):
+ or pycompat.ospardir in parts):
raise error.Abort(_("path contains illegal component: %s") % path)
# Windows shortname aliases
for p in parts:
@@ -81,7 +81,7 @@
pos = lparts.index(p)
base = os.path.join(*parts[:pos])
raise error.Abort(_("path '%s' is inside nested repo %r")
- % (path, base))
+ % (path, pycompat.bytestr(base)))
normparts = util.splitpath(normpath)
assert len(parts) == len(normparts)
@@ -119,13 +119,14 @@
raise
else:
if stat.S_ISLNK(st.st_mode):
- msg = _('path %r traverses symbolic link %r') % (path, prefix)
+ msg = (_('path %r traverses symbolic link %r')
+ % (pycompat.bytestr(path), pycompat.bytestr(prefix)))
raise error.Abort(msg)
elif (stat.S_ISDIR(st.st_mode) and
os.path.isdir(os.path.join(curpath, '.hg'))):
if not self.callback or not self.callback(curpath):
msg = _("path '%s' is inside nested repo %r")
- raise error.Abort(msg % (path, prefix))
+ raise error.Abort(msg % (path, pycompat.bytestr(prefix)))
def check(self, path):
try:
--- a/mercurial/phases.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/phases.py Mon Mar 19 08:07:18 2018 -0700
@@ -262,7 +262,8 @@
repo = repo.unfiltered()
nativeroots = []
for phase in trackedphases:
- nativeroots.append(map(repo.changelog.rev, self.phaseroots[phase]))
+ nativeroots.append(pycompat.maplist(repo.changelog.rev,
+ self.phaseroots[phase]))
return repo.changelog.computephases(nativeroots)
def _computephaserevspure(self, repo):
@@ -326,7 +327,7 @@
def _write(self, fp):
for phase, roots in enumerate(self.phaseroots):
- for h in roots:
+ for h in sorted(roots):
fp.write('%i %s\n' % (phase, hex(h)))
self.dirty = False
--- a/mercurial/policy.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/policy.py Mon Mar 19 08:07:18 2018 -0700
@@ -44,11 +44,6 @@
if r'__pypy__' in sys.builtin_module_names:
policy = b'cffi'
-# Our C extensions aren't yet compatible with Python 3. So use pure Python
-# on Python 3 for now.
-if sys.version_info[0] >= 3:
- policy = b'py'
-
# Environment variable can always force settings.
if sys.version_info[0] >= 3:
if r'HGMODULEPOLICY' in os.environ:
@@ -71,10 +66,10 @@
# keep in sync with "version" in C modules
_cextversions = {
(r'cext', r'base85'): 1,
- (r'cext', r'bdiff'): 1,
+ (r'cext', r'bdiff'): 3,
(r'cext', r'diffhelpers'): 1,
(r'cext', r'mpatch'): 1,
- (r'cext', r'osutil'): 3,
+ (r'cext', r'osutil'): 4,
(r'cext', r'parsers'): 4,
}
--- a/mercurial/posix.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/posix.py Mon Mar 19 08:07:18 2018 -0700
@@ -113,7 +113,7 @@
if l:
if not stat.S_ISLNK(s):
# switch file to link
- fp = open(f)
+ fp = open(f, 'rb')
data = fp.read()
fp.close()
unlink(f)
@@ -121,7 +121,7 @@
os.symlink(data, f)
except OSError:
# failed to make a link, rewrite file
- fp = open(f, "w")
+ fp = open(f, "wb")
fp.write(data)
fp.close()
# no chmod needed at this point
@@ -130,7 +130,7 @@
# switch link to file
data = os.readlink(f)
unlink(f)
- fp = open(f, "w")
+ fp = open(f, "wb")
fp.write(data)
fp.close()
s = 0o666 & ~umask # avoid restatting for chmod
@@ -264,7 +264,8 @@
# already exists.
target = 'checklink-target'
try:
- open(os.path.join(cachedir, target), 'w').close()
+ fullpath = os.path.join(cachedir, target)
+ open(fullpath, 'w').close()
except IOError as inst:
if inst[0] == errno.EACCES:
# If we can't write to cachedir, just pretend
@@ -461,6 +462,10 @@
else:
return "'%s'" % s.replace("'", "'\\''")
+def shellsplit(s):
+ """Parse a command string in POSIX shell way (best-effort)"""
+ return pycompat.shlexsplit(s, posix=True)
+
def quotecommand(cmd):
return cmd
@@ -613,8 +618,8 @@
self.stat.st_uid == other.stat.st_uid and
self.stat.st_gid == other.stat.st_gid and
self.stat.st_size == other.stat.st_size and
- self.stat.st_mtime == other.stat.st_mtime and
- self.stat.st_ctime == other.stat.st_ctime)
+ self.stat[stat.ST_MTIME] == other.stat[stat.ST_MTIME] and
+ self.stat[stat.ST_CTIME] == other.stat[stat.ST_CTIME])
except AttributeError:
return False
--- a/mercurial/profiling.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/profiling.py Mon Mar 19 08:07:18 2018 -0700
@@ -14,6 +14,7 @@
encoding,
error,
extensions,
+ pycompat,
util,
)
@@ -143,7 +144,7 @@
elif profformat == 'hotpath':
# inconsistent config: profiling.showmin
limit = ui.configwith(fraction, 'profiling', 'showmin', 0.05)
- kwargs['limit'] = limit
+ kwargs[r'limit'] = limit
statprof.display(fp, data=data, format=displayformat, **kwargs)
@@ -200,6 +201,17 @@
elif self._output:
path = self._ui.expandpath(self._output)
self._fp = open(path, 'wb')
+ elif pycompat.iswindows:
+ # parse escape sequence by win32print()
+ class uifp(object):
+ def __init__(self, ui):
+ self._ui = ui
+ def write(self, data):
+ self._ui.write_err(data)
+ def flush(self):
+ self._ui.flush()
+ self._fpdoclose = False
+ self._fp = uifp(self._ui)
else:
self._fpdoclose = False
self._fp = self._ui.ferr
--- a/mercurial/progress.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/progress.py Mon Mar 19 08:07:18 2018 -0700
@@ -119,10 +119,9 @@
add = topic
elif indicator == 'number':
if total:
- add = ('% ' + str(len(str(total))) +
- 's/%s') % (pos, total)
+ add = b'%*d/%d' % (len(str(total)), pos, total)
else:
- add = str(pos)
+ add = b'%d' % pos
elif indicator.startswith('item') and item:
slice = 'end'
if '-' in indicator:
--- a/mercurial/pure/base85.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/pure/base85.py Mon Mar 19 08:07:18 2018 -0700
@@ -9,8 +9,10 @@
import struct
-_b85chars = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" \
- "abcdefghijklmnopqrstuvwxyz!#$%&()*+-;<=>?@^_`{|}~"
+from .. import pycompat
+
+_b85chars = pycompat.bytestr("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdef"
+ "ghijklmnopqrstuvwxyz!#$%&()*+-;<=>?@^_`{|}~")
_b85chars2 = [(a + b) for a in _b85chars for b in _b85chars]
_b85dec = {}
@@ -51,6 +53,7 @@
out = []
for i in range(0, len(text), 5):
chunk = text[i:i + 5]
+ chunk = pycompat.bytestr(chunk)
acc = 0
for j, c in enumerate(chunk):
try:
--- a/mercurial/pure/bdiff.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/pure/bdiff.py Mon Mar 19 08:07:18 2018 -0700
@@ -90,3 +90,13 @@
text = re.sub('[ \t\r]+', ' ', text)
text = text.replace(' \n', '\n')
return text
+
+def splitnewlines(text):
+ '''like str.splitlines, but only split on newlines.'''
+ lines = [l + '\n' for l in text.split('\n')]
+ if lines:
+ if lines[-1] == '\n':
+ lines.pop()
+ else:
+ lines[-1] = lines[-1][:-1]
+ return lines
--- a/mercurial/pure/mpatch.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/pure/mpatch.py Mon Mar 19 08:07:18 2018 -0700
@@ -10,7 +10,7 @@
import struct
from .. import pycompat
-stringio = pycompat.stringio
+stringio = pycompat.bytesio
class mpatchError(Exception):
"""error raised when a delta cannot be decoded
--- a/mercurial/pure/parsers.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/pure/parsers.py Mon Mar 19 08:07:18 2018 -0700
@@ -12,7 +12,7 @@
from ..node import nullid
from .. import pycompat
-stringio = pycompat.stringio
+stringio = pycompat.bytesio
_pack = struct.pack
--- a/mercurial/pycompat.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/pycompat.py Mon Mar 19 08:07:18 2018 -0700
@@ -11,6 +11,7 @@
from __future__ import absolute_import
import getopt
+import inspect
import os
import shlex
import sys
@@ -47,9 +48,11 @@
fsencode = os.fsencode
fsdecode = os.fsdecode
+ oscurdir = os.curdir.encode('ascii')
oslinesep = os.linesep.encode('ascii')
osname = os.name.encode('ascii')
ospathsep = os.pathsep.encode('ascii')
+ ospardir = os.pardir.encode('ascii')
ossep = os.sep.encode('ascii')
osaltsep = os.altsep
if osaltsep:
@@ -61,10 +64,18 @@
sysexecutable = sys.executable
if sysexecutable:
sysexecutable = os.fsencode(sysexecutable)
- stringio = io.BytesIO
- maplist = lambda *args: list(map(*args))
- ziplist = lambda *args: list(zip(*args))
+ bytesio = io.BytesIO
+ # TODO deprecate stringio name, as it is a lie on Python 3.
+ stringio = bytesio
+
+ def maplist(*args):
+ return list(map(*args))
+
+ def ziplist(*args):
+ return list(zip(*args))
+
rawinput = input
+ getargspec = inspect.getfullargspec
# TODO: .buffer might not exist if std streams were replaced; we'll need
# a silly wrapper to make a bytes stream backed by a unicode one.
@@ -83,12 +94,13 @@
sysargv = list(map(os.fsencode, sys.argv))
bytechr = struct.Struct('>B').pack
+ byterepr = b'%r'.__mod__
class bytestr(bytes):
"""A bytes which mostly acts as a Python 2 str
>>> bytestr(), bytestr(bytearray(b'foo')), bytestr(u'ascii'), bytestr(1)
- (b'', b'foo', b'ascii', b'1')
+ ('', 'foo', 'ascii', '1')
>>> s = bytestr(b'foo')
>>> assert s is bytestr(s)
@@ -98,7 +110,7 @@
... def __bytes__(self):
... return b'bytes'
>>> bytestr(bytesable())
- b'bytes'
+ 'bytes'
There's no implicit conversion from non-ascii str as its encoding is
unknown:
@@ -154,10 +166,19 @@
def __iter__(self):
return iterbytestr(bytes.__iter__(self))
+ def __repr__(self):
+ return bytes.__repr__(self)[1:] # drop b''
+
def iterbytestr(s):
"""Iterate bytes as if it were a str object of Python 2"""
return map(bytechr, s)
+ def maybebytestr(s):
+ """Promote bytes to bytestr"""
+ if isinstance(s, bytes):
+ return bytestr(s)
+ return s
+
def sysbytes(s):
"""Convert an internal str (e.g. keyword, __doc__) back to bytes
@@ -180,11 +201,15 @@
def strurl(url):
"""Converts a bytes url back to str"""
- return url.decode(u'ascii')
+ if isinstance(url, bytes):
+ return url.decode(u'ascii')
+ return url
def bytesurl(url):
"""Converts a str url to bytes by encoding in ascii"""
- return url.encode(u'ascii')
+ if isinstance(url, str):
+ return url.encode(u'ascii')
+ return url
def raisewithtb(exc, tb):
"""Raise exception with the given traceback"""
@@ -212,8 +237,8 @@
xrange = builtins.range
unicode = str
- def open(name, mode='r', buffering=-1):
- return builtins.open(name, sysstr(mode), buffering)
+ def open(name, mode='r', buffering=-1, encoding=None):
+ return builtins.open(name, sysstr(mode), buffering, encoding)
def _getoptbwrapper(orig, args, shortlist, namelist):
"""
@@ -249,21 +274,27 @@
return dic
# TODO: handle shlex.shlex().
- def shlexsplit(s):
+ def shlexsplit(s, comments=False, posix=True):
"""
Takes bytes argument, convert it to str i.e. unicodes, pass that into
shlex.split(), convert the returned value to bytes and return that for
Python 3 compatibility as shelx.split() don't accept bytes on Python 3.
"""
- ret = shlex.split(s.decode('latin-1'))
+ ret = shlex.split(s.decode('latin-1'), comments, posix)
return [a.encode('latin-1') for a in ret]
+ def emailparser(*args, **kwargs):
+ import email.parser
+ return email.parser.BytesParser(*args, **kwargs)
+
else:
import cStringIO
bytechr = chr
+ byterepr = repr
bytestr = str
iterbytestr = iter
+ maybebytestr = identity
sysbytes = identity
sysstr = identity
strurl = identity
@@ -298,9 +329,11 @@
strkwargs = identity
byteskwargs = identity
+ oscurdir = os.curdir
oslinesep = os.linesep
osname = os.name
ospathsep = os.pathsep
+ ospardir = os.pardir
ossep = os.sep
osaltsep = os.altsep
stdin = sys.stdin
@@ -312,10 +345,16 @@
getcwd = os.getcwd
sysexecutable = sys.executable
shlexsplit = shlex.split
- stringio = cStringIO.StringIO
+ bytesio = cStringIO.StringIO
+ stringio = bytesio
maplist = map
ziplist = zip
rawinput = raw_input
+ getargspec = inspect.getargspec
+
+ def emailparser(*args, **kwargs):
+ import email.parser
+ return email.parser.Parser(*args, **kwargs)
isjython = sysplatform.startswith('java')
--- a/mercurial/registrar.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/registrar.py Mon Mar 19 08:07:18 2018 -0700
@@ -283,6 +283,14 @@
templatekeyword = registrar.templatekeyword()
+ # new API (since Mercurial 4.6)
+ @templatekeyword('mykeyword', requires={'repo', 'ctx'})
+ def mykeywordfunc(context, mapping):
+ '''Explanation of this template keyword ....
+ '''
+ pass
+
+ # old API
@templatekeyword('mykeyword')
def mykeywordfunc(repo, ctx, templ, cache, revcache, **args):
'''Explanation of this template keyword ....
@@ -291,6 +299,11 @@
The first string argument is used also in online help.
+ Optional argument 'requires' should be a collection of resource names
+ which the template keyword depends on. This also serves as a flag to
+ switch to the new API. If 'requires' is unspecified, all template
+ keywords and resources are expanded to the function arguments.
+
'templatekeyword' instance in example above can be used to
decorate multiple functions.
@@ -301,6 +314,9 @@
Otherwise, explicit 'templatekw.loadkeyword()' is needed.
"""
+ def _extrasetup(self, name, func, requires=None):
+ func._requires = requires
+
class templatefilter(_templateregistrarbase):
"""Decorator to register template filer
@@ -352,7 +368,7 @@
extension, if an instance named as 'templatefunc' is used for
decorating in extension.
- Otherwise, explicit 'templater.loadfunction()' is needed.
+ Otherwise, explicit 'templatefuncs.loadfunction()' is needed.
"""
_getname = _funcregistrarbase._parsefuncdecl
--- a/mercurial/repair.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/repair.py Mon Mar 19 08:07:18 2018 -0700
@@ -27,7 +27,8 @@
util,
)
-def _bundle(repo, bases, heads, node, suffix, compress=True, obsolescence=True):
+def backupbundle(repo, bases, heads, node, suffix, compress=True,
+ obsolescence=True):
"""create a bundle with the specified revisions as a backup"""
backupdir = "strip-backup"
@@ -166,7 +167,7 @@
vfs = repo.vfs
node = nodelist[-1]
if backup:
- backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
+ backupfile = backupbundle(repo, stripbases, cl.heads(), node, topic)
repo.ui.status(_("saved backup bundle to %s\n") %
vfs.join(backupfile))
repo.ui.log("backupbundle", "saved backup bundle to %s\n",
@@ -179,8 +180,8 @@
# we are trying to strip. This is harmless since the stripped markers
# are already backed up and we did not touched the markers for the
# saved changesets.
- tmpbundlefile = _bundle(repo, savebases, saveheads, node, 'temp',
- compress=False, obsolescence=False)
+ tmpbundlefile = backupbundle(repo, savebases, saveheads, node, 'temp',
+ compress=False, obsolescence=False)
try:
with repo.transaction("strip") as tr:
@@ -235,7 +236,7 @@
except OSError as e:
if e.errno != errno.ENOENT:
ui.warn(_('error removing %s: %s\n') %
- (undovfs.join(undofile), str(e)))
+ (undovfs.join(undofile), util.forcebytestr(e)))
except: # re-raises
if backupfile:
--- a/mercurial/revlog.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/revlog.py Mon Mar 19 08:07:18 2018 -0700
@@ -13,8 +13,8 @@
from __future__ import absolute_import
-import binascii
import collections
+import contextlib
import errno
import hashlib
import heapq
@@ -629,13 +629,12 @@
indexdata = ''
self._initempty = True
try:
- f = self.opener(self.indexfile)
- if (mmapindexthreshold is not None and
- self.opener.fstat(f).st_size >= mmapindexthreshold):
- indexdata = util.buffer(util.mmapread(f))
- else:
- indexdata = f.read()
- f.close()
+ with self._indexfp() as f:
+ if (mmapindexthreshold is not None and
+ self.opener.fstat(f).st_size >= mmapindexthreshold):
+ indexdata = util.buffer(util.mmapread(f))
+ else:
+ indexdata = f.read()
if len(indexdata) > 0:
v = versionformat_unpack(indexdata[:4])[0]
self._initempty = False
@@ -690,6 +689,32 @@
def _compressor(self):
return util.compengines[self._compengine].revlogcompressor()
+ def _indexfp(self, mode='r'):
+ """file object for the revlog's index file"""
+ args = {r'mode': mode}
+ if mode != 'r':
+ args[r'checkambig'] = self._checkambig
+ if mode == 'w':
+ args[r'atomictemp'] = True
+ return self.opener(self.indexfile, **args)
+
+ def _datafp(self, mode='r'):
+ """file object for the revlog's data file"""
+ return self.opener(self.datafile, mode=mode)
+
+ @contextlib.contextmanager
+ def _datareadfp(self, existingfp=None):
+ """file object suitable to read data"""
+ if existingfp is not None:
+ yield existingfp
+ else:
+ if self._inline:
+ func = self._indexfp
+ else:
+ func = self._datafp
+ with func() as fp:
+ yield fp
+
def tip(self):
return self.node(len(self.index) - 2)
def __contains__(self, rev):
@@ -1362,7 +1387,7 @@
try:
# str(rev)
rev = int(id)
- if str(rev) != id:
+ if "%d" % rev != id:
raise ValueError
if rev < 0:
rev = len(self) + rev
@@ -1424,7 +1449,7 @@
if maybewdir:
raise error.WdirUnsupported
return None
- except (TypeError, binascii.Error):
+ except TypeError:
pass
def lookup(self, id):
@@ -1510,15 +1535,6 @@
Returns a str or buffer of raw byte data.
"""
- if df is not None:
- closehandle = False
- else:
- if self._inline:
- df = self.opener(self.indexfile)
- else:
- df = self.opener(self.datafile)
- closehandle = True
-
# Cache data both forward and backward around the requested
# data, in a fixed size window. This helps speed up operations
# involving reading the revlog backwards.
@@ -1526,10 +1542,9 @@
realoffset = offset & ~(cachesize - 1)
reallength = (((offset + length + cachesize) & ~(cachesize - 1))
- realoffset)
- df.seek(realoffset)
- d = df.read(reallength)
- if closehandle:
- df.close()
+ with self._datareadfp(df) as df:
+ df.seek(realoffset)
+ d = df.read(reallength)
self._cachesegment(realoffset, d)
if offset != realoffset or reallength != length:
return util.buffer(d, offset - realoffset, length)
@@ -1838,7 +1853,7 @@
raise RevlogError(_("integrity check failed on %s:%s")
% (self.indexfile, pycompat.bytestr(revornode)))
- def checkinlinesize(self, tr, fp=None):
+ def _enforceinlinesize(self, tr, fp=None):
"""Check if the revlog is too big for inline and convert if so.
This should be called after revisions are added to the revlog. If the
@@ -1867,24 +1882,20 @@
fp.flush()
fp.close()
- df = self.opener(self.datafile, 'w')
- try:
+ with self._datafp('w') as df:
for r in self:
df.write(self._getsegmentforrevs(r, r)[1])
- finally:
- df.close()
- fp = self.opener(self.indexfile, 'w', atomictemp=True,
- checkambig=self._checkambig)
- self.version &= ~FLAG_INLINE_DATA
- self._inline = False
- for i in self:
- e = self._io.packentry(self.index[i], self.node, self.version, i)
- fp.write(e)
+ with self._indexfp('w') as fp:
+ self.version &= ~FLAG_INLINE_DATA
+ self._inline = False
+ io = self._io
+ for i in self:
+ e = io.packentry(self.index[i], self.node, self.version, i)
+ fp.write(e)
- # if we don't call close, the temp file will never replace the
- # real index
- fp.close()
+        # the temp file replaces the real index when we exit the context
+ # manager
tr.replace(self.indexfile, trindex * self._io.size)
self._chunkclear()
@@ -1943,8 +1954,8 @@
"""
dfh = None
if not self._inline:
- dfh = self.opener(self.datafile, "a+")
- ifh = self.opener(self.indexfile, "a+", checkambig=self._checkambig)
+ dfh = self._datafp("a+")
+ ifh = self._indexfp("a+")
try:
return self._addrevision(node, rawtext, transaction, link, p1, p2,
flags, cachedelta, ifh, dfh,
@@ -2005,7 +2016,8 @@
try:
return _zlibdecompress(data)
except zlib.error as e:
- raise RevlogError(_('revlog decompress error: %s') % str(e))
+ raise RevlogError(_('revlog decompress error: %s') %
+ util.forcebytestr(e))
# '\0' is more common than 'u' so it goes first.
elif t == '\0':
return data
@@ -2129,7 +2141,7 @@
if alwayscache and rawtext is None:
rawtext = deltacomputer._buildtext(revinfo, fh)
- if type(rawtext) == str: # only accept immutable objects
+ if type(rawtext) == bytes: # only accept immutable objects
self._cache = (node, curr, rawtext)
self._chainbasecache[curr] = chainbase
return node
@@ -2163,7 +2175,7 @@
ifh.write(entry)
ifh.write(data[0])
ifh.write(data[1])
- self.checkinlinesize(transaction, ifh)
+ self._enforceinlinesize(transaction, ifh)
def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
"""
@@ -2183,7 +2195,7 @@
end = 0
if r:
end = self.end(r - 1)
- ifh = self.opener(self.indexfile, "a+", checkambig=self._checkambig)
+ ifh = self._indexfp("a+")
isize = r * self._io.size
if self._inline:
transaction.add(self.indexfile, end + isize, r)
@@ -2191,7 +2203,7 @@
else:
transaction.add(self.indexfile, isize, r)
transaction.add(self.datafile, end)
- dfh = self.opener(self.datafile, "a+")
+ dfh = self._datafp("a+")
def flush():
if dfh:
dfh.flush()
@@ -2254,9 +2266,8 @@
# addrevision switched from inline to conventional
# reopen the index
ifh.close()
- dfh = self.opener(self.datafile, "a+")
- ifh = self.opener(self.indexfile, "a+",
- checkambig=self._checkambig)
+ dfh = self._datafp("a+")
+ ifh = self._indexfp("a+")
finally:
if dfh:
dfh.close()
@@ -2358,10 +2369,9 @@
expected = max(0, self.end(len(self) - 1))
try:
- f = self.opener(self.datafile)
- f.seek(0, 2)
- actual = f.tell()
- f.close()
+ with self._datafp() as f:
+ f.seek(0, 2)
+ actual = f.tell()
dd = actual - expected
except IOError as inst:
if inst.errno != errno.ENOENT:
@@ -2488,7 +2498,7 @@
if populatecachedelta:
dp = self.deltaparent(rev)
if dp != nullrev:
- cachedelta = (dp, str(self._chunk(rev)))
+ cachedelta = (dp, bytes(self._chunk(rev)))
if not cachedelta:
rawtext = self.revision(rev, raw=True)
--- a/mercurial/revset.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/revset.py Mon Mar 19 08:07:18 2018 -0700
@@ -28,8 +28,10 @@
revsetlang,
scmutil,
smartset,
+ stack,
util,
)
+from .utils import dateutil
# helpers for processing parsed tree
getsymbol = revsetlang.getsymbol
@@ -105,6 +107,9 @@
pass
return None
+def _sortedb(xs):
+ return sorted(util.rapply(pycompat.maybebytestr, xs))
+
# operator methods
def stringset(repo, subset, x, order):
@@ -507,15 +512,7 @@
b.add(getbranch(r))
c = s.__contains__
return subset.filter(lambda r: c(r) or getbranch(r) in b,
- condrepr=lambda: '<branch %r>' % sorted(b))
-
-@predicate('bumped()', safe=True)
-def bumped(repo, subset, x):
- msg = ("'bumped()' is deprecated, "
- "use 'phasedivergent()'")
- repo.ui.deprecwarn(msg, '4.4')
-
- return phasedivergent(repo, subset, x)
+ condrepr=lambda: '<branch %r>' % _sortedb(b))
@predicate('phasedivergent()', safe=True)
def phasedivergent(repo, subset, x):
@@ -663,7 +660,7 @@
"""
# i18n: "date" is a keyword
ds = getstring(x, _("date requires a string"))
- dm = util.matchdate(ds)
+ dm = dateutil.matchdate(ds)
return subset.filter(lambda x: dm(repo[x].date()[0]),
condrepr=('<date %r>', ds))
@@ -768,15 +765,7 @@
src = _getrevsource(repo, r)
return subset.filter(dests.__contains__,
- condrepr=lambda: '<destination %r>' % sorted(dests))
-
-@predicate('divergent()', safe=True)
-def divergent(repo, subset, x):
- msg = ("'divergent()' is deprecated, "
- "use 'contentdivergent()'")
- repo.ui.deprecwarn(msg, '4.4')
-
- return contentdivergent(repo, subset, x)
+ condrepr=lambda: '<destination %r>' % _sortedb(dests))
@predicate('contentdivergent()', safe=True)
def contentdivergent(repo, subset, x):
@@ -1024,7 +1013,8 @@
# i18n: "grep" is a keyword
gr = re.compile(getstring(x, _("grep requires a string")))
except re.error as e:
- raise error.ParseError(_('invalid match pattern: %s') % e)
+ raise error.ParseError(
+ _('invalid match pattern: %s') % util.forcebytestr(e))
def matches(x):
c = repo[x]
@@ -1543,6 +1533,21 @@
target = phases.secret
return _phase(repo, subset, target)
+@predicate('stack([revs])', safe=True)
+def _stack(repo, subset, x):
+ # experimental revset for the stack of changesets or working directory
+ # parent
+ if x is None:
+ stacks = stack.getstack(repo, x)
+ else:
+ stacks = smartset.baseset([])
+ for revision in getset(repo, fullreposet(repo), x):
+ currentstack = stack.getstack(repo, revision)
+ stacks = stacks + currentstack
+
+ # Force to use the order of the stacks instead of the subset one
+ return stacks & subset
+
def parentspec(repo, subset, x, n, order):
"""``set^0``
The set.
@@ -1854,11 +1859,12 @@
keyflags = []
for k in keys.split():
fk = k
- reverse = (k[0] == '-')
+ reverse = (k.startswith('-'))
if reverse:
k = k[1:]
if k not in _sortkeyfuncs and k != 'topo':
- raise error.ParseError(_("unknown sort key %r") % fk)
+ raise error.ParseError(
+ _("unknown sort key %r") % pycompat.bytestr(fk))
keyflags.append((k, reverse))
if len(keyflags) > 1 and any(k == 'topo' for k, reverse in keyflags):
@@ -2031,14 +2037,6 @@
def tagged(repo, subset, x):
return tag(repo, subset, x)
-@predicate('unstable()', safe=True)
-def unstable(repo, subset, x):
- msg = ("'unstable()' is deprecated, "
- "use 'orphan()'")
- repo.ui.deprecwarn(msg, '4.4')
-
- return orphan(repo, subset, x)
-
@predicate('orphan()', safe=True)
def orphan(repo, subset, x):
"""Non-obsolete changesets with obsolete ancestors. (EXPERIMENTAL)
@@ -2080,7 +2078,7 @@
try:
# fast path for integer revision
r = int(t)
- if str(r) != t or r not in cl:
+ if ('%d' % r) != t or r not in cl:
raise ValueError
revs = [r]
except ValueError:
--- a/mercurial/revsetlang.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/revsetlang.py Mon Mar 19 08:07:18 2018 -0700
@@ -539,7 +539,19 @@
return tuple(foldconcat(t) for t in tree)
def parse(spec, lookup=None):
- return _parsewith(spec, lookup=lookup)
+ try:
+ return _parsewith(spec, lookup=lookup)
+ except error.ParseError as inst:
+ if len(inst.args) > 1: # has location
+ loc = inst.args[1]
+ # Remove newlines -- spaces are equivalent whitespace.
+ spec = spec.replace('\n', ' ')
+ # We want the caret to point to the place in the template that
+ # failed to parse, but in a hint we get a open paren at the
+ # start. Therefore, we print "loc + 1" spaces (instead of "loc")
+ # to line up the caret with the location of the error.
+ inst.hint = spec + '\n' + ' ' * (loc + 1) + '^ ' + _('here')
+ raise
def _quote(s):
r"""Quote a value in order to make it safe for the revset engine.
@@ -635,7 +647,7 @@
"root(_list('a\\\\x00b\\\\x00c\\\\x00d'))"
>>> formatspec(b'sort(%r, %ps)', b':', [b'desc', b'user'])
"sort((:), 'desc', 'user')"
- >>> formatspec('%ls', ['a', "'"])
+ >>> formatspec(b'%ls', [b'a', b"'"])
"_list('a\\\\x00\\\\'')"
'''
expr = pycompat.bytestr(expr)
@@ -717,13 +729,13 @@
def gethashlikesymbols(tree):
"""returns the list of symbols of the tree that look like hashes
- >>> gethashlikesymbols(('dagrange', ('symbol', '3'), ('symbol', 'abe3ff')))
+ >>> gethashlikesymbols(parse(b'3::abe3ff'))
['3', 'abe3ff']
- >>> gethashlikesymbols(('func', ('symbol', 'precursors'), ('symbol', '.')))
+ >>> gethashlikesymbols(parse(b'precursors(.)'))
[]
- >>> gethashlikesymbols(('func', ('symbol', 'precursors'), ('symbol', '34')))
+ >>> gethashlikesymbols(parse(b'precursors(34)'))
['34']
- >>> gethashlikesymbols(('symbol', 'abe3ffZ'))
+ >>> gethashlikesymbols(parse(b'abe3ffZ'))
[]
"""
if not tree:
--- a/mercurial/scmutil.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/scmutil.py Mon Mar 19 08:07:18 2018 -0700
@@ -162,13 +162,14 @@
reason = _('timed out waiting for lock held by %r') % inst.locker
else:
reason = _('lock held by %r') % inst.locker
- ui.warn(_("abort: %s: %s\n") % (inst.desc or inst.filename, reason))
+ ui.warn(_("abort: %s: %s\n")
+ % (inst.desc or util.forcebytestr(inst.filename), reason))
if not inst.locker:
ui.warn(_("(lock might be very busy)\n"))
except error.LockUnavailable as inst:
ui.warn(_("abort: could not lock %s: %s\n") %
- (inst.desc or inst.filename,
- encoding.strtolocal(inst.strerror)))
+ (inst.desc or util.forcebytestr(inst.filename),
+ encoding.strtolocal(inst.strerror)))
except error.OutOfBandError as inst:
if inst.args:
msg = _("abort: remote error:\n")
@@ -185,12 +186,15 @@
ui.warn(_("(%s)\n") % inst.hint)
except error.ResponseError as inst:
ui.warn(_("abort: %s") % inst.args[0])
- if not isinstance(inst.args[1], basestring):
- ui.warn(" %r\n" % (inst.args[1],))
- elif not inst.args[1]:
+ msg = inst.args[1]
+ if isinstance(msg, type(u'')):
+ msg = pycompat.sysbytes(msg)
+ if not isinstance(msg, bytes):
+ ui.warn(" %r\n" % (msg,))
+ elif not msg:
ui.warn(_(" empty string\n"))
else:
- ui.warn("\n%r\n" % util.ellipsis(inst.args[1]))
+ ui.warn("\n%r\n" % util.ellipsis(msg))
except error.CensoredNodeError as inst:
ui.warn(_("abort: file censored %s!\n") % inst)
except error.RevlogError as inst:
@@ -207,15 +211,15 @@
if inst.hint:
ui.warn(_("(%s)\n") % inst.hint)
except ImportError as inst:
- ui.warn(_("abort: %s!\n") % inst)
- m = str(inst).split()[-1]
+ ui.warn(_("abort: %s!\n") % util.forcebytestr(inst))
+ m = util.forcebytestr(inst).split()[-1]
if m in "mpatch bdiff".split():
ui.warn(_("(did you forget to compile extensions?)\n"))
elif m in "zlib".split():
ui.warn(_("(is your Python install correct?)\n"))
except IOError as inst:
if util.safehasattr(inst, "code"):
- ui.warn(_("abort: %s\n") % inst)
+ ui.warn(_("abort: %s\n") % util.forcebytestr(inst))
elif util.safehasattr(inst, "reason"):
try: # usually it is in the form (errno, strerror)
reason = inst.reason.args[1]
@@ -232,7 +236,8 @@
elif getattr(inst, "strerror", None):
if getattr(inst, "filename", None):
ui.warn(_("abort: %s: %s\n") % (
- encoding.strtolocal(inst.strerror), inst.filename))
+ encoding.strtolocal(inst.strerror),
+ util.forcebytestr(inst.filename)))
else:
ui.warn(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
else:
@@ -240,7 +245,8 @@
except OSError as inst:
if getattr(inst, "filename", None) is not None:
ui.warn(_("abort: %s: '%s'\n") % (
- encoding.strtolocal(inst.strerror), inst.filename))
+ encoding.strtolocal(inst.strerror),
+ util.forcebytestr(inst.filename)))
else:
ui.warn(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
except MemoryError:
@@ -250,7 +256,7 @@
# Just in case catch this and and pass exit code to caller.
return inst.code
except socket.error as inst:
- ui.warn(_("abort: %s\n") % inst.args[-1])
+ ui.warn(_("abort: %s\n") % util.forcebytestr(inst.args[-1]))
return -1
@@ -261,12 +267,15 @@
raise error.Abort(_("the name '%s' is reserved") % lbl)
for c in (':', '\0', '\n', '\r'):
if c in lbl:
- raise error.Abort(_("%r cannot be used in a name") % c)
+ raise error.Abort(
+ _("%r cannot be used in a name") % pycompat.bytestr(c))
try:
int(lbl)
raise error.Abort(_("cannot use an integer as a name"))
except ValueError:
pass
+ if lbl.strip() != lbl:
+ raise error.Abort(_("leading or trailing whitespace in name %r") % lbl)
def checkfilename(f):
'''Check that the filename f is an acceptable filename for a tracked file'''
@@ -355,12 +364,8 @@
samestat = getattr(os.path, 'samestat', None)
if followsym and samestat is not None:
def adddir(dirlst, dirname):
- match = False
dirstat = os.stat(dirname)
- for lstdirstat in dirlst:
- if samestat(dirstat, lstdirstat):
- match = True
- break
+ match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
if not match:
dirlst.append(dirstat)
return not match
@@ -411,7 +416,7 @@
def formatchangeid(ctx):
"""Format changectx as '{rev}:{node|formatnode}', which is the default
- template provided by cmdutil.changeset_templater"""
+ template provided by logcmdutil.changesettemplater"""
repo = ctx.repo()
return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))
@@ -684,7 +689,8 @@
continue
from . import bookmarks # avoid import cycle
repo.ui.debug('moving bookmarks %r from %s to %s\n' %
- (oldbmarks, hex(oldnode), hex(newnode)))
+ (util.rapply(pycompat.maybebytestr, oldbmarks),
+ hex(oldnode), hex(newnode)))
# Delete divergent bookmarks being parents of related newnodes
deleterevs = repo.revs('parents(roots(%ln & (::%n))) - parents(%n)',
allnewnodes, newnode, oldnode)
@@ -885,7 +891,7 @@
missings = []
for r in requirements:
if r not in supported:
- if not r or not r[0].isalnum():
+ if not r or not r[0:1].isalnum():
raise error.RequirementError(_(".hg/requires file is corrupt"))
missings.append(r)
missings.sort()
@@ -1196,7 +1202,7 @@
if k == self.firstlinekey:
e = "key name '%s' is reserved" % self.firstlinekey
raise error.ProgrammingError(e)
- if not k[0].isalpha():
+ if not k[0:1].isalpha():
e = "keys must start with a letter in a key-value file"
raise error.ProgrammingError(e)
if not k.isalnum():
@@ -1222,6 +1228,11 @@
'unbundle',
]
+# a list of (repo, ctx, files) functions called by various commands to allow
+# extensions to ensure the corresponding files are available locally, before the
+# command uses them.
+fileprefetchhooks = util.hooks()
+
# A marker that tells the evolve extension to suppress its own reporting
_reportstroubledchangesets = True
--- a/mercurial/setdiscovery.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/setdiscovery.py Mon Mar 19 08:07:18 2018 -0700
@@ -106,7 +106,7 @@
:nodes: set of nodes to discover
:size: the maximum size of the sample"""
sample = dag.headsetofconnecteds(nodes)
- if size <= len(sample):
+ if len(sample) >= size:
return _limitsample(sample, size)
_updatesample(dag, None, sample, quicksamplesize=size)
return sample
@@ -175,7 +175,7 @@
ui.debug("all remote heads known locally\n")
return (srvheadhashes, False, srvheadhashes,)
- if sample and len(ownheads) <= initialsamplesize and all(yesno):
+ if len(sample) == len(ownheads) and all(yesno):
ui.note(_("all local heads known remotely\n"))
ownheadhashes = dag.externalizeall(ownheads)
return (ownheadhashes, True, srvheadhashes,)
@@ -221,7 +221,6 @@
sample = list(undecided)
else:
sample = samplefunc(dag, undecided, targetsize)
- sample = _limitsample(sample, targetsize)
roundtrips += 1
ui.progress(_('searching'), roundtrips, unit=_('queries'))
--- a/mercurial/smartset.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/smartset.py Mon Mar 19 08:07:18 2018 -0700
@@ -8,7 +8,9 @@
from __future__ import absolute_import
from . import (
+ encoding,
error,
+ pycompat,
util,
)
@@ -19,7 +21,7 @@
type(r) example
======== =================================
tuple ('<not %r>', other)
- str '<branch closed>'
+ bytes '<branch closed>'
callable lambda: '<branch %r>' % sorted(b)
object other
======== =================================
@@ -27,13 +29,16 @@
if r is None:
return ''
elif isinstance(r, tuple):
- return r[0] % r[1:]
- elif isinstance(r, str):
+ return r[0] % util.rapply(pycompat.maybebytestr, r[1:])
+ elif isinstance(r, bytes):
return r
elif callable(r):
return r()
else:
- return repr(r)
+ return pycompat.byterepr(r)
+
+def _typename(o):
+ return pycompat.sysbytes(type(o).__name__).lstrip('_')
class abstractsmartset(object):
@@ -306,7 +311,7 @@
self._istopo = False
def __len__(self):
- if '_list' in self.__dict__:
+ if r'_list' in self.__dict__:
return len(self._list)
else:
return len(self._set)
@@ -384,6 +389,7 @@
s._ascending = self._ascending
return s
+ @encoding.strmethod
def __repr__(self):
d = {None: '', False: '-', True: '+'}[self._ascending]
s = _formatsetrepr(self._datarepr)
@@ -394,8 +400,8 @@
# We fallback to the sorted version for a stable output.
if self._ascending is not None:
l = self._asclist
- s = repr(l)
- return '<%s%s %s>' % (type(self).__name__, d, s)
+ s = pycompat.byterepr(l)
+ return '<%s%s %s>' % (_typename(self), d, s)
class filteredset(abstractsmartset):
"""Duck type for baseset class which iterates lazily over the revisions in
@@ -505,12 +511,13 @@
pass
return x
+ @encoding.strmethod
def __repr__(self):
- xs = [repr(self._subset)]
+ xs = [pycompat.byterepr(self._subset)]
s = _formatsetrepr(self._condrepr)
if s:
xs.append(s)
- return '<%s %s>' % (type(self).__name__, ', '.join(xs))
+ return '<%s %s>' % (_typename(self), ', '.join(xs))
def _iterordered(ascending, iter1, iter2):
"""produce an ordered iteration from two iterators with the same order
@@ -755,9 +762,10 @@
self.reverse()
return val
+ @encoding.strmethod
def __repr__(self):
d = {None: '', False: '-', True: '+'}[self._ascending]
- return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
+ return '<%s%s %r, %r>' % (_typename(self), d, self._r1, self._r2)
class generatorset(abstractsmartset):
"""Wrap a generator for lazy iteration
@@ -918,9 +926,10 @@
return self.last()
return next(it(), None)
+ @encoding.strmethod
def __repr__(self):
d = {False: '-', True: '+'}[self._ascending]
- return '<%s%s>' % (type(self).__name__.lstrip('_'), d)
+ return '<%s%s>' % (_typename(self), d)
class _generatorsetasc(generatorset):
"""Special case of generatorset optimized for ascending generators."""
@@ -1087,10 +1096,10 @@
y = max(self._end - start, self._start)
return _spanset(x, y, self._ascending, self._hiddenrevs)
+ @encoding.strmethod
def __repr__(self):
d = {False: '-', True: '+'}[self._ascending]
- return '<%s%s %d:%d>' % (type(self).__name__.lstrip('_'), d,
- self._start, self._end)
+ return '<%s%s %d:%d>' % (_typename(self), d, self._start, self._end)
class fullreposet(_spanset):
"""a set containing all revisions in the repo
@@ -1123,7 +1132,7 @@
def prettyformat(revs):
lines = []
- rs = repr(revs)
+ rs = pycompat.byterepr(revs)
p = 0
while p < len(rs):
q = rs.find('<', p + 1)
--- a/mercurial/sshpeer.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/sshpeer.py Mon Mar 19 08:07:18 2018 -0700
@@ -8,6 +8,7 @@
from __future__ import absolute_import
import re
+import uuid
from .i18n import _
from . import (
@@ -15,6 +16,8 @@
pycompat,
util,
wireproto,
+ wireprotoserver,
+ wireprototypes,
)
def _serverquote(s):
@@ -29,10 +32,11 @@
"""display all data currently available on pipe as remote output.
This is non blocking."""
- s = util.readpipe(pipe)
- if s:
- for l in s.splitlines():
- ui.status(_("remote: "), l, '\n')
+ if pipe:
+ s = util.readpipe(pipe)
+ if s:
+ for l in s.splitlines():
+ ui.status(_("remote: "), l, '\n')
class doublepipe(object):
"""Operate a side-channel pipe in addition of a main one
@@ -63,8 +67,11 @@
(This will only wait for data if the setup is supported by `util.poll`)
"""
- if getattr(self._main, 'hasbuffer', False): # getattr for classic pipe
- return (True, True) # main has data, assume side is worth poking at.
+ if (isinstance(self._main, util.bufferedinputpipe) and
+ self._main.hasbuffer):
+ # Main has data. Assume side is worth poking at.
+ return True, True
+
fds = [self._main.fileno(), self._side.fileno()]
try:
act = util.poll(fds)
@@ -114,43 +121,258 @@
def flush(self):
return self._main.flush()
-class sshpeer(wireproto.wirepeer):
- def __init__(self, ui, path, create=False):
- self._url = path
- self._ui = ui
- self._pipeo = self._pipei = self._pipee = None
+def _cleanuppipes(ui, pipei, pipeo, pipee):
+ """Clean up pipes used by an SSH connection."""
+ if pipeo:
+ pipeo.close()
+ if pipei:
+ pipei.close()
+
+ if pipee:
+ # Try to read from the err descriptor until EOF.
+ try:
+ for l in pipee:
+ ui.status(_('remote: '), l)
+ except (IOError, ValueError):
+ pass
+
+ pipee.close()
+
+def _makeconnection(ui, sshcmd, args, remotecmd, path, sshenv=None):
+ """Create an SSH connection to a server.
+
+ Returns a tuple of (process, stdin, stdout, stderr) for the
+ spawned process.
+ """
+ cmd = '%s %s %s' % (
+ sshcmd,
+ args,
+ util.shellquote('%s -R %s serve --stdio' % (
+ _serverquote(remotecmd), _serverquote(path))))
+
+ ui.debug('running %s\n' % cmd)
+ cmd = util.quotecommand(cmd)
+
+ # no buffer allow the use of 'select'
+ # feel free to remove buffering and select usage when we ultimately
+ # move to threading.
+ stdin, stdout, stderr, proc = util.popen4(cmd, bufsize=0, env=sshenv)
+
+ return proc, stdin, stdout, stderr
+
+def _performhandshake(ui, stdin, stdout, stderr):
+ def badresponse():
+ # Flush any output on stderr.
+ _forwardoutput(ui, stderr)
+
+ msg = _('no suitable response from remote hg')
+ hint = ui.config('ui', 'ssherrorhint')
+ raise error.RepoError(msg, hint=hint)
- u = util.url(path, parsequery=False, parsefragment=False)
- if u.scheme != 'ssh' or not u.host or u.path is None:
- self._abort(error.RepoError(_("couldn't parse location %s") % path))
+ # The handshake consists of sending wire protocol commands in reverse
+ # order of protocol implementation and then sniffing for a response
+ # to one of them.
+ #
+ # Those commands (from oldest to newest) are:
+ #
+ # ``between``
+ # Asks for the set of revisions between a pair of revisions. Command
+ # present in all Mercurial server implementations.
+ #
+ # ``hello``
+ # Instructs the server to advertise its capabilities. Introduced in
+ # Mercurial 0.9.1.
+ #
+ # ``upgrade``
+ # Requests upgrade from default transport protocol version 1 to
+ # a newer version. Introduced in Mercurial 4.6 as an experimental
+ # feature.
+ #
+ # The ``between`` command is issued with a request for the null
+ # range. If the remote is a Mercurial server, this request will
+ # generate a specific response: ``1\n\n``. This represents the
+ # wire protocol encoded value for ``\n``. We look for ``1\n\n``
+ # in the output stream and know this is the response to ``between``
+ # and we're at the end of our handshake reply.
+ #
+ # The response to the ``hello`` command will be a line with the
+ # length of the value returned by that command followed by that
+ # value. If the server doesn't support ``hello`` (which should be
+ # rare), that line will be ``0\n``. Otherwise, the value will contain
+ # RFC 822 like lines. Of these, the ``capabilities:`` line contains
+ # the capabilities of the server.
+ #
+ # The ``upgrade`` command isn't really a command in the traditional
+ # sense of version 1 of the transport because it isn't using the
+    # proper mechanism for formatting arguments: instead, it just encodes
+ # arguments on the line, delimited by spaces.
+ #
+ # The ``upgrade`` line looks like ``upgrade <token> <capabilities>``.
+ # If the server doesn't support protocol upgrades, it will reply to
+ # this line with ``0\n``. Otherwise, it emits an
+ # ``upgraded <token> <protocol>`` line to both stdout and stderr.
+ # Content immediately following this line describes additional
+ # protocol and server state.
+ #
+ # In addition to the responses to our command requests, the server
+ # may emit "banner" output on stdout. SSH servers are allowed to
+ # print messages to stdout on login. Issuing commands on connection
+ # allows us to flush this banner output from the server by scanning
+ # for output to our well-known ``between`` command. Of course, if
+ # the banner contains ``1\n\n``, this will throw off our detection.
- util.checksafessh(path)
+ requestlog = ui.configbool('devel', 'debug.peer-request')
+
+ # Generate a random token to help identify responses to version 2
+ # upgrade request.
+ token = pycompat.sysbytes(str(uuid.uuid4()))
+ upgradecaps = [
+ ('proto', wireprotoserver.SSHV2),
+ ]
+ upgradecaps = util.urlreq.urlencode(upgradecaps)
- if u.passwd is not None:
- self._abort(error.RepoError(_("password in URL not supported")))
+ try:
+ pairsarg = '%s-%s' % ('0' * 40, '0' * 40)
+ handshake = [
+ 'hello\n',
+ 'between\n',
+ 'pairs %d\n' % len(pairsarg),
+ pairsarg,
+ ]
+
+ # Request upgrade to version 2 if configured.
+ if ui.configbool('experimental', 'sshpeer.advertise-v2'):
+ ui.debug('sending upgrade request: %s %s\n' % (token, upgradecaps))
+ handshake.insert(0, 'upgrade %s %s\n' % (token, upgradecaps))
- self._user = u.user
- self._host = u.host
- self._port = u.port
- self._path = u.path or '.'
+ if requestlog:
+ ui.debug('devel-peer-request: hello\n')
+ ui.debug('sending hello command\n')
+ if requestlog:
+ ui.debug('devel-peer-request: between\n')
+ ui.debug('devel-peer-request: pairs: %d bytes\n' % len(pairsarg))
+ ui.debug('sending between command\n')
+
+ stdin.write(''.join(handshake))
+ stdin.flush()
+ except IOError:
+ badresponse()
+
+ # Assume version 1 of wire protocol by default.
+ protoname = wireprototypes.SSHV1
+ reupgraded = re.compile(b'^upgraded %s (.*)$' % re.escape(token))
+
+ lines = ['', 'dummy']
+ max_noise = 500
+ while lines[-1] and max_noise:
+ try:
+ l = stdout.readline()
+ _forwardoutput(ui, stderr)
- sshcmd = self.ui.config("ui", "ssh")
- remotecmd = self.ui.config("ui", "remotecmd")
- sshaddenv = dict(self.ui.configitems("sshenv"))
- sshenv = util.shellenviron(sshaddenv)
+ # Look for reply to protocol upgrade request. It has a token
+ # in it, so there should be no false positives.
+ m = reupgraded.match(l)
+ if m:
+ protoname = m.group(1)
+ ui.debug('protocol upgraded to %s\n' % protoname)
+ # If an upgrade was handled, the ``hello`` and ``between``
+ # requests are ignored. The next output belongs to the
+ # protocol, so stop scanning lines.
+ break
+
+ # Otherwise it could be a banner, ``0\n`` response if server
+ # doesn't support upgrade.
+
+ if lines[-1] == '1\n' and l == '\n':
+ break
+ if l:
+ ui.debug('remote: ', l)
+ lines.append(l)
+ max_noise -= 1
+ except IOError:
+ badresponse()
+ else:
+ badresponse()
+
+ caps = set()
- args = util.sshargs(sshcmd, self._host, self._user, self._port)
+ # For version 1, we should see a ``capabilities`` line in response to the
+ # ``hello`` command.
+ if protoname == wireprototypes.SSHV1:
+ for l in reversed(lines):
+ # Look for response to ``hello`` command. Scan from the back so
+ # we don't misinterpret banner output as the command reply.
+ if l.startswith('capabilities:'):
+ caps.update(l[:-1].split(':')[1].split())
+ break
+ elif protoname == wireprotoserver.SSHV2:
+ # We see a line with number of bytes to follow and then a value
+ # looking like ``capabilities: *``.
+ line = stdout.readline()
+ try:
+ valuelen = int(line)
+ except ValueError:
+ badresponse()
+
+ capsline = stdout.read(valuelen)
+ if not capsline.startswith('capabilities: '):
+ badresponse()
+
+ ui.debug('remote: %s\n' % capsline)
+
+ caps.update(capsline.split(':')[1].split())
+ # Trailing newline.
+ stdout.read(1)
+
+ # Error if we couldn't find capabilities, this means:
+ #
+ # 1. Remote isn't a Mercurial server
+ # 2. Remote is a <0.9.1 Mercurial server
+ # 3. Remote is a future Mercurial server that dropped ``hello``
+ # and other attempted handshake mechanisms.
+ if not caps:
+ badresponse()
- if create:
- cmd = '%s %s %s' % (sshcmd, args,
- util.shellquote("%s init %s" %
- (_serverquote(remotecmd), _serverquote(self._path))))
- ui.debug('running %s\n' % cmd)
- res = ui.system(cmd, blockedtag='sshpeer', environ=sshenv)
- if res != 0:
- self._abort(error.RepoError(_("could not create remote repo")))
+ # Flush any output on stderr before proceeding.
+ _forwardoutput(ui, stderr)
+
+ return protoname, caps
+
+class sshv1peer(wireproto.wirepeer):
+ def __init__(self, ui, url, proc, stdin, stdout, stderr, caps,
+ autoreadstderr=True):
+ """Create a peer from an existing SSH connection.
- self._validaterepo(sshcmd, args, remotecmd, sshenv)
+ ``proc`` is a handle on the underlying SSH process.
+ ``stdin``, ``stdout``, and ``stderr`` are handles on the stdio
+ pipes for that process.
+ ``caps`` is a set of capabilities supported by the remote.
+ ``autoreadstderr`` denotes whether to automatically read from
+ stderr and to forward its output.
+ """
+ self._url = url
+ self._ui = ui
+ # self._subprocess is unused. Keeping a handle on the process
+ # holds a reference and prevents it from being garbage collected.
+ self._subprocess = proc
+
+ # And we hook up our "doublepipe" wrapper to allow querying
+ # stderr any time we perform I/O.
+ if autoreadstderr:
+ stdout = doublepipe(ui, util.bufferedinputpipe(stdout), stderr)
+ stdin = doublepipe(ui, stdin, stderr)
+
+ self._pipeo = stdin
+ self._pipei = stdout
+ self._pipee = stderr
+ self._caps = caps
+ self._autoreadstderr = autoreadstderr
+
+ # Commands that have a "framed" response where the first line of the
+ # response contains the length of that response.
+ _FRAMED_COMMANDS = {
+ 'batch',
+ }
# Begin of _basepeer interface.
@@ -182,64 +404,6 @@
# End of _basewirecommands interface.
- def _validaterepo(self, sshcmd, args, remotecmd, sshenv=None):
- # cleanup up previous run
- self._cleanup()
-
- cmd = '%s %s %s' % (sshcmd, args,
- util.shellquote("%s -R %s serve --stdio" %
- (_serverquote(remotecmd), _serverquote(self._path))))
- self.ui.debug('running %s\n' % cmd)
- cmd = util.quotecommand(cmd)
-
- # while self._subprocess isn't used, having it allows the subprocess to
- # to clean up correctly later
- #
- # no buffer allow the use of 'select'
- # feel free to remove buffering and select usage when we ultimately
- # move to threading.
- sub = util.popen4(cmd, bufsize=0, env=sshenv)
- self._pipeo, self._pipei, self._pipee, self._subprocess = sub
-
- self._pipei = util.bufferedinputpipe(self._pipei)
- self._pipei = doublepipe(self.ui, self._pipei, self._pipee)
- self._pipeo = doublepipe(self.ui, self._pipeo, self._pipee)
-
- def badresponse():
- msg = _("no suitable response from remote hg")
- hint = self.ui.config("ui", "ssherrorhint")
- self._abort(error.RepoError(msg, hint=hint))
-
- try:
- # skip any noise generated by remote shell
- self._callstream("hello")
- r = self._callstream("between", pairs=("%s-%s" % ("0"*40, "0"*40)))
- except IOError:
- badresponse()
-
- lines = ["", "dummy"]
- max_noise = 500
- while lines[-1] and max_noise:
- try:
- l = r.readline()
- self._readerr()
- if lines[-1] == "1\n" and l == "\n":
- break
- if l:
- self.ui.debug("remote: ", l)
- lines.append(l)
- max_noise -= 1
- except IOError:
- badresponse()
- else:
- badresponse()
-
- self._caps = set()
- for l in reversed(lines):
- if l.startswith("capabilities:"):
- self._caps.update(l[:-1].split(":")[1].split())
- break
-
def _readerr(self):
_forwardoutput(self.ui, self._pipee)
@@ -248,41 +412,11 @@
raise exception
def _cleanup(self):
- if self._pipeo is None:
- return
- self._pipeo.close()
- self._pipei.close()
- try:
- # read the error descriptor until EOF
- for l in self._pipee:
- self.ui.status(_("remote: "), l)
- except (IOError, ValueError):
- pass
- self._pipee.close()
+ _cleanuppipes(self.ui, self._pipei, self._pipeo, self._pipee)
__del__ = _cleanup
- def _submitbatch(self, req):
- rsp = self._callstream("batch", cmds=wireproto.encodebatchcmds(req))
- available = self._getamount()
- # TODO this response parsing is probably suboptimal for large
- # batches with large responses.
- toread = min(available, 1024)
- work = rsp.read(toread)
- available -= toread
- chunk = work
- while chunk:
- while ';' in work:
- one, work = work.split(';', 1)
- yield wireproto.unescapearg(one)
- toread = min(available, 1024)
- chunk = rsp.read(toread)
- available -= toread
- work += chunk
- yield wireproto.unescapearg(work)
-
- def _callstream(self, cmd, **args):
- args = pycompat.byteskwargs(args)
+ def _sendrequest(self, cmd, args, framed=False):
if (self.ui.debugflag
and self.ui.configbool('devel', 'debug.peer-request')):
dbg = self.ui.debug
@@ -316,58 +450,164 @@
self._pipeo.write(v)
self._pipeo.flush()
+ # We know exactly how many bytes are in the response. So return a proxy
+ # around the raw output stream that allows reading exactly this many
+ # bytes. Callers then can read() without fear of overrunning the
+ # response.
+ if framed:
+ amount = self._getamount()
+ return util.cappedreader(self._pipei, amount)
+
return self._pipei
+ def _callstream(self, cmd, **args):
+ args = pycompat.byteskwargs(args)
+ return self._sendrequest(cmd, args, framed=cmd in self._FRAMED_COMMANDS)
+
def _callcompressable(self, cmd, **args):
- return self._callstream(cmd, **args)
+ args = pycompat.byteskwargs(args)
+ return self._sendrequest(cmd, args, framed=cmd in self._FRAMED_COMMANDS)
def _call(self, cmd, **args):
- self._callstream(cmd, **args)
- return self._recv()
+ args = pycompat.byteskwargs(args)
+ return self._sendrequest(cmd, args, framed=True).read()
def _callpush(self, cmd, fp, **args):
+ # The server responds with an empty frame if the client should
+ # continue submitting the payload.
r = self._call(cmd, **args)
if r:
return '', r
+
+ # The payload consists of frames with content followed by an empty
+ # frame.
for d in iter(lambda: fp.read(4096), ''):
- self._send(d)
- self._send("", flush=True)
- r = self._recv()
+ self._writeframed(d)
+ self._writeframed("", flush=True)
+
+ # In case of success, there is an empty frame and a frame containing
+ # the integer result (as a string).
+ # In case of error, there is a non-empty frame containing the error.
+ r = self._readframed()
if r:
return '', r
- return self._recv(), ''
+ return self._readframed(), ''
def _calltwowaystream(self, cmd, fp, **args):
+ # The server responds with an empty frame if the client should
+ # continue submitting the payload.
r = self._call(cmd, **args)
if r:
# XXX needs to be made better
raise error.Abort(_('unexpected remote reply: %s') % r)
+
+ # The payload consists of frames with content followed by an empty
+ # frame.
for d in iter(lambda: fp.read(4096), ''):
- self._send(d)
- self._send("", flush=True)
+ self._writeframed(d)
+ self._writeframed("", flush=True)
+
return self._pipei
def _getamount(self):
l = self._pipei.readline()
if l == '\n':
- self._readerr()
+ if self._autoreadstderr:
+ self._readerr()
msg = _('check previous remote output')
self._abort(error.OutOfBandError(hint=msg))
- self._readerr()
+ if self._autoreadstderr:
+ self._readerr()
try:
return int(l)
except ValueError:
self._abort(error.ResponseError(_("unexpected response:"), l))
- def _recv(self):
- return self._pipei.read(self._getamount())
+ def _readframed(self):
+ size = self._getamount()
+ if not size:
+ return b''
- def _send(self, data, flush=False):
+ return self._pipei.read(size)
+
+ def _writeframed(self, data, flush=False):
self._pipeo.write("%d\n" % len(data))
if data:
self._pipeo.write(data)
if flush:
self._pipeo.flush()
- self._readerr()
+ if self._autoreadstderr:
+ self._readerr()
+
+class sshv2peer(sshv1peer):
+ """A peer that speaks version 2 of the transport protocol."""
+ # Currently version 2 is identical to version 1 post handshake.
+ # And handshake is performed before the peer is instantiated. So
+ # we need no custom code.
+
+def makepeer(ui, path, proc, stdin, stdout, stderr, autoreadstderr=True):
+ """Make a peer instance from existing pipes.
+
+ ``path`` and ``proc`` are stored on the eventual peer instance and may
+ not be used for anything meaningful.
+
+ ``stdin``, ``stdout``, and ``stderr`` are the pipes connected to the
+ SSH server's stdio handles.
+
+ This function is factored out to allow creating peers that don't
+ actually spawn a new process. It is useful for starting SSH protocol
+ servers and clients via non-standard means, which can be useful for
+ testing.
+ """
+ try:
+ protoname, caps = _performhandshake(ui, stdin, stdout, stderr)
+ except Exception:
+ _cleanuppipes(ui, stdout, stdin, stderr)
+ raise
-instance = sshpeer
+ if protoname == wireprototypes.SSHV1:
+ return sshv1peer(ui, path, proc, stdin, stdout, stderr, caps,
+ autoreadstderr=autoreadstderr)
+ elif protoname == wireprototypes.SSHV2:
+ return sshv2peer(ui, path, proc, stdin, stdout, stderr, caps,
+ autoreadstderr=autoreadstderr)
+ else:
+ _cleanuppipes(ui, stdout, stdin, stderr)
+ raise error.RepoError(_('unknown version of SSH protocol: %s') %
+ protoname)
+
+def instance(ui, path, create):
+ """Create an SSH peer.
+
+ The returned object conforms to the ``wireproto.wirepeer`` interface.
+ """
+ u = util.url(path, parsequery=False, parsefragment=False)
+ if u.scheme != 'ssh' or not u.host or u.path is None:
+ raise error.RepoError(_("couldn't parse location %s") % path)
+
+ util.checksafessh(path)
+
+ if u.passwd is not None:
+ raise error.RepoError(_('password in URL not supported'))
+
+ sshcmd = ui.config('ui', 'ssh')
+ remotecmd = ui.config('ui', 'remotecmd')
+ sshaddenv = dict(ui.configitems('sshenv'))
+ sshenv = util.shellenviron(sshaddenv)
+ remotepath = u.path or '.'
+
+ args = util.sshargs(sshcmd, u.host, u.user, u.port)
+
+ if create:
+ cmd = '%s %s %s' % (sshcmd, args,
+ util.shellquote('%s init %s' %
+ (_serverquote(remotecmd), _serverquote(remotepath))))
+ ui.debug('running %s\n' % cmd)
+ res = ui.system(cmd, blockedtag='sshpeer', environ=sshenv)
+ if res != 0:
+ raise error.RepoError(_('could not create remote repo'))
+
+ proc, stdin, stdout, stderr = _makeconnection(ui, sshcmd, args, remotecmd,
+ remotepath, sshenv)
+
+ return makepeer(ui, path, proc, stdin, stdout, stderr)
--- a/mercurial/sshserver.py Thu Mar 15 22:35:07 2018 -0700
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,131 +0,0 @@
-# sshserver.py - ssh protocol server support for mercurial
-#
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
-# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-from __future__ import absolute_import
-
-import sys
-
-from .i18n import _
-from . import (
- encoding,
- error,
- hook,
- util,
- wireproto,
-)
-
-class sshserver(wireproto.abstractserverproto):
- def __init__(self, ui, repo):
- self.ui = ui
- self.repo = repo
- self.lock = None
- self.fin = ui.fin
- self.fout = ui.fout
- self.name = 'ssh'
-
- hook.redirect(True)
- ui.fout = repo.ui.fout = ui.ferr
-
- # Prevent insertion/deletion of CRs
- util.setbinary(self.fin)
- util.setbinary(self.fout)
-
- def getargs(self, args):
- data = {}
- keys = args.split()
- for n in xrange(len(keys)):
- argline = self.fin.readline()[:-1]
- arg, l = argline.split()
- if arg not in keys:
- raise error.Abort(_("unexpected parameter %r") % arg)
- if arg == '*':
- star = {}
- for k in xrange(int(l)):
- argline = self.fin.readline()[:-1]
- arg, l = argline.split()
- val = self.fin.read(int(l))
- star[arg] = val
- data['*'] = star
- else:
- val = self.fin.read(int(l))
- data[arg] = val
- return [data[k] for k in keys]
-
- def getarg(self, name):
- return self.getargs(name)[0]
-
- def getfile(self, fpout):
- self.sendresponse('')
- count = int(self.fin.readline())
- while count:
- fpout.write(self.fin.read(count))
- count = int(self.fin.readline())
-
- def redirect(self):
- pass
-
- def sendresponse(self, v):
- self.fout.write("%d\n" % len(v))
- self.fout.write(v)
- self.fout.flush()
-
- def sendstream(self, source):
- write = self.fout.write
- for chunk in source.gen:
- write(chunk)
- self.fout.flush()
-
- def sendpushresponse(self, rsp):
- self.sendresponse('')
- self.sendresponse(str(rsp.res))
-
- def sendpusherror(self, rsp):
- self.sendresponse(rsp.res)
-
- def sendooberror(self, rsp):
- self.ui.ferr.write('%s\n-\n' % rsp.message)
- self.ui.ferr.flush()
- self.fout.write('\n')
- self.fout.flush()
-
- def serve_forever(self):
- try:
- while self.serve_one():
- pass
- finally:
- if self.lock is not None:
- self.lock.release()
- sys.exit(0)
-
- handlers = {
- str: sendresponse,
- wireproto.streamres: sendstream,
- wireproto.streamres_legacy: sendstream,
- wireproto.pushres: sendpushresponse,
- wireproto.pusherr: sendpusherror,
- wireproto.ooberror: sendooberror,
- }
-
- def serve_one(self):
- cmd = self.fin.readline()[:-1]
- if cmd and cmd in wireproto.commands:
- rsp = wireproto.dispatch(self.repo, self, cmd)
- self.handlers[rsp.__class__](self, rsp)
- elif cmd:
- impl = getattr(self, 'do_' + cmd, None)
- if impl:
- r = impl()
- if r is not None:
- self.sendresponse(r)
- else:
- self.sendresponse("")
- return cmd != ''
-
- def _client(self):
- client = encoding.environ.get('SSH_CLIENT', '').split(' ', 1)[0]
- return 'remote:ssh:' + client
--- a/mercurial/sslutil.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/sslutil.py Mon Mar 19 08:07:18 2018 -0700
@@ -113,6 +113,7 @@
Returns a dict of settings relevant to that hostname.
"""
+ bhostname = pycompat.bytesurl(hostname)
s = {
# Whether we should attempt to load default/available CA certs
# if an explicit ``cafile`` is not defined.
@@ -162,14 +163,14 @@
ui.warn(_('warning: connecting to %s using legacy security '
'technology (TLS 1.0); see '
'https://mercurial-scm.org/wiki/SecureConnections for '
- 'more info\n') % hostname)
+ 'more info\n') % bhostname)
defaultprotocol = 'tls1.0'
key = 'minimumprotocol'
protocol = ui.config('hostsecurity', key, defaultprotocol)
validateprotocol(protocol, key)
- key = '%s:minimumprotocol' % hostname
+ key = '%s:minimumprotocol' % bhostname
protocol = ui.config('hostsecurity', key, protocol)
validateprotocol(protocol, key)
@@ -182,16 +183,16 @@
s['protocol'], s['ctxoptions'], s['protocolui'] = protocolsettings(protocol)
ciphers = ui.config('hostsecurity', 'ciphers')
- ciphers = ui.config('hostsecurity', '%s:ciphers' % hostname, ciphers)
+ ciphers = ui.config('hostsecurity', '%s:ciphers' % bhostname, ciphers)
s['ciphers'] = ciphers
# Look for fingerprints in [hostsecurity] section. Value is a list
# of <alg>:<fingerprint> strings.
- fingerprints = ui.configlist('hostsecurity', '%s:fingerprints' % hostname)
+ fingerprints = ui.configlist('hostsecurity', '%s:fingerprints' % bhostname)
for fingerprint in fingerprints:
if not (fingerprint.startswith(('sha1:', 'sha256:', 'sha512:'))):
raise error.Abort(_('invalid fingerprint for %s: %s') % (
- hostname, fingerprint),
+ bhostname, fingerprint),
hint=_('must begin with "sha1:", "sha256:", '
'or "sha512:"'))
@@ -200,7 +201,7 @@
s['certfingerprints'].append((alg, fingerprint))
# Fingerprints from [hostfingerprints] are always SHA-1.
- for fingerprint in ui.configlist('hostfingerprints', hostname):
+ for fingerprint in ui.configlist('hostfingerprints', bhostname):
fingerprint = fingerprint.replace(':', '').lower()
s['certfingerprints'].append(('sha1', fingerprint))
s['legacyfingerprint'] = True
@@ -223,11 +224,11 @@
# If both fingerprints and a per-host ca file are specified, issue a warning
# because users should not be surprised about what security is or isn't
# being performed.
- cafile = ui.config('hostsecurity', '%s:verifycertsfile' % hostname)
+ cafile = ui.config('hostsecurity', '%s:verifycertsfile' % bhostname)
if s['certfingerprints'] and cafile:
ui.warn(_('(hostsecurity.%s:verifycertsfile ignored when host '
'fingerprints defined; using host fingerprints for '
- 'verification)\n') % hostname)
+ 'verification)\n') % bhostname)
# Try to hook up CA certificate validation unless something above
# makes it not necessary.
@@ -237,8 +238,8 @@
cafile = util.expandpath(cafile)
if not os.path.exists(cafile):
raise error.Abort(_('path specified by %s does not exist: %s') %
- ('hostsecurity.%s:verifycertsfile' % hostname,
- cafile))
+ ('hostsecurity.%s:verifycertsfile' % (
+ bhostname,), cafile))
s['cafile'] = cafile
else:
# Find global certificates file in config.
@@ -345,10 +346,11 @@
for f in (keyfile, certfile):
if f and not os.path.exists(f):
- raise error.Abort(_('certificate file (%s) does not exist; '
- 'cannot connect to %s') % (f, serverhostname),
- hint=_('restore missing file or fix references '
- 'in Mercurial config'))
+ raise error.Abort(
+ _('certificate file (%s) does not exist; cannot connect to %s')
+ % (f, pycompat.bytesurl(serverhostname)),
+ hint=_('restore missing file or fix references '
+ 'in Mercurial config'))
settings = _hostsettings(ui, serverhostname)
@@ -369,11 +371,12 @@
if settings['ciphers']:
try:
- sslcontext.set_ciphers(settings['ciphers'])
+ sslcontext.set_ciphers(pycompat.sysstr(settings['ciphers']))
except ssl.SSLError as e:
- raise error.Abort(_('could not set ciphers: %s') % e.args[0],
- hint=_('change cipher string (%s) in config') %
- settings['ciphers'])
+ raise error.Abort(
+ _('could not set ciphers: %s') % util.forcebytestr(e.args[0]),
+ hint=_('change cipher string (%s) in config') %
+ settings['ciphers'])
if certfile is not None:
def password():
@@ -390,7 +393,7 @@
else:
msg = e.args[1]
raise error.Abort(_('error loading CA file %s: %s') % (
- settings['cafile'], msg),
+ settings['cafile'], util.forcebytestr(msg)),
hint=_('file is empty or malformed?'))
caloaded = True
elif settings['allowloaddefaultcerts']:
@@ -583,8 +586,10 @@
pats = []
if not dn:
return False
+ dn = pycompat.bytesurl(dn)
+ hostname = pycompat.bytesurl(hostname)
- pieces = dn.split(r'.')
+ pieces = dn.split('.')
leftmost = pieces[0]
remainder = pieces[1:]
wildcards = leftmost.count('*')
@@ -637,17 +642,17 @@
if _dnsnamematch(value, hostname):
return
except wildcarderror as e:
- return e.args[0]
+ return util.forcebytestr(e.args[0])
dnsnames.append(value)
if not dnsnames:
# The subject is only checked when there is no DNS in subjectAltName.
- for sub in cert.get('subject', []):
+ for sub in cert.get(r'subject', []):
for key, value in sub:
# According to RFC 2818 the most specific Common Name must
# be used.
- if key == 'commonName':
+ if key == r'commonName':
# 'subject' entries are unicode.
try:
value = value.encode('ascii')
@@ -658,7 +663,7 @@
if _dnsnamematch(value, hostname):
return
except wildcarderror as e:
- return e.args[0]
+ return util.forcebytestr(e.args[0])
dnsnames.append(value)
@@ -780,7 +785,8 @@
The passed socket must have been created with ``wrapsocket()``.
"""
- host = sock._hgstate['hostname']
+ shost = sock._hgstate['hostname']
+ host = pycompat.bytesurl(shost)
ui = sock._hgstate['ui']
settings = sock._hgstate['settings']
@@ -856,7 +862,7 @@
'hostsecurity.%s:fingerprints=%s to trust this server') %
(host, nicefingerprint))
- msg = _verifycert(peercert2, host)
+ msg = _verifycert(peercert2, shost)
if msg:
raise error.Abort(_('%s certificate error: %s') % (host, msg),
hint=_('set hostsecurity.%s:certfingerprints=%s '
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/stack.py Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,29 @@
+# stack.py - Mercurial functions for stack definition
+#
+# Copyright Matt Mackall <mpm@selenic.com> and others
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+from . import (
+ revsetlang,
+ scmutil,
+)
+
+def getstack(repo, rev=None):
+ """return a sorted smartset of the stack containing either rev if it is
+ not None or the current working directory parent.
+
+ The stack will always contain all draft changesets which are ancestors of
+ the revision and are not merges.
+ """
+ if rev is None:
+ rev = '.'
+
+ revspec = 'reverse(only(%s) and not public() and not ::merge())'
+ revset = revsetlang.formatspec(revspec, rev)
+ revisions = scmutil.revrange(repo, [revset])
+ revisions.sort()
+ return revisions
--- a/mercurial/statichttprepo.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/statichttprepo.py Mon Mar 19 08:07:18 2018 -0700
@@ -13,7 +13,6 @@
from .i18n import _
from . import (
- byterange,
changelog,
error,
localrepo,
@@ -82,10 +81,36 @@
def close(self):
pass
+# _RangeError and _HTTPRangeHandler were originally in byterange.py,
+# which was itself extracted from urlgrabber. See the last version of
+# byterange.py from history if you need more information.
+class _RangeError(IOError):
+ """Error raised when an unsatisfiable range is requested."""
+
+class _HTTPRangeHandler(urlreq.basehandler):
+ """Handler that enables HTTP Range headers.
+
+ This was extremely simple. The Range header is a HTTP feature to
+ begin with so all this class does is tell urllib2 that the
+ "206 Partial Content" response from the HTTP server is what we
+ expected.
+ """
+
+ def http_error_206(self, req, fp, code, msg, hdrs):
+ # 206 Partial Content Response
+ r = urlreq.addinfourl(fp, hdrs, req.get_full_url())
+ r.code = code
+ r.msg = msg
+ return r
+
+ def http_error_416(self, req, fp, code, msg, hdrs):
+ # HTTP's Range Not Satisfiable error
+ raise _RangeError('Requested Range Not Satisfiable')
+
def build_opener(ui, authinfo):
# urllib cannot handle URLs with embedded user or passwd
urlopener = url.opener(ui, authinfo)
- urlopener.add_handler(byterange.HTTPRangeHandler())
+ urlopener.add_handler(_HTTPRangeHandler())
class statichttpvfs(vfsmod.abstractvfs):
def __init__(self, base):
--- a/mercurial/subrepo.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/subrepo.py Mon Mar 19 08:07:18 2018 -0700
@@ -1,4 +1,4 @@
-# subrepo.py - sub-repository handling for Mercurial
+# subrepo.py - sub-repository classes and factory
#
# Copyright 2009-2010 Matt Mackall <mpm@selenic.com>
#
@@ -19,30 +19,31 @@
import tarfile
import xml.dom.minidom
-
from .i18n import _
from . import (
cmdutil,
- config,
encoding,
error,
exchange,
- filemerge,
+ logcmdutil,
match as matchmod,
node,
pathutil,
phases,
pycompat,
scmutil,
+ subrepoutil,
util,
vfs as vfsmod,
)
+from .utils import dateutil
hg = None
+reporelpath = subrepoutil.reporelpath
+subrelpath = subrepoutil.subrelpath
+_abssource = subrepoutil._abssource
propertycache = util.propertycache
-nullstate = ('', '', 'empty')
-
def _expandedabspath(path):
'''
get a path or url and if it is a path expand it and return an absolute path
@@ -73,291 +74,14 @@
raise ex
except error.Abort as ex:
subrepo = subrelpath(self)
- errormsg = str(ex) + ' ' + _('(in subrepository "%s")') % subrepo
+ errormsg = (util.forcebytestr(ex) + ' '
+ + _('(in subrepository "%s")') % subrepo)
# avoid handling this exception by raising a SubrepoAbort exception
raise SubrepoAbort(errormsg, hint=ex.hint, subrepo=subrepo,
cause=sys.exc_info())
return res
return decoratedmethod
-def state(ctx, ui):
- """return a state dict, mapping subrepo paths configured in .hgsub
- to tuple: (source from .hgsub, revision from .hgsubstate, kind
- (key in types dict))
- """
- p = config.config()
- repo = ctx.repo()
- def read(f, sections=None, remap=None):
- if f in ctx:
- try:
- data = ctx[f].data()
- except IOError as err:
- if err.errno != errno.ENOENT:
- raise
- # handle missing subrepo spec files as removed
- ui.warn(_("warning: subrepo spec file \'%s\' not found\n") %
- repo.pathto(f))
- return
- p.parse(f, data, sections, remap, read)
- else:
- raise error.Abort(_("subrepo spec file \'%s\' not found") %
- repo.pathto(f))
- if '.hgsub' in ctx:
- read('.hgsub')
-
- for path, src in ui.configitems('subpaths'):
- p.set('subpaths', path, src, ui.configsource('subpaths', path))
-
- rev = {}
- if '.hgsubstate' in ctx:
- try:
- for i, l in enumerate(ctx['.hgsubstate'].data().splitlines()):
- l = l.lstrip()
- if not l:
- continue
- try:
- revision, path = l.split(" ", 1)
- except ValueError:
- raise error.Abort(_("invalid subrepository revision "
- "specifier in \'%s\' line %d")
- % (repo.pathto('.hgsubstate'), (i + 1)))
- rev[path] = revision
- except IOError as err:
- if err.errno != errno.ENOENT:
- raise
-
- def remap(src):
- for pattern, repl in p.items('subpaths'):
- # Turn r'C:\foo\bar' into r'C:\\foo\\bar' since re.sub
- # does a string decode.
- repl = util.escapestr(repl)
- # However, we still want to allow back references to go
- # through unharmed, so we turn r'\\1' into r'\1'. Again,
- # extra escapes are needed because re.sub string decodes.
- repl = re.sub(br'\\\\([0-9]+)', br'\\\1', repl)
- try:
- src = re.sub(pattern, repl, src, 1)
- except re.error as e:
- raise error.Abort(_("bad subrepository pattern in %s: %s")
- % (p.source('subpaths', pattern), e))
- return src
-
- state = {}
- for path, src in p[''].items():
- kind = 'hg'
- if src.startswith('['):
- if ']' not in src:
- raise error.Abort(_('missing ] in subrepository source'))
- kind, src = src.split(']', 1)
- kind = kind[1:]
- src = src.lstrip() # strip any extra whitespace after ']'
-
- if not util.url(src).isabs():
- parent = _abssource(repo, abort=False)
- if parent:
- parent = util.url(parent)
- parent.path = posixpath.join(parent.path or '', src)
- parent.path = posixpath.normpath(parent.path)
- joined = str(parent)
- # Remap the full joined path and use it if it changes,
- # else remap the original source.
- remapped = remap(joined)
- if remapped == joined:
- src = remap(src)
- else:
- src = remapped
-
- src = remap(src)
- state[util.pconvert(path)] = (src.strip(), rev.get(path, ''), kind)
-
- return state
-
-def writestate(repo, state):
- """rewrite .hgsubstate in (outer) repo with these subrepo states"""
- lines = ['%s %s\n' % (state[s][1], s) for s in sorted(state)
- if state[s][1] != nullstate[1]]
- repo.wwrite('.hgsubstate', ''.join(lines), '')
-
-def submerge(repo, wctx, mctx, actx, overwrite, labels=None):
- """delegated from merge.applyupdates: merging of .hgsubstate file
- in working context, merging context and ancestor context"""
- if mctx == actx: # backwards?
- actx = wctx.p1()
- s1 = wctx.substate
- s2 = mctx.substate
- sa = actx.substate
- sm = {}
-
- repo.ui.debug("subrepo merge %s %s %s\n" % (wctx, mctx, actx))
-
- def debug(s, msg, r=""):
- if r:
- r = "%s:%s:%s" % r
- repo.ui.debug(" subrepo %s: %s %s\n" % (s, msg, r))
-
- promptssrc = filemerge.partextras(labels)
- for s, l in sorted(s1.iteritems()):
- prompts = None
- a = sa.get(s, nullstate)
- ld = l # local state with possible dirty flag for compares
- if wctx.sub(s).dirty():
- ld = (l[0], l[1] + "+")
- if wctx == actx: # overwrite
- a = ld
-
- prompts = promptssrc.copy()
- prompts['s'] = s
- if s in s2:
- r = s2[s]
- if ld == r or r == a: # no change or local is newer
- sm[s] = l
- continue
- elif ld == a: # other side changed
- debug(s, "other changed, get", r)
- wctx.sub(s).get(r, overwrite)
- sm[s] = r
- elif ld[0] != r[0]: # sources differ
- prompts['lo'] = l[0]
- prompts['ro'] = r[0]
- if repo.ui.promptchoice(
- _(' subrepository sources for %(s)s differ\n'
- 'use (l)ocal%(l)s source (%(lo)s)'
- ' or (r)emote%(o)s source (%(ro)s)?'
- '$$ &Local $$ &Remote') % prompts, 0):
- debug(s, "prompt changed, get", r)
- wctx.sub(s).get(r, overwrite)
- sm[s] = r
- elif ld[1] == a[1]: # local side is unchanged
- debug(s, "other side changed, get", r)
- wctx.sub(s).get(r, overwrite)
- sm[s] = r
- else:
- debug(s, "both sides changed")
- srepo = wctx.sub(s)
- prompts['sl'] = srepo.shortid(l[1])
- prompts['sr'] = srepo.shortid(r[1])
- option = repo.ui.promptchoice(
- _(' subrepository %(s)s diverged (local revision: %(sl)s, '
- 'remote revision: %(sr)s)\n'
- '(M)erge, keep (l)ocal%(l)s or keep (r)emote%(o)s?'
- '$$ &Merge $$ &Local $$ &Remote')
- % prompts, 0)
- if option == 0:
- wctx.sub(s).merge(r)
- sm[s] = l
- debug(s, "merge with", r)
- elif option == 1:
- sm[s] = l
- debug(s, "keep local subrepo revision", l)
- else:
- wctx.sub(s).get(r, overwrite)
- sm[s] = r
- debug(s, "get remote subrepo revision", r)
- elif ld == a: # remote removed, local unchanged
- debug(s, "remote removed, remove")
- wctx.sub(s).remove()
- elif a == nullstate: # not present in remote or ancestor
- debug(s, "local added, keep")
- sm[s] = l
- continue
- else:
- if repo.ui.promptchoice(
- _(' local%(l)s changed subrepository %(s)s'
- ' which remote%(o)s removed\n'
- 'use (c)hanged version or (d)elete?'
- '$$ &Changed $$ &Delete') % prompts, 0):
- debug(s, "prompt remove")
- wctx.sub(s).remove()
-
- for s, r in sorted(s2.items()):
- prompts = None
- if s in s1:
- continue
- elif s not in sa:
- debug(s, "remote added, get", r)
- mctx.sub(s).get(r)
- sm[s] = r
- elif r != sa[s]:
- prompts = promptssrc.copy()
- prompts['s'] = s
- if repo.ui.promptchoice(
- _(' remote%(o)s changed subrepository %(s)s'
- ' which local%(l)s removed\n'
- 'use (c)hanged version or (d)elete?'
- '$$ &Changed $$ &Delete') % prompts, 0) == 0:
- debug(s, "prompt recreate", r)
- mctx.sub(s).get(r)
- sm[s] = r
-
- # record merged .hgsubstate
- writestate(repo, sm)
- return sm
-
-def precommit(ui, wctx, status, match, force=False):
- """Calculate .hgsubstate changes that should be applied before committing
-
- Returns (subs, commitsubs, newstate) where
- - subs: changed subrepos (including dirty ones)
- - commitsubs: dirty subrepos which the caller needs to commit recursively
- - newstate: new state dict which the caller must write to .hgsubstate
-
- This also updates the given status argument.
- """
- subs = []
- commitsubs = set()
- newstate = wctx.substate.copy()
-
- # only manage subrepos and .hgsubstate if .hgsub is present
- if '.hgsub' in wctx:
- # we'll decide whether to track this ourselves, thanks
- for c in status.modified, status.added, status.removed:
- if '.hgsubstate' in c:
- c.remove('.hgsubstate')
-
- # compare current state to last committed state
- # build new substate based on last committed state
- oldstate = wctx.p1().substate
- for s in sorted(newstate.keys()):
- if not match(s):
- # ignore working copy, use old state if present
- if s in oldstate:
- newstate[s] = oldstate[s]
- continue
- if not force:
- raise error.Abort(
- _("commit with new subrepo %s excluded") % s)
- dirtyreason = wctx.sub(s).dirtyreason(True)
- if dirtyreason:
- if not ui.configbool('ui', 'commitsubrepos'):
- raise error.Abort(dirtyreason,
- hint=_("use --subrepos for recursive commit"))
- subs.append(s)
- commitsubs.add(s)
- else:
- bs = wctx.sub(s).basestate()
- newstate[s] = (newstate[s][0], bs, newstate[s][2])
- if oldstate.get(s, (None, None, None))[1] != bs:
- subs.append(s)
-
- # check for removed subrepos
- for p in wctx.parents():
- r = [s for s in p.substate if s not in newstate]
- subs += [s for s in r if match(s)]
- if subs:
- if (not match('.hgsub') and
- '.hgsub' in (wctx.modified() + wctx.added())):
- raise error.Abort(_("can't commit subrepos without .hgsub"))
- status.modified.insert(0, '.hgsubstate')
-
- elif '.hgsub' in status.removed:
- # clean up .hgsubstate when .hgsub is removed
- if ('.hgsubstate' in wctx and
- '.hgsubstate' not in (status.modified + status.added +
- status.removed)):
- status.removed.insert(0, '.hgsubstate')
-
- return subs, commitsubs, newstate
-
def _updateprompt(ui, sub, dirty, local, remote):
if dirty:
msg = (_(' subrepository sources for %s differ\n'
@@ -372,64 +96,6 @@
% (subrelpath(sub), local, remote))
return ui.promptchoice(msg, 0)
-def reporelpath(repo):
- """return path to this (sub)repo as seen from outermost repo"""
- parent = repo
- while util.safehasattr(parent, '_subparent'):
- parent = parent._subparent
- return repo.root[len(pathutil.normasprefix(parent.root)):]
-
-def subrelpath(sub):
- """return path to this subrepo as seen from outermost repo"""
- return sub._relpath
-
-def _abssource(repo, push=False, abort=True):
- """return pull/push path of repo - either based on parent repo .hgsub info
- or on the top repo config. Abort or return None if no source found."""
- if util.safehasattr(repo, '_subparent'):
- source = util.url(repo._subsource)
- if source.isabs():
- return bytes(source)
- source.path = posixpath.normpath(source.path)
- parent = _abssource(repo._subparent, push, abort=False)
- if parent:
- parent = util.url(util.pconvert(parent))
- parent.path = posixpath.join(parent.path or '', source.path)
- parent.path = posixpath.normpath(parent.path)
- return bytes(parent)
- else: # recursion reached top repo
- path = None
- if util.safehasattr(repo, '_subtoppath'):
- path = repo._subtoppath
- elif push and repo.ui.config('paths', 'default-push'):
- path = repo.ui.config('paths', 'default-push')
- elif repo.ui.config('paths', 'default'):
- path = repo.ui.config('paths', 'default')
- elif repo.shared():
- # chop off the .hg component to get the default path form. This has
- # already run through vfsmod.vfs(..., realpath=True), so it doesn't
- # have problems with 'C:'
- return os.path.dirname(repo.sharedpath)
- if path:
- # issue5770: 'C:\' and 'C:' are not equivalent paths. The former is
- # as expected: an absolute path to the root of the C: drive. The
- # latter is a relative path, and works like so:
- #
- # C:\>cd C:\some\path
- # C:\>D:
- # D:\>python -c "import os; print os.path.abspath('C:')"
- # C:\some\path
- #
- # D:\>python -c "import os; print os.path.abspath('C:relative')"
- # C:\some\path\relative
- if util.hasdriveletter(path):
- if len(path) == 2 or path[2:3] not in br'\/':
- path = os.path.abspath(path)
- return path
-
- if abort:
- raise error.Abort(_("default path for subrepository not found"))
-
def _sanitize(ui, vfs, ignore):
for dirname, dirs, names in vfs.walk():
for i, d in enumerate(dirs):
@@ -508,37 +174,6 @@
subrev = "0" * 40
return types[state[2]](pctx, path, (state[0], subrev), True)
-def newcommitphase(ui, ctx):
- commitphase = phases.newcommitphase(ui)
- substate = getattr(ctx, "substate", None)
- if not substate:
- return commitphase
- check = ui.config('phases', 'checksubrepos')
- if check not in ('ignore', 'follow', 'abort'):
- raise error.Abort(_('invalid phases.checksubrepos configuration: %s')
- % (check))
- if check == 'ignore':
- return commitphase
- maxphase = phases.public
- maxsub = None
- for s in sorted(substate):
- sub = ctx.sub(s)
- subphase = sub.phase(substate[s][1])
- if maxphase < subphase:
- maxphase = subphase
- maxsub = s
- if commitphase < maxphase:
- if check == 'abort':
- raise error.Abort(_("can't commit in %s phase"
- " conflicting %s from subrepository %s") %
- (phases.phasenames[commitphase],
- phases.phasenames[maxphase], maxsub))
- ui.warn(_("warning: changes are committed in"
- " %s phase from subrepository %s\n") %
- (phases.phasenames[maxphase], maxsub))
- return maxphase
- return commitphase
-
# subrepo classes need to implement the following abstract class:
class abstractsubrepo(object):
@@ -713,7 +348,7 @@
matched by the match function
'''
- def forget(self, match, prefix):
+ def forget(self, match, prefix, dryrun):
return ([], [])
def removefiles(self, matcher, prefix, after, force, subrepos, warnings):
@@ -907,10 +542,10 @@
# in hex format
if node2 is not None:
node2 = node.bin(node2)
- cmdutil.diffordiffstat(ui, self._repo, diffopts,
- node1, node2, match,
- prefix=posixpath.join(prefix, self._path),
- listsubrepos=True, **opts)
+ logcmdutil.diffordiffstat(ui, self._repo, diffopts,
+ node1, node2, match,
+ prefix=posixpath.join(prefix, self._path),
+ listsubrepos=True, **opts)
except error.RepoLookupError as inst:
self.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
% (inst, subrelpath(self)))
@@ -918,9 +553,13 @@
@annotatesubrepoerror
def archive(self, archiver, prefix, match=None, decode=True):
self._get(self._state + ('hg',))
- total = abstractsubrepo.archive(self, archiver, prefix, match)
+ files = self.files()
+ if match:
+ files = [f for f in files if match(f)]
rev = self._state[1]
ctx = self._repo[rev]
+ scmutil.fileprefetchhooks(self._repo, ctx, files)
+ total = abstractsubrepo.archive(self, archiver, prefix, match)
for subpath in ctx.substate:
s = subrepo(ctx, subpath, True)
submatch = matchmod.subdirmatcher(subpath, match)
@@ -1172,9 +811,10 @@
return ctx.walk(match)
@annotatesubrepoerror
- def forget(self, match, prefix):
+ def forget(self, match, prefix, dryrun):
return cmdutil.forget(self.ui, self._repo, match,
- self.wvfs.reljoin(prefix, self._path), True)
+ self.wvfs.reljoin(prefix, self._path),
+ True, dryrun=dryrun)
@annotatesubrepoerror
def removefiles(self, matcher, prefix, after, force, subrepos, warnings):
@@ -1484,7 +1124,7 @@
doc = xml.dom.minidom.parseString(output)
paths = []
for e in doc.getElementsByTagName('entry'):
- kind = str(e.getAttribute('kind'))
+ kind = pycompat.bytestr(e.getAttribute('kind'))
if kind != 'file':
continue
name = ''.join(c.data for c
@@ -1849,7 +1489,7 @@
if date:
# git's date parser silently ignores when seconds < 1e9
# convert to ISO8601
- env['GIT_AUTHOR_DATE'] = util.datestr(date,
+ env['GIT_AUTHOR_DATE'] = dateutil.datestr(date,
'%Y-%m-%dT%H:%M:%S %1%2')
self._gitcommand(cmd, env=env)
# make sure commit works otherwise HEAD might not exist under certain
@@ -2025,8 +1665,7 @@
# TODO: add support for non-plain formatter (see cmdutil.cat())
for f in match.files():
output = self._gitcommand(["show", "%s:%s" % (rev, f)])
- fp = cmdutil.makefileobj(self._subparent, fntemplate,
- self._ctx.node(),
+ fp = cmdutil.makefileobj(self._ctx, fntemplate,
pathname=self.wvfs.reljoin(prefix, f))
fp.write(output)
fp.close()
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/subrepoutil.py Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,392 @@
+# subrepoutil.py - sub-repository operations and substate handling
+#
+# Copyright 2009-2010 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+import errno
+import os
+import posixpath
+import re
+
+from .i18n import _
+from . import (
+ config,
+ error,
+ filemerge,
+ pathutil,
+ phases,
+ util,
+)
+
+nullstate = ('', '', 'empty')
+
+def state(ctx, ui):
+ """return a state dict, mapping subrepo paths configured in .hgsub
+ to tuple: (source from .hgsub, revision from .hgsubstate, kind
+ (key in types dict))
+ """
+ p = config.config()
+ repo = ctx.repo()
+ def read(f, sections=None, remap=None):
+ if f in ctx:
+ try:
+ data = ctx[f].data()
+ except IOError as err:
+ if err.errno != errno.ENOENT:
+ raise
+ # handle missing subrepo spec files as removed
+ ui.warn(_("warning: subrepo spec file \'%s\' not found\n") %
+ repo.pathto(f))
+ return
+ p.parse(f, data, sections, remap, read)
+ else:
+ raise error.Abort(_("subrepo spec file \'%s\' not found") %
+ repo.pathto(f))
+ if '.hgsub' in ctx:
+ read('.hgsub')
+
+ for path, src in ui.configitems('subpaths'):
+ p.set('subpaths', path, src, ui.configsource('subpaths', path))
+
+ rev = {}
+ if '.hgsubstate' in ctx:
+ try:
+ for i, l in enumerate(ctx['.hgsubstate'].data().splitlines()):
+ l = l.lstrip()
+ if not l:
+ continue
+ try:
+ revision, path = l.split(" ", 1)
+ except ValueError:
+ raise error.Abort(_("invalid subrepository revision "
+ "specifier in \'%s\' line %d")
+ % (repo.pathto('.hgsubstate'), (i + 1)))
+ rev[path] = revision
+ except IOError as err:
+ if err.errno != errno.ENOENT:
+ raise
+
+ def remap(src):
+ for pattern, repl in p.items('subpaths'):
+ # Turn r'C:\foo\bar' into r'C:\\foo\\bar' since re.sub
+ # does a string decode.
+ repl = util.escapestr(repl)
+ # However, we still want to allow back references to go
+ # through unharmed, so we turn r'\\1' into r'\1'. Again,
+ # extra escapes are needed because re.sub string decodes.
+ repl = re.sub(br'\\\\([0-9]+)', br'\\\1', repl)
+ try:
+ src = re.sub(pattern, repl, src, 1)
+ except re.error as e:
+ raise error.Abort(_("bad subrepository pattern in %s: %s")
+ % (p.source('subpaths', pattern), e))
+ return src
+
+ state = {}
+ for path, src in p[''].items():
+ kind = 'hg'
+ if src.startswith('['):
+ if ']' not in src:
+ raise error.Abort(_('missing ] in subrepository source'))
+ kind, src = src.split(']', 1)
+ kind = kind[1:]
+ src = src.lstrip() # strip any extra whitespace after ']'
+
+ if not util.url(src).isabs():
+ parent = _abssource(repo, abort=False)
+ if parent:
+ parent = util.url(parent)
+ parent.path = posixpath.join(parent.path or '', src)
+ parent.path = posixpath.normpath(parent.path)
+ joined = str(parent)
+ # Remap the full joined path and use it if it changes,
+ # else remap the original source.
+ remapped = remap(joined)
+ if remapped == joined:
+ src = remap(src)
+ else:
+ src = remapped
+
+ src = remap(src)
+ state[util.pconvert(path)] = (src.strip(), rev.get(path, ''), kind)
+
+ return state
+
+def writestate(repo, state):
+ """rewrite .hgsubstate in (outer) repo with these subrepo states"""
+ lines = ['%s %s\n' % (state[s][1], s) for s in sorted(state)
+ if state[s][1] != nullstate[1]]
+ repo.wwrite('.hgsubstate', ''.join(lines), '')
+
+def submerge(repo, wctx, mctx, actx, overwrite, labels=None):
+ """delegated from merge.applyupdates: merging of .hgsubstate file
+ in working context, merging context and ancestor context"""
+ if mctx == actx: # backwards?
+ actx = wctx.p1()
+ s1 = wctx.substate
+ s2 = mctx.substate
+ sa = actx.substate
+ sm = {}
+
+ repo.ui.debug("subrepo merge %s %s %s\n" % (wctx, mctx, actx))
+
+ def debug(s, msg, r=""):
+ if r:
+ r = "%s:%s:%s" % r
+ repo.ui.debug(" subrepo %s: %s %s\n" % (s, msg, r))
+
+ promptssrc = filemerge.partextras(labels)
+ for s, l in sorted(s1.iteritems()):
+ prompts = None
+ a = sa.get(s, nullstate)
+ ld = l # local state with possible dirty flag for compares
+ if wctx.sub(s).dirty():
+ ld = (l[0], l[1] + "+")
+ if wctx == actx: # overwrite
+ a = ld
+
+ prompts = promptssrc.copy()
+ prompts['s'] = s
+ if s in s2:
+ r = s2[s]
+ if ld == r or r == a: # no change or local is newer
+ sm[s] = l
+ continue
+ elif ld == a: # other side changed
+ debug(s, "other changed, get", r)
+ wctx.sub(s).get(r, overwrite)
+ sm[s] = r
+ elif ld[0] != r[0]: # sources differ
+ prompts['lo'] = l[0]
+ prompts['ro'] = r[0]
+ if repo.ui.promptchoice(
+ _(' subrepository sources for %(s)s differ\n'
+ 'use (l)ocal%(l)s source (%(lo)s)'
+ ' or (r)emote%(o)s source (%(ro)s)?'
+ '$$ &Local $$ &Remote') % prompts, 0):
+ debug(s, "prompt changed, get", r)
+ wctx.sub(s).get(r, overwrite)
+ sm[s] = r
+ elif ld[1] == a[1]: # local side is unchanged
+ debug(s, "other side changed, get", r)
+ wctx.sub(s).get(r, overwrite)
+ sm[s] = r
+ else:
+ debug(s, "both sides changed")
+ srepo = wctx.sub(s)
+ prompts['sl'] = srepo.shortid(l[1])
+ prompts['sr'] = srepo.shortid(r[1])
+ option = repo.ui.promptchoice(
+ _(' subrepository %(s)s diverged (local revision: %(sl)s, '
+ 'remote revision: %(sr)s)\n'
+ '(M)erge, keep (l)ocal%(l)s or keep (r)emote%(o)s?'
+ '$$ &Merge $$ &Local $$ &Remote')
+ % prompts, 0)
+ if option == 0:
+ wctx.sub(s).merge(r)
+ sm[s] = l
+ debug(s, "merge with", r)
+ elif option == 1:
+ sm[s] = l
+ debug(s, "keep local subrepo revision", l)
+ else:
+ wctx.sub(s).get(r, overwrite)
+ sm[s] = r
+ debug(s, "get remote subrepo revision", r)
+ elif ld == a: # remote removed, local unchanged
+ debug(s, "remote removed, remove")
+ wctx.sub(s).remove()
+ elif a == nullstate: # not present in remote or ancestor
+ debug(s, "local added, keep")
+ sm[s] = l
+ continue
+ else:
+ if repo.ui.promptchoice(
+ _(' local%(l)s changed subrepository %(s)s'
+ ' which remote%(o)s removed\n'
+ 'use (c)hanged version or (d)elete?'
+ '$$ &Changed $$ &Delete') % prompts, 0):
+ debug(s, "prompt remove")
+ wctx.sub(s).remove()
+
+ for s, r in sorted(s2.items()):
+ prompts = None
+ if s in s1:
+ continue
+ elif s not in sa:
+ debug(s, "remote added, get", r)
+ mctx.sub(s).get(r)
+ sm[s] = r
+ elif r != sa[s]:
+ prompts = promptssrc.copy()
+ prompts['s'] = s
+ if repo.ui.promptchoice(
+ _(' remote%(o)s changed subrepository %(s)s'
+ ' which local%(l)s removed\n'
+ 'use (c)hanged version or (d)elete?'
+ '$$ &Changed $$ &Delete') % prompts, 0) == 0:
+ debug(s, "prompt recreate", r)
+ mctx.sub(s).get(r)
+ sm[s] = r
+
+ # record merged .hgsubstate
+ writestate(repo, sm)
+ return sm
+
+def precommit(ui, wctx, status, match, force=False):
+ """Calculate .hgsubstate changes that should be applied before committing
+
+ Returns (subs, commitsubs, newstate) where
+ - subs: changed subrepos (including dirty ones)
+ - commitsubs: dirty subrepos which the caller needs to commit recursively
+ - newstate: new state dict which the caller must write to .hgsubstate
+
+ This also updates the given status argument.
+ """
+ subs = []
+ commitsubs = set()
+ newstate = wctx.substate.copy()
+
+ # only manage subrepos and .hgsubstate if .hgsub is present
+ if '.hgsub' in wctx:
+ # we'll decide whether to track this ourselves, thanks
+ for c in status.modified, status.added, status.removed:
+ if '.hgsubstate' in c:
+ c.remove('.hgsubstate')
+
+ # compare current state to last committed state
+ # build new substate based on last committed state
+ oldstate = wctx.p1().substate
+ for s in sorted(newstate.keys()):
+ if not match(s):
+ # ignore working copy, use old state if present
+ if s in oldstate:
+ newstate[s] = oldstate[s]
+ continue
+ if not force:
+ raise error.Abort(
+ _("commit with new subrepo %s excluded") % s)
+ dirtyreason = wctx.sub(s).dirtyreason(True)
+ if dirtyreason:
+ if not ui.configbool('ui', 'commitsubrepos'):
+ raise error.Abort(dirtyreason,
+ hint=_("use --subrepos for recursive commit"))
+ subs.append(s)
+ commitsubs.add(s)
+ else:
+ bs = wctx.sub(s).basestate()
+ newstate[s] = (newstate[s][0], bs, newstate[s][2])
+ if oldstate.get(s, (None, None, None))[1] != bs:
+ subs.append(s)
+
+ # check for removed subrepos
+ for p in wctx.parents():
+ r = [s for s in p.substate if s not in newstate]
+ subs += [s for s in r if match(s)]
+ if subs:
+ if (not match('.hgsub') and
+ '.hgsub' in (wctx.modified() + wctx.added())):
+ raise error.Abort(_("can't commit subrepos without .hgsub"))
+ status.modified.insert(0, '.hgsubstate')
+
+ elif '.hgsub' in status.removed:
+ # clean up .hgsubstate when .hgsub is removed
+ if ('.hgsubstate' in wctx and
+ '.hgsubstate' not in (status.modified + status.added +
+ status.removed)):
+ status.removed.insert(0, '.hgsubstate')
+
+ return subs, commitsubs, newstate
+
+def reporelpath(repo):
+ """return path to this (sub)repo as seen from outermost repo"""
+ parent = repo
+ while util.safehasattr(parent, '_subparent'):
+ parent = parent._subparent
+ return repo.root[len(pathutil.normasprefix(parent.root)):]
+
+def subrelpath(sub):
+ """return path to this subrepo as seen from outermost repo"""
+ return sub._relpath
+
+def _abssource(repo, push=False, abort=True):
+ """return pull/push path of repo - either based on parent repo .hgsub info
+ or on the top repo config. Abort or return None if no source found."""
+ if util.safehasattr(repo, '_subparent'):
+ source = util.url(repo._subsource)
+ if source.isabs():
+ return bytes(source)
+ source.path = posixpath.normpath(source.path)
+ parent = _abssource(repo._subparent, push, abort=False)
+ if parent:
+ parent = util.url(util.pconvert(parent))
+ parent.path = posixpath.join(parent.path or '', source.path)
+ parent.path = posixpath.normpath(parent.path)
+ return bytes(parent)
+ else: # recursion reached top repo
+ path = None
+ if util.safehasattr(repo, '_subtoppath'):
+ path = repo._subtoppath
+ elif push and repo.ui.config('paths', 'default-push'):
+ path = repo.ui.config('paths', 'default-push')
+ elif repo.ui.config('paths', 'default'):
+ path = repo.ui.config('paths', 'default')
+ elif repo.shared():
+ # chop off the .hg component to get the default path form. This has
+ # already run through vfsmod.vfs(..., realpath=True), so it doesn't
+ # have problems with 'C:'
+ return os.path.dirname(repo.sharedpath)
+ if path:
+ # issue5770: 'C:\' and 'C:' are not equivalent paths. The former is
+ # as expected: an absolute path to the root of the C: drive. The
+ # latter is a relative path, and works like so:
+ #
+ # C:\>cd C:\some\path
+ # C:\>D:
+ # D:\>python -c "import os; print os.path.abspath('C:')"
+ # C:\some\path
+ #
+ # D:\>python -c "import os; print os.path.abspath('C:relative')"
+ # C:\some\path\relative
+ if util.hasdriveletter(path):
+ if len(path) == 2 or path[2:3] not in br'\/':
+ path = os.path.abspath(path)
+ return path
+
+ if abort:
+ raise error.Abort(_("default path for subrepository not found"))
+
+def newcommitphase(ui, ctx):
+ commitphase = phases.newcommitphase(ui)
+ substate = getattr(ctx, "substate", None)
+ if not substate:
+ return commitphase
+ check = ui.config('phases', 'checksubrepos')
+ if check not in ('ignore', 'follow', 'abort'):
+ raise error.Abort(_('invalid phases.checksubrepos configuration: %s')
+ % (check))
+ if check == 'ignore':
+ return commitphase
+ maxphase = phases.public
+ maxsub = None
+ for s in sorted(substate):
+ sub = ctx.sub(s)
+ subphase = sub.phase(substate[s][1])
+ if maxphase < subphase:
+ maxphase = subphase
+ maxsub = s
+ if commitphase < maxphase:
+ if check == 'abort':
+ raise error.Abort(_("can't commit in %s phase"
+ " conflicting %s from subrepository %s") %
+ (phases.phasenames[commitphase],
+ phases.phasenames[maxphase], maxsub))
+ ui.warn(_("warning: changes are committed in"
+ " %s phase from subrepository %s\n") %
+ (phases.phasenames[maxphase], maxsub))
+ return maxphase
+ return commitphase
--- a/mercurial/tagmerge.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/tagmerge.py Mon Mar 19 08:07:18 2018 -0700
@@ -73,8 +73,6 @@
from __future__ import absolute_import
-import operator
-
from .i18n import _
from .node import (
hex,
@@ -146,7 +144,7 @@
possible to the first parent's .hgtags file.
'''
# group the node-tag pairs that must be written next to each other
- for tname, taglist in mergedtags.items():
+ for tname, taglist in list(mergedtags.items()):
mergedtags[tname] = grouptagnodesbyline(taglist)
# convert the grouped merged tags dict into a format that resembles the
@@ -164,7 +162,7 @@
# before writing them
# the position is calculated to ensure that the diff of the merged .hgtags
# file to the first parent's .hgtags file is as small as possible
- finaltags.sort(key=operator.itemgetter(0))
+ finaltags.sort(key=lambda x: -1 if x[0] is None else x[0])
# finally we can join the sorted groups to get the final contents of the
# merged .hgtags file, and then write it to disk
@@ -269,4 +267,3 @@
writemergedtags(fcd, mergedtags)
ui.note(_('.hgtags merged successfully\n'))
return False, 0
-
--- a/mercurial/tags.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/tags.py Mon Mar 19 08:07:18 2018 -0700
@@ -244,7 +244,7 @@
# remove tags pointing to invalid nodes
cl = repo.changelog
- for t in filetags.keys():
+ for t in list(filetags):
try:
cl.rev(filetags[t][0])
except (LookupError, ValueError):
@@ -276,7 +276,7 @@
count = 0
def dbg(msg):
- ui.debug("%s, line %s: %s\n" % (fn, count, msg))
+ ui.debug("%s, line %d: %s\n" % (fn, count, msg))
for nline, line in enumerate(lines):
count += 1
@@ -559,7 +559,7 @@
def writetags(fp, names, munge, prevtags):
fp.seek(0, 2)
- if prevtags and prevtags[-1] != '\n':
+ if prevtags and not prevtags.endswith('\n'):
fp.write('\n')
for name in names:
if munge:
@@ -739,7 +739,7 @@
entry = bytearray(prefix + fnode)
self._raw[offset:offset + _fnodesrecsize] = entry
# self._dirtyoffset could be None.
- self._dirtyoffset = min(self._dirtyoffset, offset) or 0
+ self._dirtyoffset = min(self._dirtyoffset or 0, offset or 0)
def write(self):
"""Perform all necessary writes to cache file.
@@ -783,6 +783,6 @@
except (IOError, OSError) as inst:
repo.ui.log('tagscache',
"couldn't write cache/%s: %s\n" % (
- _fnodescachefile, inst))
+ _fnodescachefile, util.forcebytestr(inst)))
finally:
lock.release()
--- a/mercurial/templatefilters.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/templatefilters.py Mon Mar 19 08:07:18 2018 -0700
@@ -14,14 +14,14 @@
from . import (
encoding,
error,
- hbisect,
node,
pycompat,
registrar,
- templatekw,
+ templateutil,
url,
util,
)
+from .utils import dateutil
urlerr = util.urlerr
urlreq = util.urlreq
@@ -78,7 +78,7 @@
else:
delta = max(1, int(now - then))
if delta > agescales[0][1] * 2:
- return util.shortdate(date)
+ return dateutil.shortdate(date)
for t, s, a in agescales:
n = delta // s
@@ -100,6 +100,13 @@
"""List or text. Returns the length as an integer."""
return len(i)
+@templatefilter('dirname')
+def dirname(path):
+ """Any text. Treats the text as a path, and strips the last
+ component of the path after splitting by the path separator.
+ """
+ return os.path.dirname(path)
+
@templatefilter('domain')
def domain(author):
"""Any text. Finds the first string that looks like an email
@@ -138,19 +145,19 @@
global para_re, space_re
if para_re is None:
para_re = re.compile('(\n\n|\n\\s*[-*]\\s*)', re.M)
- space_re = re.compile(r' +')
+ space_re = re.compile(br' +')
def findparas():
start = 0
while True:
m = para_re.search(text, start)
if not m:
- uctext = unicode(text[start:], encoding.encoding)
+ uctext = encoding.unifromlocal(text[start:])
w = len(uctext)
while 0 < w and uctext[w - 1].isspace():
w -= 1
- yield (uctext[:w].encode(encoding.encoding),
- uctext[w:].encode(encoding.encoding))
+ yield (encoding.unitolocal(uctext[:w]),
+ encoding.unitolocal(uctext[w:]))
break
yield text[start:m.start(0)], m.group(1)
start = m.end(1)
@@ -196,7 +203,7 @@
"""Date. Returns the date in ISO 8601 format: "2009-08-18 13:00
+0200".
"""
- return util.datestr(text, '%Y-%m-%d %H:%M %1%2')
+ return dateutil.datestr(text, '%Y-%m-%d %H:%M %1%2')
@templatefilter('isodatesec')
def isodatesec(text):
@@ -204,7 +211,7 @@
seconds: "2009-08-18 13:00:13 +0200". See also the rfc3339date
filter.
"""
- return util.datestr(text, '%Y-%m-%d %H:%M:%S %1%2')
+ return dateutil.datestr(text, '%Y-%m-%d %H:%M:%S %1%2')
def indent(text, prefix):
'''indent each non-empty line of text after first with prefix.'''
@@ -257,16 +264,16 @@
return encoding.lower(text)
@templatefilter('nonempty')
-def nonempty(str):
+def nonempty(text):
"""Any text. Returns '(none)' if the string is empty."""
- return str or "(none)"
+ return text or "(none)"
@templatefilter('obfuscate')
def obfuscate(text):
"""Any text. Returns the input text rendered as a sequence of
XML entities.
"""
- text = unicode(text, encoding.encoding, 'replace')
+ text = unicode(text, pycompat.sysstr(encoding.encoding), r'replace')
return ''.join(['&#%d;' % ord(c) for c in text])
@templatefilter('permissions')
@@ -318,14 +325,14 @@
"""Date. Returns a date using the Internet date format
specified in RFC 3339: "2009-08-18T13:00:13+02:00".
"""
- return util.datestr(text, "%Y-%m-%dT%H:%M:%S%1:%2")
+ return dateutil.datestr(text, "%Y-%m-%dT%H:%M:%S%1:%2")
@templatefilter('rfc822date')
def rfc822date(text):
"""Date. Returns a date using the same format used in email
headers: "Tue, 18 Aug 2009 13:00:13 +0200".
"""
- return util.datestr(text, "%a, %d %b %Y %H:%M:%S %1%2")
+ return dateutil.datestr(text, "%a, %d %b %Y %H:%M:%S %1%2")
@templatefilter('short')
def short(text):
@@ -335,18 +342,20 @@
return text[:12]
@templatefilter('shortbisect')
-def shortbisect(text):
- """Any text. Treats `text` as a bisection status, and
+def shortbisect(label):
+ """Any text. Treats `label` as a bisection status, and
returns a single-character representing the status (G: good, B: bad,
S: skipped, U: untested, I: ignored). Returns single space if `text`
is not a valid bisection status.
"""
- return hbisect.shortlabel(text) or ' '
+ if label:
+ return label[0:1].upper()
+ return ' '
@templatefilter('shortdate')
def shortdate(text):
"""Date. Returns a date like "2006-09-18"."""
- return util.shortdate(text)
+ return dateutil.shortdate(text)
@templatefilter('slashpath')
def slashpath(path):
@@ -356,7 +365,7 @@
@templatefilter('splitlines')
def splitlines(text):
"""Any text. Split text into a list of lines."""
- return templatekw.hybridlist(text.splitlines(), name='line')
+ return templateutil.hybridlist(text.splitlines(), name='line')
@templatefilter('stringescape')
def stringescape(text):
@@ -367,12 +376,7 @@
"""Any type. Turns the value into text by converting values into
text and concatenating them.
"""
- thing = templatekw.unwraphybrid(thing)
- if util.safehasattr(thing, '__iter__') and not isinstance(thing, bytes):
- return "".join([stringify(t) for t in thing if t is not None])
- if thing is None:
- return ""
- return pycompat.bytestr(thing)
+ return templateutil.stringify(thing)
@templatefilter('stripdir')
def stripdir(text):
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/templatefuncs.py Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,664 @@
+# templatefuncs.py - common template functions
+#
+# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+import re
+
+from .i18n import _
+from . import (
+ color,
+ encoding,
+ error,
+ minirst,
+ obsutil,
+ pycompat,
+ registrar,
+ revset as revsetmod,
+ revsetlang,
+ scmutil,
+ templatefilters,
+ templatekw,
+ templateutil,
+ util,
+)
+from .utils import dateutil
+
+evalrawexp = templateutil.evalrawexp
+evalfuncarg = templateutil.evalfuncarg
+evalboolean = templateutil.evalboolean
+evalinteger = templateutil.evalinteger
+evalstring = templateutil.evalstring
+evalstringliteral = templateutil.evalstringliteral
+evalastype = templateutil.evalastype
+
+# dict of template built-in functions
+funcs = {}
+templatefunc = registrar.templatefunc(funcs)
+
+@templatefunc('date(date[, fmt])')
+def date(context, mapping, args):
+ """Format a date. See :hg:`help dates` for formatting
+ strings. The default is a Unix date format, including the timezone:
+ "Mon Sep 04 15:13:13 2006 0700"."""
+ if not (1 <= len(args) <= 2):
+ # i18n: "date" is a keyword
+ raise error.ParseError(_("date expects one or two arguments"))
+
+ date = evalfuncarg(context, mapping, args[0])
+ fmt = None
+ if len(args) == 2:
+ fmt = evalstring(context, mapping, args[1])
+ try:
+ if fmt is None:
+ return dateutil.datestr(date)
+ else:
+ return dateutil.datestr(date, fmt)
+ except (TypeError, ValueError):
+ # i18n: "date" is a keyword
+ raise error.ParseError(_("date expects a date information"))
+
+@templatefunc('dict([[key=]value...])', argspec='*args **kwargs')
+def dict_(context, mapping, args):
+ """Construct a dict from key-value pairs. A key may be omitted if
+ a value expression can provide an unambiguous name."""
+ data = util.sortdict()
+
+ for v in args['args']:
+ k = templateutil.findsymbolicname(v)
+ if not k:
+ raise error.ParseError(_('dict key cannot be inferred'))
+ if k in data or k in args['kwargs']:
+ raise error.ParseError(_("duplicated dict key '%s' inferred") % k)
+ data[k] = evalfuncarg(context, mapping, v)
+
+ data.update((k, evalfuncarg(context, mapping, v))
+ for k, v in args['kwargs'].iteritems())
+ return templateutil.hybriddict(data)
+
+@templatefunc('diff([includepattern [, excludepattern]])')
+def diff(context, mapping, args):
+ """Show a diff, optionally
+ specifying files to include or exclude."""
+ if len(args) > 2:
+ # i18n: "diff" is a keyword
+ raise error.ParseError(_("diff expects zero, one, or two arguments"))
+
+ def getpatterns(i):
+ if i < len(args):
+ s = evalstring(context, mapping, args[i]).strip()
+ if s:
+ return [s]
+ return []
+
+ ctx = context.resource(mapping, 'ctx')
+ chunks = ctx.diff(match=ctx.match([], getpatterns(0), getpatterns(1)))
+
+ return ''.join(chunks)
+
+@templatefunc('extdata(source)', argspec='source')
+def extdata(context, mapping, args):
+ """Show a text read from the specified extdata source. (EXPERIMENTAL)"""
+ if 'source' not in args:
+ # i18n: "extdata" is a keyword
+ raise error.ParseError(_('extdata expects one argument'))
+
+ source = evalstring(context, mapping, args['source'])
+ cache = context.resource(mapping, 'cache').setdefault('extdata', {})
+ ctx = context.resource(mapping, 'ctx')
+ if source in cache:
+ data = cache[source]
+ else:
+ data = cache[source] = scmutil.extdatasource(ctx.repo(), source)
+ return data.get(ctx.rev(), '')
+
+@templatefunc('files(pattern)')
+def files(context, mapping, args):
+ """All files of the current changeset matching the pattern. See
+ :hg:`help patterns`."""
+ if not len(args) == 1:
+ # i18n: "files" is a keyword
+ raise error.ParseError(_("files expects one argument"))
+
+ raw = evalstring(context, mapping, args[0])
+ ctx = context.resource(mapping, 'ctx')
+ m = ctx.match([raw])
+ files = list(ctx.matches(m))
+ return templateutil.compatlist(context, mapping, "file", files)
+
+@templatefunc('fill(text[, width[, initialident[, hangindent]]])')
+def fill(context, mapping, args):
+ """Fill many
+ paragraphs with optional indentation. See the "fill" filter."""
+ if not (1 <= len(args) <= 4):
+ # i18n: "fill" is a keyword
+ raise error.ParseError(_("fill expects one to four arguments"))
+
+ text = evalstring(context, mapping, args[0])
+ width = 76
+ initindent = ''
+ hangindent = ''
+ if 2 <= len(args) <= 4:
+ width = evalinteger(context, mapping, args[1],
+ # i18n: "fill" is a keyword
+ _("fill expects an integer width"))
+ try:
+ initindent = evalstring(context, mapping, args[2])
+ hangindent = evalstring(context, mapping, args[3])
+ except IndexError:
+ pass
+
+ return templatefilters.fill(text, width, initindent, hangindent)
+
+@templatefunc('formatnode(node)')
+def formatnode(context, mapping, args):
+ """Obtain the preferred form of a changeset hash. (DEPRECATED)"""
+ if len(args) != 1:
+ # i18n: "formatnode" is a keyword
+ raise error.ParseError(_("formatnode expects one argument"))
+
+ ui = context.resource(mapping, 'ui')
+ node = evalstring(context, mapping, args[0])
+ if ui.debugflag:
+ return node
+ return templatefilters.short(node)
+
+@templatefunc('pad(text, width[, fillchar=\' \'[, left=False]])',
+ argspec='text width fillchar left')
+def pad(context, mapping, args):
+ """Pad text with a
+ fill character."""
+ if 'text' not in args or 'width' not in args:
+ # i18n: "pad" is a keyword
+ raise error.ParseError(_("pad() expects two to four arguments"))
+
+ width = evalinteger(context, mapping, args['width'],
+ # i18n: "pad" is a keyword
+ _("pad() expects an integer width"))
+
+ text = evalstring(context, mapping, args['text'])
+
+ left = False
+ fillchar = ' '
+ if 'fillchar' in args:
+ fillchar = evalstring(context, mapping, args['fillchar'])
+ if len(color.stripeffects(fillchar)) != 1:
+ # i18n: "pad" is a keyword
+ raise error.ParseError(_("pad() expects a single fill character"))
+ if 'left' in args:
+ left = evalboolean(context, mapping, args['left'])
+
+ fillwidth = width - encoding.colwidth(color.stripeffects(text))
+ if fillwidth <= 0:
+ return text
+ if left:
+ return fillchar * fillwidth + text
+ else:
+ return text + fillchar * fillwidth
+
+@templatefunc('indent(text, indentchars[, firstline])')
+def indent(context, mapping, args):
+ """Indents all non-empty lines
+ with the characters given in the indentchars string. An optional
+ third parameter will override the indent for the first line only
+ if present."""
+ if not (2 <= len(args) <= 3):
+ # i18n: "indent" is a keyword
+ raise error.ParseError(_("indent() expects two or three arguments"))
+
+ text = evalstring(context, mapping, args[0])
+ indent = evalstring(context, mapping, args[1])
+
+ if len(args) == 3:
+ firstline = evalstring(context, mapping, args[2])
+ else:
+ firstline = indent
+
+ # the indent function doesn't indent the first line, so we do it here
+ return templatefilters.indent(firstline + text, indent)
+
+@templatefunc('get(dict, key)')
+def get(context, mapping, args):
+ """Get an attribute/key from an object. Some keywords
+ are complex types. This function allows you to obtain the value of an
+ attribute on these types."""
+ if len(args) != 2:
+ # i18n: "get" is a keyword
+ raise error.ParseError(_("get() expects two arguments"))
+
+ dictarg = evalfuncarg(context, mapping, args[0])
+ if not util.safehasattr(dictarg, 'get'):
+ # i18n: "get" is a keyword
+ raise error.ParseError(_("get() expects a dict as first argument"))
+
+ key = evalfuncarg(context, mapping, args[1])
+ return templateutil.getdictitem(dictarg, key)
+
+@templatefunc('if(expr, then[, else])')
+def if_(context, mapping, args):
+ """Conditionally execute based on the result of
+ an expression."""
+ if not (2 <= len(args) <= 3):
+ # i18n: "if" is a keyword
+ raise error.ParseError(_("if expects two or three arguments"))
+
+ test = evalboolean(context, mapping, args[0])
+ if test:
+ return evalrawexp(context, mapping, args[1])
+ elif len(args) == 3:
+ return evalrawexp(context, mapping, args[2])
+
+@templatefunc('ifcontains(needle, haystack, then[, else])')
+def ifcontains(context, mapping, args):
+ """Conditionally execute based
+ on whether the item "needle" is in "haystack"."""
+ if not (3 <= len(args) <= 4):
+ # i18n: "ifcontains" is a keyword
+ raise error.ParseError(_("ifcontains expects three or four arguments"))
+
+ haystack = evalfuncarg(context, mapping, args[1])
+ try:
+ needle = evalastype(context, mapping, args[0],
+ getattr(haystack, 'keytype', None) or bytes)
+ found = (needle in haystack)
+ except error.ParseError:
+ found = False
+
+ if found:
+ return evalrawexp(context, mapping, args[2])
+ elif len(args) == 4:
+ return evalrawexp(context, mapping, args[3])
+
+@templatefunc('ifeq(expr1, expr2, then[, else])')
+def ifeq(context, mapping, args):
+ """Conditionally execute based on
+ whether 2 items are equivalent."""
+ if not (3 <= len(args) <= 4):
+ # i18n: "ifeq" is a keyword
+ raise error.ParseError(_("ifeq expects three or four arguments"))
+
+ test = evalstring(context, mapping, args[0])
+ match = evalstring(context, mapping, args[1])
+ if test == match:
+ return evalrawexp(context, mapping, args[2])
+ elif len(args) == 4:
+ return evalrawexp(context, mapping, args[3])
+
+@templatefunc('join(list, sep)')
+def join(context, mapping, args):
+ """Join items in a list with a delimiter."""
+ if not (1 <= len(args) <= 2):
+ # i18n: "join" is a keyword
+ raise error.ParseError(_("join expects one or two arguments"))
+
+ # TODO: perhaps this should be evalfuncarg(), but it can't because hgweb
+ # abuses generator as a keyword that returns a list of dicts.
+ joinset = evalrawexp(context, mapping, args[0])
+ joinset = templateutil.unwrapvalue(joinset)
+ joinfmt = getattr(joinset, 'joinfmt', pycompat.identity)
+ joiner = " "
+ if len(args) > 1:
+ joiner = evalstring(context, mapping, args[1])
+
+ first = True
+ for x in pycompat.maybebytestr(joinset):
+ if first:
+ first = False
+ else:
+ yield joiner
+ yield joinfmt(x)
+
+@templatefunc('label(label, expr)')
+def label(context, mapping, args):
+ """Apply a label to generated content. Content with
+ a label applied can result in additional post-processing, such as
+ automatic colorization."""
+ if len(args) != 2:
+ # i18n: "label" is a keyword
+ raise error.ParseError(_("label expects two arguments"))
+
+ ui = context.resource(mapping, 'ui')
+ thing = evalstring(context, mapping, args[1])
+ # preserve unknown symbol as literal so effects like 'red', 'bold',
+ # etc. don't need to be quoted
+ label = evalstringliteral(context, mapping, args[0])
+
+ return ui.label(thing, label)
+
+@templatefunc('latesttag([pattern])')
+def latesttag(context, mapping, args):
+ """The global tags matching the given pattern on the
+ most recent globally tagged ancestor of this changeset.
+ If no such tags exist, the "{tag}" template resolves to
+ the string "null"."""
+ if len(args) > 1:
+ # i18n: "latesttag" is a keyword
+ raise error.ParseError(_("latesttag expects at most one argument"))
+
+ pattern = None
+ if len(args) == 1:
+ pattern = evalstring(context, mapping, args[0])
+ return templatekw.showlatesttags(context, mapping, pattern)
+
+@templatefunc('localdate(date[, tz])')
+def localdate(context, mapping, args):
+ """Converts a date to the specified timezone.
+ The default is local date."""
+ if not (1 <= len(args) <= 2):
+ # i18n: "localdate" is a keyword
+ raise error.ParseError(_("localdate expects one or two arguments"))
+
+ date = evalfuncarg(context, mapping, args[0])
+ try:
+ date = dateutil.parsedate(date)
+ except AttributeError: # not str nor date tuple
+ # i18n: "localdate" is a keyword
+ raise error.ParseError(_("localdate expects a date information"))
+ if len(args) >= 2:
+ tzoffset = None
+ tz = evalfuncarg(context, mapping, args[1])
+ if isinstance(tz, bytes):
+ tzoffset, remainder = dateutil.parsetimezone(tz)
+ if remainder:
+ tzoffset = None
+ if tzoffset is None:
+ try:
+ tzoffset = int(tz)
+ except (TypeError, ValueError):
+ # i18n: "localdate" is a keyword
+ raise error.ParseError(_("localdate expects a timezone"))
+ else:
+ tzoffset = dateutil.makedate()[1]
+ return (date[0], tzoffset)
+
+@templatefunc('max(iterable)')
+def max_(context, mapping, args, **kwargs):
+ """Return the max of an iterable"""
+ if len(args) != 1:
+ # i18n: "max" is a keyword
+ raise error.ParseError(_("max expects one argument"))
+
+ iterable = evalfuncarg(context, mapping, args[0])
+ try:
+ x = max(pycompat.maybebytestr(iterable))
+ except (TypeError, ValueError):
+ # i18n: "max" is a keyword
+ raise error.ParseError(_("max first argument should be an iterable"))
+ return templateutil.wraphybridvalue(iterable, x, x)
+
+@templatefunc('min(iterable)')
+def min_(context, mapping, args, **kwargs):
+ """Return the min of an iterable"""
+ if len(args) != 1:
+ # i18n: "min" is a keyword
+ raise error.ParseError(_("min expects one argument"))
+
+ iterable = evalfuncarg(context, mapping, args[0])
+ try:
+ x = min(pycompat.maybebytestr(iterable))
+ except (TypeError, ValueError):
+ # i18n: "min" is a keyword
+ raise error.ParseError(_("min first argument should be an iterable"))
+ return templateutil.wraphybridvalue(iterable, x, x)
+
+@templatefunc('mod(a, b)')
+def mod(context, mapping, args):
+    """Calculate a mod b such that (a / b) * b + a mod b == a"""
+ if not len(args) == 2:
+ # i18n: "mod" is a keyword
+ raise error.ParseError(_("mod expects two arguments"))
+
+ func = lambda a, b: a % b
+ return templateutil.runarithmetic(context, mapping,
+ (func, args[0], args[1]))
+
+@templatefunc('obsfateoperations(markers)')
+def obsfateoperations(context, mapping, args):
+ """Compute obsfate related information based on markers (EXPERIMENTAL)"""
+ if len(args) != 1:
+ # i18n: "obsfateoperations" is a keyword
+ raise error.ParseError(_("obsfateoperations expects one argument"))
+
+ markers = evalfuncarg(context, mapping, args[0])
+
+ try:
+ data = obsutil.markersoperations(markers)
+ return templateutil.hybridlist(data, name='operation')
+ except (TypeError, KeyError):
+ # i18n: "obsfateoperations" is a keyword
+ errmsg = _("obsfateoperations first argument should be an iterable")
+ raise error.ParseError(errmsg)
+
+@templatefunc('obsfatedate(markers)')
+def obsfatedate(context, mapping, args):
+ """Compute obsfate related information based on markers (EXPERIMENTAL)"""
+ if len(args) != 1:
+ # i18n: "obsfatedate" is a keyword
+ raise error.ParseError(_("obsfatedate expects one argument"))
+
+ markers = evalfuncarg(context, mapping, args[0])
+
+ try:
+ data = obsutil.markersdates(markers)
+ return templateutil.hybridlist(data, name='date', fmt='%d %d')
+ except (TypeError, KeyError):
+ # i18n: "obsfatedate" is a keyword
+ errmsg = _("obsfatedate first argument should be an iterable")
+ raise error.ParseError(errmsg)
+
+@templatefunc('obsfateusers(markers)')
+def obsfateusers(context, mapping, args):
+ """Compute obsfate related information based on markers (EXPERIMENTAL)"""
+ if len(args) != 1:
+ # i18n: "obsfateusers" is a keyword
+ raise error.ParseError(_("obsfateusers expects one argument"))
+
+ markers = evalfuncarg(context, mapping, args[0])
+
+ try:
+ data = obsutil.markersusers(markers)
+ return templateutil.hybridlist(data, name='user')
+ except (TypeError, KeyError, ValueError):
+ # i18n: "obsfateusers" is a keyword
+        msg = _("obsfateusers first argument should be an iterable of "
+                "obsmarkers")
+ raise error.ParseError(msg)
+
+@templatefunc('obsfateverb(successors, markers)')
+def obsfateverb(context, mapping, args):
+ """Compute obsfate related information based on successors (EXPERIMENTAL)"""
+ if len(args) != 2:
+ # i18n: "obsfateverb" is a keyword
+ raise error.ParseError(_("obsfateverb expects two arguments"))
+
+ successors = evalfuncarg(context, mapping, args[0])
+ markers = evalfuncarg(context, mapping, args[1])
+
+ try:
+ return obsutil.obsfateverb(successors, markers)
+ except TypeError:
+ # i18n: "obsfateverb" is a keyword
+ errmsg = _("obsfateverb first argument should be countable")
+ raise error.ParseError(errmsg)
+
+@templatefunc('relpath(path)')
+def relpath(context, mapping, args):
+ """Convert a repository-absolute path into a filesystem path relative to
+ the current working directory."""
+ if len(args) != 1:
+ # i18n: "relpath" is a keyword
+ raise error.ParseError(_("relpath expects one argument"))
+
+ repo = context.resource(mapping, 'ctx').repo()
+ path = evalstring(context, mapping, args[0])
+ return repo.pathto(path)
+
+@templatefunc('revset(query[, formatargs...])')
+def revset(context, mapping, args):
+ """Execute a revision set query. See
+ :hg:`help revset`."""
+ if not len(args) > 0:
+ # i18n: "revset" is a keyword
+ raise error.ParseError(_("revset expects one or more arguments"))
+
+ raw = evalstring(context, mapping, args[0])
+ ctx = context.resource(mapping, 'ctx')
+ repo = ctx.repo()
+
+ def query(expr):
+ m = revsetmod.match(repo.ui, expr, repo=repo)
+ return m(repo)
+
+ if len(args) > 1:
+ formatargs = [evalfuncarg(context, mapping, a) for a in args[1:]]
+ revs = query(revsetlang.formatspec(raw, *formatargs))
+ revs = list(revs)
+ else:
+ cache = context.resource(mapping, 'cache')
+ revsetcache = cache.setdefault("revsetcache", {})
+ if raw in revsetcache:
+ revs = revsetcache[raw]
+ else:
+ revs = query(raw)
+ revs = list(revs)
+ revsetcache[raw] = revs
+ return templatekw.showrevslist(context, mapping, "revision", revs)
+
+@templatefunc('rstdoc(text, style)')
+def rstdoc(context, mapping, args):
+ """Format reStructuredText."""
+ if len(args) != 2:
+ # i18n: "rstdoc" is a keyword
+ raise error.ParseError(_("rstdoc expects two arguments"))
+
+ text = evalstring(context, mapping, args[0])
+ style = evalstring(context, mapping, args[1])
+
+ return minirst.format(text, style=style, keep=['verbose'])
+
+@templatefunc('separate(sep, args)', argspec='sep *args')
+def separate(context, mapping, args):
+ """Add a separator between non-empty arguments."""
+ if 'sep' not in args:
+ # i18n: "separate" is a keyword
+ raise error.ParseError(_("separate expects at least one argument"))
+
+ sep = evalstring(context, mapping, args['sep'])
+ first = True
+ for arg in args['args']:
+ argstr = evalstring(context, mapping, arg)
+ if not argstr:
+ continue
+ if first:
+ first = False
+ else:
+ yield sep
+ yield argstr
+
+@templatefunc('shortest(node, minlength=4)')
+def shortest(context, mapping, args):
+ """Obtain the shortest representation of
+ a node."""
+ if not (1 <= len(args) <= 2):
+ # i18n: "shortest" is a keyword
+ raise error.ParseError(_("shortest() expects one or two arguments"))
+
+ node = evalstring(context, mapping, args[0])
+
+ minlength = 4
+ if len(args) > 1:
+ minlength = evalinteger(context, mapping, args[1],
+ # i18n: "shortest" is a keyword
+ _("shortest() expects an integer minlength"))
+
+ # _partialmatch() of filtered changelog could take O(len(repo)) time,
+ # which would be unacceptably slow. so we look for hash collision in
+ # unfiltered space, which means some hashes may be slightly longer.
+ cl = context.resource(mapping, 'ctx')._repo.unfiltered().changelog
+ return cl.shortest(node, minlength)
+
+@templatefunc('strip(text[, chars])')
+def strip(context, mapping, args):
+ """Strip characters from a string. By default,
+ strips all leading and trailing whitespace."""
+ if not (1 <= len(args) <= 2):
+ # i18n: "strip" is a keyword
+ raise error.ParseError(_("strip expects one or two arguments"))
+
+ text = evalstring(context, mapping, args[0])
+ if len(args) == 2:
+ chars = evalstring(context, mapping, args[1])
+ return text.strip(chars)
+ return text.strip()
+
+@templatefunc('sub(pattern, replacement, expression)')
+def sub(context, mapping, args):
+ """Perform text substitution
+ using regular expressions."""
+ if len(args) != 3:
+ # i18n: "sub" is a keyword
+ raise error.ParseError(_("sub expects three arguments"))
+
+ pat = evalstring(context, mapping, args[0])
+ rpl = evalstring(context, mapping, args[1])
+ src = evalstring(context, mapping, args[2])
+ try:
+ patre = re.compile(pat)
+ except re.error:
+ # i18n: "sub" is a keyword
+ raise error.ParseError(_("sub got an invalid pattern: %s") % pat)
+ try:
+ yield patre.sub(rpl, src)
+ except re.error:
+ # i18n: "sub" is a keyword
+ raise error.ParseError(_("sub got an invalid replacement: %s") % rpl)
+
+@templatefunc('startswith(pattern, text)')
+def startswith(context, mapping, args):
+ """Returns the value from the "text" argument
+ if it begins with the content from the "pattern" argument."""
+ if len(args) != 2:
+ # i18n: "startswith" is a keyword
+ raise error.ParseError(_("startswith expects two arguments"))
+
+ patn = evalstring(context, mapping, args[0])
+ text = evalstring(context, mapping, args[1])
+ if text.startswith(patn):
+ return text
+ return ''
+
+@templatefunc('word(number, text[, separator])')
+def word(context, mapping, args):
+ """Return the nth word from a string."""
+ if not (2 <= len(args) <= 3):
+ # i18n: "word" is a keyword
+ raise error.ParseError(_("word expects two or three arguments, got %d")
+ % len(args))
+
+ num = evalinteger(context, mapping, args[0],
+ # i18n: "word" is a keyword
+ _("word expects an integer index"))
+ text = evalstring(context, mapping, args[1])
+ if len(args) == 3:
+ splitter = evalstring(context, mapping, args[2])
+ else:
+ splitter = None
+
+ tokens = text.split(splitter)
+ if num >= len(tokens) or num < -len(tokens):
+ return ''
+ else:
+ return tokens[num]
+
+def loadfunction(ui, extname, registrarobj):
+ """Load template function from specified registrarobj
+ """
+ for name, func in registrarobj._table.iteritems():
+ funcs[name] = func
+
+# tell hggettext to extract docstrings from these functions:
+i18nfunctions = funcs.values()
--- a/mercurial/templatekw.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/templatekw.py Mon Mar 19 08:07:18 2018 -0700
@@ -23,202 +23,43 @@
pycompat,
registrar,
scmutil,
+ templateutil,
util,
)
-class _hybrid(object):
- """Wrapper for list or dict to support legacy template
-
- This class allows us to handle both:
- - "{files}" (legacy command-line-specific list hack) and
- - "{files % '{file}\n'}" (hgweb-style with inlining and function support)
- and to access raw values:
- - "{ifcontains(file, files, ...)}", "{ifcontains(key, extras, ...)}"
- - "{get(extras, key)}"
- - "{files|json}"
- """
-
- def __init__(self, gen, values, makemap, joinfmt, keytype=None):
- if gen is not None:
- self.gen = gen # generator or function returning generator
- self._values = values
- self._makemap = makemap
- self.joinfmt = joinfmt
- self.keytype = keytype # hint for 'x in y' where type(x) is unresolved
- def gen(self):
- """Default generator to stringify this as {join(self, ' ')}"""
- for i, x in enumerate(self._values):
- if i > 0:
- yield ' '
- yield self.joinfmt(x)
- def itermaps(self):
- makemap = self._makemap
- for x in self._values:
- yield makemap(x)
- def __contains__(self, x):
- return x in self._values
- def __getitem__(self, key):
- return self._values[key]
- def __len__(self):
- return len(self._values)
- def __iter__(self):
- return iter(self._values)
- def __getattr__(self, name):
- if name not in ('get', 'items', 'iteritems', 'iterkeys', 'itervalues',
- 'keys', 'values'):
- raise AttributeError(name)
- return getattr(self._values, name)
-
-class _mappable(object):
- """Wrapper for non-list/dict object to support map operation
-
- This class allows us to handle both:
- - "{manifest}"
- - "{manifest % '{rev}:{node}'}"
- - "{manifest.rev}"
-
- Unlike a _hybrid, this does not simulate the behavior of the underling
- value. Use unwrapvalue() or unwraphybrid() to obtain the inner object.
- """
-
- def __init__(self, gen, key, value, makemap):
- if gen is not None:
- self.gen = gen # generator or function returning generator
- self._key = key
- self._value = value # may be generator of strings
- self._makemap = makemap
-
- def gen(self):
- yield pycompat.bytestr(self._value)
-
- def tomap(self):
- return self._makemap(self._key)
-
- def itermaps(self):
- yield self.tomap()
-
-def hybriddict(data, key='key', value='value', fmt='%s=%s', gen=None):
- """Wrap data to support both dict-like and string-like operations"""
- return _hybrid(gen, data, lambda k: {key: k, value: data[k]},
- lambda k: fmt % (k, data[k]))
-
-def hybridlist(data, name, fmt='%s', gen=None):
- """Wrap data to support both list-like and string-like operations"""
- return _hybrid(gen, data, lambda x: {name: x}, lambda x: fmt % x)
-
-def unwraphybrid(thing):
- """Return an object which can be stringified possibly by using a legacy
- template"""
- gen = getattr(thing, 'gen', None)
- if gen is None:
- return thing
- if callable(gen):
- return gen()
- return gen
-
-def unwrapvalue(thing):
- """Move the inner value object out of the wrapper"""
- if not util.safehasattr(thing, '_value'):
- return thing
- return thing._value
-
-def wraphybridvalue(container, key, value):
- """Wrap an element of hybrid container to be mappable
-
- The key is passed to the makemap function of the given container, which
- should be an item generated by iter(container).
- """
- makemap = getattr(container, '_makemap', None)
- if makemap is None:
- return value
- if util.safehasattr(value, '_makemap'):
- # a nested hybrid list/dict, which has its own way of map operation
- return value
- return _mappable(None, key, value, makemap)
+_hybrid = templateutil.hybrid
+_mappable = templateutil.mappable
+_showlist = templateutil._showlist
+hybriddict = templateutil.hybriddict
+hybridlist = templateutil.hybridlist
+compatdict = templateutil.compatdict
+compatlist = templateutil.compatlist
def showdict(name, data, mapping, plural=None, key='key', value='value',
- fmt='%s=%s', separator=' '):
+ fmt=None, separator=' '):
+ ui = mapping.get('ui')
+ if ui:
+ ui.deprecwarn("templatekw.showdict() is deprecated, use "
+ "templateutil.compatdict()", '4.6')
c = [{key: k, value: v} for k, v in data.iteritems()]
- f = _showlist(name, c, mapping, plural, separator)
+ f = _showlist(name, c, mapping['templ'], mapping, plural, separator)
return hybriddict(data, key=key, value=value, fmt=fmt, gen=f)
def showlist(name, values, mapping, plural=None, element=None, separator=' '):
+ ui = mapping.get('ui')
+ if ui:
+ ui.deprecwarn("templatekw.showlist() is deprecated, use "
+ "templateutil.compatlist()", '4.6')
if not element:
element = name
- f = _showlist(name, values, mapping, plural, separator)
+ f = _showlist(name, values, mapping['templ'], mapping, plural, separator)
return hybridlist(values, name=element, gen=f)
-def _showlist(name, values, mapping, plural=None, separator=' '):
- '''expand set of values.
- name is name of key in template map.
- values is list of strings or dicts.
- plural is plural of name, if not simply name + 's'.
- separator is used to join values as a string
-
- expansion works like this, given name 'foo'.
-
- if values is empty, expand 'no_foos'.
-
- if 'foo' not in template map, return values as a string,
- joined by 'separator'.
-
- expand 'start_foos'.
-
- for each value, expand 'foo'. if 'last_foo' in template
- map, expand it instead of 'foo' for last key.
-
- expand 'end_foos'.
- '''
- templ = mapping['templ']
- strmapping = pycompat.strkwargs(mapping)
- if not plural:
- plural = name + 's'
- if not values:
- noname = 'no_' + plural
- if noname in templ:
- yield templ(noname, **strmapping)
- return
- if name not in templ:
- if isinstance(values[0], bytes):
- yield separator.join(values)
- else:
- for v in values:
- yield dict(v, **strmapping)
- return
- startname = 'start_' + plural
- if startname in templ:
- yield templ(startname, **strmapping)
- vmapping = mapping.copy()
- def one(v, tag=name):
- try:
- vmapping.update(v)
- except (AttributeError, ValueError):
- try:
- for a, b in v:
- vmapping[a] = b
- except ValueError:
- vmapping[name] = v
- return templ(tag, **pycompat.strkwargs(vmapping))
- lastname = 'last_' + name
- if lastname in templ:
- last = values.pop()
- else:
- last = None
- for v in values:
- yield one(v)
- if last is not None:
- yield one(last, tag=lastname)
- endname = 'end_' + plural
- if endname in templ:
- yield templ(endname, **strmapping)
-
-def getfiles(repo, ctx, revcache):
- if 'files' not in revcache:
- revcache['files'] = repo.status(ctx.p1(), ctx)[:3]
- return revcache['files']
-
-def getlatesttags(repo, ctx, cache, pattern=None):
+def getlatesttags(context, mapping, pattern=None):
'''return date, distance and name for the latest tag of rev'''
+ repo = context.resource(mapping, 'repo')
+ ctx = context.resource(mapping, 'ctx')
+ cache = context.resource(mapping, 'cache')
cachename = 'latesttags'
if pattern is not None:
@@ -337,91 +178,92 @@
# filecopy is preserved for compatibility reasons
defaulttempl['filecopy'] = defaulttempl['file_copy']
-# keywords are callables like:
-# fn(repo, ctx, templ, cache, revcache, **args)
-# with:
-# repo - current repository instance
-# ctx - the changectx being displayed
-# templ - the templater instance
-# cache - a cache dictionary for the whole templater run
-# revcache - a cache dictionary for the current revision
+# keywords are callables (see registrar.templatekeyword for details)
keywords = {}
-
templatekeyword = registrar.templatekeyword(keywords)
-@templatekeyword('author')
-def showauthor(repo, ctx, templ, **args):
+@templatekeyword('author', requires={'ctx'})
+def showauthor(context, mapping):
"""String. The unmodified author of the changeset."""
+ ctx = context.resource(mapping, 'ctx')
return ctx.user()
-@templatekeyword('bisect')
-def showbisect(repo, ctx, templ, **args):
+@templatekeyword('bisect', requires={'repo', 'ctx'})
+def showbisect(context, mapping):
"""String. The changeset bisection status."""
+ repo = context.resource(mapping, 'repo')
+ ctx = context.resource(mapping, 'ctx')
return hbisect.label(repo, ctx.node())
-@templatekeyword('branch')
-def showbranch(**args):
+@templatekeyword('branch', requires={'ctx'})
+def showbranch(context, mapping):
"""String. The name of the branch on which the changeset was
committed.
"""
- return args[r'ctx'].branch()
+ ctx = context.resource(mapping, 'ctx')
+ return ctx.branch()
-@templatekeyword('branches')
-def showbranches(**args):
+@templatekeyword('branches', requires={'ctx', 'templ'})
+def showbranches(context, mapping):
"""List of strings. The name of the branch on which the
changeset was committed. Will be empty if the branch name was
default. (DEPRECATED)
"""
- args = pycompat.byteskwargs(args)
- branch = args['ctx'].branch()
+ ctx = context.resource(mapping, 'ctx')
+ branch = ctx.branch()
if branch != 'default':
- return showlist('branch', [branch], args, plural='branches')
- return showlist('branch', [], args, plural='branches')
+ return compatlist(context, mapping, 'branch', [branch],
+ plural='branches')
+ return compatlist(context, mapping, 'branch', [], plural='branches')
-@templatekeyword('bookmarks')
-def showbookmarks(**args):
+@templatekeyword('bookmarks', requires={'repo', 'ctx', 'templ'})
+def showbookmarks(context, mapping):
"""List of strings. Any bookmarks associated with the
changeset. Also sets 'active', the name of the active bookmark.
"""
- args = pycompat.byteskwargs(args)
- repo = args['ctx']._repo
- bookmarks = args['ctx'].bookmarks()
+ repo = context.resource(mapping, 'repo')
+ ctx = context.resource(mapping, 'ctx')
+ templ = context.resource(mapping, 'templ')
+ bookmarks = ctx.bookmarks()
active = repo._activebookmark
makemap = lambda v: {'bookmark': v, 'active': active, 'current': active}
- f = _showlist('bookmark', bookmarks, args)
+ f = _showlist('bookmark', bookmarks, templ, mapping)
return _hybrid(f, bookmarks, makemap, pycompat.identity)
-@templatekeyword('children')
-def showchildren(**args):
+@templatekeyword('children', requires={'ctx', 'templ'})
+def showchildren(context, mapping):
"""List of strings. The children of the changeset."""
- args = pycompat.byteskwargs(args)
- ctx = args['ctx']
- childrevs = ['%d:%s' % (cctx, cctx) for cctx in ctx.children()]
- return showlist('children', childrevs, args, element='child')
+ ctx = context.resource(mapping, 'ctx')
+ childrevs = ['%d:%s' % (cctx.rev(), cctx) for cctx in ctx.children()]
+ return compatlist(context, mapping, 'children', childrevs, element='child')
# Deprecated, but kept alive for help generation a purpose.
-@templatekeyword('currentbookmark')
-def showcurrentbookmark(**args):
+@templatekeyword('currentbookmark', requires={'repo', 'ctx'})
+def showcurrentbookmark(context, mapping):
"""String. The active bookmark, if it is associated with the changeset.
(DEPRECATED)"""
- return showactivebookmark(**args)
+ return showactivebookmark(context, mapping)
-@templatekeyword('activebookmark')
-def showactivebookmark(**args):
+@templatekeyword('activebookmark', requires={'repo', 'ctx'})
+def showactivebookmark(context, mapping):
"""String. The active bookmark, if it is associated with the changeset."""
- active = args[r'repo']._activebookmark
- if active and active in args[r'ctx'].bookmarks():
+ repo = context.resource(mapping, 'repo')
+ ctx = context.resource(mapping, 'ctx')
+ active = repo._activebookmark
+ if active and active in ctx.bookmarks():
return active
return ''
-@templatekeyword('date')
-def showdate(repo, ctx, templ, **args):
+@templatekeyword('date', requires={'ctx'})
+def showdate(context, mapping):
"""Date information. The date when the changeset was committed."""
+ ctx = context.resource(mapping, 'ctx')
return ctx.date()
-@templatekeyword('desc')
-def showdescription(repo, ctx, templ, **args):
+@templatekeyword('desc', requires={'ctx'})
+def showdescription(context, mapping):
"""String. The text of the changeset description."""
+ ctx = context.resource(mapping, 'ctx')
s = ctx.description()
if isinstance(s, encoding.localstr):
# try hard to preserve utf-8 bytes
@@ -429,55 +271,65 @@
else:
return s.strip()
-@templatekeyword('diffstat')
-def showdiffstat(repo, ctx, templ, **args):
+@templatekeyword('diffstat', requires={'ctx'})
+def showdiffstat(context, mapping):
"""String. Statistics of changes with the following format:
"modified files: +added/-removed lines"
"""
+ ctx = context.resource(mapping, 'ctx')
stats = patch.diffstatdata(util.iterlines(ctx.diff(noprefix=False)))
maxname, maxtotal, adds, removes, binary = patch.diffstatsum(stats)
- return '%s: +%s/-%s' % (len(stats), adds, removes)
+ return '%d: +%d/-%d' % (len(stats), adds, removes)
-@templatekeyword('envvars')
-def showenvvars(repo, **args):
+@templatekeyword('envvars', requires={'ui', 'templ'})
+def showenvvars(context, mapping):
"""A dictionary of environment variables. (EXPERIMENTAL)"""
- args = pycompat.byteskwargs(args)
- env = repo.ui.exportableenviron()
+ ui = context.resource(mapping, 'ui')
+ env = ui.exportableenviron()
env = util.sortdict((k, env[k]) for k in sorted(env))
- return showdict('envvar', env, args, plural='envvars')
+ return compatdict(context, mapping, 'envvar', env, plural='envvars')
-@templatekeyword('extras')
-def showextras(**args):
+@templatekeyword('extras', requires={'ctx', 'templ'})
+def showextras(context, mapping):
"""List of dicts with key, value entries of the 'extras'
field of this changeset."""
- args = pycompat.byteskwargs(args)
- extras = args['ctx'].extra()
+ ctx = context.resource(mapping, 'ctx')
+ templ = context.resource(mapping, 'templ')
+ extras = ctx.extra()
extras = util.sortdict((k, extras[k]) for k in sorted(extras))
makemap = lambda k: {'key': k, 'value': extras[k]}
c = [makemap(k) for k in extras]
- f = _showlist('extra', c, args, plural='extras')
+ f = _showlist('extra', c, templ, mapping, plural='extras')
return _hybrid(f, extras, makemap,
lambda k: '%s=%s' % (k, util.escapestr(extras[k])))
-@templatekeyword('file_adds')
-def showfileadds(**args):
+def _showfilesbystat(context, mapping, name, index):
+ repo = context.resource(mapping, 'repo')
+ ctx = context.resource(mapping, 'ctx')
+ revcache = context.resource(mapping, 'revcache')
+ if 'files' not in revcache:
+ revcache['files'] = repo.status(ctx.p1(), ctx)[:3]
+ files = revcache['files'][index]
+ return compatlist(context, mapping, name, files, element='file')
+
+@templatekeyword('file_adds', requires={'repo', 'ctx', 'revcache', 'templ'})
+def showfileadds(context, mapping):
"""List of strings. Files added by this changeset."""
- args = pycompat.byteskwargs(args)
- repo, ctx, revcache = args['repo'], args['ctx'], args['revcache']
- return showlist('file_add', getfiles(repo, ctx, revcache)[1], args,
- element='file')
+ return _showfilesbystat(context, mapping, 'file_add', 1)
-@templatekeyword('file_copies')
-def showfilecopies(**args):
+@templatekeyword('file_copies',
+ requires={'repo', 'ctx', 'cache', 'revcache', 'templ'})
+def showfilecopies(context, mapping):
"""List of strings. Files copied in this changeset with
their sources.
"""
- args = pycompat.byteskwargs(args)
- cache, ctx = args['cache'], args['ctx']
- copies = args['revcache'].get('copies')
+ repo = context.resource(mapping, 'repo')
+ ctx = context.resource(mapping, 'ctx')
+ cache = context.resource(mapping, 'cache')
+ copies = context.resource(mapping, 'revcache').get('copies')
if copies is None:
if 'getrenamed' not in cache:
- cache['getrenamed'] = getrenamedfn(args['repo'])
+ cache['getrenamed'] = getrenamedfn(repo)
copies = []
getrenamed = cache['getrenamed']
for fn in ctx.files():
@@ -486,51 +338,51 @@
copies.append((fn, rename[0]))
copies = util.sortdict(copies)
- return showdict('file_copy', copies, args, plural='file_copies',
- key='name', value='source', fmt='%s (%s)')
+ return compatdict(context, mapping, 'file_copy', copies,
+ key='name', value='source', fmt='%s (%s)',
+ plural='file_copies')
# showfilecopiesswitch() displays file copies only if copy records are
# provided before calling the templater, usually with a --copies
# command line switch.
-@templatekeyword('file_copies_switch')
-def showfilecopiesswitch(**args):
+@templatekeyword('file_copies_switch', requires={'revcache', 'templ'})
+def showfilecopiesswitch(context, mapping):
"""List of strings. Like "file_copies" but displayed
only if the --copied switch is set.
"""
- args = pycompat.byteskwargs(args)
- copies = args['revcache'].get('copies') or []
+ copies = context.resource(mapping, 'revcache').get('copies') or []
copies = util.sortdict(copies)
- return showdict('file_copy', copies, args, plural='file_copies',
- key='name', value='source', fmt='%s (%s)')
+ return compatdict(context, mapping, 'file_copy', copies,
+ key='name', value='source', fmt='%s (%s)',
+ plural='file_copies')
-@templatekeyword('file_dels')
-def showfiledels(**args):
+@templatekeyword('file_dels', requires={'repo', 'ctx', 'revcache', 'templ'})
+def showfiledels(context, mapping):
"""List of strings. Files removed by this changeset."""
- args = pycompat.byteskwargs(args)
- repo, ctx, revcache = args['repo'], args['ctx'], args['revcache']
- return showlist('file_del', getfiles(repo, ctx, revcache)[2], args,
- element='file')
+ return _showfilesbystat(context, mapping, 'file_del', 2)
-@templatekeyword('file_mods')
-def showfilemods(**args):
+@templatekeyword('file_mods', requires={'repo', 'ctx', 'revcache', 'templ'})
+def showfilemods(context, mapping):
"""List of strings. Files modified by this changeset."""
- args = pycompat.byteskwargs(args)
- repo, ctx, revcache = args['repo'], args['ctx'], args['revcache']
- return showlist('file_mod', getfiles(repo, ctx, revcache)[0], args,
- element='file')
+ return _showfilesbystat(context, mapping, 'file_mod', 0)
-@templatekeyword('files')
-def showfiles(**args):
+@templatekeyword('files', requires={'ctx', 'templ'})
+def showfiles(context, mapping):
"""List of strings. All files modified, added, or removed by this
changeset.
"""
- args = pycompat.byteskwargs(args)
- return showlist('file', args['ctx'].files(), args)
+ ctx = context.resource(mapping, 'ctx')
+ return compatlist(context, mapping, 'file', ctx.files())
-@templatekeyword('graphnode')
-def showgraphnode(repo, ctx, **args):
+@templatekeyword('graphnode', requires={'repo', 'ctx'})
+def showgraphnode(context, mapping):
"""String. The character representing the changeset node in an ASCII
revision graph."""
+ repo = context.resource(mapping, 'repo')
+ ctx = context.resource(mapping, 'ctx')
+ return getgraphnode(repo, ctx)
+
+def getgraphnode(repo, ctx):
wpnodes = repo.dirstate.parents()
if wpnodes[1] == nullid:
wpnodes = wpnodes[:1]
@@ -545,33 +397,29 @@
else:
return 'o'
-@templatekeyword('graphwidth')
-def showgraphwidth(repo, ctx, templ, **args):
+@templatekeyword('graphwidth', requires=())
+def showgraphwidth(context, mapping):
"""Integer. The width of the graph drawn by 'log --graph' or zero."""
- # The value args['graphwidth'] will be this function, so we use an internal
- # name to pass the value through props into this function.
- return args.get('_graphwidth', 0)
+ # just hosts documentation; should be overridden by template mapping
+ return 0
-@templatekeyword('index')
-def showindex(**args):
+@templatekeyword('index', requires=())
+def showindex(context, mapping):
"""Integer. The current iteration of the loop. (0 indexed)"""
# just hosts documentation; should be overridden by template mapping
raise error.Abort(_("can't use index in this context"))
-@templatekeyword('latesttag')
-def showlatesttag(**args):
+@templatekeyword('latesttag', requires={'repo', 'ctx', 'cache', 'templ'})
+def showlatesttag(context, mapping):
"""List of strings. The global tags on the most recent globally
tagged ancestor of this changeset. If no such tags exist, the list
consists of the single string "null".
"""
- return showlatesttags(None, **args)
+ return showlatesttags(context, mapping, None)
-def showlatesttags(pattern, **args):
+def showlatesttags(context, mapping, pattern):
"""helper method for the latesttag keyword and function"""
- args = pycompat.byteskwargs(args)
- repo, ctx = args['repo'], args['ctx']
- cache = args['cache']
- latesttags = getlatesttags(repo, ctx, cache, pattern)
+ latesttags = getlatesttags(context, mapping, pattern)
# latesttag[0] is an implementation detail for sorting csets on different
# branches in a stable manner- it is the date the tagged cset was created,
@@ -584,25 +432,28 @@
}
tags = latesttags[2]
- f = _showlist('latesttag', tags, args, separator=':')
+ templ = context.resource(mapping, 'templ')
+ f = _showlist('latesttag', tags, templ, mapping, separator=':')
return _hybrid(f, tags, makemap, pycompat.identity)
-@templatekeyword('latesttagdistance')
-def showlatesttagdistance(repo, ctx, templ, cache, **args):
+@templatekeyword('latesttagdistance', requires={'repo', 'ctx', 'cache'})
+def showlatesttagdistance(context, mapping):
"""Integer. Longest path to the latest tag."""
- return getlatesttags(repo, ctx, cache)[1]
+ return getlatesttags(context, mapping)[1]
-@templatekeyword('changessincelatesttag')
-def showchangessincelatesttag(repo, ctx, templ, cache, **args):
+@templatekeyword('changessincelatesttag', requires={'repo', 'ctx', 'cache'})
+def showchangessincelatesttag(context, mapping):
"""Integer. All ancestors not in the latest tag."""
- latesttag = getlatesttags(repo, ctx, cache)[2][0]
+ mapping = mapping.copy()
+ mapping['tag'] = getlatesttags(context, mapping)[2][0]
+ return _showchangessincetag(context, mapping)
- return _showchangessincetag(repo, ctx, tag=latesttag, **args)
-
-def _showchangessincetag(repo, ctx, **args):
+def _showchangessincetag(context, mapping):
+ repo = context.resource(mapping, 'repo')
+ ctx = context.resource(mapping, 'ctx')
offset = 0
revs = [ctx.rev()]
- tag = args[r'tag']
+ tag = context.symbol(mapping, 'tag')
# The only() revset doesn't currently support wdir()
if ctx.rev() is None:
@@ -611,56 +462,59 @@
return len(repo.revs('only(%ld, %s)', revs, tag)) + offset
-@templatekeyword('manifest')
-def showmanifest(**args):
- repo, ctx, templ = args[r'repo'], args[r'ctx'], args[r'templ']
+# teach the templater that latesttags.changes uses the (context, mapping) API
+_showchangessincetag._requires = {'repo', 'ctx'}
+
+@templatekeyword('manifest', requires={'repo', 'ctx', 'templ'})
+def showmanifest(context, mapping):
+ repo = context.resource(mapping, 'repo')
+ ctx = context.resource(mapping, 'ctx')
+ templ = context.resource(mapping, 'templ')
mnode = ctx.manifestnode()
if mnode is None:
# just avoid crash, we might want to use the 'ff...' hash in future
return
mrev = repo.manifestlog._revlog.rev(mnode)
mhex = hex(mnode)
- args = args.copy()
- args.update({r'rev': mrev, r'node': mhex})
- f = templ('manifest', **args)
+ mapping = mapping.copy()
+ mapping.update({'rev': mrev, 'node': mhex})
+ f = templ.generate('manifest', mapping)
# TODO: perhaps 'ctx' should be dropped from mapping because manifest
# rev and node are completely different from changeset's.
return _mappable(f, None, f, lambda x: {'rev': mrev, 'node': mhex})
-@templatekeyword('obsfate')
-def showobsfate(**args):
+@templatekeyword('obsfate', requires={'ui', 'repo', 'ctx', 'templ'})
+def showobsfate(context, mapping):
# this function returns a list containing pre-formatted obsfate strings.
#
# This function will be replaced by templates fragments when we will have
# the verbosity templatekw available.
- succsandmarkers = showsuccsandmarkers(**args)
+ succsandmarkers = showsuccsandmarkers(context, mapping)
- args = pycompat.byteskwargs(args)
- ui = args['ui']
-
+ ui = context.resource(mapping, 'ui')
values = []
for x in succsandmarkers:
values.append(obsutil.obsfateprinter(x['successors'], x['markers'], ui))
- return showlist("fate", values, args)
+ return compatlist(context, mapping, "fate", values)
-def shownames(namespace, **args):
+def shownames(context, mapping, namespace):
"""helper method to generate a template keyword for a namespace"""
- args = pycompat.byteskwargs(args)
- ctx = args['ctx']
- repo = ctx.repo()
+ repo = context.resource(mapping, 'repo')
+ ctx = context.resource(mapping, 'ctx')
ns = repo.names[namespace]
names = ns.names(repo, ctx.node())
- return showlist(ns.templatename, names, args, plural=namespace)
+ return compatlist(context, mapping, ns.templatename, names,
+ plural=namespace)
-@templatekeyword('namespaces')
-def shownamespaces(**args):
+@templatekeyword('namespaces', requires={'repo', 'ctx', 'templ'})
+def shownamespaces(context, mapping):
"""Dict of lists. Names attached to this changeset per
namespace."""
- args = pycompat.byteskwargs(args)
- ctx = args['ctx']
- repo = ctx.repo()
+ repo = context.resource(mapping, 'repo')
+ ctx = context.resource(mapping, 'ctx')
+ templ = context.resource(mapping, 'templ')
namespaces = util.sortdict()
def makensmapfn(ns):
@@ -669,10 +523,10 @@
for k, ns in repo.names.iteritems():
names = ns.names(repo, ctx.node())
- f = _showlist('name', names, args)
+ f = _showlist('name', names, templ, mapping)
namespaces[k] = _hybrid(f, names, makensmapfn(ns), pycompat.identity)
- f = _showlist('namespace', list(namespaces), args)
+ f = _showlist('namespace', list(namespaces), templ, mapping)
def makemap(ns):
return {
@@ -684,24 +538,27 @@
return _hybrid(f, namespaces, makemap, pycompat.identity)
-@templatekeyword('node')
-def shownode(repo, ctx, templ, **args):
+@templatekeyword('node', requires={'ctx'})
+def shownode(context, mapping):
"""String. The changeset identification hash, as a 40 hexadecimal
digit string.
"""
+ ctx = context.resource(mapping, 'ctx')
return ctx.hex()
-@templatekeyword('obsolete')
-def showobsolete(repo, ctx, templ, **args):
+@templatekeyword('obsolete', requires={'ctx'})
+def showobsolete(context, mapping):
"""String. Whether the changeset is obsolete. (EXPERIMENTAL)"""
+ ctx = context.resource(mapping, 'ctx')
if ctx.obsolete():
return 'obsolete'
return ''
-@templatekeyword('peerurls')
-def showpeerurls(repo, **args):
+@templatekeyword('peerurls', requires={'repo'})
+def showpeerurls(context, mapping):
"""A dictionary of repository locations defined in the [paths] section
of your configuration file."""
+ repo = context.resource(mapping, 'repo')
# see commands.paths() for naming of dictionary keys
paths = repo.ui.paths
urls = util.sortdict((k, p.rawloc) for k, p in sorted(paths.iteritems()))
@@ -712,9 +569,11 @@
return d
return _hybrid(None, urls, makemap, lambda k: '%s=%s' % (k, urls[k]))
-@templatekeyword("predecessors")
-def showpredecessors(repo, ctx, **args):
+@templatekeyword("predecessors", requires={'repo', 'ctx'})
+def showpredecessors(context, mapping):
"""Returns the list if the closest visible successors. (EXPERIMENTAL)"""
+ repo = context.resource(mapping, 'repo')
+ ctx = context.resource(mapping, 'ctx')
predecessors = sorted(obsutil.closestpredecessors(repo, ctx.node()))
predecessors = map(hex, predecessors)
@@ -722,14 +581,21 @@
lambda x: {'ctx': repo[x], 'revcache': {}},
lambda x: scmutil.formatchangeid(repo[x]))
-@templatekeyword("successorssets")
-def showsuccessorssets(repo, ctx, **args):
+@templatekeyword('reporoot', requires={'repo'})
+def showreporoot(context, mapping):
+ """String. The root directory of the current repository."""
+ repo = context.resource(mapping, 'repo')
+ return repo.root
+
+@templatekeyword("successorssets", requires={'repo', 'ctx'})
+def showsuccessorssets(context, mapping):
"""Returns a string of sets of successors for a changectx. Format used
is: [ctx1, ctx2], [ctx3] if ctx has been splitted into ctx1 and ctx2
while also diverged into ctx3. (EXPERIMENTAL)"""
+ repo = context.resource(mapping, 'repo')
+ ctx = context.resource(mapping, 'ctx')
if not ctx.obsolete():
return ''
- args = pycompat.byteskwargs(args)
ssets = obsutil.successorssets(repo, ctx.node(), closest=True)
ssets = [[hex(n) for n in ss] for ss in ssets]
@@ -753,13 +619,16 @@
return _hybrid(gen(data), data, lambda x: {'successorset': x},
pycompat.identity)
-@templatekeyword("succsandmarkers")
-def showsuccsandmarkers(repo, ctx, **args):
+@templatekeyword("succsandmarkers", requires={'repo', 'ctx', 'templ'})
+def showsuccsandmarkers(context, mapping):
"""Returns a list of dict for each final successor of ctx. The dict
contains successors node id in "successors" keys and the list of
obs-markers from ctx to the set of successors in "markers".
(EXPERIMENTAL)
"""
+ repo = context.resource(mapping, 'repo')
+ ctx = context.resource(mapping, 'ctx')
+ templ = context.resource(mapping, 'templ')
values = obsutil.successorsandmarkers(repo, ctx)
@@ -790,86 +659,92 @@
data.append({'successors': successors, 'markers': finalmarkers})
- f = _showlist('succsandmarkers', data, args)
+ f = _showlist('succsandmarkers', data, templ, mapping)
return _hybrid(f, data, lambda x: x, pycompat.identity)
-@templatekeyword('p1rev')
-def showp1rev(repo, ctx, templ, **args):
+@templatekeyword('p1rev', requires={'ctx'})
+def showp1rev(context, mapping):
"""Integer. The repository-local revision number of the changeset's
first parent, or -1 if the changeset has no parents."""
+ ctx = context.resource(mapping, 'ctx')
return ctx.p1().rev()
-@templatekeyword('p2rev')
-def showp2rev(repo, ctx, templ, **args):
+@templatekeyword('p2rev', requires={'ctx'})
+def showp2rev(context, mapping):
"""Integer. The repository-local revision number of the changeset's
second parent, or -1 if the changeset has no second parent."""
+ ctx = context.resource(mapping, 'ctx')
return ctx.p2().rev()
-@templatekeyword('p1node')
-def showp1node(repo, ctx, templ, **args):
+@templatekeyword('p1node', requires={'ctx'})
+def showp1node(context, mapping):
"""String. The identification hash of the changeset's first parent,
as a 40 digit hexadecimal string. If the changeset has no parents, all
digits are 0."""
+ ctx = context.resource(mapping, 'ctx')
return ctx.p1().hex()
-@templatekeyword('p2node')
-def showp2node(repo, ctx, templ, **args):
+@templatekeyword('p2node', requires={'ctx'})
+def showp2node(context, mapping):
"""String. The identification hash of the changeset's second
parent, as a 40 digit hexadecimal string. If the changeset has no second
parent, all digits are 0."""
+ ctx = context.resource(mapping, 'ctx')
return ctx.p2().hex()
-@templatekeyword('parents')
-def showparents(**args):
+@templatekeyword('parents', requires={'repo', 'ctx', 'templ'})
+def showparents(context, mapping):
"""List of strings. The parents of the changeset in "rev:node"
format. If the changeset has only one "natural" parent (the predecessor
revision) nothing is shown."""
- args = pycompat.byteskwargs(args)
- repo = args['repo']
- ctx = args['ctx']
+ repo = context.resource(mapping, 'repo')
+ ctx = context.resource(mapping, 'ctx')
+ templ = context.resource(mapping, 'templ')
pctxs = scmutil.meaningfulparents(repo, ctx)
prevs = [p.rev() for p in pctxs]
parents = [[('rev', p.rev()),
('node', p.hex()),
('phase', p.phasestr())]
for p in pctxs]
- f = _showlist('parent', parents, args)
+ f = _showlist('parent', parents, templ, mapping)
return _hybrid(f, prevs, lambda x: {'ctx': repo[x], 'revcache': {}},
lambda x: scmutil.formatchangeid(repo[x]), keytype=int)
-@templatekeyword('phase')
-def showphase(repo, ctx, templ, **args):
+@templatekeyword('phase', requires={'ctx'})
+def showphase(context, mapping):
"""String. The changeset phase name."""
+ ctx = context.resource(mapping, 'ctx')
return ctx.phasestr()
-@templatekeyword('phaseidx')
-def showphaseidx(repo, ctx, templ, **args):
+@templatekeyword('phaseidx', requires={'ctx'})
+def showphaseidx(context, mapping):
"""Integer. The changeset phase index. (ADVANCED)"""
+ ctx = context.resource(mapping, 'ctx')
return ctx.phase()
-@templatekeyword('rev')
-def showrev(repo, ctx, templ, **args):
+@templatekeyword('rev', requires={'ctx'})
+def showrev(context, mapping):
"""Integer. The repository-local changeset revision number."""
+ ctx = context.resource(mapping, 'ctx')
return scmutil.intrev(ctx)
-def showrevslist(name, revs, **args):
+def showrevslist(context, mapping, name, revs):
"""helper to generate a list of revisions in which a mapped template will
be evaluated"""
- args = pycompat.byteskwargs(args)
- repo = args['ctx'].repo()
- f = _showlist(name, ['%d' % r for r in revs], args)
+ repo = context.resource(mapping, 'repo')
+ templ = context.resource(mapping, 'templ')
+ f = _showlist(name, ['%d' % r for r in revs], templ, mapping)
return _hybrid(f, revs,
lambda x: {name: x, 'ctx': repo[x], 'revcache': {}},
pycompat.identity, keytype=int)
-@templatekeyword('subrepos')
-def showsubrepos(**args):
+@templatekeyword('subrepos', requires={'ctx', 'templ'})
+def showsubrepos(context, mapping):
"""List of strings. Updated subrepositories in the changeset."""
- args = pycompat.byteskwargs(args)
- ctx = args['ctx']
+ ctx = context.resource(mapping, 'ctx')
substate = ctx.substate
if not substate:
- return showlist('subrepo', [], args)
+ return compatlist(context, mapping, 'subrepo', [])
psubstate = ctx.parents()[0].substate or {}
subrepos = []
for sub in substate:
@@ -878,46 +753,37 @@
for sub in psubstate:
if sub not in substate:
subrepos.append(sub) # removed in ctx
- return showlist('subrepo', sorted(subrepos), args)
+ return compatlist(context, mapping, 'subrepo', sorted(subrepos))
# don't remove "showtags" definition, even though namespaces will put
# a helper function for "tags" keyword into "keywords" map automatically,
# because online help text is built without namespaces initialization
-@templatekeyword('tags')
-def showtags(**args):
+@templatekeyword('tags', requires={'repo', 'ctx', 'templ'})
+def showtags(context, mapping):
"""List of strings. Any tags associated with the changeset."""
- return shownames('tags', **args)
-
-@templatekeyword('termwidth')
-def showtermwidth(repo, ctx, templ, **args):
- """Integer. The width of the current terminal."""
- return repo.ui.termwidth()
+ return shownames(context, mapping, 'tags')
-@templatekeyword('troubles')
-def showtroubles(repo, **args):
- """List of strings. Evolution troubles affecting the changeset.
- (DEPRECATED)
- """
- msg = ("'troubles' is deprecated, "
- "use 'instabilities'")
- repo.ui.deprecwarn(msg, '4.4')
+@templatekeyword('termwidth', requires={'ui'})
+def showtermwidth(context, mapping):
+ """Integer. The width of the current terminal."""
+ ui = context.resource(mapping, 'ui')
+ return ui.termwidth()
- return showinstabilities(repo=repo, **args)
-
-@templatekeyword('instabilities')
-def showinstabilities(**args):
+@templatekeyword('instabilities', requires={'ctx', 'templ'})
+def showinstabilities(context, mapping):
"""List of strings. Evolution instabilities affecting the changeset.
(EXPERIMENTAL)
"""
- args = pycompat.byteskwargs(args)
- return showlist('instability', args['ctx'].instabilities(), args,
- plural='instabilities')
+ ctx = context.resource(mapping, 'ctx')
+ return compatlist(context, mapping, 'instability', ctx.instabilities(),
+ plural='instabilities')
-@templatekeyword('verbosity')
-def showverbosity(ui, **args):
+@templatekeyword('verbosity', requires={'ui'})
+def showverbosity(context, mapping):
"""String. The current output verbosity in 'debug', 'quiet', 'verbose',
or ''."""
- # see cmdutil.changeset_templater for priority of these flags
+ ui = context.resource(mapping, 'ui')
+ # see logcmdutil.changesettemplater for priority of these flags
if ui.debugflag:
return 'debug'
elif ui.quiet:
--- a/mercurial/templater.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/templater.py Mon Mar 19 08:07:18 2018 -0700
@@ -5,28 +5,61 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+"""Slightly complicated template engine for commands and hgweb
+
+This module provides low-level interface to the template engine. See the
+formatter and cmdutil modules if you are looking for high-level functions
+such as ``cmdutil.rendertemplate(ctx, tmpl)``.
+
+Internal Data Types
+-------------------
+
+Template keywords and functions take a dictionary of current symbols and
+resources (a "mapping") and return result. Inputs and outputs must be one
+of the following data types:
+
+bytes
+ a byte string, which is generally a human-readable text in local encoding.
+
+generator
+ a lazily-evaluated byte string, which is a possibly nested generator of
+ values of any printable types, and will be folded by ``stringify()``
+ or ``flatten()``.
+
+ BUG: hgweb overloads this type for mappings (i.e. some hgweb keywords
+ return a generator of dicts.)
+
+None
+ sometimes represents an empty value, which can be stringified to ''.
+
+True, False, int, float
+ can be stringified as such.
+
+date tuple
+ a (unixtime, offset) tuple, which produces no meaningful output by itself.
+
+hybrid
+ represents a list/dict of printable values, which can also be converted
+ to mappings by % operator.
+
+mappable
+ represents a scalar printable value, also supports % operator.
+"""
+
from __future__ import absolute_import, print_function
import os
-import re
-import types
from .i18n import _
from . import (
- color,
config,
encoding,
error,
- minirst,
- obsutil,
parser,
pycompat,
- registrar,
- revset as revsetmod,
- revsetlang,
- scmutil,
templatefilters,
- templatekw,
+ templatefuncs,
+ templateutil,
util,
)
@@ -92,8 +125,8 @@
pos += 1
yield ('integer', program[s:pos], s)
pos -= 1
- elif (c == '\\' and program[pos:pos + 2] in (r"\'", r'\"')
- or c == 'r' and program[pos:pos + 3] in (r"r\'", r'r\"')):
+ elif (c == '\\' and program[pos:pos + 2] in (br"\'", br'\"')
+ or c == 'r' and program[pos:pos + 3] in (br"r\'", br'r\"')):
# handle escaped quoted strings for compatibility with 2.9.2-3.4,
# where some of nested templates were preprocessed as strings and
# then compiled. therefore, \"...\" was allowed. (issue4733)
@@ -138,7 +171,7 @@
yield ('symbol', sym, s)
pos -= 1
elif c == term:
- yield ('end', None, pos + 1)
+ yield ('end', None, pos)
return
else:
raise error.ParseError(_("syntax error"), pos)
@@ -161,36 +194,98 @@
([('string', 'foo\\')], 6)
"""
parsed = []
+ for typ, val, pos in _scantemplate(tmpl, start, stop, quote):
+ if typ == 'string':
+ parsed.append((typ, val))
+ elif typ == 'template':
+ parsed.append(val)
+ elif typ == 'end':
+ return parsed, pos
+ else:
+ raise error.ProgrammingError('unexpected type: %s' % typ)
+ raise error.ProgrammingError('unterminated scanning of template')
+
+def scantemplate(tmpl, raw=False):
+ r"""Scan (type, start, end) positions of outermost elements in template
+
+ If raw=True, a backslash is not taken as an escape character just like
+ r'' string in Python. Note that this is different from r'' literal in
+ template in that no template fragment can appear in r'', e.g. r'{foo}'
+ is a literal '{foo}', but ('{foo}', raw=True) is a template expression
+ 'foo'.
+
+ >>> list(scantemplate(b'foo{bar}"baz'))
+ [('string', 0, 3), ('template', 3, 8), ('string', 8, 12)]
+ >>> list(scantemplate(b'outer{"inner"}outer'))
+ [('string', 0, 5), ('template', 5, 14), ('string', 14, 19)]
+ >>> list(scantemplate(b'foo\\{escaped}'))
+ [('string', 0, 5), ('string', 5, 13)]
+ >>> list(scantemplate(b'foo\\{escaped}', raw=True))
+ [('string', 0, 4), ('template', 4, 13)]
+ """
+ last = None
+ for typ, val, pos in _scantemplate(tmpl, 0, len(tmpl), raw=raw):
+ if last:
+ yield last + (pos,)
+ if typ == 'end':
+ return
+ else:
+ last = (typ, pos)
+ raise error.ProgrammingError('unterminated scanning of template')
+
+def _scantemplate(tmpl, start, stop, quote='', raw=False):
+ """Parse template string into chunks of strings and template expressions"""
sepchars = '{' + quote
+ unescape = [parser.unescapestr, pycompat.identity][raw]
pos = start
p = parser.parser(elements)
- while pos < stop:
- n = min((tmpl.find(c, pos, stop) for c in sepchars),
- key=lambda n: (n < 0, n))
- if n < 0:
- parsed.append(('string', parser.unescapestr(tmpl[pos:stop])))
- pos = stop
- break
- c = tmpl[n:n + 1]
- bs = (n - pos) - len(tmpl[pos:n].rstrip('\\'))
- if bs % 2 == 1:
- # escaped (e.g. '\{', '\\\{', but not '\\{')
- parsed.append(('string', parser.unescapestr(tmpl[pos:n - 1]) + c))
- pos = n + 1
- continue
- if n > pos:
- parsed.append(('string', parser.unescapestr(tmpl[pos:n])))
- if c == quote:
- return parsed, n + 1
+ try:
+ while pos < stop:
+ n = min((tmpl.find(c, pos, stop) for c in sepchars),
+ key=lambda n: (n < 0, n))
+ if n < 0:
+ yield ('string', unescape(tmpl[pos:stop]), pos)
+ pos = stop
+ break
+ c = tmpl[n:n + 1]
+ bs = 0 # count leading backslashes
+ if not raw:
+ bs = (n - pos) - len(tmpl[pos:n].rstrip('\\'))
+ if bs % 2 == 1:
+ # escaped (e.g. '\{', '\\\{', but not '\\{')
+ yield ('string', unescape(tmpl[pos:n - 1]) + c, pos)
+ pos = n + 1
+ continue
+ if n > pos:
+ yield ('string', unescape(tmpl[pos:n]), pos)
+ if c == quote:
+ yield ('end', None, n + 1)
+ return
- parseres, pos = p.parse(tokenize(tmpl, n + 1, stop, '}'))
- if not tmpl.endswith('}', n + 1, pos):
- raise error.ParseError(_("invalid token"), pos)
- parsed.append(parseres)
+ parseres, pos = p.parse(tokenize(tmpl, n + 1, stop, '}'))
+ if not tmpl.startswith('}', pos):
+ raise error.ParseError(_("invalid token"), pos)
+ yield ('template', parseres, n)
+ pos += 1
- if quote:
- raise error.ParseError(_("unterminated string"), start)
- return parsed, pos
+ if quote:
+ raise error.ParseError(_("unterminated string"), start)
+ except error.ParseError as inst:
+ if len(inst.args) > 1: # has location
+ loc = inst.args[1]
+ # Offset the caret location by the number of newlines before the
+ # location of the error, since we will replace one-char newlines
+ # with the two-char literal r'\n'.
+ offset = tmpl[:loc].count('\n')
+ tmpl = tmpl.replace('\n', br'\n')
+ # We want the caret to point to the place in the template that
+ # failed to parse, but in a hint we get an open paren at the
+ # start. Therefore, we print "loc + 1" spaces (instead of "loc")
+ # to line up the caret with the location of the error.
+ inst.hint = (tmpl + '\n'
+ + ' ' * (loc + 1 + offset) + '^ ' + _('here'))
+ raise
+ yield ('end', None, pos)
def _unnesttemplatelist(tree):
"""Expand list of templates to node tuple
@@ -292,236 +387,48 @@
return context._load(exp[1])
raise error.ParseError(_("expected template specifier"))
-def findsymbolicname(arg):
- """Find symbolic name for the given compiled expression; returns None
- if nothing found reliably"""
- while True:
- func, data = arg
- if func is runsymbol:
- return data
- elif func is runfilter:
- arg = data[0]
- else:
- return None
-
-def evalrawexp(context, mapping, arg):
- """Evaluate given argument as a bare template object which may require
- further processing (such as folding generator of strings)"""
- func, data = arg
- return func(context, mapping, data)
-
-def evalfuncarg(context, mapping, arg):
- """Evaluate given argument as value type"""
- thing = evalrawexp(context, mapping, arg)
- thing = templatekw.unwrapvalue(thing)
- # evalrawexp() may return string, generator of strings or arbitrary object
- # such as date tuple, but filter does not want generator.
- if isinstance(thing, types.GeneratorType):
- thing = stringify(thing)
- return thing
-
-def evalboolean(context, mapping, arg):
- """Evaluate given argument as boolean, but also takes boolean literals"""
- func, data = arg
- if func is runsymbol:
- thing = func(context, mapping, data, default=None)
- if thing is None:
- # not a template keyword, takes as a boolean literal
- thing = util.parsebool(data)
- else:
- thing = func(context, mapping, data)
- thing = templatekw.unwrapvalue(thing)
- if isinstance(thing, bool):
- return thing
- # other objects are evaluated as strings, which means 0 is True, but
- # empty dict/list should be False as they are expected to be ''
- return bool(stringify(thing))
-
-def evalinteger(context, mapping, arg, err=None):
- v = evalfuncarg(context, mapping, arg)
- try:
- return int(v)
- except (TypeError, ValueError):
- raise error.ParseError(err or _('not an integer'))
-
-def evalstring(context, mapping, arg):
- return stringify(evalrawexp(context, mapping, arg))
-
-def evalstringliteral(context, mapping, arg):
- """Evaluate given argument as string template, but returns symbol name
- if it is unknown"""
- func, data = arg
- if func is runsymbol:
- thing = func(context, mapping, data, default=data)
- else:
- thing = func(context, mapping, data)
- return stringify(thing)
-
-_evalfuncbytype = {
- bool: evalboolean,
- bytes: evalstring,
- int: evalinteger,
-}
-
-def evalastype(context, mapping, arg, typ):
- """Evaluate given argument and coerce its type"""
- try:
- f = _evalfuncbytype[typ]
- except KeyError:
- raise error.ProgrammingError('invalid type specified: %r' % typ)
- return f(context, mapping, arg)
-
-def runinteger(context, mapping, data):
- return int(data)
-
-def runstring(context, mapping, data):
- return data
-
-def _recursivesymbolblocker(key):
- def showrecursion(**args):
- raise error.Abort(_("recursive reference '%s' in template") % key)
- return showrecursion
-
def _runrecursivesymbol(context, mapping, key):
raise error.Abort(_("recursive reference '%s' in template") % key)
-def runsymbol(context, mapping, key, default=''):
- v = context.symbol(mapping, key)
- if v is None:
- # put poison to cut recursion. we can't move this to parsing phase
- # because "x = {x}" is allowed if "x" is a keyword. (issue4758)
- safemapping = mapping.copy()
- safemapping[key] = _recursivesymbolblocker(key)
- try:
- v = context.process(key, safemapping)
- except TemplateNotFound:
- v = default
- if callable(v):
- # TODO: templatekw functions will be updated to take (context, mapping)
- # pair instead of **props
- props = context._resources.copy()
- props.update(mapping)
- return v(**pycompat.strkwargs(props))
- return v
-
def buildtemplate(exp, context):
ctmpl = [compileexp(e, context, methods) for e in exp[1:]]
- return (runtemplate, ctmpl)
-
-def runtemplate(context, mapping, template):
- for arg in template:
- yield evalrawexp(context, mapping, arg)
+ return (templateutil.runtemplate, ctmpl)
def buildfilter(exp, context):
n = getsymbol(exp[2])
if n in context._filters:
filt = context._filters[n]
arg = compileexp(exp[1], context, methods)
- return (runfilter, (arg, filt))
- if n in funcs:
- f = funcs[n]
+ return (templateutil.runfilter, (arg, filt))
+ if n in context._funcs:
+ f = context._funcs[n]
args = _buildfuncargs(exp[1], context, methods, n, f._argspec)
return (f, args)
raise error.ParseError(_("unknown function '%s'") % n)
-def runfilter(context, mapping, data):
- arg, filt = data
- thing = evalfuncarg(context, mapping, arg)
- try:
- return filt(thing)
- except (ValueError, AttributeError, TypeError):
- sym = findsymbolicname(arg)
- if sym:
- msg = (_("template filter '%s' is not compatible with keyword '%s'")
- % (pycompat.sysbytes(filt.__name__), sym))
- else:
- msg = (_("incompatible use of template filter '%s'")
- % pycompat.sysbytes(filt.__name__))
- raise error.Abort(msg)
-
def buildmap(exp, context):
darg = compileexp(exp[1], context, methods)
targ = gettemplate(exp[2], context)
- return (runmap, (darg, targ))
-
-def runmap(context, mapping, data):
- darg, targ = data
- d = evalrawexp(context, mapping, darg)
- if util.safehasattr(d, 'itermaps'):
- diter = d.itermaps()
- else:
- try:
- diter = iter(d)
- except TypeError:
- sym = findsymbolicname(darg)
- if sym:
- raise error.ParseError(_("keyword '%s' is not iterable") % sym)
- else:
- raise error.ParseError(_("%r is not iterable") % d)
-
- for i, v in enumerate(diter):
- lm = mapping.copy()
- lm['index'] = i
- if isinstance(v, dict):
- lm.update(v)
- lm['originalnode'] = mapping.get('node')
- yield evalrawexp(context, lm, targ)
- else:
- # v is not an iterable of dicts, this happen when 'key'
- # has been fully expanded already and format is useless.
- # If so, return the expanded value.
- yield v
+ return (templateutil.runmap, (darg, targ))
def buildmember(exp, context):
darg = compileexp(exp[1], context, methods)
memb = getsymbol(exp[2])
- return (runmember, (darg, memb))
-
-def runmember(context, mapping, data):
- darg, memb = data
- d = evalrawexp(context, mapping, darg)
- if util.safehasattr(d, 'tomap'):
- lm = mapping.copy()
- lm.update(d.tomap())
- return runsymbol(context, lm, memb)
- if util.safehasattr(d, 'get'):
- return _getdictitem(d, memb)
-
- sym = findsymbolicname(darg)
- if sym:
- raise error.ParseError(_("keyword '%s' has no member") % sym)
- else:
- raise error.ParseError(_("%r has no member") % d)
+ return (templateutil.runmember, (darg, memb))
def buildnegate(exp, context):
arg = compileexp(exp[1], context, exprmethods)
- return (runnegate, arg)
-
-def runnegate(context, mapping, data):
- data = evalinteger(context, mapping, data,
- _('negation needs an integer argument'))
- return -data
+ return (templateutil.runnegate, arg)
def buildarithmetic(exp, context, func):
left = compileexp(exp[1], context, exprmethods)
right = compileexp(exp[2], context, exprmethods)
- return (runarithmetic, (func, left, right))
-
-def runarithmetic(context, mapping, data):
- func, left, right = data
- left = evalinteger(context, mapping, left,
- _('arithmetic only defined on integers'))
- right = evalinteger(context, mapping, right,
- _('arithmetic only defined on integers'))
- try:
- return func(left, right)
- except ZeroDivisionError:
- raise error.Abort(_('division by zero is not defined'))
+ return (templateutil.runarithmetic, (func, left, right))
def buildfunc(exp, context):
n = getsymbol(exp[1])
- if n in funcs:
- f = funcs[n]
+ if n in context._funcs:
+ f = context._funcs[n]
args = _buildfuncargs(exp[2], context, exprmethods, n, f._argspec)
return (f, args)
if n in context._filters:
@@ -529,14 +436,14 @@
if len(args) != 1:
raise error.ParseError(_("filter %s expects one argument") % n)
f = context._filters[n]
- return (runfilter, (args[0], f))
+ return (templateutil.runfilter, (args[0], f))
raise error.ParseError(_("unknown function '%s'") % n)
def _buildfuncargs(exp, context, curmethods, funcname, argspec):
"""Compile parsed tree of function arguments into list or dict of
(func, data) pairs
- >>> context = engine(lambda t: (runsymbol, t))
+ >>> context = engine(lambda t: (templateutil.runsymbol, t))
>>> def fargs(expr, argspec):
... x = _parseexpr(expr)
... n = getsymbol(x[1])
@@ -572,647 +479,11 @@
def buildkeyvaluepair(exp, content):
raise error.ParseError(_("can't use a key-value pair in this context"))
-# dict of template built-in functions
-funcs = {}
-
-templatefunc = registrar.templatefunc(funcs)
-
-@templatefunc('date(date[, fmt])')
-def date(context, mapping, args):
- """Format a date. See :hg:`help dates` for formatting
- strings. The default is a Unix date format, including the timezone:
- "Mon Sep 04 15:13:13 2006 0700"."""
- if not (1 <= len(args) <= 2):
- # i18n: "date" is a keyword
- raise error.ParseError(_("date expects one or two arguments"))
-
- date = evalfuncarg(context, mapping, args[0])
- fmt = None
- if len(args) == 2:
- fmt = evalstring(context, mapping, args[1])
- try:
- if fmt is None:
- return util.datestr(date)
- else:
- return util.datestr(date, fmt)
- except (TypeError, ValueError):
- # i18n: "date" is a keyword
- raise error.ParseError(_("date expects a date information"))
-
-@templatefunc('dict([[key=]value...])', argspec='*args **kwargs')
-def dict_(context, mapping, args):
- """Construct a dict from key-value pairs. A key may be omitted if
- a value expression can provide an unambiguous name."""
- data = util.sortdict()
-
- for v in args['args']:
- k = findsymbolicname(v)
- if not k:
- raise error.ParseError(_('dict key cannot be inferred'))
- if k in data or k in args['kwargs']:
- raise error.ParseError(_("duplicated dict key '%s' inferred") % k)
- data[k] = evalfuncarg(context, mapping, v)
-
- data.update((k, evalfuncarg(context, mapping, v))
- for k, v in args['kwargs'].iteritems())
- return templatekw.hybriddict(data)
-
-@templatefunc('diff([includepattern [, excludepattern]])')
-def diff(context, mapping, args):
- """Show a diff, optionally
- specifying files to include or exclude."""
- if len(args) > 2:
- # i18n: "diff" is a keyword
- raise error.ParseError(_("diff expects zero, one, or two arguments"))
-
- def getpatterns(i):
- if i < len(args):
- s = evalstring(context, mapping, args[i]).strip()
- if s:
- return [s]
- return []
-
- ctx = context.resource(mapping, 'ctx')
- chunks = ctx.diff(match=ctx.match([], getpatterns(0), getpatterns(1)))
-
- return ''.join(chunks)
-
-@templatefunc('extdata(source)', argspec='source')
-def extdata(context, mapping, args):
- """Show a text read from the specified extdata source. (EXPERIMENTAL)"""
- if 'source' not in args:
- # i18n: "extdata" is a keyword
- raise error.ParseError(_('extdata expects one argument'))
-
- source = evalstring(context, mapping, args['source'])
- cache = context.resource(mapping, 'cache').setdefault('extdata', {})
- ctx = context.resource(mapping, 'ctx')
- if source in cache:
- data = cache[source]
- else:
- data = cache[source] = scmutil.extdatasource(ctx.repo(), source)
- return data.get(ctx.rev(), '')
-
-@templatefunc('files(pattern)')
-def files(context, mapping, args):
- """All files of the current changeset matching the pattern. See
- :hg:`help patterns`."""
- if not len(args) == 1:
- # i18n: "files" is a keyword
- raise error.ParseError(_("files expects one argument"))
-
- raw = evalstring(context, mapping, args[0])
- ctx = context.resource(mapping, 'ctx')
- m = ctx.match([raw])
- files = list(ctx.matches(m))
- # TODO: pass (context, mapping) pair to keyword function
- props = context._resources.copy()
- props.update(mapping)
- return templatekw.showlist("file", files, props)
-
-@templatefunc('fill(text[, width[, initialident[, hangindent]]])')
-def fill(context, mapping, args):
- """Fill many
- paragraphs with optional indentation. See the "fill" filter."""
- if not (1 <= len(args) <= 4):
- # i18n: "fill" is a keyword
- raise error.ParseError(_("fill expects one to four arguments"))
-
- text = evalstring(context, mapping, args[0])
- width = 76
- initindent = ''
- hangindent = ''
- if 2 <= len(args) <= 4:
- width = evalinteger(context, mapping, args[1],
- # i18n: "fill" is a keyword
- _("fill expects an integer width"))
- try:
- initindent = evalstring(context, mapping, args[2])
- hangindent = evalstring(context, mapping, args[3])
- except IndexError:
- pass
-
- return templatefilters.fill(text, width, initindent, hangindent)
-
-@templatefunc('formatnode(node)')
-def formatnode(context, mapping, args):
- """Obtain the preferred form of a changeset hash. (DEPRECATED)"""
- if len(args) != 1:
- # i18n: "formatnode" is a keyword
- raise error.ParseError(_("formatnode expects one argument"))
-
- ui = context.resource(mapping, 'ui')
- node = evalstring(context, mapping, args[0])
- if ui.debugflag:
- return node
- return templatefilters.short(node)
-
-@templatefunc('pad(text, width[, fillchar=\' \'[, left=False]])',
- argspec='text width fillchar left')
-def pad(context, mapping, args):
- """Pad text with a
- fill character."""
- if 'text' not in args or 'width' not in args:
- # i18n: "pad" is a keyword
- raise error.ParseError(_("pad() expects two to four arguments"))
-
- width = evalinteger(context, mapping, args['width'],
- # i18n: "pad" is a keyword
- _("pad() expects an integer width"))
-
- text = evalstring(context, mapping, args['text'])
-
- left = False
- fillchar = ' '
- if 'fillchar' in args:
- fillchar = evalstring(context, mapping, args['fillchar'])
- if len(color.stripeffects(fillchar)) != 1:
- # i18n: "pad" is a keyword
- raise error.ParseError(_("pad() expects a single fill character"))
- if 'left' in args:
- left = evalboolean(context, mapping, args['left'])
-
- fillwidth = width - encoding.colwidth(color.stripeffects(text))
- if fillwidth <= 0:
- return text
- if left:
- return fillchar * fillwidth + text
- else:
- return text + fillchar * fillwidth
-
-@templatefunc('indent(text, indentchars[, firstline])')
-def indent(context, mapping, args):
- """Indents all non-empty lines
- with the characters given in the indentchars string. An optional
- third parameter will override the indent for the first line only
- if present."""
- if not (2 <= len(args) <= 3):
- # i18n: "indent" is a keyword
- raise error.ParseError(_("indent() expects two or three arguments"))
-
- text = evalstring(context, mapping, args[0])
- indent = evalstring(context, mapping, args[1])
-
- if len(args) == 3:
- firstline = evalstring(context, mapping, args[2])
- else:
- firstline = indent
-
- # the indent function doesn't indent the first line, so we do it here
- return templatefilters.indent(firstline + text, indent)
-
-@templatefunc('get(dict, key)')
-def get(context, mapping, args):
- """Get an attribute/key from an object. Some keywords
- are complex types. This function allows you to obtain the value of an
- attribute on these types."""
- if len(args) != 2:
- # i18n: "get" is a keyword
- raise error.ParseError(_("get() expects two arguments"))
-
- dictarg = evalfuncarg(context, mapping, args[0])
- if not util.safehasattr(dictarg, 'get'):
- # i18n: "get" is a keyword
- raise error.ParseError(_("get() expects a dict as first argument"))
-
- key = evalfuncarg(context, mapping, args[1])
- return _getdictitem(dictarg, key)
-
-def _getdictitem(dictarg, key):
- val = dictarg.get(key)
- if val is None:
- return
- return templatekw.wraphybridvalue(dictarg, key, val)
-
-@templatefunc('if(expr, then[, else])')
-def if_(context, mapping, args):
- """Conditionally execute based on the result of
- an expression."""
- if not (2 <= len(args) <= 3):
- # i18n: "if" is a keyword
- raise error.ParseError(_("if expects two or three arguments"))
-
- test = evalboolean(context, mapping, args[0])
- if test:
- yield evalrawexp(context, mapping, args[1])
- elif len(args) == 3:
- yield evalrawexp(context, mapping, args[2])
-
-@templatefunc('ifcontains(needle, haystack, then[, else])')
-def ifcontains(context, mapping, args):
- """Conditionally execute based
- on whether the item "needle" is in "haystack"."""
- if not (3 <= len(args) <= 4):
- # i18n: "ifcontains" is a keyword
- raise error.ParseError(_("ifcontains expects three or four arguments"))
-
- haystack = evalfuncarg(context, mapping, args[1])
- try:
- needle = evalastype(context, mapping, args[0],
- getattr(haystack, 'keytype', None) or bytes)
- found = (needle in haystack)
- except error.ParseError:
- found = False
-
- if found:
- yield evalrawexp(context, mapping, args[2])
- elif len(args) == 4:
- yield evalrawexp(context, mapping, args[3])
-
-@templatefunc('ifeq(expr1, expr2, then[, else])')
-def ifeq(context, mapping, args):
- """Conditionally execute based on
- whether 2 items are equivalent."""
- if not (3 <= len(args) <= 4):
- # i18n: "ifeq" is a keyword
- raise error.ParseError(_("ifeq expects three or four arguments"))
-
- test = evalstring(context, mapping, args[0])
- match = evalstring(context, mapping, args[1])
- if test == match:
- yield evalrawexp(context, mapping, args[2])
- elif len(args) == 4:
- yield evalrawexp(context, mapping, args[3])
-
-@templatefunc('join(list, sep)')
-def join(context, mapping, args):
- """Join items in a list with a delimiter."""
- if not (1 <= len(args) <= 2):
- # i18n: "join" is a keyword
- raise error.ParseError(_("join expects one or two arguments"))
-
- # TODO: perhaps this should be evalfuncarg(), but it can't because hgweb
- # abuses generator as a keyword that returns a list of dicts.
- joinset = evalrawexp(context, mapping, args[0])
- joinset = templatekw.unwrapvalue(joinset)
- joinfmt = getattr(joinset, 'joinfmt', pycompat.identity)
- joiner = " "
- if len(args) > 1:
- joiner = evalstring(context, mapping, args[1])
-
- first = True
- for x in joinset:
- if first:
- first = False
- else:
- yield joiner
- yield joinfmt(x)
-
-@templatefunc('label(label, expr)')
-def label(context, mapping, args):
- """Apply a label to generated content. Content with
- a label applied can result in additional post-processing, such as
- automatic colorization."""
- if len(args) != 2:
- # i18n: "label" is a keyword
- raise error.ParseError(_("label expects two arguments"))
-
- ui = context.resource(mapping, 'ui')
- thing = evalstring(context, mapping, args[1])
- # preserve unknown symbol as literal so effects like 'red', 'bold',
- # etc. don't need to be quoted
- label = evalstringliteral(context, mapping, args[0])
-
- return ui.label(thing, label)
-
-@templatefunc('latesttag([pattern])')
-def latesttag(context, mapping, args):
- """The global tags matching the given pattern on the
- most recent globally tagged ancestor of this changeset.
- If no such tags exist, the "{tag}" template resolves to
- the string "null"."""
- if len(args) > 1:
- # i18n: "latesttag" is a keyword
- raise error.ParseError(_("latesttag expects at most one argument"))
-
- pattern = None
- if len(args) == 1:
- pattern = evalstring(context, mapping, args[0])
-
- # TODO: pass (context, mapping) pair to keyword function
- props = context._resources.copy()
- props.update(mapping)
- return templatekw.showlatesttags(pattern, **pycompat.strkwargs(props))
-
-@templatefunc('localdate(date[, tz])')
-def localdate(context, mapping, args):
- """Converts a date to the specified timezone.
- The default is local date."""
- if not (1 <= len(args) <= 2):
- # i18n: "localdate" is a keyword
- raise error.ParseError(_("localdate expects one or two arguments"))
-
- date = evalfuncarg(context, mapping, args[0])
- try:
- date = util.parsedate(date)
- except AttributeError: # not str nor date tuple
- # i18n: "localdate" is a keyword
- raise error.ParseError(_("localdate expects a date information"))
- if len(args) >= 2:
- tzoffset = None
- tz = evalfuncarg(context, mapping, args[1])
- if isinstance(tz, str):
- tzoffset, remainder = util.parsetimezone(tz)
- if remainder:
- tzoffset = None
- if tzoffset is None:
- try:
- tzoffset = int(tz)
- except (TypeError, ValueError):
- # i18n: "localdate" is a keyword
- raise error.ParseError(_("localdate expects a timezone"))
- else:
- tzoffset = util.makedate()[1]
- return (date[0], tzoffset)
-
-@templatefunc('max(iterable)')
-def max_(context, mapping, args, **kwargs):
- """Return the max of an iterable"""
- if len(args) != 1:
- # i18n: "max" is a keyword
- raise error.ParseError(_("max expects one argument"))
-
- iterable = evalfuncarg(context, mapping, args[0])
- try:
- x = max(iterable)
- except (TypeError, ValueError):
- # i18n: "max" is a keyword
- raise error.ParseError(_("max first argument should be an iterable"))
- return templatekw.wraphybridvalue(iterable, x, x)
-
-@templatefunc('min(iterable)')
-def min_(context, mapping, args, **kwargs):
- """Return the min of an iterable"""
- if len(args) != 1:
- # i18n: "min" is a keyword
- raise error.ParseError(_("min expects one argument"))
-
- iterable = evalfuncarg(context, mapping, args[0])
- try:
- x = min(iterable)
- except (TypeError, ValueError):
- # i18n: "min" is a keyword
- raise error.ParseError(_("min first argument should be an iterable"))
- return templatekw.wraphybridvalue(iterable, x, x)
-
-@templatefunc('mod(a, b)')
-def mod(context, mapping, args):
- """Calculate a mod b such that a / b + a mod b == a"""
- if not len(args) == 2:
- # i18n: "mod" is a keyword
- raise error.ParseError(_("mod expects two arguments"))
-
- func = lambda a, b: a % b
- return runarithmetic(context, mapping, (func, args[0], args[1]))
-
-@templatefunc('obsfateoperations(markers)')
-def obsfateoperations(context, mapping, args):
- """Compute obsfate related information based on markers (EXPERIMENTAL)"""
- if len(args) != 1:
- # i18n: "obsfateoperations" is a keyword
- raise error.ParseError(_("obsfateoperations expects one argument"))
-
- markers = evalfuncarg(context, mapping, args[0])
-
- try:
- data = obsutil.markersoperations(markers)
- return templatekw.hybridlist(data, name='operation')
- except (TypeError, KeyError):
- # i18n: "obsfateoperations" is a keyword
- errmsg = _("obsfateoperations first argument should be an iterable")
- raise error.ParseError(errmsg)
-
-@templatefunc('obsfatedate(markers)')
-def obsfatedate(context, mapping, args):
- """Compute obsfate related information based on markers (EXPERIMENTAL)"""
- if len(args) != 1:
- # i18n: "obsfatedate" is a keyword
- raise error.ParseError(_("obsfatedate expects one argument"))
-
- markers = evalfuncarg(context, mapping, args[0])
-
- try:
- data = obsutil.markersdates(markers)
- return templatekw.hybridlist(data, name='date', fmt='%d %d')
- except (TypeError, KeyError):
- # i18n: "obsfatedate" is a keyword
- errmsg = _("obsfatedate first argument should be an iterable")
- raise error.ParseError(errmsg)
-
-@templatefunc('obsfateusers(markers)')
-def obsfateusers(context, mapping, args):
- """Compute obsfate related information based on markers (EXPERIMENTAL)"""
- if len(args) != 1:
- # i18n: "obsfateusers" is a keyword
- raise error.ParseError(_("obsfateusers expects one argument"))
-
- markers = evalfuncarg(context, mapping, args[0])
-
- try:
- data = obsutil.markersusers(markers)
- return templatekw.hybridlist(data, name='user')
- except (TypeError, KeyError, ValueError):
- # i18n: "obsfateusers" is a keyword
- msg = _("obsfateusers first argument should be an iterable of "
- "obsmakers")
- raise error.ParseError(msg)
-
-@templatefunc('obsfateverb(successors, markers)')
-def obsfateverb(context, mapping, args):
- """Compute obsfate related information based on successors (EXPERIMENTAL)"""
- if len(args) != 2:
- # i18n: "obsfateverb" is a keyword
- raise error.ParseError(_("obsfateverb expects two arguments"))
-
- successors = evalfuncarg(context, mapping, args[0])
- markers = evalfuncarg(context, mapping, args[1])
-
- try:
- return obsutil.obsfateverb(successors, markers)
- except TypeError:
- # i18n: "obsfateverb" is a keyword
- errmsg = _("obsfateverb first argument should be countable")
- raise error.ParseError(errmsg)
-
-@templatefunc('relpath(path)')
-def relpath(context, mapping, args):
- """Convert a repository-absolute path into a filesystem path relative to
- the current working directory."""
- if len(args) != 1:
- # i18n: "relpath" is a keyword
- raise error.ParseError(_("relpath expects one argument"))
-
- repo = context.resource(mapping, 'ctx').repo()
- path = evalstring(context, mapping, args[0])
- return repo.pathto(path)
-
-@templatefunc('revset(query[, formatargs...])')
-def revset(context, mapping, args):
- """Execute a revision set query. See
- :hg:`help revset`."""
- if not len(args) > 0:
- # i18n: "revset" is a keyword
- raise error.ParseError(_("revset expects one or more arguments"))
-
- raw = evalstring(context, mapping, args[0])
- ctx = context.resource(mapping, 'ctx')
- repo = ctx.repo()
-
- def query(expr):
- m = revsetmod.match(repo.ui, expr, repo=repo)
- return m(repo)
-
- if len(args) > 1:
- formatargs = [evalfuncarg(context, mapping, a) for a in args[1:]]
- revs = query(revsetlang.formatspec(raw, *formatargs))
- revs = list(revs)
- else:
- cache = context.resource(mapping, 'cache')
- revsetcache = cache.setdefault("revsetcache", {})
- if raw in revsetcache:
- revs = revsetcache[raw]
- else:
- revs = query(raw)
- revs = list(revs)
- revsetcache[raw] = revs
-
- # TODO: pass (context, mapping) pair to keyword function
- props = context._resources.copy()
- props.update(mapping)
- return templatekw.showrevslist("revision", revs,
- **pycompat.strkwargs(props))
-
-@templatefunc('rstdoc(text, style)')
-def rstdoc(context, mapping, args):
- """Format reStructuredText."""
- if len(args) != 2:
- # i18n: "rstdoc" is a keyword
- raise error.ParseError(_("rstdoc expects two arguments"))
-
- text = evalstring(context, mapping, args[0])
- style = evalstring(context, mapping, args[1])
-
- return minirst.format(text, style=style, keep=['verbose'])
-
-@templatefunc('separate(sep, args)', argspec='sep *args')
-def separate(context, mapping, args):
- """Add a separator between non-empty arguments."""
- if 'sep' not in args:
- # i18n: "separate" is a keyword
- raise error.ParseError(_("separate expects at least one argument"))
-
- sep = evalstring(context, mapping, args['sep'])
- first = True
- for arg in args['args']:
- argstr = evalstring(context, mapping, arg)
- if not argstr:
- continue
- if first:
- first = False
- else:
- yield sep
- yield argstr
-
-@templatefunc('shortest(node, minlength=4)')
-def shortest(context, mapping, args):
- """Obtain the shortest representation of
- a node."""
- if not (1 <= len(args) <= 2):
- # i18n: "shortest" is a keyword
- raise error.ParseError(_("shortest() expects one or two arguments"))
-
- node = evalstring(context, mapping, args[0])
-
- minlength = 4
- if len(args) > 1:
- minlength = evalinteger(context, mapping, args[1],
- # i18n: "shortest" is a keyword
- _("shortest() expects an integer minlength"))
-
- # _partialmatch() of filtered changelog could take O(len(repo)) time,
- # which would be unacceptably slow. so we look for hash collision in
- # unfiltered space, which means some hashes may be slightly longer.
- cl = context.resource(mapping, 'ctx')._repo.unfiltered().changelog
- return cl.shortest(node, minlength)
-
-@templatefunc('strip(text[, chars])')
-def strip(context, mapping, args):
- """Strip characters from a string. By default,
- strips all leading and trailing whitespace."""
- if not (1 <= len(args) <= 2):
- # i18n: "strip" is a keyword
- raise error.ParseError(_("strip expects one or two arguments"))
-
- text = evalstring(context, mapping, args[0])
- if len(args) == 2:
- chars = evalstring(context, mapping, args[1])
- return text.strip(chars)
- return text.strip()
-
-@templatefunc('sub(pattern, replacement, expression)')
-def sub(context, mapping, args):
- """Perform text substitution
- using regular expressions."""
- if len(args) != 3:
- # i18n: "sub" is a keyword
- raise error.ParseError(_("sub expects three arguments"))
-
- pat = evalstring(context, mapping, args[0])
- rpl = evalstring(context, mapping, args[1])
- src = evalstring(context, mapping, args[2])
- try:
- patre = re.compile(pat)
- except re.error:
- # i18n: "sub" is a keyword
- raise error.ParseError(_("sub got an invalid pattern: %s") % pat)
- try:
- yield patre.sub(rpl, src)
- except re.error:
- # i18n: "sub" is a keyword
- raise error.ParseError(_("sub got an invalid replacement: %s") % rpl)
-
-@templatefunc('startswith(pattern, text)')
-def startswith(context, mapping, args):
- """Returns the value from the "text" argument
- if it begins with the content from the "pattern" argument."""
- if len(args) != 2:
- # i18n: "startswith" is a keyword
- raise error.ParseError(_("startswith expects two arguments"))
-
- patn = evalstring(context, mapping, args[0])
- text = evalstring(context, mapping, args[1])
- if text.startswith(patn):
- return text
- return ''
-
-@templatefunc('word(number, text[, separator])')
-def word(context, mapping, args):
- """Return the nth word from a string."""
- if not (2 <= len(args) <= 3):
- # i18n: "word" is a keyword
- raise error.ParseError(_("word expects two or three arguments, got %d")
- % len(args))
-
- num = evalinteger(context, mapping, args[0],
- # i18n: "word" is a keyword
- _("word expects an integer index"))
- text = evalstring(context, mapping, args[1])
- if len(args) == 3:
- splitter = evalstring(context, mapping, args[2])
- else:
- splitter = None
-
- tokens = text.split(splitter)
- if num >= len(tokens) or num < -len(tokens):
- return ''
- else:
- return tokens[num]
-
# methods to interpret function arguments or inner expressions (e.g. {_(x)})
exprmethods = {
- "integer": lambda e, c: (runinteger, e[1]),
- "string": lambda e, c: (runstring, e[1]),
- "symbol": lambda e, c: (runsymbol, e[1]),
+ "integer": lambda e, c: (templateutil.runinteger, e[1]),
+ "string": lambda e, c: (templateutil.runstring, e[1]),
+ "symbol": lambda e, c: (templateutil.runsymbol, e[1]),
"template": buildtemplate,
"group": lambda e, c: compileexp(e[1], c, exprmethods),
".": buildmember,
@@ -1252,25 +523,23 @@
# template engine
-stringify = templatefilters.stringify
-
def _flatten(thing):
'''yield a single stream from a possibly nested set of iterators'''
- thing = templatekw.unwraphybrid(thing)
+ thing = templateutil.unwraphybrid(thing)
if isinstance(thing, bytes):
yield thing
elif isinstance(thing, str):
# We can only hit this on Python 3, and it's here to guard
# against infinite recursion.
raise error.ProgrammingError('Mercurial IO including templates is done'
- ' with bytes, not strings')
+ ' with bytes, not strings, got %r' % thing)
elif thing is None:
pass
elif not util.safehasattr(thing, '__iter__'):
yield pycompat.bytestr(thing)
else:
for i in thing:
- i = templatekw.unwraphybrid(i)
+ i = templateutil.unwraphybrid(i)
if isinstance(i, bytes):
yield i
elif i is None:
@@ -1313,6 +582,7 @@
if filters is None:
filters = {}
self._filters = filters
+ self._funcs = templatefuncs.funcs # make this a parameter if needed
if defaults is None:
defaults = {}
if resources is None:
@@ -1336,11 +606,10 @@
evaluation"""
v = None
if key in self._resources:
- v = mapping.get(key)
+ v = self._resources[key](self, mapping, key)
if v is None:
- v = self._resources.get(key)
- if v is None:
- raise error.Abort(_('template resource not available: %s') % key)
+ raise templateutil.ResourceUnavailable(
+ _('template resource not available: %s') % key)
return v
def _load(self, t):
@@ -1431,9 +700,6 @@
aliases.extend(conf['templatealias'].items())
return cache, tmap, aliases
-class TemplateNotFound(error.Abort):
- pass
-
class templater(object):
def __init__(self, filters=None, defaults=None, resources=None,
@@ -1443,8 +709,8 @@
- ``filters``: a dict of functions to transform a value into another.
- ``defaults``: a dict of symbol values/functions; may be overridden
by a ``mapping`` dict.
- - ``resources``: a dict of internal data (e.g. cache), inaccessible
- from user template; may be overridden by a ``mapping`` dict.
+ - ``resources``: a dict of functions returning internal data
+ (e.g. cache), inaccessible from user template.
- ``cache``: a dict of preloaded template fragments.
- ``aliases``: a list of alias (name, replacement) pairs.
@@ -1464,7 +730,7 @@
self.filters = templatefilters.filters.copy()
self.filters.update(filters)
self.defaults = defaults
- self._resources = {'templ': self}
+ self._resources = {'templ': lambda context, mapping, key: self}
self._resources.update(resources)
self._aliases = aliases
self.minchunk, self.maxchunk = minchunk, maxchunk
@@ -1490,20 +756,25 @@
try:
self.cache[t] = util.readfile(self.map[t][1])
except KeyError as inst:
- raise TemplateNotFound(_('"%s" not in template map') %
- inst.args[0])
+ raise templateutil.TemplateNotFound(
+ _('"%s" not in template map') % inst.args[0])
except IOError as inst:
- raise IOError(inst.args[0], _('template file %s: %s') %
- (self.map[t][1], inst.args[1]))
+ reason = (_('template file %s: %s')
+ % (self.map[t][1], util.forcebytestr(inst.args[1])))
+ raise IOError(inst.args[0], encoding.strfromlocal(reason))
return self.cache[t]
- def render(self, mapping):
+ def renderdefault(self, mapping):
"""Render the default unnamed template and return result as string"""
- mapping = pycompat.strkwargs(mapping)
- return stringify(self('', **mapping))
+ return self.render('', mapping)
- def __call__(self, t, **mapping):
- mapping = pycompat.byteskwargs(mapping)
+ def render(self, t, mapping):
+ """Render the specified named template and return result as string"""
+ return templateutil.stringify(self.generate(t, mapping))
+
+ def generate(self, t, mapping):
+ """Return a generator that renders the specified named template and
+ yields chunks"""
ttype = t in self.map and self.map[t][0] or 'default'
if ttype not in self.ecache:
try:
@@ -1546,16 +817,16 @@
if paths is None:
paths = templatepaths()
- elif isinstance(paths, str):
+ elif isinstance(paths, bytes):
paths = [paths]
- if isinstance(styles, str):
+ if isinstance(styles, bytes):
styles = [styles]
for style in styles:
# only plain name is allowed to honor template paths
if (not style
- or style in (os.curdir, os.pardir)
+ or style in (pycompat.oscurdir, pycompat.ospardir)
or pycompat.ossep in style
or pycompat.osaltsep and pycompat.osaltsep in style):
continue
@@ -1569,12 +840,3 @@
return style, mapfile
raise RuntimeError("No hgweb templates found in %r" % paths)
-
-def loadfunction(ui, extname, registrarobj):
- """Load template function from specified registrarobj
- """
- for name, func in registrarobj._table.iteritems():
- funcs[name] = func
-
-# tell hggettext to extract docstrings from these functions:
-i18nfunctions = funcs.values()
--- a/mercurial/templates/gitweb/changeset.tmpl Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/templates/gitweb/changeset.tmpl Mon Mar 19 08:07:18 2018 -0700
@@ -44,7 +44,7 @@
<td>changeset {rev}</td>
<td style="font-family:monospace"><a class="list" href="{url|urlescape}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></td>
</tr>
-{if(obsolete, '<tr><td>obsolete</td><td>{succsandmarkers%obsfateentry}</td></tr>')}
+{if(obsolete, succsandmarkers%obsfateentry)}
{ifeq(count(parent), '2', parent%changesetparentdiff, parent%changesetparent)}
{child%changesetchild}
</table></div>
--- a/mercurial/templates/gitweb/map Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/templates/gitweb/map Mon Mar 19 08:07:18 2018 -0700
@@ -275,7 +275,13 @@
obsfatesuccessors = '{if(successors, ' as ')}{successors%successorlink}'
obsfateverb = '{obsfateverb(successors, markers)}'
obsfateoperations = '{if(obsfateoperations(markers), ' using {join(obsfateoperations(markers), ', ')}')}'
-obsfateentry = '{obsfateverb}{obsfateoperations}{obsfatesuccessors}'
+obsfateusers = '{if(obsfateusers(markers), ' by {join(obsfateusers(markers)%'{user|obfuscate}', ', ')}')}'
+obsfatedate = '{if(obsfatedate(markers), ' {ifeq(min(obsfatedate(markers)), max(obsfatedate(markers)), '<span class="age">{min(obsfatedate(markers))|rfc822date}</span>', 'between <span class="age">{min(obsfatedate(markers))|rfc822date}</span> and <span class="age">{max(obsfatedate(markers))|rfc822date}</span>')}')}'
+obsfateentry = '
+ <tr>
+ <td>obsolete</td>
+ <td>{obsfateverb}{obsfateoperations}{obsfatesuccessors}{obsfateusers}{obsfatedate}</td>
+ </tr>'
shortlogentry = '
<tr class="parity{parity}">
<td class="age"><i class="age">{date|rfc822date}</i></td>
--- a/mercurial/templates/monoblue/changeset.tmpl Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/templates/monoblue/changeset.tmpl Mon Mar 19 08:07:18 2018 -0700
@@ -48,7 +48,7 @@
{branch%changesetbranch}
<dt>changeset {rev}</dt>
<dd><a href="{url|urlescape}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></dd>
- {if(obsolete, '<dt>obsolete</dt><dd>{succsandmarkers%obsfateentry}</dd>')}
+ {if(obsolete, succsandmarkers%obsfateentry)}
{ifeq(count(parent), '2', parent%changesetparentdiff, parent%changesetparent)}
{child%changesetchild}
</dl>
--- a/mercurial/templates/monoblue/map Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/templates/monoblue/map Mon Mar 19 08:07:18 2018 -0700
@@ -233,7 +233,11 @@
obsfatesuccessors = '{if(successors, ' as ')}{successors%successorlink}'
obsfateverb = '{obsfateverb(successors, markers)}'
obsfateoperations = '{if(obsfateoperations(markers), ' using {join(obsfateoperations(markers), ', ')}')}'
-obsfateentry = '{obsfateverb}{obsfateoperations}{obsfatesuccessors}'
+obsfateusers = '{if(obsfateusers(markers), ' by {join(obsfateusers(markers)%'{user|obfuscate}', ', ')}')}'
+obsfatedate = '{if(obsfatedate(markers), ' {ifeq(min(obsfatedate(markers)), max(obsfatedate(markers)), '<span class="age">{min(obsfatedate(markers))|rfc822date}</span>', 'between <span class="age">{min(obsfatedate(markers))|rfc822date}</span> and <span class="age">{max(obsfatedate(markers))|rfc822date}</span>')}')}'
+obsfateentry = '
+ <dt>obsolete</dt>
+ <dd>{obsfateverb}{obsfateoperations}{obsfatesuccessors}{obsfateusers}{obsfatedate}</dd>'
shortlogentry = '
<tr class="parity{parity}">
<td class="nowrap age">{date|rfc822date}</td>
--- a/mercurial/templates/paper/changeset.tmpl Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/templates/paper/changeset.tmpl Mon Mar 19 08:07:18 2018 -0700
@@ -51,7 +51,11 @@
</tr>
{if(obsolete, '<tr>
<th>obsolete</th>
- <td>{succsandmarkers%obsfateentry}</td>
+ <td>{join(succsandmarkers%obsfateentry, '<br>\n')}</td>
+</tr>')}
+{if(instabilities, '<tr>
+ <th>unstable</th>
+ <td>{join(whyunstable%whyunstableentry, '<br>\n')}</td>
</tr>')}
<tr>
<th class="author">parents</th>
--- a/mercurial/templates/paper/map Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/templates/paper/map Mon Mar 19 08:07:18 2018 -0700
@@ -213,7 +213,12 @@
obsfatesuccessors = '{if(successors, ' as ')}{successors%successorlink}'
obsfateverb = '{obsfateverb(successors, markers)}'
obsfateoperations = '{if(obsfateoperations(markers), ' using {join(obsfateoperations(markers), ', ')}')}'
-obsfateentry = '{obsfateverb}{obsfateoperations}{obsfatesuccessors}'
+obsfateusers = '{if(obsfateusers(markers), ' by {join(obsfateusers(markers)%'{user|obfuscate}', ', ')}')}'
+obsfatedate = '{if(obsfatedate(markers), ' {ifeq(min(obsfatedate(markers)), max(obsfatedate(markers)), '<span class="age">{min(obsfatedate(markers))|rfc822date}</span>', 'between <span class="age">{min(obsfatedate(markers))|rfc822date}</span> and <span class="age">{max(obsfatedate(markers))|rfc822date}</span>')}')}'
+obsfateentry = '{obsfateverb}{obsfateoperations}{obsfatesuccessors}{obsfateusers}{obsfatedate}'
+instabilitychangesetlink = '<a href="{url|urlescape}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a>'
+divergentnode = '{instabilitychangesetlink} ({phase})'
+whyunstableentry = '{instability}: {if(divergentnodes, divergentnodes%divergentnode)} {reason} {instabilitychangesetlink}'
filediffparent = '
<tr>
--- a/mercurial/templates/spartan/changelogentry.tmpl Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/templates/spartan/changelogentry.tmpl Mon Mar 19 08:07:18 2018 -0700
@@ -22,10 +22,7 @@
<th class="phase">phase:</th>
<td class="phase">{phase|escape}</td>
</tr>')}
- {if(obsolete, '<tr>
- <th class="obsolete">obsolete:</th>
- <td class="obsolete">{succsandmarkers%obsfateentry}</td>
- </tr>')}
+ {if(obsolete, succsandmarkers%obsfateentry)}
{ifeq(count(instabilities), '0', '', '<tr>
<th class="instabilities">instabilities:</th>
<td class="instabilities">{instabilities%"{instability} "|escape}</td>
--- a/mercurial/templates/spartan/changeset.tmpl Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/templates/spartan/changeset.tmpl Mon Mar 19 08:07:18 2018 -0700
@@ -37,10 +37,7 @@
<th class="phase">phase:</th>
<td class="phase">{phase|escape}</td>
</tr>')}
-{if(obsolete, '<tr>
- <th class="obsolete">obsolete:</th>
- <td class="obsolete">{succsandmarkers%obsfateentry}</td>
-</tr>')}
+{if(obsolete, succsandmarkers%obsfateentry)}
{ifeq(count(instabilities), '0', '', '<tr>
<th class="instabilities">instabilities:</th>
<td class="instabilities">{instabilities%"{instability} "|escape}</td>
--- a/mercurial/templates/spartan/map Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/templates/spartan/map Mon Mar 19 08:07:18 2018 -0700
@@ -170,7 +170,13 @@
obsfatesuccessors = '{if(successors, ' as ')}{successors%successorlink}'
obsfateverb = '{obsfateverb(successors, markers)}'
obsfateoperations = '{if(obsfateoperations(markers), ' using {join(obsfateoperations(markers), ', ')}')}'
-obsfateentry = '{obsfateverb}{obsfateoperations}{obsfatesuccessors}'
+obsfateusers = '{if(obsfateusers(markers), ' by {join(obsfateusers(markers)%'{user|obfuscate}', ', ')}')}'
+obsfatedate = '{if(obsfatedate(markers), ' {ifeq(min(obsfatedate(markers)), max(obsfatedate(markers)), '<span class="age">{min(obsfatedate(markers))|rfc822date}</span>', 'between <span class="age">{min(obsfatedate(markers))|rfc822date}</span> and <span class="age">{max(obsfatedate(markers))|rfc822date}</span>')}')}'
+obsfateentry = '
+ <tr>
+ <th class="obsolete">obsolete:</th>
+ <td class="obsolete">{obsfateverb}{obsfateoperations}{obsfatesuccessors}{obsfateusers}{obsfatedate}</td>
+ </tr>'
filediffparent = '
<tr>
<th class="parent">parent {rev}:</th>
--- a/mercurial/templates/static/style-gitweb.css Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/templates/static/style-gitweb.css Mon Mar 19 08:07:18 2018 -0700
@@ -29,7 +29,7 @@
div.title_text { padding:6px 0px; border: solid #d9d8d1; border-width:0px 0px 1px; }
div.log_body { padding:8px 8px 8px 150px; }
.age { white-space:nowrap; }
-span.age { position:relative; float:left; width:142px; font-style:italic; }
+a.title span.age { position:relative; float:left; width:142px; font-style:italic; }
div.log_link {
padding:0px 8px;
font-size:10px; font-family:sans-serif; font-style:normal;
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/templateutil.py Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,448 @@
+# templateutil.py - utility for template evaluation
+#
+# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+import types
+
+from .i18n import _
+from . import (
+ error,
+ pycompat,
+ util,
+)
+
+class ResourceUnavailable(error.Abort):
+ pass
+
+class TemplateNotFound(error.Abort):
+ pass
+
+class hybrid(object):
+ """Wrapper for list or dict to support legacy template
+
+ This class allows us to handle both:
+ - "{files}" (legacy command-line-specific list hack) and
+ - "{files % '{file}\n'}" (hgweb-style with inlining and function support)
+ and to access raw values:
+ - "{ifcontains(file, files, ...)}", "{ifcontains(key, extras, ...)}"
+ - "{get(extras, key)}"
+ - "{files|json}"
+ """
+
+ def __init__(self, gen, values, makemap, joinfmt, keytype=None):
+ if gen is not None:
+ self.gen = gen # generator or function returning generator
+ self._values = values
+ self._makemap = makemap
+ self.joinfmt = joinfmt
+ self.keytype = keytype # hint for 'x in y' where type(x) is unresolved
+ def gen(self):
+ """Default generator to stringify this as {join(self, ' ')}"""
+ for i, x in enumerate(self._values):
+ if i > 0:
+ yield ' '
+ yield self.joinfmt(x)
+ def itermaps(self):
+ makemap = self._makemap
+ for x in self._values:
+ yield makemap(x)
+ def __contains__(self, x):
+ return x in self._values
+ def __getitem__(self, key):
+ return self._values[key]
+ def __len__(self):
+ return len(self._values)
+ def __iter__(self):
+ return iter(self._values)
+ def __getattr__(self, name):
+ if name not in (r'get', r'items', r'iteritems', r'iterkeys',
+ r'itervalues', r'keys', r'values'):
+ raise AttributeError(name)
+ return getattr(self._values, name)
+
+class mappable(object):
+ """Wrapper for non-list/dict object to support map operation
+
+ This class allows us to handle both:
+ - "{manifest}"
+ - "{manifest % '{rev}:{node}'}"
+ - "{manifest.rev}"
+
+ Unlike a hybrid, this does not simulate the behavior of the underlying
+ value. Use unwrapvalue() or unwraphybrid() to obtain the inner object.
+ """
+
+ def __init__(self, gen, key, value, makemap):
+ if gen is not None:
+ self.gen = gen # generator or function returning generator
+ self._key = key
+ self._value = value # may be generator of strings
+ self._makemap = makemap
+
+ def gen(self):
+ yield pycompat.bytestr(self._value)
+
+ def tomap(self):
+ return self._makemap(self._key)
+
+ def itermaps(self):
+ yield self.tomap()
+
+def hybriddict(data, key='key', value='value', fmt=None, gen=None):
+ """Wrap data to support both dict-like and string-like operations"""
+ prefmt = pycompat.identity
+ if fmt is None:
+ fmt = '%s=%s'
+ prefmt = pycompat.bytestr
+ return hybrid(gen, data, lambda k: {key: k, value: data[k]},
+ lambda k: fmt % (prefmt(k), prefmt(data[k])))
+
+def hybridlist(data, name, fmt=None, gen=None):
+ """Wrap data to support both list-like and string-like operations"""
+ prefmt = pycompat.identity
+ if fmt is None:
+ fmt = '%s'
+ prefmt = pycompat.bytestr
+ return hybrid(gen, data, lambda x: {name: x}, lambda x: fmt % prefmt(x))
+
+def unwraphybrid(thing):
+ """Return an object which can be stringified possibly by using a legacy
+ template"""
+ gen = getattr(thing, 'gen', None)
+ if gen is None:
+ return thing
+ if callable(gen):
+ return gen()
+ return gen
+
+def unwrapvalue(thing):
+ """Move the inner value object out of the wrapper"""
+ if not util.safehasattr(thing, '_value'):
+ return thing
+ return thing._value
+
+def wraphybridvalue(container, key, value):
+ """Wrap an element of hybrid container to be mappable
+
+ The key is passed to the makemap function of the given container, which
+ should be an item generated by iter(container).
+ """
+ makemap = getattr(container, '_makemap', None)
+ if makemap is None:
+ return value
+ if util.safehasattr(value, '_makemap'):
+ # a nested hybrid list/dict, which has its own way of map operation
+ return value
+ return mappable(None, key, value, makemap)
+
+def compatdict(context, mapping, name, data, key='key', value='value',
+ fmt=None, plural=None, separator=' '):
+ """Wrap data like hybriddict(), but also supports old-style list template
+
+ This exists for backward compatibility with the old-style template. Use
+ hybriddict() for new template keywords.
+ """
+ c = [{key: k, value: v} for k, v in data.iteritems()]
+ t = context.resource(mapping, 'templ')
+ f = _showlist(name, c, t, mapping, plural, separator)
+ return hybriddict(data, key=key, value=value, fmt=fmt, gen=f)
+
+def compatlist(context, mapping, name, data, element=None, fmt=None,
+ plural=None, separator=' '):
+ """Wrap data like hybridlist(), but also supports old-style list template
+
+ This exists for backward compatibility with the old-style template. Use
+ hybridlist() for new template keywords.
+ """
+ t = context.resource(mapping, 'templ')
+ f = _showlist(name, data, t, mapping, plural, separator)
+ return hybridlist(data, name=element or name, fmt=fmt, gen=f)
+
+def _showlist(name, values, templ, mapping, plural=None, separator=' '):
+ '''expand set of values.
+ name is name of key in template map.
+ values is list of strings or dicts.
+ plural is plural of name, if not simply name + 's'.
+ separator is used to join values as a string
+
+ expansion works like this, given name 'foo'.
+
+ if values is empty, expand 'no_foos'.
+
+ if 'foo' not in template map, return values as a string,
+ joined by 'separator'.
+
+ expand 'start_foos'.
+
+ for each value, expand 'foo'. if 'last_foo' in template
+ map, expand it instead of 'foo' for last key.
+
+ expand 'end_foos'.
+ '''
+ if not plural:
+ plural = name + 's'
+ if not values:
+ noname = 'no_' + plural
+ if noname in templ:
+ yield templ.generate(noname, mapping)
+ return
+ if name not in templ:
+ if isinstance(values[0], bytes):
+ yield separator.join(values)
+ else:
+ for v in values:
+ r = dict(v)
+ r.update(mapping)
+ yield r
+ return
+ startname = 'start_' + plural
+ if startname in templ:
+ yield templ.generate(startname, mapping)
+ vmapping = mapping.copy()
+ def one(v, tag=name):
+ try:
+ vmapping.update(v)
+ # Python 2 raises ValueError if the type of v is wrong. Python
+ # 3 raises TypeError.
+ except (AttributeError, TypeError, ValueError):
+ try:
+ # Python 2 raises ValueError trying to destructure an e.g.
+ # bytes. Python 3 raises TypeError.
+ for a, b in v:
+ vmapping[a] = b
+ except (TypeError, ValueError):
+ vmapping[name] = v
+ return templ.generate(tag, vmapping)
+ lastname = 'last_' + name
+ if lastname in templ:
+ last = values.pop()
+ else:
+ last = None
+ for v in values:
+ yield one(v)
+ if last is not None:
+ yield one(last, tag=lastname)
+ endname = 'end_' + plural
+ if endname in templ:
+ yield templ.generate(endname, mapping)
+
+def stringify(thing):
+ """Turn values into bytes by converting into text and concatenating them"""
+ thing = unwraphybrid(thing)
+ if util.safehasattr(thing, '__iter__') and not isinstance(thing, bytes):
+ if isinstance(thing, str):
+ # This is only reachable on Python 3 (otherwise
+ # isinstance(thing, bytes) would have been true), and is
+ # here to prevent infinite recursion bugs on Python 3.
+ raise error.ProgrammingError(
+ 'stringify got unexpected unicode string: %r' % thing)
+ return "".join([stringify(t) for t in thing if t is not None])
+ if thing is None:
+ return ""
+ return pycompat.bytestr(thing)
+
+def findsymbolicname(arg):
+ """Find symbolic name for the given compiled expression; returns None
+ if nothing found reliably"""
+ while True:
+ func, data = arg
+ if func is runsymbol:
+ return data
+ elif func is runfilter:
+ arg = data[0]
+ else:
+ return None
+
+def evalrawexp(context, mapping, arg):
+ """Evaluate given argument as a bare template object which may require
+ further processing (such as folding generator of strings)"""
+ func, data = arg
+ return func(context, mapping, data)
+
+def evalfuncarg(context, mapping, arg):
+ """Evaluate given argument as value type"""
+ thing = evalrawexp(context, mapping, arg)
+ thing = unwrapvalue(thing)
+ # evalrawexp() may return string, generator of strings or arbitrary object
+ # such as date tuple, but filter does not want generator.
+ if isinstance(thing, types.GeneratorType):
+ thing = stringify(thing)
+ return thing
+
+def evalboolean(context, mapping, arg):
+ """Evaluate given argument as boolean, but also takes boolean literals"""
+ func, data = arg
+ if func is runsymbol:
+ thing = func(context, mapping, data, default=None)
+ if thing is None:
+ # not a template keyword, takes as a boolean literal
+ thing = util.parsebool(data)
+ else:
+ thing = func(context, mapping, data)
+ thing = unwrapvalue(thing)
+ if isinstance(thing, bool):
+ return thing
+ # other objects are evaluated as strings, which means 0 is True, but
+ # empty dict/list should be False as they are expected to be ''
+ return bool(stringify(thing))
+
+def evalinteger(context, mapping, arg, err=None):
+ v = evalfuncarg(context, mapping, arg)
+ try:
+ return int(v)
+ except (TypeError, ValueError):
+ raise error.ParseError(err or _('not an integer'))
+
+def evalstring(context, mapping, arg):
+ return stringify(evalrawexp(context, mapping, arg))
+
+def evalstringliteral(context, mapping, arg):
+ """Evaluate given argument as string template, but returns symbol name
+ if it is unknown"""
+ func, data = arg
+ if func is runsymbol:
+ thing = func(context, mapping, data, default=data)
+ else:
+ thing = func(context, mapping, data)
+ return stringify(thing)
+
+_evalfuncbytype = {
+ bool: evalboolean,
+ bytes: evalstring,
+ int: evalinteger,
+}
+
+def evalastype(context, mapping, arg, typ):
+ """Evaluate given argument and coerce its type"""
+ try:
+ f = _evalfuncbytype[typ]
+ except KeyError:
+ raise error.ProgrammingError('invalid type specified: %r' % typ)
+ return f(context, mapping, arg)
+
+def runinteger(context, mapping, data):
+ return int(data)
+
+def runstring(context, mapping, data):
+ return data
+
+def _recursivesymbolblocker(key):
+ def showrecursion(**args):
+ raise error.Abort(_("recursive reference '%s' in template") % key)
+ return showrecursion
+
+def runsymbol(context, mapping, key, default=''):
+ v = context.symbol(mapping, key)
+ if v is None:
+ # put poison to cut recursion. we can't move this to parsing phase
+ # because "x = {x}" is allowed if "x" is a keyword. (issue4758)
+ safemapping = mapping.copy()
+ safemapping[key] = _recursivesymbolblocker(key)
+ try:
+ v = context.process(key, safemapping)
+ except TemplateNotFound:
+ v = default
+ if callable(v) and getattr(v, '_requires', None) is None:
+ # old templatekw: expand all keywords and resources
+ props = {k: f(context, mapping, k)
+ for k, f in context._resources.items()}
+ props.update(mapping)
+ return v(**pycompat.strkwargs(props))
+ if callable(v):
+ # new templatekw
+ try:
+ return v(context, mapping)
+ except ResourceUnavailable:
+ # unsupported keyword is mapped to empty just like unknown keyword
+ return None
+ return v
+
+def runtemplate(context, mapping, template):
+ for arg in template:
+ yield evalrawexp(context, mapping, arg)
+
+def runfilter(context, mapping, data):
+ arg, filt = data
+ thing = evalfuncarg(context, mapping, arg)
+ try:
+ return filt(thing)
+ except (ValueError, AttributeError, TypeError):
+ sym = findsymbolicname(arg)
+ if sym:
+ msg = (_("template filter '%s' is not compatible with keyword '%s'")
+ % (pycompat.sysbytes(filt.__name__), sym))
+ else:
+ msg = (_("incompatible use of template filter '%s'")
+ % pycompat.sysbytes(filt.__name__))
+ raise error.Abort(msg)
+
+def runmap(context, mapping, data):
+ darg, targ = data
+ d = evalrawexp(context, mapping, darg)
+ if util.safehasattr(d, 'itermaps'):
+ diter = d.itermaps()
+ else:
+ try:
+ diter = iter(d)
+ except TypeError:
+ sym = findsymbolicname(darg)
+ if sym:
+ raise error.ParseError(_("keyword '%s' is not iterable") % sym)
+ else:
+ raise error.ParseError(_("%r is not iterable") % d)
+
+ for i, v in enumerate(diter):
+ lm = mapping.copy()
+ lm['index'] = i
+ if isinstance(v, dict):
+ lm.update(v)
+ lm['originalnode'] = mapping.get('node')
+ yield evalrawexp(context, lm, targ)
+ else:
+ # v is not an iterable of dicts, this happen when 'key'
+ # has been fully expanded already and format is useless.
+ # If so, return the expanded value.
+ yield v
+
+def runmember(context, mapping, data):
+ darg, memb = data
+ d = evalrawexp(context, mapping, darg)
+ if util.safehasattr(d, 'tomap'):
+ lm = mapping.copy()
+ lm.update(d.tomap())
+ return runsymbol(context, lm, memb)
+ if util.safehasattr(d, 'get'):
+ return getdictitem(d, memb)
+
+ sym = findsymbolicname(darg)
+ if sym:
+ raise error.ParseError(_("keyword '%s' has no member") % sym)
+ else:
+ raise error.ParseError(_("%r has no member") % pycompat.bytestr(d))
+
+def runnegate(context, mapping, data):
+ data = evalinteger(context, mapping, data,
+ _('negation needs an integer argument'))
+ return -data
+
+def runarithmetic(context, mapping, data):
+ func, left, right = data
+ left = evalinteger(context, mapping, left,
+ _('arithmetic only defined on integers'))
+ right = evalinteger(context, mapping, right,
+ _('arithmetic only defined on integers'))
+ try:
+ return func(left, right)
+ except ZeroDivisionError:
+ raise error.Abort(_('division by zero is not defined'))
+
+def getdictitem(dictarg, key):
+ val = dictarg.get(key)
+ if val is None:
+ return
+ return wraphybridvalue(dictarg, key, val)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/thirdparty/xdiff/xdiff.h Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,91 @@
+/*
+ * LibXDiff by Davide Libenzi ( File Differential Library )
+ * Copyright (C) 2003 Davide Libenzi
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ * Davide Libenzi <davidel@xmailserver.org>
+ *
+ */
+
+#if !defined(XDIFF_H)
+#define XDIFF_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* #ifdef __cplusplus */
+
+#include <stddef.h> /* size_t */
+
+#if !defined(_MSC_VER) || _MSC_VER >= 1600
+#include <stdint.h>
+#else
+/* prior to Visual Studio 2010 */
+typedef long long int64_t;
+typedef unsigned long long uint64_t;
+#endif
+
+/* xpparm_t.flags */
+#define XDF_NEED_MINIMAL (1 << 0)
+
+#define XDF_INDENT_HEURISTIC (1 << 23)
+
+/* emit bdiff-style "matched" (a1, a2, b1, b2) hunks instead of "different"
+ * (a1, a2 - a1, b1, b2 - b1) hunks */
+#define XDL_EMIT_BDIFFHUNK (1 << 4)
+
+typedef struct s_mmfile {
+ char *ptr;
+ int64_t size;
+} mmfile_t;
+
+typedef struct s_mmbuffer {
+ char *ptr;
+ int64_t size;
+} mmbuffer_t;
+
+typedef struct s_xpparam {
+ uint64_t flags;
+} xpparam_t;
+
+typedef struct s_xdemitcb {
+ void *priv;
+} xdemitcb_t;
+
+typedef int (*xdl_emit_hunk_consume_func_t)(int64_t start_a, int64_t count_a,
+ int64_t start_b, int64_t count_b,
+ void *cb_data);
+
+typedef struct s_xdemitconf {
+ uint64_t flags;
+ xdl_emit_hunk_consume_func_t hunk_func;
+} xdemitconf_t;
+
+
+#define xdl_malloc(x) malloc(x)
+#define xdl_free(ptr) free(ptr)
+#define xdl_realloc(ptr,x) realloc(ptr,x)
+
+void *xdl_mmfile_first(mmfile_t *mmf, int64_t *size);
+int64_t xdl_mmfile_size(mmfile_t *mmf);
+
+int xdl_diff(mmfile_t *mf1, mmfile_t *mf2, xpparam_t const *xpp,
+ xdemitconf_t const *xecfg, xdemitcb_t *ecb);
+
+#ifdef __cplusplus
+}
+#endif /* #ifdef __cplusplus */
+
+#endif /* #if !defined(XDIFF_H) */
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/thirdparty/xdiff/xdiffi.c Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,1130 @@
+/*
+ * LibXDiff by Davide Libenzi ( File Differential Library )
+ * Copyright (C) 2003 Davide Libenzi
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ * Davide Libenzi <davidel@xmailserver.org>
+ *
+ */
+
+#include "xinclude.h"
+
+
+
+#define XDL_MAX_COST_MIN 256
+#define XDL_HEUR_MIN_COST 256
+#define XDL_LINE_MAX (long)((1UL << (CHAR_BIT * sizeof(long) - 1)) - 1)
+#define XDL_SNAKE_CNT 20
+#define XDL_K_HEUR 4
+
+/* VC 2008 doesn't know about the inline keyword. */
+#if defined(_MSC_VER)
+#define inline __forceinline
+#endif
+
+
+typedef struct s_xdpsplit {
+ int64_t i1, i2;
+ int min_lo, min_hi;
+} xdpsplit_t;
+
+
+
+
+static int64_t xdl_split(uint64_t const *ha1, int64_t off1, int64_t lim1,
+ uint64_t const *ha2, int64_t off2, int64_t lim2,
+ int64_t *kvdf, int64_t *kvdb, int need_min, xdpsplit_t *spl,
+ xdalgoenv_t *xenv);
+static xdchange_t *xdl_add_change(xdchange_t *xscr, int64_t i1, int64_t i2, int64_t chg1, int64_t chg2);
+
+
+
+
+
+/*
+ * See "An O(ND) Difference Algorithm and its Variations", by Eugene Myers.
+ * Basically considers a "box" (off1, off2, lim1, lim2) and scan from both
+ * the forward diagonal starting from (off1, off2) and the backward diagonal
+ * starting from (lim1, lim2). If the K values on the same diagonal cross,
+ * returns the furthest point of reach. We might end up with expensive
+ * cases when using this algorithm in full, so a little bit of heuristic is
+ * needed to cut the search and to return a suboptimal point.
+ */
+static int64_t xdl_split(uint64_t const *ha1, int64_t off1, int64_t lim1,
+ uint64_t const *ha2, int64_t off2, int64_t lim2,
+ int64_t *kvdf, int64_t *kvdb, int need_min, xdpsplit_t *spl,
+ xdalgoenv_t *xenv) {
+ int64_t dmin = off1 - lim2, dmax = lim1 - off2;
+ int64_t fmid = off1 - off2, bmid = lim1 - lim2;
+ int64_t odd = (fmid - bmid) & 1;
+ int64_t fmin = fmid, fmax = fmid;
+ int64_t bmin = bmid, bmax = bmid;
+ int64_t ec, d, i1, i2, prev1, best, dd, v, k;
+
+ /*
+ * Set initial diagonal values for both forward and backward path.
+ */
+ kvdf[fmid] = off1;
+ kvdb[bmid] = lim1;
+
+ for (ec = 1;; ec++) {
+ int got_snake = 0;
+
+ /*
+ * We need to extend the diagonal "domain" by one. If the next
+ * value exits the box boundaries we need to change it in the
+ * opposite direction because (max - min) must be a power of two.
+ * Also we initialize the external K value to -1 so that we can
+ * avoid extra conditions check inside the core loop.
+ */
+ if (fmin > dmin)
+ kvdf[--fmin - 1] = -1;
+ else
+ ++fmin;
+ if (fmax < dmax)
+ kvdf[++fmax + 1] = -1;
+ else
+ --fmax;
+
+ for (d = fmax; d >= fmin; d -= 2) {
+ if (kvdf[d - 1] >= kvdf[d + 1])
+ i1 = kvdf[d - 1] + 1;
+ else
+ i1 = kvdf[d + 1];
+ prev1 = i1;
+ i2 = i1 - d;
+ for (; i1 < lim1 && i2 < lim2 && ha1[i1] == ha2[i2]; i1++, i2++);
+ if (i1 - prev1 > xenv->snake_cnt)
+ got_snake = 1;
+ kvdf[d] = i1;
+ if (odd && bmin <= d && d <= bmax && kvdb[d] <= i1) {
+ spl->i1 = i1;
+ spl->i2 = i2;
+ spl->min_lo = spl->min_hi = 1;
+ return ec;
+ }
+ }
+
+ /*
+ * We need to extend the diagonal "domain" by one. If the next
+ * value exits the box boundaries we need to change it in the
+ * opposite direction because (max - min) must be a power of two.
+ * Also we initialize the external K value to -1 so that we can
+ * avoid extra conditions check inside the core loop.
+ */
+ if (bmin > dmin)
+ kvdb[--bmin - 1] = XDL_LINE_MAX;
+ else
+ ++bmin;
+ if (bmax < dmax)
+ kvdb[++bmax + 1] = XDL_LINE_MAX;
+ else
+ --bmax;
+
+ for (d = bmax; d >= bmin; d -= 2) {
+ if (kvdb[d - 1] < kvdb[d + 1])
+ i1 = kvdb[d - 1];
+ else
+ i1 = kvdb[d + 1] - 1;
+ prev1 = i1;
+ i2 = i1 - d;
+ for (; i1 > off1 && i2 > off2 && ha1[i1 - 1] == ha2[i2 - 1]; i1--, i2--);
+ if (prev1 - i1 > xenv->snake_cnt)
+ got_snake = 1;
+ kvdb[d] = i1;
+ if (!odd && fmin <= d && d <= fmax && i1 <= kvdf[d]) {
+ spl->i1 = i1;
+ spl->i2 = i2;
+ spl->min_lo = spl->min_hi = 1;
+ return ec;
+ }
+ }
+
+ if (need_min)
+ continue;
+
+ /*
+ * If the edit cost is above the heuristic trigger and if
+ * we got a good snake, we sample current diagonals to see
+ * if some of them have reached an "interesting" path. Our
+ * measure is a function of the distance from the diagonal
+ * corner (i1 + i2) penalized with the distance from the
+ * mid diagonal itself. If this value is above the current
+ * edit cost times a magic factor (XDL_K_HEUR) we consider
+ * it interesting.
+ */
+ if (got_snake && ec > xenv->heur_min) {
+ for (best = 0, d = fmax; d >= fmin; d -= 2) {
+ dd = d > fmid ? d - fmid: fmid - d;
+ i1 = kvdf[d];
+ i2 = i1 - d;
+ v = (i1 - off1) + (i2 - off2) - dd;
+
+ if (v > XDL_K_HEUR * ec && v > best &&
+ off1 + xenv->snake_cnt <= i1 && i1 < lim1 &&
+ off2 + xenv->snake_cnt <= i2 && i2 < lim2) {
+ for (k = 1; ha1[i1 - k] == ha2[i2 - k]; k++)
+ if (k == xenv->snake_cnt) {
+ best = v;
+ spl->i1 = i1;
+ spl->i2 = i2;
+ break;
+ }
+ }
+ }
+ if (best > 0) {
+ spl->min_lo = 1;
+ spl->min_hi = 0;
+ return ec;
+ }
+
+ for (best = 0, d = bmax; d >= bmin; d -= 2) {
+ dd = d > bmid ? d - bmid: bmid - d;
+ i1 = kvdb[d];
+ i2 = i1 - d;
+ v = (lim1 - i1) + (lim2 - i2) - dd;
+
+ if (v > XDL_K_HEUR * ec && v > best &&
+ off1 < i1 && i1 <= lim1 - xenv->snake_cnt &&
+ off2 < i2 && i2 <= lim2 - xenv->snake_cnt) {
+ for (k = 0; ha1[i1 + k] == ha2[i2 + k]; k++)
+ if (k == xenv->snake_cnt - 1) {
+ best = v;
+ spl->i1 = i1;
+ spl->i2 = i2;
+ break;
+ }
+ }
+ }
+ if (best > 0) {
+ spl->min_lo = 0;
+ spl->min_hi = 1;
+ return ec;
+ }
+ }
+
+ /*
+ * Enough is enough. We spent too much time here and now we collect
+ * the furthest reaching path using the (i1 + i2) measure.
+ */
+ if (ec >= xenv->mxcost) {
+ int64_t fbest, fbest1, bbest, bbest1;
+
+ fbest = fbest1 = -1;
+ for (d = fmax; d >= fmin; d -= 2) {
+ i1 = XDL_MIN(kvdf[d], lim1);
+ i2 = i1 - d;
+ if (lim2 < i2)
+ i1 = lim2 + d, i2 = lim2;
+ if (fbest < i1 + i2) {
+ fbest = i1 + i2;
+ fbest1 = i1;
+ }
+ }
+
+ bbest = bbest1 = XDL_LINE_MAX;
+ for (d = bmax; d >= bmin; d -= 2) {
+ i1 = XDL_MAX(off1, kvdb[d]);
+ i2 = i1 - d;
+ if (i2 < off2)
+ i1 = off2 + d, i2 = off2;
+ if (i1 + i2 < bbest) {
+ bbest = i1 + i2;
+ bbest1 = i1;
+ }
+ }
+
+ if ((lim1 + lim2) - bbest < fbest - (off1 + off2)) {
+ spl->i1 = fbest1;
+ spl->i2 = fbest - fbest1;
+ spl->min_lo = 1;
+ spl->min_hi = 0;
+ } else {
+ spl->i1 = bbest1;
+ spl->i2 = bbest - bbest1;
+ spl->min_lo = 0;
+ spl->min_hi = 1;
+ }
+ return ec;
+ }
+ }
+}
+
+
+/*
+ * Rule: "Divide et Impera". Recursively split the box in sub-boxes by calling
+ * the box splitting function. Note that the real job (marking changed lines)
+ * is done in the two boundary reaching checks.
+ */
+int xdl_recs_cmp(diffdata_t *dd1, int64_t off1, int64_t lim1,
+ diffdata_t *dd2, int64_t off2, int64_t lim2,
+ int64_t *kvdf, int64_t *kvdb, int need_min, xdalgoenv_t *xenv) {
+ uint64_t const *ha1 = dd1->ha, *ha2 = dd2->ha;
+
+ /*
+ * Shrink the box by walking through each diagonal snake (SW and NE).
+ */
+ for (; off1 < lim1 && off2 < lim2 && ha1[off1] == ha2[off2]; off1++, off2++);
+ for (; off1 < lim1 && off2 < lim2 && ha1[lim1 - 1] == ha2[lim2 - 1]; lim1--, lim2--);
+
+ /*
+ * If one dimension is empty, then all records on the other one must
+ * be obviously changed.
+ */
+ if (off1 == lim1) {
+ char *rchg2 = dd2->rchg;
+ int64_t *rindex2 = dd2->rindex;
+
+ for (; off2 < lim2; off2++)
+ rchg2[rindex2[off2]] = 1;
+ } else if (off2 == lim2) {
+ char *rchg1 = dd1->rchg;
+ int64_t *rindex1 = dd1->rindex;
+
+ for (; off1 < lim1; off1++)
+ rchg1[rindex1[off1]] = 1;
+ } else {
+ xdpsplit_t spl;
+ spl.i1 = spl.i2 = 0;
+
+ /*
+ * Divide ...
+ */
+ if (xdl_split(ha1, off1, lim1, ha2, off2, lim2, kvdf, kvdb,
+ need_min, &spl, xenv) < 0) {
+
+ return -1;
+ }
+
+ /*
+ * ... et Impera.
+ */
+ if (xdl_recs_cmp(dd1, off1, spl.i1, dd2, off2, spl.i2,
+ kvdf, kvdb, spl.min_lo, xenv) < 0 ||
+ xdl_recs_cmp(dd1, spl.i1, lim1, dd2, spl.i2, lim2,
+ kvdf, kvdb, spl.min_hi, xenv) < 0) {
+
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+
+int xdl_do_diff(mmfile_t *mf1, mmfile_t *mf2, xpparam_t const *xpp,
+ xdfenv_t *xe) {
+ int64_t ndiags;
+ int64_t *kvd, *kvdf, *kvdb;
+ xdalgoenv_t xenv;
+ diffdata_t dd1, dd2;
+
+ if (xdl_prepare_env(mf1, mf2, xpp, xe) < 0) {
+
+ return -1;
+ }
+
+ /*
+ * Allocate and setup K vectors to be used by the differential algorithm.
+ * One is to store the forward path and one to store the backward path.
+ */
+ ndiags = xe->xdf1.nreff + xe->xdf2.nreff + 3;
+ if (!(kvd = (int64_t *) xdl_malloc((2 * ndiags + 2) * sizeof(int64_t)))) {
+
+ xdl_free_env(xe);
+ return -1;
+ }
+ kvdf = kvd;
+ kvdb = kvdf + ndiags;
+ kvdf += xe->xdf2.nreff + 1;
+ kvdb += xe->xdf2.nreff + 1;
+
+ xenv.mxcost = xdl_bogosqrt(ndiags);
+ if (xenv.mxcost < XDL_MAX_COST_MIN)
+ xenv.mxcost = XDL_MAX_COST_MIN;
+ xenv.snake_cnt = XDL_SNAKE_CNT;
+ xenv.heur_min = XDL_HEUR_MIN_COST;
+
+ dd1.nrec = xe->xdf1.nreff;
+ dd1.ha = xe->xdf1.ha;
+ dd1.rchg = xe->xdf1.rchg;
+ dd1.rindex = xe->xdf1.rindex;
+ dd2.nrec = xe->xdf2.nreff;
+ dd2.ha = xe->xdf2.ha;
+ dd2.rchg = xe->xdf2.rchg;
+ dd2.rindex = xe->xdf2.rindex;
+
+ if (xdl_recs_cmp(&dd1, 0, dd1.nrec, &dd2, 0, dd2.nrec,
+ kvdf, kvdb, (xpp->flags & XDF_NEED_MINIMAL) != 0, &xenv) < 0) {
+
+ xdl_free(kvd);
+ xdl_free_env(xe);
+ return -1;
+ }
+
+ xdl_free(kvd);
+
+ return 0;
+}
+
+
+static xdchange_t *xdl_add_change(xdchange_t *xscr, int64_t i1, int64_t i2, int64_t chg1, int64_t chg2) {
+ xdchange_t *xch;
+
+ if (!(xch = (xdchange_t *) xdl_malloc(sizeof(xdchange_t))))
+ return NULL;
+
+ xch->next = xscr;
+ xch->i1 = i1;
+ xch->i2 = i2;
+ xch->chg1 = chg1;
+ xch->chg2 = chg2;
+ xch->ignore = 0;
+
+ return xch;
+}
+
+
+static int recs_match(xrecord_t *rec1, xrecord_t *rec2)
+{
+ return (rec1->ha == rec2->ha &&
+ xdl_recmatch(rec1->ptr, rec1->size,
+ rec2->ptr, rec2->size));
+}
+
+/*
+ * If a line is indented more than this, get_indent() just returns this value.
+ * This avoids having to do absurd amounts of work for data that are not
+ * human-readable text, and also ensures that the output of get_indent fits within
+ * an int.
+ */
+#define MAX_INDENT 200
+
+/*
+ * Return the amount of indentation of the specified line, treating TAB as 8
+ * columns. Return -1 if line is empty or contains only whitespace. Clamp the
+ * output value at MAX_INDENT.
+ */
+static int get_indent(xrecord_t *rec)
+{
+ int64_t i;
+ int ret = 0;
+
+ for (i = 0; i < rec->size; i++) {
+ char c = rec->ptr[i];
+
+ if (!XDL_ISSPACE(c))
+ return ret;
+ else if (c == ' ')
+ ret += 1;
+ else if (c == '\t')
+ ret += 8 - ret % 8;
+ /* ignore other whitespace characters */
+
+ if (ret >= MAX_INDENT)
+ return MAX_INDENT;
+ }
+
+ /* The line contains only whitespace. */
+ return -1;
+}
+
+/*
+ * If more than this number of consecutive blank rows are found, just return this
+ * value. This avoids requiring O(N^2) work for pathological cases, and also
+ * ensures that the output of score_split fits in an int.
+ */
+#define MAX_BLANKS 20
+
+/* Characteristics measured about a hypothetical split position. */
+struct split_measurement {
+ /*
+ * Is the split at the end of the file (aside from any blank lines)?
+ */
+ int end_of_file;
+
+ /*
+ * How much is the line immediately following the split indented (or -1 if
+ * the line is blank):
+ */
+ int indent;
+
+ /*
+ * How many consecutive lines above the split are blank?
+ */
+ int pre_blank;
+
+ /*
+ * How much is the nearest non-blank line above the split indented (or -1
+ * if there is no such line)?
+ */
+ int pre_indent;
+
+ /*
+ * How many lines after the line following the split are blank?
+ */
+ int post_blank;
+
+ /*
+ * How much is the nearest non-blank line after the line following the
+ * split indented (or -1 if there is no such line)?
+ */
+ int post_indent;
+};
+
+struct split_score {
+ /* The effective indent of this split (smaller is preferred). */
+ int effective_indent;
+
+ /* Penalty for this split (smaller is preferred). */
+ int penalty;
+};
+
+/*
+ * Fill m with information about a hypothetical split of xdf above line split.
+ */
+static void measure_split(const xdfile_t *xdf, int64_t split,
+ struct split_measurement *m)
+{
+ int64_t i;
+
+ if (split >= xdf->nrec) {
+ m->end_of_file = 1;
+ m->indent = -1;
+ } else {
+ m->end_of_file = 0;
+ m->indent = get_indent(xdf->recs[split]);
+ }
+
+ m->pre_blank = 0;
+ m->pre_indent = -1;
+ for (i = split - 1; i >= 0; i--) {
+ m->pre_indent = get_indent(xdf->recs[i]);
+ if (m->pre_indent != -1)
+ break;
+ m->pre_blank += 1;
+ if (m->pre_blank == MAX_BLANKS) {
+ m->pre_indent = 0;
+ break;
+ }
+ }
+
+ m->post_blank = 0;
+ m->post_indent = -1;
+ for (i = split + 1; i < xdf->nrec; i++) {
+ m->post_indent = get_indent(xdf->recs[i]);
+ if (m->post_indent != -1)
+ break;
+ m->post_blank += 1;
+ if (m->post_blank == MAX_BLANKS) {
+ m->post_indent = 0;
+ break;
+ }
+ }
+}
+
+/*
+ * The empirically-determined weight factors used by score_split() below.
+ * Larger values mean that the position is a less favorable place to split.
+ *
+ * Note that scores are only ever compared against each other, so multiplying
+ * all of these weight/penalty values by the same factor wouldn't change the
+ * heuristic's behavior. Still, we need to set that arbitrary scale *somehow*.
+ * In practice, these numbers are chosen to be large enough that they can be
+ * adjusted relative to each other with sufficient precision despite using
+ * integer math.
+ */
+
+/* Penalty if there are no non-blank lines before the split */
+#define START_OF_FILE_PENALTY 1
+
+/* Penalty if there are no non-blank lines after the split */
+#define END_OF_FILE_PENALTY 21
+
+/* Multiplier for the number of blank lines around the split */
+#define TOTAL_BLANK_WEIGHT (-30)
+
+/* Multiplier for the number of blank lines after the split */
+#define POST_BLANK_WEIGHT 6
+
+/*
+ * Penalties applied if the line is indented more than its predecessor
+ */
+#define RELATIVE_INDENT_PENALTY (-4)
+#define RELATIVE_INDENT_WITH_BLANK_PENALTY 10
+
+/*
+ * Penalties applied if the line is indented less than both its predecessor and
+ * its successor
+ */
+#define RELATIVE_OUTDENT_PENALTY 24
+#define RELATIVE_OUTDENT_WITH_BLANK_PENALTY 17
+
+/*
+ * Penalties applied if the line is indented less than its predecessor but not
+ * less than its successor
+ */
+#define RELATIVE_DEDENT_PENALTY 23
+#define RELATIVE_DEDENT_WITH_BLANK_PENALTY 17
+
+/*
+ * We only consider whether the sum of the effective indents for splits are
+ * less than (-1), equal to (0), or greater than (+1) each other. The resulting
+ * value is multiplied by the following weight and combined with the penalty to
+ * determine the better of two scores.
+ */
+#define INDENT_WEIGHT 60
+
+/*
+ * Compute a badness score for the hypothetical split whose measurements are
+ * stored in m. The weight factors were determined empirically using the tools and
+ * corpus described in
+ *
+ * https://github.com/mhagger/diff-slider-tools
+ *
+ * Also see that project if you want to improve the weights based on, for example,
+ * a larger or more diverse corpus.
+ *
+ * The result is *accumulated* into s, so the caller can add the scores of
+ * the two splits implied by one shift into a single split_score.
+ */
+static void score_add_split(const struct split_measurement *m, struct split_score *s)
+{
+ /*
+ * A place to accumulate penalty factors (positive makes this index more
+ * favored):
+ */
+ int post_blank, total_blank, indent, any_blanks;
+
+ if (m->pre_indent == -1 && m->pre_blank == 0)
+ s->penalty += START_OF_FILE_PENALTY;
+
+ if (m->end_of_file)
+ s->penalty += END_OF_FILE_PENALTY;
+
+ /*
+ * Set post_blank to the number of blank lines following the split,
+ * including the line immediately after the split:
+ */
+ post_blank = (m->indent == -1) ? 1 + m->post_blank : 0;
+ total_blank = m->pre_blank + post_blank;
+
+ /* Penalties based on nearby blank lines: */
+ s->penalty += TOTAL_BLANK_WEIGHT * total_blank;
+ s->penalty += POST_BLANK_WEIGHT * post_blank;
+
+ /* If the line after the split is blank, fall back to the next one: */
+ if (m->indent != -1)
+ indent = m->indent;
+ else
+ indent = m->post_indent;
+
+ any_blanks = (total_blank != 0);
+
+ /* Note that the effective indent is -1 at the end of the file: */
+ s->effective_indent += indent;
+
+ if (indent == -1) {
+ /* No additional adjustments needed. */
+ } else if (m->pre_indent == -1) {
+ /* No additional adjustments needed. */
+ } else if (indent > m->pre_indent) {
+ /*
+ * The line is indented more than its predecessor.
+ */
+ s->penalty += any_blanks ?
+ RELATIVE_INDENT_WITH_BLANK_PENALTY :
+ RELATIVE_INDENT_PENALTY;
+ } else if (indent == m->pre_indent) {
+ /*
+ * The line has the same indentation level as its predecessor.
+ * No additional adjustments needed.
+ */
+ } else {
+ /*
+ * The line is indented less than its predecessor. It could be
+ * the block terminator of the previous block, but it could
+ * also be the start of a new block (e.g., an "else" block, or
+ * maybe the previous block didn't have a block terminator).
+ * Try to distinguish those cases based on what comes next:
+ */
+ if (m->post_indent != -1 && m->post_indent > indent) {
+ /*
+ * The following line is indented more. So it is likely
+ * that this line is the start of a block.
+ */
+ s->penalty += any_blanks ?
+ RELATIVE_OUTDENT_WITH_BLANK_PENALTY :
+ RELATIVE_OUTDENT_PENALTY;
+ } else {
+ /*
+ * That was probably the end of a block.
+ */
+ s->penalty += any_blanks ?
+ RELATIVE_DEDENT_WITH_BLANK_PENALTY :
+ RELATIVE_DEDENT_PENALTY;
+ }
+ }
+}
+
+/*
+ * Compare two split scores; negative means s1 is the better (lower) score.
+ * Indent comparison only contributes its sign, scaled by INDENT_WEIGHT.
+ */
+static int score_cmp(struct split_score *s1, struct split_score *s2)
+{
+ int indent_order;
+
+ if (s1->effective_indent > s2->effective_indent)
+ indent_order = 1;
+ else if (s1->effective_indent < s2->effective_indent)
+ indent_order = -1;
+ else
+ indent_order = 0;
+
+ return INDENT_WEIGHT * indent_order + (s1->penalty - s2->penalty);
+}
+
+/*
+ * Represent a group of changed lines in an xdfile_t (i.e., a contiguous group
+ * of lines that was inserted or deleted from the corresponding version of the
+ * file). We consider there to be such a group at the beginning of the file, at
+ * the end of the file, and between any two unchanged lines, though most such
+ * groups will usually be empty.
+ *
+ * Both fields are line indices into the xdfile_t's recs/rchg arrays.
+ *
+ * If the first line in a group is equal to the line following the group, then
+ * the group can be slid down. Similarly, if the last line in a group is equal
+ * to the line preceding the group, then the group can be slid up. See
+ * group_slide_down() and group_slide_up().
+ *
+ * Note that loops that are testing for changed lines in xdf->rchg do not need
+ * index bounding since the array is prepared with a zero at position -1 and N.
+ */
+struct xdlgroup {
+ /*
+ * The index of the first changed line in the group, or the index of
+ * the unchanged line above which the (empty) group is located.
+ */
+ int64_t start;
+
+ /*
+ * The index of the first unchanged line after the group. For an empty
+ * group, end is equal to start.
+ */
+ int64_t end;
+};
+
+/*
+ * Initialize g to point at the first group in xdf (possibly empty, if the
+ * file does not start with changed lines).
+ */
+static void group_init(xdfile_t *xdf, struct xdlgroup *g)
+{
+ g->start = 0;
+ for (g->end = 0; xdf->rchg[g->end]; g->end++)
+ ;
+}
+
+/*
+ * Move g to describe the next (possibly empty) group in xdf and return 0. If g
+ * is already at the end of the file, do nothing and return -1.
+ */
+static inline int group_next(xdfile_t *xdf, struct xdlgroup *g)
+{
+ if (g->end == xdf->nrec)
+ return -1;
+
+ /* Skip the unchanged line, then extend over any changed run: */
+ g->start = g->end + 1;
+ g->end = g->start;
+ while (xdf->rchg[g->end])
+ g->end++;
+
+ return 0;
+}
+
+/*
+ * Move g to describe the previous (possibly empty) group in xdf and return 0.
+ * If g is already at the beginning of the file, do nothing and return -1.
+ */
+static inline int group_previous(xdfile_t *xdf, struct xdlgroup *g)
+{
+ if (g->start == 0)
+ return -1;
+
+ /* Step over the unchanged line, then extend back over changed lines: */
+ g->end = g->start - 1;
+ g->start = g->end;
+ while (xdf->rchg[g->start - 1])
+ g->start--;
+
+ return 0;
+}
+
+/*
+ * If g can be slid toward the end of the file, do so, and if it bumps into a
+ * following group, expand this group to include it. Return 0 on success or -1
+ * if g cannot be slid down.
+ */
+static int group_slide_down(xdfile_t *xdf, struct xdlgroup *g)
+{
+ /* Sliding requires the group's first line to match the line below it: */
+ if (g->end >= xdf->nrec ||
+ !recs_match(xdf->recs[g->start], xdf->recs[g->end]))
+ return -1;
+
+ xdf->rchg[g->start++] = 0;
+ xdf->rchg[g->end++] = 1;
+
+ /* Absorb a following group if we now touch it: */
+ while (xdf->rchg[g->end])
+ g->end++;
+
+ return 0;
+}
+
+/*
+ * If g can be slid toward the beginning of the file, do so, and if it bumps
+ * into a previous group, expand this group to include it. Return 0 on success
+ * or -1 if g cannot be slid up.
+ */
+static int group_slide_up(xdfile_t *xdf, struct xdlgroup *g)
+{
+ /* Sliding requires the group's last line to match the line above it: */
+ if (g->start <= 0 ||
+ !recs_match(xdf->recs[g->start - 1], xdf->recs[g->end - 1]))
+ return -1;
+
+ xdf->rchg[--g->start] = 1;
+ xdf->rchg[--g->end] = 0;
+
+ /* Absorb a preceding group if we now touch it: */
+ while (xdf->rchg[g->start - 1])
+ g->start--;
+
+ return 0;
+}
+
+/*
+ * Report a broken internal invariant and abort the process. Never returns;
+ * used by xdl_change_compact() when its two group cursors fall out of sync.
+ */
+static void xdl_bug(const char *msg)
+{
+ fprintf(stderr, "BUG: %s\n", msg);
+ exit(1);
+}
+
+/*
+ * For indentation heuristic, skip searching for better slide position after
+ * checking MAX_BORING lines without finding an improvement. This defends the
+ * indentation heuristic logic against pathological cases. The value is not
+ * picked scientifically but should be good enough.
+ */
+#define MAX_BORING 100
+
+/*
+ * Move back and forward change groups for a consistent and pretty diff output.
+ * This also helps in finding joinable change groups and reducing the diff
+ * size.
+ *
+ * xdf is the file whose change groups are shifted; xdfo is the other file of
+ * the pair, whose group cursor is kept in lockstep so that alignment between
+ * the two files can be detected. Always returns 0; a broken sync invariant
+ * aborts via xdl_bug().
+ */
+int xdl_change_compact(xdfile_t *xdf, xdfile_t *xdfo, int64_t flags) {
+ struct xdlgroup g, go;
+ int64_t earliest_end, end_matching_other;
+ int64_t groupsize;
+
+ group_init(xdf, &g);
+ group_init(xdfo, &go);
+
+ while (1) {
+ /* If the group is empty in the to-be-compacted file, skip it: */
+ if (g.end == g.start)
+ goto next;
+
+ /*
+ * Now shift the change up and then down as far as possible in
+ * each direction. If it bumps into any other changes, merge them.
+ */
+ do {
+ groupsize = g.end - g.start;
+
+ /*
+ * Keep track of the last "end" index that causes this
+ * group to align with a group of changed lines in the
+ * other file. -1 indicates that we haven't found such
+ * a match yet:
+ */
+ end_matching_other = -1;
+
+ /* Shift the group backward as much as possible: */
+ while (!group_slide_up(xdf, &g))
+ if (group_previous(xdfo, &go))
+ xdl_bug("group sync broken sliding up");
+
+ /*
+ * This is the highest that this group can be shifted.
+ * Record its end index:
+ */
+ earliest_end = g.end;
+
+ if (go.end > go.start)
+ end_matching_other = g.end;
+
+ /* Now shift the group forward as far as possible: */
+ while (1) {
+ if (group_slide_down(xdf, &g))
+ break;
+ if (group_next(xdfo, &go))
+ xdl_bug("group sync broken sliding down");
+
+ if (go.end > go.start)
+ end_matching_other = g.end;
+ }
+ } while (groupsize != g.end - g.start);
+
+ /*
+ * If the group can be shifted, then we can possibly use this
+ * freedom to produce a more intuitive diff.
+ *
+ * The group is currently shifted as far down as possible, so the
+ * heuristics below only have to handle upwards shifts.
+ */
+
+ if (g.end == earliest_end) {
+ /* no shifting was possible */
+ } else if (end_matching_other != -1) {
+ /*
+ * Move the possibly merged group of changes back to line
+ * up with the last group of changes from the other file
+ * that it can align with.
+ */
+ while (go.end == go.start) {
+ if (group_slide_up(xdf, &g))
+ xdl_bug("match disappeared");
+ if (group_previous(xdfo, &go))
+ xdl_bug("group sync broken sliding to match");
+ }
+ } else if (flags & XDF_INDENT_HEURISTIC) {
+ /*
+ * Indent heuristic: a group of pure add/delete lines
+ * implies two splits, one between the end of the "before"
+ * context and the start of the group, and another between
+ * the end of the group and the beginning of the "after"
+ * context. Some splits are aesthetically better and some
+ * are worse. We compute a badness "score" for each split,
+ * and add the scores for the two splits to define a
+ * "score" for each position that the group can be shifted
+ * to. Then we pick the shift with the lowest score.
+ */
+ int64_t shift, best_shift = -1;
+ struct split_score best_score;
+
+ /*
+ * This is O(N * MAX_BLANKS) (N = shift-able lines).
+ * Even with MAX_BLANKS bounded to a small value, a
+ * large N could still make this loop take several
+ * times longer than the main diff algorithm. The
+ * "boring" value is to help cut down N to something
+ * like (MAX_BORING + groupsize).
+ *
+ * Scan from bottom to top. So we can exit the loop
+ * without compromising the assumption "for a same best
+ * score, pick the bottommost shift".
+ */
+ int boring = 0;
+ for (shift = g.end; shift >= earliest_end; shift--) {
+ struct split_measurement m;
+ struct split_score score = {0, 0};
+ int cmp;
+
+ /* Score both splits implied by this shift: */
+ measure_split(xdf, shift, &m);
+ score_add_split(&m, &score);
+ measure_split(xdf, shift - groupsize, &m);
+ score_add_split(&m, &score);
+
+ if (best_shift == -1) {
+ cmp = -1;
+ } else {
+ cmp = score_cmp(&score, &best_score);
+ }
+ if (cmp < 0) {
+ boring = 0;
+ best_score.effective_indent = score.effective_indent;
+ best_score.penalty = score.penalty;
+ best_shift = shift;
+ } else {
+ boring += 1;
+ if (boring >= MAX_BORING)
+ break;
+ }
+ }
+
+ /* Slide the group back up to the chosen position: */
+ while (g.end > best_shift) {
+ if (group_slide_up(xdf, &g))
+ xdl_bug("best shift unreached");
+ if (group_previous(xdfo, &go))
+ xdl_bug("group sync broken sliding to blank line");
+ }
+ }
+
+ next:
+ /* Move past the just-processed group: */
+ if (group_next(xdf, &g))
+ break;
+ if (group_next(xdfo, &go))
+ xdl_bug("group sync broken moving to next group");
+ }
+
+ /* Both cursors must run out of groups at the same time: */
+ if (!group_next(xdfo, &go))
+ xdl_bug("group sync broken at end of file");
+
+ return 0;
+}
+
+
+/*
+ * Walk both change maps from the end of the files toward the start, collect
+ * each maximal run of changed lines into an xdchange_t, and hand the list
+ * back through xscr (built front-to-back because we scan backward). Returns
+ * 0 on success, -1 on allocation failure.
+ *
+ * Reading rchg1[i1 - 1] / rchg2[i2 - 1] at i1 == 0 / i2 == 0 is safe:
+ * xdl_prepare_ctx() allocates a zeroed guard byte below index 0.
+ */
+int xdl_build_script(xdfenv_t *xe, xdchange_t **xscr) {
+ xdchange_t *cscr = NULL, *xch;
+ char *rchg1 = xe->xdf1.rchg, *rchg2 = xe->xdf2.rchg;
+ int64_t i1, i2, l1, l2;
+
+ /*
+ * Trivial. Collects "groups" of changes and creates an edit script.
+ */
+ for (i1 = xe->xdf1.nrec, i2 = xe->xdf2.nrec; i1 >= 0 || i2 >= 0; i1--, i2--)
+ if (rchg1[i1 - 1] || rchg2[i2 - 1]) {
+ /* Back up over the whole run of changed lines in each file: */
+ for (l1 = i1; rchg1[i1 - 1]; i1--);
+ for (l2 = i2; rchg2[i2 - 1]; i2--);
+
+ if (!(xch = xdl_add_change(cscr, i1, i2, l1 - i1, l2 - i2))) {
+ xdl_free_script(cscr);
+ return -1;
+ }
+ cscr = xch;
+ }
+
+ *xscr = cscr;
+
+ return 0;
+}
+
+
+/*
+ * Release every node of an edit script produced by xdl_build_script().
+ */
+void xdl_free_script(xdchange_t *xscr) {
+ while (xscr != NULL) {
+ xdchange_t *next = xscr->next;
+
+ xdl_free(xscr);
+ xscr = next;
+ }
+}
+
+
+/*
+ * Starting at the passed change atom, find the latest change atom to be included
+ * inside the differential hunk according to the specified configuration.
+ * Also advance xscr if the first changes must be discarded.
+ *
+ * Returns the last change of the hunk, or NULL if nothing remains.
+ */
+xdchange_t *xdl_get_hunk(xdchange_t **xscr)
+{
+ xdchange_t *xch, *xchp, *lxch;
+ uint64_t ignored = 0; /* number of ignored blank lines */
+
+ /* remove ignorable changes that are too far before other changes */
+ for (xchp = *xscr; xchp && xchp->ignore; xchp = xchp->next) {
+ xch = xchp->next;
+
+ /*
+ * NOTE(review): for a well-ordered script the distance below is
+ * never negative, so this condition looks always-true — likely a
+ * remnant of an upstream context-size check; confirm upstream.
+ */
+ if (xch == NULL ||
+ xch->i1 - (xchp->i1 + xchp->chg1) >= 0)
+ *xscr = xch;
+ }
+
+ if (*xscr == NULL)
+ return NULL;
+
+ lxch = *xscr;
+
+ /* Extend the hunk over adjacent/overlapping changes: */
+ for (xchp = *xscr, xch = xchp->next; xch; xchp = xch, xch = xch->next) {
+ int64_t distance = xch->i1 - (xchp->i1 + xchp->chg1);
+ if (distance > 0)
+ break;
+
+ if (distance < 0 && (!xch->ignore || lxch == xchp)) {
+ lxch = xch;
+ ignored = 0;
+ } else if (distance < 0 && xch->ignore) {
+ ignored += xch->chg2;
+ } else if (lxch != xchp &&
+ xch->i1 + ignored - (lxch->i1 + lxch->chg1) > 0) {
+ break;
+ } else if (!xch->ignore) {
+ lxch = xch;
+ ignored = 0;
+ } else {
+ ignored += xch->chg2;
+ }
+ }
+
+ return lxch;
+}
+
+
+/*
+ * Report the hunks of the edit script xscr through xecfg->hunk_func.
+ *
+ * p and s re-add the prefix/suffix line counts stripped by xdl_trim_files(),
+ * so the callback sees coordinates in the full, untrimmed files. With
+ * XDL_EMIT_BDIFFHUNK set the callback instead receives the matching regions
+ * between changes (including a final block covering the common suffix).
+ * Returns 0 on success, -1 if hunk_func is unset or the callback fails.
+ */
+static int xdl_call_hunk_func(xdfenv_t *xe, xdchange_t *xscr, xdemitcb_t *ecb,
+ xdemitconf_t const *xecfg)
+{
+ int64_t p = xe->nprefix, s = xe->nsuffix;
+ xdchange_t *xch, *xche;
+
+ if (!xecfg->hunk_func)
+ return -1;
+
+ if ((xecfg->flags & XDL_EMIT_BDIFFHUNK) != 0) {
+ int64_t i1 = 0, i2 = 0, n1 = xe->xdf1.nrec, n2 = xe->xdf2.nrec;
+ for (xch = xscr; xch; xch = xche->next) {
+ xche = xdl_get_hunk(&xch);
+ if (!xch)
+ break;
+ if (xch != xche)
+ xdl_bug("xch != xche");
+ xch->i1 += p;
+ xch->i2 += p;
+ if (xch->i1 > i1 || xch->i2 > i2) {
+ /* Emit the matching block before this change: */
+ if (xecfg->hunk_func(i1, xch->i1, i2, xch->i2, ecb->priv) < 0)
+ return -1;
+ }
+ i1 = xche->i1 + xche->chg1;
+ i2 = xche->i2 + xche->chg2;
+ }
+ /* Trailing matching block up to the end of the untrimmed files: */
+ if (xecfg->hunk_func(i1, n1 + p + s, i2, n2 + p + s,
+ ecb->priv) < 0)
+ return -1;
+ } else {
+ for (xch = xscr; xch; xch = xche->next) {
+ xche = xdl_get_hunk(&xch);
+ if (!xch)
+ break;
+ if (xecfg->hunk_func(xch->i1 + p,
+ xche->i1 + xche->chg1 - xch->i1,
+ xch->i2 + p,
+ xche->i2 + xche->chg2 - xch->i2,
+ ecb->priv) < 0)
+ return -1;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Top-level diff entry point: prepare the two files, run the diff, compact
+ * and shift the change groups, build the edit script, and report the hunks
+ * through ecb. Returns 0 on success, -1 on failure (with all intermediate
+ * state released).
+ */
+int xdl_diff(mmfile_t *mf1, mmfile_t *mf2, xpparam_t const *xpp,
+ xdemitconf_t const *xecfg, xdemitcb_t *ecb) {
+ xdchange_t *xscr;
+ xdfenv_t xe;
+
+ if (xdl_do_diff(mf1, mf2, xpp, &xe) < 0) {
+
+ return -1;
+ }
+ /* Compact each side against the other before extracting the script: */
+ if (xdl_change_compact(&xe.xdf1, &xe.xdf2, xpp->flags) < 0 ||
+ xdl_change_compact(&xe.xdf2, &xe.xdf1, xpp->flags) < 0 ||
+ xdl_build_script(&xe, &xscr) < 0) {
+
+ xdl_free_env(&xe);
+ return -1;
+ }
+
+ if (xdl_call_hunk_func(&xe, xscr, ecb, xecfg) < 0) {
+ xdl_free_script(xscr);
+ xdl_free_env(&xe);
+ return -1;
+ }
+ xdl_free_script(xscr);
+ xdl_free_env(&xe);
+
+ return 0;
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/thirdparty/xdiff/xdiffi.h Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,58 @@
+/*
+ * LibXDiff by Davide Libenzi ( File Differential Library )
+ * Copyright (C) 2003 Davide Libenzi
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ * Davide Libenzi <davidel@xmailserver.org>
+ *
+ */
+
+#if !defined(XDIFFI_H)
+#define XDIFFI_H
+
+
+/* Per-file view of the data fed to the core diff algorithm. */
+typedef struct s_diffdata {
+ int64_t nrec;
+ uint64_t const *ha;
+ int64_t *rindex;
+ char *rchg;
+} diffdata_t;
+
+/* Tuning knobs for the diff algorithm's heuristics. */
+typedef struct s_xdalgoenv {
+ int64_t mxcost;
+ int64_t snake_cnt;
+ int64_t heur_min;
+} xdalgoenv_t;
+
+/*
+ * One atom of an edit script: a change starting at line i1/i2 spanning
+ * chg1/chg2 lines in the first/second file.
+ */
+typedef struct s_xdchange {
+ struct s_xdchange *next;
+ int64_t i1, i2;
+ int64_t chg1, chg2;
+ int ignore;
+} xdchange_t;
+
+
+
+int xdl_recs_cmp(diffdata_t *dd1, int64_t off1, int64_t lim1,
+ diffdata_t *dd2, int64_t off2, int64_t lim2,
+ int64_t *kvdf, int64_t *kvdb, int need_min, xdalgoenv_t *xenv);
+int xdl_do_diff(mmfile_t *mf1, mmfile_t *mf2, xpparam_t const *xpp,
+ xdfenv_t *xe);
+int xdl_change_compact(xdfile_t *xdf, xdfile_t *xdfo, int64_t flags);
+int xdl_build_script(xdfenv_t *xe, xdchange_t **xscr);
+void xdl_free_script(xdchange_t *xscr);
+
+#endif /* #if !defined(XDIFFI_H) */
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/thirdparty/xdiff/xinclude.h Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,40 @@
+/*
+ * LibXDiff by Davide Libenzi ( File Differential Library )
+ * Copyright (C) 2003 Davide Libenzi
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ * Davide Libenzi <davidel@xmailserver.org>
+ *
+ */
+
+#if !defined(XINCLUDE_H)
+#define XINCLUDE_H
+
+/*
+ * Single convenience include used by the xdiff implementation files:
+ * bundles the required system headers and all internal xdiff headers.
+ */
+#include <ctype.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <limits.h>
+
+#include "xmacros.h"
+#include "xdiff.h"
+#include "xtypes.h"
+#include "xutils.h"
+#include "xprepare.h"
+#include "xdiffi.h"
+
+
+#endif /* #if !defined(XINCLUDE_H) */
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/thirdparty/xdiff/xmacros.h Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,54 @@
+/*
+ * LibXDiff by Davide Libenzi ( File Differential Library )
+ * Copyright (C) 2003 Davide Libenzi
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ * Davide Libenzi <davidel@xmailserver.org>
+ *
+ */
+
+#if !defined(XMACROS_H)
+#define XMACROS_H
+
+
+
+
+/* Small arithmetic/character helpers. Arguments may be evaluated twice. */
+#define XDL_MIN(a, b) ((a) < (b) ? (a): (b))
+#define XDL_MAX(a, b) ((a) > (b) ? (a): (b))
+#define XDL_ABS(v) ((v) >= 0 ? (v): -(v))
+#define XDL_ISDIGIT(c) ((c) >= '0' && (c) <= '9')
+#define XDL_ISSPACE(c) (isspace((unsigned char)(c)))
+/* Fold a value into b bits for hash-table bucketing: */
+#define XDL_ADDBITS(v,b) ((v) + ((v) >> (b)))
+#define XDL_MASKBITS(b) ((1UL << (b)) - 1)
+#define XDL_HASHLONG(v,b) (XDL_ADDBITS((unsigned long)(v), b) & XDL_MASKBITS(b))
+/* Free p (if non-NULL) and reset it to NULL: */
+#define XDL_PTRFREE(p) do { if (p) { xdl_free(p); (p) = NULL; } } while (0)
+/* Endian-independent 32-bit little-endian store/load: */
+#define XDL_LE32_PUT(p, v) \
+do { \
+ unsigned char *__p = (unsigned char *) (p); \
+ *__p++ = (unsigned char) (v); \
+ *__p++ = (unsigned char) ((v) >> 8); \
+ *__p++ = (unsigned char) ((v) >> 16); \
+ *__p = (unsigned char) ((v) >> 24); \
+} while (0)
+#define XDL_LE32_GET(p, v) \
+do { \
+ unsigned char const *__p = (unsigned char const *) (p); \
+ (v) = (unsigned long) __p[0] | ((unsigned long) __p[1]) << 8 | \
+ ((unsigned long) __p[2]) << 16 | ((unsigned long) __p[3]) << 24; \
+} while (0)
+
+
+#endif /* #if !defined(XMACROS_H) */
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/thirdparty/xdiff/xprepare.c Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,552 @@
+/*
+ * LibXDiff by Davide Libenzi ( File Differential Library )
+ * Copyright (C) 2003 Davide Libenzi
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ * Davide Libenzi <davidel@xmailserver.org>
+ *
+ */
+
+#include "xinclude.h"
+
+
+/* Minimum run length used when discarding multimatch lines (see
+ * xdl_clean_mmatch()): */
+#define XDL_KPDIS_RUN 4
+/* Cap on the "too many matches" threshold in xdl_cleanup_records(): */
+#define XDL_MAX_EQLIMIT 1024
+/* Scan window around a line in the similar-lines scan: */
+#define XDL_SIMSCAN_WINDOW 100
+/* Sample size for guessing the number of lines in a file: */
+#define XDL_GUESS_NLINES1 256
+
+
+/* One equivalence class of identical lines, shared across both files. */
+typedef struct s_xdlclass {
+ struct s_xdlclass *next;
+ uint64_t ha;
+ char const *line;
+ int64_t size;
+ int64_t idx;
+ int64_t len1, len2;
+} xdlclass_t;
+
+/* Hash table mapping lines to equivalence classes during preparation. */
+typedef struct s_xdlclassifier {
+ unsigned int hbits;
+ int64_t hsize;
+ xdlclass_t **rchash;
+ chastore_t ncha;
+ xdlclass_t **rcrecs;
+ int64_t alloc;
+ int64_t count;
+ int64_t flags;
+} xdlclassifier_t;
+
+
+
+
+static int xdl_init_classifier(xdlclassifier_t *cf, int64_t size, int64_t flags);
+static void xdl_free_classifier(xdlclassifier_t *cf);
+static int xdl_classify_record(unsigned int pass, xdlclassifier_t *cf, xrecord_t **rhash,
+ unsigned int hbits, xrecord_t *rec);
+static int xdl_prepare_ctx(unsigned int pass, mmfile_t *mf, int64_t narec,
+ xdlclassifier_t *cf, xdfile_t *xdf);
+static void xdl_free_ctx(xdfile_t *xdf);
+static int xdl_clean_mmatch(char const *dis, int64_t i, int64_t s, int64_t e);
+static int xdl_cleanup_records(xdlclassifier_t *cf, xdfile_t *xdf1, xdfile_t *xdf2);
+static int xdl_trim_ends(xdfile_t *xdf1, xdfile_t *xdf2);
+static int xdl_optimize_ctxs(xdlclassifier_t *cf, xdfile_t *xdf1, xdfile_t *xdf2);
+
+
+
+
+/*
+ * Initialize the line classifier sized for roughly `size` records.
+ * Returns 0 on success, -1 on allocation failure (partially-built state
+ * is released before returning).
+ */
+static int xdl_init_classifier(xdlclassifier_t *cf, int64_t size, int64_t flags) {
+ cf->flags = flags;
+
+ cf->hbits = xdl_hashbits(size);
+ cf->hsize = ((uint64_t)1) << cf->hbits;
+
+ if (xdl_cha_init(&cf->ncha, sizeof(xdlclass_t), size / 4 + 1) < 0) {
+
+ return -1;
+ }
+ if (!(cf->rchash = (xdlclass_t **) xdl_malloc(cf->hsize * sizeof(xdlclass_t *)))) {
+
+ xdl_cha_free(&cf->ncha);
+ return -1;
+ }
+ memset(cf->rchash, 0, cf->hsize * sizeof(xdlclass_t *));
+
+ cf->alloc = size;
+ if (!(cf->rcrecs = (xdlclass_t **) xdl_malloc(cf->alloc * sizeof(xdlclass_t *)))) {
+
+ xdl_free(cf->rchash);
+ xdl_cha_free(&cf->ncha);
+ return -1;
+ }
+
+ cf->count = 0;
+
+ return 0;
+}
+
+
+/*
+ * Release everything allocated by xdl_init_classifier() (and any classes
+ * created through xdl_classify_record()).
+ */
+static void xdl_free_classifier(xdlclassifier_t *cf) {
+
+ xdl_free(cf->rcrecs);
+ xdl_free(cf->rchash);
+ xdl_cha_free(&cf->ncha);
+}
+
+
+/*
+ * Assign rec to its equivalence class of identical lines, creating a new
+ * class if needed, and insert rec into the per-file hash table rhash.
+ *
+ * pass is 1 for the first file and 2 for the second; it selects which
+ * per-class occurrence counter (len1/len2) is incremented. On return,
+ * rec->ha has been replaced by the class index, so lines can later be
+ * compared by index alone. Returns 0 on success, -1 on allocation failure.
+ */
+static int xdl_classify_record(unsigned int pass, xdlclassifier_t *cf, xrecord_t **rhash,
+ unsigned int hbits, xrecord_t *rec) {
+ int64_t hi;
+ char const *line;
+ xdlclass_t *rcrec;
+ xdlclass_t **rcrecs;
+
+ line = rec->ptr;
+ hi = (long) XDL_HASHLONG(rec->ha, cf->hbits);
+ /* Look for an existing class with the same hash and content: */
+ for (rcrec = cf->rchash[hi]; rcrec; rcrec = rcrec->next)
+ if (rcrec->ha == rec->ha &&
+ xdl_recmatch(rcrec->line, rcrec->size,
+ rec->ptr, rec->size))
+ break;
+
+ if (!rcrec) {
+ if (!(rcrec = xdl_cha_alloc(&cf->ncha))) {
+
+ return -1;
+ }
+ rcrec->idx = cf->count++;
+ /* Grow the index->class array geometrically when full: */
+ if (cf->count > cf->alloc) {
+ cf->alloc *= 2;
+ if (!(rcrecs = (xdlclass_t **) xdl_realloc(cf->rcrecs, cf->alloc * sizeof(xdlclass_t *)))) {
+
+ return -1;
+ }
+ cf->rcrecs = rcrecs;
+ }
+ cf->rcrecs[rcrec->idx] = rcrec;
+ rcrec->line = line;
+ rcrec->size = rec->size;
+ rcrec->ha = rec->ha;
+ rcrec->len1 = rcrec->len2 = 0;
+ rcrec->next = cf->rchash[hi];
+ cf->rchash[hi] = rcrec;
+ }
+
+ (pass == 1) ? rcrec->len1++ : rcrec->len2++;
+
+ /* From here on the record is identified by its class index: */
+ rec->ha = (unsigned long) rcrec->idx;
+
+ hi = (long) XDL_HASHLONG(rec->ha, hbits);
+ rec->next = rhash[hi];
+ rhash[hi] = rec;
+
+ return 0;
+}
+
+
+/*
+ * Trim common prefix from files.
+ *
+ * Note: trimming could affect hunk shifting. But the performance benefit
+ * outweighs the shift change. A diff result with suboptimal shifting is still
+ * valid.
+ *
+ * The trimmed views are written to out_mf1/out_mf2 (sharing the original
+ * buffers); the number of trimmed prefix/suffix lines is recorded in
+ * xe->nprefix / xe->nsuffix. `reserved` lines are kept on each side of the
+ * trim so that shifting near the boundaries still has room to work.
+ */
+static void xdl_trim_files(mmfile_t *mf1, mmfile_t *mf2, int64_t reserved,
+ xdfenv_t *xe, mmfile_t *out_mf1, mmfile_t *out_mf2) {
+ mmfile_t msmall, mlarge;
+ /* prefix lines, prefix bytes, suffix lines, suffix bytes */
+ int64_t plines = 0, pbytes = 0, slines = 0, sbytes = 0, i;
+ /* prefix char pointer for msmall and mlarge */
+ const char *pp1, *pp2;
+ /* suffix char pointer for msmall and mlarge */
+ const char *ps1, *ps2;
+
+ /* reserved must be >= 0 for the line boundary adjustment to work */
+ if (reserved < 0)
+ reserved = 0;
+
+ if (mf1->size < mf2->size) {
+ memcpy(&msmall, mf1, sizeof(mmfile_t));
+ memcpy(&mlarge, mf2, sizeof(mmfile_t));
+ } else {
+ memcpy(&msmall, mf2, sizeof(mmfile_t));
+ memcpy(&mlarge, mf1, sizeof(mmfile_t));
+ }
+
+ /* Count matching bytes (and newlines) from the front: */
+ pp1 = msmall.ptr, pp2 = mlarge.ptr;
+ for (i = 0; i < msmall.size && *pp1 == *pp2; ++i) {
+ plines += (*pp1 == '\n');
+ pp1++, pp2++;
+ }
+
+ /* And from the back, stopping before the matched prefix: */
+ ps1 = msmall.ptr + msmall.size - 1, ps2 = mlarge.ptr + mlarge.size - 1;
+ while (ps1 > pp1 && *ps1 == *ps2) {
+ slines += (*ps1 == '\n');
+ ps1--, ps2--;
+ }
+
+ /* Retract common prefix and suffix boundaries for reserved lines */
+ if (plines <= reserved + 1) {
+ plines = 0;
+ } else {
+ i = 0;
+ while (i <= reserved) {
+ pp1--;
+ i += (*pp1 == '\n');
+ }
+ /* The new mmfile starts at the next char just after '\n' */
+ pbytes = pp1 - msmall.ptr + 1;
+ plines -= reserved;
+ }
+
+ if (slines <= reserved + 1) {
+ slines = 0;
+ } else {
+ /* Note: with compiler SIMD support (ex. -O3 -mavx2), this
+ * might perform better than memchr. */
+ i = 0;
+ while (i <= reserved) {
+ ps1++;
+ i += (*ps1 == '\n');
+ }
+ /* The new mmfile includes this '\n' */
+ sbytes = msmall.ptr + msmall.size - ps1 - 1;
+ slines -= reserved;
+ if (msmall.ptr[msmall.size - 1] == '\n')
+ slines -= 1;
+ }
+
+ xe->nprefix = plines;
+ xe->nsuffix = slines;
+ out_mf1->ptr = mf1->ptr + pbytes;
+ out_mf1->size = mf1->size - pbytes - sbytes;
+ out_mf2->ptr = mf2->ptr + pbytes;
+ out_mf2->size = mf2->size - pbytes - sbytes;
+}
+
+
+/*
+ * Split mf into hashed line records, classify each line, and populate the
+ * per-file context xdf. narec is an initial estimate of the line count;
+ * the record array grows geometrically if it is exceeded.
+ *
+ * Returns 0 on success, -1 on allocation failure (all partial allocations
+ * are released via the abort path).
+ */
+static int xdl_prepare_ctx(unsigned int pass, mmfile_t *mf, int64_t narec,
+ xdlclassifier_t *cf, xdfile_t *xdf) {
+ unsigned int hbits;
+ int64_t nrec, hsize, bsize;
+ uint64_t hav;
+ char const *blk, *cur, *top, *prev;
+ xrecord_t *crec;
+ xrecord_t **recs, **rrecs;
+ xrecord_t **rhash;
+ uint64_t *ha;
+ char *rchg;
+ int64_t *rindex;
+
+ ha = NULL;
+ rindex = NULL;
+ rchg = NULL;
+ rhash = NULL;
+ recs = NULL;
+
+ if (xdl_cha_init(&xdf->rcha, sizeof(xrecord_t), narec / 4 + 1) < 0)
+ goto abort;
+ if (!(recs = (xrecord_t **) xdl_malloc(narec * sizeof(xrecord_t *))))
+ goto abort;
+
+ {
+ hbits = xdl_hashbits(narec);
+ hsize = ((uint64_t)1) << hbits;
+ if (!(rhash = (xrecord_t **) xdl_malloc(hsize * sizeof(xrecord_t *))))
+ goto abort;
+ memset(rhash, 0, hsize * sizeof(xrecord_t *));
+ }
+
+ /* Hash one line per iteration; xdl_hash_record advances cur: */
+ nrec = 0;
+ if ((cur = blk = xdl_mmfile_first(mf, &bsize)) != NULL) {
+ for (top = blk + bsize; cur < top; ) {
+ prev = cur;
+ hav = xdl_hash_record(&cur, top);
+ if (nrec >= narec) {
+ narec *= 2;
+ if (!(rrecs = (xrecord_t **) xdl_realloc(recs, narec * sizeof(xrecord_t *))))
+ goto abort;
+ recs = rrecs;
+ }
+ if (!(crec = xdl_cha_alloc(&xdf->rcha)))
+ goto abort;
+ crec->ptr = prev;
+ crec->size = (long) (cur - prev);
+ crec->ha = hav;
+ recs[nrec++] = crec;
+
+ if (xdl_classify_record(pass, cf, rhash, hbits, crec) < 0)
+ goto abort;
+ }
+ }
+
+ /*
+ * nrec + 2 zeroed bytes, exposed as rchg + 1, give callers safe guard
+ * entries at indices -1 and nrec (see xdl_change_compact()).
+ */
+ if (!(rchg = (char *) xdl_malloc((nrec + 2) * sizeof(char))))
+ goto abort;
+ memset(rchg, 0, (nrec + 2) * sizeof(char));
+
+ if (!(rindex = (int64_t *) xdl_malloc((nrec + 1) * sizeof(int64_t))))
+ goto abort;
+ if (!(ha = (uint64_t *) xdl_malloc((nrec + 1) * sizeof(uint64_t))))
+ goto abort;
+
+ xdf->nrec = nrec;
+ xdf->recs = recs;
+ xdf->hbits = hbits;
+ xdf->rhash = rhash;
+ xdf->rchg = rchg + 1;
+ xdf->rindex = rindex;
+ xdf->nreff = 0;
+ xdf->ha = ha;
+ xdf->dstart = 0;
+ xdf->dend = nrec - 1;
+
+ return 0;
+
+abort:
+ xdl_free(ha);
+ xdl_free(rindex);
+ xdl_free(rchg);
+ xdl_free(rhash);
+ xdl_free(recs);
+ xdl_cha_free(&xdf->rcha);
+ return -1;
+}
+
+
+/*
+ * Release a per-file context built by xdl_prepare_ctx(). Note that rchg
+ * points one byte past the start of its allocation (guard byte at -1), so
+ * the original pointer is recovered before freeing.
+ */
+static void xdl_free_ctx(xdfile_t *xdf) {
+
+ xdl_free(xdf->rhash);
+ xdl_free(xdf->rindex);
+ xdl_free(xdf->rchg - 1);
+ xdl_free(xdf->ha);
+ xdl_free(xdf->recs);
+ xdl_cha_free(&xdf->rcha);
+}
+
+/* Reserved lines for trimming, to leave room for shifting */
+#define TRIM_RESERVED_LINES 100
+
+/*
+ * Build the diff environment xe for the file pair: trim the common
+ * prefix/suffix, prepare both per-file contexts with a shared classifier,
+ * and run the context optimizations. Returns 0 on success, -1 on failure
+ * (with partial state released).
+ */
+int xdl_prepare_env(mmfile_t *mf1, mmfile_t *mf2, xpparam_t const *xpp,
+ xdfenv_t *xe) {
+ int64_t enl1, enl2, sample;
+ mmfile_t tmf1, tmf2;
+ xdlclassifier_t cf;
+
+ memset(&cf, 0, sizeof(cf));
+
+ sample = XDL_GUESS_NLINES1;
+
+ /* Estimated line counts, used to size the classifier and contexts: */
+ enl1 = xdl_guess_lines(mf1, sample) + 1;
+ enl2 = xdl_guess_lines(mf2, sample) + 1;
+
+ if (xdl_init_classifier(&cf, enl1 + enl2 + 1, xpp->flags) < 0)
+ return -1;
+
+ xdl_trim_files(mf1, mf2, TRIM_RESERVED_LINES, xe, &tmf1, &tmf2);
+
+ if (xdl_prepare_ctx(1, &tmf1, enl1, &cf, &xe->xdf1) < 0) {
+
+ xdl_free_classifier(&cf);
+ return -1;
+ }
+ if (xdl_prepare_ctx(2, &tmf2, enl2, &cf, &xe->xdf2) < 0) {
+
+ xdl_free_ctx(&xe->xdf1);
+ xdl_free_classifier(&cf);
+ return -1;
+ }
+
+ if (xdl_optimize_ctxs(&cf, &xe->xdf1, &xe->xdf2) < 0) {
+ xdl_free_ctx(&xe->xdf2);
+ xdl_free_ctx(&xe->xdf1);
+ xdl_free_classifier(&cf);
+ return -1;
+ }
+
+ xdl_free_classifier(&cf);
+
+ return 0;
+}
+
+
+/*
+ * Release both per-file contexts of a diff environment built by
+ * xdl_prepare_env().
+ */
+void xdl_free_env(xdfenv_t *xe) {
+
+ xdl_free_ctx(&xe->xdf2);
+ xdl_free_ctx(&xe->xdf1);
+}
+
+
+/*
+ * Decide whether the multimatch line at index i (dis[i] == 2) should be
+ * discarded: return non-zero when it sits inside a run dominated by
+ * no-match lines. s and e bound the scannable range.
+ */
+static int xdl_clean_mmatch(char const *dis, int64_t i, int64_t s, int64_t e) {
+ int64_t r, rdis0, rpdis0, rdis1, rpdis1;
+
+ /*
+ * Limits the window that is examined during the similar-lines
+ * scan. The loops below stop when dis[i - r] == 1 (line that
+ * has no match), but there are corner cases where the loop
+ * proceed all the way to the extremities, causing huge
+ * performance penalties in case of big files.
+ */
+ if (i - s > XDL_SIMSCAN_WINDOW)
+ s = i - XDL_SIMSCAN_WINDOW;
+ if (e - i > XDL_SIMSCAN_WINDOW)
+ e = i + XDL_SIMSCAN_WINDOW;
+
+ /*
+ * Scans the lines before 'i' to find a run of lines that either
+ * have no match (dis[j] == 0) or have multiple matches (dis[j] > 1).
+ * Note that we always call this function with dis[i] > 1, so the
+ * current line (i) is already a multimatch line.
+ */
+ for (r = 1, rdis0 = 0, rpdis0 = 1; (i - r) >= s; r++) {
+ if (!dis[i - r])
+ rdis0++;
+ else if (dis[i - r] == 2)
+ rpdis0++;
+ else
+ break;
+ }
+ /*
+ * If the run before the line 'i' found only multimatch lines, we
+ * return 0 and hence we don't make the current line (i) discarded.
+ * We want to discard multimatch lines only when they appear in the
+ * middle of runs with nomatch lines (dis[j] == 0).
+ */
+ if (rdis0 == 0)
+ return 0;
+ for (r = 1, rdis1 = 0, rpdis1 = 1; (i + r) <= e; r++) {
+ if (!dis[i + r])
+ rdis1++;
+ else if (dis[i + r] == 2)
+ rpdis1++;
+ else
+ break;
+ }
+ /*
+ * If the run after the line 'i' found only multimatch lines, we
+ * return 0 and hence we don't make the current line (i) discarded.
+ */
+ if (rdis1 == 0)
+ return 0;
+ rdis1 += rdis0;
+ rpdis1 += rpdis0;
+
+ /* Discard only if multimatch lines are a minority of the run: */
+ return rpdis1 * XDL_KPDIS_RUN < (rpdis1 + rdis1);
+}
+
+
+/*
+ * Try to reduce the problem complexity, discard records that have no
+ * matches on the other file. Also, lines that have multiple matches
+ * might be potentially discarded if they appear in a run of discardable
+ * ones.
+ */
+static int xdl_cleanup_records(xdlclassifier_t *cf, xdfile_t *xdf1, xdfile_t *xdf2) {
+ int64_t i, nm, nreff, mlim;
+ xrecord_t **recs;
+ xdlclass_t *rcrec;
+ char *dis, *dis1, *dis2;
+
+ /*
+ * dis holds one match-status byte per line of both files:
+ * 0 = no match on the other side, 1 = matched, 2 = too many matches.
+ */
+ if (!(dis = (char *) xdl_malloc(xdf1->nrec + xdf2->nrec + 2))) {
+
+ return -1;
+ }
+ memset(dis, 0, xdf1->nrec + xdf2->nrec + 2);
+ dis1 = dis;
+ dis2 = dis1 + xdf1->nrec + 1;
+
+ if ((mlim = xdl_bogosqrt(xdf1->nrec)) > XDL_MAX_EQLIMIT)
+ mlim = XDL_MAX_EQLIMIT;
+ for (i = xdf1->dstart, recs = &xdf1->recs[xdf1->dstart]; i <= xdf1->dend; i++, recs++) {
+ rcrec = cf->rcrecs[(*recs)->ha];
+ nm = rcrec ? rcrec->len2 : 0;
+ dis1[i] = (nm == 0) ? 0: (nm >= mlim) ? 2: 1;
+ }
+
+ if ((mlim = xdl_bogosqrt(xdf2->nrec)) > XDL_MAX_EQLIMIT)
+ mlim = XDL_MAX_EQLIMIT;
+ for (i = xdf2->dstart, recs = &xdf2->recs[xdf2->dstart]; i <= xdf2->dend; i++, recs++) {
+ rcrec = cf->rcrecs[(*recs)->ha];
+ nm = rcrec ? rcrec->len1 : 0;
+ dis2[i] = (nm == 0) ? 0: (nm >= mlim) ? 2: 1;
+ }
+
+ /* Keep effective records; pre-mark the discarded ones as changed: */
+ for (nreff = 0, i = xdf1->dstart, recs = &xdf1->recs[xdf1->dstart];
+ i <= xdf1->dend; i++, recs++) {
+ if (dis1[i] == 1 ||
+ (dis1[i] == 2 && !xdl_clean_mmatch(dis1, i, xdf1->dstart, xdf1->dend))) {
+ xdf1->rindex[nreff] = i;
+ xdf1->ha[nreff] = (*recs)->ha;
+ nreff++;
+ } else
+ xdf1->rchg[i] = 1;
+ }
+ xdf1->nreff = nreff;
+
+ for (nreff = 0, i = xdf2->dstart, recs = &xdf2->recs[xdf2->dstart];
+ i <= xdf2->dend; i++, recs++) {
+ if (dis2[i] == 1 ||
+ (dis2[i] == 2 && !xdl_clean_mmatch(dis2, i, xdf2->dstart, xdf2->dend))) {
+ xdf2->rindex[nreff] = i;
+ xdf2->ha[nreff] = (*recs)->ha;
+ nreff++;
+ } else
+ xdf2->rchg[i] = 1;
+ }
+ xdf2->nreff = nreff;
+
+ xdl_free(dis);
+
+ return 0;
+}
+
+
+/*
+ * Early trim initial and terminal matching records.
+ *
+ * Narrows [dstart, dend] on both files to the region that actually differs,
+ * comparing by class index (the ha fields rewritten by
+ * xdl_classify_record()). Always returns 0.
+ */
+static int xdl_trim_ends(xdfile_t *xdf1, xdfile_t *xdf2) {
+ int64_t i, lim;
+ xrecord_t **recs1, **recs2;
+
+ recs1 = xdf1->recs;
+ recs2 = xdf2->recs;
+ for (i = 0, lim = XDL_MIN(xdf1->nrec, xdf2->nrec); i < lim;
+ i++, recs1++, recs2++)
+ if ((*recs1)->ha != (*recs2)->ha)
+ break;
+
+ xdf1->dstart = xdf2->dstart = i;
+
+ /* Same scan from the back, never crossing the matched prefix: */
+ recs1 = xdf1->recs + xdf1->nrec - 1;
+ recs2 = xdf2->recs + xdf2->nrec - 1;
+ for (lim -= i, i = 0; i < lim; i++, recs1--, recs2--)
+ if ((*recs1)->ha != (*recs2)->ha)
+ break;
+
+ xdf1->dend = xdf1->nrec - i - 1;
+ xdf2->dend = xdf2->nrec - i - 1;
+
+ return 0;
+}
+
+
+/*
+ * Run the preparation optimizations in order: trim matching file ends,
+ * then discard unmatchable records. Returns 0 on success, -1 on failure.
+ */
+static int xdl_optimize_ctxs(xdlclassifier_t *cf, xdfile_t *xdf1, xdfile_t *xdf2) {
+
+ if (xdl_trim_ends(xdf1, xdf2) < 0 ||
+ xdl_cleanup_records(cf, xdf1, xdf2) < 0) {
+
+ return -1;
+ }
+
+ return 0;
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/thirdparty/xdiff/xprepare.h Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,34 @@
+/*
+ * LibXDiff by Davide Libenzi ( File Differential Library )
+ * Copyright (C) 2003 Davide Libenzi
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ * Davide Libenzi <davidel@xmailserver.org>
+ *
+ */
+
+#if !defined(XPREPARE_H)
+#define XPREPARE_H
+
+
+
+int xdl_prepare_env(mmfile_t *mf1, mmfile_t *mf2, xpparam_t const *xpp,
+ xdfenv_t *xe);
+void xdl_free_env(xdfenv_t *xe);
+
+
+
+#endif /* #if !defined(XPREPARE_H) */
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/thirdparty/xdiff/xtypes.h Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,105 @@
+/*
+ * LibXDiff by Davide Libenzi ( File Differential Library )
+ * Copyright (C) 2003 Davide Libenzi
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ * Davide Libenzi <davidel@xmailserver.org>
+ *
+ */
+
+#if !defined(XTYPES_H)
+#define XTYPES_H
+
+
+
+typedef struct s_chanode {
+ struct s_chanode *next;
+ int64_t icurr;
+} chanode_t;
+
+typedef struct s_chastore {
+ chanode_t *head, *tail;
+ int64_t isize, nsize;
+ chanode_t *ancur;
+ chanode_t *sncur;
+ int64_t scurr;
+} chastore_t;
+
+typedef struct s_xrecord {
+ struct s_xrecord *next;
+ char const *ptr;
+ int64_t size;
+ uint64_t ha;
+} xrecord_t;
+
+typedef struct s_xdfile {
+ /* manual memory management */
+ chastore_t rcha;
+
+ /* number of records (lines) */
+ int64_t nrec;
+
+ /* hash table size
+ * the maximum hash value in the table is (1 << hbits) */
+ unsigned int hbits;
+
+ /* hash table, hash value => xrecord_t
+ * note: xrecord_t is a linked list. */
+ xrecord_t **rhash;
+
+ /* range excluding common prefix and suffix
+ * [recs[i] for i in range(0, dstart)] are common prefix.
+ * [recs[i] for i in range(dstart, dend + 1)] are interesting
+ * lines */
+ int64_t dstart, dend;
+
+ /* pointer to records (lines) */
+ xrecord_t **recs;
+
+ /* record changed, use original "recs" index
+ * rchg[i] can be either 0 or 1. 1 means recs[i] (line i) is marked
+ * "changed". */
+ char *rchg;
+
+ /* cleaned-up record index => original "recs" index
+ * clean-up means:
+ * rule 1. remove common prefix and suffix
+ * rule 2. remove records that are only on one side, since they can
+ * not match the other side
+ * rindex[0] is likely dstart, if not removed by rule 2.
+ * rindex[nreff - 1] is likely dend, if not removed by rule 2.
+ */
+ int64_t *rindex;
+
+ /* rindex size */
+ int64_t nreff;
+
+ /* cleaned-up record index => hash value
+ * ha[i] = recs[rindex[i]]->ha */
+ uint64_t *ha;
+} xdfile_t;
+
+typedef struct s_xdfenv {
+ xdfile_t xdf1, xdf2;
+
+ /* number of lines for common prefix and suffix that are removed
+ * from xdf1 and xdf2 as a preprocessing step */
+ int64_t nprefix, nsuffix;
+} xdfenv_t;
+
+
+
+#endif /* #if !defined(XTYPES_H) */
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/thirdparty/xdiff/xutils.c Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,150 @@
+/*
+ * LibXDiff by Davide Libenzi ( File Differential Library )
+ * Copyright (C) 2003 Davide Libenzi
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ * Davide Libenzi <davidel@xmailserver.org>
+ *
+ */
+
+#include <limits.h>
+#include <assert.h>
+#include "xinclude.h"
+
+
+
+
+int64_t xdl_bogosqrt(int64_t n) {
+ int64_t i;
+
+ /*
+ * Classical integer square root approximation using shifts.
+ */
+ for (i = 1; n > 0; n >>= 2)
+ i <<= 1;
+
+ return i;
+}
+
+
+void *xdl_mmfile_first(mmfile_t *mmf, int64_t *size)
+{
+ *size = mmf->size;
+ return mmf->ptr;
+}
+
+
+int64_t xdl_mmfile_size(mmfile_t *mmf)
+{
+ return mmf->size;
+}
+
+
+int xdl_cha_init(chastore_t *cha, int64_t isize, int64_t icount) {
+
+ cha->head = cha->tail = NULL;
+ cha->isize = isize;
+ cha->nsize = icount * isize;
+ cha->ancur = cha->sncur = NULL;
+ cha->scurr = 0;
+
+ return 0;
+}
+
+
+void xdl_cha_free(chastore_t *cha) {
+ chanode_t *cur, *tmp;
+
+ for (cur = cha->head; (tmp = cur) != NULL;) {
+ cur = cur->next;
+ xdl_free(tmp);
+ }
+}
+
+
+void *xdl_cha_alloc(chastore_t *cha) {
+ chanode_t *ancur;
+ void *data;
+
+ if (!(ancur = cha->ancur) || ancur->icurr == cha->nsize) {
+ if (!(ancur = (chanode_t *) xdl_malloc(sizeof(chanode_t) + cha->nsize))) {
+
+ return NULL;
+ }
+ ancur->icurr = 0;
+ ancur->next = NULL;
+ if (cha->tail)
+ cha->tail->next = ancur;
+ if (!cha->head)
+ cha->head = ancur;
+ cha->tail = ancur;
+ cha->ancur = ancur;
+ }
+
+ data = (char *) ancur + sizeof(chanode_t) + ancur->icurr;
+ ancur->icurr += cha->isize;
+
+ return data;
+}
+
+int64_t xdl_guess_lines(mmfile_t *mf, int64_t sample) {
+ int64_t nl = 0, size, tsize = 0;
+ char const *data, *cur, *top;
+
+ if ((cur = data = xdl_mmfile_first(mf, &size)) != NULL) {
+ for (top = data + size; nl < sample && cur < top; ) {
+ nl++;
+ if (!(cur = memchr(cur, '\n', top - cur)))
+ cur = top;
+ else
+ cur++;
+ }
+ tsize += (long) (cur - data);
+ }
+
+ if (nl && tsize)
+ nl = xdl_mmfile_size(mf) / (tsize / nl);
+
+ return nl + 1;
+}
+
+int xdl_recmatch(const char *l1, int64_t s1, const char *l2, int64_t s2)
+{
+ if (s1 == s2 && !memcmp(l1, l2, s1))
+ return 1;
+ return 0;
+}
+
+uint64_t xdl_hash_record(char const **data, char const *top) {
+ uint64_t ha = 5381;
+ char const *ptr = *data;
+
+ for (; ptr < top && *ptr != '\n'; ptr++) {
+ ha += (ha << 5);
+ ha ^= (unsigned long) *ptr;
+ }
+ *data = ptr < top ? ptr + 1: ptr;
+
+ return ha;
+}
+
+unsigned int xdl_hashbits(int64_t size) {
+ int64_t val = 1;
+ unsigned int bits = 0;
+
+ for (; val < size && bits < (int64_t) CHAR_BIT * sizeof(unsigned int); val <<= 1, bits++);
+ return bits ? bits: 1;
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/thirdparty/xdiff/xutils.h Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,39 @@
+/*
+ * LibXDiff by Davide Libenzi ( File Differential Library )
+ * Copyright (C) 2003 Davide Libenzi
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ * Davide Libenzi <davidel@xmailserver.org>
+ *
+ */
+
+#if !defined(XUTILS_H)
+#define XUTILS_H
+
+
+
+int64_t xdl_bogosqrt(int64_t n);
+int xdl_cha_init(chastore_t *cha, int64_t isize, int64_t icount);
+void xdl_cha_free(chastore_t *cha);
+void *xdl_cha_alloc(chastore_t *cha);
+int64_t xdl_guess_lines(mmfile_t *mf, int64_t sample);
+int xdl_recmatch(const char *l1, int64_t s1, const char *l2, int64_t s2);
+uint64_t xdl_hash_record(char const **data, char const *top);
+unsigned int xdl_hashbits(int64_t size);
+
+
+
+#endif /* #if !defined(XUTILS_H) */
--- a/mercurial/transaction.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/transaction.py Mon Mar 19 08:07:18 2018 -0700
@@ -18,6 +18,7 @@
from .i18n import _
from . import (
error,
+ pycompat,
util,
)
@@ -104,7 +105,7 @@
class transaction(util.transactional):
def __init__(self, report, opener, vfsmap, journalname, undoname=None,
after=None, createmode=None, validator=None, releasefn=None,
- checkambigfiles=None):
+ checkambigfiles=None, name=r'<unnamed>'):
"""Begin a new transaction
Begins a new transaction that allows rolling back writes in the event of
@@ -148,6 +149,8 @@
if checkambigfiles:
self.checkambigfiles.update(checkambigfiles)
+ self.names = [name]
+
# A dict dedicated to precisely tracking the changes introduced in the
# transaction.
self.changes = {}
@@ -185,6 +188,11 @@
# holds callbacks to call during abort
self._abortcallback = {}
+ def __repr__(self):
+ name = r'/'.join(self.names)
+ return (r'<transaction name=%s, count=%d, usages=%d>' %
+ (name, self.count, self.usages))
+
def __del__(self):
if self.journal:
self._abort()
@@ -364,14 +372,17 @@
self.file.flush()
@active
- def nest(self):
+ def nest(self, name=r'<unnamed>'):
self.count += 1
self.usages += 1
+ self.names.append(name)
return self
def release(self):
if self.count > 0:
self.usages -= 1
+ if self.names:
+ self.names.pop()
# if the transaction scopes are left without being closed, fail
if self.count > 0 and self.usages == 0:
self._abort()
@@ -604,7 +615,8 @@
f, o = l.split('\0')
entries.append((f, int(o), None))
except ValueError:
- report(_("couldn't read journal entry %r!\n") % l)
+ report(
+ _("couldn't read journal entry %r!\n") % pycompat.bytestr(l))
backupjournal = "%s.backupfiles" % file
if opener.exists(backupjournal):
@@ -612,7 +624,7 @@
lines = fp.readlines()
if lines:
ver = lines[0][:-1]
- if ver == str(version):
+ if ver == (b'%d' % version):
for line in lines[1:]:
if line:
# Shave off the trailing newline
--- a/mercurial/ui.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/ui.py Mon Mar 19 08:07:18 2018 -0700
@@ -37,6 +37,7 @@
scmutil,
util,
)
+from .utils import dateutil
urlreq = util.urlreq
@@ -45,7 +46,7 @@
if not c.isalnum())
# The config knobs that will be altered (if unset) by ui.tweakdefaults.
-tweakrc = """
+tweakrc = b"""
[ui]
# The rollback command is dangerous. As a rule, don't use it.
rollback = False
@@ -59,6 +60,10 @@
status.relative = yes
# Refuse to perform an `hg update` that would cause a file content merge
update.check = noconflict
+# Show conflicts information in `hg status`
+status.verbose = True
+# Skip the bisect state in conflicts information in `hg status`
+status.skipstates = bisect
[diff]
git = 1
@@ -148,14 +153,10 @@
}
def _maybestrurl(maybebytes):
- if maybebytes is None:
- return None
- return pycompat.strurl(maybebytes)
+ return util.rapply(pycompat.strurl, maybebytes)
def _maybebytesurl(maybestr):
- if maybestr is None:
- return None
- return pycompat.bytesurl(maybestr)
+ return util.rapply(pycompat.bytesurl, maybestr)
class httppasswordmgrdbproxy(object):
"""Delays loading urllib2 until it's needed."""
@@ -168,18 +169,14 @@
return self._mgr
def add_password(self, realm, uris, user, passwd):
- if isinstance(uris, tuple):
- uris = tuple(_maybestrurl(u) for u in uris)
- else:
- uris = _maybestrurl(uris)
return self._get_mgr().add_password(
- _maybestrurl(realm), uris,
+ _maybestrurl(realm), _maybestrurl(uris),
_maybestrurl(user), _maybestrurl(passwd))
def find_user_password(self, realm, uri):
- return tuple(_maybebytesurl(v) for v in
- self._get_mgr().find_user_password(_maybestrurl(realm),
- _maybestrurl(uri)))
+ mgr = self._get_mgr()
+ return _maybebytesurl(mgr.find_user_password(_maybestrurl(realm),
+ _maybestrurl(uri)))
def _catchterm(*args):
raise error.SignalInterrupt
@@ -374,7 +371,7 @@
except error.ConfigError as inst:
if trusted:
raise
- self.warn(_("ignored: %s\n") % str(inst))
+ self.warn(_("ignored: %s\n") % util.forcebytestr(inst))
if self.plain():
for k in ('debug', 'fallbackencoding', 'quiet', 'slash',
@@ -506,7 +503,7 @@
and default != itemdefault):
msg = ("specifying a mismatched default value for a registered "
"config item: '%s.%s' '%s'")
- msg %= (section, name, default)
+ msg %= (section, name, pycompat.bytestr(default))
self.develwarn(msg, 2, 'warn-config-default')
for s, n in alternates:
@@ -722,7 +719,7 @@
(0, 0)
"""
if self.config(section, name, default, untrusted):
- return self.configwith(util.parsedate, section, name, default,
+ return self.configwith(dateutil.parsedate, section, name, default,
'date', untrusted)
if default is _unset:
return None
@@ -742,7 +739,7 @@
for k, v in items:
if ':' not in k:
newitems[k] = v
- items = newitems.items()
+ items = list(newitems.iteritems())
if self.debugflag and not untrusted and self._reportuntrusted:
for k, v in self._ucfg.items(section):
if self._tcfg.get(section, k) != v:
@@ -807,7 +804,8 @@
user = self.prompt(_("enter a commit username:"), default=None)
if user is None and not self.interactive():
try:
- user = '%s@%s' % (util.getuser(), socket.getfqdn())
+ user = '%s@%s' % (util.getuser(),
+ encoding.strtolocal(socket.getfqdn()))
self.warn(_("no username found, using '%s' instead\n") % user)
except KeyError:
pass
@@ -816,8 +814,8 @@
hint=_("use 'hg config --edit' "
'to set your username'))
if "\n" in user:
- raise error.Abort(_("username %s contains a newline\n")
- % repr(user))
+ raise error.Abort(_("username %r contains a newline\n")
+ % pycompat.bytestr(user))
return user
def shortuser(self, user):
@@ -878,6 +876,17 @@
return "".join(self._buffers.pop())
+ def canwritewithoutlabels(self):
+ '''check if write skips the label'''
+ if self._buffers and not self._bufferapplylabels:
+ return True
+ return self._colormode is None
+
+ def canbatchlabeledwrites(self):
+ '''check if write calls with labels are batchable'''
+ # Windows color printing is special, see ``write``.
+ return self._colormode != 'win32'
+
def write(self, *args, **opts):
'''write args to output
@@ -894,13 +903,17 @@
"cmdname.type" is recommended. For example, status issues
a label of "status.modified" for modified files.
'''
- if self._buffers and not opts.get(r'prompt', False):
+ if self._buffers:
if self._bufferapplylabels:
label = opts.get(r'label', '')
self._buffers[-1].extend(self.label(a, label) for a in args)
else:
self._buffers[-1].extend(args)
- elif self._colormode == 'win32':
+ else:
+ self._writenobuf(*args, **opts)
+
+ def _writenobuf(self, *args, **opts):
+ if self._colormode == 'win32':
# windows color printing is its own can of crab, defer to
# the color module and that is it.
color.win32print(self, self._write, *args, **opts)
@@ -916,8 +929,7 @@
# opencode timeblockedsection because this is a critical path
starttime = util.timer()
try:
- for a in msgs:
- self.fout.write(a)
+ self.fout.write(''.join(msgs))
except IOError as err:
raise error.StdioError(err)
finally:
@@ -1255,8 +1267,14 @@
return i
- def _readline(self, prompt=''):
- if self._isatty(self.fin):
+ def _readline(self):
+ # Replacing stdin/stdout temporarily is a hard problem on Python 3
+ # because they have to be text streams with *no buffering*. Instead,
+ # we use rawinput() only if call_readline() will be invoked by
+ # PyOS_Readline(), so no I/O will be made at Python layer.
+ usereadline = (self._isatty(self.fin) and self._isatty(self.fout)
+ and util.isstdin(self.fin) and util.isstdout(self.fout))
+ if usereadline:
try:
# magically add command line editing support, where
# available
@@ -1265,22 +1283,25 @@
readline.read_history_file
# windows sometimes raises something other than ImportError
except Exception:
- pass
-
- # call write() so output goes through subclassed implementation
- # e.g. color extension on Windows
- self.write(prompt, prompt=True)
- self.flush()
+ usereadline = False
# prompt ' ' must exist; otherwise readline may delete entire line
# - http://bugs.python.org/issue12833
with self.timeblockedsection('stdio'):
- line = util.bytesinput(self.fin, self.fout, r' ')
+ if usereadline:
+ line = encoding.strtolocal(pycompat.rawinput(r' '))
+ # When stdin is in binary mode on Windows, it can cause
+ # raw_input() to emit an extra trailing carriage return
+ if pycompat.oslinesep == b'\r\n' and line.endswith(b'\r'):
+ line = line[:-1]
+ else:
+ self.fout.write(b' ')
+ self.fout.flush()
+ line = self.fin.readline()
+ if not line:
+ raise EOFError
+ line = line.rstrip(pycompat.oslinesep)
- # When stdin is in binary mode on Windows, it can cause
- # raw_input() to emit an extra trailing carriage return
- if pycompat.oslinesep == '\r\n' and line and line[-1] == '\r':
- line = line[:-1]
return line
def prompt(self, msg, default="y"):
@@ -1290,8 +1311,10 @@
if not self.interactive():
self.write(msg, ' ', default or '', "\n")
return default
+ self._writenobuf(msg, label='ui.prompt')
+ self.flush()
try:
- r = self._readline(self.label(msg, 'ui.prompt'))
+ r = self._readline()
if not r:
r = default
if self.configbool('ui', 'promptecho'):
@@ -1509,11 +1532,7 @@
''.join(exconly))
else:
output = traceback.format_exception(exc[0], exc[1], exc[2])
- data = r''.join(output)
- if pycompat.ispy3:
- enc = pycompat.sysstr(encoding.encoding)
- data = data.encode(enc, errors=r'replace')
- self.write_err(data)
+ self.write_err(encoding.strtolocal(r''.join(output)))
return self.tracebackflag or force
def geteditor(self):
@@ -1621,13 +1640,15 @@
else:
curframe = inspect.currentframe()
calframe = inspect.getouterframes(curframe, 2)
- self.write_err('%s at: %s:%s (%s)\n'
- % ((msg,) + calframe[stacklevel][1:4]))
- self.log('develwarn', '%s at: %s:%s (%s)\n',
- msg, *calframe[stacklevel][1:4])
+ fname, lineno, fmsg = calframe[stacklevel][1:4]
+ fname, fmsg = pycompat.sysbytes(fname), pycompat.sysbytes(fmsg)
+ self.write_err('%s at: %s:%d (%s)\n'
+ % (msg, fname, lineno, fmsg))
+ self.log('develwarn', '%s at: %s:%d (%s)\n',
+ msg, fname, lineno, fmsg)
curframe = calframe = None # avoid cycles
- def deprecwarn(self, msg, version):
+ def deprecwarn(self, msg, version, stacklevel=2):
"""issue a deprecation warning
- msg: message explaining what is deprecated and how to upgrade,
@@ -1638,7 +1659,7 @@
return
msg += ("\n(compatibility will be dropped after Mercurial-%s,"
" update your code.)") % version
- self.develwarn(msg, stacklevel=2, config='deprec-warn')
+ self.develwarn(msg, stacklevel=stacklevel, config='deprec-warn')
def exportableenviron(self):
"""The environment variables that are safe to export, e.g. through
@@ -1755,7 +1776,7 @@
'ignoring)\n') % path.name)
u.fragment = None
- return str(u)
+ return bytes(u)
@pathsuboption('pushrev', 'pushrev')
def pushrevpathoption(ui, path, value):
--- a/mercurial/upgrade.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/upgrade.py Mon Mar 19 08:07:18 2018 -0700
@@ -46,7 +46,6 @@
return {
# The upgrade code does not yet support these experimental features.
# This is an artificial limitation.
- 'manifestv2',
'treemanifest',
# This was a precursor to generaldelta and was never enabled by default.
# It should (hopefully) not exist in the wild.
--- a/mercurial/url.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/url.py Mon Mar 19 08:07:18 2018 -0700
@@ -67,15 +67,15 @@
user, passwd = auth.get('username'), auth.get('password')
self.ui.debug("using auth.%s.* for authentication\n" % group)
if not user or not passwd:
- u = util.url(authuri)
+ u = util.url(pycompat.bytesurl(authuri))
u.query = None
if not self.ui.interactive():
raise error.Abort(_('http authorization required for %s') %
- util.hidepassword(str(u)))
+ util.hidepassword(bytes(u)))
self.ui.write(_("http authorization required for %s\n") %
- util.hidepassword(str(u)))
- self.ui.write(_("realm: %s\n") % realm)
+ util.hidepassword(bytes(u)))
+ self.ui.write(_("realm: %s\n") % pycompat.bytesurl(realm))
if user:
self.ui.write(_("user: %s\n") % user)
else:
@@ -124,10 +124,9 @@
else:
self.no_list = no_list
- proxyurl = str(proxy)
+ proxyurl = bytes(proxy)
proxies = {'http': proxyurl, 'https': proxyurl}
- ui.debug('proxying through http://%s:%s\n' %
- (proxy.host, proxy.port))
+ ui.debug('proxying through %s\n' % util.hidepassword(proxyurl))
else:
proxies = {}
@@ -297,6 +296,34 @@
_generic_start_transaction(self, h, req)
return keepalive.HTTPHandler._start_transaction(self, h, req)
+class logginghttpconnection(keepalive.HTTPConnection):
+ def __init__(self, createconn, *args, **kwargs):
+ keepalive.HTTPConnection.__init__(self, *args, **kwargs)
+ self._create_connection = createconn
+
+class logginghttphandler(httphandler):
+ """HTTP handler that logs socket I/O."""
+ def __init__(self, logfh, name, observeropts):
+ super(logginghttphandler, self).__init__()
+
+ self._logfh = logfh
+ self._logname = name
+ self._observeropts = observeropts
+
+ # do_open() calls the passed class to instantiate an HTTPConnection. We
+ # pass in a callable method that creates a custom HTTPConnection instance
+ # whose callback to create the socket knows how to proxy the socket.
+ def http_open(self, req):
+ return self.do_open(self._makeconnection, req)
+
+ def _makeconnection(self, *args, **kwargs):
+ def createconnection(*args, **kwargs):
+ sock = socket.create_connection(*args, **kwargs)
+ return util.makeloggingsocket(self._logfh, sock, self._logname,
+ **self._observeropts)
+
+ return logginghttpconnection(createconnection, *args, **kwargs)
+
if has_https:
class httpsconnection(httplib.HTTPConnection):
response_class = keepalive.HTTPResponse
@@ -425,8 +452,8 @@
user, pw = self.passwd.find_user_password(
realm, urllibcompat.getfullurl(req))
if pw is not None:
- raw = "%s:%s" % (user, pw)
- auth = 'Basic %s' % base64.b64encode(raw).strip()
+ raw = "%s:%s" % (pycompat.bytesurl(user), pycompat.bytesurl(pw))
+ auth = r'Basic %s' % pycompat.strurl(base64.b64encode(raw).strip())
if req.get_header(self.auth_header, None) == auth:
return None
self.auth = auth
@@ -450,7 +477,7 @@
self.cookiejar = cookiejar
except util.cookielib.LoadError as e:
ui.warn(_('(error loading cookie file %s: %s; continuing without '
- 'cookies)\n') % (cookiefile, str(e)))
+ 'cookies)\n') % (cookiefile, util.forcebytestr(e)))
def http_request(self, request):
if self.cookiejar:
@@ -466,20 +493,30 @@
handlerfuncs = []
-def opener(ui, authinfo=None, useragent=None):
+def opener(ui, authinfo=None, useragent=None, loggingfh=None,
+ loggingname=b's', loggingopts=None):
'''
construct an opener suitable for urllib2
authinfo will be added to the password manager
+
+ The opener can be configured to log socket events if the various
+ ``logging*`` arguments are specified.
+
+ ``loggingfh`` denotes a file object to log events to.
+ ``loggingname`` denotes the name to print when logging.
+ ``loggingopts`` is a dict of keyword arguments to pass to the constructed
+ ``util.socketobserver`` instance.
'''
- # experimental config: ui.usehttp2
- if ui.configbool('ui', 'usehttp2'):
- handlers = [
- httpconnectionmod.http2handler(
- ui,
- passwordmgr(ui, ui.httppasswordmgrdb))
- ]
+ handlers = []
+
+ if loggingfh:
+ handlers.append(logginghttphandler(loggingfh, loggingname,
+ loggingopts or {}))
+ # We don't yet support HTTPS when logging I/O. If we attempt to open
+ # an HTTPS URL, we'll likely fail due to unknown protocol.
+
else:
- handlers = [httphandler()]
+ handlers.append(httphandler())
if has_https:
handlers.append(httpshandler(ui))
@@ -537,4 +574,4 @@
path = util.normpath(os.path.abspath(url_))
url_ = 'file://' + urlreq.pathname2url(path)
authinfo = None
- return opener(ui, authinfo).open(url_, data)
+ return opener(ui, authinfo).open(pycompat.strurl(url_), data)
--- a/mercurial/urllibcompat.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/urllibcompat.py Mon Mar 19 08:07:18 2018 -0700
@@ -18,7 +18,7 @@
"""Add items that will be populated at the first access"""
items = map(_sysstr, items)
self._aliases.update(
- (item.replace(_sysstr('_'), _sysstr('')).lower(), (origin, item))
+ (item.replace(r'_', r'').lower(), (origin, item))
for item in items)
def _registeralias(self, origin, attr, name):
@@ -47,6 +47,8 @@
"urlparse",
"urlunparse",
))
+ urlreq._registeralias(urllib.parse, "parse_qs", "parseqs")
+ urlreq._registeralias(urllib.parse, "parse_qsl", "parseqsl")
urlreq._registeralias(urllib.parse, "unquote_to_bytes", "unquote")
import urllib.request
urlreq._registeraliases(urllib.request, (
@@ -157,6 +159,8 @@
"urlparse",
"urlunparse",
))
+ urlreq._registeralias(urlparse, "parse_qs", "parseqs")
+ urlreq._registeralias(urlparse, "parse_qsl", "parseqsl")
urlerr._registeraliases(urllib2, (
"HTTPError",
"URLError",
--- a/mercurial/util.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/util.py Mon Mar 19 08:07:18 2018 -0700
@@ -17,15 +17,14 @@
import abc
import bz2
-import calendar
import codecs
import collections
import contextlib
-import datetime
import errno
import gc
import hashlib
import imp
+import io
import itertools
import mmap
import os
@@ -54,6 +53,7 @@
pycompat,
urllibcompat,
)
+from .utils import dateutil
base85 = policy.importmod(r'base85')
osutil = policy.importmod(r'osutil')
@@ -71,7 +71,9 @@
stderr = pycompat.stderr
stdin = pycompat.stdin
stdout = pycompat.stdout
-stringio = pycompat.stringio
+bytesio = pycompat.bytesio
+# TODO deprecate stringio name, as it is a lie on Python 3.
+stringio = bytesio
xmlrpclib = pycompat.xmlrpclib
httpserver = urllibcompat.httpserver
@@ -91,7 +93,7 @@
# destined stdout with a pipe destined stdout (e.g. pager), we want line
# buffering
if isatty(stdout):
- stdout = os.fdopen(stdout.fileno(), pycompat.sysstr('wb'), 1)
+ stdout = os.fdopen(stdout.fileno(), r'wb', 1)
if pycompat.iswindows:
from . import windows as platform
@@ -147,6 +149,7 @@
setflags = platform.setflags
setsignalhandler = platform.setsignalhandler
shellquote = platform.shellquote
+shellsplit = platform.shellsplit
spawndetached = platform.spawndetached
split = platform.split
sshargs = platform.sshargs
@@ -175,21 +178,41 @@
_notset = object()
-# disable Python's problematic floating point timestamps (issue4836)
-# (Python hypocritically says you shouldn't change this behavior in
-# libraries, and sure enough Mercurial is not a library.)
-os.stat_float_times(False)
-
def safehasattr(thing, attr):
return getattr(thing, attr, _notset) is not _notset
-def bytesinput(fin, fout, *args, **kwargs):
- sin, sout = sys.stdin, sys.stdout
- try:
- sys.stdin, sys.stdout = encoding.strio(fin), encoding.strio(fout)
- return encoding.strtolocal(pycompat.rawinput(*args, **kwargs))
- finally:
- sys.stdin, sys.stdout = sin, sout
+def _rapply(f, xs):
+ if xs is None:
+ # assume None means non-value of optional data
+ return xs
+ if isinstance(xs, (list, set, tuple)):
+ return type(xs)(_rapply(f, x) for x in xs)
+ if isinstance(xs, dict):
+ return type(xs)((_rapply(f, k), _rapply(f, v)) for k, v in xs.items())
+ return f(xs)
+
+def rapply(f, xs):
+ """Apply function recursively to every item preserving the data structure
+
+ >>> def f(x):
+ ... return 'f(%s)' % x
+ >>> rapply(f, None) is None
+ True
+ >>> rapply(f, 'a')
+ 'f(a)'
+ >>> rapply(f, {'a'}) == {'f(a)'}
+ True
+ >>> rapply(f, ['a', 'b', None, {'c': 'd'}, []])
+ ['f(a)', 'f(b)', None, {'f(c)': 'f(d)'}, []]
+
+ >>> xs = [object()]
+ >>> rapply(pycompat.identity, xs) is xs
+ True
+ """
+ if f is pycompat.identity:
+ # fast path mainly for py2
+ return xs
+ return _rapply(f, xs)
def bitsfrom(container):
bits = 0
@@ -211,6 +234,12 @@
warnings.filterwarnings(r'default', r'', DeprecationWarning, r'mercurial')
warnings.filterwarnings(r'default', r'', DeprecationWarning, r'hgext')
warnings.filterwarnings(r'default', r'', DeprecationWarning, r'hgext3rd')
+if _dowarn and pycompat.ispy3:
+ # silence warning emitted by passing user string to re.sub()
+ warnings.filterwarnings(r'ignore', r'bad escape', DeprecationWarning,
+ r'mercurial')
+ warnings.filterwarnings(r'ignore', r'invalid escape sequence',
+ DeprecationWarning, r'mercurial')
def nouideprecwarn(msg, version, stacklevel=1):
"""Issue an python native deprecation warning
@@ -220,7 +249,7 @@
if _dowarn:
msg += ("\n(compatibility will be dropped after Mercurial-%s,"
" update your code.)") % version
- warnings.warn(msg, DeprecationWarning, stacklevel + 1)
+ warnings.warn(pycompat.sysstr(msg), DeprecationWarning, stacklevel + 1)
DIGESTS = {
'md5': hashlib.md5,
@@ -338,6 +367,13 @@
This class lives in the 'util' module because it makes use of the 'os'
module from the python stdlib.
"""
+ def __new__(cls, fh):
+ # If we receive a fileobjectproxy, we need to use a variation of this
+ # class that notifies observers about activity.
+ if isinstance(fh, fileobjectproxy):
+ cls = observedbufferedinputpipe
+
+ return super(bufferedinputpipe, cls).__new__(cls)
def __init__(self, input):
self._input = input
@@ -418,6 +454,8 @@
self._lenbuf += len(data)
self._buffer.append(data)
+ return data
+
def mmapread(fp):
try:
fd = getattr(fp, 'fileno', lambda: fp)()
@@ -453,6 +491,541 @@
env=env)
return p.stdin, p.stdout, p.stderr, p
+class fileobjectproxy(object):
+ """A proxy around file objects that tells a watcher when events occur.
+
+ This type is intended to only be used for testing purposes. Think hard
+ before using it in important code.
+ """
+ __slots__ = (
+ r'_orig',
+ r'_observer',
+ )
+
+ def __init__(self, fh, observer):
+ object.__setattr__(self, r'_orig', fh)
+ object.__setattr__(self, r'_observer', observer)
+
+ def __getattribute__(self, name):
+ ours = {
+ r'_observer',
+
+ # IOBase
+ r'close',
+ # closed if a property
+ r'fileno',
+ r'flush',
+ r'isatty',
+ r'readable',
+ r'readline',
+ r'readlines',
+ r'seek',
+ r'seekable',
+ r'tell',
+ r'truncate',
+ r'writable',
+ r'writelines',
+ # RawIOBase
+ r'read',
+ r'readall',
+ r'readinto',
+ r'write',
+ # BufferedIOBase
+ # raw is a property
+ r'detach',
+ # read defined above
+ r'read1',
+ # readinto defined above
+ # write defined above
+ }
+
+ # We only observe some methods.
+ if name in ours:
+ return object.__getattribute__(self, name)
+
+ return getattr(object.__getattribute__(self, r'_orig'), name)
+
+ def __nonzero__(self):
+ return bool(object.__getattribute__(self, r'_orig'))
+
+ __bool__ = __nonzero__
+
+ def __delattr__(self, name):
+ return delattr(object.__getattribute__(self, r'_orig'), name)
+
+ def __setattr__(self, name, value):
+ return setattr(object.__getattribute__(self, r'_orig'), name, value)
+
+ def __iter__(self):
+ return object.__getattribute__(self, r'_orig').__iter__()
+
+ def _observedcall(self, name, *args, **kwargs):
+ # Call the original object.
+ orig = object.__getattribute__(self, r'_orig')
+ res = getattr(orig, name)(*args, **kwargs)
+
+ # Call a method on the observer of the same name with arguments
+ # so it can react, log, etc.
+ observer = object.__getattribute__(self, r'_observer')
+ fn = getattr(observer, name, None)
+ if fn:
+ fn(res, *args, **kwargs)
+
+ return res
+
+ def close(self, *args, **kwargs):
+ return object.__getattribute__(self, r'_observedcall')(
+ r'close', *args, **kwargs)
+
+ def fileno(self, *args, **kwargs):
+ return object.__getattribute__(self, r'_observedcall')(
+ r'fileno', *args, **kwargs)
+
+ def flush(self, *args, **kwargs):
+ return object.__getattribute__(self, r'_observedcall')(
+ r'flush', *args, **kwargs)
+
+ def isatty(self, *args, **kwargs):
+ return object.__getattribute__(self, r'_observedcall')(
+ r'isatty', *args, **kwargs)
+
+ def readable(self, *args, **kwargs):
+ return object.__getattribute__(self, r'_observedcall')(
+ r'readable', *args, **kwargs)
+
+ def readline(self, *args, **kwargs):
+ return object.__getattribute__(self, r'_observedcall')(
+ r'readline', *args, **kwargs)
+
+ def readlines(self, *args, **kwargs):
+ return object.__getattribute__(self, r'_observedcall')(
+ r'readlines', *args, **kwargs)
+
+ def seek(self, *args, **kwargs):
+ return object.__getattribute__(self, r'_observedcall')(
+ r'seek', *args, **kwargs)
+
+ def seekable(self, *args, **kwargs):
+ return object.__getattribute__(self, r'_observedcall')(
+ r'seekable', *args, **kwargs)
+
+ def tell(self, *args, **kwargs):
+ return object.__getattribute__(self, r'_observedcall')(
+ r'tell', *args, **kwargs)
+
+ def truncate(self, *args, **kwargs):
+ return object.__getattribute__(self, r'_observedcall')(
+ r'truncate', *args, **kwargs)
+
+ def writable(self, *args, **kwargs):
+ return object.__getattribute__(self, r'_observedcall')(
+ r'writable', *args, **kwargs)
+
+ def writelines(self, *args, **kwargs):
+ return object.__getattribute__(self, r'_observedcall')(
+ r'writelines', *args, **kwargs)
+
+ def read(self, *args, **kwargs):
+ return object.__getattribute__(self, r'_observedcall')(
+ r'read', *args, **kwargs)
+
+ def readall(self, *args, **kwargs):
+ return object.__getattribute__(self, r'_observedcall')(
+ r'readall', *args, **kwargs)
+
+ def readinto(self, *args, **kwargs):
+ return object.__getattribute__(self, r'_observedcall')(
+ r'readinto', *args, **kwargs)
+
+ def write(self, *args, **kwargs):
+ return object.__getattribute__(self, r'_observedcall')(
+ r'write', *args, **kwargs)
+
+ def detach(self, *args, **kwargs):
+ return object.__getattribute__(self, r'_observedcall')(
+ r'detach', *args, **kwargs)
+
+ def read1(self, *args, **kwargs):
+ return object.__getattribute__(self, r'_observedcall')(
+ r'read1', *args, **kwargs)
+
+class observedbufferedinputpipe(bufferedinputpipe):
+ """A variation of bufferedinputpipe that is aware of fileobjectproxy.
+
+ ``bufferedinputpipe`` makes low-level calls to ``os.read()`` that
+ bypass ``fileobjectproxy``. Because of this, we need to make
+ ``bufferedinputpipe`` aware of these operations.
+
+ This variation of ``bufferedinputpipe`` can notify observers about
+ ``os.read()`` events. It also re-publishes other events, such as
+ ``read()`` and ``readline()``.
+ """
+ def _fillbuffer(self):
+ res = super(observedbufferedinputpipe, self)._fillbuffer()
+
+ fn = getattr(self._input._observer, r'osread', None)
+ if fn:
+ fn(res, _chunksize)
+
+ return res
+
+ # We use different observer methods because the operation isn't
+ # performed on the actual file object but on us.
+ def read(self, size):
+ res = super(observedbufferedinputpipe, self).read(size)
+
+ fn = getattr(self._input._observer, r'bufferedread', None)
+ if fn:
+ fn(res, size)
+
+ return res
+
+ def readline(self, *args, **kwargs):
+ res = super(observedbufferedinputpipe, self).readline(*args, **kwargs)
+
+ fn = getattr(self._input._observer, r'bufferedreadline', None)
+ if fn:
+ fn(res)
+
+ return res
+
+PROXIED_SOCKET_METHODS = {
+ r'makefile',
+ r'recv',
+ r'recvfrom',
+ r'recvfrom_into',
+ r'recv_into',
+ r'send',
+ r'sendall',
+ r'sendto',
+ r'setblocking',
+ r'settimeout',
+ r'gettimeout',
+ r'setsockopt',
+}
+
+class socketproxy(object):
+ """A proxy around a socket that tells a watcher when events occur.
+
+ This is like ``fileobjectproxy`` except for sockets.
+
+ This type is intended to only be used for testing purposes. Think hard
+ before using it in important code.
+ """
+ __slots__ = (
+ r'_orig',
+ r'_observer',
+ )
+
+ def __init__(self, sock, observer):
+ object.__setattr__(self, r'_orig', sock)
+ object.__setattr__(self, r'_observer', observer)
+
+ def __getattribute__(self, name):
+ if name in PROXIED_SOCKET_METHODS:
+ return object.__getattribute__(self, name)
+
+ return getattr(object.__getattribute__(self, r'_orig'), name)
+
+ def __delattr__(self, name):
+ return delattr(object.__getattribute__(self, r'_orig'), name)
+
+ def __setattr__(self, name, value):
+ return setattr(object.__getattribute__(self, r'_orig'), name, value)
+
+ def __nonzero__(self):
+ return bool(object.__getattribute__(self, r'_orig'))
+
+ __bool__ = __nonzero__
+
+ def _observedcall(self, name, *args, **kwargs):
+ # Call the original object.
+ orig = object.__getattribute__(self, r'_orig')
+ res = getattr(orig, name)(*args, **kwargs)
+
+ # Call a method on the observer of the same name with arguments
+ # so it can react, log, etc.
+ observer = object.__getattribute__(self, r'_observer')
+ fn = getattr(observer, name, None)
+ if fn:
+ fn(res, *args, **kwargs)
+
+ return res
+
+ def makefile(self, *args, **kwargs):
+ res = object.__getattribute__(self, r'_observedcall')(
+ r'makefile', *args, **kwargs)
+
+ # The file object may be used for I/O. So we turn it into a
+ # proxy using our observer.
+ observer = object.__getattribute__(self, r'_observer')
+ return makeloggingfileobject(observer.fh, res, observer.name,
+ reads=observer.reads,
+ writes=observer.writes,
+ logdata=observer.logdata)
+
+ def recv(self, *args, **kwargs):
+ return object.__getattribute__(self, r'_observedcall')(
+ r'recv', *args, **kwargs)
+
+ def recvfrom(self, *args, **kwargs):
+ return object.__getattribute__(self, r'_observedcall')(
+ r'recvfrom', *args, **kwargs)
+
+ def recvfrom_into(self, *args, **kwargs):
+ return object.__getattribute__(self, r'_observedcall')(
+ r'recvfrom_into', *args, **kwargs)
+
+    def recv_into(self, *args, **kwargs):
+        return object.__getattribute__(self, r'_observedcall')(
+            r'recv_into', *args, **kwargs)
+
+ def send(self, *args, **kwargs):
+ return object.__getattribute__(self, r'_observedcall')(
+ r'send', *args, **kwargs)
+
+ def sendall(self, *args, **kwargs):
+ return object.__getattribute__(self, r'_observedcall')(
+ r'sendall', *args, **kwargs)
+
+ def sendto(self, *args, **kwargs):
+ return object.__getattribute__(self, r'_observedcall')(
+ r'sendto', *args, **kwargs)
+
+ def setblocking(self, *args, **kwargs):
+ return object.__getattribute__(self, r'_observedcall')(
+ r'setblocking', *args, **kwargs)
+
+ def settimeout(self, *args, **kwargs):
+ return object.__getattribute__(self, r'_observedcall')(
+ r'settimeout', *args, **kwargs)
+
+ def gettimeout(self, *args, **kwargs):
+ return object.__getattribute__(self, r'_observedcall')(
+ r'gettimeout', *args, **kwargs)
+
+ def setsockopt(self, *args, **kwargs):
+ return object.__getattribute__(self, r'_observedcall')(
+ r'setsockopt', *args, **kwargs)
+
+DATA_ESCAPE_MAP = {pycompat.bytechr(i): br'\x%02x' % i for i in range(256)}
+DATA_ESCAPE_MAP.update({
+ b'\\': b'\\\\',
+ b'\r': br'\r',
+ b'\n': br'\n',
+})
+DATA_ESCAPE_RE = remod.compile(br'[\x00-\x08\x0a-\x1f\\\x7f-\xff]')
+
+def escapedata(s):
+ if isinstance(s, bytearray):
+ s = bytes(s)
+
+ return DATA_ESCAPE_RE.sub(lambda m: DATA_ESCAPE_MAP[m.group(0)], s)
+
+class baseproxyobserver(object):
+ def _writedata(self, data):
+ if not self.logdata:
+ self.fh.write('\n')
+ self.fh.flush()
+ return
+
+ # Simple case writes all data on a single line.
+ if b'\n' not in data:
+ self.fh.write(': %s\n' % escapedata(data))
+ self.fh.flush()
+ return
+
+ # Data with newlines is written to multiple lines.
+ self.fh.write(':\n')
+ lines = data.splitlines(True)
+ for line in lines:
+ self.fh.write('%s> %s\n' % (self.name, escapedata(line)))
+ self.fh.flush()
+
+class fileobjectobserver(baseproxyobserver):
+ """Logs file object activity."""
+ def __init__(self, fh, name, reads=True, writes=True, logdata=False):
+ self.fh = fh
+ self.name = name
+ self.logdata = logdata
+ self.reads = reads
+ self.writes = writes
+
+ def read(self, res, size=-1):
+ if not self.reads:
+ return
+ # Python 3 can return None from reads at EOF instead of empty strings.
+ if res is None:
+ res = ''
+
+ self.fh.write('%s> read(%d) -> %d' % (self.name, size, len(res)))
+ self._writedata(res)
+
+ def readline(self, res, limit=-1):
+ if not self.reads:
+ return
+
+ self.fh.write('%s> readline() -> %d' % (self.name, len(res)))
+ self._writedata(res)
+
+ def readinto(self, res, dest):
+ if not self.reads:
+ return
+
+ self.fh.write('%s> readinto(%d) -> %r' % (self.name, len(dest),
+ res))
+ data = dest[0:res] if res is not None else b''
+ self._writedata(data)
+
+ def write(self, res, data):
+ if not self.writes:
+ return
+
+ # Python 2 returns None from some write() calls. Python 3 (reasonably)
+ # returns the integer bytes written.
+ if res is None and data:
+ res = len(data)
+
+ self.fh.write('%s> write(%d) -> %r' % (self.name, len(data), res))
+ self._writedata(data)
+
+ def flush(self, res):
+ if not self.writes:
+ return
+
+ self.fh.write('%s> flush() -> %r\n' % (self.name, res))
+
+ # For observedbufferedinputpipe.
+ def bufferedread(self, res, size):
+ self.fh.write('%s> bufferedread(%d) -> %d' % (
+ self.name, size, len(res)))
+ self._writedata(res)
+
+ def bufferedreadline(self, res):
+ self.fh.write('%s> bufferedreadline() -> %d' % (self.name, len(res)))
+ self._writedata(res)
+
+def makeloggingfileobject(logh, fh, name, reads=True, writes=True,
+ logdata=False):
+ """Turn a file object into a logging file object."""
+
+ observer = fileobjectobserver(logh, name, reads=reads, writes=writes,
+ logdata=logdata)
+ return fileobjectproxy(fh, observer)
+
+class socketobserver(baseproxyobserver):
+ """Logs socket activity."""
+ def __init__(self, fh, name, reads=True, writes=True, states=True,
+ logdata=False):
+ self.fh = fh
+ self.name = name
+ self.reads = reads
+ self.writes = writes
+ self.states = states
+ self.logdata = logdata
+
+ def makefile(self, res, mode=None, bufsize=None):
+ if not self.states:
+ return
+
+ self.fh.write('%s> makefile(%r, %r)\n' % (
+ self.name, mode, bufsize))
+
+ def recv(self, res, size, flags=0):
+ if not self.reads:
+ return
+
+ self.fh.write('%s> recv(%d, %d) -> %d' % (
+ self.name, size, flags, len(res)))
+ self._writedata(res)
+
+ def recvfrom(self, res, size, flags=0):
+ if not self.reads:
+ return
+
+ self.fh.write('%s> recvfrom(%d, %d) -> %d' % (
+ self.name, size, flags, len(res[0])))
+ self._writedata(res[0])
+
+ def recvfrom_into(self, res, buf, size, flags=0):
+ if not self.reads:
+ return
+
+ self.fh.write('%s> recvfrom_into(%d, %d) -> %d' % (
+ self.name, size, flags, res[0]))
+ self._writedata(buf[0:res[0]])
+
+ def recv_into(self, res, buf, size=0, flags=0):
+ if not self.reads:
+ return
+
+ self.fh.write('%s> recv_into(%d, %d) -> %d' % (
+ self.name, size, flags, res))
+ self._writedata(buf[0:res])
+
+ def send(self, res, data, flags=0):
+ if not self.writes:
+ return
+
+ self.fh.write('%s> send(%d, %d) -> %d' % (
+ self.name, len(data), flags, len(res)))
+ self._writedata(data)
+
+ def sendall(self, res, data, flags=0):
+ if not self.writes:
+ return
+
+ # Returns None on success. So don't bother reporting return value.
+ self.fh.write('%s> sendall(%d, %d)' % (
+ self.name, len(data), flags))
+ self._writedata(data)
+
+ def sendto(self, res, data, flagsoraddress, address=None):
+ if not self.writes:
+ return
+
+ if address:
+ flags = flagsoraddress
+ else:
+ flags = 0
+
+ self.fh.write('%s> sendto(%d, %d, %r) -> %d' % (
+ self.name, len(data), flags, address, res))
+ self._writedata(data)
+
+ def setblocking(self, res, flag):
+ if not self.states:
+ return
+
+ self.fh.write('%s> setblocking(%r)\n' % (self.name, flag))
+
+ def settimeout(self, res, value):
+ if not self.states:
+ return
+
+ self.fh.write('%s> settimeout(%r)\n' % (self.name, value))
+
+ def gettimeout(self, res):
+ if not self.states:
+ return
+
+ self.fh.write('%s> gettimeout() -> %f\n' % (self.name, res))
+
+    def setsockopt(self, res, level, optname, value):
+        if not self.states:
+            return
+
+        self.fh.write('%s> setsockopt(%r, %r, %r) -> %r\n' % (
+            self.name, level, optname, value, res))
+
+def makeloggingsocket(logh, fh, name, reads=True, writes=True, states=True,
+ logdata=False):
+ """Turn a socket into a logging socket."""
+
+ observer = socketobserver(logh, name, reads=reads, writes=writes,
+ states=states, logdata=logdata)
+ return socketproxy(fh, observer)
+
def version():
"""Return version information if available."""
try:
@@ -530,48 +1103,6 @@
if n == 4:
return (vints[0], vints[1], vints[2], extra)
-# used by parsedate
-defaultdateformats = (
- '%Y-%m-%dT%H:%M:%S', # the 'real' ISO8601
- '%Y-%m-%dT%H:%M', # without seconds
- '%Y-%m-%dT%H%M%S', # another awful but legal variant without :
- '%Y-%m-%dT%H%M', # without seconds
- '%Y-%m-%d %H:%M:%S', # our common legal variant
- '%Y-%m-%d %H:%M', # without seconds
- '%Y-%m-%d %H%M%S', # without :
- '%Y-%m-%d %H%M', # without seconds
- '%Y-%m-%d %I:%M:%S%p',
- '%Y-%m-%d %H:%M',
- '%Y-%m-%d %I:%M%p',
- '%Y-%m-%d',
- '%m-%d',
- '%m/%d',
- '%m/%d/%y',
- '%m/%d/%Y',
- '%a %b %d %H:%M:%S %Y',
- '%a %b %d %I:%M:%S%p %Y',
- '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
- '%b %d %H:%M:%S %Y',
- '%b %d %I:%M:%S%p %Y',
- '%b %d %H:%M:%S',
- '%b %d %I:%M:%S%p',
- '%b %d %H:%M',
- '%b %d %I:%M%p',
- '%b %d %Y',
- '%b %d',
- '%H:%M:%S',
- '%I:%M:%S%p',
- '%H:%M',
- '%I:%M%p',
-)
-
-extendeddateformats = defaultdateformats + (
- "%Y",
- "%Y-%m",
- "%b",
- "%b %Y",
- )
-
def cachefunc(func):
'''cache the result of function calls'''
# XXX doesn't handle keywords args
@@ -958,7 +1489,7 @@
inname, outname = None, None
try:
infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
- fp = os.fdopen(infd, pycompat.sysstr('wb'))
+ fp = os.fdopen(infd, r'wb')
fp.write(s)
fp.close()
outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
@@ -1120,7 +1651,7 @@
"""
if _hgexecutable is None:
hg = encoding.environ.get('HG')
- mainmod = sys.modules[pycompat.sysstr('__main__')]
+ mainmod = sys.modules[r'__main__']
if hg:
_sethgexecutable(hg)
elif mainfrozen():
@@ -1142,9 +1673,18 @@
global _hgexecutable
_hgexecutable = path
-def _isstdout(f):
+def _testfileno(f, stdf):
fileno = getattr(f, 'fileno', None)
- return fileno and fileno() == sys.__stdout__.fileno()
+ try:
+ return fileno and fileno() == stdf.fileno()
+ except io.UnsupportedOperation:
+ return False # fileno() raised UnsupportedOperation
+
+def isstdin(f):
+ return _testfileno(f, sys.__stdin__)
+
+def isstdout(f):
+ return _testfileno(f, sys.__stdout__)
def shellenviron(environ=None):
"""return environ with optional override, useful for shelling out"""
@@ -1154,7 +1694,7 @@
return '0'
if val is True:
return '1'
- return str(val)
+ return pycompat.bytestr(val)
env = dict(encoding.environ)
if environ:
env.update((k, py2shell(v)) for k, v in environ.iteritems())
@@ -1173,7 +1713,7 @@
pass
cmd = quotecommand(cmd)
env = shellenviron(environ)
- if out is None or _isstdout(out):
+ if out is None or isstdout(out):
rc = subprocess.call(cmd, shell=True, close_fds=closefds,
env=env, cwd=cwd)
else:
@@ -1263,7 +1803,8 @@
newstat = filestat.frompath(dest)
if newstat.isambig(oldstat):
# stat of copied file is ambiguous to original one
- advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
+ advanced = (
+ oldstat.stat[stat.ST_MTIME] + 1) & 0x7fffffff
os.utime(dest, (advanced, advanced))
except shutil.Error as inst:
raise Abort(str(inst))
@@ -1372,6 +1913,11 @@
timer = time.perf_counter
def makelock(info, pathname):
+ """Create a lock file atomically if possible
+
+ This may leave a stale lock file if symlink isn't supported and signal
+ interrupt is enabled.
+ """
try:
return os.symlink(info, pathname)
except OSError as why:
@@ -1380,7 +1926,8 @@
except AttributeError: # no symlink in os
pass
- ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
+ flags = os.O_CREAT | os.O_WRONLY | os.O_EXCL | getattr(os, 'O_BINARY', 0)
+ ld = os.open(pathname, flags)
os.write(ld, info)
os.close(ld)
@@ -1392,7 +1939,7 @@
raise
except AttributeError: # no symlink in os
pass
- fp = posixfile(pathname)
+ fp = posixfile(pathname, 'rb')
r = fp.read()
fp.close()
return r
@@ -1654,8 +2201,8 @@
# avoided, comparison of size, ctime and mtime is enough
# to exactly detect change of a file regardless of platform
return (self.stat.st_size == old.stat.st_size and
- self.stat.st_ctime == old.stat.st_ctime and
- self.stat.st_mtime == old.stat.st_mtime)
+ self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME] and
+ self.stat[stat.ST_MTIME] == old.stat[stat.ST_MTIME])
except AttributeError:
pass
try:
@@ -1694,7 +2241,7 @@
S[n].mtime", even if size of a file isn't changed.
"""
try:
- return (self.stat.st_ctime == old.stat.st_ctime)
+ return (self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME])
except AttributeError:
return False
@@ -1709,7 +2256,7 @@
Otherwise, this returns True, as "ambiguity is avoided".
"""
- advanced = (old.stat.st_mtime + 1) & 0x7fffffff
+ advanced = (old.stat[stat.ST_MTIME] + 1) & 0x7fffffff
try:
os.utime(path, (advanced, advanced))
except OSError as inst:
@@ -1760,7 +2307,7 @@
newstat = filestat.frompath(filename)
if newstat.isambig(oldstat):
# stat of changed file is ambiguous to original one
- advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
+ advanced = (oldstat.stat[stat.ST_MTIME] + 1) & 0x7fffffff
os.utime(filename, (advanced, advanced))
else:
rename(self._tempname, filename)
@@ -1947,274 +2494,34 @@
limit -= len(s)
yield s
-def makedate(timestamp=None):
- '''Return a unix timestamp (or the current time) as a (unixtime,
- offset) tuple based off the local timezone.'''
- if timestamp is None:
- timestamp = time.time()
- if timestamp < 0:
- hint = _("check your clock")
- raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
- delta = (datetime.datetime.utcfromtimestamp(timestamp) -
- datetime.datetime.fromtimestamp(timestamp))
- tz = delta.days * 86400 + delta.seconds
- return timestamp, tz
-
-def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
- """represent a (unixtime, offset) tuple as a localized time.
- unixtime is seconds since the epoch, and offset is the time zone's
- number of seconds away from UTC.
-
- >>> datestr((0, 0))
- 'Thu Jan 01 00:00:00 1970 +0000'
- >>> datestr((42, 0))
- 'Thu Jan 01 00:00:42 1970 +0000'
- >>> datestr((-42, 0))
- 'Wed Dec 31 23:59:18 1969 +0000'
- >>> datestr((0x7fffffff, 0))
- 'Tue Jan 19 03:14:07 2038 +0000'
- >>> datestr((-0x80000000, 0))
- 'Fri Dec 13 20:45:52 1901 +0000'
- """
- t, tz = date or makedate()
- if "%1" in format or "%2" in format or "%z" in format:
- sign = (tz > 0) and "-" or "+"
- minutes = abs(tz) // 60
- q, r = divmod(minutes, 60)
- format = format.replace("%z", "%1%2")
- format = format.replace("%1", "%c%02d" % (sign, q))
- format = format.replace("%2", "%02d" % r)
- d = t - tz
- if d > 0x7fffffff:
- d = 0x7fffffff
- elif d < -0x80000000:
- d = -0x80000000
- # Never use time.gmtime() and datetime.datetime.fromtimestamp()
- # because they use the gmtime() system call which is buggy on Windows
- # for negative values.
- t = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=d)
- s = encoding.strtolocal(t.strftime(encoding.strfromlocal(format)))
- return s
-
-def shortdate(date=None):
- """turn (timestamp, tzoff) tuple into iso 8631 date."""
- return datestr(date, format='%Y-%m-%d')
-
-def parsetimezone(s):
- """find a trailing timezone, if any, in string, and return a
- (offset, remainder) pair"""
-
- if s.endswith("GMT") or s.endswith("UTC"):
- return 0, s[:-3].rstrip()
-
- # Unix-style timezones [+-]hhmm
- if len(s) >= 5 and s[-5] in "+-" and s[-4:].isdigit():
- sign = (s[-5] == "+") and 1 or -1
- hours = int(s[-4:-2])
- minutes = int(s[-2:])
- return -sign * (hours * 60 + minutes) * 60, s[:-5].rstrip()
-
- # ISO8601 trailing Z
- if s.endswith("Z") and s[-2:-1].isdigit():
- return 0, s[:-1]
-
- # ISO8601-style [+-]hh:mm
- if (len(s) >= 6 and s[-6] in "+-" and s[-3] == ":" and
- s[-5:-3].isdigit() and s[-2:].isdigit()):
- sign = (s[-6] == "+") and 1 or -1
- hours = int(s[-5:-3])
- minutes = int(s[-2:])
- return -sign * (hours * 60 + minutes) * 60, s[:-6]
-
- return None, s
-
-def strdate(string, format, defaults=None):
- """parse a localized time string and return a (unixtime, offset) tuple.
- if the string cannot be parsed, ValueError is raised."""
- if defaults is None:
- defaults = {}
-
- # NOTE: unixtime = localunixtime + offset
- offset, date = parsetimezone(string)
-
- # add missing elements from defaults
- usenow = False # default to using biased defaults
- for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
- part = pycompat.bytestr(part)
- found = [True for p in part if ("%"+p) in format]
- if not found:
- date += "@" + defaults[part][usenow]
- format += "@%" + part[0]
- else:
- # We've found a specific time element, less specific time
- # elements are relative to today
- usenow = True
-
- timetuple = time.strptime(encoding.strfromlocal(date),
- encoding.strfromlocal(format))
- localunixtime = int(calendar.timegm(timetuple))
- if offset is None:
- # local timezone
- unixtime = int(time.mktime(timetuple))
- offset = unixtime - localunixtime
- else:
- unixtime = localunixtime + offset
- return unixtime, offset
-
-def parsedate(date, formats=None, bias=None):
- """parse a localized date/time and return a (unixtime, offset) tuple.
-
- The date may be a "unixtime offset" string or in one of the specified
- formats. If the date already is a (unixtime, offset) tuple, it is returned.
-
- >>> parsedate(b' today ') == parsedate(
- ... datetime.date.today().strftime('%b %d').encode('ascii'))
- True
- >>> parsedate(b'yesterday ') == parsedate(
- ... (datetime.date.today() - datetime.timedelta(days=1)
- ... ).strftime('%b %d').encode('ascii'))
- True
- >>> now, tz = makedate()
- >>> strnow, strtz = parsedate(b'now')
- >>> (strnow - now) < 1
- True
- >>> tz == strtz
- True
+class cappedreader(object):
+ """A file object proxy that allows reading up to N bytes.
+
+ Given a source file object, instances of this type allow reading up to
+ N bytes from that source file object. Attempts to read past the allowed
+ limit are treated as EOF.
+
+ It is assumed that I/O is not performed on the original file object
+ in addition to I/O that is performed by this instance. If there is,
+ state tracking will get out of sync and unexpected results will ensue.
"""
- if bias is None:
- bias = {}
- if not date:
- return 0, 0
- if isinstance(date, tuple) and len(date) == 2:
- return date
- if not formats:
- formats = defaultdateformats
- date = date.strip()
-
- if date == 'now' or date == _('now'):
- return makedate()
- if date == 'today' or date == _('today'):
- date = datetime.date.today().strftime(r'%b %d')
- date = encoding.strtolocal(date)
- elif date == 'yesterday' or date == _('yesterday'):
- date = (datetime.date.today() -
- datetime.timedelta(days=1)).strftime(r'%b %d')
- date = encoding.strtolocal(date)
-
- try:
- when, offset = map(int, date.split(' '))
- except ValueError:
- # fill out defaults
- now = makedate()
- defaults = {}
- for part in ("d", "mb", "yY", "HI", "M", "S"):
- # this piece is for rounding the specific end of unknowns
- b = bias.get(part)
- if b is None:
- if part[0:1] in "HMS":
- b = "00"
- else:
- b = "0"
-
- # this piece is for matching the generic end to today's date
- n = datestr(now, "%" + part[0:1])
-
- defaults[part] = (b, n)
-
- for format in formats:
- try:
- when, offset = strdate(date, format, defaults)
- except (ValueError, OverflowError):
- pass
- else:
- break
- else:
- raise error.ParseError(_('invalid date: %r') % date)
- # validate explicit (probably user-specified) date and
- # time zone offset. values must fit in signed 32 bits for
- # current 32-bit linux runtimes. timezones go from UTC-12
- # to UTC+14
- if when < -0x80000000 or when > 0x7fffffff:
- raise error.ParseError(_('date exceeds 32 bits: %d') % when)
- if offset < -50400 or offset > 43200:
- raise error.ParseError(_('impossible time zone offset: %d') % offset)
- return when, offset
-
-def matchdate(date):
- """Return a function that matches a given date match specifier
-
- Formats include:
-
- '{date}' match a given date to the accuracy provided
-
- '<{date}' on or before a given date
-
- '>{date}' on or after a given date
-
- >>> p1 = parsedate(b"10:29:59")
- >>> p2 = parsedate(b"10:30:00")
- >>> p3 = parsedate(b"10:30:59")
- >>> p4 = parsedate(b"10:31:00")
- >>> p5 = parsedate(b"Sep 15 10:30:00 1999")
- >>> f = matchdate(b"10:30")
- >>> f(p1[0])
- False
- >>> f(p2[0])
- True
- >>> f(p3[0])
- True
- >>> f(p4[0])
- False
- >>> f(p5[0])
- False
- """
-
- def lower(date):
- d = {'mb': "1", 'd': "1"}
- return parsedate(date, extendeddateformats, d)[0]
-
- def upper(date):
- d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
- for days in ("31", "30", "29"):
- try:
- d["d"] = days
- return parsedate(date, extendeddateformats, d)[0]
- except error.ParseError:
- pass
- d["d"] = "28"
- return parsedate(date, extendeddateformats, d)[0]
-
- date = date.strip()
-
- if not date:
- raise Abort(_("dates cannot consist entirely of whitespace"))
- elif date[0] == "<":
- if not date[1:]:
- raise Abort(_("invalid day spec, use '<DATE'"))
- when = upper(date[1:])
- return lambda x: x <= when
- elif date[0] == ">":
- if not date[1:]:
- raise Abort(_("invalid day spec, use '>DATE'"))
- when = lower(date[1:])
- return lambda x: x >= when
- elif date[0] == "-":
- try:
- days = int(date[1:])
- except ValueError:
- raise Abort(_("invalid day spec: %s") % date[1:])
- if days < 0:
- raise Abort(_("%s must be nonnegative (see 'hg help dates')")
- % date[1:])
- when = makedate()[0] - days * 3600 * 24
- return lambda x: x >= when
- elif " to " in date:
- a, b = date.split(" to ")
- start, stop = lower(a), upper(b)
- return lambda x: x >= start and x <= stop
- else:
- start, stop = lower(date), upper(date)
- return lambda x: x >= start and x <= stop
+ def __init__(self, fh, limit):
+ """Allow reading up to <limit> bytes from <fh>."""
+ self._fh = fh
+ self._left = limit
+
+ def read(self, n=-1):
+ if not self._left:
+ return b''
+
+ if n < 0:
+ n = self._left
+
+ data = self._fh.read(min(n, self._left))
+ self._left -= len(data)
+ assert self._left >= 0
+
+ return data
def stringmatcher(pattern, casesensitive=True):
"""
@@ -2357,6 +2664,22 @@
(1, 1, _('%.0f bytes')),
)
+class transformingwriter(object):
+ """Writable file wrapper to transform data by function"""
+
+ def __init__(self, fp, encode):
+ self._fp = fp
+ self._encode = encode
+
+ def close(self):
+ self._fp.close()
+
+ def flush(self):
+ self._fp.flush()
+
+ def write(self, data):
+ return self._fp.write(self._encode(data))
+
# Matches a single EOL which can either be a CRLF where repeated CR
# are removed or a LF. We do not care about old Macintosh files, so a
# stray CR is an error.
@@ -2368,12 +2691,17 @@
def tocrlf(s):
return _eolre.sub('\r\n', s)
+def _crlfwriter(fp):
+ return transformingwriter(fp, tocrlf)
+
if pycompat.oslinesep == '\r\n':
tonativeeol = tocrlf
fromnativeeol = tolf
+ nativeeolwriter = _crlfwriter
else:
tonativeeol = pycompat.identity
fromnativeeol = pycompat.identity
+ nativeeolwriter = pycompat.identity
def escapestr(s):
# call underlying function of s.encode('string_escape') directly for
@@ -2394,7 +2722,7 @@
def uirepr(s):
# Avoid double backslash in Windows path repr()
- return repr(s).replace('\\\\', '\\')
+ return pycompat.byterepr(pycompat.bytestr(s)).replace(b'\\\\', b'\\')
# delay import of textwrap
def MBTextWrapper(**kwargs):
@@ -2684,7 +3012,7 @@
pass
try:
- return socket.getservbyname(port)
+ return socket.getservbyname(pycompat.sysstr(port))
except socket.error:
raise Abort(_("no port number associated with service '%s'") % port)
@@ -3028,7 +3356,7 @@
path = urlreq.unquote(path)
if path.startswith('ssh://-') or path.startswith('svn+ssh://-'):
raise error.Abort(_('potentially unsafe url: %r') %
- (path,))
+ (pycompat.bytestr(path),))
def hidepassword(u):
'''hide user credential in a url string'''
@@ -3126,7 +3454,7 @@
results.append(hook(*args))
return results
-def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s', depth=0):
+def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%d', depth=0):
'''Yields lines for a nicely formatted stacktrace.
Skips the 'skip' last entries, then return the last 'depth' entries.
Each file+linenumber is formatted according to fileline.
@@ -3138,7 +3466,7 @@
Not be used in production code but very convenient while developing.
'''
- entries = [(fileline % (fn, ln), func)
+ entries = [(fileline % (pycompat.sysbytes(fn), ln), pycompat.sysbytes(func))
for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]
][-depth:]
if entries:
@@ -3571,7 +3899,7 @@
return zlib.decompress(data)
except zlib.error as e:
raise error.RevlogError(_('revlog decompress error: %s') %
- str(e))
+ forcebytestr(e))
def revlogcompressor(self, opts=None):
return self.zlibrevlogcompressor()
@@ -3797,7 +4125,7 @@
return ''.join(chunks)
except Exception as e:
raise error.RevlogError(_('revlog decompress error: %s') %
- str(e))
+ forcebytestr(e))
def revlogcompressor(self, opts=None):
opts = opts or {}
@@ -3944,3 +4272,53 @@
if not (byte & 0x80):
return result
shift += 7
+
+###
+# Deprecation warnings for util.py splitting
+###
+
+defaultdateformats = dateutil.defaultdateformats
+
+extendeddateformats = dateutil.extendeddateformats
+
+def makedate(*args, **kwargs):
+ msg = ("'util.makedate' is deprecated, "
+ "use 'utils.dateutil.makedate'")
+ nouideprecwarn(msg, "4.6")
+ return dateutil.makedate(*args, **kwargs)
+
+def datestr(*args, **kwargs):
+ msg = ("'util.datestr' is deprecated, "
+ "use 'utils.dateutil.datestr'")
+ nouideprecwarn(msg, "4.6")
+ return dateutil.datestr(*args, **kwargs)
+
+def shortdate(*args, **kwargs):
+ msg = ("'util.shortdate' is deprecated, "
+ "use 'utils.dateutil.shortdate'")
+ nouideprecwarn(msg, "4.6")
+ return dateutil.shortdate(*args, **kwargs)
+
+def parsetimezone(*args, **kwargs):
+ msg = ("'util.parsetimezone' is deprecated, "
+ "use 'utils.dateutil.parsetimezone'")
+ nouideprecwarn(msg, "4.6")
+ return dateutil.parsetimezone(*args, **kwargs)
+
+def strdate(*args, **kwargs):
+ msg = ("'util.strdate' is deprecated, "
+ "use 'utils.dateutil.strdate'")
+ nouideprecwarn(msg, "4.6")
+ return dateutil.strdate(*args, **kwargs)
+
+def parsedate(*args, **kwargs):
+ msg = ("'util.parsedate' is deprecated, "
+ "use 'utils.dateutil.parsedate'")
+ nouideprecwarn(msg, "4.6")
+ return dateutil.parsedate(*args, **kwargs)
+
+def matchdate(*args, **kwargs):
+ msg = ("'util.matchdate' is deprecated, "
+ "use 'utils.dateutil.matchdate'")
+ nouideprecwarn(msg, "4.6")
+ return dateutil.matchdate(*args, **kwargs)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/utils/dateutil.py Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,332 @@
+# dateutil.py - Mercurial utility functions related to dates
+#
+# Copyright 2018 Boris Feld <boris.feld@octobus.net>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import, print_function
+
+import calendar
+import datetime
+import time
+
+from ..i18n import _
+from .. import (
+ encoding,
+ error,
+ pycompat,
+)
+
+# used by parsedate
+defaultdateformats = (
+ '%Y-%m-%dT%H:%M:%S', # the 'real' ISO8601
+ '%Y-%m-%dT%H:%M', # without seconds
+ '%Y-%m-%dT%H%M%S', # another awful but legal variant without :
+ '%Y-%m-%dT%H%M', # without seconds
+ '%Y-%m-%d %H:%M:%S', # our common legal variant
+ '%Y-%m-%d %H:%M', # without seconds
+ '%Y-%m-%d %H%M%S', # without :
+ '%Y-%m-%d %H%M', # without seconds
+ '%Y-%m-%d %I:%M:%S%p',
+ '%Y-%m-%d %H:%M',
+ '%Y-%m-%d %I:%M%p',
+ '%Y-%m-%d',
+ '%m-%d',
+ '%m/%d',
+ '%m/%d/%y',
+ '%m/%d/%Y',
+ '%a %b %d %H:%M:%S %Y',
+ '%a %b %d %I:%M:%S%p %Y',
+ '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
+ '%b %d %H:%M:%S %Y',
+ '%b %d %I:%M:%S%p %Y',
+ '%b %d %H:%M:%S',
+ '%b %d %I:%M:%S%p',
+ '%b %d %H:%M',
+ '%b %d %I:%M%p',
+ '%b %d %Y',
+ '%b %d',
+ '%H:%M:%S',
+ '%I:%M:%S%p',
+ '%H:%M',
+ '%I:%M%p',
+)
+
+extendeddateformats = defaultdateformats + (
+ "%Y",
+ "%Y-%m",
+ "%b",
+ "%b %Y",
+)
+
+def makedate(timestamp=None):
+ '''Return a unix timestamp (or the current time) as a (unixtime,
+ offset) tuple based off the local timezone.'''
+ if timestamp is None:
+ timestamp = time.time()
+ if timestamp < 0:
+ hint = _("check your clock")
+ raise error.Abort(_("negative timestamp: %d") % timestamp, hint=hint)
+ delta = (datetime.datetime.utcfromtimestamp(timestamp) -
+ datetime.datetime.fromtimestamp(timestamp))
+ tz = delta.days * 86400 + delta.seconds
+ return timestamp, tz
+
+def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
+ """represent a (unixtime, offset) tuple as a localized time.
+ unixtime is seconds since the epoch, and offset is the time zone's
+ number of seconds away from UTC.
+
+ >>> datestr((0, 0))
+ 'Thu Jan 01 00:00:00 1970 +0000'
+ >>> datestr((42, 0))
+ 'Thu Jan 01 00:00:42 1970 +0000'
+ >>> datestr((-42, 0))
+ 'Wed Dec 31 23:59:18 1969 +0000'
+ >>> datestr((0x7fffffff, 0))
+ 'Tue Jan 19 03:14:07 2038 +0000'
+ >>> datestr((-0x80000000, 0))
+ 'Fri Dec 13 20:45:52 1901 +0000'
+ """
+ t, tz = date or makedate()
+ if "%1" in format or "%2" in format or "%z" in format:
+ sign = (tz > 0) and "-" or "+"
+ minutes = abs(tz) // 60
+ q, r = divmod(minutes, 60)
+ format = format.replace("%z", "%1%2")
+ format = format.replace("%1", "%c%02d" % (sign, q))
+ format = format.replace("%2", "%02d" % r)
+ d = t - tz
+ if d > 0x7fffffff:
+ d = 0x7fffffff
+ elif d < -0x80000000:
+ d = -0x80000000
+ # Never use time.gmtime() and datetime.datetime.fromtimestamp()
+ # because they use the gmtime() system call which is buggy on Windows
+ # for negative values.
+ t = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=d)
+ s = encoding.strtolocal(t.strftime(encoding.strfromlocal(format)))
+ return s
+
+def shortdate(date=None):
+    """turn (timestamp, tzoff) tuple into iso 8601 date."""
+ return datestr(date, format='%Y-%m-%d')
+
+def parsetimezone(s):
+ """find a trailing timezone, if any, in string, and return a
+ (offset, remainder) pair"""
+ s = pycompat.bytestr(s)
+
+ if s.endswith("GMT") or s.endswith("UTC"):
+ return 0, s[:-3].rstrip()
+
+ # Unix-style timezones [+-]hhmm
+ if len(s) >= 5 and s[-5] in "+-" and s[-4:].isdigit():
+ sign = (s[-5] == "+") and 1 or -1
+ hours = int(s[-4:-2])
+ minutes = int(s[-2:])
+ return -sign * (hours * 60 + minutes) * 60, s[:-5].rstrip()
+
+ # ISO8601 trailing Z
+ if s.endswith("Z") and s[-2:-1].isdigit():
+ return 0, s[:-1]
+
+ # ISO8601-style [+-]hh:mm
+ if (len(s) >= 6 and s[-6] in "+-" and s[-3] == ":" and
+ s[-5:-3].isdigit() and s[-2:].isdigit()):
+ sign = (s[-6] == "+") and 1 or -1
+ hours = int(s[-5:-3])
+ minutes = int(s[-2:])
+ return -sign * (hours * 60 + minutes) * 60, s[:-6]
+
+ return None, s
+
+def strdate(string, format, defaults=None):
+ """parse a localized time string and return a (unixtime, offset) tuple.
+ if the string cannot be parsed, ValueError is raised."""
+ if defaults is None:
+ defaults = {}
+
+ # NOTE: unixtime = localunixtime + offset
+ offset, date = parsetimezone(string)
+
+ # add missing elements from defaults
+ usenow = False # default to using biased defaults
+ for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
+ part = pycompat.bytestr(part)
+ found = [True for p in part if ("%"+p) in format]
+ if not found:
+ date += "@" + defaults[part][usenow]
+ format += "@%" + part[0]
+ else:
+ # We've found a specific time element, less specific time
+ # elements are relative to today
+ usenow = True
+
+ timetuple = time.strptime(encoding.strfromlocal(date),
+ encoding.strfromlocal(format))
+ localunixtime = int(calendar.timegm(timetuple))
+ if offset is None:
+ # local timezone
+ unixtime = int(time.mktime(timetuple))
+ offset = unixtime - localunixtime
+ else:
+ unixtime = localunixtime + offset
+ return unixtime, offset
+
+def parsedate(date, formats=None, bias=None):
+ """parse a localized date/time and return a (unixtime, offset) tuple.
+
+ The date may be a "unixtime offset" string or in one of the specified
+ formats. If the date already is a (unixtime, offset) tuple, it is returned.
+
+ >>> parsedate(b' today ') == parsedate(
+ ... datetime.date.today().strftime('%b %d').encode('ascii'))
+ True
+ >>> parsedate(b'yesterday ') == parsedate(
+ ... (datetime.date.today() - datetime.timedelta(days=1)
+ ... ).strftime('%b %d').encode('ascii'))
+ True
+ >>> now, tz = makedate()
+ >>> strnow, strtz = parsedate(b'now')
+ >>> (strnow - now) < 1
+ True
+ >>> tz == strtz
+ True
+ """
+ if bias is None:
+ bias = {}
+ if not date:
+ return 0, 0
+ if isinstance(date, tuple) and len(date) == 2:
+ return date
+ if not formats:
+ formats = defaultdateformats
+ date = date.strip()
+
+ if date == 'now' or date == _('now'):
+ return makedate()
+ if date == 'today' or date == _('today'):
+ date = datetime.date.today().strftime(r'%b %d')
+ date = encoding.strtolocal(date)
+ elif date == 'yesterday' or date == _('yesterday'):
+ date = (datetime.date.today() -
+ datetime.timedelta(days=1)).strftime(r'%b %d')
+ date = encoding.strtolocal(date)
+
+ try:
+ when, offset = map(int, date.split(' '))
+ except ValueError:
+ # fill out defaults
+ now = makedate()
+ defaults = {}
+ for part in ("d", "mb", "yY", "HI", "M", "S"):
+ # this piece is for rounding the specific end of unknowns
+ b = bias.get(part)
+ if b is None:
+ if part[0:1] in "HMS":
+ b = "00"
+ else:
+ b = "0"
+
+ # this piece is for matching the generic end to today's date
+ n = datestr(now, "%" + part[0:1])
+
+ defaults[part] = (b, n)
+
+ for format in formats:
+ try:
+ when, offset = strdate(date, format, defaults)
+ except (ValueError, OverflowError):
+ pass
+ else:
+ break
+ else:
+ raise error.ParseError(
+ _('invalid date: %r') % pycompat.bytestr(date))
+ # validate explicit (probably user-specified) date and
+ # time zone offset. values must fit in signed 32 bits for
+ # current 32-bit linux runtimes. timezones go from UTC-12
+ # to UTC+14
+ if when < -0x80000000 or when > 0x7fffffff:
+ raise error.ParseError(_('date exceeds 32 bits: %d') % when)
+ if offset < -50400 or offset > 43200:
+ raise error.ParseError(_('impossible time zone offset: %d') % offset)
+ return when, offset
+
+def matchdate(date):
+ """Return a function that matches a given date match specifier
+
+ Formats include:
+
+ '{date}' match a given date to the accuracy provided
+
+ '<{date}' on or before a given date
+
+ '>{date}' on or after a given date
+
+ >>> p1 = parsedate(b"10:29:59")
+ >>> p2 = parsedate(b"10:30:00")
+ >>> p3 = parsedate(b"10:30:59")
+ >>> p4 = parsedate(b"10:31:00")
+ >>> p5 = parsedate(b"Sep 15 10:30:00 1999")
+ >>> f = matchdate(b"10:30")
+ >>> f(p1[0])
+ False
+ >>> f(p2[0])
+ True
+ >>> f(p3[0])
+ True
+ >>> f(p4[0])
+ False
+ >>> f(p5[0])
+ False
+ """
+
+ def lower(date):
+ d = {'mb': "1", 'd': "1"}
+ return parsedate(date, extendeddateformats, d)[0]
+
+ def upper(date):
+ d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
+ for days in ("31", "30", "29"):
+ try:
+ d["d"] = days
+ return parsedate(date, extendeddateformats, d)[0]
+ except error.ParseError:
+ pass
+ d["d"] = "28"
+ return parsedate(date, extendeddateformats, d)[0]
+
+ date = date.strip()
+
+ if not date:
+ raise error.Abort(_("dates cannot consist entirely of whitespace"))
+ elif date[0] == "<":
+ if not date[1:]:
+ raise error.Abort(_("invalid day spec, use '<DATE'"))
+ when = upper(date[1:])
+ return lambda x: x <= when
+ elif date[0] == ">":
+ if not date[1:]:
+ raise error.Abort(_("invalid day spec, use '>DATE'"))
+ when = lower(date[1:])
+ return lambda x: x >= when
+ elif date[0] == "-":
+ try:
+ days = int(date[1:])
+ except ValueError:
+ raise error.Abort(_("invalid day spec: %s") % date[1:])
+ if days < 0:
+ raise error.Abort(_("%s must be nonnegative (see 'hg help dates')")
+ % date[1:])
+ when = makedate()[0] - days * 3600 * 24
+ return lambda x: x >= when
+ elif " to " in date:
+ a, b = date.split(" to ")
+ start, stop = lower(a), upper(b)
+ return lambda x: x >= start and x <= stop
+ else:
+ start, stop = lower(date), upper(date)
+ return lambda x: x >= start and x <= stop
--- a/mercurial/verify.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/verify.py Mon Mar 19 08:07:18 2018 -0700
@@ -60,6 +60,7 @@
def err(self, linkrev, msg, filename=None):
if linkrev is not None:
self.badrevs.add(linkrev)
+ linkrev = "%d" % linkrev
else:
linkrev = '?'
msg = "%s: %s" % (linkrev, msg)
@@ -69,9 +70,10 @@
self.errors += 1
def exc(self, linkrev, msg, inst, filename=None):
- if not str(inst):
- inst = repr(inst)
- self.err(linkrev, "%s: %s" % (msg, inst), filename)
+ fmsg = pycompat.bytestr(inst)
+ if not fmsg:
+ fmsg = pycompat.byterepr(inst)
+ self.err(linkrev, "%s: %s" % (msg, fmsg), filename)
def checklog(self, obj, name, linkrev):
if not len(obj) and (self.havecl or self.havemf):
@@ -455,12 +457,7 @@
if rp:
if lr is not None and ui.verbose:
ctx = lrugetctx(lr)
- found = False
- for pctx in ctx.parents():
- if rp[0] in pctx:
- found = True
- break
- if not found:
+ if not any(rp[0] in pctx for pctx in ctx.parents()):
self.warn(_("warning: copy source of '%s' not"
" in parents of %s") % (f, ctx))
fl2 = repo.file(rp[0])
--- a/mercurial/windows.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/windows.py Mon Mar 19 08:07:18 2018 -0700
@@ -296,6 +296,15 @@
return s
return '"%s"' % _quotere.sub(r'\1\1\\\2', s)
+def _unquote(s):
+ if s.startswith(b'"') and s.endswith(b'"'):
+ return s[1:-1]
+ return s
+
+def shellsplit(s):
+ """Parse a command string in cmd.exe way (best-effort)"""
+ return pycompat.maplist(_unquote, pycompat.shlexsplit(s, posix=False))
+
def quotecommand(cmd):
"""Build a command string suitable for os.popen* calls."""
if sys.version_info < (2, 7, 1):
@@ -307,7 +316,7 @@
# Work around "popen spawned process may not write to stdout
# under windows"
# http://bugs.python.org/issue1366
- command += " 2> %s" % os.devnull
+ command += " 2> %s" % pycompat.bytestr(os.devnull)
return os.popen(quotecommand(command), mode)
def explainexit(code):
--- a/mercurial/wireproto.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/wireproto.py Mon Mar 19 08:07:18 2018 -0700
@@ -31,56 +31,24 @@
repository,
streamclone,
util,
+ wireprototypes,
)
urlerr = util.urlerr
urlreq = util.urlreq
+bytesresponse = wireprototypes.bytesresponse
+ooberror = wireprototypes.ooberror
+pushres = wireprototypes.pushres
+pusherr = wireprototypes.pusherr
+streamres = wireprototypes.streamres
+streamres_legacy = wireprototypes.streamreslegacy
+
bundle2requiredmain = _('incompatible Mercurial client; bundle2 required')
bundle2requiredhint = _('see https://www.mercurial-scm.org/wiki/'
'IncompatibleClient')
bundle2required = '%s\n(%s)\n' % (bundle2requiredmain, bundle2requiredhint)
-class abstractserverproto(object):
- """abstract class that summarizes the protocol API
-
- Used as reference and documentation.
- """
-
- def getargs(self, args):
- """return the value for arguments in <args>
-
- returns a list of values (same order as <args>)"""
- raise NotImplementedError()
-
- def getfile(self, fp):
- """write the whole content of a file into a file like object
-
- The file is in the form::
-
- (<chunk-size>\n<chunk>)+0\n
-
- chunk size is the ascii version of the int.
- """
- raise NotImplementedError()
-
- def redirect(self):
- """may setup interception for stdout and stderr
-
- See also the `restore` method."""
- raise NotImplementedError()
-
- # If the `redirect` function does install interception, the `restore`
- # function MUST be defined. If interception is not used, this function
- # MUST NOT be defined.
- #
- # left commented here on purpose
- #
- #def restore(self):
- # """reinstall previous stdout and stderr and return intercepted stdout
- # """
- # raise NotImplementedError()
-
class remoteiterbatcher(peer.iterbatcher):
def __init__(self, remote):
super(remoteiterbatcher, self).__init__()
@@ -432,6 +400,13 @@
Returns an iterator of the raw responses from the server.
"""
+ ui = self.ui
+ if ui.debugflag and ui.configbool('devel', 'debug.peer-request'):
+ ui.debug('devel-peer-request: batched-content\n')
+ for op, args in req:
+ msg = 'devel-peer-request: - %s (%d arguments)\n'
+ ui.debug(msg % (op, len(args)))
+
rsp = self._callstream("batch", cmds=encodebatchcmds(req))
chunk = rsp.read(1024)
work = [chunk]
@@ -517,58 +492,6 @@
# server side
# wire protocol command can either return a string or one of these classes.
-class streamres(object):
- """wireproto reply: binary stream
-
- The call was successful and the result is a stream.
-
- Accepts a generator containing chunks of data to be sent to the client.
-
- ``prefer_uncompressed`` indicates that the data is expected to be
- uncompressable and that the stream should therefore use the ``none``
- engine.
- """
- def __init__(self, gen=None, prefer_uncompressed=False):
- self.gen = gen
- self.prefer_uncompressed = prefer_uncompressed
-
-class streamres_legacy(object):
- """wireproto reply: uncompressed binary stream
-
- The call was successful and the result is a stream.
-
- Accepts a generator containing chunks of data to be sent to the client.
-
- Like ``streamres``, but sends an uncompressed data for "version 1" clients
- using the application/mercurial-0.1 media type.
- """
- def __init__(self, gen=None):
- self.gen = gen
-
-class pushres(object):
- """wireproto reply: success with simple integer return
-
- The call was successful and returned an integer contained in `self.res`.
- """
- def __init__(self, res):
- self.res = res
-
-class pusherr(object):
- """wireproto reply: failure
-
- The call failed. The `self.res` attribute contains the error message.
- """
- def __init__(self, res):
- self.res = res
-
-class ooberror(object):
- """wireproto reply: failure of a batch of operation
-
- Something failed during a batch call. The error message is stored in
- `self.message`.
- """
- def __init__(self, message):
- self.message = message
def getdispatchrepo(repo, proto, command):
"""Obtain the repo used for processing wire protocol commands.
@@ -625,7 +548,7 @@
return ui.configbool('server', 'bundle1')
-def supportedcompengines(ui, proto, role):
+def supportedcompengines(ui, role):
"""Obtain the list of supported compression engines for a request."""
assert role in (util.CLIENTROLE, util.SERVERROLE)
@@ -674,24 +597,136 @@
return compengines
-# list of commands
-commands = {}
+class commandentry(object):
+ """Represents a declared wire protocol command."""
+ def __init__(self, func, args='', transports=None,
+ permission='push'):
+ self.func = func
+ self.args = args
+ self.transports = transports or set()
+ self.permission = permission
+
+ def _merge(self, func, args):
+ """Merge this instance with an incoming 2-tuple.
+
+ This is called when a caller using the old 2-tuple API attempts
+ to replace an instance. The incoming values are merged with
+ data not captured by the 2-tuple and a new instance containing
+ the union of the two objects is returned.
+ """
+ return commandentry(func, args=args, transports=set(self.transports),
+ permission=self.permission)
+
+ # Old code treats instances as 2-tuples. So expose that interface.
+ def __iter__(self):
+ yield self.func
+ yield self.args
+
+ def __getitem__(self, i):
+ if i == 0:
+ return self.func
+ elif i == 1:
+ return self.args
+ else:
+ raise IndexError('can only access elements 0 and 1')
+
+class commanddict(dict):
+ """Container for registered wire protocol commands.
+
+ It behaves like a dict. But __setitem__ is overwritten to allow silent
+ coercion of values from 2-tuples for API compatibility.
+ """
+ def __setitem__(self, k, v):
+ if isinstance(v, commandentry):
+ pass
+ # Cast 2-tuples to commandentry instances.
+ elif isinstance(v, tuple):
+ if len(v) != 2:
+ raise ValueError('command tuples must have exactly 2 elements')
+
+ # It is common for extensions to wrap wire protocol commands via
+ # e.g. ``wireproto.commands[x] = (newfn, args)``. Because callers
+ # doing this aren't aware of the new API that uses objects to store
+ # command entries, we automatically merge old state with new.
+ if k in self:
+ v = self[k]._merge(v[0], v[1])
+ else:
+ # Use default values from @wireprotocommand.
+ v = commandentry(v[0], args=v[1],
+ transports=set(wireprototypes.TRANSPORTS),
+ permission='push')
+ else:
+ raise ValueError('command entries must be commandentry instances '
+ 'or 2-tuples')
-# Maps wire protocol name to operation type. This is used for permissions
-# checking. All defined @wireiprotocommand should have an entry in this
-# dict.
-permissions = {}
+ return super(commanddict, self).__setitem__(k, v)
+
+ def commandavailable(self, command, proto):
+ """Determine if a command is available for the requested protocol."""
+ assert proto.name in wireprototypes.TRANSPORTS
+
+ entry = self.get(command)
+
+ if not entry:
+ return False
+
+ if proto.name not in entry.transports:
+ return False
+
+ return True
+
+# Constants specifying which transports a wire protocol command should be
+# available on. For use with @wireprotocommand.
+POLICY_ALL = 'all'
+POLICY_V1_ONLY = 'v1-only'
+POLICY_V2_ONLY = 'v2-only'
+
+commands = commanddict()
+
+def wireprotocommand(name, args='', transportpolicy=POLICY_ALL,
+ permission='push'):
+ """Decorator to declare a wire protocol command.
+
+ ``name`` is the name of the wire protocol command being provided.
-def wireprotocommand(name, args=''):
- """decorator for wire protocol command"""
+ ``args`` is a space-delimited list of named arguments that the command
+ accepts. ``*`` is a special value that says to accept all arguments.
+
+ ``transportpolicy`` is a POLICY_* constant denoting which transports
+ this wire protocol command should be exposed to. By default, commands
+ are exposed to all wire protocol transports.
+
+ ``permission`` defines the permission type needed to run this command.
+ Can be ``push`` or ``pull``. These roughly map to read-write and read-only,
+ respectively. Default is to assume command requires ``push`` permissions
+ because otherwise commands not declaring their permissions could modify
+ a repository that is supposed to be read-only.
+ """
+ if transportpolicy == POLICY_ALL:
+ transports = set(wireprototypes.TRANSPORTS)
+ elif transportpolicy == POLICY_V1_ONLY:
+ transports = {k for k, v in wireprototypes.TRANSPORTS.items()
+ if v['version'] == 1}
+ elif transportpolicy == POLICY_V2_ONLY:
+ transports = {k for k, v in wireprototypes.TRANSPORTS.items()
+ if v['version'] == 2}
+ else:
+ raise error.ProgrammingError('invalid transport policy value: %s' %
+ transportpolicy)
+
+ if permission not in ('push', 'pull'):
+ raise error.ProgrammingError('invalid wire protocol permission; '
+ 'got %s; expected "push" or "pull"' %
+ permission)
+
def register(func):
- commands[name] = (func, args)
+ commands[name] = commandentry(func, args=args, transports=transports,
+ permission=permission)
return func
return register
# TODO define a more appropriate permissions type to use for this.
-permissions['batch'] = 'pull'
-@wireprotocommand('batch', 'cmds *')
+@wireprotocommand('batch', 'cmds *', permission='pull')
def batch(repo, proto, cmds, others):
repo = repo.filtered("served")
res = []
@@ -704,15 +739,10 @@
vals[unescapearg(n)] = unescapearg(v)
func, spec = commands[op]
- # If the protocol supports permissions checking, perform that
- # checking on each batched command.
- # TODO formalize permission checking as part of protocol interface.
- if util.safehasattr(proto, 'checkperm'):
- # Assume commands with no defined permissions are writes / for
- # pushes. This is the safest from a security perspective because
- # it doesn't allow commands with undefined semantics from
- # bypassing permissions checks.
- proto.checkperm(permissions.get(op, 'push'))
+ # Validate that client has permissions to perform this command.
+ perm = commands[op].permission
+ assert perm in ('push', 'pull')
+ proto.checkperm(perm)
if spec:
keys = spec.split()
@@ -731,20 +761,27 @@
result = func(repo, proto)
if isinstance(result, ooberror):
return result
- res.append(escapearg(result))
- return ';'.join(res)
-permissions['between'] = 'pull'
-@wireprotocommand('between', 'pairs')
+ # For now, all batchable commands must return bytesresponse or
+ # raw bytes (for backwards compatibility).
+ assert isinstance(result, (bytesresponse, bytes))
+ if isinstance(result, bytesresponse):
+ result = result.data
+ res.append(escapearg(result))
+
+ return bytesresponse(';'.join(res))
+
+@wireprotocommand('between', 'pairs', transportpolicy=POLICY_V1_ONLY,
+ permission='pull')
def between(repo, proto, pairs):
pairs = [decodelist(p, '-') for p in pairs.split(" ")]
r = []
for b in repo.between(pairs):
r.append(encodelist(b) + "\n")
- return "".join(r)
-permissions['branchmap'] = 'pull'
-@wireprotocommand('branchmap')
+ return bytesresponse(''.join(r))
+
+@wireprotocommand('branchmap', permission='pull')
def branchmap(repo, proto):
branchmap = repo.branchmap()
heads = []
@@ -752,19 +789,20 @@
branchname = urlreq.quote(encoding.fromlocal(branch))
branchnodes = encodelist(nodes)
heads.append('%s %s' % (branchname, branchnodes))
- return '\n'.join(heads)
+
+ return bytesresponse('\n'.join(heads))
-permissions['branches'] = 'pull'
-@wireprotocommand('branches', 'nodes')
+@wireprotocommand('branches', 'nodes', transportpolicy=POLICY_V1_ONLY,
+ permission='pull')
def branches(repo, proto, nodes):
nodes = decodelist(nodes)
r = []
for b in repo.branches(nodes):
r.append(encodelist(b) + "\n")
- return "".join(r)
-permissions['clonebundles'] = 'pull'
-@wireprotocommand('clonebundles', '')
+ return bytesresponse(''.join(r))
+
+@wireprotocommand('clonebundles', '', permission='pull')
def clonebundles(repo, proto):
"""Server command for returning info for available bundles to seed clones.
@@ -774,9 +812,9 @@
depending on the request. e.g. you could advertise URLs for the closest
data center given the client's IP address.
"""
- return repo.vfs.tryread('clonebundles.manifest')
+ return bytesresponse(repo.vfs.tryread('clonebundles.manifest'))
-wireprotocaps = ['lookup', 'changegroupsubset', 'branchmap', 'pushkey',
+wireprotocaps = ['lookup', 'branchmap', 'pushkey',
'known', 'getbundle', 'unbundlehash', 'batch']
def _capabilities(repo, proto):
@@ -791,6 +829,12 @@
"""
# copy to prevent modification of the global list
caps = list(wireprotocaps)
+
+ # Command of same name as capability isn't exposed to version 1 of
+ # transports. So conditionally add it.
+ if commands.commandavailable('changegroupsubset', proto):
+ caps.append('changegroupsubset')
+
if streamclone.allowservergeneration(repo):
if repo.ui.configbool('server', 'preferuncompressed'):
caps.append('stream-preferred')
@@ -806,33 +850,16 @@
caps.append('bundle2=' + urlreq.quote(capsblob))
caps.append('unbundle=%s' % ','.join(bundle2.bundlepriority))
- if proto.name == 'http':
- caps.append('httpheader=%d' %
- repo.ui.configint('server', 'maxhttpheaderlen'))
- if repo.ui.configbool('experimental', 'httppostargs'):
- caps.append('httppostargs')
-
- # FUTURE advertise 0.2rx once support is implemented
- # FUTURE advertise minrx and mintx after consulting config option
- caps.append('httpmediatype=0.1rx,0.1tx,0.2tx')
-
- compengines = supportedcompengines(repo.ui, proto, util.SERVERROLE)
- if compengines:
- comptypes = ','.join(urlreq.quote(e.wireprotosupport().name)
- for e in compengines)
- caps.append('compression=%s' % comptypes)
-
- return caps
+ return proto.addcapabilities(repo, caps)
# If you are writing an extension and consider wrapping this function. Wrap
# `_capabilities` instead.
-permissions['capabilities'] = 'pull'
-@wireprotocommand('capabilities')
+@wireprotocommand('capabilities', permission='pull')
def capabilities(repo, proto):
- return ' '.join(_capabilities(repo, proto))
+ return bytesresponse(' '.join(_capabilities(repo, proto)))
-permissions['changegroup'] = 'pull'
-@wireprotocommand('changegroup', 'roots')
+@wireprotocommand('changegroup', 'roots', transportpolicy=POLICY_V1_ONLY,
+ permission='pull')
def changegroup(repo, proto, roots):
nodes = decodelist(roots)
outgoing = discovery.outgoing(repo, missingroots=nodes,
@@ -841,8 +868,9 @@
gen = iter(lambda: cg.read(32768), '')
return streamres(gen=gen)
-permissions['changegroupsubset'] = 'pull'
-@wireprotocommand('changegroupsubset', 'bases heads')
+@wireprotocommand('changegroupsubset', 'bases heads',
+ transportpolicy=POLICY_V1_ONLY,
+ permission='pull')
def changegroupsubset(repo, proto, bases, heads):
bases = decodelist(bases)
heads = decodelist(heads)
@@ -852,15 +880,15 @@
gen = iter(lambda: cg.read(32768), '')
return streamres(gen=gen)
-permissions['debugwireargs'] = 'pull'
-@wireprotocommand('debugwireargs', 'one two *')
+@wireprotocommand('debugwireargs', 'one two *',
+ permission='pull')
def debugwireargs(repo, proto, one, two, others):
# only accept optional args from the known set
opts = options('debugwireargs', ['three', 'four'], others)
- return repo.debugwireargs(one, two, **pycompat.strkwargs(opts))
+ return bytesresponse(repo.debugwireargs(one, two,
+ **pycompat.strkwargs(opts)))
-permissions['getbundle'] = 'pull'
-@wireprotocommand('getbundle', '*')
+@wireprotocommand('getbundle', '*', permission='pull')
def getbundle(repo, proto, others):
opts = options('getbundle', gboptsmap.keys(), others)
for k, v in opts.iteritems():
@@ -884,7 +912,7 @@
if not bundle1allowed(repo, 'pull'):
if not exchange.bundle2requested(opts.get('bundlecaps')):
- if proto.name == 'http':
+ if proto.name == 'http-v1':
return ooberror(bundle2required)
raise error.Abort(bundle2requiredmain,
hint=bundle2requiredhint)
@@ -910,12 +938,12 @@
except error.Abort as exc:
# cleanly forward Abort error to the client
if not exchange.bundle2requested(opts.get('bundlecaps')):
- if proto.name == 'http':
- return ooberror(str(exc) + '\n')
+ if proto.name == 'http-v1':
+ return ooberror(pycompat.bytestr(exc) + '\n')
raise # cannot do better for bundle1 + ssh
# bundle2 request expect a bundle2 reply
bundler = bundle2.bundle20(repo.ui)
- manargs = [('message', str(exc))]
+ manargs = [('message', pycompat.bytestr(exc))]
advargs = []
if exc.hint is not None:
advargs.append(('hint', exc.hint))
@@ -926,32 +954,32 @@
return streamres(gen=chunks, prefer_uncompressed=not prefercompressed)
-permissions['heads'] = 'pull'
-@wireprotocommand('heads')
+@wireprotocommand('heads', permission='pull')
def heads(repo, proto):
h = repo.heads()
- return encodelist(h) + "\n"
+ return bytesresponse(encodelist(h) + '\n')
-permissions['hello'] = 'pull'
-@wireprotocommand('hello')
+@wireprotocommand('hello', permission='pull')
def hello(repo, proto):
- '''the hello command returns a set of lines describing various
- interesting things about the server, in an RFC822-like format.
- Currently the only one defined is "capabilities", which
- consists of a line in the form:
+ """Called as part of SSH handshake to obtain server info.
+
+ Returns a list of lines describing interesting things about the
+ server, in an RFC822-like format.
+
+ Currently, the only one defined is ``capabilities``, which consists of a
+ line of space separated tokens describing server abilities:
- capabilities: space separated list of tokens
- '''
- return "capabilities: %s\n" % (capabilities(repo, proto))
+ capabilities: <token0> <token1> <token2>
+ """
+ caps = capabilities(repo, proto).data
+ return bytesresponse('capabilities: %s\n' % caps)
-permissions['listkeys'] = 'pull'
-@wireprotocommand('listkeys', 'namespace')
+@wireprotocommand('listkeys', 'namespace', permission='pull')
def listkeys(repo, proto, namespace):
- d = repo.listkeys(encoding.tolocal(namespace)).items()
- return pushkeymod.encodekeys(d)
+ d = sorted(repo.listkeys(encoding.tolocal(namespace)).items())
+ return bytesresponse(pushkeymod.encodekeys(d))
-permissions['lookup'] = 'pull'
-@wireprotocommand('lookup', 'key')
+@wireprotocommand('lookup', 'key', permission='pull')
def lookup(repo, proto, key):
try:
k = encoding.tolocal(key)
@@ -959,17 +987,16 @@
r = c.hex()
success = 1
except Exception as inst:
- r = str(inst)
+ r = util.forcebytestr(inst)
success = 0
- return "%d %s\n" % (success, r)
+ return bytesresponse('%d %s\n' % (success, r))
-permissions['known'] = 'pull'
-@wireprotocommand('known', 'nodes *')
+@wireprotocommand('known', 'nodes *', permission='pull')
def known(repo, proto, nodes, others):
- return ''.join(b and "1" or "0" for b in repo.known(decodelist(nodes)))
+ v = ''.join(b and '1' or '0' for b in repo.known(decodelist(nodes)))
+ return bytesresponse(v)
-permissions['pushkey'] = 'push'
-@wireprotocommand('pushkey', 'namespace key old new')
+@wireprotocommand('pushkey', 'namespace key old new', permission='push')
def pushkey(repo, proto, namespace, key, old, new):
# compatibility with pre-1.8 clients which were accidentally
# sending raw binary nodes rather than utf-8-encoded hex
@@ -983,26 +1010,14 @@
else:
new = encoding.tolocal(new) # normal path
- if util.safehasattr(proto, 'restore'):
-
- proto.redirect()
-
- try:
- r = repo.pushkey(encoding.tolocal(namespace), encoding.tolocal(key),
- encoding.tolocal(old), new) or False
- except error.Abort:
- r = False
+ with proto.mayberedirectstdio() as output:
+ r = repo.pushkey(encoding.tolocal(namespace), encoding.tolocal(key),
+ encoding.tolocal(old), new) or False
- output = proto.restore()
-
- return '%s\n%s' % (int(r), output)
+ output = output.getvalue() if output else ''
+ return bytesresponse('%d\n%s' % (int(r), output))
- r = repo.pushkey(encoding.tolocal(namespace), encoding.tolocal(key),
- encoding.tolocal(old), new)
- return '%s\n' % int(r)
-
-permissions['stream_out'] = 'pull'
-@wireprotocommand('stream_out')
+@wireprotocommand('stream_out', permission='pull')
def stream(repo, proto):
'''If the server supports streaming clone, it advertises the "stream"
capability with a value representing the version and flags of the repo
@@ -1010,102 +1025,104 @@
'''
return streamres_legacy(streamclone.generatev1wireproto(repo))
-permissions['unbundle'] = 'push'
-@wireprotocommand('unbundle', 'heads')
+@wireprotocommand('unbundle', 'heads', permission='push')
def unbundle(repo, proto, heads):
their_heads = decodelist(heads)
- try:
- proto.redirect()
-
- exchange.check_heads(repo, their_heads, 'preparing changes')
-
- # write bundle data to temporary file because it can be big
- fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-')
- fp = os.fdopen(fd, pycompat.sysstr('wb+'))
- r = 0
+ with proto.mayberedirectstdio() as output:
try:
- proto.getfile(fp)
- fp.seek(0)
- gen = exchange.readbundle(repo.ui, fp, None)
- if (isinstance(gen, changegroupmod.cg1unpacker)
- and not bundle1allowed(repo, 'push')):
- if proto.name == 'http':
- # need to special case http because stderr do not get to
- # the http client on failed push so we need to abuse some
- # other error type to make sure the message get to the
- # user.
- return ooberror(bundle2required)
- raise error.Abort(bundle2requiredmain,
- hint=bundle2requiredhint)
+ exchange.check_heads(repo, their_heads, 'preparing changes')
- r = exchange.unbundle(repo, gen, their_heads, 'serve',
- proto._client())
- if util.safehasattr(r, 'addpart'):
- # The return looks streamable, we are in the bundle2 case and
- # should return a stream.
- return streamres_legacy(gen=r.getchunks())
- return pushres(r)
-
- finally:
- fp.close()
- os.unlink(tempname)
-
- except (error.BundleValueError, error.Abort, error.PushRaced) as exc:
- # handle non-bundle2 case first
- if not getattr(exc, 'duringunbundle2', False):
+ # write bundle data to temporary file because it can be big
+ fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-')
+ fp = os.fdopen(fd, r'wb+')
+ r = 0
try:
- raise
- except error.Abort:
- # The old code we moved used util.stderr directly.
- # We did not change it to minimise code change.
- # This need to be moved to something proper.
- # Feel free to do it.
- util.stderr.write("abort: %s\n" % exc)
- if exc.hint is not None:
- util.stderr.write("(%s)\n" % exc.hint)
- return pushres(0)
- except error.PushRaced:
- return pusherr(str(exc))
+ proto.forwardpayload(fp)
+ fp.seek(0)
+ gen = exchange.readbundle(repo.ui, fp, None)
+ if (isinstance(gen, changegroupmod.cg1unpacker)
+ and not bundle1allowed(repo, 'push')):
+ if proto.name == 'http-v1':
+ # need to special case http because stderr does not get to
+ # the http client on failed push so we need to abuse
+ # some other error type to make sure the message gets to
+ # the user.
+ return ooberror(bundle2required)
+ raise error.Abort(bundle2requiredmain,
+ hint=bundle2requiredhint)
- bundler = bundle2.bundle20(repo.ui)
- for out in getattr(exc, '_bundle2salvagedoutput', ()):
- bundler.addpart(out)
- try:
- try:
- raise
- except error.PushkeyFailed as exc:
- # check client caps
- remotecaps = getattr(exc, '_replycaps', None)
- if (remotecaps is not None
- and 'pushkey' not in remotecaps.get('error', ())):
- # no support remote side, fallback to Abort handler.
+ r = exchange.unbundle(repo, gen, their_heads, 'serve',
+ proto.client())
+ if util.safehasattr(r, 'addpart'):
+ # The return looks streamable, we are in the bundle2 case
+ # and should return a stream.
+ return streamres_legacy(gen=r.getchunks())
+ return pushres(r, output.getvalue() if output else '')
+
+ finally:
+ fp.close()
+ os.unlink(tempname)
+
+ except (error.BundleValueError, error.Abort, error.PushRaced) as exc:
+ # handle non-bundle2 case first
+ if not getattr(exc, 'duringunbundle2', False):
+ try:
raise
- part = bundler.newpart('error:pushkey')
- part.addparam('in-reply-to', exc.partid)
- if exc.namespace is not None:
- part.addparam('namespace', exc.namespace, mandatory=False)
- if exc.key is not None:
- part.addparam('key', exc.key, mandatory=False)
- if exc.new is not None:
- part.addparam('new', exc.new, mandatory=False)
- if exc.old is not None:
- part.addparam('old', exc.old, mandatory=False)
- if exc.ret is not None:
- part.addparam('ret', exc.ret, mandatory=False)
- except error.BundleValueError as exc:
- errpart = bundler.newpart('error:unsupportedcontent')
- if exc.parttype is not None:
- errpart.addparam('parttype', exc.parttype)
- if exc.params:
- errpart.addparam('params', '\0'.join(exc.params))
- except error.Abort as exc:
- manargs = [('message', str(exc))]
- advargs = []
- if exc.hint is not None:
- advargs.append(('hint', exc.hint))
- bundler.addpart(bundle2.bundlepart('error:abort',
- manargs, advargs))
- except error.PushRaced as exc:
- bundler.newpart('error:pushraced', [('message', str(exc))])
- return streamres_legacy(gen=bundler.getchunks())
+ except error.Abort:
+ # The old code we moved used util.stderr directly.
+ # We did not change it to minimise code change.
+ # This need to be moved to something proper.
+ # Feel free to do it.
+ util.stderr.write("abort: %s\n" % exc)
+ if exc.hint is not None:
+ util.stderr.write("(%s)\n" % exc.hint)
+ util.stderr.flush()
+ return pushres(0, output.getvalue() if output else '')
+ except error.PushRaced:
+ return pusherr(pycompat.bytestr(exc),
+ output.getvalue() if output else '')
+
+ bundler = bundle2.bundle20(repo.ui)
+ for out in getattr(exc, '_bundle2salvagedoutput', ()):
+ bundler.addpart(out)
+ try:
+ try:
+ raise
+ except error.PushkeyFailed as exc:
+ # check client caps
+ remotecaps = getattr(exc, '_replycaps', None)
+ if (remotecaps is not None
+ and 'pushkey' not in remotecaps.get('error', ())):
+ # no support remote side, fallback to Abort handler.
+ raise
+ part = bundler.newpart('error:pushkey')
+ part.addparam('in-reply-to', exc.partid)
+ if exc.namespace is not None:
+ part.addparam('namespace', exc.namespace,
+ mandatory=False)
+ if exc.key is not None:
+ part.addparam('key', exc.key, mandatory=False)
+ if exc.new is not None:
+ part.addparam('new', exc.new, mandatory=False)
+ if exc.old is not None:
+ part.addparam('old', exc.old, mandatory=False)
+ if exc.ret is not None:
+ part.addparam('ret', exc.ret, mandatory=False)
+ except error.BundleValueError as exc:
+ errpart = bundler.newpart('error:unsupportedcontent')
+ if exc.parttype is not None:
+ errpart.addparam('parttype', exc.parttype)
+ if exc.params:
+ errpart.addparam('params', '\0'.join(exc.params))
+ except error.Abort as exc:
+ manargs = [('message', util.forcebytestr(exc))]
+ advargs = []
+ if exc.hint is not None:
+ advargs.append(('hint', exc.hint))
+ bundler.addpart(bundle2.bundlepart('error:abort',
+ manargs, advargs))
+ except error.PushRaced as exc:
+ bundler.newpart('error:pushraced',
+ [('message', util.forcebytestr(exc))])
+ return streamres_legacy(gen=bundler.getchunks())
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/wireprotoserver.py Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,653 @@
+# Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
+# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+import contextlib
+import struct
+import sys
+import threading
+
+from .i18n import _
+from . import (
+ encoding,
+ error,
+ hook,
+ pycompat,
+ util,
+ wireproto,
+ wireprototypes,
+)
+
+stringio = util.stringio
+
+urlerr = util.urlerr
+urlreq = util.urlreq
+
+HTTP_OK = 200
+
+HGTYPE = 'application/mercurial-0.1'
+HGTYPE2 = 'application/mercurial-0.2'
+HGERRTYPE = 'application/hg-error'
+
+SSHV1 = wireprototypes.SSHV1
+SSHV2 = wireprototypes.SSHV2
+
+def decodevaluefromheaders(req, headerprefix):
+ """Decode a long value from multiple HTTP request headers.
+
+ Returns the value as a bytes, not a str.
+ """
+ chunks = []
+ i = 1
+ while True:
+ v = req.headers.get(b'%s-%d' % (headerprefix, i))
+ if v is None:
+ break
+ chunks.append(pycompat.bytesurl(v))
+ i += 1
+
+ return ''.join(chunks)
+
+class httpv1protocolhandler(wireprototypes.baseprotocolhandler):
+ def __init__(self, req, ui, checkperm):
+ self._req = req
+ self._ui = ui
+ self._checkperm = checkperm
+
+ @property
+ def name(self):
+ return 'http-v1'
+
+ def getargs(self, args):
+ knownargs = self._args()
+ data = {}
+ keys = args.split()
+ for k in keys:
+ if k == '*':
+ star = {}
+ for key in knownargs.keys():
+ if key != 'cmd' and key not in keys:
+ star[key] = knownargs[key][0]
+ data['*'] = star
+ else:
+ data[k] = knownargs[k][0]
+ return [data[k] for k in keys]
+
+ def _args(self):
+ args = self._req.qsparams.asdictoflists()
+ postlen = int(self._req.headers.get(b'X-HgArgs-Post', 0))
+ if postlen:
+ args.update(urlreq.parseqs(
+ self._req.bodyfh.read(postlen), keep_blank_values=True))
+ return args
+
+ argvalue = decodevaluefromheaders(self._req, b'X-HgArg')
+ args.update(urlreq.parseqs(argvalue, keep_blank_values=True))
+ return args
+
+ def forwardpayload(self, fp):
+ # Existing clients *always* send Content-Length.
+ length = int(self._req.headers[b'Content-Length'])
+
+ # If httppostargs is used, we need to read Content-Length
+ # minus the amount that was consumed by args.
+ length -= int(self._req.headers.get(b'X-HgArgs-Post', 0))
+ for s in util.filechunkiter(self._req.bodyfh, limit=length):
+ fp.write(s)
+
+ @contextlib.contextmanager
+ def mayberedirectstdio(self):
+ oldout = self._ui.fout
+ olderr = self._ui.ferr
+
+ out = util.stringio()
+
+ try:
+ self._ui.fout = out
+ self._ui.ferr = out
+ yield out
+ finally:
+ self._ui.fout = oldout
+ self._ui.ferr = olderr
+
+ def client(self):
+ return 'remote:%s:%s:%s' % (
+ self._req.urlscheme,
+ urlreq.quote(self._req.remotehost or ''),
+ urlreq.quote(self._req.remoteuser or ''))
+
+ def addcapabilities(self, repo, caps):
+ caps.append('httpheader=%d' %
+ repo.ui.configint('server', 'maxhttpheaderlen'))
+ if repo.ui.configbool('experimental', 'httppostargs'):
+ caps.append('httppostargs')
+
+ # FUTURE advertise 0.2rx once support is implemented
+ # FUTURE advertise minrx and mintx after consulting config option
+ caps.append('httpmediatype=0.1rx,0.1tx,0.2tx')
+
+ compengines = wireproto.supportedcompengines(repo.ui, util.SERVERROLE)
+ if compengines:
+ comptypes = ','.join(urlreq.quote(e.wireprotosupport().name)
+ for e in compengines)
+ caps.append('compression=%s' % comptypes)
+
+ return caps
+
+ def checkperm(self, perm):
+ return self._checkperm(perm)
+
+# This method exists mostly so that extensions like remotefilelog can
+# disable a kludgey legacy method only over http. As of early 2018,
+# there are no other known users, so with any luck we can discard this
+# hook if remotefilelog becomes a first-party extension.
+def iscmd(cmd):
+ return cmd in wireproto.commands
+
+def handlewsgirequest(rctx, req, res, checkperm):
+ """Possibly process a wire protocol request.
+
+ If the current request is a wire protocol request, the request is
+ processed by this function.
+
+ ``req`` is a ``parsedrequest`` instance.
+ ``res`` is a ``wsgiresponse`` instance.
+
+ Returns a bool indicating if the request was serviced. If set, the caller
+ should stop processing the request, as a response has already been issued.
+ """
+ # Avoid cycle involving hg module.
+ from .hgweb import common as hgwebcommon
+
+ repo = rctx.repo
+
+ # HTTP version 1 wire protocol requests are denoted by a "cmd" query
+ # string parameter. If it isn't present, this isn't a wire protocol
+ # request.
+ if 'cmd' not in req.qsparams:
+ return False
+
+ cmd = req.qsparams['cmd']
+
+ # The "cmd" request parameter is used by both the wire protocol and hgweb.
+ # While not all wire protocol commands are available for all transports,
+ # if we see a "cmd" value that resembles a known wire protocol command, we
+ # route it to a protocol handler. This is better than routing possible
+ # wire protocol requests to hgweb because it prevents hgweb from using
+ # known wire protocol commands and it is less confusing for machine
+ # clients.
+ if not iscmd(cmd):
+ return False
+
+ # The "cmd" query string argument is only valid on the root path of the
+ # repo. e.g. ``/?cmd=foo``, ``/repo?cmd=foo``. URL paths within the repo
+ # like ``/blah?cmd=foo`` are not allowed. So don't recognize the request
+ # in this case. We send an HTTP 404 for backwards compatibility reasons.
+ if req.dispatchpath:
+ res.status = hgwebcommon.statusmessage(404)
+ res.headers['Content-Type'] = HGTYPE
+ # TODO This is not a good response to issue for this request. This
+ # is mostly for BC for now.
+ res.setbodybytes('0\n%s\n' % b'Not Found')
+ return True
+
+ proto = httpv1protocolhandler(req, repo.ui,
+ lambda perm: checkperm(rctx, req, perm))
+
+ # The permissions checker should be the only thing that can raise an
+ # ErrorResponse. It is kind of a layer violation to catch an hgweb
+ # exception here. So consider refactoring into a exception type that
+ # is associated with the wire protocol.
+ try:
+ _callhttp(repo, req, res, proto, cmd)
+ except hgwebcommon.ErrorResponse as e:
+ for k, v in e.headers:
+ res.headers[k] = v
+ res.status = hgwebcommon.statusmessage(e.code, pycompat.bytestr(e))
+ # TODO This response body assumes the failed command was
+ # "unbundle." That assumption is not always valid.
+ res.setbodybytes('0\n%s\n' % pycompat.bytestr(e))
+
+ return True
+
+def _httpresponsetype(ui, req, prefer_uncompressed):
+ """Determine the appropriate response type and compression settings.
+
+ Returns a tuple of (mediatype, compengine, engineopts).
+ """
+ # Determine the response media type and compression engine based
+ # on the request parameters.
+ protocaps = decodevaluefromheaders(req, 'X-HgProto').split(' ')
+
+ if '0.2' in protocaps:
+ # All clients are expected to support uncompressed data.
+ if prefer_uncompressed:
+ return HGTYPE2, util._noopengine(), {}
+
+ # Default as defined by wire protocol spec.
+ compformats = ['zlib', 'none']
+ for cap in protocaps:
+ if cap.startswith('comp='):
+ compformats = cap[5:].split(',')
+ break
+
+ # Now find an agreed upon compression format.
+ for engine in wireproto.supportedcompengines(ui, util.SERVERROLE):
+ if engine.wireprotosupport().name in compformats:
+ opts = {}
+ level = ui.configint('server', '%slevel' % engine.name())
+ if level is not None:
+ opts['level'] = level
+
+ return HGTYPE2, engine, opts
+
+ # No mutually supported compression format. Fall back to the
+ # legacy protocol.
+
+ # Don't allow untrusted settings because disabling compression or
+ # setting a very high compression level could lead to flooding
+ # the server's network or CPU.
+ opts = {'level': ui.configint('server', 'zliblevel')}
+ return HGTYPE, util.compengines['zlib'], opts
+
+def _callhttp(repo, req, res, proto, cmd):
+ # Avoid cycle involving hg module.
+ from .hgweb import common as hgwebcommon
+
+ def genversion2(gen, engine, engineopts):
+ # application/mercurial-0.2 always sends a payload header
+ # identifying the compression engine.
+ name = engine.wireprotosupport().name
+ assert 0 < len(name) < 256
+ yield struct.pack('B', len(name))
+ yield name
+
+ for chunk in gen:
+ yield chunk
+
+ def setresponse(code, contenttype, bodybytes=None, bodygen=None):
+ if code == HTTP_OK:
+ res.status = '200 Script output follows'
+ else:
+ res.status = hgwebcommon.statusmessage(code)
+
+ res.headers['Content-Type'] = contenttype
+
+ if bodybytes is not None:
+ res.setbodybytes(bodybytes)
+ if bodygen is not None:
+ res.setbodygen(bodygen)
+
+ if not wireproto.commands.commandavailable(cmd, proto):
+ setresponse(HTTP_OK, HGERRTYPE,
+ _('requested wire protocol command is not available over '
+ 'HTTP'))
+ return
+
+ proto.checkperm(wireproto.commands[cmd].permission)
+
+ rsp = wireproto.dispatch(repo, proto, cmd)
+
+ if isinstance(rsp, bytes):
+ setresponse(HTTP_OK, HGTYPE, bodybytes=rsp)
+ elif isinstance(rsp, wireprototypes.bytesresponse):
+ setresponse(HTTP_OK, HGTYPE, bodybytes=rsp.data)
+ elif isinstance(rsp, wireprototypes.streamreslegacy):
+ setresponse(HTTP_OK, HGTYPE, bodygen=rsp.gen)
+ elif isinstance(rsp, wireprototypes.streamres):
+ gen = rsp.gen
+
+ # This code for compression should not be streamres specific. It
+ # is here because we only compress streamres at the moment.
+ mediatype, engine, engineopts = _httpresponsetype(
+ repo.ui, req, rsp.prefer_uncompressed)
+ gen = engine.compressstream(gen, engineopts)
+
+ if mediatype == HGTYPE2:
+ gen = genversion2(gen, engine, engineopts)
+
+ setresponse(HTTP_OK, mediatype, bodygen=gen)
+ elif isinstance(rsp, wireprototypes.pushres):
+ rsp = '%d\n%s' % (rsp.res, rsp.output)
+ setresponse(HTTP_OK, HGTYPE, bodybytes=rsp)
+ elif isinstance(rsp, wireprototypes.pusherr):
+ rsp = '0\n%s\n' % rsp.res
+ res.drain = True
+ setresponse(HTTP_OK, HGTYPE, bodybytes=rsp)
+ elif isinstance(rsp, wireprototypes.ooberror):
+ setresponse(HTTP_OK, HGERRTYPE, bodybytes=rsp.message)
+ else:
+ raise error.ProgrammingError('hgweb.protocol internal failure', rsp)
+
+def _sshv1respondbytes(fout, value):
+ """Send a bytes response for protocol version 1."""
+ fout.write('%d\n' % len(value))
+ fout.write(value)
+ fout.flush()
+
+def _sshv1respondstream(fout, source):
+ write = fout.write
+ for chunk in source.gen:
+ write(chunk)
+ fout.flush()
+
+def _sshv1respondooberror(fout, ferr, rsp):
+ ferr.write(b'%s\n-\n' % rsp)
+ ferr.flush()
+ fout.write(b'\n')
+ fout.flush()
+
+class sshv1protocolhandler(wireprototypes.baseprotocolhandler):
+ """Handler for requests serviced via version 1 of the SSH protocol."""
+ def __init__(self, ui, fin, fout):
+ self._ui = ui
+ self._fin = fin
+ self._fout = fout
+
+ @property
+ def name(self):
+ return wireprototypes.SSHV1
+
+ def getargs(self, args):
+ data = {}
+ keys = args.split()
+ for n in xrange(len(keys)):
+ argline = self._fin.readline()[:-1]
+ arg, l = argline.split()
+ if arg not in keys:
+ raise error.Abort(_("unexpected parameter %r") % arg)
+ if arg == '*':
+ star = {}
+ for k in xrange(int(l)):
+ argline = self._fin.readline()[:-1]
+ arg, l = argline.split()
+ val = self._fin.read(int(l))
+ star[arg] = val
+ data['*'] = star
+ else:
+ val = self._fin.read(int(l))
+ data[arg] = val
+ return [data[k] for k in keys]
+
+ def forwardpayload(self, fpout):
+ # We initially send an empty response. This tells the client it is
+ # OK to start sending data. If a client sees any other response, it
+ # interprets it as an error.
+ _sshv1respondbytes(self._fout, b'')
+
+ # The file is in the form:
+ #
+ # <chunk size>\n<chunk>
+ # ...
+ # 0\n
+ count = int(self._fin.readline())
+ while count:
+ fpout.write(self._fin.read(count))
+ count = int(self._fin.readline())
+
+ @contextlib.contextmanager
+ def mayberedirectstdio(self):
+ yield None
+
+ def client(self):
+ client = encoding.environ.get('SSH_CLIENT', '').split(' ', 1)[0]
+ return 'remote:ssh:' + client
+
+ def addcapabilities(self, repo, caps):
+ return caps
+
+ def checkperm(self, perm):
+ pass
+
+class sshv2protocolhandler(sshv1protocolhandler):
+ """Protocol handler for version 2 of the SSH protocol."""
+
+ @property
+ def name(self):
+ return wireprototypes.SSHV2
+
+def _runsshserver(ui, repo, fin, fout, ev):
+ # This function operates like a state machine of sorts. The following
+ # states are defined:
+ #
+ # protov1-serving
+ # Server is in protocol version 1 serving mode. Commands arrive on
+ # new lines. These commands are processed in this state, one command
+ # after the other.
+ #
+ # protov2-serving
+ # Server is in protocol version 2 serving mode.
+ #
+ # upgrade-initial
+ # The server is going to process an upgrade request.
+ #
+ # upgrade-v2-filter-legacy-handshake
+ # The protocol is being upgraded to version 2. The server is expecting
+ # the legacy handshake from version 1.
+ #
+ # upgrade-v2-finish
+ # The upgrade to version 2 of the protocol is imminent.
+ #
+ # shutdown
+ # The server is shutting down, possibly in reaction to a client event.
+ #
+ # And here are their transitions:
+ #
+ # protov1-serving -> shutdown
+ # When server receives an empty request or encounters another
+ # error.
+ #
+ # protov1-serving -> upgrade-initial
+ # An upgrade request line was seen.
+ #
+ # upgrade-initial -> upgrade-v2-filter-legacy-handshake
+ # Upgrade to version 2 in progress. Server is expecting to
+ # process a legacy handshake.
+ #
+ # upgrade-v2-filter-legacy-handshake -> shutdown
+ # Client did not fulfill upgrade handshake requirements.
+ #
+ # upgrade-v2-filter-legacy-handshake -> upgrade-v2-finish
+ # Client fulfilled version 2 upgrade requirements. Finishing that
+ # upgrade.
+ #
+ # upgrade-v2-finish -> protov2-serving
+ # Protocol upgrade to version 2 complete. Server can now speak protocol
+ # version 2.
+ #
+ # protov2-serving -> protov1-serving
+ # This happens by default since protocol version 2 is the same as
+ # version 1 except for the handshake.
+
+ state = 'protov1-serving'
+ proto = sshv1protocolhandler(ui, fin, fout)
+ protoswitched = False
+
+ while not ev.is_set():
+ if state == 'protov1-serving':
+ # Commands are issued on new lines.
+ request = fin.readline()[:-1]
+
+ # Empty lines signal to terminate the connection.
+ if not request:
+ state = 'shutdown'
+ continue
+
+ # It looks like a protocol upgrade request. Transition state to
+ # handle it.
+ if request.startswith(b'upgrade '):
+ if protoswitched:
+ _sshv1respondooberror(fout, ui.ferr,
+ b'cannot upgrade protocols multiple '
+ b'times')
+ state = 'shutdown'
+ continue
+
+ state = 'upgrade-initial'
+ continue
+
+ available = wireproto.commands.commandavailable(request, proto)
+
+ # This command isn't available. Send an empty response and go
+ # back to waiting for a new command.
+ if not available:
+ _sshv1respondbytes(fout, b'')
+ continue
+
+ rsp = wireproto.dispatch(repo, proto, request)
+
+ if isinstance(rsp, bytes):
+ _sshv1respondbytes(fout, rsp)
+ elif isinstance(rsp, wireprototypes.bytesresponse):
+ _sshv1respondbytes(fout, rsp.data)
+ elif isinstance(rsp, wireprototypes.streamres):
+ _sshv1respondstream(fout, rsp)
+ elif isinstance(rsp, wireprototypes.streamreslegacy):
+ _sshv1respondstream(fout, rsp)
+ elif isinstance(rsp, wireprototypes.pushres):
+ _sshv1respondbytes(fout, b'')
+ _sshv1respondbytes(fout, b'%d' % rsp.res)
+ elif isinstance(rsp, wireprototypes.pusherr):
+ _sshv1respondbytes(fout, rsp.res)
+ elif isinstance(rsp, wireprototypes.ooberror):
+ _sshv1respondooberror(fout, ui.ferr, rsp.message)
+ else:
+ raise error.ProgrammingError('unhandled response type from '
+ 'wire protocol command: %s' % rsp)
+
+ # For now, protocol version 2 serving just goes back to version 1.
+ elif state == 'protov2-serving':
+ state = 'protov1-serving'
+ continue
+
+ elif state == 'upgrade-initial':
+ # We should never transition into this state if we've switched
+ # protocols.
+ assert not protoswitched
+ assert proto.name == wireprototypes.SSHV1
+
+ # Expected: upgrade <token> <capabilities>
+ # If we get something else, the request is malformed. It could be
+ # from a future client that has altered the upgrade line content.
+ # We treat this as an unknown command.
+ try:
+ token, caps = request.split(b' ')[1:]
+ except ValueError:
+ _sshv1respondbytes(fout, b'')
+ state = 'protov1-serving'
+ continue
+
+ # Send empty response if we don't support upgrading protocols.
+ if not ui.configbool('experimental', 'sshserver.support-v2'):
+ _sshv1respondbytes(fout, b'')
+ state = 'protov1-serving'
+ continue
+
+ try:
+ caps = urlreq.parseqs(caps)
+ except ValueError:
+ _sshv1respondbytes(fout, b'')
+ state = 'protov1-serving'
+ continue
+
+ # If the client isn't asking to upgrade to protocol version 2,
+ # ignore the upgrade request.
+ wantedprotos = caps.get(b'proto', [b''])[0]
+ if SSHV2 not in wantedprotos:
+ _sshv1respondbytes(fout, b'')
+ state = 'protov1-serving'
+ continue
+
+ # It looks like we can honor this upgrade request to protocol 2.
+ # Filter the rest of the handshake protocol request lines.
+ state = 'upgrade-v2-filter-legacy-handshake'
+ continue
+
+ elif state == 'upgrade-v2-filter-legacy-handshake':
+ # Client should have sent legacy handshake after an ``upgrade``
+ # request. Expected lines:
+ #
+ # hello
+ # between
+ # pairs 81
+ # 0000...-0000...
+
+ ok = True
+ for line in (b'hello', b'between', b'pairs 81'):
+ request = fin.readline()[:-1]
+
+ if request != line:
+ _sshv1respondooberror(fout, ui.ferr,
+ b'malformed handshake protocol: '
+ b'missing %s' % line)
+ ok = False
+ state = 'shutdown'
+ break
+
+ if not ok:
+ continue
+
+ request = fin.read(81)
+ if request != b'%s-%s' % (b'0' * 40, b'0' * 40):
+ _sshv1respondooberror(fout, ui.ferr,
+ b'malformed handshake protocol: '
+ b'missing between argument value')
+ state = 'shutdown'
+ continue
+
+ state = 'upgrade-v2-finish'
+ continue
+
+ elif state == 'upgrade-v2-finish':
+ # Send the upgrade response.
+ fout.write(b'upgraded %s %s\n' % (token, SSHV2))
+ servercaps = wireproto.capabilities(repo, proto)
+ rsp = b'capabilities: %s' % servercaps.data
+ fout.write(b'%d\n%s\n' % (len(rsp), rsp))
+ fout.flush()
+
+ proto = sshv2protocolhandler(ui, fin, fout)
+ protoswitched = True
+
+ state = 'protov2-serving'
+ continue
+
+ elif state == 'shutdown':
+ break
+
+ else:
+ raise error.ProgrammingError('unhandled ssh server state: %s' %
+ state)
+
+class sshserver(object):
+ def __init__(self, ui, repo, logfh=None):
+ self._ui = ui
+ self._repo = repo
+ self._fin = ui.fin
+ self._fout = ui.fout
+
+ # Log write I/O to stdout and stderr if configured.
+ if logfh:
+ self._fout = util.makeloggingfileobject(
+ logfh, self._fout, 'o', logdata=True)
+ ui.ferr = util.makeloggingfileobject(
+ logfh, ui.ferr, 'e', logdata=True)
+
+ hook.redirect(True)
+ ui.fout = repo.ui.fout = ui.ferr
+
+ # Prevent insertion/deletion of CRs
+ util.setbinary(self._fin)
+ util.setbinary(self._fout)
+
+ def serve_forever(self):
+ self.serveuntil(threading.Event())
+ sys.exit(0)
+
+ def serveuntil(self, ev):
+ """Serve until a threading.Event is set."""
+ _runsshserver(self._ui, self._repo, self._fin, self._fout, ev)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/wireprototypes.py Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,157 @@
+# Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+import abc
+
+# Names of the SSH protocol implementations.
+SSHV1 = 'ssh-v1'
+# This is advertised over the wire. Increment the counter at the end
+# to reflect BC breakages.
+SSHV2 = 'exp-ssh-v2-0001'
+
+# All available wire protocol transports.
+TRANSPORTS = {
+ SSHV1: {
+ 'transport': 'ssh',
+ 'version': 1,
+ },
+ SSHV2: {
+ 'transport': 'ssh',
+ 'version': 2,
+ },
+ 'http-v1': {
+ 'transport': 'http',
+ 'version': 1,
+ }
+}
+
+class bytesresponse(object):
+ """A wire protocol response consisting of raw bytes."""
+ def __init__(self, data):
+ self.data = data
+
+class ooberror(object):
+ """wireproto reply: failure of a batch of operations
+
+ Something failed during a batch call. The error message is stored in
+ `self.message`.
+ """
+ def __init__(self, message):
+ self.message = message
+
+class pushres(object):
+ """wireproto reply: success with simple integer return
+
+ The call was successful and returned an integer contained in `self.res`.
+ """
+ def __init__(self, res, output):
+ self.res = res
+ self.output = output
+
+class pusherr(object):
+ """wireproto reply: failure
+
+ The call failed. The `self.res` attribute contains the error message.
+ """
+ def __init__(self, res, output):
+ self.res = res
+ self.output = output
+
+class streamres(object):
+ """wireproto reply: binary stream
+
+ The call was successful and the result is a stream.
+
+ Accepts a generator containing chunks of data to be sent to the client.
+
+ ``prefer_uncompressed`` indicates that the data is expected to be
+ uncompressable and that the stream should therefore use the ``none``
+ engine.
+ """
+ def __init__(self, gen=None, prefer_uncompressed=False):
+ self.gen = gen
+ self.prefer_uncompressed = prefer_uncompressed
+
+class streamreslegacy(object):
+ """wireproto reply: uncompressed binary stream
+
+ The call was successful and the result is a stream.
+
+ Accepts a generator containing chunks of data to be sent to the client.
+
+ Like ``streamres``, but sends uncompressed data for "version 1" clients
+ using the application/mercurial-0.1 media type.
+ """
+ def __init__(self, gen=None):
+ self.gen = gen
+
+class baseprotocolhandler(object):
+ """Abstract base class for wire protocol handlers.
+
+ A wire protocol handler serves as an interface between protocol command
+ handlers and the wire protocol transport layer. Protocol handlers provide
+ methods to read command arguments, redirect stdio for the duration of
+ the request, handle response types, etc.
+ """
+
+ __metaclass__ = abc.ABCMeta
+
+ @abc.abstractproperty
+ def name(self):
+ """The name of the protocol implementation.
+
+ Used for uniquely identifying the transport type.
+ """
+
+ @abc.abstractmethod
+ def getargs(self, args):
+ """return the value for arguments in <args>
+
+ returns a list of values (same order as <args>)"""
+
+ @abc.abstractmethod
+ def forwardpayload(self, fp):
+ """Read the raw payload and forward to a file.
+
+ The payload is read in full before the function returns.
+ """
+
+ @abc.abstractmethod
+ def mayberedirectstdio(self):
+ """Context manager to possibly redirect stdio.
+
+ The context manager yields a file-object like object that receives
+ stdout and stderr output when the context manager is active. Or it
+ yields ``None`` if no I/O redirection occurs.
+
+ The intent of this context manager is to capture stdio output
+ so it may be sent in the response. Some transports support streaming
+ stdio to the client in real time. For these transports, stdio output
+ won't be captured.
+ """
+
+ @abc.abstractmethod
+ def client(self):
+ """Returns a string representation of this client (as bytes)."""
+
+ @abc.abstractmethod
+ def addcapabilities(self, repo, caps):
+ """Adds advertised capabilities specific to this protocol.
+
+ Receives the list of capabilities collected so far.
+
+ Returns a list of capabilities. The passed in argument can be returned.
+ """
+
+ @abc.abstractmethod
+ def checkperm(self, perm):
+ """Validate that the client has permissions to perform a request.
+
+ The argument is the permission required to proceed. If the client
+ doesn't have that permission, this method should raise or abort
+ in a protocol specific manner.
+ """
--- a/mercurial/worker.py Thu Mar 15 22:35:07 2018 -0700
+++ b/mercurial/worker.py Mon Mar 19 08:07:18 2018 -0700
@@ -176,7 +176,7 @@
os._exit(ret & 255)
pids.add(pid)
os.close(wfd)
- fp = os.fdopen(rfd, pycompat.sysstr('rb'), 0)
+ fp = os.fdopen(rfd, r'rb', 0)
def cleanup():
signal.signal(signal.SIGINT, oldhandler)
waitforworkers()
--- a/setup.py Thu Mar 15 22:35:07 2018 -0700
+++ b/setup.py Mon Mar 19 08:07:18 2018 -0700
@@ -255,6 +255,7 @@
if (not e.startswith(b'not trusting file')
and not e.startswith(b'warning: Not importing')
and not e.startswith(b'obsolete feature not enabled')
+ and not e.startswith(b'*** failed to import extension')
and not e.startswith(b'devel-warn:'))]
return b'\n'.join(b' ' + e for e in err)
@@ -806,13 +807,14 @@
'mercurial.cext',
'mercurial.cffi',
'mercurial.hgweb',
- 'mercurial.httpclient',
'mercurial.pure',
'mercurial.thirdparty',
'mercurial.thirdparty.attr',
+ 'mercurial.utils',
'hgext', 'hgext.convert', 'hgext.fsmonitor',
'hgext.fsmonitor.pywatchman', 'hgext.highlight',
- 'hgext.largefiles', 'hgext.lfs', 'hgext.zeroconf', 'hgext3rd',
+ 'hgext.largefiles', 'hgext.lfs', 'hgext.narrow',
+ 'hgext.zeroconf', 'hgext3rd',
'hgdemandimport']
common_depends = ['mercurial/bitmanipulation.h',
@@ -846,14 +848,30 @@
if sys.platform == 'darwin':
osutil_ldflags += ['-framework', 'ApplicationServices']
+xdiff_srcs = [
+ 'mercurial/thirdparty/xdiff/xdiffi.c',
+ 'mercurial/thirdparty/xdiff/xprepare.c',
+ 'mercurial/thirdparty/xdiff/xutils.c',
+]
+
+xdiff_headers = [
+ 'mercurial/thirdparty/xdiff/xdiff.h',
+ 'mercurial/thirdparty/xdiff/xdiffi.h',
+ 'mercurial/thirdparty/xdiff/xinclude.h',
+ 'mercurial/thirdparty/xdiff/xmacros.h',
+ 'mercurial/thirdparty/xdiff/xprepare.h',
+ 'mercurial/thirdparty/xdiff/xtypes.h',
+ 'mercurial/thirdparty/xdiff/xutils.h',
+]
+
extmodules = [
Extension('mercurial.cext.base85', ['mercurial/cext/base85.c'],
include_dirs=common_include_dirs,
depends=common_depends),
Extension('mercurial.cext.bdiff', ['mercurial/bdiff.c',
- 'mercurial/cext/bdiff.c'],
+ 'mercurial/cext/bdiff.c'] + xdiff_srcs,
include_dirs=common_include_dirs,
- depends=common_depends + ['mercurial/bdiff.h']),
+ depends=common_depends + ['mercurial/bdiff.h'] + xdiff_headers),
Extension('mercurial.cext.diffhelpers', ['mercurial/cext/diffhelpers.c'],
include_dirs=common_include_dirs,
depends=common_depends),
--- a/tests/badserverext.py Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/badserverext.py Mon Mar 19 08:07:18 2018 -0700
@@ -44,16 +44,16 @@
configtable = {}
configitem = registrar.configitem(configtable)
-configitem('badserver', 'closeafteraccept',
+configitem(b'badserver', b'closeafteraccept',
default=False,
)
-configitem('badserver', 'closeafterrecvbytes',
+configitem(b'badserver', b'closeafterrecvbytes',
default=0,
)
-configitem('badserver', 'closeaftersendbytes',
+configitem(b'badserver', b'closeaftersendbytes',
default=0,
)
-configitem('badserver', 'closebeforeaccept',
+configitem(b'badserver', b'closebeforeaccept',
default=False,
)
--- a/tests/blackbox-readonly-dispatch.py Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/blackbox-readonly-dispatch.py Mon Mar 19 08:07:18 2018 -0700
@@ -1,7 +1,8 @@
-from __future__ import absolute_import, print_function
+from __future__ import absolute_import
import os
from mercurial import (
dispatch,
+ ui as uimod,
)
def testdispatch(cmd):
@@ -9,28 +10,29 @@
Prints command and result value, but does not handle quoting.
"""
- print("running: %s" % (cmd,))
- req = dispatch.request(cmd.split())
+ ui = uimod.ui.load()
+ ui.status(b"running: %s\n" % cmd)
+ req = dispatch.request(cmd.split(), ui)
result = dispatch.dispatch(req)
- print("result: %r" % (result,))
+ ui.status(b"result: %r\n" % result)
# create file 'foo', add and commit
-f = open('foo', 'wb')
-f.write('foo\n')
+f = open(b'foo', 'wb')
+f.write(b'foo\n')
f.close()
-testdispatch("--debug add foo")
-testdispatch("--debug commit -m commit1 -d 2000-01-01 foo")
+testdispatch(b"--debug add foo")
+testdispatch(b"--debug commit -m commit1 -d 2000-01-01 foo")
# append to file 'foo' and commit
-f = open('foo', 'ab')
-f.write('bar\n')
+f = open(b'foo', 'ab')
+f.write(b'bar\n')
f.close()
# remove blackbox.log directory (proxy for readonly log file)
-os.rmdir(".hg/blackbox.log")
+os.rmdir(b".hg/blackbox.log")
# replace it with the real blackbox.log file
-os.rename(".hg/blackbox.log-", ".hg/blackbox.log")
-testdispatch("--debug commit -m commit2 -d 2000-01-02 foo")
+os.rename(b".hg/blackbox.log-", b".hg/blackbox.log")
+testdispatch(b"--debug commit -m commit2 -d 2000-01-02 foo")
# check 88803a69b24 (fancyopts modified command table)
-testdispatch("--debug log -r 0")
-testdispatch("--debug log -r tip")
+testdispatch(b"--debug log -r 0")
+testdispatch(b"--debug log -r tip")
--- a/tests/bruterebase.py Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/bruterebase.py Mon Mar 19 08:07:18 2018 -0700
@@ -65,7 +65,7 @@
desc += getdesc(prev)
descs.append(desc)
descs.sort()
- summary = ' '.join(descs)
+ summary = b' '.join(descs)
ui.popbuffer()
repo.vfs.tryunlink(b'rebasestate')
--- a/tests/common-pattern.py Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/common-pattern.py Mon Mar 19 08:07:18 2018 -0700
@@ -22,6 +22,7 @@
br'phases%253Dheads%250A'
br'pushkey%250A'
br'remote-changegroup%253Dhttp%252Chttps%250A'
+ br'rev-branch-cache%250A'
br'stream%253Dv2',
# (the replacement patterns)
br'$USUAL_BUNDLE_CAPS$'
@@ -50,6 +51,7 @@
br'phases%3Dheads%0A'
br'pushkey%0A'
br'remote-changegroup%3Dhttp%2Chttps%0A'
+ br'rev-branch-cache%0A'
br'stream%3Dv2',
# (replacement patterns)
br'$USUAL_BUNDLE2_CAPS$'
@@ -64,13 +66,22 @@
br'listkeys%0A'
br'phases%3Dheads%0A'
br'pushkey%0A'
- br'remote-changegroup%3Dhttp%2Chttps',
+ br'remote-changegroup%3Dhttp%2Chttps%0A'
+ br'rev-branch-cache',
# (replacement patterns)
br'$USUAL_BUNDLE2_CAPS_SERVER$'
),
# HTTP log dates
- (br' - - \[\d\d/.../2\d\d\d \d\d:\d\d:\d\d] "GET',
- br' - - [$LOGDATE$] "GET'
+ (br' - - \[\d\d/.../2\d\d\d \d\d:\d\d:\d\d] "(GET|PUT|POST)',
+ lambda m: br' - - [$LOGDATE$] "' + m.group(1)
+ ),
+ # HTTP header dates- RFC 1123
+ (br'([Dd]ate): [A-Za-z]{3}, \d\d [A-Za-z]{3} \d{4} \d\d:\d\d:\d\d GMT',
+ lambda m: br'%s: $HTTP_DATE$' % m.group(1)
+ ),
+ # LFS expiration value
+ (br'"expires_at": "\d{4}-\d\d-\d\dT\d\d:\d\d:\d\dZ"',
+ br'"expires_at": "$ISO_8601_DATE_TIME$"'
),
# Windows has an extra '/' in the following lines that get globbed away:
# pushing to file:/*/$TESTTMP/r2 (glob)
--- a/tests/dummysmtpd.py Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/dummysmtpd.py Mon Mar 19 08:07:18 2018 -0700
@@ -12,6 +12,7 @@
import traceback
from mercurial import (
+ pycompat,
server,
sslutil,
ui as uimod,
@@ -63,6 +64,19 @@
except KeyboardInterrupt:
pass
+def _encodestrsonly(v):
+ if isinstance(v, type(u'')):
+ return v.encode('ascii')
+ return v
+
+def bytesvars(obj):
+ unidict = vars(obj)
+ bd = {k.encode('ascii'): _encodestrsonly(v) for k, v in unidict.items()}
+ if bd[b'daemon_postexec'] is not None:
+ bd[b'daemon_postexec'] = [
+ _encodestrsonly(v) for v in bd[b'daemon_postexec']]
+ return bd
+
def main():
op = optparse.OptionParser()
op.add_option('-d', '--daemon', action='store_true')
@@ -85,8 +99,10 @@
dummysmtpsecureserver(addr, opts.certificate)
log('listening at %s:%d\n' % addr)
- server.runservice(vars(opts), initfn=init, runfn=run,
- runargs=[sys.executable, __file__] + sys.argv[1:])
+ server.runservice(
+ bytesvars(opts), initfn=init, runfn=run,
+ runargs=[pycompat.sysexecutable,
+ pycompat.fsencode(__file__)] + pycompat.sysargv[1:])
if __name__ == '__main__':
main()
--- a/tests/dummyssh Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/dummyssh Mon Mar 19 08:07:18 2018 -0700
@@ -15,8 +15,8 @@
log = open("dummylog", "ab")
log.write(b"Got arguments")
for i, arg in enumerate(sys.argv[1:]):
- log.write(b" %d:%s" % (i + 1, arg))
-log.write("\n")
+ log.write(b" %d:%s" % (i + 1, arg.encode('latin1')))
+log.write(b"\n")
log.close()
hgcmd = sys.argv[2]
if os.name == 'nt':
--- a/tests/f Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/f Mon Mar 19 08:07:18 2018 -0700
@@ -25,6 +25,7 @@
from __future__ import absolute_import
+import binascii
import glob
import hashlib
import optparse
@@ -58,46 +59,48 @@
facts = []
if isfile:
if opts.type:
- facts.append('file')
+ facts.append(b'file')
if any((opts.hexdump, opts.dump, opts.md5, opts.sha1, opts.sha256)):
- content = open(f, 'rb').read()
+ with open(f, 'rb') as fobj:
+ content = fobj.read()
elif islink:
if opts.type:
- facts.append('link')
+ facts.append(b'link')
content = os.readlink(f)
elif isstdin:
content = getattr(sys.stdin, 'buffer', sys.stdin).read()
if opts.size:
- facts.append('size=%s' % len(content))
+ facts.append(b'size=%d' % len(content))
elif isdir:
if opts.recurse or opts.type:
dirfiles = glob.glob(f + '/*')
- facts.append('directory with %s files' % len(dirfiles))
+ facts.append(b'directory with %d files' % len(dirfiles))
elif opts.type:
- facts.append('type unknown')
+ facts.append(b'type unknown')
if not isstdin:
stat = os.lstat(f)
if opts.size and not isdir:
- facts.append('size=%s' % stat.st_size)
+ facts.append(b'size=%d' % stat.st_size)
if opts.mode and not islink:
- facts.append('mode=%o' % (stat.st_mode & 0o777))
+ facts.append(b'mode=%o' % (stat.st_mode & 0o777))
if opts.links:
- facts.append('links=%s' % stat.st_nlink)
+ facts.append(b'links=%s' % stat.st_nlink)
if opts.newer:
# mtime might be in whole seconds so newer file might be same
if stat.st_mtime >= os.stat(opts.newer).st_mtime:
- facts.append('newer than %s' % opts.newer)
+ facts.append(b'newer than %s' % opts.newer)
else:
- facts.append('older than %s' % opts.newer)
+ facts.append(b'older than %s' % opts.newer)
if opts.md5 and content is not None:
h = hashlib.md5(content)
- facts.append('md5=%s' % h.hexdigest()[:opts.bytes])
+ facts.append(b'md5=%s' % binascii.hexlify(h.digest())[:opts.bytes])
if opts.sha1 and content is not None:
h = hashlib.sha1(content)
- facts.append('sha1=%s' % h.hexdigest()[:opts.bytes])
+ facts.append(b'sha1=%s' % binascii.hexlify(h.digest())[:opts.bytes])
if opts.sha256 and content is not None:
h = hashlib.sha256(content)
- facts.append('sha256=%s' % h.hexdigest()[:opts.bytes])
+ facts.append(b'sha256=%s' %
+ binascii.hexlify(h.digest())[:opts.bytes])
if isstdin:
outfile.write(b', '.join(facts) + b'\n')
elif facts:
--- a/tests/fakedirstatewritetime.py Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/fakedirstatewritetime.py Mon Mar 19 08:07:18 2018 -0700
@@ -13,13 +13,13 @@
extensions,
policy,
registrar,
- util,
)
+from mercurial.utils import dateutil
configtable = {}
configitem = registrar.configitem(configtable)
-configitem('fakedirstatewritetime', 'fakenow',
+configitem(b'fakedirstatewritetime', b'fakenow',
default=None,
)
@@ -29,7 +29,7 @@
# execute what original parsers.pack_dirstate should do actually
# for consistency
actualnow = int(now)
- for f, e in dmap.iteritems():
+ for f, e in dmap.items():
if e[0] == 'n' and e[3] == actualnow:
e = parsers.dirstatetuple(e[0], e[1], e[2], -1)
dmap[f] = e
@@ -39,7 +39,7 @@
def fakewrite(ui, func):
# fake "now" of 'pack_dirstate' only if it is invoked while 'func'
- fakenow = ui.config('fakedirstatewritetime', 'fakenow')
+ fakenow = ui.config(b'fakedirstatewritetime', b'fakenow')
if not fakenow:
# Execute original one, if fakenow isn't configured. This is
# useful to prevent subrepos from executing replaced one,
@@ -49,7 +49,7 @@
# parsing 'fakenow' in YYYYmmddHHMM format makes comparison between
# 'fakenow' value and 'touch -t YYYYmmddHHMM' argument easy
- fakenow = util.parsedate(fakenow, ['%Y%m%d%H%M'])[0]
+ fakenow = dateutil.parsedate(fakenow, [b'%Y%m%d%H%M'])[0]
orig_pack_dirstate = parsers.pack_dirstate
orig_dirstate_getfsnow = dirstate._getfsnow
--- a/tests/fakemergerecord.py Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/fakemergerecord.py Mon Mar 19 08:07:18 2018 -0700
@@ -12,15 +12,15 @@
cmdtable = {}
command = registrar.command(cmdtable)
-@command('fakemergerecord',
- [('X', 'mandatory', None, 'add a fake mandatory record'),
- ('x', 'advisory', None, 'add a fake advisory record')], '')
+@command(b'fakemergerecord',
+ [(b'X', b'mandatory', None, b'add a fake mandatory record'),
+ (b'x', b'advisory', None, b'add a fake advisory record')], '')
def fakemergerecord(ui, repo, *pats, **opts):
with repo.wlock():
ms = merge.mergestate.read(repo)
records = ms._makerecords()
if opts.get('mandatory'):
- records.append(('X', 'mandatory record'))
+ records.append((b'X', b'mandatory record'))
if opts.get('advisory'):
- records.append(('x', 'advisory record'))
+ records.append((b'x', b'advisory record'))
ms._writerecords(records)
--- a/tests/fakepatchtime.py Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/fakepatchtime.py Mon Mar 19 08:07:18 2018 -0700
@@ -7,30 +7,30 @@
extensions,
patch as patchmod,
registrar,
- util,
)
+from mercurial.utils import dateutil
configtable = {}
configitem = registrar.configitem(configtable)
-configitem('fakepatchtime', 'fakenow',
+configitem(b'fakepatchtime', b'fakenow',
default=None,
)
def internalpatch(orig, ui, repo, patchobj, strip,
- prefix='', files=None,
- eolmode='strict', similarity=0):
+ prefix=b'', files=None,
+ eolmode=b'strict', similarity=0):
if files is None:
files = set()
r = orig(ui, repo, patchobj, strip,
prefix=prefix, files=files,
eolmode=eolmode, similarity=similarity)
- fakenow = ui.config('fakepatchtime', 'fakenow')
+ fakenow = ui.config(b'fakepatchtime', b'fakenow')
if fakenow:
# parsing 'fakenow' in YYYYmmddHHMM format makes comparison between
# 'fakenow' value and 'touch -t YYYYmmddHHMM' argument easy
- fakenow = util.parsedate(fakenow, ['%Y%m%d%H%M'])[0]
+ fakenow = dateutil.parsedate(fakenow, [b'%Y%m%d%H%M'])[0]
for f in files:
repo.wvfs.utime(f, (fakenow, fakenow))
--- a/tests/flagprocessorext.py Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/flagprocessorext.py Mon Mar 19 08:07:18 2018 -0700
@@ -45,14 +45,14 @@
def supportedoutgoingversions(orig, repo):
versions = orig(repo)
- versions.discard('01')
- versions.discard('02')
- versions.add('03')
+ versions.discard(b'01')
+ versions.discard(b'02')
+ versions.add(b'03')
return versions
def allsupportedversions(orig, ui):
versions = orig(ui)
- versions.add('03')
+ versions.add(b'03')
return versions
def noopaddrevision(orig, self, text, transaction, link, p1, p2,
@@ -106,7 +106,7 @@
# Teach exchange to use changegroup 3
for k in exchange._bundlespeccgversions.keys():
- exchange._bundlespeccgversions[k] = '03'
+ exchange._bundlespeccgversions[k] = b'03'
# Add wrappers for addrevision, responsible to set flags depending on the
# revision data contents.
--- a/tests/generate-working-copy-states.py Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/generate-working-copy-states.py Mon Mar 19 08:07:18 2018 -0700
@@ -42,12 +42,12 @@
def generatestates(maxchangesets, parentcontents):
depth = len(parentcontents)
if depth == maxchangesets + 1:
- for tracked in ('untracked', 'tracked'):
- filename = "_".join([(content is None and 'missing' or content) for
- content in parentcontents]) + "-" + tracked
+ for tracked in (b'untracked', b'tracked'):
+ filename = b"_".join([(content is None and b'missing' or content)
+ for content in parentcontents]) + b"-" + tracked
yield (filename, parentcontents)
else:
- for content in ({None, 'content' + str(depth + 1)} |
+ for content in ({None, b'content' + (b"%d" % (depth + 1))} |
set(parentcontents)):
for combination in generatestates(maxchangesets,
parentcontents + [content]):
@@ -66,12 +66,12 @@
content = []
for filename, states in combinations:
if target == 'filelist':
- print(filename)
+ print(filename.decode('ascii'))
elif target == 'state':
if depth == 'wc':
# Make sure there is content so the file gets written and can be
# tracked. It will be deleted outside of this script.
- content.append((filename, states[maxchangesets] or 'TOBEDELETED'))
+ content.append((filename, states[maxchangesets] or b'TOBEDELETED'))
else:
content.append((filename, states[int(depth) - 1]))
else:
@@ -82,7 +82,7 @@
for filename, data in content:
if data is not None:
f = open(filename, 'wb')
- f.write(data + '\n')
+ f.write(data + b'\n')
f.close()
elif os.path.exists(filename):
os.remove(filename)
--- a/tests/get-with-headers.py Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/get-with-headers.py Mon Mar 19 08:07:18 2018 -0700
@@ -3,7 +3,7 @@
"""This does HTTP GET requests given a host:port and path and returns
a subset of the headers plus the body of the result."""
-from __future__ import absolute_import, print_function
+from __future__ import absolute_import
import argparse
import json
@@ -23,6 +23,8 @@
except ImportError:
pass
+stdout = getattr(sys.stdout, 'buffer', sys.stdout)
+
parser = argparse.ArgumentParser()
parser.add_argument('--twice', action='store_true')
parser.add_argument('--headeronly', action='store_true')
@@ -62,21 +64,23 @@
conn = httplib.HTTPConnection(host)
conn.request("GET", '/' + path, None, headers)
response = conn.getresponse()
- print(response.status, response.reason)
+ stdout.write(b'%d %s\n' % (response.status,
+ response.reason.encode('ascii')))
if show[:1] == ['-']:
show = sorted(h for h, v in response.getheaders()
if h.lower() not in show)
for h in [h.lower() for h in show]:
if response.getheader(h, None) is not None:
- print("%s: %s" % (h, response.getheader(h)))
+ stdout.write(b"%s: %s\n" % (h.encode('ascii'),
+ response.getheader(h).encode('ascii')))
if not headeronly:
- print()
+ stdout.write(b'\n')
data = response.read()
if args.bodyfile:
bodyfh = open(args.bodyfile, 'wb')
else:
- bodyfh = sys.stdout
+ bodyfh = stdout
# Pretty print JSON. This also has the beneficial side-effect
# of verifying emitted JSON is well-formed.
--- a/tests/hghave.py Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/hghave.py Mon Mar 19 08:07:18 2018 -0700
@@ -372,7 +372,7 @@
def has_hardlink_whitelisted():
from mercurial import util
try:
- fstype = util.getfstype('.')
+ fstype = util.getfstype(b'.')
except OSError:
return False
return fstype in util._hardlinkfswhitelist
@@ -703,8 +703,17 @@
@check("clang-libfuzzer", "clang new enough to include libfuzzer")
def has_clang_libfuzzer():
- mat = matchoutput('clang --version', 'clang version (\d)')
+ mat = matchoutput('clang --version', b'clang version (\d)')
if mat:
# libfuzzer is new in clang 6
return int(mat.group(1)) > 5
return False
+
+@check("xdiff", "xdiff algorithm")
+def has_xdiff():
+ try:
+ from mercurial import policy
+ bdiff = policy.importmod('bdiff')
+ return bdiff.xdiffblocks(b'', b'') == [(0, 0, 0, 0)]
+ except (ImportError, AttributeError):
+ return False
--- a/tests/hgweberror.py Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/hgweberror.py Mon Mar 19 08:07:18 2018 -0700
@@ -6,13 +6,16 @@
webcommands,
)
-def raiseerror(web, req, tmpl):
+def raiseerror(web):
'''Dummy web command that raises an uncaught Exception.'''
# Simulate an error after partial response.
- if 'partialresponse' in req.form:
- req.respond(200, 'text/plain')
- req.write('partial content\n')
+ if 'partialresponse' in web.req.qsparams:
+ web.res.status = b'200 Script output follows'
+ web.res.headers[b'Content-Type'] = b'text/plain'
+ web.res.setbodywillwrite()
+ list(web.res.sendresponse())
+ web.res.getbodyfile().write(b'partial content\n')
raise AttributeError('I am an uncaught error!')
--- a/tests/logexceptions.py Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/logexceptions.py Mon Mar 19 08:07:18 2018 -0700
@@ -65,6 +65,7 @@
primaryframe,
hgframe,
hgline,
+ ui.environ[b'TESTNAME'].decode('utf-8', 'replace'),
]
fh.write(b'\0'.join(p.encode('utf-8', 'replace') for p in parts))
--- a/tests/mockblackbox.py Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/mockblackbox.py Mon Mar 19 08:07:18 2018 -0700
@@ -5,7 +5,7 @@
# XXX: we should probably offer a devel option to do this in blackbox directly
def getuser():
- return 'bob'
+ return b'bob'
def getpid():
return 5000
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/narrow-library.sh Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,8 @@
+cat >> $HGRCPATH <<EOF
+[extensions]
+narrow=
+[ui]
+ssh=python "$TESTDIR/dummyssh"
+[experimental]
+changegroup3 = True
+EOF
--- a/tests/printenv.py Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/printenv.py Mon Mar 19 08:07:18 2018 -0700
@@ -35,7 +35,7 @@
# variables with empty values may not exist on all platforms, filter
# them now for portability sake.
-env = [(k, v) for k, v in os.environ.iteritems()
+env = [(k, v) for k, v in os.environ.items()
if k.startswith("HG_") and v]
env.sort()
--- a/tests/revlog-formatv0.py Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/revlog-formatv0.py Mon Mar 19 08:07:18 2018 -0700
@@ -18,6 +18,7 @@
"""
from __future__ import absolute_import
+import binascii
import os
import sys
@@ -56,7 +57,7 @@
for name, data in files:
f = open(name, 'wb')
- f.write(data.decode('hex'))
+ f.write(binascii.unhexlify(data))
f.close()
sys.exit(0)
--- a/tests/revnamesext.py Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/revnamesext.py Mon Mar 19 08:07:18 2018 -0700
@@ -7,12 +7,12 @@
)
def reposetup(ui, repo):
- names = {'r%d' % rev: repo[rev].node() for rev in repo}
+ names = {b'r%d' % rev: repo[rev].node() for rev in repo}
namemap = lambda r, name: names.get(name)
- nodemap = lambda r, node: ['r%d' % repo[node].rev()]
+ nodemap = lambda r, node: [b'r%d' % repo[node].rev()]
- ns = namespaces.namespace('revnames', templatename='revname',
- logname='revname',
+ ns = namespaces.namespace(b'revnames', templatename=b'revname',
+ logname=b'revname',
listnames=lambda r: names.keys(),
namemap=namemap, nodemap=nodemap)
repo.names.addnamespace(ns)
--- a/tests/run-tests.py Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/run-tests.py Mon Mar 19 08:07:18 2018 -0700
@@ -120,6 +120,7 @@
}
class TestRunnerLexer(lexer.RegexLexer):
+ testpattern = r'[\w-]+\.(t|py)( \(case [\w-]+\))?'
tokens = {
'root': [
(r'^Skipped', token.Generic.Skipped, 'skipped'),
@@ -127,11 +128,11 @@
(r'^ERROR: ', token.Generic.Failed, 'failed'),
],
'skipped': [
- (r'[\w-]+\.(t|py)', token.Generic.SName),
+ (testpattern, token.Generic.SName),
(r':.*', token.Generic.Skipped),
],
'failed': [
- (r'[\w-]+\.(t|py)', token.Generic.FName),
+ (testpattern, token.Generic.FName),
(r'(:| ).*', token.Generic.Failed),
]
}
@@ -344,6 +345,8 @@
help="loop tests repeatedly")
harness.add_argument('--random', action="store_true",
help='run tests in random order')
+ harness.add_argument('--order-by-runtime', action="store_true",
+ help='run slowest tests first, according to .testtimes')
harness.add_argument("-p", "--port", type=int,
help="port on which servers should listen"
" (default: $%s or %d)" % defaults['port'])
@@ -989,7 +992,12 @@
# the intermediate 'compile' step help with debugging
code = compile(source.read(), replacementfile, 'exec')
exec(code, data)
- r.extend(data.get('substitutions', ()))
+ for value in data.get('substitutions', ()):
+ if len(value) != 2:
+ msg = 'malformatted substitution in %s: %r'
+ msg %= (replacementfile, value)
+ raise ValueError(msg)
+ r.append(value)
return r
def _escapepath(self, p):
@@ -1046,6 +1054,7 @@
env['PYTHONUSERBASE'] = sysconfig.get_config_var('userbase') or ''
env['HGEMITWARNINGS'] = '1'
env['TESTTMP'] = self._testtmp
+ env['TESTNAME'] = self.name
env['HOME'] = self._testtmp
# This number should match portneeded in _getport
for port in xrange(3):
@@ -1080,7 +1089,7 @@
del env[k]
# unset env related to hooks
- for k in env.keys():
+ for k in list(env):
if k.startswith('HG_'):
del env[k]
@@ -1110,6 +1119,7 @@
hgrc.write(b'[web]\n')
hgrc.write(b'address = localhost\n')
hgrc.write(b'ipv6 = %s\n' % str(self._useipv6).encode('ascii'))
+ hgrc.write(b'server-header = testing stub value\n')
for opt in self._extraconfigopts:
section, key = opt.encode('utf-8').split(b'.', 1)
@@ -1229,6 +1239,7 @@
self.name = '%s (case %s)' % (self.name, _strpath(case))
self.errpath = b'%s.%s.err' % (self.errpath[:-4], case)
self._tmpname += b'-%s' % case
+ self._have = {}
@property
def refpath(self):
@@ -1268,11 +1279,15 @@
return self._processoutput(exitcode, output, salt, after, expected)
def _hghave(self, reqs):
+ allreqs = b' '.join(reqs)
+ if allreqs in self._have:
+ return self._have.get(allreqs)
+
# TODO do something smarter when all other uses of hghave are gone.
runtestdir = os.path.abspath(os.path.dirname(_bytespath(__file__)))
tdir = runtestdir.replace(b'\\', b'/')
proc = Popen4(b'%s -c "%s/hghave %s"' %
- (self._shell, tdir, b' '.join(reqs)),
+ (self._shell, tdir, allreqs),
self._testtmp, 0, self._getenv())
stdout, stderr = proc.communicate()
ret = proc.wait()
@@ -1283,10 +1298,13 @@
sys.exit(1)
if ret != 0:
+ self._have[allreqs] = (False, stdout)
return False, stdout
if b'slow' in reqs:
self._timeout = self._slowtimeout
+
+ self._have[allreqs] = (True, None)
return True, None
def _iftest(self, args):
@@ -1341,7 +1359,11 @@
if os.getenv('MSYSTEM'):
script.append(b'alias pwd="pwd -W"\n')
if self._case:
- script.append(b'TESTCASE=%s\n' % shellquote(self._case))
+ if isinstance(self._case, str):
+ quoted = shellquote(self._case)
+ else:
+ quoted = shellquote(self._case.decode('utf8')).encode('utf8')
+ script.append(b'TESTCASE=%s\n' % quoted)
script.append(b'export TESTCASE\n')
n = 0
@@ -1352,10 +1374,11 @@
lsplit = l.split()
if len(lsplit) < 2 or lsplit[0] != b'#require':
after.setdefault(pos, []).append(' !!! invalid #require\n')
- haveresult, message = self._hghave(lsplit[1:])
- if not haveresult:
- script = [b'echo "%s"\nexit 80\n' % message]
- break
+ if not skipping:
+ haveresult, message = self._hghave(lsplit[1:])
+ if not haveresult:
+ script = [b'echo "%s"\nexit 80\n' % message]
+ break
after.setdefault(pos, []).append(l)
elif l.startswith(b'#if'):
lsplit = l.split()
@@ -1751,20 +1774,20 @@
else:
servefail, lines = getdiff(expected, got,
test.refpath, test.errpath)
+ self.stream.write('\n')
+ for line in lines:
+ line = highlightdiff(line, self.color)
+ if PYTHON3:
+ self.stream.flush()
+ self.stream.buffer.write(line)
+ self.stream.buffer.flush()
+ else:
+ self.stream.write(line)
+ self.stream.flush()
+
if servefail:
raise test.failureException(
'server failed to start (HGPORT=%s)' % test._startport)
- else:
- self.stream.write('\n')
- for line in lines:
- line = highlightdiff(line, self.color)
- if PYTHON3:
- self.stream.flush()
- self.stream.buffer.write(line)
- self.stream.buffer.flush()
- else:
- self.stream.write(line)
- self.stream.flush()
# handle interactive prompt without releasing iolock
if self._options.interactive:
@@ -2012,10 +2035,11 @@
def loadtimes(outputdir):
times = []
try:
- with open(os.path.join(outputdir, b'.testtimes-')) as fp:
+ with open(os.path.join(outputdir, b'.testtimes')) as fp:
for line in fp:
- ts = line.split()
- times.append((ts[0], [float(t) for t in ts[1:]]))
+ m = re.match('(.*?) ([0-9. ]+)', line)
+ times.append((m.group(1),
+ [float(t) for t in m.group(2).split()]))
except IOError as err:
if err.errno != errno.ENOENT:
raise
@@ -2124,13 +2148,21 @@
if self._runner.options.exceptions:
exceptions = aggregateexceptions(
os.path.join(self._runner._outputdir, b'exceptions'))
- total = sum(exceptions.values())
self.stream.writeln('Exceptions Report:')
self.stream.writeln('%d total from %d frames' %
- (total, len(exceptions)))
- for (frame, line, exc), count in exceptions.most_common():
- self.stream.writeln('%d\t%s: %s' % (count, frame, exc))
+ (exceptions['total'],
+ len(exceptions['exceptioncounts'])))
+ combined = exceptions['combined']
+ for key in sorted(combined, key=combined.get, reverse=True):
+ frame, line, exc = key
+ totalcount, testcount, leastcount, leasttest = combined[key]
+
+ self.stream.writeln('%d (%d tests)\t%s: %s (%s - %d total)'
+ % (totalcount,
+ testcount,
+ frame, exc,
+ leasttest, leastcount))
self.stream.flush()
@@ -2279,47 +2311,57 @@
separators=(',', ': '))
outf.writelines(("testreport =", jsonout))
-def sorttests(testdescs, shuffle=False):
+def sorttests(testdescs, previoustimes, shuffle=False):
"""Do an in-place sort of tests."""
if shuffle:
random.shuffle(testdescs)
return
- # keywords for slow tests
- slow = {b'svn': 10,
- b'cvs': 10,
- b'hghave': 10,
- b'largefiles-update': 10,
- b'run-tests': 10,
- b'corruption': 10,
- b'race': 10,
- b'i18n': 10,
- b'check': 100,
- b'gendoc': 100,
- b'contrib-perf': 200,
- }
- perf = {}
-
- def sortkey(f):
- # run largest tests first, as they tend to take the longest
- f = f['path']
- try:
- return perf[f]
- except KeyError:
+ if previoustimes:
+ def sortkey(f):
+ f = f['path']
+ if f in previoustimes:
+ # Use most recent time as estimate
+ return -previoustimes[f][-1]
+ else:
+ # Default to a rather arbitrary value of 1 second for new tests
+ return -1.0
+ else:
+ # keywords for slow tests
+ slow = {b'svn': 10,
+ b'cvs': 10,
+ b'hghave': 10,
+ b'largefiles-update': 10,
+ b'run-tests': 10,
+ b'corruption': 10,
+ b'race': 10,
+ b'i18n': 10,
+ b'check': 100,
+ b'gendoc': 100,
+ b'contrib-perf': 200,
+ }
+ perf = {}
+
+ def sortkey(f):
+ # run largest tests first, as they tend to take the longest
+ f = f['path']
try:
- val = -os.stat(f).st_size
- except OSError as e:
- if e.errno != errno.ENOENT:
- raise
- perf[f] = -1e9 # file does not exist, tell early
- return -1e9
- for kw, mul in slow.items():
- if kw in f:
- val *= mul
- if f.endswith(b'.py'):
- val /= 10.0
- perf[f] = val / 1000.0
- return perf[f]
+ return perf[f]
+ except KeyError:
+ try:
+ val = -os.stat(f).st_size
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ perf[f] = -1e9 # file does not exist, tell early
+ return -1e9
+ for kw, mul in slow.items():
+ if kw in f:
+ val *= mul
+ if f.endswith(b'.py'):
+ val /= 10.0
+ perf[f] = val / 1000.0
+ return perf[f]
testdescs.sort(key=sortkey)
@@ -2390,8 +2432,6 @@
os.umask(oldmask)
def _run(self, testdescs):
- sorttests(testdescs, shuffle=self.options.random)
-
self._testdir = osenvironb[b'TESTDIR'] = getattr(
os, 'getcwdb', os.getcwd)()
# assume all tests in same folder for now
@@ -2406,6 +2446,10 @@
self._outputdir = self._testdir
if testdescs and pathname:
self._outputdir = os.path.join(self._outputdir, pathname)
+ previoustimes = {}
+ if self.options.order_by_runtime:
+ previoustimes = dict(loadtimes(self._outputdir))
+ sorttests(testdescs, previoustimes, shuffle=self.options.random)
if 'PYTHONHASHSEED' not in os.environ:
# use a random python hash seed all the time
@@ -3001,22 +3045,57 @@
p.decode("utf-8"))
def aggregateexceptions(path):
- exceptions = collections.Counter()
+ exceptioncounts = collections.Counter()
+ testsbyfailure = collections.defaultdict(set)
+ failuresbytest = collections.defaultdict(set)
for f in os.listdir(path):
with open(os.path.join(path, f), 'rb') as fh:
data = fh.read().split(b'\0')
- if len(data) != 4:
+ if len(data) != 5:
continue
- exc, mainframe, hgframe, hgline = data
+ exc, mainframe, hgframe, hgline, testname = data
exc = exc.decode('utf-8')
mainframe = mainframe.decode('utf-8')
hgframe = hgframe.decode('utf-8')
hgline = hgline.decode('utf-8')
- exceptions[(hgframe, hgline, exc)] += 1
-
- return exceptions
+ testname = testname.decode('utf-8')
+
+ key = (hgframe, hgline, exc)
+ exceptioncounts[key] += 1
+ testsbyfailure[key].add(testname)
+ failuresbytest[testname].add(key)
+
+ # Find test having fewest failures for each failure.
+ leastfailing = {}
+ for key, tests in testsbyfailure.items():
+ fewesttest = None
+ fewestcount = 99999999
+ for test in sorted(tests):
+ if len(failuresbytest[test]) < fewestcount:
+ fewesttest = test
+ fewestcount = len(failuresbytest[test])
+
+ leastfailing[key] = (fewestcount, fewesttest)
+
+ # Create a combined counter so we can sort by total occurrences and
+ # impacted tests.
+ combined = {}
+ for key in exceptioncounts:
+ combined[key] = (exceptioncounts[key],
+ len(testsbyfailure[key]),
+ leastfailing[key][0],
+ leastfailing[key][1])
+
+ return {
+ 'exceptioncounts': exceptioncounts,
+ 'total': sum(exceptioncounts.values()),
+ 'combined': combined,
+ 'leastfailing': leastfailing,
+ 'byfailure': testsbyfailure,
+ 'bytest': failuresbytest,
+ }
if __name__ == '__main__':
runner = TestRunner()
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/sshprotoext.py Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,98 @@
+# sshprotoext.py - Extension to test behavior of SSH protocol
+#
+# Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+# This extension replaces the SSH server started via `hg serve --stdio`.
+# The server behaves differently depending on environment variables.
+
+from __future__ import absolute_import
+
+from mercurial import (
+ error,
+ extensions,
+ registrar,
+ sshpeer,
+ wireproto,
+ wireprotoserver,
+)
+
+configtable = {}
+configitem = registrar.configitem(configtable)
+
+configitem(b'sshpeer', b'mode', default=None)
+configitem(b'sshpeer', b'handshake-mode', default=None)
+
+class bannerserver(wireprotoserver.sshserver):
+ """Server that sends a banner to stdout."""
+ def serve_forever(self):
+ for i in range(10):
+ self._fout.write(b'banner: line %d\n' % i)
+
+ super(bannerserver, self).serve_forever()
+
+class prehelloserver(wireprotoserver.sshserver):
+ """Tests behavior when connecting to <0.9.1 servers.
+
+ The ``hello`` wire protocol command was introduced in Mercurial
+ 0.9.1. Modern clients send the ``hello`` command when connecting
+ to SSH servers. This mock server tests behavior of the handshake
+ when ``hello`` is not supported.
+ """
+ def serve_forever(self):
+ l = self._fin.readline()
+ assert l == b'hello\n'
+ # Respond to unknown commands with an empty reply.
+ wireprotoserver._sshv1respondbytes(self._fout, b'')
+ l = self._fin.readline()
+ assert l == b'between\n'
+ proto = wireprotoserver.sshv1protocolhandler(self._ui, self._fin,
+ self._fout)
+ rsp = wireproto.dispatch(self._repo, proto, b'between')
+ wireprotoserver._sshv1respondbytes(self._fout, rsp.data)
+
+ super(prehelloserver, self).serve_forever()
+
+def performhandshake(orig, ui, stdin, stdout, stderr):
+ """Wrapped version of sshpeer._performhandshake to send extra commands."""
+ mode = ui.config(b'sshpeer', b'handshake-mode')
+ if mode == b'pre-no-args':
+ ui.debug(b'sending no-args command\n')
+ stdin.write(b'no-args\n')
+ stdin.flush()
+ return orig(ui, stdin, stdout, stderr)
+ elif mode == b'pre-multiple-no-args':
+ ui.debug(b'sending unknown1 command\n')
+ stdin.write(b'unknown1\n')
+ ui.debug(b'sending unknown2 command\n')
+ stdin.write(b'unknown2\n')
+ ui.debug(b'sending unknown3 command\n')
+ stdin.write(b'unknown3\n')
+ stdin.flush()
+ return orig(ui, stdin, stdout, stderr)
+ else:
+ raise error.ProgrammingError(b'unknown HANDSHAKECOMMANDMODE: %s' %
+ mode)
+
+def extsetup(ui):
+ # It's easier for tests to define the server behavior via environment
+ # variables than config options. This is because `hg serve --stdio`
+ # has to be invoked with a certain form for security reasons and
+ # `dummyssh` can't just add `--config` flags to the command line.
+ servermode = ui.environ.get(b'SSHSERVERMODE')
+
+ if servermode == b'banner':
+ wireprotoserver.sshserver = bannerserver
+ elif servermode == b'no-hello':
+ wireprotoserver.sshserver = prehelloserver
+ elif servermode:
+ raise error.ProgrammingError(b'unknown server mode: %s' % servermode)
+
+ peermode = ui.config(b'sshpeer', b'mode')
+
+ if peermode == b'extra-handshake-commands':
+ extensions.wrapfunction(sshpeer, '_performhandshake', performhandshake)
+ elif peermode:
+ raise error.ProgrammingError(b'unknown peer mode: %s' % peermode)
--- a/tests/svn-safe-append.py Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/svn-safe-append.py Mon Mar 19 08:07:18 2018 -0700
@@ -6,6 +6,7 @@
Without this svn will not detect workspace changes."""
import os
+import stat
import sys
text = sys.argv[1]
@@ -13,16 +14,15 @@
f = open(fname, "ab")
try:
- before = os.fstat(f.fileno()).st_mtime
+ before = os.fstat(f.fileno())[stat.ST_MTIME]
f.write(text)
f.write("\n")
finally:
f.close()
inc = 1
-now = os.stat(fname).st_mtime
+now = os.stat(fname)[stat.ST_MTIME]
while now == before:
t = now + inc
inc += 1
os.utime(fname, (t, t))
- now = os.stat(fname).st_mtime
-
+ now = os.stat(fname)[stat.ST_MTIME]
--- a/tests/test-abort-checkin.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-abort-checkin.t Mon Mar 19 08:07:18 2018 -0700
@@ -1,9 +1,9 @@
$ cat > abortcommit.py <<EOF
> from mercurial import error
> def hook(**args):
- > raise error.Abort("no commits allowed")
+ > raise error.Abort(b"no commits allowed")
> def reposetup(ui, repo):
- > repo.ui.setconfig("hooks", "pretxncommit.nocommits", hook)
+ > repo.ui.setconfig(b"hooks", b"pretxncommit.nocommits", hook)
> EOF
$ abspath=`pwd`/abortcommit.py
--- a/tests/test-acl.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-acl.t Mon Mar 19 08:07:18 2018 -0700
@@ -21,6 +21,15 @@
> echo
> }
+ > cat > posixgetuser.py <<'EOF'
+ > import getpass
+ > from mercurial import pycompat, util
+ > def posixgetuser():
+ > return pycompat.fsencode(getpass.getuser())
+ > if not pycompat.isposix:
+ > util.getuser = posixgetuser # forcibly trust $LOGNAME
+ > EOF
+
> init_config()
> {
> cat > fakegroups.py <<EOF
@@ -41,6 +50,7 @@
> sources = push
> [extensions]
> f=`pwd`/fakegroups.py
+ > posixgetuser=$TESTTMP/posixgetuser.py
> EOF
> }
@@ -72,6 +82,10 @@
3 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ config=b/.hg/hgrc
+ $ cat >> "$config" <<EOF
+ > [extensions]
+ > posixgetuser=$TESTTMP/posixgetuser.py
+ > EOF
Extension disabled for lack of a hook
@@ -93,14 +107,14 @@
f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
911600dab2ae7a9baff75958b84fe606851ce955
bundle2-output-bundle: "HG20", 5 parts total
- bundle2-output-part: "replycaps" 188 bytes payload
+ bundle2-output-part: "replycaps" 205 bytes payload
bundle2-output-part: "check:phases" 24 bytes payload
bundle2-output-part: "check:heads" streamed payload
bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
bundle2-output-part: "phase-heads" 24 bytes payload
bundle2-input-bundle: with-transaction
bundle2-input-part: "replycaps" supported
- bundle2-input-part: total payload size 188
+ bundle2-input-part: total payload size 205
bundle2-input-part: "check:phases" supported
bundle2-input-part: total payload size 24
bundle2-input-part: "check:heads" supported
@@ -156,14 +170,14 @@
f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
911600dab2ae7a9baff75958b84fe606851ce955
bundle2-output-bundle: "HG20", 5 parts total
- bundle2-output-part: "replycaps" 188 bytes payload
+ bundle2-output-part: "replycaps" 205 bytes payload
bundle2-output-part: "check:phases" 24 bytes payload
bundle2-output-part: "check:heads" streamed payload
bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
bundle2-output-part: "phase-heads" 24 bytes payload
bundle2-input-bundle: with-transaction
bundle2-input-part: "replycaps" supported
- bundle2-input-part: total payload size 188
+ bundle2-input-part: total payload size 205
bundle2-input-part: "check:phases" supported
bundle2-input-part: total payload size 24
bundle2-input-part: "check:heads" supported
@@ -222,14 +236,14 @@
f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
911600dab2ae7a9baff75958b84fe606851ce955
bundle2-output-bundle: "HG20", 5 parts total
- bundle2-output-part: "replycaps" 188 bytes payload
+ bundle2-output-part: "replycaps" 205 bytes payload
bundle2-output-part: "check:phases" 24 bytes payload
bundle2-output-part: "check:heads" streamed payload
bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
bundle2-output-part: "phase-heads" 24 bytes payload
bundle2-input-bundle: with-transaction
bundle2-input-part: "replycaps" supported
- bundle2-input-part: total payload size 188
+ bundle2-input-part: total payload size 205
bundle2-input-part: "check:phases" supported
bundle2-input-part: total payload size 24
bundle2-input-part: "check:heads" supported
@@ -298,14 +312,14 @@
f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
911600dab2ae7a9baff75958b84fe606851ce955
bundle2-output-bundle: "HG20", 5 parts total
- bundle2-output-part: "replycaps" 188 bytes payload
+ bundle2-output-part: "replycaps" 205 bytes payload
bundle2-output-part: "check:phases" 24 bytes payload
bundle2-output-part: "check:heads" streamed payload
bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
bundle2-output-part: "phase-heads" 24 bytes payload
bundle2-input-bundle: with-transaction
bundle2-input-part: "replycaps" supported
- bundle2-input-part: total payload size 188
+ bundle2-input-part: total payload size 205
bundle2-input-part: "check:phases" supported
bundle2-input-part: total payload size 24
bundle2-input-part: "check:heads" supported
@@ -366,14 +380,14 @@
f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
911600dab2ae7a9baff75958b84fe606851ce955
bundle2-output-bundle: "HG20", 5 parts total
- bundle2-output-part: "replycaps" 188 bytes payload
+ bundle2-output-part: "replycaps" 205 bytes payload
bundle2-output-part: "check:phases" 24 bytes payload
bundle2-output-part: "check:heads" streamed payload
bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
bundle2-output-part: "phase-heads" 24 bytes payload
bundle2-input-bundle: with-transaction
bundle2-input-part: "replycaps" supported
- bundle2-input-part: total payload size 188
+ bundle2-input-part: total payload size 205
bundle2-input-part: "check:phases" supported
bundle2-input-part: total payload size 24
bundle2-input-part: "check:heads" supported
@@ -439,14 +453,14 @@
f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
911600dab2ae7a9baff75958b84fe606851ce955
bundle2-output-bundle: "HG20", 5 parts total
- bundle2-output-part: "replycaps" 188 bytes payload
+ bundle2-output-part: "replycaps" 205 bytes payload
bundle2-output-part: "check:phases" 24 bytes payload
bundle2-output-part: "check:heads" streamed payload
bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
bundle2-output-part: "phase-heads" 24 bytes payload
bundle2-input-bundle: with-transaction
bundle2-input-part: "replycaps" supported
- bundle2-input-part: total payload size 188
+ bundle2-input-part: total payload size 205
bundle2-input-part: "check:phases" supported
bundle2-input-part: total payload size 24
bundle2-input-part: "check:heads" supported
@@ -509,14 +523,14 @@
f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
911600dab2ae7a9baff75958b84fe606851ce955
bundle2-output-bundle: "HG20", 5 parts total
- bundle2-output-part: "replycaps" 188 bytes payload
+ bundle2-output-part: "replycaps" 205 bytes payload
bundle2-output-part: "check:phases" 24 bytes payload
bundle2-output-part: "check:heads" streamed payload
bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
bundle2-output-part: "phase-heads" 24 bytes payload
bundle2-input-bundle: with-transaction
bundle2-input-part: "replycaps" supported
- bundle2-input-part: total payload size 188
+ bundle2-input-part: total payload size 205
bundle2-input-part: "check:phases" supported
bundle2-input-part: total payload size 24
bundle2-input-part: "check:heads" supported
@@ -584,14 +598,14 @@
f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
911600dab2ae7a9baff75958b84fe606851ce955
bundle2-output-bundle: "HG20", 5 parts total
- bundle2-output-part: "replycaps" 188 bytes payload
+ bundle2-output-part: "replycaps" 205 bytes payload
bundle2-output-part: "check:phases" 24 bytes payload
bundle2-output-part: "check:heads" streamed payload
bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
bundle2-output-part: "phase-heads" 24 bytes payload
bundle2-input-bundle: with-transaction
bundle2-input-part: "replycaps" supported
- bundle2-input-part: total payload size 188
+ bundle2-input-part: total payload size 205
bundle2-input-part: "check:phases" supported
bundle2-input-part: total payload size 24
bundle2-input-part: "check:heads" supported
@@ -656,14 +670,14 @@
f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
911600dab2ae7a9baff75958b84fe606851ce955
bundle2-output-bundle: "HG20", 5 parts total
- bundle2-output-part: "replycaps" 188 bytes payload
+ bundle2-output-part: "replycaps" 205 bytes payload
bundle2-output-part: "check:phases" 24 bytes payload
bundle2-output-part: "check:heads" streamed payload
bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
bundle2-output-part: "phase-heads" 24 bytes payload
bundle2-input-bundle: with-transaction
bundle2-input-part: "replycaps" supported
- bundle2-input-part: total payload size 188
+ bundle2-input-part: total payload size 205
bundle2-input-part: "check:phases" supported
bundle2-input-part: total payload size 24
bundle2-input-part: "check:heads" supported
@@ -730,14 +744,14 @@
f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
911600dab2ae7a9baff75958b84fe606851ce955
bundle2-output-bundle: "HG20", 5 parts total
- bundle2-output-part: "replycaps" 188 bytes payload
+ bundle2-output-part: "replycaps" 205 bytes payload
bundle2-output-part: "check:phases" 24 bytes payload
bundle2-output-part: "check:heads" streamed payload
bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
bundle2-output-part: "phase-heads" 24 bytes payload
bundle2-input-bundle: with-transaction
bundle2-input-part: "replycaps" supported
- bundle2-input-part: total payload size 188
+ bundle2-input-part: total payload size 205
bundle2-input-part: "check:phases" supported
bundle2-input-part: total payload size 24
bundle2-input-part: "check:heads" supported
@@ -813,14 +827,14 @@
f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
911600dab2ae7a9baff75958b84fe606851ce955
bundle2-output-bundle: "HG20", 5 parts total
- bundle2-output-part: "replycaps" 188 bytes payload
+ bundle2-output-part: "replycaps" 205 bytes payload
bundle2-output-part: "check:phases" 24 bytes payload
bundle2-output-part: "check:heads" streamed payload
bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
bundle2-output-part: "phase-heads" 24 bytes payload
bundle2-input-bundle: with-transaction
bundle2-input-part: "replycaps" supported
- bundle2-input-part: total payload size 188
+ bundle2-input-part: total payload size 205
bundle2-input-part: "check:phases" supported
bundle2-input-part: total payload size 24
bundle2-input-part: "check:heads" supported
@@ -894,14 +908,14 @@
f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
911600dab2ae7a9baff75958b84fe606851ce955
bundle2-output-bundle: "HG20", 5 parts total
- bundle2-output-part: "replycaps" 188 bytes payload
+ bundle2-output-part: "replycaps" 205 bytes payload
bundle2-output-part: "check:phases" 24 bytes payload
bundle2-output-part: "check:heads" streamed payload
bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
bundle2-output-part: "phase-heads" 24 bytes payload
bundle2-input-bundle: with-transaction
bundle2-input-part: "replycaps" supported
- bundle2-input-part: total payload size 188
+ bundle2-input-part: total payload size 205
bundle2-input-part: "check:phases" supported
bundle2-input-part: total payload size 24
bundle2-input-part: "check:heads" supported
@@ -970,14 +984,14 @@
f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
911600dab2ae7a9baff75958b84fe606851ce955
bundle2-output-bundle: "HG20", 5 parts total
- bundle2-output-part: "replycaps" 188 bytes payload
+ bundle2-output-part: "replycaps" 205 bytes payload
bundle2-output-part: "check:phases" 24 bytes payload
bundle2-output-part: "check:heads" streamed payload
bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
bundle2-output-part: "phase-heads" 24 bytes payload
bundle2-input-bundle: with-transaction
bundle2-input-part: "replycaps" supported
- bundle2-input-part: total payload size 188
+ bundle2-input-part: total payload size 205
bundle2-input-part: "check:phases" supported
bundle2-input-part: total payload size 24
bundle2-input-part: "check:heads" supported
@@ -1057,14 +1071,14 @@
f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
911600dab2ae7a9baff75958b84fe606851ce955
bundle2-output-bundle: "HG20", 5 parts total
- bundle2-output-part: "replycaps" 188 bytes payload
+ bundle2-output-part: "replycaps" 205 bytes payload
bundle2-output-part: "check:phases" 24 bytes payload
bundle2-output-part: "check:heads" streamed payload
bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
bundle2-output-part: "phase-heads" 24 bytes payload
bundle2-input-bundle: with-transaction
bundle2-input-part: "replycaps" supported
- bundle2-input-part: total payload size 188
+ bundle2-input-part: total payload size 205
bundle2-input-part: "check:phases" supported
bundle2-input-part: total payload size 24
bundle2-input-part: "check:heads" supported
@@ -1126,6 +1140,7 @@
[acl]
sources = push
[extensions]
+ posixgetuser=$TESTTMP/posixgetuser.py
[acl.allow]
** = fred
"""
@@ -1143,14 +1158,14 @@
f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
911600dab2ae7a9baff75958b84fe606851ce955
bundle2-output-bundle: "HG20", 5 parts total
- bundle2-output-part: "replycaps" 188 bytes payload
+ bundle2-output-part: "replycaps" 205 bytes payload
bundle2-output-part: "check:phases" 24 bytes payload
bundle2-output-part: "check:heads" streamed payload
bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
bundle2-output-part: "phase-heads" 24 bytes payload
bundle2-input-bundle: with-transaction
bundle2-input-part: "replycaps" supported
- bundle2-input-part: total payload size 188
+ bundle2-input-part: total payload size 205
bundle2-input-part: "check:phases" supported
bundle2-input-part: total payload size 24
bundle2-input-part: "check:heads" supported
@@ -1206,6 +1221,7 @@
[acl]
sources = push
[extensions]
+ posixgetuser=$TESTTMP/posixgetuser.py
[acl.allow]
** = fred
[acl.deny]
@@ -1225,14 +1241,14 @@
f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
911600dab2ae7a9baff75958b84fe606851ce955
bundle2-output-bundle: "HG20", 5 parts total
- bundle2-output-part: "replycaps" 188 bytes payload
+ bundle2-output-part: "replycaps" 205 bytes payload
bundle2-output-part: "check:phases" 24 bytes payload
bundle2-output-part: "check:heads" streamed payload
bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
bundle2-output-part: "phase-heads" 24 bytes payload
bundle2-input-bundle: with-transaction
bundle2-input-part: "replycaps" supported
- bundle2-input-part: total payload size 188
+ bundle2-input-part: total payload size 205
bundle2-input-part: "check:phases" supported
bundle2-input-part: total payload size 24
bundle2-input-part: "check:heads" supported
@@ -1287,6 +1303,7 @@
[acl]
sources = push
[extensions]
+ posixgetuser=$TESTTMP/posixgetuser.py
[acl.allow]
** = @group1
"""
@@ -1304,14 +1321,14 @@
f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
911600dab2ae7a9baff75958b84fe606851ce955
bundle2-output-bundle: "HG20", 5 parts total
- bundle2-output-part: "replycaps" 188 bytes payload
+ bundle2-output-part: "replycaps" 205 bytes payload
bundle2-output-part: "check:phases" 24 bytes payload
bundle2-output-part: "check:heads" streamed payload
bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
bundle2-output-part: "phase-heads" 24 bytes payload
bundle2-input-bundle: with-transaction
bundle2-input-part: "replycaps" supported
- bundle2-input-part: total payload size 188
+ bundle2-input-part: total payload size 205
bundle2-input-part: "check:phases" supported
bundle2-input-part: total payload size 24
bundle2-input-part: "check:heads" supported
@@ -1368,6 +1385,7 @@
[acl]
sources = push
[extensions]
+ posixgetuser=$TESTTMP/posixgetuser.py
[acl.allow]
** = @group1
[acl.deny]
@@ -1387,14 +1405,14 @@
f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
911600dab2ae7a9baff75958b84fe606851ce955
bundle2-output-bundle: "HG20", 5 parts total
- bundle2-output-part: "replycaps" 188 bytes payload
+ bundle2-output-part: "replycaps" 205 bytes payload
bundle2-output-part: "check:phases" 24 bytes payload
bundle2-output-part: "check:heads" streamed payload
bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
bundle2-output-part: "phase-heads" 24 bytes payload
bundle2-input-bundle: with-transaction
bundle2-input-part: "replycaps" supported
- bundle2-input-part: total payload size 188
+ bundle2-input-part: total payload size 205
bundle2-input-part: "check:phases" supported
bundle2-input-part: total payload size 24
bundle2-input-part: "check:heads" supported
@@ -1491,6 +1509,7 @@
[acl]
sources = push
[extensions]
+ posixgetuser=$TESTTMP/posixgetuser.py
"""
pushing to ../b
query 1; heads
@@ -1507,14 +1526,14 @@
911600dab2ae7a9baff75958b84fe606851ce955
e8fc755d4d8217ee5b0c2bb41558c40d43b92c01
bundle2-output-bundle: "HG20", 5 parts total
- bundle2-output-part: "replycaps" 188 bytes payload
+ bundle2-output-part: "replycaps" 205 bytes payload
bundle2-output-part: "check:phases" 48 bytes payload
bundle2-output-part: "check:heads" streamed payload
bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
bundle2-output-part: "phase-heads" 48 bytes payload
bundle2-input-bundle: with-transaction
bundle2-input-part: "replycaps" supported
- bundle2-input-part: total payload size 188
+ bundle2-input-part: total payload size 205
bundle2-input-part: "check:phases" supported
bundle2-input-part: total payload size 48
bundle2-input-part: "check:heads" supported
@@ -1573,6 +1592,7 @@
[acl]
sources = push
[extensions]
+ posixgetuser=$TESTTMP/posixgetuser.py
[acl.deny.branches]
foobar = *
"""
@@ -1591,14 +1611,14 @@
911600dab2ae7a9baff75958b84fe606851ce955
e8fc755d4d8217ee5b0c2bb41558c40d43b92c01
bundle2-output-bundle: "HG20", 5 parts total
- bundle2-output-part: "replycaps" 188 bytes payload
+ bundle2-output-part: "replycaps" 205 bytes payload
bundle2-output-part: "check:phases" 48 bytes payload
bundle2-output-part: "check:heads" streamed payload
bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
bundle2-output-part: "phase-heads" 48 bytes payload
bundle2-input-bundle: with-transaction
bundle2-input-part: "replycaps" supported
- bundle2-input-part: total payload size 188
+ bundle2-input-part: total payload size 205
bundle2-input-part: "check:phases" supported
bundle2-input-part: total payload size 48
bundle2-input-part: "check:heads" supported
@@ -1651,6 +1671,7 @@
[acl]
sources = push
[extensions]
+ posixgetuser=$TESTTMP/posixgetuser.py
[acl.allow.branches]
"""
pushing to ../b
@@ -1668,14 +1689,14 @@
911600dab2ae7a9baff75958b84fe606851ce955
e8fc755d4d8217ee5b0c2bb41558c40d43b92c01
bundle2-output-bundle: "HG20", 5 parts total
- bundle2-output-part: "replycaps" 188 bytes payload
+ bundle2-output-part: "replycaps" 205 bytes payload
bundle2-output-part: "check:phases" 48 bytes payload
bundle2-output-part: "check:heads" streamed payload
bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
bundle2-output-part: "phase-heads" 48 bytes payload
bundle2-input-bundle: with-transaction
bundle2-input-part: "replycaps" supported
- bundle2-input-part: total payload size 188
+ bundle2-input-part: total payload size 205
bundle2-input-part: "check:phases" supported
bundle2-input-part: total payload size 48
bundle2-input-part: "check:heads" supported
@@ -1723,6 +1744,7 @@
[acl]
sources = push
[extensions]
+ posixgetuser=$TESTTMP/posixgetuser.py
[acl.allow.branches]
* = george
"""
@@ -1741,14 +1763,14 @@
911600dab2ae7a9baff75958b84fe606851ce955
e8fc755d4d8217ee5b0c2bb41558c40d43b92c01
bundle2-output-bundle: "HG20", 5 parts total
- bundle2-output-part: "replycaps" 188 bytes payload
+ bundle2-output-part: "replycaps" 205 bytes payload
bundle2-output-part: "check:phases" 48 bytes payload
bundle2-output-part: "check:heads" streamed payload
bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
bundle2-output-part: "phase-heads" 48 bytes payload
bundle2-input-bundle: with-transaction
bundle2-input-part: "replycaps" supported
- bundle2-input-part: total payload size 188
+ bundle2-input-part: total payload size 205
bundle2-input-part: "check:phases" supported
bundle2-input-part: total payload size 48
bundle2-input-part: "check:heads" supported
@@ -1790,6 +1812,7 @@
[acl]
sources = push
[extensions]
+ posixgetuser=$TESTTMP/posixgetuser.py
[acl.allow.branches]
* = george
"""
@@ -1808,14 +1831,14 @@
911600dab2ae7a9baff75958b84fe606851ce955
e8fc755d4d8217ee5b0c2bb41558c40d43b92c01
bundle2-output-bundle: "HG20", 5 parts total
- bundle2-output-part: "replycaps" 188 bytes payload
+ bundle2-output-part: "replycaps" 205 bytes payload
bundle2-output-part: "check:phases" 48 bytes payload
bundle2-output-part: "check:heads" streamed payload
bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
bundle2-output-part: "phase-heads" 48 bytes payload
bundle2-input-bundle: with-transaction
bundle2-input-part: "replycaps" supported
- bundle2-input-part: total payload size 188
+ bundle2-input-part: total payload size 205
bundle2-input-part: "check:phases" supported
bundle2-input-part: total payload size 48
bundle2-input-part: "check:heads" supported
@@ -1878,6 +1901,7 @@
[acl]
sources = push
[extensions]
+ posixgetuser=$TESTTMP/posixgetuser.py
[acl.allow.branches]
foobar = astro
* = george
@@ -1897,14 +1921,14 @@
911600dab2ae7a9baff75958b84fe606851ce955
e8fc755d4d8217ee5b0c2bb41558c40d43b92c01
bundle2-output-bundle: "HG20", 5 parts total
- bundle2-output-part: "replycaps" 188 bytes payload
+ bundle2-output-part: "replycaps" 205 bytes payload
bundle2-output-part: "check:phases" 48 bytes payload
bundle2-output-part: "check:heads" streamed payload
bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
bundle2-output-part: "phase-heads" 48 bytes payload
bundle2-input-bundle: with-transaction
bundle2-input-part: "replycaps" supported
- bundle2-input-part: total payload size 188
+ bundle2-input-part: total payload size 205
bundle2-input-part: "check:phases" supported
bundle2-input-part: total payload size 48
bundle2-input-part: "check:heads" supported
@@ -1965,6 +1989,7 @@
[acl]
sources = push
[extensions]
+ posixgetuser=$TESTTMP/posixgetuser.py
[acl.deny.branches]
foobar = astro
default = astro
@@ -1985,14 +2010,14 @@
911600dab2ae7a9baff75958b84fe606851ce955
e8fc755d4d8217ee5b0c2bb41558c40d43b92c01
bundle2-output-bundle: "HG20", 5 parts total
- bundle2-output-part: "replycaps" 188 bytes payload
+ bundle2-output-part: "replycaps" 205 bytes payload
bundle2-output-part: "check:phases" 48 bytes payload
bundle2-output-part: "check:heads" streamed payload
bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
bundle2-output-part: "phase-heads" 48 bytes payload
bundle2-input-bundle: with-transaction
bundle2-input-part: "replycaps" supported
- bundle2-input-part: total payload size 188
+ bundle2-input-part: total payload size 205
bundle2-input-part: "check:phases" supported
bundle2-input-part: total payload size 48
bundle2-input-part: "check:heads" supported
@@ -2039,6 +2064,7 @@
[acl]
sources = push
[extensions]
+ posixgetuser=$TESTTMP/posixgetuser.py
[acl.deny.branches]
default = !astro
"""
@@ -2057,14 +2083,14 @@
911600dab2ae7a9baff75958b84fe606851ce955
e8fc755d4d8217ee5b0c2bb41558c40d43b92c01
bundle2-output-bundle: "HG20", 5 parts total
- bundle2-output-part: "replycaps" 188 bytes payload
+ bundle2-output-part: "replycaps" 205 bytes payload
bundle2-output-part: "check:phases" 48 bytes payload
bundle2-output-part: "check:heads" streamed payload
bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
bundle2-output-part: "phase-heads" 48 bytes payload
bundle2-input-bundle: with-transaction
bundle2-input-part: "replycaps" supported
- bundle2-input-part: total payload size 188
+ bundle2-input-part: total payload size 205
bundle2-input-part: "check:phases" supported
bundle2-input-part: total payload size 48
bundle2-input-part: "check:heads" supported
@@ -2121,6 +2147,7 @@
[acl]
sources = push
[extensions]
+ posixgetuser=$TESTTMP/posixgetuser.py
[acl.deny.branches]
default = !astro
"""
@@ -2139,14 +2166,14 @@
911600dab2ae7a9baff75958b84fe606851ce955
e8fc755d4d8217ee5b0c2bb41558c40d43b92c01
bundle2-output-bundle: "HG20", 5 parts total
- bundle2-output-part: "replycaps" 188 bytes payload
+ bundle2-output-part: "replycaps" 205 bytes payload
bundle2-output-part: "check:phases" 48 bytes payload
bundle2-output-part: "check:heads" streamed payload
bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
bundle2-output-part: "phase-heads" 48 bytes payload
bundle2-input-bundle: with-transaction
bundle2-input-part: "replycaps" supported
- bundle2-input-part: total payload size 188
+ bundle2-input-part: total payload size 205
bundle2-input-part: "check:phases" supported
bundle2-input-part: total payload size 48
bundle2-input-part: "check:heads" supported
--- a/tests/test-add.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-add.t Mon Mar 19 08:07:18 2018 -0700
@@ -146,6 +146,13 @@
M a
? a.orig
+excluded file shouldn't be added even if it is explicitly specified
+
+ $ hg add a.orig -X '*.orig'
+ $ hg st
+ M a
+ ? a.orig
+
Forgotten file can be added back (as either clean or modified)
$ hg forget b
@@ -249,3 +256,19 @@
#endif
$ cd ..
+
+test --dry-run mode in forget
+
+ $ hg init testdir_forget
+ $ cd testdir_forget
+ $ echo foo > foo
+ $ hg add foo
+ $ hg commit -m "foo"
+ $ hg forget foo --dry-run -v
+ removing foo
+ $ hg diff
+ $ hg forget not_exist -n
+ not_exist: $ENOENT$
+ [1]
+
+ $ cd ..
--- a/tests/test-alias.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-alias.t Mon Mar 19 08:07:18 2018 -0700
@@ -548,12 +548,12 @@
> from mercurial import cmdutil, commands, registrar
> cmdtable = {}
> command = registrar.command(cmdtable)
- > @command('expandalias')
+ > @command(b'expandalias')
> def expandalias(ui, repo, name):
> alias = cmdutil.findcmd(name, commands.table)[1][0]
- > ui.write('%s args: %s\n' % (name, ' '.join(alias.args)))
+ > ui.write(b'%s args: %s\n' % (name, b' '.join(alias.args)))
> os.environ['COUNT'] = '2'
- > ui.write('%s args: %s (with COUNT=2)\n' % (name, ' '.join(alias.args)))
+ > ui.write(b'%s args: %s (with COUNT=2)\n' % (name, b' '.join(alias.args)))
> EOF
$ cat >> $HGRCPATH <<'EOF'
--- a/tests/test-ancestor.py Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-ancestor.py Mon Mar 19 08:07:18 2018 -0700
@@ -220,9 +220,9 @@
# DAGs that have been known to be problematic, and, optionally, known pairs
# of revisions and their expected ancestor list.
dagtests = [
- ('+2*2*2/*3/2', {}),
- ('+3*3/*2*2/*4*4/*4/2*4/2*2', {}),
- ('+2*2*/2*4*/4*/3*2/4', {(6, 7): [3, 5]}),
+ (b'+2*2*2/*3/2', {}),
+ (b'+3*3/*2*2/*4*4/*4/2*4/2*2', {}),
+ (b'+2*2*/2*4*/4*/3*2/4', {(6, 7): [3, 5]}),
]
def test_gca():
u = uimod.ui.load()
--- a/tests/test-annotate.py Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-annotate.py Mon Mar 19 08:07:18 2018 -0700
@@ -6,7 +6,7 @@
from mercurial import (
mdiff,
)
-from mercurial.context import (
+from mercurial.dagop import (
annotateline,
_annotatepair,
)
@@ -25,9 +25,9 @@
childdata = b'a\nb2\nc\nc2\nd\n'
diffopts = mdiff.diffopts()
- def decorate(text, rev):
- return ([annotateline(fctx=rev, lineno=i)
- for i in xrange(1, text.count(b'\n') + 1)],
+ def decorate(text, fctx):
+ return ([annotateline(fctx=fctx, lineno=i)
+ for i in range(1, text.count(b'\n') + 1)],
text)
# Basic usage
@@ -36,17 +36,17 @@
p1ann = decorate(p1data, p1fctx)
p1ann = _annotatepair([oldann], p1fctx, p1ann, False, diffopts)
self.assertEqual(p1ann[0], [
- annotateline('old', 1),
- annotateline('old', 2),
- annotateline('p1', 3),
+ annotateline(b'old', 1),
+ annotateline(b'old', 2),
+ annotateline(b'p1', 3),
])
p2ann = decorate(p2data, p2fctx)
p2ann = _annotatepair([oldann], p2fctx, p2ann, False, diffopts)
self.assertEqual(p2ann[0], [
- annotateline('old', 1),
- annotateline('p2', 2),
- annotateline('p2', 3),
+ annotateline(b'old', 1),
+ annotateline(b'p2', 2),
+ annotateline(b'p2', 3),
])
# Test with multiple parents (note the difference caused by ordering)
@@ -55,22 +55,22 @@
childann = _annotatepair([p1ann, p2ann], childfctx, childann, False,
diffopts)
self.assertEqual(childann[0], [
- annotateline('old', 1),
- annotateline('c', 2),
- annotateline('p2', 2),
- annotateline('c', 4),
- annotateline('p2', 3),
+ annotateline(b'old', 1),
+ annotateline(b'c', 2),
+ annotateline(b'p2', 2),
+ annotateline(b'c', 4),
+ annotateline(b'p2', 3),
])
childann = decorate(childdata, childfctx)
childann = _annotatepair([p2ann, p1ann], childfctx, childann, False,
diffopts)
self.assertEqual(childann[0], [
- annotateline('old', 1),
- annotateline('c', 2),
- annotateline('p1', 3),
- annotateline('c', 4),
- annotateline('p2', 3),
+ annotateline(b'old', 1),
+ annotateline(b'c', 2),
+ annotateline(b'p1', 3),
+ annotateline(b'c', 4),
+ annotateline(b'p2', 3),
])
# Test with skipchild (note the difference caused by ordering)
@@ -79,24 +79,24 @@
childann = _annotatepair([p1ann, p2ann], childfctx, childann, True,
diffopts)
self.assertEqual(childann[0], [
- annotateline('old', 1),
- annotateline('old', 2, True),
+ annotateline(b'old', 1),
+ annotateline(b'old', 2, True),
# note that this line was carried over from earlier so it is *not*
# marked skipped
- annotateline('p2', 2),
- annotateline('p2', 2, True),
- annotateline('p2', 3),
+ annotateline(b'p2', 2),
+ annotateline(b'p2', 2, True),
+ annotateline(b'p2', 3),
])
childann = decorate(childdata, childfctx)
childann = _annotatepair([p2ann, p1ann], childfctx, childann, True,
diffopts)
self.assertEqual(childann[0], [
- annotateline('old', 1),
- annotateline('old', 2, True),
- annotateline('p1', 3),
- annotateline('p1', 3, True),
- annotateline('p2', 3),
+ annotateline(b'old', 1),
+ annotateline(b'old', 2, True),
+ annotateline(b'p1', 3),
+ annotateline(b'p1', 3, True),
+ annotateline(b'p2', 3),
])
if __name__ == '__main__':
--- a/tests/test-annotate.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-annotate.t Mon Mar 19 08:07:18 2018 -0700
@@ -71,6 +71,11 @@
}
]
+log-like templating
+
+ $ hg annotate -T'{lines % "{rev} {node|shortest}: {line}"}' a
+ 0 8435: a
+
$ cat <<EOF >>a
> a
> a
@@ -814,6 +819,8 @@
[255]
$ hg log -r 'followlines(baz, 2:4, startrev=20, descend=[1])'
hg: parse error at 43: not a prefix: [
+ (followlines(baz, 2:4, startrev=20, descend=[1])
+ ^ here)
[255]
$ hg log -r 'followlines(baz, 2:4, startrev=20, descend=a)'
hg: parse error: descend argument must be a boolean
@@ -912,10 +919,10 @@
> EOF
>>> with open('a', 'wb') as f:
- ... f.write(b'0a\r0b\r\n0c\r0d\r\n0e\n0f\n0g')
+ ... f.write(b'0a\r0b\r\n0c\r0d\r\n0e\n0f\n0g') and None
$ hg ci -qAm0
>>> with open('a', 'wb') as f:
- ... f.write(b'0a\r0b\r\n1c\r1d\r\n0e\n1f\n0g')
+ ... f.write(b'0a\r0b\r\n1c\r1d\r\n0e\n1f\n0g') and None
$ hg ci -m1
$ hg annotate -r0 a | $PYTHON "$TESTTMP/substcr.py"
--- a/tests/test-arbitraryfilectx.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-arbitraryfilectx.t Mon Mar 19 08:07:18 2018 -0700
@@ -5,11 +5,11 @@
> from mercurial import commands, context, registrar
> cmdtable = {}
> command = registrar.command(cmdtable)
- > @command(b'eval', [], 'hg eval CMD')
+ > @command(b'eval', [], b'hg eval CMD')
> def eval_(ui, repo, *cmds, **opts):
- > cmd = " ".join(cmds)
+ > cmd = b" ".join(cmds)
> res = str(eval(cmd, globals(), locals()))
- > ui.warn("%s" % res)
+ > ui.warn(b"%s" % res)
> EOF
$ echo "[extensions]" >> $HGRCPATH
--- a/tests/test-archive.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-archive.t Mon Mar 19 08:07:18 2018 -0700
@@ -106,10 +106,13 @@
> --config extensions.blackbox= --config blackbox.track=develwarn
> cat hg.pid >> $DAEMON_PIDS
> echo % $1 allowed should give 200
- > get-with-headers.py localhost:$HGPORT "archive/tip.$2" | head -n 1
+ > get-with-headers.py --bodyfile body localhost:$HGPORT "archive/tip.$2" -
+ > f --size --sha1 body
> echo % $3 and $4 disallowed should both give 403
- > get-with-headers.py localhost:$HGPORT "archive/tip.$3" | head -n 1
- > get-with-headers.py localhost:$HGPORT "archive/tip.$4" | head -n 1
+ > get-with-headers.py --bodyfile body localhost:$HGPORT "archive/tip.$3" -
+ > f --size --sha1 body
+ > get-with-headers.py --bodyfile body localhost:$HGPORT "archive/tip.$4" -
+ > f --size --sha1 body
> killdaemons.py
> cat errors.log
> hg blackbox --config extensions.blackbox= --config blackbox.track=
@@ -121,42 +124,174 @@
$ test_archtype gz tar.gz tar.bz2 zip
% gz allowed should give 200
200 Script output follows
+ content-disposition: attachment; filename=test-archive-1701ef1f1510.tar.gz
+ content-type: application/x-gzip
+ date: $HTTP_DATE$
+ etag: W/"*" (glob)
+ server: testing stub value
+ transfer-encoding: chunked
+
+ body: size=408, sha1=8fa06531bddecc365a9f5edb0f88b65974bfe505
% tar.bz2 and zip disallowed should both give 403
403 Archive type not allowed: bz2
+ content-type: text/html; charset=ascii
+ date: $HTTP_DATE$
+ etag: W/"*" (glob)
+ server: testing stub value
+ transfer-encoding: chunked
+
+ body: size=1451, sha1=4c5cf0f574446c44feb7f88f4e0e2a56bd92c352
403 Archive type not allowed: zip
+ content-type: text/html; charset=ascii
+ date: $HTTP_DATE$
+ etag: W/"*" (glob)
+ server: testing stub value
+ transfer-encoding: chunked
+
+ body: size=1451, sha1=cbfa5574b337348bfd0564cc534474d002e7d6c7
$ test_archtype bz2 tar.bz2 zip tar.gz
% bz2 allowed should give 200
200 Script output follows
+ content-disposition: attachment; filename=test-archive-1701ef1f1510.tar.bz2
+ content-type: application/x-bzip2
+ date: $HTTP_DATE$
+ etag: W/"*" (glob)
+ server: testing stub value
+ transfer-encoding: chunked
+
+ body: size=426, sha1=8d87f5aba6e14f1bfea6c232985982c278b2fb0b
% zip and tar.gz disallowed should both give 403
403 Archive type not allowed: zip
+ content-type: text/html; charset=ascii
+ date: $HTTP_DATE$
+ etag: W/"*" (glob)
+ server: testing stub value
+ transfer-encoding: chunked
+
+ body: size=1451, sha1=cbfa5574b337348bfd0564cc534474d002e7d6c7
403 Archive type not allowed: gz
+ content-type: text/html; charset=ascii
+ date: $HTTP_DATE$
+ etag: W/"*" (glob)
+ server: testing stub value
+ transfer-encoding: chunked
+
+ body: size=1450, sha1=71f0b12d59f85fdcfe8ff493e2dc66863f2f7734
$ test_archtype zip zip tar.gz tar.bz2
% zip allowed should give 200
200 Script output follows
+ content-disposition: attachment; filename=test-archive-1701ef1f1510.zip
+ content-type: application/zip
+ date: $HTTP_DATE$
+ etag: W/"*" (glob)
+ server: testing stub value
+ transfer-encoding: chunked
+
+ body: size=1377, sha1=677b14d3d048778d5eb5552c14a67e6192068650
% tar.gz and tar.bz2 disallowed should both give 403
403 Archive type not allowed: gz
+ content-type: text/html; charset=ascii
+ date: $HTTP_DATE$
+ etag: W/"*" (glob)
+ server: testing stub value
+ transfer-encoding: chunked
+
+ body: size=1450, sha1=71f0b12d59f85fdcfe8ff493e2dc66863f2f7734
403 Archive type not allowed: bz2
+ content-type: text/html; charset=ascii
+ date: $HTTP_DATE$
+ etag: W/"*" (glob)
+ server: testing stub value
+ transfer-encoding: chunked
+
+ body: size=1451, sha1=4c5cf0f574446c44feb7f88f4e0e2a56bd92c352
check http return codes (with deprecated option)
$ test_archtype_deprecated gz tar.gz tar.bz2 zip
% gz allowed should give 200
200 Script output follows
+ content-disposition: attachment; filename=test-archive-1701ef1f1510.tar.gz
+ content-type: application/x-gzip
+ date: $HTTP_DATE$
+ etag: W/"*" (glob)
+ server: testing stub value
+ transfer-encoding: chunked
+
+ body: size=408, sha1=8fa06531bddecc365a9f5edb0f88b65974bfe505
% tar.bz2 and zip disallowed should both give 403
403 Archive type not allowed: bz2
+ content-type: text/html; charset=ascii
+ date: $HTTP_DATE$
+ etag: W/"*" (glob)
+ server: testing stub value
+ transfer-encoding: chunked
+
+ body: size=1451, sha1=4c5cf0f574446c44feb7f88f4e0e2a56bd92c352
403 Archive type not allowed: zip
+ content-type: text/html; charset=ascii
+ date: $HTTP_DATE$
+ etag: W/"*" (glob)
+ server: testing stub value
+ transfer-encoding: chunked
+
+ body: size=1451, sha1=cbfa5574b337348bfd0564cc534474d002e7d6c7
$ test_archtype_deprecated bz2 tar.bz2 zip tar.gz
% bz2 allowed should give 200
200 Script output follows
+ content-disposition: attachment; filename=test-archive-1701ef1f1510.tar.bz2
+ content-type: application/x-bzip2
+ date: $HTTP_DATE$
+ etag: W/"*" (glob)
+ server: testing stub value
+ transfer-encoding: chunked
+
+ body: size=426, sha1=8d87f5aba6e14f1bfea6c232985982c278b2fb0b
% zip and tar.gz disallowed should both give 403
403 Archive type not allowed: zip
+ content-type: text/html; charset=ascii
+ date: $HTTP_DATE$
+ etag: W/"*" (glob)
+ server: testing stub value
+ transfer-encoding: chunked
+
+ body: size=1451, sha1=cbfa5574b337348bfd0564cc534474d002e7d6c7
403 Archive type not allowed: gz
+ content-type: text/html; charset=ascii
+ date: $HTTP_DATE$
+ etag: W/"*" (glob)
+ server: testing stub value
+ transfer-encoding: chunked
+
+ body: size=1450, sha1=71f0b12d59f85fdcfe8ff493e2dc66863f2f7734
$ test_archtype_deprecated zip zip tar.gz tar.bz2
% zip allowed should give 200
200 Script output follows
+ content-disposition: attachment; filename=test-archive-1701ef1f1510.zip
+ content-type: application/zip
+ date: $HTTP_DATE$
+ etag: W/"*" (glob)
+ server: testing stub value
+ transfer-encoding: chunked
+
+ body: size=1377, sha1=677b14d3d048778d5eb5552c14a67e6192068650
% tar.gz and tar.bz2 disallowed should both give 403
403 Archive type not allowed: gz
+ content-type: text/html; charset=ascii
+ date: $HTTP_DATE$
+ etag: W/"*" (glob)
+ server: testing stub value
+ transfer-encoding: chunked
+
+ body: size=1450, sha1=71f0b12d59f85fdcfe8ff493e2dc66863f2f7734
403 Archive type not allowed: bz2
+ content-type: text/html; charset=ascii
+ date: $HTTP_DATE$
+ etag: W/"*" (glob)
+ server: testing stub value
+ transfer-encoding: chunked
+
+ body: size=1451, sha1=4c5cf0f574446c44feb7f88f4e0e2a56bd92c352
$ echo "allow_archive = gz bz2 zip" >> .hg/hgrc
$ hg serve -p $HGPORT -d --pid-file=hg.pid -E errors.log
--- a/tests/test-atomictempfile.py Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-atomictempfile.py Mon Mar 19 08:07:18 2018 -0700
@@ -3,18 +3,23 @@
import glob
import os
import shutil
+import stat
import tempfile
import unittest
from mercurial import (
+ pycompat,
util,
)
atomictempfile = util.atomictempfile
+if pycompat.ispy3:
+ xrange = range
+
class testatomictempfile(unittest.TestCase):
def setUp(self):
- self._testdir = tempfile.mkdtemp('atomictempfiletest')
- self._filename = os.path.join(self._testdir, 'testfilename')
+ self._testdir = tempfile.mkdtemp(b'atomictempfiletest')
+ self._filename = os.path.join(self._testdir, b'testfilename')
def tearDown(self):
shutil.rmtree(self._testdir, True)
@@ -24,14 +29,14 @@
self.assertFalse(os.path.isfile(self._filename))
tempfilename = file._tempname
self.assertTrue(tempfilename in glob.glob(
- os.path.join(self._testdir, '.testfilename-*')))
+ os.path.join(self._testdir, b'.testfilename-*')))
file.write(b'argh\n')
file.close()
self.assertTrue(os.path.isfile(self._filename))
self.assertTrue(tempfilename not in glob.glob(
- os.path.join(self._testdir, '.testfilename-*')))
+ os.path.join(self._testdir, b'.testfilename-*')))
# discard() removes the temp file without making the write permanent
def testdiscard(self):
@@ -42,7 +47,7 @@
file.discard()
self.assertFalse(os.path.isfile(self._filename))
- self.assertTrue(basename not in os.listdir('.'))
+ self.assertTrue(basename not in os.listdir(b'.'))
# if a programmer screws up and passes bad args to atomictempfile, they
# get a plain ordinary TypeError, not infinite recursion
@@ -54,7 +59,7 @@
def testcheckambig(self):
def atomicwrite(checkambig):
f = atomictempfile(self._filename, checkambig=checkambig)
- f.write('FOO')
+ f.write(b'FOO')
f.close()
# try some times, because reproduction of ambiguity depends on
@@ -62,7 +67,7 @@
for i in xrange(5):
atomicwrite(False)
oldstat = os.stat(self._filename)
- if oldstat.st_ctime != oldstat.st_mtime:
+ if oldstat[stat.ST_CTIME] != oldstat[stat.ST_MTIME]:
# subsequent changing never causes ambiguity
continue
@@ -73,14 +78,14 @@
for j in xrange(repetition):
atomicwrite(True)
newstat = os.stat(self._filename)
- if oldstat.st_ctime != newstat.st_ctime:
+ if oldstat[stat.ST_CTIME] != newstat[stat.ST_CTIME]:
# timestamp ambiguity was naturally avoided while repetition
continue
# st_mtime should be advanced "repetition" times, because
# all atomicwrite() occurred at same time (in sec)
- self.assertTrue(newstat.st_mtime ==
- ((oldstat.st_mtime + repetition) & 0x7fffffff))
+ oldtime = (oldstat[stat.ST_MTIME] + repetition) & 0x7fffffff
+ self.assertTrue(newstat[stat.ST_MTIME] == oldtime)
# no more examination is needed, if assumption above is true
break
else:
@@ -93,27 +98,27 @@
def testread(self):
with open(self._filename, 'wb') as f:
f.write(b'foobar\n')
- file = atomictempfile(self._filename, mode='rb')
+ file = atomictempfile(self._filename, mode=b'rb')
self.assertTrue(file.read(), b'foobar\n')
file.discard()
def testcontextmanagersuccess(self):
"""When the context closes, the file is closed"""
- with atomictempfile('foo') as f:
- self.assertFalse(os.path.isfile('foo'))
+ with atomictempfile(b'foo') as f:
+ self.assertFalse(os.path.isfile(b'foo'))
f.write(b'argh\n')
- self.assertTrue(os.path.isfile('foo'))
+ self.assertTrue(os.path.isfile(b'foo'))
def testcontextmanagerfailure(self):
"""On exception, the file is discarded"""
try:
- with atomictempfile('foo') as f:
- self.assertFalse(os.path.isfile('foo'))
+ with atomictempfile(b'foo') as f:
+ self.assertFalse(os.path.isfile(b'foo'))
f.write(b'argh\n')
raise ValueError
except ValueError:
pass
- self.assertFalse(os.path.isfile('foo'))
+ self.assertFalse(os.path.isfile(b'foo'))
if __name__ == '__main__':
import silenttestrunner
--- a/tests/test-basic.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-basic.t Mon Mar 19 08:07:18 2018 -0700
@@ -12,6 +12,7 @@
ui.promptecho=True
web.address=localhost
web\.ipv6=(?:True|False) (re)
+ web.server-header=testing stub value
$ hg init t
$ cd t
@@ -59,7 +60,7 @@
$ cat <<EOF > update_to_rev0.py
> from mercurial import ui, hg, commands
> myui = ui.ui.load()
- > repo = hg.repository(myui, path='.')
+ > repo = hg.repository(myui, path=b'.')
> commands.update(myui, repo, rev=0)
> EOF
$ hg up null
@@ -87,6 +88,13 @@
checking files
1 files, 1 changesets, 1 total revisions
+Repository root:
+
+ $ hg root
+ $TESTTMP/t
+ $ hg log -l1 -T '{reporoot}\n'
+ $TESTTMP/t
+
At the end...
$ cd ..
--- a/tests/test-bisect.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-bisect.t Mon Mar 19 08:07:18 2018 -0700
@@ -465,8 +465,8 @@
> from __future__ import absolute_import
> import sys
> from mercurial import hg, ui as uimod
- > repo = hg.repository(uimod.ui.load(), '.')
- > if repo['.'].rev() < 6:
+ > repo = hg.repository(uimod.ui.load(), b'.')
+ > if repo[b'.'].rev() < 6:
> sys.exit(1)
> EOF
$ chmod +x script.py
--- a/tests/test-blackbox.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-blackbox.t Mon Mar 19 08:07:18 2018 -0700
@@ -265,7 +265,7 @@
> from mercurial import context, error, extensions
> x=[False]
> def status(orig, *args, **opts):
- > args[0].repo().ui.log("broken", "recursion?")
+ > args[0].repo().ui.log(b"broken", b"recursion?")
> return orig(*args, **opts)
> def reposetup(ui, repo):
> extensions.wrapfunction(context.basectx, 'status', status)
@@ -344,7 +344,7 @@
> from mercurial import registrar, scmutil
> cmdtable = {}
> command = registrar.command(cmdtable)
- > @command('raise')
+ > @command(b'raise')
> def raisecmd(*args):
> raise RuntimeError('raise')
> EOF
--- a/tests/test-bookmarks-pushpull.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-bookmarks-pushpull.t Mon Mar 19 08:07:18 2018 -0700
@@ -129,10 +129,10 @@
bundle2-output: bundle parameter:
bundle2-output: start of parts
bundle2-output: bundle part: "replycaps"
- bundle2-output-part: "replycaps" 205 bytes payload
+ bundle2-output-part: "replycaps" 222 bytes payload
bundle2-output: part 0: "REPLYCAPS"
bundle2-output: header chunk size: 16
- bundle2-output: payload chunk size: 205
+ bundle2-output: payload chunk size: 222
bundle2-output: closing payload chunk
bundle2-output: bundle part: "check:bookmarks"
bundle2-output-part: "check:bookmarks" 23 bytes payload
@@ -162,9 +162,9 @@
bundle2-input: part parameters: 0
bundle2-input: found a handler for part replycaps
bundle2-input-part: "replycaps" supported
- bundle2-input: payload chunk size: 205
+ bundle2-input: payload chunk size: 222
bundle2-input: payload chunk size: 0
- bundle2-input-part: total payload size 205
+ bundle2-input-part: total payload size 222
bundle2-input: part header size: 22
bundle2-input: part type: "CHECK:BOOKMARKS"
bundle2-input: part id: "1"
@@ -241,10 +241,10 @@
bundle2-output: bundle parameter:
bundle2-output: start of parts
bundle2-output: bundle part: "replycaps"
- bundle2-output-part: "replycaps" 205 bytes payload
+ bundle2-output-part: "replycaps" 222 bytes payload
bundle2-output: part 0: "REPLYCAPS"
bundle2-output: header chunk size: 16
- bundle2-output: payload chunk size: 205
+ bundle2-output: payload chunk size: 222
bundle2-output: closing payload chunk
bundle2-output: bundle part: "check:bookmarks"
bundle2-output-part: "check:bookmarks" 23 bytes payload
@@ -275,9 +275,9 @@
bundle2-input: part parameters: 0
bundle2-input: found a handler for part replycaps
bundle2-input-part: "replycaps" supported
- bundle2-input: payload chunk size: 205
+ bundle2-input: payload chunk size: 222
bundle2-input: payload chunk size: 0
- bundle2-input-part: total payload size 205
+ bundle2-input-part: total payload size 222
bundle2-input: part header size: 22
bundle2-input: part type: "CHECK:BOOKMARKS"
bundle2-input: part id: "1"
@@ -1030,6 +1030,34 @@
no changes found
[1]
+Pushing a really long bookmark should work fine (issue5165)
+===============================================
+
+#if b2-binary
+ >>> open('longname', 'w').write('wat' * 100)
+ $ hg book `cat longname`
+ $ hg push -B `cat longname` ../unchanged-b
+ pushing to ../unchanged-b
+ searching for changes
+ no changes found
+ exporting bookmark (wat){100} (re)
+ [1]
+ $ hg -R ../unchanged-b book --delete `cat longname`
+
+Test again but forcing bundle2 exchange to make sure that doesn't regress.
+
+ $ hg push -B `cat longname` ../unchanged-b --config devel.legacy.exchange=bundle1
+ pushing to ../unchanged-b
+ searching for changes
+ no changes found
+ exporting bookmark (wat){100} (re)
+ [1]
+ $ hg -R ../unchanged-b book --delete `cat longname`
+ $ hg book --delete `cat longname`
+ $ hg co @
+ 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ (activating bookmark @)
+#endif
Check hook preventing push (issue4455)
======================================
--- a/tests/test-bookmarks.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-bookmarks.t Mon Mar 19 08:07:18 2018 -0700
@@ -980,14 +980,14 @@
> tr = orig(self, desc, report)
> def sleep(*args, **kwargs):
> retry = 20
- > while retry > 0 and not os.path.exists("$TESTTMP/unpause"):
+ > while retry > 0 and not os.path.exists(b"$TESTTMP/unpause"):
> retry -= 1
> time.sleep(0.5)
- > if os.path.exists("$TESTTMP/unpause"):
- > os.remove("$TESTTMP/unpause")
+ > if os.path.exists(b"$TESTTMP/unpause"):
+ > os.remove(b"$TESTTMP/unpause")
> # It is important that this finalizer start with 'a', so it runs before
> # the changelog finalizer appends to the changelog.
- > tr.addfinalize('a-sleep', sleep)
+ > tr.addfinalize(b'a-sleep', sleep)
> return tr
>
> def extsetup(ui):
--- a/tests/test-bundle-phases.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-bundle-phases.t Mon Mar 19 08:07:18 2018 -0700
@@ -42,6 +42,7 @@
26805aba1e600a82e93661149f2313866a221a7b
f585351a92f85104bff7c284233c338b10eb1df7
9bc730a19041f9ec7cb33c626e811aa233efb18c
+ cache:rev-branch-cache -- {}
phase-heads -- {}
26805aba1e600a82e93661149f2313866a221a7b draft
$ hg strip --no-backup C
@@ -233,6 +234,7 @@
dc0947a82db884575bb76ea10ac97b08536bfa03
4e4f9194f9f181c57f62e823e8bdfa46ab9e4ff4
03ca77807e919db8807c3749086dc36fb478cac0
+ cache:rev-branch-cache -- {}
phase-heads -- {}
dc0947a82db884575bb76ea10ac97b08536bfa03 public
03ca77807e919db8807c3749086dc36fb478cac0 draft
@@ -258,6 +260,7 @@
changegroup -- {nbchanges: 2, targetphase: 2, version: 02}
112478962961147124edd43549aedd1a335e44bf
4e4f9194f9f181c57f62e823e8bdfa46ab9e4ff4
+ cache:rev-branch-cache -- {}
phase-heads -- {}
$ rm bundle
@@ -269,6 +272,7 @@
112478962961147124edd43549aedd1a335e44bf
dc0947a82db884575bb76ea10ac97b08536bfa03
4e4f9194f9f181c57f62e823e8bdfa46ab9e4ff4
+ cache:rev-branch-cache -- {}
phase-heads -- {}
dc0947a82db884575bb76ea10ac97b08536bfa03 public
$ rm bundle
@@ -280,6 +284,7 @@
changegroup -- {nbchanges: 2, targetphase: 2, version: 02}
4e4f9194f9f181c57f62e823e8bdfa46ab9e4ff4
03ca77807e919db8807c3749086dc36fb478cac0
+ cache:rev-branch-cache -- {}
phase-heads -- {}
03ca77807e919db8807c3749086dc36fb478cac0 draft
$ rm bundle
--- a/tests/test-bundle-type.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-bundle-type.t Mon Mar 19 08:07:18 2018 -0700
@@ -76,6 +76,7 @@
Stream params: {}
changegroup -- {nbchanges: 1, version: 02}
c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
+ cache:rev-branch-cache -- {}
none-v2
% test bundle type bzip2
@@ -85,6 +86,7 @@
Stream params: {Compression: BZ}
changegroup -- {nbchanges: 1, version: 02}
c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
+ cache:rev-branch-cache -- {}
bzip2-v2
% test bundle type gzip
@@ -94,6 +96,7 @@
Stream params: {Compression: GZ}
changegroup -- {nbchanges: 1, version: 02}
c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
+ cache:rev-branch-cache -- {}
gzip-v2
% test bundle type none-v2
@@ -103,6 +106,7 @@
Stream params: {}
changegroup -- {nbchanges: 1, version: 02}
c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
+ cache:rev-branch-cache -- {}
none-v2
% test bundle type v2
@@ -112,6 +116,7 @@
Stream params: {Compression: BZ}
changegroup -- {nbchanges: 1, version: 02}
c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
+ cache:rev-branch-cache -- {}
bzip2-v2
% test bundle type v1
@@ -150,12 +155,12 @@
$ hg bundle -a -t gzip-v2 gzip-v2.hg
1 changesets found
$ f --size gzip-v2.hg
- gzip-v2.hg: size=427
+ gzip-v2.hg: size=468
$ hg --config experimental.bundlecomplevel=1 bundle -a -t gzip-v2 gzip-v2-level1.hg
1 changesets found
$ f --size gzip-v2-level1.hg
- gzip-v2-level1.hg: size=435
+ gzip-v2-level1.hg: size=475
$ cd ..
@@ -171,6 +176,7 @@
Stream params: {Compression: ZS}
changegroup -- {nbchanges: 1, version: 02}
c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
+ cache:rev-branch-cache -- {}
zstd-v2
% test bundle type zstd-v2
@@ -180,6 +186,7 @@
Stream params: {Compression: ZS}
changegroup -- {nbchanges: 1, version: 02}
c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
+ cache:rev-branch-cache -- {}
zstd-v2
--- a/tests/test-bundle.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-bundle.t Mon Mar 19 08:07:18 2018 -0700
@@ -774,7 +774,7 @@
list of changesets:
1a38c1b849e8b70c756d2d80b0b9a3ac0b7ea11a
057f4db07f61970e1c11e83be79e9d08adc4dc31
- bundle2-output-bundle: "HG20", (1 params) 1 parts total
+ bundle2-output-bundle: "HG20", (1 params) 2 parts total
bundle2-output-part: "changegroup" (params: 1 mandatory 1 advisory) streamed payload
bundling: 1/2 changesets (50.00%)
bundling: 2/2 changesets (100.00%)
@@ -783,6 +783,7 @@
bundling: b 1/3 files (33.33%)
bundling: b1 2/3 files (66.67%)
bundling: x 3/3 files (100.00%)
+ bundle2-output-part: "cache:rev-branch-cache" streamed payload
== Test for issue3441
--- a/tests/test-bundle2-exchange.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-bundle2-exchange.t Mon Mar 19 08:07:18 2018 -0700
@@ -1,3 +1,13 @@
+#testcases sshv1 sshv2
+
+#if sshv2
+ $ cat >> $HGRCPATH << EOF
+ > [experimental]
+ > sshpeer.advertise-v2 = true
+ > sshserver.support-v2 = true
+ > EOF
+#endif
+
Test exchange of common information using bundle2
--- a/tests/test-bundle2-pushback.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-bundle2-pushback.t Mon Mar 19 08:07:18 2018 -0700
@@ -1,3 +1,13 @@
+#testcases sshv1 sshv2
+
+#if sshv2
+ $ cat >> $HGRCPATH << EOF
+ > [experimental]
+ > sshpeer.advertise-v2 = true
+ > sshserver.support-v2 = true
+ > EOF
+#endif
+
$ cat > bundle2.py << EOF
> """A small extension to test bundle2 pushback parts.
> Current bundle2 implementation doesn't provide a way to generate those
--- a/tests/test-bundle2-remote-changegroup.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-bundle2-remote-changegroup.t Mon Mar 19 08:07:18 2018 -0700
@@ -1,5 +1,15 @@
#require killdaemons
+#testcases sshv1 sshv2
+
+#if sshv2
+ $ cat >> $HGRCPATH << EOF
+ > [experimental]
+ > sshpeer.advertise-v2 = true
+ > sshserver.support-v2 = true
+ > EOF
+#endif
+
Create an extension to test bundle2 remote-changegroup parts
$ cat > bundle2.py << EOF
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-cappedreader.py Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,91 @@
+from __future__ import absolute_import, print_function
+
+import io
+import unittest
+
+from mercurial import (
+ util,
+)
+
+class CappedReaderTests(unittest.TestCase):
+ def testreadfull(self):
+ source = io.BytesIO(b'x' * 100)
+
+ reader = util.cappedreader(source, 10)
+ res = reader.read(10)
+ self.assertEqual(res, b'x' * 10)
+ self.assertEqual(source.tell(), 10)
+ source.seek(0)
+
+ reader = util.cappedreader(source, 15)
+ res = reader.read(16)
+ self.assertEqual(res, b'x' * 15)
+ self.assertEqual(source.tell(), 15)
+ source.seek(0)
+
+ reader = util.cappedreader(source, 100)
+ res = reader.read(100)
+ self.assertEqual(res, b'x' * 100)
+ self.assertEqual(source.tell(), 100)
+ source.seek(0)
+
+ reader = util.cappedreader(source, 50)
+ res = reader.read()
+ self.assertEqual(res, b'x' * 50)
+ self.assertEqual(source.tell(), 50)
+ source.seek(0)
+
+ def testreadnegative(self):
+ source = io.BytesIO(b'x' * 100)
+
+ reader = util.cappedreader(source, 20)
+ res = reader.read(-1)
+ self.assertEqual(res, b'x' * 20)
+ self.assertEqual(source.tell(), 20)
+ source.seek(0)
+
+ reader = util.cappedreader(source, 100)
+ res = reader.read(-1)
+ self.assertEqual(res, b'x' * 100)
+ self.assertEqual(source.tell(), 100)
+ source.seek(0)
+
+ def testreadmultiple(self):
+ source = io.BytesIO(b'x' * 100)
+
+ reader = util.cappedreader(source, 10)
+ for i in range(10):
+ res = reader.read(1)
+ self.assertEqual(res, b'x')
+ self.assertEqual(source.tell(), i + 1)
+
+ self.assertEqual(source.tell(), 10)
+ res = reader.read(1)
+ self.assertEqual(res, b'')
+ self.assertEqual(source.tell(), 10)
+ source.seek(0)
+
+ reader = util.cappedreader(source, 45)
+ for i in range(4):
+ res = reader.read(10)
+ self.assertEqual(res, b'x' * 10)
+ self.assertEqual(source.tell(), (i + 1) * 10)
+
+ res = reader.read(10)
+ self.assertEqual(res, b'x' * 5)
+ self.assertEqual(source.tell(), 45)
+
+ def readlimitpasteof(self):
+ source = io.BytesIO(b'x' * 100)
+
+ reader = util.cappedreader(source, 1024)
+ res = reader.read(1000)
+ self.assertEqual(res, b'x' * 100)
+ self.assertEqual(source.tell(), 100)
+ res = reader.read(1000)
+ self.assertEqual(res, b'')
+ self.assertEqual(source.tell(), 100)
+
+if __name__ == '__main__':
+ import silenttestrunner
+ silenttestrunner.main(__name__)
--- a/tests/test-check-code.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-check-code.t Mon Mar 19 08:07:18 2018 -0700
@@ -13,8 +13,6 @@
> -X mercurial/thirdparty \
> | sed 's-\\-/-g' | "$check_code" --warnings --per-file=0 - || false
Skipping i18n/polib.py it has no-che?k-code (glob)
- Skipping mercurial/httpclient/__init__.py it has no-che?k-code (glob)
- Skipping mercurial/httpclient/_readers.py it has no-che?k-code (glob)
Skipping mercurial/statprof.py it has no-che?k-code (glob)
Skipping tests/badserverext.py it has no-che?k-code (glob)
--- a/tests/test-check-help.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-check-help.t Mon Mar 19 08:07:18 2018 -0700
@@ -10,9 +10,9 @@
> import os, msvcrt
> msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
> topics = set()
- > topicre = re.compile(r':hg:`help ([a-z0-9\-.]+)`')
+ > topicre = re.compile(br':hg:`help ([a-z0-9\-.]+)`')
> for fname in sys.argv:
- > with open(fname) as f:
+ > with open(fname, 'rb') as f:
> topics.update(m.group(1) for m in topicre.finditer(f.read()))
> for s in sorted(topics):
> print(s)
--- a/tests/test-check-interfaces.py Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-check-interfaces.py Mon Mar 19 08:07:18 2018 -0700
@@ -50,26 +50,32 @@
def _restrictcapabilities(self, caps):
pass
+class dummyopener(object):
+ handlers = []
+
# Facilitates testing sshpeer without requiring an SSH server.
-class testingsshpeer(sshpeer.sshpeer):
- def _validaterepo(self, *args, **kwargs):
- pass
-
class badpeer(httppeer.httppeer):
def __init__(self):
- super(badpeer, self).__init__(uimod.ui(), 'http://localhost')
+ super(badpeer, self).__init__(None, None, None, dummyopener())
self.badattribute = True
def badmethod(self):
pass
+class dummypipe(object):
+ def close(self):
+ pass
+
def main():
ui = uimod.ui()
checkobject(badpeer())
- checkobject(httppeer.httppeer(ui, 'http://localhost'))
+ checkobject(httppeer.httppeer(None, None, None, dummyopener()))
checkobject(localrepo.localpeer(dummyrepo()))
- checkobject(testingsshpeer(ui, 'ssh://localhost/foo'))
+ checkobject(sshpeer.sshv1peer(ui, 'ssh://localhost/foo', None, dummypipe(),
+ dummypipe(), None, None))
+ checkobject(sshpeer.sshv2peer(ui, 'ssh://localhost/foo', None, dummypipe(),
+ dummypipe(), None, None))
checkobject(bundlerepo.bundlepeer(dummyrepo()))
checkobject(statichttprepo.statichttppeer(dummyrepo()))
checkobject(unionrepo.unionpeer(dummyrepo()))
--- a/tests/test-clone-uncompressed.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-clone-uncompressed.t Mon Mar 19 08:07:18 2018 -0700
@@ -71,6 +71,7 @@
remote-changegroup
http
https
+ rev-branch-cache
$ hg clone --stream -U http://localhost:$HGPORT server-disabled
warning: stream clone requested but server has them disabled
@@ -136,6 +137,7 @@
remote-changegroup
http
https
+ rev-branch-cache
$ hg clone --stream -U http://localhost:$HGPORT server-disabled
warning: stream clone requested but server has them disabled
--- a/tests/test-clone.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-clone.t Mon Mar 19 08:07:18 2018 -0700
@@ -1,3 +1,13 @@
+#testcases sshv1 sshv2
+
+#if sshv2
+ $ cat >> $HGRCPATH << EOF
+ > [experimental]
+ > sshpeer.advertise-v2 = true
+ > sshserver.support-v2 = true
+ > EOF
+#endif
+
Prepare repo a:
$ hg init a
@@ -10,7 +20,7 @@
Create a non-inlined filelog:
- $ $PYTHON -c 'file("data1", "wb").write("".join("%s\n" % x for x in range(10000)))'
+ $ $PYTHON -c 'open("data1", "wb").write(b"".join(b"%d\n" % x for x in range(10000)))'
$ for j in 0 1 2 3 4 5 6 7 8 9; do
> cat data1 >> b
> hg commit -m test
@@ -1142,12 +1152,14 @@
#if windows
$ hg clone "ssh://%26touch%20owned%20/" --debug
running sh -c "read l; read l; read l" "&touch owned " "hg -R . serve --stdio"
+ sending upgrade request: * proto=exp-ssh-v2-0001 (glob) (sshv2 !)
sending hello command
sending between command
abort: no suitable response from remote hg!
[255]
$ hg clone "ssh://example.com:%26touch%20owned%20/" --debug
running sh -c "read l; read l; read l" -p "&touch owned " example.com "hg -R . serve --stdio"
+ sending upgrade request: * proto=exp-ssh-v2-0001 (glob) (sshv2 !)
sending hello command
sending between command
abort: no suitable response from remote hg!
@@ -1155,12 +1167,14 @@
#else
$ hg clone "ssh://%3btouch%20owned%20/" --debug
running sh -c "read l; read l; read l" ';touch owned ' 'hg -R . serve --stdio'
+ sending upgrade request: * proto=exp-ssh-v2-0001 (glob) (sshv2 !)
sending hello command
sending between command
abort: no suitable response from remote hg!
[255]
$ hg clone "ssh://example.com:%3btouch%20owned%20/" --debug
running sh -c "read l; read l; read l" -p ';touch owned ' example.com 'hg -R . serve --stdio'
+ sending upgrade request: * proto=exp-ssh-v2-0001 (glob) (sshv2 !)
sending hello command
sending between command
abort: no suitable response from remote hg!
@@ -1169,6 +1183,7 @@
$ hg clone "ssh://v-alid.example.com/" --debug
running sh -c "read l; read l; read l" v-alid\.example\.com ['"]hg -R \. serve --stdio['"] (re)
+ sending upgrade request: * proto=exp-ssh-v2-0001 (glob) (sshv2 !)
sending hello command
sending between command
abort: no suitable response from remote hg!
--- a/tests/test-clonebundles.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-clonebundles.t Mon Mar 19 08:07:18 2018 -0700
@@ -53,7 +53,7 @@
$ echo 'http://does.not.exist/bundle.hg' > server/.hg/clonebundles.manifest
$ hg clone http://localhost:$HGPORT 404-url
applying clone bundle from http://does.not.exist/bundle.hg
- error fetching bundle: (.* not known|No address associated with hostname) (re) (no-windows !)
+ error fetching bundle: (.* not known|(\[Errno -?\d+])? No address associated with hostname) (re) (no-windows !)
error fetching bundle: [Errno 11004] getaddrinfo failed (windows !)
abort: error applying bundle
(if this error persists, consider contacting the server operator or disable clone bundles via "--config ui.clonebundles=false")
@@ -163,7 +163,7 @@
by old clients.
$ f --size --hexdump full.hg
- full.hg: size=396
+ full.hg: size=442
0000: 48 47 32 30 00 00 00 0e 43 6f 6d 70 72 65 73 73 |HG20....Compress|
0010: 69 6f 6e 3d 47 5a 78 9c 63 60 60 d0 e4 76 f6 70 |ion=GZx.c``..v.p|
0020: f4 73 77 75 0f f2 0f 0d 60 00 02 46 46 76 26 4e |.swu....`..FFv&N|
@@ -188,7 +188,10 @@
0150: 88 75 34 36 75 04 82 55 17 14 36 a4 38 10 04 d8 |.u46u..U..6.8...|
0160: 21 01 9a b1 83 f7 e9 45 8b d2 56 c7 a3 1f 82 52 |!......E..V....R|
0170: d7 8a 78 ed fc d5 76 f1 36 25 81 89 c7 ad ec 90 |..x...v.6%......|
- 0180: 54 47 75 2b 89 49 b1 00 d2 8a eb 92 |TGu+.I......|
+ 0180: 54 47 75 2b 89 48 b1 b2 62 ce 8e ce 1e ae 56 41 |TGu+.H..b.....VA|
+ 0190: ae 61 ba 4e 41 8e 7e ce 1e ba 60 01 a0 14 23 58 |.a.NA.~...`...#X|
+ 01a0: 81 35 c8 7d 40 cc 04 e2 a4 a4 a6 25 96 e6 94 60 |.5.}@......%...`|
+ 01b0: 33 17 5f 54 00 00 01 1b 0a ec |3._T......|
$ echo "http://localhost:$HGPORT1/full.hg" > server/.hg/clonebundles.manifest
$ hg clone -U http://localhost:$HGPORT full-bundle
@@ -529,14 +532,14 @@
$ cat hg.pid >> $DAEMON_PIDS
$ hg -R server debuglfput gz-a.hg
- f6eca29e25359f6a92f1ea64527cdcf1b5abe62a
+ 14ee2f0b3f1d14aeeb2fe037e09fc295c3cf59f5
$ cat > server/.hg/clonebundles.manifest << EOF
- > largefile://f6eca29e25359f6a92f1ea64527cdcf1b5abe62a BUNDLESPEC=gzip-v2
+ > largefile://14ee2f0b3f1d14aeeb2fe037e09fc295c3cf59f5 BUNDLESPEC=gzip-v2
> EOF
$ hg clone -U http://localhost:$HGPORT largefile-provided --traceback
- applying clone bundle from largefile://f6eca29e25359f6a92f1ea64527cdcf1b5abe62a
+ applying clone bundle from largefile://14ee2f0b3f1d14aeeb2fe037e09fc295c3cf59f5
adding changesets
adding manifests
adding file changes
--- a/tests/test-command-template.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-command-template.t Mon Mar 19 08:07:18 2018 -0700
@@ -214,6 +214,8 @@
abort: template resource not available: ctx
[255]
+ $ hg config -T '{author}'
+
Quoting for ui.logtemplate
$ hg tip --config "ui.logtemplate={rev}\n"
@@ -2215,9 +2217,9 @@
>>> from __future__ import absolute_import
>>> import datetime
- >>> fp = open('a', 'w')
+ >>> fp = open('a', 'wb')
>>> n = datetime.datetime.now() + datetime.timedelta(366 * 7)
- >>> fp.write('%d-%d-%d 00:00' % (n.year, n.month, n.day))
+ >>> fp.write(b'%d-%d-%d 00:00' % (n.year, n.month, n.day)) and None
>>> fp.close()
$ hg add a
$ hg commit -m future -d "`cat a`"
@@ -2232,6 +2234,10 @@
$ hg debugtemplate '{"foo/bar"|basename}|{"foo/"|basename}|{"foo"|basename}|\n'
bar||foo|
+ $ hg debugtemplate '{"foo/bar"|dirname}|{"foo/"|dirname}|{"foo"|dirname}|\n'
+ foo|foo||
+ $ hg debugtemplate '{"foo/bar"|stripdir}|{"foo/"|stripdir}|{"foo"|stripdir}|\n'
+ foo|foo|foo|
Add a dummy commit to make up for the instability of the above:
@@ -2760,19 +2766,29 @@
$ hg log -T '{date'
hg: parse error at 1: unterminated template expansion
+ ({date
+ ^ here)
[255]
$ hg log -T '{date(}'
- hg: parse error at 7: not a prefix: end
+ hg: parse error at 6: not a prefix: end
+ ({date(}
+ ^ here)
[255]
$ hg log -T '{date)}'
hg: parse error at 5: invalid token
+ ({date)}
+ ^ here)
[255]
$ hg log -T '{date date}'
hg: parse error at 6: invalid token
+ ({date date}
+ ^ here)
[255]
$ hg log -T '{}'
- hg: parse error at 2: not a prefix: end
+ hg: parse error at 1: not a prefix: end
+ ({}
+ ^ here)
[255]
$ hg debugtemplate -v '{()}'
(template
@@ -2821,10 +2837,14 @@
$ hg log -T '{"date'
hg: parse error at 2: unterminated string
+ ({"date
+ ^ here)
[255]
$ hg log -T '{"foo{date|?}"}'
hg: parse error at 11: syntax error
+ ({"foo{date|?}"}
+ ^ here)
[255]
Thrown an error if a template function doesn't exist
@@ -3222,6 +3242,35 @@
$ hg log -R latesttag -l1 -T '{max(revset("9:10"))}\n'
10
+Test min/max of if() result
+
+ $ cd latesttag
+ $ hg log -l1 -T '{min(if(true, revset("9:10"), ""))}\n'
+ 9
+ $ hg log -l1 -T '{max(if(false, "", revset("9:10")))}\n'
+ 10
+ $ hg log -l1 -T '{min(ifcontains("a", "aa", revset("9:10"), ""))}\n'
+ 9
+ $ hg log -l1 -T '{max(ifcontains("a", "bb", "", revset("9:10")))}\n'
+ 10
+ $ hg log -l1 -T '{min(ifeq(0, 0, revset("9:10"), ""))}\n'
+ 9
+ $ hg log -l1 -T '{max(ifeq(0, 1, "", revset("9:10")))}\n'
+ 10
+ $ cd ..
+
+Test laziness of if() then/else clause
+
+ $ hg debugtemplate '{count(0)}'
+ abort: incompatible use of template filter 'count'
+ [255]
+ $ hg debugtemplate '{if(true, "", count(0))}'
+ $ hg debugtemplate '{if(false, count(0), "")}'
+ $ hg debugtemplate '{ifcontains("a", "aa", "", count(0))}'
+ $ hg debugtemplate '{ifcontains("a", "bb", count(0), "")}'
+ $ hg debugtemplate '{ifeq(0, 0, "", count(0))}'
+ $ hg debugtemplate '{ifeq(0, 1, count(0), "")}'
+
Test dot operator precedence:
$ hg debugtemplate -R latesttag -r0 -v '{manifest.node|short}\n'
@@ -3356,6 +3405,8 @@
-4
$ hg debugtemplate '{(-)}\n'
hg: parse error at 3: not a prefix: )
+ ({(-)}\n
+ ^ here)
[255]
$ hg debugtemplate '{(-a)}\n'
hg: parse error: negation needs an integer argument
@@ -3521,6 +3572,8 @@
foo
$ hg log -r 2 -T '{if(rev, "{if(rev, \")}")}\n'
hg: parse error at 21: unterminated string
+ ({if(rev, "{if(rev, \")}")}\n
+ ^ here)
[255]
$ hg log -r 2 -T '{if(rev, \"\\"")}\n'
hg: parse error: trailing \ in string
@@ -4432,7 +4485,7 @@
hg: parse error: trailing \ in string
[255]
$ hg log -T "\\xy" -R a
- hg: parse error: invalid \x escape
+ hg: parse error: invalid \x escape* (glob)
[255]
json filter should escape HTML tags so that the output can be embedded in hgweb:
@@ -4567,8 +4620,8 @@
$ hg init nonascii
$ cd nonascii
$ $PYTHON <<EOF
- > open('latin1', 'w').write('\xe9')
- > open('utf-8', 'w').write('\xc3\xa9')
+ > open('latin1', 'wb').write(b'\xe9')
+ > open('utf-8', 'wb').write(b'\xc3\xa9')
> EOF
$ HGENCODING=utf-8 hg branch -q `cat utf-8`
$ HGENCODING=utf-8 hg ci -qAm "non-ascii branch: `cat utf-8`" utf-8
@@ -4616,9 +4669,9 @@
>
> templatefunc = registrar.templatefunc()
>
- > @templatefunc('custom()')
+ > @templatefunc(b'custom()')
> def custom(context, mapping, args):
- > return 'custom'
+ > return b'custom'
> EOF
$ cat <<EOF > .hg/hgrc
> [extensions]
--- a/tests/test-commandserver.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-commandserver.t Mon Mar 19 08:07:18 2018 -0700
@@ -211,17 +211,16 @@
ui.slash=True
ui.interactive=False
ui.mergemarkers=detailed
- ui.usehttp2=true (?)
ui.foo=bar
ui.nontty=true
web.address=localhost
web\.ipv6=(?:True|False) (re)
+ web.server-header=testing stub value
*** runcommand init foo
*** runcommand -R foo showconfig ui defaults
ui.slash=True
ui.interactive=False
ui.mergemarkers=detailed
- ui.usehttp2=true (?)
ui.nontty=true
$ rm -R foo
@@ -411,7 +410,7 @@
... # load _phasecache._phaserevs and _phasesets
... runcommand(server, ['log', '-qr', 'draft()'])
... # create draft commits by another process
- ... for i in xrange(5, 7):
+ ... for i in range(5, 7):
... f = open('a', 'ab')
... f.seek(0, os.SEEK_END)
... f.write('a\n')
--- a/tests/test-commit-interactive.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-commit-interactive.t Mon Mar 19 08:07:18 2018 -0700
@@ -898,15 +898,18 @@
$ cat > $TESTTMP/escape.py <<EOF
> from __future__ import absolute_import
- > import sys
+ > from mercurial import (
+ > pycompat,
+ > util,
+ > )
> def escape(c):
> o = ord(c)
> if o < 0x80:
> return c
> else:
- > return r'\x%02x' % o # escape char setting MSB
- > for l in sys.stdin:
- > sys.stdout.write(''.join(escape(c) for c in l))
+ > return br'\x%02x' % o # escape char setting MSB
+ > for l in util.stdin:
+ > util.stdout.write(b''.join(escape(c) for c in pycompat.iterbytestr(l)))
> EOF
$ hg commit -i --encoding cp932 2>&1 <<EOF | $PYTHON $TESTTMP/escape.py | grep '^y - '
--- a/tests/test-commit-multiple.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-commit-multiple.t Mon Mar 19 08:07:18 2018 -0700
@@ -90,23 +90,25 @@
> f.close()
>
> def printfiles(repo, rev):
- > print("revision %s files: %s" % (rev, repo[rev].files()))
+ > repo.ui.status(b"revision %d files: [%s]\n"
+ > % (rev, b', '.join(b"'%s'" % f
+ > for f in repo[rev].files())))
>
- > repo = hg.repository(ui.ui.load(), '.')
+ > repo = hg.repository(ui.ui.load(), b'.')
> assert len(repo) == 6, \
> "initial: len(repo): %d, expected: 6" % len(repo)
>
- > replacebyte("bugfix", "u")
+ > replacebyte(b"bugfix", b"u")
> sleep(2)
> try:
- > print("PRE: len(repo): %d" % len(repo))
+ > repo.ui.status(b"PRE: len(repo): %d\n" % len(repo))
> wlock = repo.wlock()
> lock = repo.lock()
- > replacebyte("file1", "x")
- > repo.commit(text="x", user="test", date=(0, 0))
- > replacebyte("file1", "y")
- > repo.commit(text="y", user="test", date=(0, 0))
- > print("POST: len(repo): %d" % len(repo))
+ > replacebyte(b"file1", b"x")
+ > repo.commit(text=b"x", user=b"test", date=(0, 0))
+ > replacebyte(b"file1", b"y")
+ > repo.commit(text=b"y", user=b"test", date=(0, 0))
+ > repo.ui.status(b"POST: len(repo): %d\n" % len(repo))
> finally:
> lock.release()
> wlock.release()
--- a/tests/test-commit.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-commit.t Mon Mar 19 08:07:18 2018 -0700
@@ -644,14 +644,14 @@
$ cat > evil-commit.py <<EOF
> from __future__ import absolute_import
> from mercurial import context, hg, node, ui as uimod
- > notrc = u".h\u200cg".encode('utf-8') + '/hgrc'
+ > notrc = u".h\u200cg".encode('utf-8') + b'/hgrc'
> u = uimod.ui.load()
- > r = hg.repository(u, '.')
+ > r = hg.repository(u, b'.')
> def filectxfn(repo, memctx, path):
> return context.memfilectx(repo, memctx, path,
- > '[hooks]\nupdate = echo owned')
- > c = context.memctx(r, [r['tip'].node(), node.nullid],
- > 'evil', [notrc], filectxfn, 0)
+ > b'[hooks]\nupdate = echo owned')
+ > c = context.memctx(r, [r[b'tip'].node(), node.nullid],
+ > b'evil', [notrc], filectxfn, 0)
> r.commitctx(c)
> EOF
$ $PYTHON evil-commit.py
@@ -670,14 +670,14 @@
$ cat > evil-commit.py <<EOF
> from __future__ import absolute_import
> from mercurial import context, hg, node, ui as uimod
- > notrc = "HG~1/hgrc"
+ > notrc = b"HG~1/hgrc"
> u = uimod.ui.load()
- > r = hg.repository(u, '.')
+ > r = hg.repository(u, b'.')
> def filectxfn(repo, memctx, path):
> return context.memfilectx(repo, memctx, path,
- > '[hooks]\nupdate = echo owned')
- > c = context.memctx(r, [r['tip'].node(), node.nullid],
- > 'evil', [notrc], filectxfn, 0)
+ > b'[hooks]\nupdate = echo owned')
+ > c = context.memctx(r, [r[b'tip'].node(), node.nullid],
+ > b'evil', [notrc], filectxfn, 0)
> r.commitctx(c)
> EOF
$ $PYTHON evil-commit.py
@@ -690,14 +690,14 @@
$ cat > evil-commit.py <<EOF
> from __future__ import absolute_import
> from mercurial import context, hg, node, ui as uimod
- > notrc = "HG8B6C~2/hgrc"
+ > notrc = b"HG8B6C~2/hgrc"
> u = uimod.ui.load()
- > r = hg.repository(u, '.')
+ > r = hg.repository(u, b'.')
> def filectxfn(repo, memctx, path):
> return context.memfilectx(repo, memctx, path,
- > '[hooks]\nupdate = echo owned')
- > c = context.memctx(r, [r['tip'].node(), node.nullid],
- > 'evil', [notrc], filectxfn, 0)
+ > b'[hooks]\nupdate = echo owned')
+ > c = context.memctx(r, [r[b'tip'].node(), node.nullid],
+ > b'evil', [notrc], filectxfn, 0)
> r.commitctx(c)
> EOF
$ $PYTHON evil-commit.py
@@ -831,4 +831,3 @@
second line
$ cd ..
-
--- a/tests/test-completion.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-completion.t Mon Mar 19 08:07:18 2018 -0700
@@ -102,6 +102,7 @@
debugnamecomplete
debugobsolete
debugpathcomplete
+ debugpeer
debugpickmergetool
debugpushkey
debugpvec
@@ -110,15 +111,20 @@
debugrename
debugrevlog
debugrevspec
+ debugserve
debugsetparents
debugssl
debugsub
debugsuccessorssets
debugtemplate
+ debuguigetpass
+ debuguiprompt
debugupdatecaches
debugupgraderepo
debugwalk
+ debugwhyunstable
debugwireargs
+ debugwireproto
Do not show the alias of a debug command if there are other candidates
(this should hide rawcommit)
@@ -226,7 +232,7 @@
commit: addremove, close-branch, amend, secret, edit, interactive, include, exclude, message, logfile, date, user, subrepos
diff: rev, change, text, git, binary, nodates, noprefix, show-function, reverse, ignore-all-space, ignore-space-change, ignore-blank-lines, ignore-space-at-eol, unified, stat, root, include, exclude, subrepos
export: output, switch-parent, rev, text, git, binary, nodates
- forget: include, exclude
+ forget: include, exclude, dry-run
init: ssh, remotecmd, insecure
log: follow, follow-first, date, copies, keyword, rev, line-range, removed, only-merges, user, only-branch, branch, prune, patch, git, limit, no-merges, stat, graph, style, template, include, exclude
merge: force, rev, preview, abort, tool
@@ -281,6 +287,7 @@
debugnamecomplete:
debugobsolete: flags, record-parents, rev, exclusive, index, delete, date, user, template
debugpathcomplete: full, normal, added, removed
+ debugpeer:
debugpickmergetool: rev, changedelete, include, exclude, tool
debugpushkey:
debugpvec:
@@ -289,15 +296,20 @@
debugrename: rev
debugrevlog: changelog, manifest, dir, dump
debugrevspec: optimize, show-revs, show-set, show-stage, no-optimized, verify-optimized
+ debugserve: sshstdio, logiofd, logiofile
debugsetparents:
debugssl:
debugsub: rev
debugsuccessorssets: closest
debugtemplate: rev, define
+ debuguigetpass: prompt
+ debuguiprompt: prompt
debugupdatecaches:
debugupgraderepo: optimize, run
debugwalk: include, exclude
+ debugwhyunstable:
debugwireargs: three, four, five, ssh, remotecmd, insecure
+ debugwireproto: localssh, peer, noreadstderr, ssh, remotecmd, insecure
files: rev, print0, include, exclude, template, subrepos
graft: rev, continue, edit, log, force, currentdate, currentuser, date, user, tool, dry-run
grep: print0, all, text, follow, ignore-case, files-with-matches, line-number, rev, user, date, template, include, exclude
--- a/tests/test-config-env.py Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-config-env.py Mon Mar 19 08:07:18 2018 -0700
@@ -11,24 +11,24 @@
util,
)
-testtmp = encoding.environ['TESTTMP']
+testtmp = encoding.environ[b'TESTTMP']
# prepare hgrc files
def join(name):
return os.path.join(testtmp, name)
-with open(join('sysrc'), 'w') as f:
- f.write('[ui]\neditor=e0\n[pager]\npager=p0\n')
+with open(join(b'sysrc'), 'wb') as f:
+ f.write(b'[ui]\neditor=e0\n[pager]\npager=p0\n')
-with open(join('userrc'), 'w') as f:
- f.write('[ui]\neditor=e1')
+with open(join(b'userrc'), 'wb') as f:
+ f.write(b'[ui]\neditor=e1')
# replace rcpath functions so they point to the files above
def systemrcpath():
- return [join('sysrc')]
+ return [join(b'sysrc')]
def userrcpath():
- return [join('userrc')]
+ return [join(b'userrc')]
rcutil.systemrcpath = systemrcpath
rcutil.userrcpath = userrcpath
@@ -41,9 +41,10 @@
ui = uimod.ui.load()
for section, name, value in ui.walkconfig():
source = ui.configsource(section, name)
- print('%s.%s=%s # %s' % (section, name, value, util.pconvert(source)))
- print('')
+ util.stdout.write(b'%s.%s=%s # %s\n'
+ % (section, name, value, util.pconvert(source)))
+ util.stdout.write(b'\n')
# environment variable overrides
printconfigs({})
-printconfigs({'EDITOR': 'e2', 'PAGER': 'p2'})
+printconfigs({b'EDITOR': b'e2', b'PAGER': b'p2'})
--- a/tests/test-config.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-config.t Mon Mar 19 08:07:18 2018 -0700
@@ -88,7 +88,7 @@
$ cat <<EOF > emptysource.py
> def reposetup(ui, repo):
- > ui.setconfig('empty', 'source', 'value')
+ > ui.setconfig(b'empty', b'source', b'value')
> EOF
$ cp .hg/hgrc .hg/hgrc.orig
$ cat <<EOF >> .hg/hgrc
--- a/tests/test-conflict.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-conflict.t Mon Mar 19 08:07:18 2018 -0700
@@ -138,9 +138,9 @@
$ hg up -q --clean .
$ $PYTHON <<EOF
- > fp = open('logfile', 'w')
- > fp.write('12345678901234567890123456789012345678901234567890' +
- > '1234567890') # there are 5 more columns for 80 columns
+ > fp = open('logfile', 'wb')
+ > fp.write(b'12345678901234567890123456789012345678901234567890' +
+ > b'1234567890') # there are 5 more columns for 80 columns
>
> # 2 x 4 = 8 columns, but 3 x 4 = 12 bytes
> fp.write(u'\u3042\u3044\u3046\u3048'.encode('utf-8'))
--- a/tests/test-context-metadata.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-context-metadata.t Mon Mar 19 08:07:18 2018 -0700
@@ -13,18 +13,19 @@
$ cat > metaedit.py <<EOF
> from __future__ import absolute_import
- > from mercurial import context, registrar
+ > from mercurial import context, pycompat, registrar
> cmdtable = {}
> command = registrar.command(cmdtable)
- > @command('metaedit')
+ > @command(b'metaedit')
> def metaedit(ui, repo, arg):
> # Modify commit message to "FOO"
- > with repo.wlock(), repo.lock(), repo.transaction('metaedit'):
- > old = repo['.']
- > kwargs = dict(s.split('=', 1) for s in arg.split(';'))
+ > with repo.wlock(), repo.lock(), repo.transaction(b'metaedit'):
+ > old = repo[b'.']
+ > kwargs = dict(s.split(b'=', 1) for s in arg.split(b';'))
> if 'parents' in kwargs:
- > kwargs['parents'] = kwargs['parents'].split(',')
- > new = context.metadataonlyctx(repo, old, **kwargs)
+ > kwargs[b'parents'] = kwargs[b'parents'].split(b',')
+ > new = context.metadataonlyctx(repo, old,
+ > **pycompat.strkwargs(kwargs))
> new.commit()
> EOF
$ hg --config extensions.metaedit=$TESTTMP/metaedit.py metaedit 'text=Changed'
--- a/tests/test-context.py Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-context.py Mon Mar 19 08:07:18 2018 -0700
@@ -1,5 +1,6 @@
from __future__ import absolute_import, print_function
import os
+import stat
from mercurial.node import hex
from mercurial import (
context,
@@ -170,7 +171,8 @@
# touch 00manifest.i mtime so storecache could expire.
# repo.__dict__['manifestlog'] is deleted by transaction releasefn.
st = repo.svfs.stat('00manifest.i')
- repo.svfs.utime('00manifest.i', (st.st_mtime + 1, st.st_mtime + 1))
+ repo.svfs.utime('00manifest.i',
+ (st[stat.ST_MTIME] + 1, st[stat.ST_MTIME] + 1))
# read the file just committed
try:
--- a/tests/test-contrib-perf.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-contrib-perf.t Mon Mar 19 08:07:18 2018 -0700
@@ -114,6 +114,7 @@
perftags (no help text available)
perftemplating
(no help text available)
+ perfunidiff benchmark a unified diff between revisions
perfvolatilesets
benchmark the computation of various volatile set
perfwalk (no help text available)
@@ -126,6 +127,8 @@
$ hg perfannotate a
$ hg perfbdiff -c 1
$ hg perfbdiff --alldata 1
+ $ hg perfunidiff -c 1
+ $ hg perfunidiff --alldata 1
$ hg perfbookmarks
$ hg perfbranchmap
$ hg perfcca
--- a/tests/test-contrib.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-contrib.t Mon Mar 19 08:07:18 2018 -0700
@@ -201,7 +201,7 @@
binary file
- $ $PYTHON -c "f = file('binary-local', 'w'); f.write('\x00'); f.close()"
+ $ $PYTHON -c "f = open('binary-local', 'w'); f.write('\x00'); f.close()"
$ cat orig >> binary-local
$ $PYTHON simplemerge -p binary-local base other
warning: binary-local looks like a binary file.
--- a/tests/test-convert-git.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-convert-git.t Mon Mar 19 08:07:18 2018 -0700
@@ -420,7 +420,7 @@
$ mkdir git-repo3
$ cd git-repo3
$ git init-db >/dev/null 2>/dev/null
- $ $PYTHON -c 'file("b", "wb").write("".join([chr(i) for i in range(256)])*16)'
+ $ $PYTHON -c 'import struct; open("b", "wb").write(b"".join([struct.Struct(">B").pack(i) for i in range(256)])*16)'
$ git add b
$ commit -a -m addbinary
$ cd ..
@@ -437,7 +437,7 @@
$ cd git-repo3-hg
$ hg up -C
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
- $ $PYTHON -c 'print len(file("b", "rb").read())'
+ $ $PYTHON -c 'print len(open("b", "rb").read())'
4096
$ cd ..
--- a/tests/test-convert-hg-source.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-convert-hg-source.t Mon Mar 19 08:07:18 2018 -0700
@@ -126,9 +126,9 @@
$ cat > rewrite.py <<EOF
> import sys
> # Interlace LF and CRLF
- > lines = [(l.rstrip() + ((i % 2) and '\n' or '\r\n'))
- > for i, l in enumerate(file(sys.argv[1]))]
- > file(sys.argv[1], 'wb').write(''.join(lines))
+ > lines = [(l.rstrip() + ((i % 2) and b'\n' or b'\r\n'))
+ > for i, l in enumerate(open(sys.argv[1], 'rb'))]
+ > open(sys.argv[1], 'wb').write(b''.join(lines))
> EOF
$ $PYTHON rewrite.py new/.hg/shamap
$ cd orig
--- a/tests/test-convert-mtn.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-convert-mtn.t Mon Mar 19 08:07:18 2018 -0700
@@ -43,7 +43,7 @@
$ mkdir dir
$ echo b > dir/b
$ echo d > dir/d
- $ $PYTHON -c 'file("bin", "wb").write("a\\x00b")'
+ $ $PYTHON -c 'open("bin", "wb").write(b"a\\x00b")'
$ echo c > c
$ mtn add a dir/b dir/d c bin
mtn: adding 'a' to workspace manifest
@@ -65,7 +65,7 @@
$ echo b >> dir/b
$ mtn drop c
mtn: dropping 'c' from workspace manifest
- $ $PYTHON -c 'file("bin", "wb").write("b\\x00c")'
+ $ $PYTHON -c 'open("bin", "wb").write(b"b\\x00c")'
$ mtn ci -m update1
mtn: beginning commit on branch 'com.selenic.test'
mtn: committed revision 51d0a982464573a2a2cf5ee2c9219c652aaebeff
@@ -217,8 +217,8 @@
test large file support (> 32kB)
- >>> fp = file('large-file', 'wb')
- >>> for x in xrange(10000): fp.write('%d\n' % x)
+ >>> fp = open('large-file', 'wb')
+ >>> for x in range(10000): fp.write(b'%d\n' % x)
>>> fp.close()
$ md5sum.py large-file
5d6de8a95c3b6bf9e0ffb808ba5299c1 large-file
--- a/tests/test-convert-p4-filetypes.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-convert-p4-filetypes.t Mon Mar 19 08:07:18 2018 -0700
@@ -52,7 +52,7 @@
> p4 add -t $T file_$T2
> ;;
> binary*)
- > $PYTHON -c "file('file_$T2', 'wb').write('this is $T')"
+ > $PYTHON -c "open('file_$T2', 'wb').write(b'this is $T')"
> p4 add -t $T file_$T2
> ;;
> *)
--- a/tests/test-debugbundle.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-debugbundle.t Mon Mar 19 08:07:18 2018 -0700
@@ -34,6 +34,14 @@
changegroup -- {nbchanges: 2, version: 02}
0e067c57feba1a5694ca4844f05588bb1bf82342
991a3460af53952d10ec8a295d3d2cc2e5fa9690
+ cache:rev-branch-cache -- {}
+
+Quiet output
+
+ $ hg debugbundle --quiet bundle2.hg
+ Stream params: {}
+ changegroup -- {nbchanges: 2, version: 02}
+ cache:rev-branch-cache -- {}
Verbose output:
@@ -72,5 +80,6 @@
c
b80de5d138758541c5f05265ad144ab9fa86d1db 0000000000000000000000000000000000000000 0000000000000000000000000000000000000000 991a3460af53952d10ec8a295d3d2cc2e5fa9690 0000000000000000000000000000000000000000 0
+ cache:rev-branch-cache -- {}
$ cd ..
--- a/tests/test-debugcommands.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-debugcommands.t Mon Mar 19 08:07:18 2018 -0700
@@ -379,5 +379,28 @@
remote-changegroup
http
https
+ rev-branch-cache
stream
v2
+
+Test debugpeer
+
+ $ hg --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" debugpeer ssh://user@dummy/debugrevlog
+ url: ssh://user@dummy/debugrevlog
+ local: no
+ pushable: yes
+
+ $ hg --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" --debug debugpeer ssh://user@dummy/debugrevlog
+ running "*" "*/tests/dummyssh" 'user@dummy' 'hg -R debugrevlog serve --stdio' (glob) (no-windows !)
+ running "*" "*\tests/dummyssh" "user@dummy" "hg -R debugrevlog serve --stdio" (glob) (windows !)
+ devel-peer-request: hello
+ sending hello command
+ devel-peer-request: between
+ devel-peer-request: pairs: 81 bytes
+ sending between command
+ remote: 403
+ remote: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ remote: 1
+ url: ssh://user@dummy/debugrevlog
+ local: no
+ pushable: yes
--- a/tests/test-debugextensions.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-debugextensions.t Mon Mar 19 08:07:18 2018 -0700
@@ -5,8 +5,8 @@
$ cat > extwithoutinfos.py <<EOF
> EOF
$ cat > extwithinfos.py <<EOF
- > testedwith = '3.0 3.1 3.2.1'
- > buglink = 'https://example.org/bts'
+ > testedwith = b'3.0 3.1 3.2.1'
+ > buglink = b'https://example.org/bts'
> EOF
$ cat >> $HGRCPATH <<EOF
--- a/tests/test-default-push.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-default-push.t Mon Mar 19 08:07:18 2018 -0700
@@ -142,6 +142,8 @@
$ hg --config 'paths.default:pushrev=(' push
pushing to file:/*/$TESTTMP/pushurlsource/../pushurldest (glob)
hg: parse error at 1: not a prefix: end
+ ((
+ ^ here)
[255]
$ cd ..
--- a/tests/test-demandimport.py Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-demandimport.py Mon Mar 19 08:07:18 2018 -0700
@@ -31,6 +31,27 @@
l = rsub("'<[a-z]*>'", "'<whatever>'", l)
return l
+demandimport.disable()
+os.environ['HGDEMANDIMPORT'] = 'disable'
+# this enable call should not actually enable demandimport!
+demandimport.enable()
+from mercurial import node
+print("node =", f(node))
+# now enable it for real
+del os.environ['HGDEMANDIMPORT']
+demandimport.enable()
+
+# Test access to special attributes through demandmod proxy
+from mercurial import error as errorproxy
+print("errorproxy =", f(errorproxy))
+print("errorproxy.__doc__ = %r"
+ % (' '.join(errorproxy.__doc__.split()[:3]) + ' ...'))
+print("errorproxy.__name__ = %r" % errorproxy.__name__)
+# __name__ must be accessible via __dict__ so the relative imports can be
+# resolved
+print("errorproxy.__dict__['__name__'] = %r" % errorproxy.__dict__['__name__'])
+print("errorproxy =", f(errorproxy))
+
import os
print("os =", f(os))
@@ -69,17 +90,6 @@
print("re.stderr =", f(re.stderr))
print("re =", f(re))
-# Test access to special attributes through demandmod proxy
-from mercurial import pvec as pvecproxy
-print("pvecproxy =", f(pvecproxy))
-print("pvecproxy.__doc__ = %r"
- % (' '.join(pvecproxy.__doc__.split()[:3]) + ' ...'))
-print("pvecproxy.__name__ = %r" % pvecproxy.__name__)
-# __name__ must be accessible via __dict__ so the relative imports can be
-# resolved
-print("pvecproxy.__dict__['__name__'] = %r" % pvecproxy.__dict__['__name__'])
-print("pvecproxy =", f(pvecproxy))
-
import contextlib
print("contextlib =", f(contextlib))
try:
@@ -97,10 +107,3 @@
print("__import__('contextlib', ..., ['unknownattr']) =", f(contextlibimp))
print("hasattr(contextlibimp, 'unknownattr') =",
util.safehasattr(contextlibimp, 'unknownattr'))
-
-demandimport.disable()
-os.environ['HGDEMANDIMPORT'] = 'disable'
-# this enable call should not actually enable demandimport!
-demandimport.enable()
-from mercurial import node
-print("node =", f(node))
--- a/tests/test-demandimport.py.out Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-demandimport.py.out Mon Mar 19 08:07:18 2018 -0700
@@ -1,3 +1,9 @@
+node = <module 'mercurial.node' from '?'>
+errorproxy = <unloaded module 'error'>
+errorproxy.__doc__ = 'Mercurial exceptions. This ...'
+errorproxy.__name__ = 'mercurial.error'
+errorproxy.__dict__['__name__'] = 'mercurial.error'
+errorproxy = <proxied module 'error'>
os = <unloaded module 'os'>
os.system = <built-in function system>
os = <module 'os' from '?'>
@@ -18,13 +24,7 @@
re = <unloaded module 'sys'>
re.stderr = <open file '<whatever>', mode 'w' at 0x?>
re = <proxied module 'sys'>
-pvecproxy = <unloaded module 'pvec'>
-pvecproxy.__doc__ = 'A "pvec" is ...'
-pvecproxy.__name__ = 'mercurial.pvec'
-pvecproxy.__dict__['__name__'] = 'mercurial.pvec'
-pvecproxy = <proxied module 'pvec'>
contextlib = <unloaded module 'contextlib'>
contextlib.unknownattr = ImportError: cannot import name unknownattr
__import__('contextlib', ..., ['unknownattr']) = <module 'contextlib' from '?'>
hasattr(contextlibimp, 'unknownattr') = False
-node = <module 'mercurial.node' from '?'>
--- a/tests/test-devel-warnings.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-devel-warnings.t Mon Mar 19 08:07:18 2018 -0700
@@ -17,7 +17,7 @@
>
> @command(b'buggytransaction', [], '')
> def buggylocking(ui, repo):
- > tr = repo.transaction('buggy')
+ > tr = repo.transaction(b'buggy')
> # make sure we rollback the transaction as we don't want to rely on the__del__
> tr.release()
>
@@ -26,8 +26,8 @@
> """check that reentrance is fine"""
> wl = repo.wlock()
> lo = repo.lock()
- > tr = repo.transaction('proper')
- > tr2 = repo.transaction('proper')
+ > tr = repo.transaction(b'proper')
+ > tr2 = repo.transaction(b'proper')
> lo2 = repo.lock()
> wl2 = repo.wlock()
> wl2.release()
@@ -46,34 +46,34 @@
>
> @command(b'no-wlock-write', [], '')
> def nowlockwrite(ui, repo):
- > with repo.vfs(b'branch', 'a'):
+ > with repo.vfs(b'branch', b'a'):
> pass
>
> @command(b'no-lock-write', [], '')
> def nolockwrite(ui, repo):
- > with repo.svfs(b'fncache', 'a'):
+ > with repo.svfs(b'fncache', b'a'):
> pass
>
> @command(b'stripintr', [], '')
> def stripintr(ui, repo):
> lo = repo.lock()
- > tr = repo.transaction('foobar')
+ > tr = repo.transaction(b'foobar')
> try:
- > repair.strip(repo.ui, repo, [repo['.'].node()])
+ > repair.strip(repo.ui, repo, [repo[b'.'].node()])
> finally:
> lo.release()
> @command(b'oldanddeprecated', [], '')
> def oldanddeprecated(ui, repo):
> """test deprecation warning API"""
> def foobar(ui):
- > ui.deprecwarn('foorbar is deprecated, go shopping', '42.1337')
+ > ui.deprecwarn(b'foorbar is deprecated, go shopping', b'42.1337')
> foobar(ui)
> @command(b'nouiwarning', [], '')
> def nouiwarning(ui, repo):
- > util.nouideprecwarn('this is a test', '13.37')
+ > util.nouideprecwarn(b'this is a test', b'13.37')
> @command(b'programmingerror', [], '')
> def programmingerror(ui, repo):
- > raise error.ProgrammingError('something went wrong', hint='try again')
+ > raise error.ProgrammingError(b'something went wrong', hint=b'try again')
> EOF
$ cat << EOF >> $HGRCPATH
@@ -331,7 +331,7 @@
$ hg nouiwarning
$TESTTMP/buggylocking.py:*: DeprecationWarning: this is a test (glob)
(compatibility will be dropped after Mercurial-13.37, update your code.)
- util.nouideprecwarn('this is a test', '13.37')
+ util.nouideprecwarn(b'this is a test', b'13.37')
(disabled outside of test run)
@@ -350,25 +350,25 @@
> configtable = {}
> configitem = registrar.configitem(configtable)
>
- > configitem('test', 'some', default='foo')
- > configitem('test', 'dynamic', default=configitems.dynamicdefault)
- > configitem('test', 'callable', default=list)
+ > configitem(b'test', b'some', default=b'foo')
+ > configitem(b'test', b'dynamic', default=configitems.dynamicdefault)
+ > configitem(b'test', b'callable', default=list)
> # overwrite a core config
- > configitem('ui', 'quiet', default=False)
- > configitem('ui', 'interactive', default=None)
+ > configitem(b'ui', b'quiet', default=False)
+ > configitem(b'ui', b'interactive', default=None)
>
> @command(b'buggyconfig')
> def cmdbuggyconfig(ui, repo):
- > repo.ui.config('ui', 'quiet', True)
- > repo.ui.config('ui', 'interactive', False)
- > repo.ui.config('test', 'some', 'bar')
- > repo.ui.config('test', 'some', 'foo')
- > repo.ui.config('test', 'dynamic', 'some-required-default')
- > repo.ui.config('test', 'dynamic')
- > repo.ui.config('test', 'callable', [])
- > repo.ui.config('test', 'callable', 'foo')
- > repo.ui.config('test', 'unregistered')
- > repo.ui.config('unregistered', 'unregistered')
+ > repo.ui.config(b'ui', b'quiet', True)
+ > repo.ui.config(b'ui', b'interactive', False)
+ > repo.ui.config(b'test', b'some', b'bar')
+ > repo.ui.config(b'test', b'some', b'foo')
+ > repo.ui.config(b'test', b'dynamic', b'some-required-default')
+ > repo.ui.config(b'test', b'dynamic')
+ > repo.ui.config(b'test', b'callable', [])
+ > repo.ui.config(b'test', b'callable', b'foo')
+ > repo.ui.config(b'test', b'unregistered')
+ > repo.ui.config(b'unregistered', b'unregistered')
> EOF
$ hg --config "extensions.buggyconfig=${TESTTMP}/buggyconfig.py" buggyconfig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-diff-antipatience.t Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,91 @@
+#testcases bdiff xdiff
+
+#if xdiff
+#require xdiff
+ $ cat >> $HGRCPATH <<EOF
+ > [experimental]
+ > xdiff = true
+ > EOF
+#endif
+
+Test case that makes use of the weakness of patience diff algorithm
+
+ $ hg init
+ >>> open('a', 'wb').write(b'\n'.join(list(b'a' + b'x' * 10 + b'u' + b'x' * 30 + b'a\n')))
+ $ hg commit -m 1 -A a
+ >>> open('a', 'wb').write(b'\n'.join(list(b'b' + b'x' * 30 + b'u' + b'x' * 10 + b'b\n')))
+#if xdiff
+ $ hg diff
+ diff -r f0aeecb49805 a
+ --- a/a Thu Jan 01 00:00:00 1970 +0000
+ +++ b/a Thu Jan 01 00:00:00 1970 +0000
+ @@ -1,4 +1,4 @@
+ -a
+ +b
+ x
+ x
+ x
+ @@ -9,7 +9,6 @@
+ x
+ x
+ x
+ -u
+ x
+ x
+ x
+ @@ -30,6 +29,7 @@
+ x
+ x
+ x
+ +u
+ x
+ x
+ x
+ @@ -40,5 +40,5 @@
+ x
+ x
+ x
+ -a
+ +b
+
+#else
+ $ hg diff
+ diff -r f0aeecb49805 a
+ --- a/a Thu Jan 01 00:00:00 1970 +0000
+ +++ b/a Thu Jan 01 00:00:00 1970 +0000
+ @@ -1,15 +1,4 @@
+ -a
+ -x
+ -x
+ -x
+ -x
+ -x
+ -x
+ -x
+ -x
+ -x
+ -x
+ -u
+ +b
+ x
+ x
+ x
+ @@ -40,5 +29,16 @@
+ x
+ x
+ x
+ -a
+ +u
+ +x
+ +x
+ +x
+ +x
+ +x
+ +x
+ +x
+ +x
+ +x
+ +x
+ +b
+
+#endif
--- a/tests/test-diff-binary-file.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-diff-binary-file.t Mon Mar 19 08:07:18 2018 -0700
@@ -81,7 +81,7 @@
$ cat > writebin.py <<EOF
> import sys
> path = sys.argv[1]
- > open(path, 'wb').write('\x00\x01\x02\x03')
+ > open(path, 'wb').write(b'\x00\x01\x02\x03')
> EOF
$ $PYTHON writebin.py binfile.bin
$ hg add binfile.bin
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-diff-indent-heuristic.t Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,339 @@
+#testcases bdiff xdiff
+
+#if xdiff
+#require xdiff
+ $ cat >> $HGRCPATH <<EOF
+ > [experimental]
+ > xdiff = true
+ > EOF
+#endif
+
+ $ hg init
+
+ $ cat > a.c <<'EOF'
+ > /*
+ > * This function returns 1.
+ > */
+ > int f() {
+ > return 1;
+ > }
+ > /*
+ > * This function returns 2.
+ > */
+ > int g() {
+ > return 2;
+ > }
+ > /*
+ > * This function returns 3.
+ > */
+ > int h() {
+ > return 3;
+ > }
+ > EOF
+
+ $ cat > b.c <<'EOF'
+ > if (x) {
+ > do_something();
+ > }
+ >
+ > if (y) {
+ > do_something_else();
+ > }
+ > EOF
+
+ $ cat > c.rb <<'EOF'
+ > #!ruby
+ > ["foo", "bar", "baz"].map do |i|
+ > i.upcase
+ > end
+ > EOF
+
+ $ cat > d.py <<'EOF'
+ > try:
+ > import foo
+ > except ImportError:
+ > pass
+ > try:
+ > import bar
+ > except ImportError:
+ > pass
+ > EOF
+
+The below two files are taken from git: t/t4061-diff-indent.sh
+
+ $ cat > spaces.txt <<'EOF'
+ > 1
+ > 2
+ > a
+ >
+ > b
+ > 3
+ > 4
+ > EOF
+
+ $ cat > functions.c <<'EOF'
+ > 1
+ > 2
+ > /* function */
+ > foo() {
+ > foo
+ > }
+ >
+ > 3
+ > 4
+ > EOF
+
+ $ hg commit -m 1 -A . -q
+
+ $ cat > a.c <<'EOF'
+ > /*
+ > * This function returns 1.
+ > */
+ > int f() {
+ > return 1;
+ > }
+ > /*
+ > * This function returns 3.
+ > */
+ > int h() {
+ > return 3;
+ > }
+ > EOF
+
+ $ cat > b.c <<'EOF'
+ > if (x) {
+ > do_something();
+ > }
+ >
+ > if (y) {
+ > do_another_thing();
+ > }
+ >
+ > if (y) {
+ > do_something_else();
+ > }
+ > EOF
+
+ $ cat > c.rb <<'EOF'
+ > #!ruby
+ > ["foo", "bar", "baz"].map do |i|
+ > i
+ > end
+ > ["foo", "bar", "baz"].map do |i|
+ > i.upcase
+ > end
+ > EOF
+
+ $ cat > d.py <<'EOF'
+ > try:
+ > import foo
+ > except ImportError:
+ > pass
+ > try:
+ > import baz
+ > except ImportError:
+ > pass
+ > try:
+ > import bar
+ > except ImportError:
+ > pass
+ > EOF
+
+ $ cat > spaces.txt <<'EOF'
+ > 1
+ > 2
+ > a
+ >
+ > b
+ > a
+ >
+ > b
+ > 3
+ > 4
+ > EOF
+
+ $ cat > functions.c <<'EOF'
+ > 1
+ > 2
+ > /* function */
+ > bar() {
+ > foo
+ > }
+ >
+ > /* function */
+ > foo() {
+ > foo
+ > }
+ >
+ > 3
+ > 4
+ > EOF
+
+#if xdiff
+ $ hg diff --git
+ diff --git a/a.c b/a.c
+ --- a/a.c
+ +++ b/a.c
+ @@ -4,12 +4,6 @@
+ int f() {
+ return 1;
+ }
+ -/*
+ - * This function returns 2.
+ - */
+ -int g() {
+ - return 2;
+ -}
+ /*
+ * This function returns 3.
+ */
+ diff --git a/b.c b/b.c
+ --- a/b.c
+ +++ b/b.c
+ @@ -2,6 +2,10 @@
+ do_something();
+ }
+
+ +if (y) {
+ + do_another_thing();
+ +}
+ +
+ if (y) {
+ do_something_else();
+ }
+ diff --git a/c.rb b/c.rb
+ --- a/c.rb
+ +++ b/c.rb
+ @@ -1,4 +1,7 @@
+ #!ruby
+ +["foo", "bar", "baz"].map do |i|
+ + i
+ +end
+ ["foo", "bar", "baz"].map do |i|
+ i.upcase
+ end
+ diff --git a/d.py b/d.py
+ --- a/d.py
+ +++ b/d.py
+ @@ -2,6 +2,10 @@
+ import foo
+ except ImportError:
+ pass
+ +try:
+ + import baz
+ +except ImportError:
+ + pass
+ try:
+ import bar
+ except ImportError:
+ diff --git a/functions.c b/functions.c
+ --- a/functions.c
+ +++ b/functions.c
+ @@ -1,5 +1,10 @@
+ 1
+ 2
+ +/* function */
+ +bar() {
+ + foo
+ +}
+ +
+ /* function */
+ foo() {
+ foo
+ diff --git a/spaces.txt b/spaces.txt
+ --- a/spaces.txt
+ +++ b/spaces.txt
+ @@ -2,6 +2,9 @@
+ 2
+ a
+
+ +b
+ +a
+ +
+ b
+ 3
+ 4
+#else
+ $ hg diff --git
+ diff --git a/a.c b/a.c
+ --- a/a.c
+ +++ b/a.c
+ @@ -5,12 +5,6 @@
+ return 1;
+ }
+ /*
+ - * This function returns 2.
+ - */
+ -int g() {
+ - return 2;
+ -}
+ -/*
+ * This function returns 3.
+ */
+ int h() {
+ diff --git a/b.c b/b.c
+ --- a/b.c
+ +++ b/b.c
+ @@ -3,5 +3,9 @@
+ }
+
+ if (y) {
+ + do_another_thing();
+ +}
+ +
+ +if (y) {
+ do_something_else();
+ }
+ diff --git a/c.rb b/c.rb
+ --- a/c.rb
+ +++ b/c.rb
+ @@ -1,4 +1,7 @@
+ #!ruby
+ ["foo", "bar", "baz"].map do |i|
+ + i
+ +end
+ +["foo", "bar", "baz"].map do |i|
+ i.upcase
+ end
+ diff --git a/d.py b/d.py
+ --- a/d.py
+ +++ b/d.py
+ @@ -3,6 +3,10 @@
+ except ImportError:
+ pass
+ try:
+ + import baz
+ +except ImportError:
+ + pass
+ +try:
+ import bar
+ except ImportError:
+ pass
+ diff --git a/functions.c b/functions.c
+ --- a/functions.c
+ +++ b/functions.c
+ @@ -1,6 +1,11 @@
+ 1
+ 2
+ /* function */
+ +bar() {
+ + foo
+ +}
+ +
+ +/* function */
+ foo() {
+ foo
+ }
+ diff --git a/spaces.txt b/spaces.txt
+ --- a/spaces.txt
+ +++ b/spaces.txt
+ @@ -3,5 +3,8 @@
+ a
+
+ b
+ +a
+ +
+ +b
+ 3
+ 4
+#endif
--- a/tests/test-diff-unified.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-diff-unified.t Mon Mar 19 08:07:18 2018 -0700
@@ -386,3 +386,73 @@
}
$ cd ..
+
+Long function names should be abbreviated, but multi-byte character shouldn't
+be broken up
+
+ $ hg init longfunc
+ $ cd longfunc
+
+ >>> with open('a', 'wb') as f:
+ ... f.write(b'a' * 39 + b'bb' + b'\n')
+ ... f.write(b' .\n' * 3)
+ ... f.write(b' 0 b\n')
+ ... f.write(b' .\n' * 3)
+ ... f.write(b'a' * 39 + b'\xc3\xa0' + b'\n')
+ ... f.write(b' .\n' * 3)
+ ... f.write(b' 0 a with grave (single code point)\n')
+ ... f.write(b' .\n' * 3)
+ ... f.write(b'a' * 39 + b'a\xcc\x80' + b'\n')
+ ... f.write(b' .\n' * 3)
+ ... f.write(b' 0 a with grave (composition)\n')
+ ... f.write(b' .\n' * 3)
+ $ hg ci -qAm0
+
+ >>> with open('a', 'wb') as f:
+ ... f.write(b'a' * 39 + b'bb' + b'\n')
+ ... f.write(b' .\n' * 3)
+ ... f.write(b' 1 b\n')
+ ... f.write(b' .\n' * 3)
+ ... f.write(b'a' * 39 + b'\xc3\xa0' + b'\n')
+ ... f.write(b' .\n' * 3)
+ ... f.write(b' 1 a with grave (single code point)\n')
+ ... f.write(b' .\n' * 3)
+ ... f.write(b'a' * 39 + b'a\xcc\x80' + b'\n')
+ ... f.write(b' .\n' * 3)
+ ... f.write(b' 1 a with grave (composition)\n')
+ ... f.write(b' .\n' * 3)
+ $ hg ci -m1
+
+ $ hg diff -c1 --nodates --show-function
+ diff -r 3e92dd6fa812 -r a256341606cb a
+ --- a/a
+ +++ b/a
+ @@ -2,7 +2,7 @@ aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab
+ .
+ .
+ .
+ - 0 b
+ + 1 b
+ .
+ .
+ .
+ @@ -10,7 +10,7 @@ aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\xc3\xa0 (esc)
+ .
+ .
+ .
+ - 0 a with grave (single code point)
+ + 1 a with grave (single code point)
+ .
+ .
+ .
+ @@ -18,7 +18,7 @@ aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\xcc\x80 (esc)
+ .
+ .
+ .
+ - 0 a with grave (composition)
+ + 1 a with grave (composition)
+ .
+ .
+ .
+
+ $ cd ..
--- a/tests/test-dispatch.py Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-dispatch.py Mon Mar 19 08:07:18 2018 -0700
@@ -9,27 +9,27 @@
Prints command and result value, but does not handle quoting.
"""
- print("running: %s" % (cmd,))
+ print(b"running: %s" % (cmd,))
req = dispatch.request(cmd.split())
result = dispatch.dispatch(req)
- print("result: %r" % (result,))
+ print(b"result: %r" % (result,))
-testdispatch("init test1")
+testdispatch(b"init test1")
os.chdir('test1')
# create file 'foo', add and commit
f = open('foo', 'wb')
-f.write('foo\n')
+f.write(b'foo\n')
f.close()
-testdispatch("add foo")
-testdispatch("commit -m commit1 -d 2000-01-01 foo")
+testdispatch(b"add foo")
+testdispatch(b"commit -m commit1 -d 2000-01-01 foo")
# append to file 'foo' and commit
f = open('foo', 'ab')
-f.write('bar\n')
+f.write(b'bar\n')
f.close()
-testdispatch("commit -m commit2 -d 2000-01-02 foo")
+testdispatch(b"commit -m commit2 -d 2000-01-02 foo")
# check 88803a69b24 (fancyopts modified command table)
-testdispatch("log -r 0")
-testdispatch("log -r tip")
+testdispatch(b"log -r 0")
+testdispatch(b"log -r tip")
--- a/tests/test-doctest.py Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-doctest.py Mon Mar 19 08:07:18 2018 -0700
@@ -42,6 +42,7 @@
testmod('mercurial.changegroup')
testmod('mercurial.changelog')
+testmod('mercurial.cmdutil')
testmod('mercurial.color')
testmod('mercurial.config')
testmod('mercurial.context')
--- a/tests/test-encoding-align.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-encoding-align.t Mon Mar 19 08:07:18 2018 -0700
@@ -6,16 +6,16 @@
$ cd t
$ $PYTHON << EOF
> # (byte, width) = (6, 4)
- > s = "\xe7\x9f\xad\xe5\x90\x8d"
+ > s = b"\xe7\x9f\xad\xe5\x90\x8d"
> # (byte, width) = (7, 7): odd width is good for alignment test
- > m = "MIDDLE_"
+ > m = b"MIDDLE_"
> # (byte, width) = (18, 12)
- > l = "\xe9\x95\xb7\xe3\x81\x84\xe9\x95\xb7\xe3\x81\x84\xe5\x90\x8d\xe5\x89\x8d"
- > f = file('s', 'w'); f.write(s); f.close()
- > f = file('m', 'w'); f.write(m); f.close()
- > f = file('l', 'w'); f.write(l); f.close()
+ > l = b"\xe9\x95\xb7\xe3\x81\x84\xe9\x95\xb7\xe3\x81\x84\xe5\x90\x8d\xe5\x89\x8d"
+ > f = open('s', 'wb'); f.write(s); f.close()
+ > f = open('m', 'wb'); f.write(m); f.close()
+ > f = open('l', 'wb'); f.write(l); f.close()
> # instant extension to show list of options
- > f = file('showoptlist.py', 'w'); f.write("""# encoding: utf-8
+ > f = open('showoptlist.py', 'wb'); f.write(b"""# encoding: utf-8
> from mercurial import registrar
> cmdtable = {}
> command = registrar.command(cmdtable)
--- a/tests/test-encoding.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-encoding.t Mon Mar 19 08:07:18 2018 -0700
@@ -15,9 +15,9 @@
$ hg co
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ $PYTHON << EOF
- > f = file('latin-1', 'w'); f.write("latin-1 e' encoded: \xe9"); f.close()
- > f = file('utf-8', 'w'); f.write("utf-8 e' encoded: \xc3\xa9"); f.close()
- > f = file('latin-1-tag', 'w'); f.write("\xe9"); f.close()
+ > f = open('latin-1', 'wb'); f.write(b"latin-1 e' encoded: \xe9"); f.close()
+ > f = open('utf-8', 'wb'); f.write(b"utf-8 e' encoded: \xc3\xa9"); f.close()
+ > f = open('latin-1-tag', 'wb'); f.write(b"\xe9"); f.close()
> EOF
should fail with encoding error
--- a/tests/test-eol.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-eol.t Mon Mar 19 08:07:18 2018 -0700
@@ -17,12 +17,12 @@
> msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
> except ImportError:
> pass
- > (old, new) = sys.argv[1] == 'LF' and ('\n', '\r\n') or ('\r\n', '\n')
+ > (old, new) = sys.argv[1] == 'LF' and (b'\n', b'\r\n') or (b'\r\n', b'\n')
> print("%% switching encoding from %r to %r" % (old, new))
> for path in sys.argv[2:]:
- > data = file(path, 'rb').read()
+ > data = open(path, 'rb').read()
> data = data.replace(old, new)
- > file(path, 'wb').write(data)
+ > open(path, 'wb').write(data)
> EOF
$ seteol () {
--- a/tests/test-export.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-export.t Mon Mar 19 08:07:18 2018 -0700
@@ -184,7 +184,49 @@
$ hg commit -m " !\"#$%&(,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]"'^'"_\`abcdefghijklmnopqrstuvwxyz{|}~"
$ hg export -v -o %m.patch tip
exporting patch:
- ____________0123456789_______ABCDEFGHIJKLMNOPQRSTUVWXYZ______abcdefghijklmnopqrstuvwxyz____.patch
+ ___________0123456789_______ABCDEFGHIJKLMNOPQRSTUVWXYZ______abcdefghijklmnopqrstuvwxyz____.patch
+
+Template fragments in file name:
+
+ $ hg export -v -o '{node|shortest}.patch' tip
+ exporting patch:
+ 197e.patch
+
+Backslash should be preserved because it is a directory separator on Windows:
+
+ $ mkdir out
+ $ hg export -v -o 'out\{node|shortest}.patch' tip
+ exporting patch:
+ out\197e.patch
+
+Still backslash is taken as an escape character in inner template strings:
+
+ $ hg export -v -o '{"out\{foo}.patch"}' tip
+ exporting patch:
+ out{foo}.patch
+
+Invalid pattern in file name:
+
+ $ hg export -o '%x.patch' tip
+ abort: invalid format spec '%x' in output filename
+ [255]
+ $ hg export -o '%' tip
+ abort: incomplete format spec in output filename
+ [255]
+ $ hg export -o '%{"foo"}' tip
+ abort: incomplete format spec in output filename
+ [255]
+ $ hg export -o '%m{' tip
+ hg: parse error at 3: unterminated template expansion
+ (%m{
+ ^ here)
+ [255]
+ $ hg export -o '%\' tip
+ abort: invalid format spec '%\' in output filename
+ [255]
+ $ hg export -o '\%' tip
+ abort: incomplete format spec in output filename
+ [255]
Catch exporting unknown revisions (especially empty revsets, see issue3353)
--- a/tests/test-extdiff.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-extdiff.t Mon Mar 19 08:07:18 2018 -0700
@@ -252,8 +252,8 @@
> #!$PYTHON
> import time
> time.sleep(1) # avoid unchanged-timestamp problems
- > file('a/a', 'ab').write('edited\n')
- > file('a/b', 'ab').write('edited\n')
+ > open('a/a', 'ab').write(b'edited\n')
+ > open('a/b', 'ab').write(b'edited\n')
> EOT
#if execbit
@@ -424,7 +424,8 @@
Test handling of non-ASCII paths in generated docstrings (issue5301)
- >>> open("u", "w").write("\xa5\xa5")
+ >>> with open("u", "wb") as f:
+ ... n = f.write(b"\xa5\xa5")
$ U=`cat u`
$ HGPLAIN=1 hg --config hgext.extdiff= --config extdiff.cmd.td=hi help -k xyzzy
--- a/tests/test-extension.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-extension.t Mon Mar 19 08:07:18 2018 -0700
@@ -7,22 +7,22 @@
> command = registrar.command(cmdtable)
> configtable = {}
> configitem = registrar.configitem(configtable)
- > configitem('tests', 'foo', default="Foo")
+ > configitem(b'tests', b'foo', default=b"Foo")
> def uisetup(ui):
- > ui.write("uisetup called\\n")
+ > ui.write(b"uisetup called\\n")
> ui.flush()
> def reposetup(ui, repo):
- > ui.write("reposetup called for %s\\n" % os.path.basename(repo.root))
- > ui.write("ui %s= repo.ui\\n" % (ui == repo.ui and "=" or "!"))
+ > ui.write(b"reposetup called for %s\\n" % os.path.basename(repo.root))
+ > ui.write(b"ui %s= repo.ui\\n" % (ui == repo.ui and b"=" or b"!"))
> ui.flush()
- > @command(b'foo', [], 'hg foo')
+ > @command(b'foo', [], b'hg foo')
> def foo(ui, *args, **kwargs):
- > foo = ui.config('tests', 'foo')
+ > foo = ui.config(b'tests', b'foo')
> ui.write(foo)
- > ui.write("\\n")
- > @command(b'bar', [], 'hg bar', norepo=True)
+ > ui.write(b"\\n")
+ > @command(b'bar', [], b'hg bar', norepo=True)
> def bar(ui, *args, **kwargs):
- > ui.write("Bar\\n")
+ > ui.write(b"Bar\\n")
> EOF
$ abspath=`pwd`/foobar.py
@@ -440,12 +440,12 @@
> @command(b'showabsolute', [], norepo=True)
> def showabsolute(ui, *args, **opts):
> from absextroot import absolute
- > ui.write('ABS: %s\n' % '\nABS: '.join(absolute.getresult()))
+ > ui.write(b'ABS: %s\n' % '\nABS: '.join(absolute.getresult()))
>
> @command(b'showrelative', [], norepo=True)
> def showrelative(ui, *args, **opts):
> from . import relative
- > ui.write('REL: %s\n' % '\nREL: '.join(relative.getresult()))
+ > ui.write(b'REL: %s\n' % '\nREL: '.join(relative.getresult()))
>
> # import modules from external library
> from extlibroot.lsub1.lsub2 import used as lused, unused as lunused
@@ -564,11 +564,11 @@
> from mercurial import registrar
> cmdtable = {}
> command = registrar.command(cmdtable)
- > @command(b'debugfoobar', [], 'hg debugfoobar')
+ > @command(b'debugfoobar', [], b'hg debugfoobar')
> def debugfoobar(ui, repo, *args, **opts):
> "yet another debug command"
> pass
- > @command(b'foo', [], 'hg foo')
+ > @command(b'foo', [], b'hg foo')
> def foo(ui, repo, *args, **opts):
> """yet another foo command
> This command has been DEPRECATED since forever.
@@ -805,7 +805,7 @@
> command = registrar.command(cmdtable)
> """multirevs extension
> Big multi-line module docstring."""
- > @command(b'multirevs', [], 'ARG', norepo=True)
+ > @command(b'multirevs', [], b'ARG', norepo=True)
> def multirevs(ui, repo, arg, *args, **opts):
> """multirevs command"""
> pass
@@ -880,14 +880,14 @@
> from mercurial import commands, registrar
> cmdtable = {}
> command = registrar.command(cmdtable)
- > @command(b'dodo', [], 'hg dodo')
+ > @command(b'dodo', [], b'hg dodo')
> def dodo(ui, *args, **kwargs):
> """Does nothing"""
- > ui.write("I do nothing. Yay\\n")
- > @command(b'foofoo', [], 'hg foofoo')
+ > ui.write(b"I do nothing. Yay\\n")
+ > @command(b'foofoo', [], b'hg foofoo')
> def foofoo(ui, *args, **kwargs):
> """Writes 'Foo foo'"""
- > ui.write("Foo foo\\n")
+ > ui.write(b"Foo foo\\n")
> EOF
$ dodopath=$TESTTMP/d/dodo.py
@@ -991,14 +991,14 @@
> from mercurial import commands, registrar
> cmdtable = {}
> command = registrar.command(cmdtable)
- > @command(b'something', [], 'hg something')
+ > @command(b'something', [], b'hg something')
> def something(ui, *args, **kwargs):
> """Does something"""
- > ui.write("I do something. Yaaay\\n")
- > @command(b'beep', [], 'hg beep')
+ > ui.write(b"I do something. Yaaay\\n")
+ > @command(b'beep', [], b'hg beep')
> def beep(ui, *args, **kwargs):
> """Writes 'Beep beep'"""
- > ui.write("Beep beep\\n")
+ > ui.write(b"Beep beep\\n")
> EOF
$ dudupath=$TESTTMP/d/dudu.py
@@ -1235,7 +1235,7 @@
> cmdtable = {}
> command = registrar.command(cmdtable)
> class Bogon(Exception): pass
- > @command(b'throw', [], 'hg throw', norepo=True)
+ > @command(b'throw', [], b'hg throw', norepo=True)
> def throw(ui, **opts):
> """throws an exception"""
> raise Bogon()
@@ -1278,8 +1278,8 @@
If the extensions declare outdated versions, accuse the older extension first:
$ echo "from mercurial import util" >> older.py
$ echo "util.version = lambda:'2.2'" >> older.py
- $ echo "testedwith = '1.9.3'" >> older.py
- $ echo "testedwith = '2.1.1'" >> throw.py
+ $ echo "testedwith = b'1.9.3'" >> older.py
+ $ echo "testedwith = b'2.1.1'" >> throw.py
$ rm -f throw.pyc throw.pyo
$ rm -Rf __pycache__
$ hg --config extensions.throw=throw.py --config extensions.older=older.py \
@@ -1293,7 +1293,7 @@
** Extensions loaded: throw, older
One extension only tested with older, one only with newer versions:
- $ echo "util.version = lambda:'2.1'" >> older.py
+ $ echo "util.version = lambda:b'2.1'" >> older.py
$ rm -f older.pyc older.pyo
$ rm -Rf __pycache__
$ hg --config extensions.throw=throw.py --config extensions.older=older.py \
@@ -1307,7 +1307,7 @@
** Extensions loaded: throw, older
Older extension is tested with current version, the other only with newer:
- $ echo "util.version = lambda:'1.9.3'" >> older.py
+ $ echo "util.version = lambda:b'1.9.3'" >> older.py
$ rm -f older.pyc older.pyo
$ rm -Rf __pycache__
$ hg --config extensions.throw=throw.py --config extensions.older=older.py \
@@ -1345,8 +1345,8 @@
** Extensions loaded: throw
Patch version is ignored during compatibility check
- $ echo "testedwith = '3.2'" >> throw.py
- $ echo "util.version = lambda:'3.2.2'" >> throw.py
+ $ echo "testedwith = b'3.2'" >> throw.py
+ $ echo "util.version = lambda:b'3.2.2'" >> throw.py
$ rm -f throw.pyc throw.pyo
$ rm -Rf __pycache__
$ hg --config extensions.throw=throw.py throw 2>&1 | egrep '^\*\*'
@@ -1438,8 +1438,8 @@
$ cat > minversion1.py << EOF
> from mercurial import util
- > util.version = lambda: '3.5.2'
- > minimumhgversion = '3.6'
+ > util.version = lambda: b'3.5.2'
+ > minimumhgversion = b'3.6'
> EOF
$ hg --config extensions.minversion=minversion1.py version
(third party extension minversion requires version 3.6 or newer of Mercurial; disabling)
@@ -1452,8 +1452,8 @@
$ cat > minversion2.py << EOF
> from mercurial import util
- > util.version = lambda: '3.6'
- > minimumhgversion = '3.7'
+ > util.version = lambda: b'3.6'
+ > minimumhgversion = b'3.7'
> EOF
$ hg --config extensions.minversion=minversion2.py version 2>&1 | egrep '\(third'
(third party extension minversion requires version 3.7 or newer of Mercurial; disabling)
@@ -1462,8 +1462,8 @@
$ cat > minversion2.py << EOF
> from mercurial import util
- > util.version = lambda: '3.6.1'
- > minimumhgversion = '3.6'
+ > util.version = lambda: b'3.6.1'
+ > minimumhgversion = b'3.6'
> EOF
$ hg --config extensions.minversion=minversion3.py version 2>&1 | egrep '\(third'
[1]
@@ -1472,8 +1472,8 @@
$ cat > minversion3.py << EOF
> from mercurial import util
- > util.version = lambda: '3.5'
- > minimumhgversion = '3.5'
+ > util.version = lambda: b'3.5'
+ > minimumhgversion = b'3.5'
> EOF
$ hg --config extensions.minversion=minversion3.py version 2>&1 | egrep '\(third'
[1]
@@ -1492,7 +1492,7 @@
$ cat > $TESTTMP/reposetuptest.py <<EOF
> from mercurial import extensions
> def reposetup(ui, repo):
- > ui.write('reposetup() for %s\n' % (repo.root))
+ > ui.write(b'reposetup() for %s\n' % (repo.root))
> ui.flush()
> EOF
$ hg init src
@@ -1626,7 +1626,7 @@
> def deprecatedcmd(repo, ui):
> pass
> cmdtable = {
- > 'deprecatedcmd': (deprecatedcmd, [], ''),
+ > b'deprecatedcmd': (deprecatedcmd, [], b''),
> }
> EOF
$ cat <<EOF > .hg/hgrc
@@ -1663,7 +1663,7 @@
> docstring = '''
> GREPME make sure that this is in the help!
> '''
- > extensions.wrapcommand(commands.table, 'bookmarks', exbookmarks,
+ > extensions.wrapcommand(commands.table, b'bookmarks', exbookmarks,
> synopsis, docstring)
> EOF
$ abspath=`pwd`/exthelp.py
@@ -1698,7 +1698,7 @@
> from mercurial import registrar
> cmdtable = {}
> command = registrar.command(cmdtable)
- > @command('dummy', [('', 'opt', u'value', u'help')], 'ext [OPTIONS]')
+ > @command(b'dummy', [('', 'opt', u'value', u'help')], 'ext [OPTIONS]')
> def ext(*args, **opts):
> print(opts['opt'])
> EOF
@@ -1707,8 +1707,8 @@
> test_unicode_default_value = $TESTTMP/test_unicode_default_value.py
> EOF
$ hg -R $TESTTMP/opt-unicode-default dummy
- *** failed to import extension test_unicode_default_value from $TESTTMP/test_unicode_default_value.py: option 'dummy.opt' has a unicode default value
- *** (change the dummy.opt default value to a non-unicode string)
+ *** failed to import extension test_unicode_default_value from $TESTTMP/test_unicode_default_value.py: unicode u'value' found in cmdtable.dummy
+ *** (use b'' to make it byte string)
hg: unknown command 'dummy'
(did you mean summary?)
[255]
--- a/tests/test-filecache.py Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-filecache.py Mon Mar 19 08:07:18 2018 -0700
@@ -1,5 +1,6 @@
from __future__ import absolute_import, print_function
import os
+import stat
import subprocess
import sys
@@ -11,11 +12,15 @@
extensions,
hg,
localrepo,
+ pycompat,
ui as uimod,
util,
vfs as vfsmod,
)
+if pycompat.ispy3:
+ xrange = range
+
class fakerepo(object):
def __init__(self):
self._filecache = {}
@@ -196,7 +201,7 @@
fp.close()
oldstat = os.stat(filename)
- if oldstat.st_ctime != oldstat.st_mtime:
+ if oldstat[stat.ST_CTIME] != oldstat[stat.ST_MTIME]:
# subsequent changing never causes ambiguity
continue
@@ -215,16 +220,17 @@
fp.write('BAR')
newstat = os.stat(filename)
- if oldstat.st_ctime != newstat.st_ctime:
+ if oldstat[stat.ST_CTIME] != newstat[stat.ST_CTIME]:
# timestamp ambiguity was naturally avoided while repetition
continue
# st_mtime should be advanced "repetition * 2" times, because
# all changes occurred at same time (in sec)
- expected = (oldstat.st_mtime + repetition * 2) & 0x7fffffff
- if newstat.st_mtime != expected:
- print("'newstat.st_mtime %s is not %s (as %s + %s * 2)" %
- (newstat.st_mtime, expected, oldstat.st_mtime, repetition))
+ expected = (oldstat[stat.ST_MTIME] + repetition * 2) & 0x7fffffff
+ if newstat[stat.ST_MTIME] != expected:
+ print("'newstat[stat.ST_MTIME] %s is not %s (as %s + %s * 2)" %
+ (newstat[stat.ST_MTIME], expected,
+ oldstat[stat.ST_MTIME], repetition))
# no more examination is needed regardless of result
break
--- a/tests/test-fileset.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-fileset.t Mon Mar 19 08:07:18 2018 -0700
@@ -180,7 +180,7 @@
Test files properties
- >>> file('bin', 'wb').write('\0a')
+ >>> open('bin', 'wb').write(b'\0a')
$ fileset 'binary()'
$ fileset 'binary() and unknown()'
bin
@@ -219,8 +219,8 @@
$ hg --config ui.portablefilenames=ignore add con.xml
#endif
- >>> file('1k', 'wb').write(' '*1024)
- >>> file('2k', 'wb').write(' '*2048)
+ >>> open('1k', 'wb').write(b' '*1024)
+ >>> open('2k', 'wb').write(b' '*2048)
$ hg add 1k 2k
$ fileset 'size("bar")'
hg: parse error: couldn't parse size: bar
@@ -666,7 +666,11 @@
$ fileset "status(' ', '4', added())"
hg: parse error at 1: not a prefix: end
+ (
+ ^ here)
[255]
$ fileset "status('2', ' ', added())"
hg: parse error at 1: not a prefix: end
+ (
+ ^ here)
[255]
--- a/tests/test-fncache.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-fncache.t Mon Mar 19 08:07:18 2018 -0700
@@ -236,7 +236,7 @@
> wlock.release()
>
> def extsetup(ui):
- > extensions.wrapcommand(commands.table, "commit", commitwrap)
+ > extensions.wrapcommand(commands.table, b"commit", commitwrap)
> EOF
$ extpath=`pwd`/exceptionext.py
$ hg init fncachetxn
@@ -259,14 +259,14 @@
> def wrapper(orig, self, *args, **kwargs):
> tr = orig(self, *args, **kwargs)
> def fail(tr):
- > raise error.Abort("forced transaction failure")
+ > raise error.Abort(b"forced transaction failure")
> # zzz prefix to ensure it sorted after store.write
- > tr.addfinalize('zzz-forcefails', fail)
+ > tr.addfinalize(b'zzz-forcefails', fail)
> return tr
>
> def uisetup(ui):
> extensions.wrapfunction(
- > localrepo.localrepository, 'transaction', wrapper)
+ > localrepo.localrepository, b'transaction', wrapper)
>
> cmdtable = {}
>
--- a/tests/test-generaldelta.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-generaldelta.t Mon Mar 19 08:07:18 2018 -0700
@@ -159,6 +159,7 @@
Stream params: {Compression: BZ}
changegroup -- {nbchanges: 1, version: 02}
1c5d4dc9a8b8d6e1750966d343e94db665e7a1e9
+ cache:rev-branch-cache -- {}
phase-heads -- {}
1c5d4dc9a8b8d6e1750966d343e94db665e7a1e9 draft
--- a/tests/test-glog.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-glog.t Mon Mar 19 08:07:18 2018 -0700
@@ -87,21 +87,22 @@
> cmdutil,
> commands,
> extensions,
+ > logcmdutil,
> revsetlang,
> smartset,
> )
>
> def logrevset(repo, pats, opts):
- > revs = cmdutil._logrevs(repo, opts)
+ > revs = logcmdutil._initialrevs(repo, opts)
> if not revs:
> return None
- > match, pats, slowpath = cmdutil._makelogmatcher(repo, revs, pats, opts)
- > return cmdutil._makelogrevset(repo, match, pats, slowpath, opts)
+ > match, pats, slowpath = logcmdutil._makematcher(repo, revs, pats, opts)
+ > return logcmdutil._makerevset(repo, match, pats, slowpath, opts)
>
> def uisetup(ui):
> def printrevset(orig, repo, pats, opts):
> revs, filematcher = orig(repo, pats, opts)
- > if opts.get('print_revset'):
+ > if opts.get(b'print_revset'):
> expr = logrevset(repo, pats, opts)
> if expr:
> tree = revsetlang.parse(expr)
@@ -109,15 +110,15 @@
> else:
> tree = []
> ui = repo.ui
- > ui.write('%r\n' % (opts.get('rev', []),))
- > ui.write(revsetlang.prettyformat(tree) + '\n')
- > ui.write(smartset.prettyformat(revs) + '\n')
+ > ui.write(b'%r\n' % (opts.get(b'rev', []),))
+ > ui.write(revsetlang.prettyformat(tree) + b'\n')
+ > ui.write(smartset.prettyformat(revs) + b'\n')
> revs = smartset.baseset() # display no revisions
> return revs, filematcher
- > extensions.wrapfunction(cmdutil, 'getlogrevs', printrevset)
- > aliases, entry = cmdutil.findcmd('log', commands.table)
- > entry[1].append(('', 'print-revset', False,
- > 'print generated revset and exit (DEPRECATED)'))
+ > extensions.wrapfunction(logcmdutil, 'getrevs', printrevset)
+ > aliases, entry = cmdutil.findcmd(b'log', commands.table)
+ > entry[1].append((b'', b'print-revset', False,
+ > b'print generated revset and exit (DEPRECATED)'))
> EOF
$ echo "[extensions]" >> $HGRCPATH
@@ -2420,7 +2421,7 @@
|
~
-node template with changeset_printer:
+node template with changesetprinter:
$ hg log -Gqr 5:7 --config ui.graphnodetemplate='"{rev}"'
7 7:02dbb8e276b8
@@ -2432,7 +2433,7 @@
|
~
-node template with changeset_templater (shared cache variable):
+node template with changesettemplater (shared cache variable):
$ hg log -Gr 5:7 -T '{latesttag % "{rev} {tag}+{distance}"}\n' \
> --config ui.graphnodetemplate='{ifeq(latesttagdistance, 0, "#", graphnode)}'
--- a/tests/test-grep.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-grep.t Mon Mar 19 08:07:18 2018 -0700
@@ -271,7 +271,7 @@
match in last "line" without newline
- $ $PYTHON -c 'fp = open("noeol", "wb"); fp.write("no infinite loop"); fp.close();'
+ $ $PYTHON -c 'fp = open("noeol", "wb"); fp.write(b"no infinite loop"); fp.close();'
$ hg ci -Amnoeol
adding noeol
$ hg grep loop
--- a/tests/test-help.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-help.t Mon Mar 19 08:07:18 2018 -0700
@@ -274,6 +274,7 @@
purge command to delete untracked files from the working
directory
relink recreates hardlinks between repository clones
+ remotenames showing remotebookmarks and remotebranches in UI
schemes extend schemes with shortcuts to repository swarms
share share a common history between several working directories
shelve save and restore changes to the working directory
@@ -282,6 +283,11 @@
win32mbcs allow the use of MBCS paths with problematic encodings
zeroconf discover and advertise repositories on the local network
+Verify that deprecated extensions are included if --verbose:
+
+ $ hg -v help extensions | grep children
+ children command to display child changesets (DEPRECATED)
+
Verify that extension keywords appear in help templates
$ hg help --config extensions.transplant= templating|grep transplant > /dev/null
@@ -948,6 +954,7 @@
debugoptEXP (no help text available)
debugpathcomplete
complete part or all of a tracked path
+ debugpeer establish a connection to a peer repository
debugpickmergetool
examine which merge tool is chosen for specified file
debugpushkey access the pushkey key/value protocol
@@ -960,6 +967,7 @@
debugrename dump rename information
debugrevlog show data and statistics about a revlog
debugrevspec parse and apply a revision specification
+ debugserve run a server with advanced settings
debugsetparents
manually set the parents of the current working directory
debugssl test a secure connection to a server
@@ -968,13 +976,21 @@
show set of successors for revision
debugtemplate
parse and apply a template
+ debuguigetpass
+ show prompt to type password
+ debuguiprompt
+ show plain prompt
debugupdatecaches
warm all known caches in the repository
debugupgraderepo
upgrade a repository to use different features
debugwalk show how files match on given patterns
+ debugwhyunstable
+ explain instabilities of a changeset
debugwireargs
(no help text available)
+ debugwireproto
+ send wire protocol commands to a server
(use 'hg help -v debug' to show built-in aliases and global options)
@@ -986,6 +1002,7 @@
To access a subtopic, use "hg help internals.{subtopic-name}"
+ bundle2 Bundle2
bundles Bundles
censor Censor
changegroups Changegroups
@@ -1492,6 +1509,8 @@
Extensions:
clonebundles advertise pre-generated bundles to seed clones
+ narrow create clones which fetch history data for subset of files
+ (EXPERIMENTAL)
prefixedname matched against word "clone"
relink recreates hardlinks between repository clones
@@ -3050,6 +3069,13 @@
<tr><td colspan="2"><h2><a name="topics" href="#topics">Topics</a></h2></td></tr>
<tr><td>
+ <a href="/help/internals.bundle2">
+ bundle2
+ </a>
+ </td><td>
+ Bundle2
+ </td></tr>
+ <tr><td>
<a href="/help/internals.bundles">
bundles
</a>
@@ -3387,6 +3413,70 @@
</html>
+ $ get-with-headers.py 127.0.0.1:$HGPORT "help/unknowntopic"
+ 404 Not Found
+
+ <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
+ <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en-US">
+ <head>
+ <link rel="icon" href="/static/hgicon.png" type="image/png" />
+ <meta name="robots" content="index, nofollow" />
+ <link rel="stylesheet" href="/static/style-paper.css" type="text/css" />
+ <script type="text/javascript" src="/static/mercurial.js"></script>
+
+ <title>test: error</title>
+ </head>
+ <body>
+
+ <div class="container">
+ <div class="menu">
+ <div class="logo">
+ <a href="https://mercurial-scm.org/">
+ <img src="/static/hglogo.png" width=75 height=90 border=0 alt="mercurial" /></a>
+ </div>
+ <ul>
+ <li><a href="/shortlog">log</a></li>
+ <li><a href="/graph">graph</a></li>
+ <li><a href="/tags">tags</a></li>
+ <li><a href="/bookmarks">bookmarks</a></li>
+ <li><a href="/branches">branches</a></li>
+ </ul>
+ <ul>
+ <li><a href="/help">help</a></li>
+ </ul>
+ </div>
+
+ <div class="main">
+
+ <h2 class="breadcrumb"><a href="/">Mercurial</a> </h2>
+ <h3>error</h3>
+
+
+ <form class="search" action="/log">
+
+ <p><input name="rev" id="search1" type="text" size="30" value="" /></p>
+ <div id="hint">Find changesets by keywords (author, files, the commit message), revision
+ number or hash, or <a href="/help/revsets">revset expression</a>.</div>
+ </form>
+
+ <div class="description">
+ <p>
+ An error occurred while processing your request:
+ </p>
+ <p>
+ Not Found
+ </p>
+ </div>
+ </div>
+ </div>
+
+
+
+ </body>
+ </html>
+
+ [1]
+
$ killdaemons.py
#endif
--- a/tests/test-hgrc.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-hgrc.t Mon Mar 19 08:07:18 2018 -0700
@@ -58,7 +58,7 @@
unexpected leading whitespace
[255]
- $ $PYTHON -c "print '[foo]\nbar = a\n b\n c \n de\n fg \nbaz = bif cb \n'" \
+ $ $PYTHON -c "from __future__ import print_function; print('[foo]\nbar = a\n b\n c \n de\n fg \nbaz = bif cb \n')" \
> > $HGRC
$ hg showconfig foo
foo.bar=a\nb\nc\nde\nfg
@@ -126,12 +126,16 @@
$ hg showconfig alias defaults
alias.log=log -g
defaults.identify=-n
+ $ hg showconfig alias alias
+ alias.log=log -g
+ $ hg showconfig alias.log alias.log
+ alias.log=log -g
$ hg showconfig alias defaults.identify
- abort: only one config item permitted
- [255]
+ alias.log=log -g
+ defaults.identify=-n
$ hg showconfig alias.log defaults.identify
- abort: only one config item permitted
- [255]
+ alias.log=log -g
+ defaults.identify=-n
HGPLAIN
--- a/tests/test-hgweb-auth.py Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-hgweb-auth.py Mon Mar 19 08:07:18 2018 -0700
@@ -19,7 +19,7 @@
def writeauth(items):
ui = origui.copy()
- for name, value in items.iteritems():
+ for name, value in items.items():
ui.setconfig('auth', name, value)
return ui
@@ -36,7 +36,7 @@
for name in ('.username', '.password'):
if (p + name) not in auth:
auth[p + name] = p
- auth = dict((k, v) for k, v in auth.iteritems() if v is not None)
+ auth = dict((k, v) for k, v in auth.items() if v is not None)
ui = writeauth(auth)
--- a/tests/test-hgweb-commands.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-hgweb-commands.t Mon Mar 19 08:07:18 2018 -0700
@@ -903,6 +903,7 @@
<td class="date age">Thu, 01 Jan 1970 00:00:00 +0000</td>
</tr>
+
<tr>
<th class="author">parents</th>
<td class="author"></td>
@@ -1914,7 +1915,20 @@
$ get-with-headers.py $LOCALIP:$HGPORT '?cmd=capabilities'; echo
200 Script output follows
- lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx compression=$BUNDLE2_COMPRESSIONS$
+ lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx compression=$BUNDLE2_COMPRESSIONS$
+
+wire protocol command to wrong base URL
+
+ $ get-with-headers.py $LOCALIP:$HGPORT 'foo?cmd=capabilities' -
+ 404 Not Found
+ content-length: 12
+ content-type: application/mercurial-0.1
+ date: $HTTP_DATE$
+ server: testing stub value
+
+ 0
+ Not Found
+ [1]
heads
@@ -2113,10 +2127,10 @@
(plain version to check the format)
- $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=capabilities' | dd ibs=75 count=1 2> /dev/null; echo
+ $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=capabilities' | dd ibs=76 count=1 2> /dev/null; echo
200 Script output follows
- lookup changegroupsubset branchmap pushkey known
+ lookup branchmap pushkey known getbundle unbundle
(spread version to check the content)
@@ -2127,13 +2141,13 @@
follows
lookup
- changegroupsubset
branchmap
pushkey
known
getbundle
unbundlehash
batch
+ changegroupsubset
stream-preferred
streamreqs=generaldelta,revlogv1
$USUAL_BUNDLE2_CAPS_SERVER$
--- a/tests/test-hgweb-diffs.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-hgweb-diffs.t Mon Mar 19 08:07:18 2018 -0700
@@ -104,6 +104,7 @@
<td class="date age">Thu, 01 Jan 1970 00:00:00 +0000</td>
</tr>
+
<tr>
<th class="author">parents</th>
<td class="author"></td>
@@ -400,6 +401,7 @@
<td class="date age">Thu, 01 Jan 1970 00:00:00 +0000</td>
</tr>
+
<tr>
<th class="author">parents</th>
<td class="author"></td>
--- a/tests/test-hgweb-raw.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-hgweb-raw.t Mon Mar 19 08:07:18 2018 -0700
@@ -17,7 +17,7 @@
$ hg serve -p $HGPORT -A access.log -E error.log -d --pid-file=hg.pid
$ cat hg.pid >> $DAEMON_PIDS
- $ (get-with-headers.py localhost:$HGPORT '?f=bf0ff59095c9;file=sub/some%20text%25.txt;style=raw' content-type content-length content-disposition) >getoutput.txt
+ $ (get-with-headers.py localhost:$HGPORT 'raw-file/bf0ff59095c9/sub/some%20text%25.txt' content-type content-length content-disposition) >getoutput.txt
$ killdaemons.py hg.pid
@@ -32,14 +32,14 @@
It is very boring to read, but computers don't
care about things like that.
$ cat access.log error.log
- $LOCALIP - - [*] "GET /?f=bf0ff59095c9;file=sub/some%20text%25.txt;style=raw HTTP/1.1" 200 - (glob)
+ $LOCALIP - - [$LOGDATE$] "GET /raw-file/bf0ff59095c9/sub/some%20text%25.txt HTTP/1.1" 200 - (glob)
$ rm access.log error.log
$ hg serve -p $HGPORT -A access.log -E error.log -d --pid-file=hg.pid \
> --config web.guessmime=True
$ cat hg.pid >> $DAEMON_PIDS
- $ (get-with-headers.py localhost:$HGPORT '?f=bf0ff59095c9;file=sub/some%20text%25.txt;style=raw' content-type content-length content-disposition) >getoutput.txt
+ $ (get-with-headers.py localhost:$HGPORT 'raw-file/bf0ff59095c9/sub/some%20text%25.txt' content-type content-length content-disposition) >getoutput.txt
$ killdaemons.py hg.pid
$ cat getoutput.txt
@@ -53,6 +53,6 @@
It is very boring to read, but computers don't
care about things like that.
$ cat access.log error.log
- $LOCALIP - - [*] "GET /?f=bf0ff59095c9;file=sub/some%20text%25.txt;style=raw HTTP/1.1" 200 - (glob)
+ $LOCALIP - - [$LOGDATE$] "GET /raw-file/bf0ff59095c9/sub/some%20text%25.txt HTTP/1.1" 200 - (glob)
$ cd ..
--- a/tests/test-hgweb-removed.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-hgweb-removed.t Mon Mar 19 08:07:18 2018 -0700
@@ -85,6 +85,7 @@
<td class="date age">Thu, 01 Jan 1970 00:00:00 +0000</td>
</tr>
+
<tr>
<th class="author">parents</th>
<td class="author"><a href="/rev/cb9a9f314b8b">cb9a9f314b8b</a> </td>
--- a/tests/test-hgweb.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-hgweb.t Mon Mar 19 08:07:18 2018 -0700
@@ -333,14 +333,14 @@
Test the access/error files are opened in append mode
- $ $PYTHON -c "print len(file('access.log').readlines()), 'log lines written'"
+ $ $PYTHON -c "print len(open('access.log', 'rb').readlines()), 'log lines written'"
14 log lines written
static file
$ get-with-headers.py --twice localhost:$HGPORT 'static/style-gitweb.css' - date etag server
200 Script output follows
- content-length: 9118
+ content-length: 9126
content-type: text/css
body { font-family: sans-serif; font-size: 12px; border:solid #d9d8d1; border-width:1px; margin:10px; background: white; color: black; }
@@ -374,7 +374,7 @@
div.title_text { padding:6px 0px; border: solid #d9d8d1; border-width:0px 0px 1px; }
div.log_body { padding:8px 8px 8px 150px; }
.age { white-space:nowrap; }
- span.age { position:relative; float:left; width:142px; font-style:italic; }
+ a.title span.age { position:relative; float:left; width:142px; font-style:italic; }
div.log_link {
padding:0px 8px;
font-size:10px; font-family:sans-serif; font-style:normal;
--- a/tests/test-histedit-arguments.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-histedit-arguments.t Mon Mar 19 08:07:18 2018 -0700
@@ -280,9 +280,9 @@
--------------------------------------------------------------------
$ $PYTHON <<EOF
- > fp = open('logfile', 'w')
- > fp.write('12345678901234567890123456789012345678901234567890' +
- > '12345') # there are 5 more columns for 80 columns
+ > fp = open('logfile', 'wb')
+ > fp.write(b'12345678901234567890123456789012345678901234567890' +
+ > b'12345') # there are 5 more columns for 80 columns
>
> # 2 x 4 = 8 columns, but 3 x 4 = 12 bytes
> fp.write(u'\u3042\u3044\u3046\u3048'.encode('utf-8'))
--- a/tests/test-histedit-fold.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-histedit-fold.t Mon Mar 19 08:07:18 2018 -0700
@@ -154,9 +154,9 @@
> from mercurial import util
> def abortfolding(ui, repo, hooktype, **kwargs):
> ctx = repo[kwargs.get('node')]
- > if set(ctx.files()) == {'c', 'd', 'f'}:
+ > if set(ctx.files()) == {b'c', b'd', b'f'}:
> return True # abort folding commit only
- > ui.warn('allow non-folding commit\\n')
+ > ui.warn(b'allow non-folding commit\\n')
> EOF
$ cat > .hg/hgrc <<EOF
> [hooks]
--- a/tests/test-hook.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-hook.t Mon Mar 19 08:07:18 2018 -0700
@@ -417,9 +417,9 @@
> def printargs(ui, args):
> a = list(args.items())
> a.sort()
- > ui.write('hook args:\n')
+ > ui.write(b'hook args:\n')
> for k, v in a:
- > ui.write(' %s %s\n' % (k, v))
+ > ui.write(b' %s %s\n' % (k, v))
>
> def passhook(ui, repo, **args):
> printargs(ui, args)
@@ -432,19 +432,19 @@
> pass
>
> def raisehook(**args):
- > raise LocalException('exception from hook')
+ > raise LocalException(b'exception from hook')
>
> def aborthook(**args):
- > raise error.Abort('raise abort from hook')
+ > raise error.Abort(b'raise abort from hook')
>
> def brokenhook(**args):
> return 1 + {}
>
> def verbosehook(ui, **args):
- > ui.note('verbose output from hook\n')
+ > ui.note(b'verbose output from hook\n')
>
> def printtags(ui, repo, **args):
- > ui.write('%s\n' % sorted(repo.tags()))
+ > ui.write(b'%s\n' % sorted(repo.tags()))
>
> class container:
> unreachable = 1
@@ -667,7 +667,7 @@
$ cd hooks
$ cat > testhooks.py <<EOF
> def testhook(ui, **args):
- > ui.write('hook works\n')
+ > ui.write(b'hook works\n')
> EOF
$ echo '[hooks]' > ../repo/.hg/hgrc
$ echo "pre-commit.test = python:`pwd`/testhooks.py:testhook" >> ../repo/.hg/hgrc
@@ -886,7 +886,7 @@
> def uisetup(ui):
> class untrustedui(ui.__class__):
> def _trusted(self, fp, f):
- > if util.normpath(fp.name).endswith('untrusted/.hg/hgrc'):
+ > if util.normpath(fp.name).endswith(b'untrusted/.hg/hgrc'):
> return False
> return super(untrustedui, self)._trusted(fp, f)
> ui.__class__ = untrustedui
--- a/tests/test-http-bad-server.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-http-bad-server.t Mon Mar 19 08:07:18 2018 -0700
@@ -116,11 +116,11 @@
readline(4? from -1) -> (2) \r\n (glob)
write(36) -> HTTP/1.1 200 Script output follows\r\n
write(23) -> Server: badhttpserver\r\n
- write(37) -> Date: Fri, 14 Apr 2017 00:00:00 GMT\r\n
+ write(37) -> Date: $HTTP_DATE$\r\n
write(41) -> Content-Type: application/mercurial-0.1\r\n
- write(21) -> Content-Length: 417\r\n
+ write(21) -> Content-Length: 436\r\n
write(2) -> \r\n
- write(417) -> lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 bundle2=HG20%0Abookmarks%0Achangegroup%3D01%2C02%0Adigests%3Dmd5%2Csha1%2Csha512%0Aerror%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0Ahgtagsfnodes%0Alistkeys%0Apushkey%0Aremote-changegroup%3Dhttp%2Chttps unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx compression=none
+ write(436) -> lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 bundle2=HG20%0Abookmarks%0Achangegroup%3D01%2C02%0Adigests%3Dmd5%2Csha1%2Csha512%0Aerror%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0Ahgtagsfnodes%0Alistkeys%0Apushkey%0Aremote-changegroup%3Dhttp%2Chttps%0Arev-branch-cache unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx compression=none
readline(4? from 65537) -> (26) GET /?cmd=batch HTTP/1.1\r\n (glob)
readline(1? from -1) -> (1?) Accept-Encoding* (glob)
read limit reached; closing socket
@@ -157,11 +157,11 @@
readline(13? from -1) -> (2) \r\n (glob)
write(36) -> HTTP/1.1 200 Script output follows\r\n
write(23) -> Server: badhttpserver\r\n
- write(37) -> Date: Fri, 14 Apr 2017 00:00:00 GMT\r\n
+ write(37) -> Date: $HTTP_DATE$\r\n
write(41) -> Content-Type: application/mercurial-0.1\r\n
- write(21) -> Content-Length: 417\r\n
+ write(21) -> Content-Length: 436\r\n
write(2) -> \r\n
- write(417) -> lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 bundle2=HG20%0Abookmarks%0Achangegroup%3D01%2C02%0Adigests%3Dmd5%2Csha1%2Csha512%0Aerror%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0Ahgtagsfnodes%0Alistkeys%0Apushkey%0Aremote-changegroup%3Dhttp%2Chttps unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx compression=none
+ write(436) -> lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 bundle2=HG20%0Abookmarks%0Achangegroup%3D01%2C02%0Adigests%3Dmd5%2Csha1%2Csha512%0Aerror%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0Ahgtagsfnodes%0Alistkeys%0Apushkey%0Aremote-changegroup%3Dhttp%2Chttps%0Arev-branch-cache unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx compression=none
readline(13? from 65537) -> (26) GET /?cmd=batch HTTP/1.1\r\n (glob)
readline(1?? from -1) -> (27) Accept-Encoding: identity\r\n (glob)
readline(8? from -1) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
@@ -179,7 +179,7 @@
readline(2? from -1) -> (2) \r\n (glob)
write(36) -> HTTP/1.1 200 Script output follows\r\n
write(23) -> Server: badhttpserver\r\n
- write(37) -> Date: Fri, 14 Apr 2017 00:00:00 GMT\r\n
+ write(37) -> Date: $HTTP_DATE$\r\n
write(41) -> Content-Type: application/mercurial-0.1\r\n
write(20) -> Content-Length: 42\r\n
write(2) -> \r\n
@@ -214,11 +214,11 @@
readline(14? from -1) -> (2) \r\n (glob)
write(36) -> HTTP/1.1 200 Script output follows\r\n
write(23) -> Server: badhttpserver\r\n
- write(37) -> Date: Fri, 14 Apr 2017 00:00:00 GMT\r\n
+ write(37) -> Date: $HTTP_DATE$\r\n
write(41) -> Content-Type: application/mercurial-0.1\r\n
- write(21) -> Content-Length: 430\r\n
+ write(21) -> Content-Length: 449\r\n
write(2) -> \r\n
- write(430) -> lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 bundle2=HG20%0Abookmarks%0Achangegroup%3D01%2C02%0Adigests%3Dmd5%2Csha1%2Csha512%0Aerror%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0Ahgtagsfnodes%0Alistkeys%0Apushkey%0Aremote-changegroup%3Dhttp%2Chttps unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024 httppostargs httpmediatype=0.1rx,0.1tx,0.2tx compression=none
+ write(449) -> lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 bundle2=HG20%0Abookmarks%0Achangegroup%3D01%2C02%0Adigests%3Dmd5%2Csha1%2Csha512%0Aerror%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0Ahgtagsfnodes%0Alistkeys%0Apushkey%0Aremote-changegroup%3Dhttp%2Chttps%0Arev-branch-cache unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024 httppostargs httpmediatype=0.1rx,0.1tx,0.2tx compression=none
readline\(14[67] from 65537\) -> \(2[67]\) POST /\?cmd=batch HTTP/1.1\\r\\n (re)
readline\(1(19|20) from -1\) -> \(27\) Accept-Encoding: identity\\r\\n (re)
readline(9? from -1) -> (41) content-type: application/mercurial-0.1\r\n (glob)
@@ -275,7 +275,7 @@
$ cat hg.pid > $DAEMON_PIDS
$ hg clone http://localhost:$HGPORT/ clone
- abort: HTTP request error (incomplete response; expected 397 bytes got 20)
+ abort: HTTP request error (incomplete response; expected 416 bytes got 20)
(this may be an intermittent network failure; if the error persists, consider contacting the network or server operator)
[255]
@@ -290,18 +290,18 @@
readline(-1) -> (2) \r\n
write(36 from 36) -> (144) HTTP/1.1 200 Script output follows\r\n
write(23 from 23) -> (121) Server: badhttpserver\r\n
- write(37 from 37) -> (84) Date: Fri, 14 Apr 2017 00:00:00 GMT\r\n
+ write(37 from 37) -> (84) Date: $HTTP_DATE$\r\n
write(41 from 41) -> (43) Content-Type: application/mercurial-0.1\r\n
- write(21 from 21) -> (22) Content-Length: 417\r\n
+ write(21 from 21) -> (22) Content-Length: 436\r\n
write(2 from 2) -> (20) \r\n
- write(20 from 417) -> (0) lookup changegroupsu
+ write(20 from 436) -> (0) lookup branchmap pus
write limit reached; closing socket
$ rm -f error.log
Server sends incomplete headers for batch request
- $ hg serve --config badserver.closeaftersendbytes=695 -p $HGPORT -d --pid-file=hg.pid -E error.log
+ $ hg serve --config badserver.closeaftersendbytes=714 -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS
TODO this output is horrible
@@ -323,13 +323,13 @@
readline(-1) -> (2?) host: localhost:$HGPORT\r\n (glob)
readline(-1) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n
readline(-1) -> (2) \r\n
- write(36 from 36) -> (659) HTTP/1.1 200 Script output follows\r\n
- write(23 from 23) -> (636) Server: badhttpserver\r\n
- write(37 from 37) -> (599) Date: Fri, 14 Apr 2017 00:00:00 GMT\r\n
- write(41 from 41) -> (558) Content-Type: application/mercurial-0.1\r\n
- write(21 from 21) -> (537) Content-Length: 417\r\n
- write(2 from 2) -> (535) \r\n
- write(417 from 417) -> (118) lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 bundle2=HG20%0Abookmarks%0Achangegroup%3D01%2C02%0Adigests%3Dmd5%2Csha1%2Csha512%0Aerror%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0Ahgtagsfnodes%0Alistkeys%0Apushkey%0Aremote-changegroup%3Dhttp%2Chttps unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx compression=none
+ write(36 from 36) -> (678) HTTP/1.1 200 Script output follows\r\n
+ write(23 from 23) -> (655) Server: badhttpserver\r\n
+ write(37 from 37) -> (618) Date: $HTTP_DATE$\r\n
+ write(41 from 41) -> (577) Content-Type: application/mercurial-0.1\r\n
+ write(21 from 21) -> (556) Content-Length: 436\r\n
+ write(2 from 2) -> (554) \r\n
+ write(436 from 436) -> (118) lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 bundle2=HG20%0Abookmarks%0Achangegroup%3D01%2C02%0Adigests%3Dmd5%2Csha1%2Csha512%0Aerror%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0Ahgtagsfnodes%0Alistkeys%0Apushkey%0Aremote-changegroup%3Dhttp%2Chttps%0Arev-branch-cache unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx compression=none
readline(65537) -> (26) GET /?cmd=batch HTTP/1.1\r\n
readline(-1) -> (27) Accept-Encoding: identity\r\n
readline(-1) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n
@@ -341,7 +341,7 @@
readline(-1) -> (2) \r\n
write(36 from 36) -> (82) HTTP/1.1 200 Script output follows\r\n
write(23 from 23) -> (59) Server: badhttpserver\r\n
- write(37 from 37) -> (22) Date: Fri, 14 Apr 2017 00:00:00 GMT\r\n
+ write(37 from 37) -> (22) Date: $HTTP_DATE$\r\n
write(22 from 41) -> (0) Content-Type: applicat
write limit reached; closing socket
write(36) -> HTTP/1.1 500 Internal Server Error\r\n
@@ -350,7 +350,7 @@
Server sends an incomplete HTTP response body to batch request
- $ hg serve --config badserver.closeaftersendbytes=760 -p $HGPORT -d --pid-file=hg.pid -E error.log
+ $ hg serve --config badserver.closeaftersendbytes=779 -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS
TODO client spews a stack due to uncaught ValueError in batch.results()
@@ -371,13 +371,13 @@
readline(-1) -> (2?) host: localhost:$HGPORT\r\n (glob)
readline(-1) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n
readline(-1) -> (2) \r\n
- write(36 from 36) -> (724) HTTP/1.1 200 Script output follows\r\n
- write(23 from 23) -> (701) Server: badhttpserver\r\n
- write(37 from 37) -> (664) Date: Fri, 14 Apr 2017 00:00:00 GMT\r\n
- write(41 from 41) -> (623) Content-Type: application/mercurial-0.1\r\n
- write(21 from 21) -> (602) Content-Length: 417\r\n
- write(2 from 2) -> (600) \r\n
- write(417 from 417) -> (183) lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 bundle2=HG20%0Abookmarks%0Achangegroup%3D01%2C02%0Adigests%3Dmd5%2Csha1%2Csha512%0Aerror%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0Ahgtagsfnodes%0Alistkeys%0Apushkey%0Aremote-changegroup%3Dhttp%2Chttps unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx compression=none
+ write(36 from 36) -> (743) HTTP/1.1 200 Script output follows\r\n
+ write(23 from 23) -> (720) Server: badhttpserver\r\n
+ write(37 from 37) -> (683) Date: $HTTP_DATE$\r\n
+ write(41 from 41) -> (642) Content-Type: application/mercurial-0.1\r\n
+ write(21 from 21) -> (621) Content-Length: 436\r\n
+ write(2 from 2) -> (619) \r\n
+ write(436 from 436) -> (183) lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 bundle2=HG20%0Abookmarks%0Achangegroup%3D01%2C02%0Adigests%3Dmd5%2Csha1%2Csha512%0Aerror%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0Ahgtagsfnodes%0Alistkeys%0Apushkey%0Aremote-changegroup%3Dhttp%2Chttps%0Arev-branch-cache unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx compression=none
readline(65537) -> (26) GET /?cmd=batch HTTP/1.1\r\n
readline(-1) -> (27) Accept-Encoding: identity\r\n
readline(-1) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n
@@ -389,7 +389,7 @@
readline(-1) -> (2) \r\n
write(36 from 36) -> (147) HTTP/1.1 200 Script output follows\r\n
write(23 from 23) -> (124) Server: badhttpserver\r\n
- write(37 from 37) -> (87) Date: Fri, 14 Apr 2017 00:00:00 GMT\r\n
+ write(37 from 37) -> (87) Date: $HTTP_DATE$\r\n
write(41 from 41) -> (46) Content-Type: application/mercurial-0.1\r\n
write(20 from 20) -> (26) Content-Length: 42\r\n
write(2 from 2) -> (24) \r\n
@@ -400,7 +400,7 @@
Server sends incomplete headers for getbundle response
- $ hg serve --config badserver.closeaftersendbytes=907 -p $HGPORT -d --pid-file=hg.pid -E error.log
+ $ hg serve --config badserver.closeaftersendbytes=926 -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS
TODO this output is terrible
@@ -423,13 +423,13 @@
readline(-1) -> (2?) host: localhost:$HGPORT\r\n (glob)
readline(-1) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n
readline(-1) -> (2) \r\n
- write(36 from 36) -> (871) HTTP/1.1 200 Script output follows\r\n
- write(23 from 23) -> (848) Server: badhttpserver\r\n
- write(37 from 37) -> (811) Date: Fri, 14 Apr 2017 00:00:00 GMT\r\n
- write(41 from 41) -> (770) Content-Type: application/mercurial-0.1\r\n
- write(21 from 21) -> (749) Content-Length: 417\r\n
- write(2 from 2) -> (747) \r\n
- write(417 from 417) -> (330) lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 bundle2=HG20%0Abookmarks%0Achangegroup%3D01%2C02%0Adigests%3Dmd5%2Csha1%2Csha512%0Aerror%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0Ahgtagsfnodes%0Alistkeys%0Apushkey%0Aremote-changegroup%3Dhttp%2Chttps unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx compression=none
+ write(36 from 36) -> (890) HTTP/1.1 200 Script output follows\r\n
+ write(23 from 23) -> (867) Server: badhttpserver\r\n
+ write(37 from 37) -> (830) Date: $HTTP_DATE$\r\n
+ write(41 from 41) -> (789) Content-Type: application/mercurial-0.1\r\n
+ write(21 from 21) -> (768) Content-Length: 436\r\n
+ write(2 from 2) -> (766) \r\n
+ write(436 from 436) -> (330) lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 bundle2=HG20%0Abookmarks%0Achangegroup%3D01%2C02%0Adigests%3Dmd5%2Csha1%2Csha512%0Aerror%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0Ahgtagsfnodes%0Alistkeys%0Apushkey%0Aremote-changegroup%3Dhttp%2Chttps%0Arev-branch-cache unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx compression=none
readline(65537) -> (26) GET /?cmd=batch HTTP/1.1\r\n
readline(-1) -> (27) Accept-Encoding: identity\r\n
readline(-1) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n
@@ -441,7 +441,7 @@
readline(-1) -> (2) \r\n
write(36 from 36) -> (294) HTTP/1.1 200 Script output follows\r\n
write(23 from 23) -> (271) Server: badhttpserver\r\n
- write(37 from 37) -> (234) Date: Fri, 14 Apr 2017 00:00:00 GMT\r\n
+ write(37 from 37) -> (234) Date: $HTTP_DATE$\r\n
write(41 from 41) -> (193) Content-Type: application/mercurial-0.1\r\n
write(20 from 20) -> (173) Content-Length: 42\r\n
write(2 from 2) -> (171) \r\n
@@ -449,7 +449,7 @@
readline(65537) -> (30) GET /?cmd=getbundle HTTP/1.1\r\n
readline(-1) -> (27) Accept-Encoding: identity\r\n
readline(-1) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n
- readline(-1) -> (440) x-hgarg-1: bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=96ee1d7354c4ad7372047672c36a1f561e3a6a4c&listkeys=phases%2Cbookmarks\r\n
+ readline(-1) -> (461) x-hgarg-1: bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=96ee1d7354c4ad7372047672c36a1f561e3a6a4c&listkeys=phases%2Cbookmarks\r\n
readline(-1) -> (48) x-hgproto-1: 0.1 0.2 comp=$USUAL_COMPRESSIONS$\r\n
readline(-1) -> (35) accept: application/mercurial-0.1\r\n
readline(-1) -> (2?) host: localhost:$HGPORT\r\n (glob)
@@ -457,7 +457,7 @@
readline(-1) -> (2) \r\n
write(36 from 36) -> (93) HTTP/1.1 200 Script output follows\r\n
write(23 from 23) -> (70) Server: badhttpserver\r\n
- write(37 from 37) -> (33) Date: Fri, 14 Apr 2017 00:00:00 GMT\r\n
+ write(37 from 37) -> (33) Date: $HTTP_DATE$\r\n
write(33 from 41) -> (0) Content-Type: application/mercuri
write limit reached; closing socket
write(36) -> HTTP/1.1 500 Internal Server Error\r\n
@@ -466,7 +466,7 @@
Server sends empty HTTP body for getbundle
- $ hg serve --config badserver.closeaftersendbytes=945 -p $HGPORT -d --pid-file=hg.pid -E error.log
+ $ hg serve --config badserver.closeaftersendbytes=964 -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS
$ hg clone http://localhost:$HGPORT/ clone
@@ -484,13 +484,13 @@
readline(-1) -> (2?) host: localhost:$HGPORT\r\n (glob)
readline(-1) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n
readline(-1) -> (2) \r\n
- write(36 from 36) -> (909) HTTP/1.1 200 Script output follows\r\n
- write(23 from 23) -> (886) Server: badhttpserver\r\n
- write(37 from 37) -> (849) Date: Fri, 14 Apr 2017 00:00:00 GMT\r\n
- write(41 from 41) -> (808) Content-Type: application/mercurial-0.1\r\n
- write(21 from 21) -> (787) Content-Length: 417\r\n
- write(2 from 2) -> (785) \r\n
- write(417 from 417) -> (368) lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 bundle2=HG20%0Abookmarks%0Achangegroup%3D01%2C02%0Adigests%3Dmd5%2Csha1%2Csha512%0Aerror%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0Ahgtagsfnodes%0Alistkeys%0Apushkey%0Aremote-changegroup%3Dhttp%2Chttps unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx compression=none
+ write(36 from 36) -> (928) HTTP/1.1 200 Script output follows\r\n
+ write(23 from 23) -> (905) Server: badhttpserver\r\n
+ write(37 from 37) -> (868) Date: $HTTP_DATE$\r\n
+ write(41 from 41) -> (827) Content-Type: application/mercurial-0.1\r\n
+ write(21 from 21) -> (806) Content-Length: 436\r\n
+ write(2 from 2) -> (804) \r\n
+ write(436 from 436) -> (368) lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 bundle2=HG20%0Abookmarks%0Achangegroup%3D01%2C02%0Adigests%3Dmd5%2Csha1%2Csha512%0Aerror%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0Ahgtagsfnodes%0Alistkeys%0Apushkey%0Aremote-changegroup%3Dhttp%2Chttps%0Arev-branch-cache unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx compression=none
readline(65537) -> (26) GET /?cmd=batch HTTP/1.1\r\n
readline(-1) -> (27) Accept-Encoding: identity\r\n
readline(-1) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n
@@ -502,7 +502,7 @@
readline(-1) -> (2) \r\n
write(36 from 36) -> (332) HTTP/1.1 200 Script output follows\r\n
write(23 from 23) -> (309) Server: badhttpserver\r\n
- write(37 from 37) -> (272) Date: Fri, 14 Apr 2017 00:00:00 GMT\r\n
+ write(37 from 37) -> (272) Date: $HTTP_DATE$\r\n
write(41 from 41) -> (231) Content-Type: application/mercurial-0.1\r\n
write(20 from 20) -> (211) Content-Length: 42\r\n
write(2 from 2) -> (209) \r\n
@@ -510,7 +510,7 @@
readline(65537) -> (30) GET /?cmd=getbundle HTTP/1.1\r\n
readline(-1) -> (27) Accept-Encoding: identity\r\n
readline(-1) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n
- readline(-1) -> (440) x-hgarg-1: bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=96ee1d7354c4ad7372047672c36a1f561e3a6a4c&listkeys=phases%2Cbookmarks\r\n
+ readline(-1) -> (461) x-hgarg-1: bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=96ee1d7354c4ad7372047672c36a1f561e3a6a4c&listkeys=phases%2Cbookmarks\r\n
readline(-1) -> (48) x-hgproto-1: 0.1 0.2 comp=$USUAL_COMPRESSIONS$\r\n
readline(-1) -> (35) accept: application/mercurial-0.1\r\n
readline(-1) -> (2?) host: localhost:$HGPORT\r\n (glob)
@@ -518,7 +518,7 @@
readline(-1) -> (2) \r\n
write(36 from 36) -> (131) HTTP/1.1 200 Script output follows\r\n
write(23 from 23) -> (108) Server: badhttpserver\r\n
- write(37 from 37) -> (71) Date: Fri, 14 Apr 2017 00:00:00 GMT\r\n
+ write(37 from 37) -> (71) Date: $HTTP_DATE$\r\n
write(41 from 41) -> (30) Content-Type: application/mercurial-0.2\r\n
write(28 from 28) -> (2) Transfer-Encoding: chunked\r\n
write(2 from 2) -> (0) \r\n
@@ -529,7 +529,7 @@
Server sends partial compression string
- $ hg serve --config badserver.closeaftersendbytes=969 -p $HGPORT -d --pid-file=hg.pid -E error.log
+ $ hg serve --config badserver.closeaftersendbytes=988 -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS
$ hg clone http://localhost:$HGPORT/ clone
@@ -547,13 +547,13 @@
readline(-1) -> (2?) host: localhost:$HGPORT\r\n (glob)
readline(-1) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n
readline(-1) -> (2) \r\n
- write(36 from 36) -> (933) HTTP/1.1 200 Script output follows\r\n
- write(23 from 23) -> (910) Server: badhttpserver\r\n
- write(37 from 37) -> (873) Date: Fri, 14 Apr 2017 00:00:00 GMT\r\n
- write(41 from 41) -> (832) Content-Type: application/mercurial-0.1\r\n
- write(21 from 21) -> (811) Content-Length: 417\r\n
- write(2 from 2) -> (809) \r\n
- write(417 from 417) -> (392) lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 bundle2=HG20%0Abookmarks%0Achangegroup%3D01%2C02%0Adigests%3Dmd5%2Csha1%2Csha512%0Aerror%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0Ahgtagsfnodes%0Alistkeys%0Apushkey%0Aremote-changegroup%3Dhttp%2Chttps unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx compression=none
+ write(36 from 36) -> (952) HTTP/1.1 200 Script output follows\r\n
+ write(23 from 23) -> (929) Server: badhttpserver\r\n
+ write(37 from 37) -> (892) Date: $HTTP_DATE$\r\n
+ write(41 from 41) -> (851) Content-Type: application/mercurial-0.1\r\n
+ write(21 from 21) -> (830) Content-Length: 436\r\n
+ write(2 from 2) -> (828) \r\n
+ write(436 from 436) -> (392) lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 bundle2=HG20%0Abookmarks%0Achangegroup%3D01%2C02%0Adigests%3Dmd5%2Csha1%2Csha512%0Aerror%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0Ahgtagsfnodes%0Alistkeys%0Apushkey%0Aremote-changegroup%3Dhttp%2Chttps%0Arev-branch-cache unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx compression=none
readline(65537) -> (26) GET /?cmd=batch HTTP/1.1\r\n
readline(-1) -> (27) Accept-Encoding: identity\r\n
readline(-1) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n
@@ -565,7 +565,7 @@
readline(-1) -> (2) \r\n
write(36 from 36) -> (356) HTTP/1.1 200 Script output follows\r\n
write(23 from 23) -> (333) Server: badhttpserver\r\n
- write(37 from 37) -> (296) Date: Fri, 14 Apr 2017 00:00:00 GMT\r\n
+ write(37 from 37) -> (296) Date: $HTTP_DATE$\r\n
write(41 from 41) -> (255) Content-Type: application/mercurial-0.1\r\n
write(20 from 20) -> (235) Content-Length: 42\r\n
write(2 from 2) -> (233) \r\n
@@ -573,7 +573,7 @@
readline(65537) -> (30) GET /?cmd=getbundle HTTP/1.1\r\n
readline(-1) -> (27) Accept-Encoding: identity\r\n
readline(-1) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n
- readline(-1) -> (440) x-hgarg-1: bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=96ee1d7354c4ad7372047672c36a1f561e3a6a4c&listkeys=phases%2Cbookmarks\r\n
+ readline(-1) -> (461) x-hgarg-1: bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=96ee1d7354c4ad7372047672c36a1f561e3a6a4c&listkeys=phases%2Cbookmarks\r\n
readline(-1) -> (48) x-hgproto-1: 0.1 0.2 comp=$USUAL_COMPRESSIONS$\r\n
readline(-1) -> (35) accept: application/mercurial-0.1\r\n
readline(-1) -> (2?) host: localhost:$HGPORT\r\n (glob)
@@ -581,7 +581,7 @@
readline(-1) -> (2) \r\n
write(36 from 36) -> (155) HTTP/1.1 200 Script output follows\r\n
write(23 from 23) -> (132) Server: badhttpserver\r\n
- write(37 from 37) -> (95) Date: Fri, 14 Apr 2017 00:00:00 GMT\r\n
+ write(37 from 37) -> (95) Date: $HTTP_DATE$\r\n
write(41 from 41) -> (54) Content-Type: application/mercurial-0.2\r\n
write(28 from 28) -> (26) Transfer-Encoding: chunked\r\n
write(2 from 2) -> (24) \r\n
@@ -595,7 +595,7 @@
Server sends partial bundle2 header magic
- $ hg serve --config badserver.closeaftersendbytes=966 -p $HGPORT -d --pid-file=hg.pid -E error.log
+ $ hg serve --config badserver.closeaftersendbytes=985 -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS
$ hg clone http://localhost:$HGPORT/ clone
@@ -619,7 +619,7 @@
Server sends incomplete bundle2 stream params length
- $ hg serve --config badserver.closeaftersendbytes=975 -p $HGPORT -d --pid-file=hg.pid -E error.log
+ $ hg serve --config badserver.closeaftersendbytes=994 -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS
$ hg clone http://localhost:$HGPORT/ clone
@@ -644,7 +644,7 @@
Servers stops after bundle2 stream params header
- $ hg serve --config badserver.closeaftersendbytes=978 -p $HGPORT -d --pid-file=hg.pid -E error.log
+ $ hg serve --config badserver.closeaftersendbytes=997 -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS
$ hg clone http://localhost:$HGPORT/ clone
@@ -669,7 +669,7 @@
Server stops sending after bundle2 part header length
- $ hg serve --config badserver.closeaftersendbytes=987 -p $HGPORT -d --pid-file=hg.pid -E error.log
+ $ hg serve --config badserver.closeaftersendbytes=1006 -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS
$ hg clone http://localhost:$HGPORT/ clone
@@ -695,7 +695,7 @@
Server stops sending after bundle2 part header
- $ hg serve --config badserver.closeaftersendbytes=1034 -p $HGPORT -d --pid-file=hg.pid -E error.log
+ $ hg serve --config badserver.closeaftersendbytes=1053 -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS
$ hg clone http://localhost:$HGPORT/ clone
@@ -725,7 +725,7 @@
Server stops after bundle2 part payload chunk size
- $ hg serve --config badserver.closeaftersendbytes=1055 -p $HGPORT -d --pid-file=hg.pid -E error.log
+ $ hg serve --config badserver.closeaftersendbytes=1074 -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS
$ hg clone http://localhost:$HGPORT/ clone
@@ -756,7 +756,7 @@
Server stops sending in middle of bundle2 payload chunk
- $ hg serve --config badserver.closeaftersendbytes=1516 -p $HGPORT -d --pid-file=hg.pid -E error.log
+ $ hg serve --config badserver.closeaftersendbytes=1535 -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS
$ hg clone http://localhost:$HGPORT/ clone
@@ -788,7 +788,7 @@
Server stops sending after 0 length payload chunk size
- $ hg serve --config badserver.closeaftersendbytes=1547 -p $HGPORT -d --pid-file=hg.pid -E error.log
+ $ hg serve --config badserver.closeaftersendbytes=1566 -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS
$ hg clone http://localhost:$HGPORT/ clone
@@ -825,7 +825,8 @@
Server stops sending after 0 part bundle part header (indicating end of bundle2 payload)
This is before the 0 size chunked transfer part that signals end of HTTP response.
- $ hg serve --config badserver.closeaftersendbytes=1722 -p $HGPORT -d --pid-file=hg.pid -E error.log
+# $ hg serve --config badserver.closeaftersendbytes=1741 -p $HGPORT -d --pid-file=hg.pid -E error.log
+ $ hg serve --config badserver.closeaftersendbytes=1848 -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS
$ hg clone http://localhost:$HGPORT/ clone
@@ -841,24 +842,24 @@
$ killdaemons.py $DAEMON_PIDS
$ tail -22 error.log
- write(28 from 28) -> (779) Transfer-Encoding: chunked\r\n
- write(2 from 2) -> (777) \r\n
- write(6 from 6) -> (771) 1\\r\\n\x04\\r\\n (esc)
- write(9 from 9) -> (762) 4\r\nnone\r\n
- write(9 from 9) -> (753) 4\r\nHG20\r\n
- write(9 from 9) -> (744) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
- write(9 from 9) -> (735) 4\\r\\n\x00\x00\x00)\\r\\n (esc)
- write(47 from 47) -> (688) 29\\r\\n\x0bCHANGEGROUP\x00\x00\x00\x00\x01\x01\x07\x02 \x01version02nbchanges1\\r\\n (esc)
- write(9 from 9) -> (679) 4\\r\\n\x00\x00\x01\xd2\\r\\n (esc)
- write(473 from 473) -> (206) 1d2\\r\\n\x00\x00\x00\xb2\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>6a3df4de388f3c4f8e28f4f9a814299a3cbb5f50\\ntest\\n0 0\\nfoo\\n\\ninitial\x00\x00\x00\x00\x00\x00\x00\xa1j=\xf4\xde8\x8f<O\x8e(\xf4\xf9\xa8\x14)\x9a<\xbb_P\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00b80de5d138758541c5f05265ad144ab9fa86d1db\\n\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00h\xb8\\r\xe5\xd18u\x85A\xc5\xf0Re\xad\x14J\xb9\xfa\x86\xd1\xdb\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\\r\\n (esc)
- write(9 from 9) -> (197) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
- write(9 from 9) -> (188) 4\\r\\n\x00\x00\x00 \\r\\n (esc)
- write(38 from 38) -> (150) 20\\r\\n\x08LISTKEYS\x00\x00\x00\x01\x01\x00 \x06namespacephases\\r\\n (esc)
- write(9 from 9) -> (141) 4\\r\\n\x00\x00\x00:\\r\\n (esc)
- write(64 from 64) -> (77) 3a\r\n96ee1d7354c4ad7372047672c36a1f561e3a6a4c 1\npublishing True\r\n
- write(9 from 9) -> (68) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
- write(9 from 9) -> (59) 4\\r\\n\x00\x00\x00#\\r\\n (esc)
- write(41 from 41) -> (18) 23\\r\\n\x08LISTKEYS\x00\x00\x00\x02\x01\x00 namespacebookmarks\\r\\n (esc)
+ write(9 from 9) -> (851) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+ write(9 from 9) -> (842) 4\\r\\n\x00\x00\x00)\\r\\n (esc)
+ write(47 from 47) -> (795) 29\\r\\n\x0bCHANGEGROUP\x00\x00\x00\x00\x01\x01\x07\x02 \x01version02nbchanges1\\r\\n (esc)
+ write(9 from 9) -> (786) 4\\r\\n\x00\x00\x01\xd2\\r\\n (esc)
+ write(473 from 473) -> (313) 1d2\\r\\n\x00\x00\x00\xb2\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>6a3df4de388f3c4f8e28f4f9a814299a3cbb5f50\\ntest\\n0 0\\nfoo\\n\\ninitial\x00\x00\x00\x00\x00\x00\x00\xa1j=\xf4\xde8\x8f<O\x8e(\xf4\xf9\xa8\x14)\x9a<\xbb_P\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00b80de5d138758541c5f05265ad144ab9fa86d1db\\n\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00h\xb8\\r\xe5\xd18u\x85A\xc5\xf0Re\xad\x14J\xb9\xfa\x86\xd1\xdb\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\\r\\n (esc)
+ write(9 from 9) -> (304) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+ write(9 from 9) -> (295) 4\\r\\n\x00\x00\x00 \\r\\n (esc)
+ write(38 from 38) -> (257) 20\\r\\n\x08LISTKEYS\x00\x00\x00\x01\x01\x00 \x06namespacephases\\r\\n (esc)
+ write(9 from 9) -> (248) 4\\r\\n\x00\x00\x00:\\r\\n (esc)
+ write(64 from 64) -> (184) 3a\r\n96ee1d7354c4ad7372047672c36a1f561e3a6a4c 1\npublishing True\r\n
+ write(9 from 9) -> (175) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+ write(9 from 9) -> (166) 4\\r\\n\x00\x00\x00#\\r\\n (esc)
+ write(41 from 41) -> (125) 23\\r\\n\x08LISTKEYS\x00\x00\x00\x02\x01\x00 namespacebookmarks\\r\\n (esc)
+ write(9 from 9) -> (116) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+ write(9 from 9) -> (107) 4\\r\\n\x00\x00\x00\x1d\\r\\n (esc)
+ write(35 from 35) -> (72) 1d\\r\\n\x16CACHE:REV-BRANCH-CACHE\x00\x00\x00\x03\x00\x00\\r\\n (esc)
+ write(9 from 9) -> (63) 4\\r\\n\x00\x00\x00'\\r\\n (esc)
+ write(45 from 45) -> (18) 27\\r\\n\x00\x00\x00\x07\x00\x00\x00\x01\x00\x00\x00\x00default\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\\r\\n (esc)
write(9 from 9) -> (9) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
write(9 from 9) -> (0) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
write limit reached; closing socket
@@ -869,7 +870,7 @@
Server sends a size 0 chunked-transfer size without terminating \r\n
- $ hg serve --config badserver.closeaftersendbytes=1725 -p $HGPORT -d --pid-file=hg.pid -E error.log
+ $ hg serve --config badserver.closeaftersendbytes=1851 -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS
$ hg clone http://localhost:$HGPORT/ clone
@@ -885,24 +886,24 @@
$ killdaemons.py $DAEMON_PIDS
$ tail -23 error.log
- write(28 from 28) -> (782) Transfer-Encoding: chunked\r\n
- write(2 from 2) -> (780) \r\n
- write(6 from 6) -> (774) 1\\r\\n\x04\\r\\n (esc)
- write(9 from 9) -> (765) 4\r\nnone\r\n
- write(9 from 9) -> (756) 4\r\nHG20\r\n
- write(9 from 9) -> (747) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
- write(9 from 9) -> (738) 4\\r\\n\x00\x00\x00)\\r\\n (esc)
- write(47 from 47) -> (691) 29\\r\\n\x0bCHANGEGROUP\x00\x00\x00\x00\x01\x01\x07\x02 \x01version02nbchanges1\\r\\n (esc)
- write(9 from 9) -> (682) 4\\r\\n\x00\x00\x01\xd2\\r\\n (esc)
- write(473 from 473) -> (209) 1d2\\r\\n\x00\x00\x00\xb2\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>6a3df4de388f3c4f8e28f4f9a814299a3cbb5f50\\ntest\\n0 0\\nfoo\\n\\ninitial\x00\x00\x00\x00\x00\x00\x00\xa1j=\xf4\xde8\x8f<O\x8e(\xf4\xf9\xa8\x14)\x9a<\xbb_P\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00b80de5d138758541c5f05265ad144ab9fa86d1db\\n\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00h\xb8\\r\xe5\xd18u\x85A\xc5\xf0Re\xad\x14J\xb9\xfa\x86\xd1\xdb\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\\r\\n (esc)
- write(9 from 9) -> (200) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
- write(9 from 9) -> (191) 4\\r\\n\x00\x00\x00 \\r\\n (esc)
- write(38 from 38) -> (153) 20\\r\\n\x08LISTKEYS\x00\x00\x00\x01\x01\x00 \x06namespacephases\\r\\n (esc)
- write(9 from 9) -> (144) 4\\r\\n\x00\x00\x00:\\r\\n (esc)
- write(64 from 64) -> (80) 3a\r\n96ee1d7354c4ad7372047672c36a1f561e3a6a4c 1\npublishing True\r\n
- write(9 from 9) -> (71) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
- write(9 from 9) -> (62) 4\\r\\n\x00\x00\x00#\\r\\n (esc)
- write(41 from 41) -> (21) 23\\r\\n\x08LISTKEYS\x00\x00\x00\x02\x01\x00 namespacebookmarks\\r\\n (esc)
+ write(9 from 9) -> (854) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+ write(9 from 9) -> (845) 4\\r\\n\x00\x00\x00)\\r\\n (esc)
+ write(47 from 47) -> (798) 29\\r\\n\x0bCHANGEGROUP\x00\x00\x00\x00\x01\x01\x07\x02 \x01version02nbchanges1\\r\\n (esc)
+ write(9 from 9) -> (789) 4\\r\\n\x00\x00\x01\xd2\\r\\n (esc)
+ write(473 from 473) -> (316) 1d2\\r\\n\x00\x00\x00\xb2\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>6a3df4de388f3c4f8e28f4f9a814299a3cbb5f50\\ntest\\n0 0\\nfoo\\n\\ninitial\x00\x00\x00\x00\x00\x00\x00\xa1j=\xf4\xde8\x8f<O\x8e(\xf4\xf9\xa8\x14)\x9a<\xbb_P\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00b80de5d138758541c5f05265ad144ab9fa86d1db\\n\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00h\xb8\\r\xe5\xd18u\x85A\xc5\xf0Re\xad\x14J\xb9\xfa\x86\xd1\xdb\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\\r\\n (esc)
+ write(9 from 9) -> (307) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+ write(9 from 9) -> (298) 4\\r\\n\x00\x00\x00 \\r\\n (esc)
+ write(38 from 38) -> (260) 20\\r\\n\x08LISTKEYS\x00\x00\x00\x01\x01\x00 \x06namespacephases\\r\\n (esc)
+ write(9 from 9) -> (251) 4\\r\\n\x00\x00\x00:\\r\\n (esc)
+ write(64 from 64) -> (187) 3a\r\n96ee1d7354c4ad7372047672c36a1f561e3a6a4c 1\npublishing True\r\n
+ write(9 from 9) -> (178) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+ write(9 from 9) -> (169) 4\\r\\n\x00\x00\x00#\\r\\n (esc)
+ write(41 from 41) -> (128) 23\\r\\n\x08LISTKEYS\x00\x00\x00\x02\x01\x00 namespacebookmarks\\r\\n (esc)
+ write(9 from 9) -> (119) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+ write(9 from 9) -> (110) 4\\r\\n\x00\x00\x00\x1d\\r\\n (esc)
+ write(35 from 35) -> (75) 1d\\r\\n\x16CACHE:REV-BRANCH-CACHE\x00\x00\x00\x03\x00\x00\\r\\n (esc)
+ write(9 from 9) -> (66) 4\\r\\n\x00\x00\x00'\\r\\n (esc)
+ write(45 from 45) -> (21) 27\\r\\n\x00\x00\x00\x07\x00\x00\x00\x01\x00\x00\x00\x00default\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\\r\\n (esc)
write(9 from 9) -> (12) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
write(9 from 9) -> (3) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
write(3 from 5) -> (0) 0\r\n
--- a/tests/test-http-branchmap.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-http-branchmap.t Mon Mar 19 08:07:18 2018 -0700
@@ -68,22 +68,22 @@
> self._file = stdout
>
> def write(self, data):
- > if data == '47\n':
+ > if data == b'47\n':
> # latin1 encoding is one %xx (3 bytes) shorter
- > data = '44\n'
- > elif data.startswith('%C3%A6 '):
+ > data = b'44\n'
+ > elif data.startswith(b'%C3%A6 '):
> # translate to latin1 encoding
- > data = '%%E6 %s' % data[7:]
+ > data = b'%%E6 %s' % data[7:]
> self._file.write(data)
>
> def __getattr__(self, name):
> return getattr(self._file, name)
>
- > sys.stdout = StdoutWrapper(sys.stdout)
- > sys.stderr = StdoutWrapper(sys.stderr)
+ > sys.stdout = StdoutWrapper(getattr(sys.stdout, 'buffer', sys.stdout))
+ > sys.stderr = StdoutWrapper(getattr(sys.stderr, 'buffer', sys.stderr))
>
> myui = ui.ui.load()
- > repo = hg.repository(myui, 'a')
+ > repo = hg.repository(myui, b'a')
> commands.serve(myui, repo, stdio=True, cmdserver=False)
> EOF
$ echo baz >> b/foo
--- a/tests/test-http-bundle1.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-http-bundle1.t Mon Mar 19 08:07:18 2018 -0700
@@ -68,7 +68,7 @@
$ cat > $TESTTMP/removesupportedformat.py << EOF
> from mercurial import localrepo
> def extsetup(ui):
- > localrepo.localrepository.supportedformats.remove('generaldelta')
+ > localrepo.localrepository.supportedformats.remove(b'generaldelta')
> EOF
$ hg clone --config extensions.rsf=$TESTTMP/removesupportedformat.py --stream http://localhost:$HGPORT/ copy3
@@ -177,11 +177,12 @@
> import base64
> from mercurial.hgweb import common
> def perform_authentication(hgweb, req, op):
- > auth = req.env.get('HTTP_AUTHORIZATION')
+ > auth = req.headers.get('Authorization')
> if not auth:
> raise common.ErrorResponse(common.HTTP_UNAUTHORIZED, 'who',
> [('WWW-Authenticate', 'Basic Realm="mercurial"')])
- > if base64.b64decode(auth.split()[1]).split(':', 1) != ['user', 'pass']:
+ > if base64.b64decode(auth.split()[1]).split(b':', 1) != [b'user',
+ > b'pass']:
> raise common.ErrorResponse(common.HTTP_FORBIDDEN, 'no')
> def extsetup():
> common.permhooks.insert(0, perform_authentication)
--- a/tests/test-http-permissions.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-http-permissions.t Mon Mar 19 08:07:18 2018 -0700
@@ -21,12 +21,10 @@
> @wireproto.wireprotocommand('customwritenoperm')
> def customwritenoperm(repo, proto):
> return b'write command no defined permissions\n'
- > wireproto.permissions['customreadwithperm'] = 'pull'
- > @wireproto.wireprotocommand('customreadwithperm')
+ > @wireproto.wireprotocommand('customreadwithperm', permission='pull')
> def customreadwithperm(repo, proto):
> return b'read-only command w/ defined permissions\n'
- > wireproto.permissions['customwritewithperm'] = 'push'
- > @wireproto.wireprotocommand('customwritewithperm')
+ > @wireproto.wireprotocommand('customwritewithperm', permission='push')
> def customwritewithperm(repo, proto):
> return b'write command w/ defined permissions\n'
> EOF
--- a/tests/test-http-protocol.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-http-protocol.t Mon Mar 19 08:07:18 2018 -0700
@@ -49,8 +49,8 @@
$ get-with-headers.py --headeronly $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
200 Script output follows
content-type: application/mercurial-0.1
- date: * (glob)
- server: * (glob)
+ date: $HTTP_DATE$
+ server: testing stub value
transfer-encoding: chunked
Server should send application/mercurial-0.1 when client says it wants it
@@ -58,8 +58,8 @@
$ get-with-headers.py --hgproto '0.1' --headeronly $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
200 Script output follows
content-type: application/mercurial-0.1
- date: * (glob)
- server: * (glob)
+ date: $HTTP_DATE$
+ server: testing stub value
transfer-encoding: chunked
Server should send application/mercurial-0.2 when client says it wants it
@@ -67,15 +67,15 @@
$ get-with-headers.py --hgproto '0.2' --headeronly $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
200 Script output follows
content-type: application/mercurial-0.2
- date: * (glob)
- server: * (glob)
+ date: $HTTP_DATE$
+ server: testing stub value
transfer-encoding: chunked
$ get-with-headers.py --hgproto '0.1 0.2' --headeronly $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
200 Script output follows
content-type: application/mercurial-0.2
- date: * (glob)
- server: * (glob)
+ date: $HTTP_DATE$
+ server: testing stub value
transfer-encoding: chunked
Requesting a compression format that server doesn't support results will fall back to 0.1
@@ -83,8 +83,8 @@
$ get-with-headers.py --hgproto '0.2 comp=aa' --headeronly $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
200 Script output follows
content-type: application/mercurial-0.1
- date: * (glob)
- server: * (glob)
+ date: $HTTP_DATE$
+ server: testing stub value
transfer-encoding: chunked
#if zstd
@@ -105,8 +105,8 @@
200 Script output follows
content-length: 41
content-type: application/mercurial-0.1
- date: * (glob)
- server: * (glob)
+ date: $HTTP_DATE$
+ server: testing stub value
e93700bd72895c5addab234c56d4024b487a362f
@@ -161,3 +161,104 @@
0000: 32 30 30 20 53 63 72 69 70 74 20 6f 75 74 70 75 |200 Script outpu|
0010: 74 20 66 6f 6c 6c 6f 77 73 0a 0a 04 7a 6c 69 62 |t follows...zlib|
0020: 78 |x|
+
+ $ killdaemons.py
+ $ cd ..
+
+Test listkeys for listing namespaces
+
+ $ hg init empty
+ $ hg -R empty serve -p $HGPORT -d --pid-file hg.pid
+ $ cat hg.pid > $DAEMON_PIDS
+
+ $ hg --verbose debugwireproto http://$LOCALIP:$HGPORT << EOF
+ > command listkeys
+ > namespace namespaces
+ > EOF
+ s> sendall(*, 0): (glob)
+ s> GET /?cmd=capabilities HTTP/1.1\r\n
+ s> Accept-Encoding: identity\r\n
+ s> accept: application/mercurial-0.1\r\n
+ s> host: $LOCALIP:$HGPORT\r\n (glob)
+ s> user-agent: mercurial/proto-1.0 (Mercurial *)\r\n (glob)
+ s> \r\n
+ s> makefile('rb', None)
+ s> readline() -> 36:
+ s> HTTP/1.1 200 Script output follows\r\n
+ s> readline() -> 28:
+ s> Server: testing stub value\r\n
+ s> readline() -> *: (glob)
+ s> Date: $HTTP_DATE$\r\n
+ s> readline() -> 41:
+ s> Content-Type: application/mercurial-0.1\r\n
+ s> readline() -> 21:
+ s> Content-Length: *\r\n (glob)
+ s> readline() -> 2:
+ s> \r\n
+ s> read(*) -> *: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx compression=$BUNDLE2_COMPRESSIONS$ (glob)
+ sending listkeys command
+ s> sendall(*, 0): (glob)
+ s> GET /?cmd=listkeys HTTP/1.1\r\n
+ s> Accept-Encoding: identity\r\n
+ s> vary: X-HgArg-1,X-HgProto-1\r\n
+ s> x-hgarg-1: namespace=namespaces\r\n
+ s> x-hgproto-1: 0.1 0.2 comp=$USUAL_COMPRESSIONS$\r\n
+ s> accept: application/mercurial-0.1\r\n
+ s> host: $LOCALIP:$HGPORT\r\n (glob)
+ s> user-agent: mercurial/proto-1.0 (Mercurial *)\r\n (glob)
+ s> \r\n
+ s> makefile('rb', None)
+ s> readline() -> 36:
+ s> HTTP/1.1 200 Script output follows\r\n
+ s> readline() -> 28:
+ s> Server: testing stub value\r\n
+ s> readline() -> *: (glob)
+ s> Date: $HTTP_DATE$\r\n
+ s> readline() -> 41:
+ s> Content-Type: application/mercurial-0.1\r\n
+ s> readline() -> 20:
+ s> Content-Length: 30\r\n
+ s> readline() -> 2:
+ s> \r\n
+ s> read(30) -> 30:
+ s> bookmarks \n
+ s> namespaces \n
+ s> phases
+ response: bookmarks \nnamespaces \nphases
+
+Same thing, but with "httprequest" command
+
+ $ hg --verbose debugwireproto --peer raw http://$LOCALIP:$HGPORT << EOF
+ > httprequest GET ?cmd=listkeys
+ > accept: application/mercurial-0.1
+ > user-agent: mercurial/proto-1.0 (Mercurial 42)
+ > x-hgarg-1: namespace=namespaces
+ > EOF
+ using raw connection to peer
+ s> sendall(*, 0): (glob)
+ s> GET /?cmd=listkeys HTTP/1.1\r\n
+ s> Accept-Encoding: identity\r\n
+ s> accept: application/mercurial-0.1\r\n
+ s> user-agent: mercurial/proto-1.0 (Mercurial 42)\r\n (glob)
+ s> x-hgarg-1: namespace=namespaces\r\n
+ s> host: $LOCALIP:$HGPORT\r\n (glob)
+ s> \r\n
+ s> makefile('rb', None)
+ s> readline() -> 36:
+ s> HTTP/1.1 200 Script output follows\r\n
+ s> readline() -> 28:
+ s> Server: testing stub value\r\n
+ s> readline() -> *: (glob)
+ s> Date: $HTTP_DATE$\r\n
+ s> readline() -> 41:
+ s> Content-Type: application/mercurial-0.1\r\n
+ s> readline() -> 20:
+ s> Content-Length: 30\r\n
+ s> readline() -> 2:
+ s> \r\n
+ s> read(30) -> 30:
+ s> bookmarks \n
+ s> namespaces \n
+ s> phases
+
+ $ killdaemons.py
--- a/tests/test-http.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-http.t Mon Mar 19 08:07:18 2018 -0700
@@ -168,7 +168,7 @@
> import base64
> from mercurial.hgweb import common
> def perform_authentication(hgweb, req, op):
- > auth = req.env.get('HTTP_AUTHORIZATION')
+ > auth = req.headers.get('Authorization')
> if not auth:
> raise common.ErrorResponse(common.HTTP_UNAUTHORIZED, 'who',
> [('WWW-Authenticate', 'Basic Realm="mercurial"')])
@@ -257,6 +257,9 @@
http auth: user user, password ****
devel-peer-request: finished in *.???? seconds (200) (glob)
query 1; heads
+ devel-peer-request: batched-content
+ devel-peer-request: - heads (0 arguments)
+ devel-peer-request: - known (1 arguments)
sending batch command
devel-peer-request: GET http://localhost:$HGPORT2/?cmd=batch
devel-peer-request: Vary X-HgArg-1,X-HgProto-1
@@ -304,20 +307,20 @@
list of changesets:
7f4e523d01f2cc3765ac8934da3d14db775ff872
bundle2-output-bundle: "HG20", 5 parts total
- bundle2-output-part: "replycaps" 188 bytes payload
+ bundle2-output-part: "replycaps" 205 bytes payload
bundle2-output-part: "check:phases" 24 bytes payload
bundle2-output-part: "check:heads" streamed payload
bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
bundle2-output-part: "phase-heads" 24 bytes payload
sending unbundle command
- sending 996 bytes
+ sending 1013 bytes
devel-peer-request: POST http://localhost:$HGPORT2/?cmd=unbundle
- devel-peer-request: Content-length 996
+ devel-peer-request: Content-length 1013
devel-peer-request: Content-type application/mercurial-0.1
devel-peer-request: Vary X-HgArg-1,X-HgProto-1
devel-peer-request: X-hgproto-1 0.1 0.2 comp=$USUAL_COMPRESSIONS$
devel-peer-request: 16 bytes of commands arguments in headers
- devel-peer-request: 996 bytes of data
+ devel-peer-request: 1013 bytes of data
devel-peer-request: finished in *.???? seconds (200) (glob)
bundle2-input-bundle: no-transaction
bundle2-input-part: "reply:changegroup" (advisory) (params: 0 advisory) supported
@@ -510,7 +513,7 @@
> from mercurial import util
> from mercurial.hgweb import common
> def perform_authentication(hgweb, req, op):
- > cookie = req.env.get('HTTP_COOKIE')
+ > cookie = req.headers.get('Cookie')
> if not cookie:
> raise common.ErrorResponse(common.HTTP_SERVER_ERROR, 'no-cookie')
> raise common.ErrorResponse(common.HTTP_SERVER_ERROR, 'Cookie: %s' % cookie)
--- a/tests/test-i18n.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-i18n.t Mon Mar 19 08:07:18 2018 -0700
@@ -54,8 +54,8 @@
Check i18n cache isn't reused after encoding change:
$ cat > $TESTTMP/encodingchange.py << EOF
+ > from mercurial.i18n import _
> from mercurial import encoding, registrar
- > from mercurial.i18n import _
> cmdtable = {}
> command = registrar.command(cmdtable)
> @command(b'encodingchange', norepo=True)
--- a/tests/test-impexp-branch.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-impexp-branch.t Mon Mar 19 08:07:18 2018 -0700
@@ -74,9 +74,9 @@
$ hg strip --no-backup .
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
>>> import re
- >>> p = file('../r1.patch', 'rb').read()
+ >>> p = open('../r1.patch', 'rb').read()
>>> p = re.sub(r'Parent\s+', 'Parent ', p)
- >>> file('../r1-ws.patch', 'wb').write(p)
+ >>> open('../r1-ws.patch', 'wb').write(p)
$ hg import --exact ../r1-ws.patch
applying ../r1-ws.patch
--- a/tests/test-import-bypass.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-import-bypass.t Mon Mar 19 08:07:18 2018 -0700
@@ -227,7 +227,7 @@
(this also tests that editor is not invoked for '--bypass', if the
commit message is explicitly specified, regardless of '--edit')
- $ $PYTHON -c 'file("a", "wb").write("a\r\n")'
+ $ $PYTHON -c 'open("a", "wb").write(b"a\r\n")'
$ hg ci -m makeacrlf
$ HGEDITOR=cat hg import -m 'should fail because of eol' --edit --bypass ../test.diff
applying ../test.diff
--- a/tests/test-import-context.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-import-context.t Mon Mar 19 08:07:18 2018 -0700
@@ -7,7 +7,7 @@
> lasteol = sys.argv[2] == '1'
> patterns = sys.argv[3:]
>
- > fp = file(path, 'wb')
+ > fp = open(path, 'wb')
> for i, pattern in enumerate(patterns):
> count = int(pattern[0:-1])
> char = pattern[-1] + '\n'
@@ -19,7 +19,7 @@
> EOF
$ cat > cat.py <<EOF
> import sys
- > sys.stdout.write(repr(file(sys.argv[1], 'rb').read()) + '\n')
+ > sys.stdout.write(repr(open(sys.argv[1], 'rb').read()) + '\n')
> EOF
Initialize the test repository
--- a/tests/test-import-eol.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-import-eol.t Mon Mar 19 08:07:18 2018 -0700
@@ -1,21 +1,21 @@
$ cat > makepatch.py <<EOF
- > f = file('eol.diff', 'wb')
+ > f = open('eol.diff', 'wb')
> w = f.write
- > w('test message\n')
- > w('diff --git a/a b/a\n')
- > w('--- a/a\n')
- > w('+++ b/a\n')
- > w('@@ -1,5 +1,5 @@\n')
- > w(' a\n')
- > w('-bbb\r\n')
- > w('+yyyy\r\n')
- > w(' cc\r\n')
- > w(' \n')
- > w(' d\n')
- > w('-e\n')
- > w('\ No newline at end of file\n')
- > w('+z\r\n')
- > w('\ No newline at end of file\r\n')
+ > w(b'test message\n')
+ > w(b'diff --git a/a b/a\n')
+ > w(b'--- a/a\n')
+ > w(b'+++ b/a\n')
+ > w(b'@@ -1,5 +1,5 @@\n')
+ > w(b' a\n')
+ > w(b'-bbb\r\n')
+ > w(b'+yyyy\r\n')
+ > w(b' cc\r\n')
+ > w(b' \n')
+ > w(b' d\n')
+ > w(b'-e\n')
+ > w(b'\ No newline at end of file\n')
+ > w(b'+z\r\n')
+ > w(b'\ No newline at end of file\r\n')
> EOF
$ hg init repo
@@ -25,7 +25,7 @@
Test different --eol values
- $ $PYTHON -c 'file("a", "wb").write("a\nbbb\ncc\n\nd\ne")'
+ $ $PYTHON -c 'open("a", "wb").write(b"a\nbbb\ncc\n\nd\ne")'
$ hg ci -Am adda
adding .hgignore
adding a
@@ -89,7 +89,7 @@
auto EOL on CRLF file
- $ $PYTHON -c 'file("a", "wb").write("a\r\nbbb\r\ncc\r\n\r\nd\r\ne")'
+ $ $PYTHON -c 'open("a", "wb").write(b"a\r\nbbb\r\ncc\r\n\r\nd\r\ne")'
$ hg commit -m 'switch EOLs in a'
$ hg --traceback --config patch.eol='auto' import eol.diff
applying eol.diff
@@ -105,11 +105,11 @@
auto EOL on new file or source without any EOL
- $ $PYTHON -c 'file("noeol", "wb").write("noeol")'
+ $ $PYTHON -c 'open("noeol", "wb").write(b"noeol")'
$ hg add noeol
$ hg commit -m 'add noeol'
- $ $PYTHON -c 'file("noeol", "wb").write("noeol\r\nnoeol\n")'
- $ $PYTHON -c 'file("neweol", "wb").write("neweol\nneweol\r\n")'
+ $ $PYTHON -c 'open("noeol", "wb").write(b"noeol\r\nnoeol\n")'
+ $ $PYTHON -c 'open("neweol", "wb").write(b"neweol\nneweol\r\n")'
$ hg add neweol
$ hg diff --git > noeol.diff
$ hg revert --no-backup noeol neweol
@@ -127,10 +127,10 @@
Test --eol and binary patches
- $ $PYTHON -c 'file("b", "wb").write("a\x00\nb\r\nd")'
+ $ $PYTHON -c 'open("b", "wb").write(b"a\x00\nb\r\nd")'
$ hg ci -Am addb
adding b
- $ $PYTHON -c 'file("b", "wb").write("a\x00\nc\r\nd")'
+ $ $PYTHON -c 'open("b", "wb").write(b"a\x00\nc\r\nd")'
$ hg diff --git > bin.diff
$ hg revert --no-backup b
--- a/tests/test-import-git.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-import-git.t Mon Mar 19 08:07:18 2018 -0700
@@ -563,10 +563,10 @@
> Mc$`b*O5$Pw00T?_*Z=?k
>
> EOF
- >>> fp = file('binary.diff', 'rb')
+ >>> fp = open('binary.diff', 'rb')
>>> data = fp.read()
>>> fp.close()
- >>> file('binary.diff', 'wb').write(data.replace('\n', '\r\n'))
+ >>> open('binary.diff', 'wb').write(data.replace(b'\n', b'\r\n'))
$ rm binary2
$ hg import --no-commit binary.diff
applying binary.diff
--- a/tests/test-import.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-import.t Mon Mar 19 08:07:18 2018 -0700
@@ -56,7 +56,7 @@
$ cat > dummypatch.py <<EOF
> from __future__ import print_function
> print('patching file a')
- > file('a', 'wb').write('line2\n')
+ > open('a', 'wb').write(b'line2\n')
> EOF
$ hg clone -r0 a b
adding changesets
@@ -291,7 +291,7 @@
> msg.set_payload('email commit message\n' + patch)
> msg['Subject'] = 'email patch'
> msg['From'] = 'email patcher'
- > file(sys.argv[2], 'wb').write(msg.as_string())
+ > open(sys.argv[2], 'wb').write(msg.as_string())
> EOF
@@ -389,7 +389,7 @@
> msg.set_payload('email patch\n\nnext line\n---\n' + patch)
> msg['Subject'] = '[PATCH] email patch'
> msg['From'] = 'email patcher'
- > file(sys.argv[2], 'wb').write(msg.as_string())
+ > open(sys.argv[2], 'wb').write(msg.as_string())
> EOF
@@ -829,7 +829,7 @@
$ hg init binaryremoval
$ cd binaryremoval
$ echo a > a
- $ $PYTHON -c "file('b', 'wb').write('a\x00b')"
+ $ $PYTHON -c "open('b', 'wb').write(b'a\x00b')"
$ hg ci -Am addall
adding a
adding b
--- a/tests/test-install.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-install.t Mon Mar 19 08:07:18 2018 -0700
@@ -17,7 +17,7 @@
checking "re2" regexp engine \((available|missing)\) (re)
checking templates (*mercurial?templates)... (glob)
checking default template (*mercurial?templates?map-cmdline.default) (glob)
- checking commit editor... (* -c "import sys; sys.exit(0)") (glob)
+ checking commit editor... (*) (glob)
checking username (test)
no problems detected
@@ -31,7 +31,7 @@
"defaulttemplate": "*mercurial?templates?map-cmdline.default", (glob)
"defaulttemplateerror": null,
"defaulttemplatenotfound": "default",
- "editor": "* -c \"import sys; sys.exit(0)\"", (glob)
+ "editor": "*", (glob)
"editornotfound": false,
"encoding": "ascii",
"encodingerror": null,
@@ -72,7 +72,7 @@
checking "re2" regexp engine \((available|missing)\) (re)
checking templates (*mercurial?templates)... (glob)
checking default template (*mercurial?templates?map-cmdline.default) (glob)
- checking commit editor... (* -c "import sys; sys.exit(0)") (glob)
+ checking commit editor... (*) (glob)
checking username...
no username supplied
(specify a username in your configuration file)
@@ -120,6 +120,35 @@
checking username (test)
no problems detected
+print out the binary post-shlexsplit in the error message when commit editor is
+not found (this is intentionally using backslashes to mimic a windows usecase).
+ $ HGEDITOR="c:\foo\bar\baz.exe -y -z" hg debuginstall
+ checking encoding (ascii)...
+ checking Python executable (*) (glob)
+ checking Python version (*) (glob)
+ checking Python lib (*lib*)... (glob)
+ checking Python security support (*) (glob)
+ TLS 1.2 not supported by Python install; network connections lack modern security (?)
+ SNI not supported by Python install; may have connectivity issues with some servers (?)
+ checking Mercurial version (*) (glob)
+ checking Mercurial custom build (*) (glob)
+ checking module policy (*) (glob)
+ checking installed modules (*mercurial)... (glob)
+ checking registered compression engines (*zlib*) (glob)
+ checking available compression engines (*zlib*) (glob)
+ checking available compression engines for wire protocol (*zlib*) (glob)
+ checking "re2" regexp engine \((available|missing)\) (re)
+ checking templates (*mercurial?templates)... (glob)
+ checking default template (*mercurial?templates?map-cmdline.default) (glob)
+ checking commit editor... (c:\foo\bar\baz.exe) (windows !)
+ Can't find editor 'c:\foo\bar\baz.exe' in PATH (windows !)
+ checking commit editor... (c:foobarbaz.exe) (no-windows !)
+ Can't find editor 'c:foobarbaz.exe' in PATH (no-windows !)
+ (specify a commit editor in your configuration file)
+ checking username (test)
+ 1 problems detected, please check your install!
+ [1]
+
#if test-repo
$ . "$TESTDIR/helpers-testrepo.sh"
--- a/tests/test-issue2137.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-issue2137.t Mon Mar 19 08:07:18 2018 -0700
@@ -18,7 +18,7 @@
> tip1 = node.short(repo.changelog.tip())
> tip2 = node.short(repo.lookup(tip1))
> assert tip1 == tip2
- > ui.write('new tip: %s\n' % tip1)
+ > ui.write(b'new tip: %s\n' % tip1)
> return result
> repo.__class__ = wraprepo
>
--- a/tests/test-issue4074.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-issue4074.t Mon Mar 19 08:07:18 2018 -0700
@@ -4,7 +4,7 @@
$ cat > s.py <<EOF
> import random
- > for x in xrange(100000):
+ > for x in range(100000):
> print
> if random.randint(0, 100) >= 50:
> x += 1
--- a/tests/test-journal.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-journal.t Mon Mar 19 08:07:18 2018 -0700
@@ -4,6 +4,7 @@
> # mock out util.getuser() and util.makedate() to supply testable values
> import os
> from mercurial import util
+ > from mercurial.utils import dateutil
> def mockgetuser():
> return 'foobar'
>
@@ -19,7 +20,7 @@
> return (time, 0)
>
> util.getuser = mockgetuser
- > util.makedate = mockmakedate
+ > dateutil.makedate = mockmakedate
> EOF
$ cat >> $HGRCPATH << EOF
--- a/tests/test-largefiles-small-disk.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-largefiles-small-disk.t Mon Mar 19 08:07:18 2018 -0700
@@ -11,7 +11,7 @@
> _origcopyfileobj = shutil.copyfileobj
> def copyfileobj(fsrc, fdst, length=16*1024):
> # allow journal files (used by transaction) to be written
- > if 'journal.' in fdst.name:
+ > if b'journal.' in fdst.name:
> return _origcopyfileobj(fsrc, fdst, length)
> fdst.write(fsrc.read(4))
> raise IOError(errno.ENOSPC, os.strerror(errno.ENOSPC))
--- a/tests/test-largefiles-wireproto.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-largefiles-wireproto.t Mon Mar 19 08:07:18 2018 -0700
@@ -1,3 +1,13 @@
+#testcases sshv1 sshv2
+
+#if sshv2
+ $ cat >> $HGRCPATH << EOF
+ > [experimental]
+ > sshpeer.advertise-v2 = true
+ > sshserver.support-v2 = true
+ > EOF
+#endif
+
This file contains testcases that tend to be related to the wire protocol part
of largefiles.
@@ -414,7 +424,7 @@
> import base64
> from mercurial.hgweb import common
> def perform_authentication(hgweb, req, op):
- > auth = req.env.get('HTTP_AUTHORIZATION')
+ > auth = req.headers.get('Authorization')
> if not auth:
> raise common.ErrorResponse(common.HTTP_UNAUTHORIZED, 'who',
> [('WWW-Authenticate', 'Basic Realm="mercurial"')])
--- a/tests/test-lfs-largefiles.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-lfs-largefiles.t Mon Mar 19 08:07:18 2018 -0700
@@ -298,7 +298,7 @@
$TESTTMP/nolargefiles/.hg/hgrc:*: extensions.lfs= (glob)
$ hg log -r 'all()' -G -T '{rev} {join(lfs_files, ", ")} ({desc})\n'
- o 8 (remove large_by_size.bin)
+ o 8 large_by_size.bin (remove large_by_size.bin)
|
o 7 large_by_size.bin (large by size)
|
@@ -338,7 +338,10 @@
No diffs when comparing merge and p1 that kept p1's changes. Diff of lfs to
largefiles no longer operates in standin files.
- $ hg diff -r 2:3
+This `head -n 20` looks dumb (since we expect no output), but if something
+breaks you can get 1048576 lines of +y in the output, which takes a looooooong
+time to print.
+ $ hg diff -r 2:3 | head -n 20
$ hg diff -r 2:6
diff -r e989d0fa3764 -r 752e3a0d8488 large.bin
--- a/large.bin Thu Jan 01 00:00:00 1970 +0000
--- a/tests/test-lfs-test-server.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-lfs-test-server.t Mon Mar 19 08:07:18 2018 -0700
@@ -43,32 +43,117 @@
$ hg init ../repo2
$ mv .hg/store/lfs .hg/store/lfs_
- $ hg push ../repo2 -v
+ $ hg push ../repo2 --debug
+ http auth: user foo, password ***
pushing to ../repo2
+ http auth: user foo, password ***
+ query 1; heads
searching for changes
+ 1 total queries in *s (glob)
+ listing keys for "phases"
+ checking for updated bookmarks
+ listing keys for "bookmarks"
+ lfs: computing set of blobs to upload
+ Status: 200
+ Content-Length: 309
+ Content-Type: application/vnd.git-lfs+json
+ Date: $HTTP_DATE$
+ {
+ "objects": [
+ {
+ "actions": {
+ "upload": {
+ "expires_at": "$ISO_8601_DATE_TIME$",
+ "header": {
+ "Accept": "application/vnd.git-lfs"
+ },
+ "href": "http://localhost:$HGPORT/objects/31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b"
+ }
+ },
+ "oid": "31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b",
+ "size": 12
+ }
+ ]
+ }
lfs: uploading 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b (12 bytes)
+ Status: 200
+ Content-Length: 0
+ Content-Type: text/plain; charset=utf-8
+ Date: $HTTP_DATE$
lfs: processed: 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b
+ lfs: uploaded 1 files (12 bytes)
1 changesets found
- uncompressed size of bundle content:
- * (changelog) (glob)
- * (manifests) (glob)
- * a (glob)
+ list of changesets:
+ 99a7098854a3984a5c9eab0fc7a2906697b7cb5c
+ bundle2-output-bundle: "HG20", 4 parts total
+ bundle2-output-part: "replycaps" * bytes payload (glob)
+ bundle2-output-part: "check:heads" streamed payload
+ bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
+ bundle2-output-part: "phase-heads" 24 bytes payload
+ bundle2-input-bundle: with-transaction
+ bundle2-input-part: "replycaps" supported
+ bundle2-input-part: total payload size * (glob)
+ bundle2-input-part: "check:heads" supported
+ bundle2-input-part: total payload size 20
+ bundle2-input-part: "changegroup" (params: 1 mandatory) supported
adding changesets
+ add changeset 99a7098854a3
adding manifests
adding file changes
+ adding a revisions
added 1 changesets with 1 changes to 1 files
calling hook pretxnchangegroup.lfs: hgext.lfs.checkrequireslfs
+ bundle2-input-part: total payload size 617
+ bundle2-input-part: "phase-heads" supported
+ bundle2-input-part: total payload size 24
+ bundle2-input-bundle: 3 parts total
+ updating the branch cache
+ bundle2-output-bundle: "HG20", 1 parts total
+ bundle2-output-part: "reply:changegroup" (advisory) (params: 0 advisory) empty payload
+ bundle2-input-bundle: no-transaction
+ bundle2-input-part: "reply:changegroup" (advisory) (params: 0 advisory) supported
+ bundle2-input-bundle: 0 parts total
+ listing keys for "phases"
$ mv .hg/store/lfs_ .hg/store/lfs
Clear the cache to force a download
$ rm -rf `hg config lfs.usercache`
$ cd ../repo2
- $ hg update tip -v
+ $ hg update tip --debug
+ http auth: user foo, password ***
resolving manifests
- getting a
+ branchmerge: False, force: False, partial: False
+ ancestor: 000000000000, local: 000000000000+, remote: 99a7098854a3
+ Status: 200
+ Content-Length: 311
+ Content-Type: application/vnd.git-lfs+json
+ Date: $HTTP_DATE$
+ {
+ "objects": [
+ {
+ "actions": {
+ "download": {
+ "expires_at": "$ISO_8601_DATE_TIME$",
+ "header": {
+ "Accept": "application/vnd.git-lfs"
+ },
+ "href": "http://localhost:$HGPORT/objects/31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b"
+ }
+ },
+ "oid": "31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b",
+ "size": 12
+ }
+ ]
+ }
lfs: downloading 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b (12 bytes)
+ Status: 200
+ Content-Length: 12
+ Content-Type: text/plain; charset=utf-8
+ Date: $HTTP_DATE$
lfs: adding 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b to the usercache
lfs: processed: 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b
+ a: remote created -> g
+ getting a
lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
@@ -78,36 +163,180 @@
$ echo ANOTHER-LARGE-FILE > c
$ echo ANOTHER-LARGE-FILE2 > d
$ hg commit -m b-and-c -A b c d
- $ hg push ../repo1 -v | grep -v '^ '
+ $ hg push ../repo1 --debug
+ http auth: user foo, password ***
pushing to ../repo1
+ http auth: user foo, password ***
+ query 1; heads
searching for changes
+ all remote heads known locally
+ listing keys for "phases"
+ checking for updated bookmarks
+ listing keys for "bookmarks"
+ listing keys for "bookmarks"
+ lfs: computing set of blobs to upload
+ Status: 200
+ Content-Length: 901
+ Content-Type: application/vnd.git-lfs+json
+ Date: $HTTP_DATE$
+ {
+ "objects": [
+ {
+ "actions": {
+ "download": {
+ "expires_at": "$ISO_8601_DATE_TIME$",
+ "header": {
+ "Accept": "application/vnd.git-lfs"
+ },
+ "href": "http://localhost:$HGPORT/objects/31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b"
+ }
+ },
+ "oid": "31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b",
+ "size": 12
+ },
+ {
+ "actions": {
+ "upload": {
+ "expires_at": "$ISO_8601_DATE_TIME$",
+ "header": {
+ "Accept": "application/vnd.git-lfs"
+ },
+ "href": "http://localhost:$HGPORT/objects/37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19"
+ }
+ },
+ "oid": "37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19",
+ "size": 20
+ },
+ {
+ "actions": {
+ "upload": {
+ "expires_at": "$ISO_8601_DATE_TIME$",
+ "header": {
+ "Accept": "application/vnd.git-lfs"
+ },
+ "href": "http://localhost:$HGPORT/objects/d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998"
+ }
+ },
+ "oid": "d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998",
+ "size": 19
+ }
+ ]
+ }
lfs: need to transfer 2 objects (39 bytes)
lfs: uploading 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 (20 bytes)
+ Status: 200
+ Content-Length: 0
+ Content-Type: text/plain; charset=utf-8
+ Date: $HTTP_DATE$
lfs: processed: 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19
lfs: uploading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes)
+ Status: 200
+ Content-Length: 0
+ Content-Type: text/plain; charset=utf-8
+ Date: $HTTP_DATE$
lfs: processed: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
+ lfs: uploaded 2 files (39 bytes)
1 changesets found
- uncompressed size of bundle content:
+ list of changesets:
+ dfca2c9e2ef24996aa61ba2abd99277d884b3d63
+ bundle2-output-bundle: "HG20", 5 parts total
+ bundle2-output-part: "replycaps" * bytes payload (glob)
+ bundle2-output-part: "check:phases" 24 bytes payload
+ bundle2-output-part: "check:heads" streamed payload
+ bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
+ bundle2-output-part: "phase-heads" 24 bytes payload
+ bundle2-input-bundle: with-transaction
+ bundle2-input-part: "replycaps" supported
+ bundle2-input-part: total payload size * (glob)
+ bundle2-input-part: "check:phases" supported
+ bundle2-input-part: total payload size 24
+ bundle2-input-part: "check:heads" supported
+ bundle2-input-part: total payload size 20
+ bundle2-input-part: "changegroup" (params: 1 mandatory) supported
adding changesets
+ add changeset dfca2c9e2ef2
adding manifests
adding file changes
+ adding b revisions
+ adding c revisions
+ adding d revisions
added 1 changesets with 3 changes to 3 files
+ bundle2-input-part: total payload size 1315
+ bundle2-input-part: "phase-heads" supported
+ bundle2-input-part: total payload size 24
+ bundle2-input-bundle: 4 parts total
+ updating the branch cache
+ bundle2-output-bundle: "HG20", 1 parts total
+ bundle2-output-part: "reply:changegroup" (advisory) (params: 0 advisory) empty payload
+ bundle2-input-bundle: no-transaction
+ bundle2-input-part: "reply:changegroup" (advisory) (params: 0 advisory) supported
+ bundle2-input-bundle: 0 parts total
+ listing keys for "phases"
Clear the cache to force a download
$ rm -rf `hg config lfs.usercache`
- $ hg --repo ../repo1 update tip -v
+ $ hg --repo ../repo1 update tip --debug
+ http auth: user foo, password ***
resolving manifests
+ branchmerge: False, force: False, partial: False
+ ancestor: 99a7098854a3, local: 99a7098854a3+, remote: dfca2c9e2ef2
+ Status: 200
+ Content-Length: 608
+ Content-Type: application/vnd.git-lfs+json
+ Date: $HTTP_DATE$
+ {
+ "objects": [
+ {
+ "actions": {
+ "download": {
+ "expires_at": "$ISO_8601_DATE_TIME$",
+ "header": {
+ "Accept": "application/vnd.git-lfs"
+ },
+ "href": "http://localhost:$HGPORT/objects/37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19"
+ }
+ },
+ "oid": "37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19",
+ "size": 20
+ },
+ {
+ "actions": {
+ "download": {
+ "expires_at": "$ISO_8601_DATE_TIME$",
+ "header": {
+ "Accept": "application/vnd.git-lfs"
+ },
+ "href": "http://localhost:$HGPORT/objects/d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998"
+ }
+ },
+ "oid": "d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998",
+ "size": 19
+ }
+ ]
+ }
+ lfs: need to transfer 2 objects (39 bytes)
+ lfs: downloading 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 (20 bytes)
+ Status: 200
+ Content-Length: 20
+ Content-Type: text/plain; charset=utf-8
+ Date: $HTTP_DATE$
+ lfs: adding 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 to the usercache
+ lfs: processed: 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19
+ lfs: downloading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes)
+ Status: 200
+ Content-Length: 19
+ Content-Type: text/plain; charset=utf-8
+ Date: $HTTP_DATE$
+ lfs: adding d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 to the usercache
+ lfs: processed: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
+ b: remote created -> g
getting b
lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
+ c: remote created -> g
getting c
- lfs: downloading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes)
- lfs: adding d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 to the usercache
- lfs: processed: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
lfs: found d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 in the local lfs store
+ d: remote created -> g
getting d
- lfs: downloading 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 (20 bytes)
- lfs: adding 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 to the usercache
- lfs: processed: 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19
lfs: found 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 in the local lfs store
3 files updated, 0 files merged, 0 files removed, 0 files unresolved
@@ -119,14 +348,37 @@
$ rm ../repo1/.hg/store/lfs/objects/d1/1e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
$ rm ../repo1/*
- $ hg --repo ../repo1 update -C tip -v
+ $ hg --repo ../repo1 update -C tip --debug
+ http auth: user foo, password ***
resolving manifests
- getting a
- lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
- getting b
- lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
- getting c
+ branchmerge: False, force: True, partial: False
+ ancestor: dfca2c9e2ef2+, local: dfca2c9e2ef2+, remote: dfca2c9e2ef2
+ Status: 200
+ Content-Length: 311
+ Content-Type: application/vnd.git-lfs+json
+ Date: $HTTP_DATE$
+ {
+ "objects": [
+ {
+ "actions": {
+ "download": {
+ "expires_at": "$ISO_8601_DATE_TIME$",
+ "header": {
+ "Accept": "application/vnd.git-lfs"
+ },
+ "href": "http://localhost:$HGPORT/objects/d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998"
+ }
+ },
+ "oid": "d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998",
+ "size": 19
+ }
+ ]
+ }
lfs: downloading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes)
+ Status: 200
+ Content-Length: 7
+ Content-Type: text/plain; charset=utf-8
+ Date: $HTTP_DATE$
abort: corrupt remote lfs object: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
[255]
@@ -143,14 +395,276 @@
$ echo 'another lfs blob' > b
$ hg ci -m 'another blob'
$ echo 'damage' > .hg/store/lfs/objects/e6/59058e26b07b39d2a9c7145b3f99b41f797b6621c8076600e9cb7ee88291f0
- $ hg push -v ../repo1
+ $ hg push --debug ../repo1
+ http auth: user foo, password ***
pushing to ../repo1
+ http auth: user foo, password ***
+ query 1; heads
searching for changes
+ all remote heads known locally
+ listing keys for "phases"
+ checking for updated bookmarks
+ listing keys for "bookmarks"
+ listing keys for "bookmarks"
+ lfs: computing set of blobs to upload
+ Status: 200
+ Content-Length: 309
+ Content-Type: application/vnd.git-lfs+json
+ Date: $HTTP_DATE$
+ {
+ "objects": [
+ {
+ "actions": {
+ "upload": {
+ "expires_at": "$ISO_8601_DATE_TIME$",
+ "header": {
+ "Accept": "application/vnd.git-lfs"
+ },
+ "href": "http://localhost:$HGPORT/objects/e659058e26b07b39d2a9c7145b3f99b41f797b6621c8076600e9cb7ee88291f0"
+ }
+ },
+ "oid": "e659058e26b07b39d2a9c7145b3f99b41f797b6621c8076600e9cb7ee88291f0",
+ "size": 17
+ }
+ ]
+ }
lfs: uploading e659058e26b07b39d2a9c7145b3f99b41f797b6621c8076600e9cb7ee88291f0 (17 bytes)
abort: detected corrupt lfs object: e659058e26b07b39d2a9c7145b3f99b41f797b6621c8076600e9cb7ee88291f0
(run hg verify)
[255]
+Archive will prefetch blobs in a group
+
+ $ rm -rf .hg/store/lfs `hg config lfs.usercache`
+ $ hg archive --debug -r 1 ../archive
+ http auth: user foo, password ***
+ Status: 200
+ Content-Length: 905
+ Content-Type: application/vnd.git-lfs+json
+ Date: $HTTP_DATE$
+ {
+ "objects": [
+ {
+ "actions": {
+ "download": {
+ "expires_at": "$ISO_8601_DATE_TIME$",
+ "header": {
+ "Accept": "application/vnd.git-lfs"
+ },
+ "href": "http://localhost:$HGPORT/objects/31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b"
+ }
+ },
+ "oid": "31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b",
+ "size": 12
+ },
+ {
+ "actions": {
+ "download": {
+ "expires_at": "$ISO_8601_DATE_TIME$",
+ "header": {
+ "Accept": "application/vnd.git-lfs"
+ },
+ "href": "http://localhost:$HGPORT/objects/37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19"
+ }
+ },
+ "oid": "37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19",
+ "size": 20
+ },
+ {
+ "actions": {
+ "download": {
+ "expires_at": "$ISO_8601_DATE_TIME$",
+ "header": {
+ "Accept": "application/vnd.git-lfs"
+ },
+ "href": "http://localhost:$HGPORT/objects/d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998"
+ }
+ },
+ "oid": "d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998",
+ "size": 19
+ }
+ ]
+ }
+ lfs: need to transfer 3 objects (51 bytes)
+ lfs: downloading 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b (12 bytes)
+ Status: 200
+ Content-Length: 12
+ Content-Type: text/plain; charset=utf-8
+ Date: $HTTP_DATE$
+ lfs: adding 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b to the usercache
+ lfs: processed: 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b
+ lfs: downloading 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 (20 bytes)
+ Status: 200
+ Content-Length: 20
+ Content-Type: text/plain; charset=utf-8
+ Date: $HTTP_DATE$
+ lfs: adding 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 to the usercache
+ lfs: processed: 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19
+ lfs: downloading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes)
+ Status: 200
+ Content-Length: 19
+ Content-Type: text/plain; charset=utf-8
+ Date: $HTTP_DATE$
+ lfs: adding d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 to the usercache
+ lfs: processed: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
+ lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
+ lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
+ lfs: found d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 in the local lfs store
+ lfs: found 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 in the local lfs store
+ $ find ../archive | sort
+ ../archive
+ ../archive/.hg_archival.txt
+ ../archive/a
+ ../archive/b
+ ../archive/c
+ ../archive/d
+
+Cat will prefetch blobs in a group
+
+ $ rm -rf .hg/store/lfs `hg config lfs.usercache`
+ $ hg cat --debug -r 1 a b c
+ http auth: user foo, password ***
+ Status: 200
+ Content-Length: 608
+ Content-Type: application/vnd.git-lfs+json
+ Date: $HTTP_DATE$
+ {
+ "objects": [
+ {
+ "actions": {
+ "download": {
+ "expires_at": "$ISO_8601_DATE_TIME$",
+ "header": {
+ "Accept": "application/vnd.git-lfs"
+ },
+ "href": "http://localhost:$HGPORT/objects/31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b"
+ }
+ },
+ "oid": "31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b",
+ "size": 12
+ },
+ {
+ "actions": {
+ "download": {
+ "expires_at": "$ISO_8601_DATE_TIME$",
+ "header": {
+ "Accept": "application/vnd.git-lfs"
+ },
+ "href": "http://localhost:$HGPORT/objects/d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998"
+ }
+ },
+ "oid": "d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998",
+ "size": 19
+ }
+ ]
+ }
+ lfs: need to transfer 2 objects (31 bytes)
+ lfs: downloading 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b (12 bytes)
+ Status: 200
+ Content-Length: 12
+ Content-Type: text/plain; charset=utf-8
+ Date: $HTTP_DATE$
+ lfs: adding 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b to the usercache
+ lfs: processed: 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b
+ lfs: downloading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes)
+ Status: 200
+ Content-Length: 19
+ Content-Type: text/plain; charset=utf-8
+ Date: $HTTP_DATE$
+ lfs: adding d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 to the usercache
+ lfs: processed: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
+ lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
+ THIS-IS-LFS
+ lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
+ THIS-IS-LFS
+ lfs: found d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 in the local lfs store
+ ANOTHER-LARGE-FILE
+
+Revert will prefetch blobs in a group
+
+ $ rm -rf .hg/store/lfs
+ $ rm -rf `hg config lfs.usercache`
+ $ rm *
+ $ hg revert --all -r 1 --debug
+ http auth: user foo, password ***
+ adding a
+ reverting b
+ reverting c
+ reverting d
+ Status: 200
+ Content-Length: 905
+ Content-Type: application/vnd.git-lfs+json
+ Date: $HTTP_DATE$
+ {
+ "objects": [
+ {
+ "actions": {
+ "download": {
+ "expires_at": "$ISO_8601_DATE_TIME$",
+ "header": {
+ "Accept": "application/vnd.git-lfs"
+ },
+ "href": "http://localhost:$HGPORT/objects/31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b"
+ }
+ },
+ "oid": "31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b",
+ "size": 12
+ },
+ {
+ "actions": {
+ "download": {
+ "expires_at": "$ISO_8601_DATE_TIME$",
+ "header": {
+ "Accept": "application/vnd.git-lfs"
+ },
+ "href": "http://localhost:$HGPORT/objects/37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19"
+ }
+ },
+ "oid": "37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19",
+ "size": 20
+ },
+ {
+ "actions": {
+ "download": {
+ "expires_at": "$ISO_8601_DATE_TIME$",
+ "header": {
+ "Accept": "application/vnd.git-lfs"
+ },
+ "href": "http://localhost:$HGPORT/objects/d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998"
+ }
+ },
+ "oid": "d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998",
+ "size": 19
+ }
+ ]
+ }
+ lfs: need to transfer 3 objects (51 bytes)
+ lfs: downloading 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b (12 bytes)
+ Status: 200
+ Content-Length: 12
+ Content-Type: text/plain; charset=utf-8
+ Date: $HTTP_DATE$
+ lfs: adding 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b to the usercache
+ lfs: processed: 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b
+ lfs: downloading 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 (20 bytes)
+ Status: 200
+ Content-Length: 20
+ Content-Type: text/plain; charset=utf-8
+ Date: $HTTP_DATE$
+ lfs: adding 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 to the usercache
+ lfs: processed: 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19
+ lfs: downloading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes)
+ Status: 200
+ Content-Length: 19
+ Content-Type: text/plain; charset=utf-8
+ Date: $HTTP_DATE$
+ lfs: adding d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 to the usercache
+ lfs: processed: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
+ lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
+ lfs: found d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 in the local lfs store
+ lfs: found 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 in the local lfs store
+ lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
+
Check error message when the remote missed a blob:
$ echo FFFFF > b
@@ -159,7 +673,32 @@
$ hg commit -m b b
$ rm -rf .hg/store/lfs
$ rm -rf `hg config lfs.usercache`
- $ hg update -C '.^'
+ $ hg update -C '.^' --debug
+ http auth: user foo, password ***
+ resolving manifests
+ branchmerge: False, force: True, partial: False
+ ancestor: 62fdbaf221c6+, local: 62fdbaf221c6+, remote: ef0564edf47e
+ Status: 200
+ Content-Length: 308
+ Content-Type: application/vnd.git-lfs+json
+ Date: $HTTP_DATE$
+ {
+ "objects": [
+ {
+ "actions": {
+ "upload": {
+ "expires_at": "$ISO_8601_DATE_TIME$",
+ "header": {
+ "Accept": "application/vnd.git-lfs"
+ },
+ "href": "http://localhost:$HGPORT/objects/8e6ea5f6c066b44a0efa43bcce86aea73f17e6e23f0663df0251e7524e140a13"
+ }
+ },
+ "oid": "8e6ea5f6c066b44a0efa43bcce86aea73f17e6e23f0663df0251e7524e140a13",
+ "size": 6
+ }
+ ]
+ }
abort: LFS server error. Remote object for "b" not found:(.*)! (re)
[255]
@@ -198,8 +737,35 @@
#endif
$ cd $TESTTMP
- $ hg clone test test2
+ $ hg --debug clone test test2
+ http auth: user foo, password ***
+ linked 6 files
+ http auth: user foo, password ***
updating to branch default
+ resolving manifests
+ branchmerge: False, force: False, partial: False
+ ancestor: 000000000000, local: 000000000000+, remote: d2a338f184a8
+ Status: 200
+ Content-Length: 308
+ Content-Type: application/vnd.git-lfs+json
+ Date: $HTTP_DATE$
+ {
+ "objects": [
+ {
+ "actions": {
+ "upload": {
+ "expires_at": "$ISO_8601_DATE_TIME$",
+ "header": {
+ "Accept": "application/vnd.git-lfs"
+ },
+ "href": "http://localhost:$HGPORT/objects/bdc26931acfb734b142a8d675f205becf27560dc461f501822de13274fe6fc8a"
+ }
+ },
+ "oid": "bdc26931acfb734b142a8d675f205becf27560dc461f501822de13274fe6fc8a",
+ "size": 6
+ }
+ ]
+ }
abort: LFS server error. Remote object for "a" not found:(.*)! (re)
[255]
--- a/tests/test-lfs.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-lfs.t Mon Mar 19 08:07:18 2018 -0700
@@ -154,10 +154,32 @@
$ hg add . -q
$ hg commit -m 'commit with lfs content'
+ $ hg files -r . 'set:added()'
+ large
+ small
+ $ hg files -r . 'set:added() & lfs()'
+ large
+
$ hg mv large l
$ hg mv small s
+ $ hg status 'set:removed()'
+ R large
+ R small
+ $ hg status 'set:removed() & lfs()'
+ R large
$ hg commit -m 'renames'
+ $ hg files -r . 'set:copied()'
+ l
+ s
+ $ hg files -r . 'set:copied() & lfs()'
+ l
+ $ hg status --change . 'set:removed()'
+ R large
+ R small
+ $ hg status --change . 'set:removed() & lfs()'
+ R large
+
$ echo SHORT > l
$ echo BECOME-LARGER-FROM-SHORTER > s
$ hg commit -m 'large to small, small to large'
@@ -174,7 +196,7 @@
$ hg log -r 'all()' -T '{rev} {join(lfs_files, ", ")}\n'
0 large
- 1 l
+ 1 l, large
2 s
3 s
4 l
@@ -594,8 +616,8 @@
$ cat > $TESTTMP/dumpflog.py << EOF
> # print raw revision sizes, flags, and hashes for certain files
> import hashlib
+ > from mercurial.node import short
> from mercurial import revlog
- > from mercurial.node import short
> def hash(rawtext):
> h = hashlib.sha512()
> h.update(rawtext)
@@ -760,7 +782,6 @@
$ hg --config lfs.usercache=emptycache clone -v repo5 fromcorrupt2
updating to branch default
resolving manifests
- getting l
abort: corrupt remote lfs object: 22f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b
[255]
@@ -1007,7 +1028,7 @@
The LFS policy stops when the .hglfs is gone
- $ hg rm .hglfs
+ $ mv .hglfs .hglfs_
$ echo 'largefile3' > lfs.test
$ echo '012345678901234567890abc' > nolfs.exclude
$ echo '01234567890123456abc' > lfs.catchall
@@ -1015,6 +1036,28 @@
$ hg log -r . -T '{rev}: {lfs_files % "{file}: {lfsoid}\n"}\n'
4:
+ $ mv .hglfs_ .hglfs
+ $ echo '012345678901234567890abc' > lfs.test
+ $ hg ci -m 'back to lfs'
+ $ hg rm lfs.test
+ $ hg ci -qm 'remove lfs'
+
+{lfs_files} will list deleted files too
+
+ $ hg log -T "{lfs_files % '{rev} {file}: {lfspointer.oid}\n'}"
+ 6 lfs.test:
+ 5 lfs.test: sha256:43f8f41171b6f62a6b61ba4ce98a8a6c1649240a47ebafd43120aa215ac9e7f6
+ 3 lfs.catchall: sha256:31f43b9c62b540126b0ad5884dc013d21a61c9329b77de1fceeae2fc58511573
+ 3 lfs.test: sha256:8acd23467967bc7b8cc5a280056589b0ba0b17ff21dbd88a7b6474d6290378a6
+ 2 lfs.catchall: sha256:d4ec46c2869ba22eceb42a729377432052d9dd75d82fc40390ebaadecee87ee9
+ 2 lfs.test: sha256:5489e6ced8c36a7b267292bde9fd5242a5f80a7482e8f23fa0477393dfaa4d6c
+
+ $ hg log -r 'file("set:lfs()")' -T '{rev} {join(lfs_files, ", ")}\n'
+ 2 lfs.catchall, lfs.test
+ 3 lfs.catchall, lfs.test
+ 5 lfs.test
+ 6 lfs.test
+
$ cd ..
Unbundling adds a requirement to a non-lfs repo, if necessary.
--- a/tests/test-lock-badness.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-lock-badness.t Mon Mar 19 08:07:18 2018 -0700
@@ -22,8 +22,8 @@
> def acquiretestlock(repo, releaseexc):
> def unlock():
> if releaseexc:
- > raise error.Abort('expected release exception')
- > l = repo._lock(repo.vfs, 'testlock', False, unlock, None, 'test lock')
+ > raise error.Abort(b'expected release exception')
+ > l = repo._lock(repo.vfs, b'testlock', False, unlock, None, b'test lock')
> return l
>
> @command(b'testlockexc')
@@ -35,7 +35,7 @@
> try:
> testlock = acquiretestlock(repo, False)
> except error.LockHeld:
- > raise error.Abort('lockfile on disk even after releasing!')
+ > raise error.Abort(b'lockfile on disk even after releasing!')
> testlock.release()
> EOF
$ cat >> $HGRCPATH << EOF
--- a/tests/test-log-exthook.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-log-exthook.t Mon Mar 19 08:07:18 2018 -0700
@@ -4,8 +4,8 @@
$ cat > $TESTTMP/logexthook.py <<EOF
> from __future__ import absolute_import
> from mercurial import (
- > cmdutil,
> commands,
+ > logcmdutil,
> repair,
> )
> def rot13description(self, ctx):
@@ -13,7 +13,7 @@
> description = ctx.description().strip().splitlines()[0].encode('rot13')
> self.ui.write("%s: %s\n" % (summary, description))
> def reposetup(ui, repo):
- > cmdutil.changeset_printer._exthook = rot13description
+ > logcmdutil.changesetprinter._exthook = rot13description
> EOF
Prepare the repository
--- a/tests/test-log-linerange.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-log-linerange.t Mon Mar 19 08:07:18 2018 -0700
@@ -172,6 +172,77 @@
+3
+4
+ $ hg log -f --graph -L foo,5:7 -p
+ @ changeset: 5:cfdf972b3971
+ | tag: tip
+ | user: test
+ | date: Thu Jan 01 00:00:00 1970 +0000
+ | summary: foo: 3 -> 3+ and 11+ -> 11-; bar: a -> a+
+ |
+ | diff --git a/foo b/foo
+ | --- a/foo
+ | +++ b/foo
+ | @@ -4,7 +4,7 @@
+ | 0
+ | 1
+ | 2+
+ | -3
+ | +3+
+ | 4
+ | 5
+ | 6
+ |
+ o changeset: 4:eaec41c1a0c9
+ : user: test
+ : date: Thu Jan 01 00:00:00 1970 +0000
+ : summary: 11 -> 11+; leading space before "1"
+ :
+ : diff --git a/foo b/foo
+ : --- a/foo
+ : +++ b/foo
+ : @@ -2,7 +2,7 @@
+ : 0
+ : 0
+ : 0
+ : -1
+ : + 1
+ : 2+
+ : 3
+ : 4
+ :
+ o changeset: 2:63a884426fd0
+ : user: test
+ : date: Thu Jan 01 00:00:00 1970 +0000
+ : summary: 2 -> 2+; added bar
+ :
+ : diff --git a/foo b/foo
+ : --- a/foo
+ : +++ b/foo
+ : @@ -3,6 +3,6 @@
+ : 0
+ : 0
+ : 1
+ : -2
+ : +2+
+ : 3
+ : 4
+ :
+ o changeset: 0:5ae1f82b9a00
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: init
+
+ diff --git a/foo b/foo
+ new file mode 100644
+ --- /dev/null
+ +++ b/foo
+ @@ -0,0 +1,5 @@
+ +0
+ +1
+ +2
+ +3
+ +4
+
With --template.
@@ -800,7 +871,7 @@
Binary files work but without diff hunks filtering.
(Checking w/ and w/o diff.git option.)
- >>> open('binary', 'wb').write('this\nis\na\nbinary\0')
+ >>> open('binary', 'wb').write(b'this\nis\na\nbinary\0') and None
$ hg add binary
$ hg ci -m 'add a binary file' --quiet
$ hg log -f -L binary,1:2 -p
@@ -849,9 +920,3 @@
$ hg log -f -L dir/baz,5:7 -p
abort: cannot follow file not in parent revision: "dir/baz"
[255]
-
-Graph log does work yet.
-
- $ hg log -f -L dir/baz,5:7 --graph
- abort: graph not supported with line range patterns
- [255]
--- a/tests/test-log.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-log.t Mon Mar 19 08:07:18 2018 -0700
@@ -2016,33 +2016,31 @@
$ hg init problematicencoding
$ cd problematicencoding
- $ $PYTHON > setup.sh <<EOF
- > print(u'''
- > echo a > text
- > hg add text
- > hg --encoding utf-8 commit -u '\u30A2' -m none
- > echo b > text
- > hg --encoding utf-8 commit -u '\u30C2' -m none
- > echo c > text
- > hg --encoding utf-8 commit -u none -m '\u30A2'
- > echo d > text
- > hg --encoding utf-8 commit -u none -m '\u30C2'
- > '''.encode('utf-8'))
- > EOF
+ >>> with open('setup.sh', 'wb') as f:
+ ... f.write(u'''
+ ... echo a > text
+ ... hg add text
+ ... hg --encoding utf-8 commit -u '\u30A2' -m none
+ ... echo b > text
+ ... hg --encoding utf-8 commit -u '\u30C2' -m none
+ ... echo c > text
+ ... hg --encoding utf-8 commit -u none -m '\u30A2'
+ ... echo d > text
+ ... hg --encoding utf-8 commit -u none -m '\u30C2'
+ ... '''.encode('utf-8')) and None
$ sh < setup.sh
test in problematic encoding
- $ $PYTHON > test.sh <<EOF
- > print(u'''
- > hg --encoding cp932 log --template '{rev}\\n' -u '\u30A2'
- > echo ====
- > hg --encoding cp932 log --template '{rev}\\n' -u '\u30C2'
- > echo ====
- > hg --encoding cp932 log --template '{rev}\\n' -k '\u30A2'
- > echo ====
- > hg --encoding cp932 log --template '{rev}\\n' -k '\u30C2'
- > '''.encode('cp932'))
- > EOF
+ >>> with open('test.sh', 'wb') as f:
+ ... f.write(u'''
+ ... hg --encoding cp932 log --template '{rev}\\n' -u '\u30A2'
+ ... echo ====
+ ... hg --encoding cp932 log --template '{rev}\\n' -u '\u30C2'
+ ... echo ====
+ ... hg --encoding cp932 log --template '{rev}\\n' -k '\u30A2'
+ ... echo ====
+ ... hg --encoding cp932 log --template '{rev}\\n' -k '\u30C2'
+ ... '''.encode('cp932')) and None
$ sh < test.sh
0
====
@@ -2255,14 +2253,14 @@
> from mercurial import namespaces
>
> def reposetup(ui, repo):
- > foo = {'foo': repo[0].node()}
+ > foo = {b'foo': repo[0].node()}
> names = lambda r: foo.keys()
> namemap = lambda r, name: foo.get(name)
- > nodemap = lambda r, node: [name for name, n in foo.iteritems()
+ > nodemap = lambda r, node: [name for name, n in foo.items()
> if n == node]
> ns = namespaces.namespace(
- > "bars", templatename="bar", logname="barlog",
- > colorname="barcolor", listnames=names, namemap=namemap,
+ > b"bars", templatename=b"bar", logname=b"barlog",
+ > colorname=b"barcolor", listnames=names, namemap=namemap,
> nodemap=nodemap)
>
> repo.names.addnamespace(ns)
@@ -2289,6 +2287,25 @@
$ hg --config extensions.names=../names.py log -r 0 --template '{bars}\n'
foo
+Templater parse errors:
+
+simple error
+ $ hg log -r . -T '{shortest(node}'
+ hg: parse error at 14: unexpected token: end
+ ({shortest(node}
+ ^ here)
+ [255]
+
+multi-line template with error
+ $ hg log -r . -T 'line 1
+ > line2
+ > {shortest(node}
+ > line4\nline5'
+ hg: parse error at 27: unexpected token: end
+ (line 1\nline2\n{shortest(node}\nline4\nline5
+ ^ here)
+ [255]
+
$ cd ..
hg log -f dir across branches
--- a/tests/test-logexchange.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-logexchange.t Mon Mar 19 08:07:18 2018 -0700
@@ -6,6 +6,9 @@
> glog = log -G -T '{rev}:{node|short} {desc}'
> [experimental]
> remotenames = True
+ > [extensions]
+ > remotenames =
+ > show =
> EOF
Making a server repo
@@ -57,14 +60,27 @@
$ cat .hg/logexchange/bookmarks
0
- 87d6d66763085b629e6d7ed56778c79827273022\x00file:$TESTTMP/server\x00bar (esc)
- 62615734edd52f06b6fb9c2beb429e4fe30d57b8\x00file:$TESTTMP/server\x00foo (esc)
+ 87d6d66763085b629e6d7ed56778c79827273022\x00default\x00bar (esc)
+ 62615734edd52f06b6fb9c2beb429e4fe30d57b8\x00default\x00foo (esc)
$ cat .hg/logexchange/branches
0
- ec2426147f0e39dbc9cef599b066be6035ce691d\x00file:$TESTTMP/server\x00default (esc)
- 3e1487808078543b0af6d10dadf5d46943578db0\x00file:$TESTTMP/server\x00wat (esc)
+ ec2426147f0e39dbc9cef599b066be6035ce691d\x00default\x00default (esc)
+ 3e1487808078543b0af6d10dadf5d46943578db0\x00default\x00wat (esc)
+
+ $ hg show work
+ o 3e14 (wat) (default/wat) added bar
+ |
+ ~
+ @ ec24 (default/default) Added h
+ |
+ ~
+
+ $ hg update "default/wat"
+ 1 files updated, 0 files merged, 3 files removed, 0 files unresolved
+ $ hg identify
+ 3e1487808078 (wat) tip
Making a new server
-------------------
@@ -94,15 +110,152 @@
$ cat .hg/logexchange/bookmarks
0
- 62615734edd52f06b6fb9c2beb429e4fe30d57b8\x00file:$TESTTMP/server\x00foo (esc)
- 87d6d66763085b629e6d7ed56778c79827273022\x00file:$TESTTMP/server\x00bar (esc)
- 87d6d66763085b629e6d7ed56778c79827273022\x00file:$TESTTMP/server2\x00bar (esc)
- 62615734edd52f06b6fb9c2beb429e4fe30d57b8\x00file:$TESTTMP/server2\x00foo (esc)
+ 62615734edd52f06b6fb9c2beb429e4fe30d57b8\x00default\x00foo (esc)
+ 87d6d66763085b629e6d7ed56778c79827273022\x00default\x00bar (esc)
+ 87d6d66763085b629e6d7ed56778c79827273022\x00$TESTTMP/server2\x00bar (esc)
+ 62615734edd52f06b6fb9c2beb429e4fe30d57b8\x00$TESTTMP/server2\x00foo (esc)
$ cat .hg/logexchange/branches
0
- 3e1487808078543b0af6d10dadf5d46943578db0\x00file:$TESTTMP/server\x00wat (esc)
- ec2426147f0e39dbc9cef599b066be6035ce691d\x00file:$TESTTMP/server\x00default (esc)
- ec2426147f0e39dbc9cef599b066be6035ce691d\x00file:$TESTTMP/server2\x00default (esc)
- 3e1487808078543b0af6d10dadf5d46943578db0\x00file:$TESTTMP/server2\x00wat (esc)
+ 3e1487808078543b0af6d10dadf5d46943578db0\x00default\x00wat (esc)
+ ec2426147f0e39dbc9cef599b066be6035ce691d\x00default\x00default (esc)
+ ec2426147f0e39dbc9cef599b066be6035ce691d\x00$TESTTMP/server2\x00default (esc)
+ 3e1487808078543b0af6d10dadf5d46943578db0\x00$TESTTMP/server2\x00wat (esc)
+
+ $ hg log -G
+ @ changeset: 8:3e1487808078
+ | branch: wat
+ | tag: tip
+ | remote branch: $TESTTMP/server2/wat
+ | remote branch: default/wat
+ | parent: 4:aa98ab95a928
+ | user: test
+ | date: Thu Jan 01 00:00:00 1970 +0000
+ | summary: added bar
+ |
+ | o changeset: 7:ec2426147f0e
+ | | remote branch: $TESTTMP/server2/default
+ | | remote branch: default/default
+ | | user: test
+ | | date: Thu Jan 01 00:00:00 1970 +0000
+ | | summary: Added h
+ | |
+ | o changeset: 6:87d6d6676308
+ | | bookmark: bar
+ | | remote bookmark: $TESTTMP/server2/bar
+ | | remote bookmark: default/bar
+ | | user: test
+ | | date: Thu Jan 01 00:00:00 1970 +0000
+ | | summary: Added g
+ | |
+ | o changeset: 5:825660c69f0c
+ |/ user: test
+ | date: Thu Jan 01 00:00:00 1970 +0000
+ | summary: Added f
+ |
+ o changeset: 4:aa98ab95a928
+ | user: test
+ | date: Thu Jan 01 00:00:00 1970 +0000
+ | summary: Added e
+ |
+ o changeset: 3:62615734edd5
+ | bookmark: foo
+ | remote bookmark: $TESTTMP/server2/foo
+ | remote bookmark: default/foo
+ | user: test
+ | date: Thu Jan 01 00:00:00 1970 +0000
+ | summary: Added d
+ |
+ o changeset: 2:28ad74487de9
+ | user: test
+ | date: Thu Jan 01 00:00:00 1970 +0000
+ | summary: Added c
+ |
+ o changeset: 1:29becc82797a
+ | user: test
+ | date: Thu Jan 01 00:00:00 1970 +0000
+ | summary: Added b
+ |
+ o changeset: 0:18d04c59bb5d
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: Added a
+
+Testing the templates provided by remotenames extension
+
+`remotenames` keyword
+
+ $ hg log -G -T "{rev}:{node|short} {remotenames}\n"
+ @ 8:3e1487808078 $TESTTMP/server2/wat default/wat
+ |
+ | o 7:ec2426147f0e $TESTTMP/server2/default default/default
+ | |
+ | o 6:87d6d6676308 $TESTTMP/server2/bar default/bar
+ | |
+ | o 5:825660c69f0c
+ |/
+ o 4:aa98ab95a928
+ |
+ o 3:62615734edd5 $TESTTMP/server2/foo default/foo
+ |
+ o 2:28ad74487de9
+ |
+ o 1:29becc82797a
+ |
+ o 0:18d04c59bb5d
+
+`remotebookmarks` and `remotebranches` keywords
+
+ $ hg log -G -T "{rev}:{node|short} [{remotebookmarks}] ({remotebranches})"
+ @ 8:3e1487808078 [] ($TESTTMP/server2/wat default/wat)
+ |
+ | o 7:ec2426147f0e [] ($TESTTMP/server2/default default/default)
+ | |
+ | o 6:87d6d6676308 [$TESTTMP/server2/bar default/bar] ()
+ | |
+ | o 5:825660c69f0c [] ()
+ |/
+ o 4:aa98ab95a928 [] ()
+ |
+ o 3:62615734edd5 [$TESTTMP/server2/foo default/foo] ()
+ |
+ o 2:28ad74487de9 [] ()
+ |
+ o 1:29becc82797a [] ()
+ |
+ o 0:18d04c59bb5d [] ()
+
+Testing the revsets provided by remotenames extension
+
+`remotenames` revset
+
+ $ hg log -r "remotenames()" -GT "{rev}:{node|short} {remotenames}\n"
+ @ 8:3e1487808078 $TESTTMP/server2/wat default/wat
+ :
+ : o 7:ec2426147f0e $TESTTMP/server2/default default/default
+ : |
+ : o 6:87d6d6676308 $TESTTMP/server2/bar default/bar
+ :/
+ o 3:62615734edd5 $TESTTMP/server2/foo default/foo
+ |
+ ~
+
+`remotebranches` revset
+
+ $ hg log -r "remotebranches()" -GT "{rev}:{node|short} {remotenames}\n"
+ @ 8:3e1487808078 $TESTTMP/server2/wat default/wat
+ |
+ ~
+ o 7:ec2426147f0e $TESTTMP/server2/default default/default
+ |
+ ~
+
+`remotebookmarks` revset
+
+ $ hg log -r "remotebookmarks()" -GT "{rev}:{node|short} {remotenames}\n"
+ o 6:87d6d6676308 $TESTTMP/server2/bar default/bar
+ :
+ o 3:62615734edd5 $TESTTMP/server2/foo default/foo
+ |
+ ~
--- a/tests/test-mactext.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-mactext.t Mon Mar 19 08:07:18 2018 -0700
@@ -3,9 +3,9 @@
> import sys
>
> for path in sys.argv[1:]:
- > data = file(path, 'rb').read()
- > data = data.replace('\n', '\r')
- > file(path, 'wb').write(data)
+ > data = open(path, 'rb').read()
+ > data = data.replace(b'\n', b'\r')
+ > open(path, 'wb').write(data)
> EOF
$ cat > print.py <<EOF
> import sys
--- a/tests/test-manifest.py Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-manifest.py Mon Mar 19 08:07:18 2018 -0700
@@ -11,7 +11,6 @@
)
EMTPY_MANIFEST = b''
-EMTPY_MANIFEST_V2 = b'\0\n'
HASH_1 = b'1' * 40
BIN_HASH_1 = binascii.unhexlify(HASH_1)
@@ -28,42 +27,6 @@
b'flag2': b'l',
}
-# Same data as A_SHORT_MANIFEST
-A_SHORT_MANIFEST_V2 = (
- b'\0\n'
- b'\x00bar/baz/qux.py\0%(flag2)s\n%(hash2)s\n'
- b'\x00foo\0%(flag1)s\n%(hash1)s\n'
- ) % {b'hash1': BIN_HASH_1,
- b'flag1': b'',
- b'hash2': BIN_HASH_2,
- b'flag2': b'l',
- }
-
-# Same data as A_SHORT_MANIFEST
-A_METADATA_MANIFEST = (
- b'\0foo\0bar\n'
- b'\x00bar/baz/qux.py\0%(flag2)s\0foo\0bar\n%(hash2)s\n' # flag and metadata
- b'\x00foo\0%(flag1)s\0foo\n%(hash1)s\n' # no flag, but metadata
- ) % {b'hash1': BIN_HASH_1,
- b'flag1': b'',
- b'hash2': BIN_HASH_2,
- b'flag2': b'l',
- }
-
-A_STEM_COMPRESSED_MANIFEST = (
- b'\0\n'
- b'\x00bar/baz/qux.py\0%(flag2)s\n%(hash2)s\n'
- b'\x04qux/foo.py\0%(flag1)s\n%(hash1)s\n' # simple case of 4 stem chars
- b'\x0az.py\0%(flag1)s\n%(hash1)s\n' # tricky newline = 10 stem characters
- b'\x00%(verylongdir)sx/x\0\n%(hash1)s\n'
- b'\xffx/y\0\n%(hash2)s\n' # more than 255 stem chars
- ) % {b'hash1': BIN_HASH_1,
- b'flag1': b'',
- b'hash2': BIN_HASH_2,
- b'flag2': b'l',
- b'verylongdir': 255 * b'x',
- }
-
A_DEEPER_MANIFEST = (
b'a/b/c/bar.py\0%(hash3)s%(flag1)s\n'
b'a/b/c/bar.txt\0%(hash1)s%(flag1)s\n'
@@ -111,11 +74,6 @@
self.assertEqual(0, len(m))
self.assertEqual([], list(m))
- def testEmptyManifestv2(self):
- m = self.parsemanifest(EMTPY_MANIFEST_V2)
- self.assertEqual(0, len(m))
- self.assertEqual([], list(m))
-
def testManifest(self):
m = self.parsemanifest(A_SHORT_MANIFEST)
self.assertEqual([b'bar/baz/qux.py', b'foo'], list(m))
@@ -126,31 +84,6 @@
with self.assertRaises(KeyError):
m[b'wat']
- def testParseManifestV2(self):
- m1 = self.parsemanifest(A_SHORT_MANIFEST)
- m2 = self.parsemanifest(A_SHORT_MANIFEST_V2)
- # Should have same content as A_SHORT_MANIFEST
- self.assertEqual(m1.text(), m2.text())
-
- def testParseManifestMetadata(self):
- # Metadata is for future-proofing and should be accepted but ignored
- m = self.parsemanifest(A_METADATA_MANIFEST)
- self.assertEqual(A_SHORT_MANIFEST, m.text())
-
- def testParseManifestStemCompression(self):
- m = self.parsemanifest(A_STEM_COMPRESSED_MANIFEST)
- self.assertIn(b'bar/baz/qux.py', m)
- self.assertIn(b'bar/qux/foo.py', m)
- self.assertIn(b'bar/qux/foz.py', m)
- self.assertIn(256 * b'x' + b'/x', m)
- self.assertIn(256 * b'x' + b'/y', m)
- self.assertEqual(A_STEM_COMPRESSED_MANIFEST, m.text(usemanifestv2=True))
-
- def testTextV2(self):
- m1 = self.parsemanifest(A_SHORT_MANIFEST)
- v2text = m1.text(usemanifestv2=True)
- self.assertEqual(A_SHORT_MANIFEST_V2, v2text)
-
def testSetItem(self):
want = BIN_HASH_1
@@ -223,7 +156,7 @@
self.assertEqual(want, m[b'foo'])
self.assertEqual([(b'bar/baz/qux.py', BIN_HASH_2),
(b'foo', BIN_HASH_1 + b'a')],
- list(m.iteritems()))
+ list(m.items()))
# Sometimes it even tries a 22-byte fake hash, but we can
# return 21 and it'll work out
m[b'foo'] = want + b'+'
@@ -238,7 +171,7 @@
# suffix with iteration
self.assertEqual([(b'bar/baz/qux.py', BIN_HASH_2),
(b'foo', want)],
- list(m.iteritems()))
+ list(m.items()))
# shows up in diff
self.assertEqual({b'foo': ((want, f), (h, b''))}, m.diff(clean))
--- a/tests/test-manifestv2.t Thu Mar 15 22:35:07 2018 -0700
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,102 +0,0 @@
-Create repo with old manifest
-
- $ cat << EOF >> $HGRCPATH
- > [format]
- > usegeneraldelta=yes
- > EOF
-
- $ hg init existing
- $ cd existing
- $ echo footext > foo
- $ hg add foo
- $ hg commit -m initial
-
-We're using v1, so no manifestv2 entry is in requires yet.
-
- $ grep manifestv2 .hg/requires
- [1]
-
-Let's clone this with manifestv2 enabled to switch to the new format for
-future commits.
-
- $ cd ..
- $ hg clone --pull existing new --config experimental.manifestv2=1
- requesting all changes
- adding changesets
- adding manifests
- adding file changes
- added 1 changesets with 1 changes to 1 files
- new changesets 0fc9a4fafa44
- updating to branch default
- 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
- $ cd new
-
-Check that entry was added to .hg/requires.
-
- $ grep manifestv2 .hg/requires
- manifestv2
-
-Make a new commit.
-
- $ echo newfootext > foo
- $ hg commit -m new
-
-Check that the manifest actually switched to v2.
-
- $ hg debugdata -m 0
- foo\x0021e958b1dca695a60ee2e9cf151753204ee0f9e9 (esc)
-
- $ hg debugdata -m 1
- \x00 (esc)
- \x00foo\x00 (esc)
- I\xab\x7f\xb8(\x83\xcas\x15\x9d\xc2\xd3\xd3:5\x08\xbad5_ (esc)
-
-Check that manifestv2 is used if the requirement is present, even if it's
-disabled in the config.
-
- $ echo newerfootext > foo
- $ hg --config experimental.manifestv2=False commit -m newer
-
- $ hg debugdata -m 2
- \x00 (esc)
- \x00foo\x00 (esc)
- \xa6\xb1\xfb\xef]\x91\xa1\x19`\xf3.#\x90S\xf8\x06 \xe2\x19\x00 (esc)
-
-Check that we can still read v1 manifests.
-
- $ hg files -r 0
- foo
-
- $ cd ..
-
-Check that entry is added to .hg/requires on repo creation
-
- $ hg --config experimental.manifestv2=True init repo
- $ cd repo
- $ grep manifestv2 .hg/requires
- manifestv2
-
-Set up simple repo
-
- $ echo a > file1
- $ echo b > file2
- $ echo c > file3
- $ hg ci -Aqm 'initial'
- $ echo d > file2
- $ hg ci -m 'modify file2'
-
-Check that 'hg verify', which uses manifest.readdelta(), works
-
- $ hg verify
- checking changesets
- checking manifests
- crosschecking files in changesets and manifests
- checking files
- 3 files, 2 changesets, 4 total revisions
-
-Check that manifest revlog is smaller than for v1
-
- $ hg debugindex -m
- rev offset length delta linkrev nodeid p1 p2
- 0 0 81 -1 0 57361477c778 000000000000 000000000000
- 1 81 33 0 1 aeaab5a2ef74 57361477c778 000000000000
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-mdiff.py Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,24 @@
+from __future__ import absolute_import
+from __future__ import print_function
+
+import unittest
+
+from mercurial import (
+ mdiff,
+)
+
+class splitnewlinesTests(unittest.TestCase):
+
+ def test_splitnewlines(self):
+ cases = {b'a\nb\nc\n': [b'a\n', b'b\n', b'c\n'],
+ b'a\nb\nc': [b'a\n', b'b\n', b'c'],
+ b'a\nb\nc\n\n': [b'a\n', b'b\n', b'c\n', b'\n'],
+ b'': [],
+ b'abcabc': [b'abcabc'],
+ }
+ for inp, want in cases.items():
+ self.assertEqual(mdiff.splitnewlines(inp), want)
+
+if __name__ == '__main__':
+ import silenttestrunner
+ silenttestrunner.main(__name__)
--- a/tests/test-merge-tools.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-merge-tools.t Mon Mar 19 08:07:18 2018 -0700
@@ -1059,6 +1059,150 @@
# hg resolve --list
R f
+premerge=keep respects ui.mergemarkers=basic:
+
+ $ beforemerge
+ [merge-tools]
+ false.whatever=
+ true.priority=1
+ true.executable=cat
+ # hg update -C 1
+ $ hg merge -r 4 --config merge-tools.true.premerge=keep --config ui.mergemarkers=basic
+ merging f
+ <<<<<<< working copy
+ revision 1
+ space
+ =======
+ revision 4
+ >>>>>>> merge rev
+ revision 0
+ space
+ revision 4
+ 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ aftermerge
+ # cat f
+ <<<<<<< working copy
+ revision 1
+ space
+ =======
+ revision 4
+ >>>>>>> merge rev
+ # hg stat
+ M f
+ # hg resolve --list
+ R f
+
+premerge=keep ignores ui.mergemarkers=basic if true.mergemarkers=detailed:
+
+ $ beforemerge
+ [merge-tools]
+ false.whatever=
+ true.priority=1
+ true.executable=cat
+ # hg update -C 1
+ $ hg merge -r 4 --config merge-tools.true.premerge=keep \
+ > --config ui.mergemarkers=basic \
+ > --config merge-tools.true.mergemarkers=detailed
+ merging f
+ <<<<<<< working copy: ef83787e2614 - test: revision 1
+ revision 1
+ space
+ =======
+ revision 4
+ >>>>>>> merge rev: 81448d39c9a0 - test: revision 4
+ revision 0
+ space
+ revision 4
+ 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ aftermerge
+ # cat f
+ <<<<<<< working copy: ef83787e2614 - test: revision 1
+ revision 1
+ space
+ =======
+ revision 4
+ >>>>>>> merge rev: 81448d39c9a0 - test: revision 4
+ # hg stat
+ M f
+ # hg resolve --list
+ R f
+
+premerge=keep respects ui.mergemarkertemplate instead of
+true.mergemarkertemplate if true.mergemarkers=basic:
+
+ $ beforemerge
+ [merge-tools]
+ false.whatever=
+ true.priority=1
+ true.executable=cat
+ # hg update -C 1
+ $ hg merge -r 4 --config merge-tools.true.premerge=keep \
+ > --config ui.mergemarkertemplate='uitmpl {rev}' \
+ > --config merge-tools.true.mergemarkertemplate='tooltmpl {short(node)}'
+ merging f
+ <<<<<<< working copy: uitmpl 1
+ revision 1
+ space
+ =======
+ revision 4
+ >>>>>>> merge rev: uitmpl 4
+ revision 0
+ space
+ revision 4
+ 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ aftermerge
+ # cat f
+ <<<<<<< working copy: uitmpl 1
+ revision 1
+ space
+ =======
+ revision 4
+ >>>>>>> merge rev: uitmpl 4
+ # hg stat
+ M f
+ # hg resolve --list
+ R f
+
+premerge=keep respects true.mergemarkertemplate instead of
+true.mergemarkertemplate if true.mergemarkers=detailed:
+
+ $ beforemerge
+ [merge-tools]
+ false.whatever=
+ true.priority=1
+ true.executable=cat
+ # hg update -C 1
+ $ hg merge -r 4 --config merge-tools.true.premerge=keep \
+ > --config ui.mergemarkertemplate='uitmpl {rev}' \
+ > --config merge-tools.true.mergemarkertemplate='tooltmpl {short(node)}' \
+ > --config merge-tools.true.mergemarkers=detailed
+ merging f
+ <<<<<<< working copy: tooltmpl ef83787e2614
+ revision 1
+ space
+ =======
+ revision 4
+ >>>>>>> merge rev: tooltmpl 81448d39c9a0
+ revision 0
+ space
+ revision 4
+ 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ aftermerge
+ # cat f
+ <<<<<<< working copy: tooltmpl ef83787e2614
+ revision 1
+ space
+ =======
+ revision 4
+ >>>>>>> merge rev: tooltmpl 81448d39c9a0
+ # hg stat
+ M f
+ # hg resolve --list
+ R f
Tool execution
@@ -1190,6 +1334,169 @@
# hg resolve --list
R f
+Merge using a tool that supports labellocal, labelother, and labelbase, checking
+that they're quoted properly as well. This is using the default 'basic'
+mergemarkers even though ui.mergemarkers is 'detailed', so it's ignoring both
+mergemarkertemplate settings:
+
+ $ beforemerge
+ [merge-tools]
+ false.whatever=
+ true.priority=1
+ true.executable=cat
+ # hg update -C 1
+ $ cat <<EOF > printargs_merge_tool
+ > while test \$# -gt 0; do echo arg: \"\$1\"; shift; done
+ > EOF
+ $ hg --config merge-tools.true.executable='sh' \
+ > --config merge-tools.true.args='./printargs_merge_tool ll:$labellocal lo: $labelother lb:$labelbase": "$base' \
+ > --config merge-tools.true.mergemarkertemplate='tooltmpl {short(node)}' \
+ > --config ui.mergemarkertemplate='uitmpl {rev}' \
+ > --config ui.mergemarkers=detailed \
+ > merge -r 2
+ merging f
+ arg: "ll:working copy"
+ arg: "lo:"
+ arg: "merge rev"
+ arg: "lb:base: */f~base.*" (glob)
+ 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ rm -f 'printargs_merge_tool'
+
+Same test with experimental.mergetempdirprefix set:
+
+ $ beforemerge
+ [merge-tools]
+ false.whatever=
+ true.priority=1
+ true.executable=cat
+ # hg update -C 1
+ $ cat <<EOF > printargs_merge_tool
+ > while test \$# -gt 0; do echo arg: \"\$1\"; shift; done
+ > EOF
+ $ hg --config experimental.mergetempdirprefix=$TESTTMP/hgmerge. \
+ > --config merge-tools.true.executable='sh' \
+ > --config merge-tools.true.args='./printargs_merge_tool ll:$labellocal lo: $labelother lb:$labelbase": "$base' \
+ > --config merge-tools.true.mergemarkertemplate='tooltmpl {short(node)}' \
+ > --config ui.mergemarkertemplate='uitmpl {rev}' \
+ > --config ui.mergemarkers=detailed \
+ > merge -r 2
+ merging f
+ arg: "ll:working copy"
+ arg: "lo:"
+ arg: "merge rev"
+ arg: "lb:base: $TESTTMP/hgmerge.*/f~base" (glob)
+ 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ rm -f 'printargs_merge_tool'
+
+Merge using a tool that supports labellocal, labelother, and labelbase, checking
+that they're quoted properly as well. This is using 'detailed' mergemarkers,
+even though ui.mergemarkers is 'basic', and using the tool's
+mergemarkertemplate:
+
+ $ beforemerge
+ [merge-tools]
+ false.whatever=
+ true.priority=1
+ true.executable=cat
+ # hg update -C 1
+ $ cat <<EOF > printargs_merge_tool
+ > while test \$# -gt 0; do echo arg: \"\$1\"; shift; done
+ > EOF
+ $ hg --config merge-tools.true.executable='sh' \
+ > --config merge-tools.true.args='./printargs_merge_tool ll:$labellocal lo: $labelother lb:$labelbase": "$base' \
+ > --config merge-tools.true.mergemarkers=detailed \
+ > --config merge-tools.true.mergemarkertemplate='tooltmpl {short(node)}' \
+ > --config ui.mergemarkertemplate='uitmpl {rev}' \
+ > --config ui.mergemarkers=basic \
+ > merge -r 2
+ merging f
+ arg: "ll:working copy: tooltmpl ef83787e2614"
+ arg: "lo:"
+ arg: "merge rev: tooltmpl 0185f4e0cf02"
+ arg: "lb:base: */f~base.*" (glob)
+ 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ rm -f 'printargs_merge_tool'
+
+The merge tool still gets labellocal and labelother as 'basic' even when
+premerge=keep is used and has 'detailed' markers:
+
+ $ beforemerge
+ [merge-tools]
+ false.whatever=
+ true.priority=1
+ true.executable=cat
+ # hg update -C 1
+ $ cat <<EOF > mytool
+ > echo labellocal: \"\$1\"
+ > echo labelother: \"\$2\"
+ > echo "output (arg)": \"\$3\"
+ > echo "output (contents)":
+ > cat "\$3"
+ > EOF
+ $ hg --config merge-tools.true.executable='sh' \
+ > --config merge-tools.true.args='mytool $labellocal $labelother $output' \
+ > --config merge-tools.true.premerge=keep \
+ > --config merge-tools.true.mergemarkertemplate='tooltmpl {short(node)}' \
+ > --config ui.mergemarkertemplate='uitmpl {rev}' \
+ > --config ui.mergemarkers=detailed \
+ > merge -r 2
+ merging f
+ labellocal: "working copy"
+ labelother: "merge rev"
+ output (arg): "$TESTTMP/f"
+ output (contents):
+ <<<<<<< working copy: uitmpl 1
+ revision 1
+ =======
+ revision 2
+ >>>>>>> merge rev: uitmpl 2
+ space
+ 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ rm -f 'mytool'
+
+premerge=keep uses the *tool's* mergemarkertemplate if tool's
+mergemarkers=detailed; labellocal and labelother also use the tool's template
+
+ $ beforemerge
+ [merge-tools]
+ false.whatever=
+ true.priority=1
+ true.executable=cat
+ # hg update -C 1
+ $ cat <<EOF > mytool
+ > echo labellocal: \"\$1\"
+ > echo labelother: \"\$2\"
+ > echo "output (arg)": \"\$3\"
+ > echo "output (contents)":
+ > cat "\$3"
+ > EOF
+ $ hg --config merge-tools.true.executable='sh' \
+ > --config merge-tools.true.args='mytool $labellocal $labelother $output' \
+ > --config merge-tools.true.premerge=keep \
+ > --config merge-tools.true.mergemarkers=detailed \
+ > --config merge-tools.true.mergemarkertemplate='tooltmpl {short(node)}' \
+ > --config ui.mergemarkertemplate='uitmpl {rev}' \
+ > --config ui.mergemarkers=detailed \
+ > merge -r 2
+ merging f
+ labellocal: "working copy: tooltmpl ef83787e2614"
+ labelother: "merge rev: tooltmpl 0185f4e0cf02"
+ output (arg): "$TESTTMP/f"
+ output (contents):
+ <<<<<<< working copy: tooltmpl ef83787e2614
+ revision 1
+ =======
+ revision 2
+ >>>>>>> merge rev: tooltmpl 0185f4e0cf02
+ space
+ 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ rm -f 'mytool'
+
Issue3581: Merging a filename that needs to be quoted
(This test doesn't work on Windows filesystems even on Linux, so check
for Unix-like permission)
@@ -1278,7 +1585,22 @@
$ hg update -q -C 2
$ hg merge -y -r tip --tool echo --config merge-tools.echo.args='$base $local $other $output'
merging f and f.txt to f.txt
- */f~base.?????? $TESTTMP/f.txt.orig */f~other.??????.txt $TESTTMP/f.txt (glob)
+ */f~base.* $TESTTMP/f.txt.orig */f~other.*.txt $TESTTMP/f.txt (glob)
+ 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+
+Verify naming of temporary files and that extension is preserved
+(experimental.mergetempdirprefix version):
+
+ $ hg update -q -C 1
+ $ hg mv f f.txt
+ $ hg ci -qm "f.txt"
+ $ hg update -q -C 2
+ $ hg merge -y -r tip --tool echo \
+ > --config merge-tools.echo.args='$base $local $other $output' \
+ > --config experimental.mergetempdirprefix=$TESTTMP/hgmerge.
+ merging f and f.txt to f.txt
+ $TESTTMP/hgmerge.*/f~base $TESTTMP/f.txt.orig $TESTTMP/hgmerge.*/f~other.txt $TESTTMP/f.txt (glob)
0 files updated, 1 files merged, 0 files removed, 0 files unresolved
(branch merge, don't forget to commit)
--- a/tests/test-minirst.py Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-minirst.py Mon Mar 19 08:07:18 2018 -0700
@@ -28,7 +28,7 @@
debugformat(text, 30, **kwargs)
debugformat(text, 'html', **kwargs)
-paragraphs = """
+paragraphs = b"""
This is some text in the first paragraph.
A small indented paragraph.
@@ -37,9 +37,9 @@
\n \n \nThe third and final paragraph.
"""
-debugformats('paragraphs', paragraphs)
+debugformats(b'paragraphs', paragraphs)
-definitions = """
+definitions = b"""
A Term
Definition. The indented
lines make up the definition.
@@ -52,9 +52,9 @@
Definition.
"""
-debugformats('definitions', definitions)
+debugformats(b'definitions', definitions)
-literals = r"""
+literals = br"""
The fully minimized form is the most
convenient form::
@@ -76,9 +76,9 @@
with '::' disappears in the final output.
"""
-debugformats('literals', literals)
+debugformats(b'literals', literals)
-lists = """
+lists = b"""
- This is the first list item.
Second paragraph in the first list item.
@@ -127,9 +127,9 @@
* This is the third bullet
"""
-debugformats('lists', lists)
+debugformats(b'lists', lists)
-options = """
+options = b"""
There is support for simple option lists,
but only with long options:
@@ -153,9 +153,9 @@
--foo bar baz
"""
-debugformats('options', options)
+debugformats(b'options', options)
-fields = """
+fields = b"""
:a: First item.
:ab: Second item. Indentation and wrapping
is handled automatically.
@@ -166,9 +166,9 @@
:much too large: This key is big enough to get its own line.
"""
-debugformats('fields', fields)
+debugformats(b'fields', fields)
-containers = """
+containers = b"""
Normal output.
.. container:: debug
@@ -184,17 +184,17 @@
Debug output.
"""
-debugformats('containers (normal)', containers)
-debugformats('containers (verbose)', containers, keep=['verbose'])
-debugformats('containers (debug)', containers, keep=['debug'])
-debugformats('containers (verbose debug)', containers,
+debugformats(b'containers (normal)', containers)
+debugformats(b'containers (verbose)', containers, keep=['verbose'])
+debugformats(b'containers (debug)', containers, keep=['debug'])
+debugformats(b'containers (verbose debug)', containers,
keep=['verbose', 'debug'])
-roles = """Please see :hg:`add`."""
-debugformats('roles', roles)
+roles = b"""Please see :hg:`add`."""
+debugformats(b'roles', roles)
-sections = """
+sections = b"""
Title
=====
@@ -207,10 +207,10 @@
Markup: ``foo`` and :hg:`help`
------------------------------
"""
-debugformats('sections', sections)
+debugformats(b'sections', sections)
-admonitions = """
+admonitions = b"""
.. note::
This is a note
@@ -225,9 +225,9 @@
This is danger
"""
-debugformats('admonitions', admonitions)
+debugformats(b'admonitions', admonitions)
-comments = """
+comments = b"""
Some text.
.. A comment
@@ -241,27 +241,27 @@
Empty comment above
"""
-debugformats('comments', comments)
+debugformats(b'comments', comments)
-data = [['a', 'b', 'c'],
- ['1', '2', '3'],
- ['foo', 'bar', 'baz this list is very very very long man']]
+data = [[b'a', b'b', b'c'],
+ [b'1', b'2', b'3'],
+ [b'foo', b'bar', b'baz this list is very very very long man']]
rst = minirst.maketable(data, 2, True)
-table = ''.join(rst)
+table = b''.join(rst)
print(table)
-debugformats('table', table)
+debugformats(b'table', table)
-data = [['s', 'long', 'line\ngoes on here'],
- ['', 'xy', 'tried to fix here\n by indenting']]
+data = [[b's', b'long', b'line\ngoes on here'],
+ [b'', b'xy', b'tried to fix here\n by indenting']]
rst = minirst.maketable(data, 1, False)
-table = ''.join(rst)
+table = b''.join(rst)
print(table)
-debugformats('table+nl', table)
+debugformats(b'table+nl', table)
--- a/tests/test-mq-eol.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-mq-eol.t Mon Mar 19 08:07:18 2018 -0700
@@ -10,29 +10,29 @@
> EOF
$ cat > makepatch.py <<EOF
- > f = file('eol.diff', 'wb')
+ > f = open('eol.diff', 'wb')
> w = f.write
- > w('test message\n')
- > w('diff --git a/a b/a\n')
- > w('--- a/a\n')
- > w('+++ b/a\n')
- > w('@@ -1,5 +1,5 @@\n')
- > w(' a\n')
- > w('-b\r\n')
- > w('+y\r\n')
- > w(' c\r\n')
- > w(' d\n')
- > w('-e\n')
- > w('\ No newline at end of file\n')
- > w('+z\r\n')
- > w('\ No newline at end of file\r\n')
+ > w(b'test message\n')
+ > w(b'diff --git a/a b/a\n')
+ > w(b'--- a/a\n')
+ > w(b'+++ b/a\n')
+ > w(b'@@ -1,5 +1,5 @@\n')
+ > w(b' a\n')
+ > w(b'-b\r\n')
+ > w(b'+y\r\n')
+ > w(b' c\r\n')
+ > w(b' d\n')
+ > w(b'-e\n')
+ > w(b'\ No newline at end of file\n')
+ > w(b'+z\r\n')
+ > w(b'\ No newline at end of file\r\n')
> EOF
$ cat > cateol.py <<EOF
> import sys
- > for line in file(sys.argv[1], 'rb'):
- > line = line.replace('\r', '<CR>')
- > line = line.replace('\n', '<LF>')
+ > for line in open(sys.argv[1], 'rb'):
+ > line = line.replace(b'\r', b'<CR>')
+ > line = line.replace(b'\n', b'<LF>')
> print(line)
> EOF
@@ -44,7 +44,7 @@
Test different --eol values
- $ $PYTHON -c 'file("a", "wb").write("a\nb\nc\nd\ne")'
+ $ $PYTHON -c 'open("a", "wb").write(b"a\nb\nc\nd\ne")'
$ hg ci -Am adda
adding .hgignore
adding a
@@ -152,15 +152,15 @@
$ hg init testeol
$ cd testeol
- $ $PYTHON -c "file('a', 'wb').write('1\r\n2\r\n3\r\n4')"
+ $ $PYTHON -c "open('a', 'wb').write(b'1\r\n2\r\n3\r\n4')"
$ hg ci -Am adda
adding a
- $ $PYTHON -c "file('a', 'wb').write('1\r\n2\r\n33\r\n4')"
+ $ $PYTHON -c "open('a', 'wb').write(b'1\r\n2\r\n33\r\n4')"
$ hg qnew patch1
$ hg qpop
popping patch1
patch queue now empty
- $ $PYTHON -c "file('a', 'wb').write('1\r\n22\r\n33\r\n4')"
+ $ $PYTHON -c "open('a', 'wb').write(b'1\r\n22\r\n33\r\n4')"
$ hg ci -m changea
$ hg --config 'patch.eol=LF' qpush
--- a/tests/test-mq-missingfiles.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-mq-missingfiles.t Mon Mar 19 08:07:18 2018 -0700
@@ -9,8 +9,8 @@
> args = sys.argv[2:]
> assert (len(args) % 2) == 0
>
- > f = file(path, 'wb')
- > for i in xrange(len(args)/2):
+ > f = open(path, 'wb')
+ > for i in range(len(args) // 2):
> count, s = args[2*i:2*i+2]
> count = int(count)
> s = s.decode('string_escape')
--- a/tests/test-mq-qimport.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-mq-qimport.t Mon Mar 19 08:07:18 2018 -0700
@@ -6,8 +6,8 @@
> args = sys.argv[2:]
> assert (len(args) % 2) == 0
>
- > f = file(path, 'wb')
- > for i in xrange(len(args)/2):
+ > f = open(path, 'wb')
+ >     for i in range(len(args) // 2):
> count, s = args[2*i:2*i+2]
> count = int(count)
> s = s.decode('string_escape')
--- a/tests/test-mq-qrefresh-replace-log-message.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-mq-qrefresh-replace-log-message.t Mon Mar 19 08:07:18 2018 -0700
@@ -119,7 +119,7 @@
> def reposetup(ui, repo):
> class commitfailure(repo.__class__):
> def commit(self, *args, **kwargs):
- > raise error.Abort('emulating unexpected abort')
+ > raise error.Abort(b'emulating unexpected abort')
> repo.__class__ = commitfailure
> EOF
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-narrow-acl.t Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,42 @@
+Make a narrow clone then archive it
+ $ . "$TESTDIR/narrow-library.sh"
+
+ $ hg init master
+ $ cd master
+
+ $ for x in `$TESTDIR/seq.py 3`; do
+ > echo $x > "f$x"
+ > hg add "f$x"
+ > hg commit -m "Add $x"
+ > done
+ $ cat >> .hg/hgrc << EOF
+ > [narrowhgacl]
+ > default.includes=f1 f2
+ > EOF
+ $ hg serve -a localhost -p $HGPORT1 -d --pid-file=hg.pid
+ $ cat hg.pid >> "$DAEMON_PIDS"
+
+ $ cd ..
+ $ hg clone http://localhost:$HGPORT1 narrowclone1
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 3 changesets with 2 changes to 2 files
+ new changesets * (glob)
+ updating to branch default
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+The clone directory should only contain f1 and f2
+ $ ls -1 narrowclone1 | sort
+ f1
+ f2
+
+Requirements should contain narrowhg
+ $ cat narrowclone1/.hg/requires | grep narrowhg
+ narrowhg-experimental
+
+NarrowHG should track f1 and f2
+ $ hg -R narrowclone1 tracked
+ I path:f1
+ I path:f2
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-narrow-archive.t Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,32 @@
+Make a narrow clone then archive it
+ $ . "$TESTDIR/narrow-library.sh"
+
+ $ hg init master
+ $ cd master
+
+ $ for x in `$TESTDIR/seq.py 3`; do
+ > echo $x > "f$x"
+ > hg add "f$x"
+ > hg commit -m "Add $x"
+ > done
+
+ $ hg serve -a localhost -p $HGPORT1 -d --pid-file=hg.pid
+ $ cat hg.pid >> "$DAEMON_PIDS"
+
+ $ cd ..
+ $ hg clone --narrow --include f1 --include f2 http://localhost:$HGPORT1/ narrowclone1
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 3 changesets with 2 changes to 2 files
+ new changesets * (glob)
+ updating to branch default
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+The tar should only contain f1 and f2
+ $ cd narrowclone1
+ $ hg archive -t tgz repo.tgz
+ $ tar tfz repo.tgz
+ repo/f1
+ repo/f2
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-narrow-clone-no-ellipsis.t Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,130 @@
+ $ . "$TESTDIR/narrow-library.sh"
+
+ $ hg init master
+ $ cd master
+ $ mkdir dir
+ $ mkdir dir/src
+ $ cd dir/src
+ $ for x in `$TESTDIR/seq.py 20`; do echo $x > "f$x"; hg add "f$x"; hg commit -m "Commit src $x"; done
+ $ cd ..
+ $ mkdir tests
+ $ cd tests
+ $ for x in `$TESTDIR/seq.py 20`; do echo $x > "t$x"; hg add "t$x"; hg commit -m "Commit test $x"; done
+ $ cd ../../..
+
+narrow clone a file, f10
+
+ $ hg clone --narrow ssh://user@dummy/master narrow --noupdate --include "dir/src/f10"
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 40 changesets with 1 changes to 1 files
+ new changesets *:* (glob)
+ $ cd narrow
+ $ cat .hg/requires | grep -v generaldelta
+ dotencode
+ fncache
+ narrowhg-experimental
+ revlogv1
+ store
+
+ $ cat .hg/narrowspec
+ [includes]
+ path:dir/src/f10
+ [excludes]
+ $ hg update
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ find * | sort
+ dir
+ dir/src
+ dir/src/f10
+ $ cat dir/src/f10
+ 10
+
+ $ cd ..
+
+narrow clone a directory, tests/, except tests/t19
+
+ $ hg clone --narrow ssh://user@dummy/master narrowdir --noupdate --include "dir/tests/" --exclude "dir/tests/t19"
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 40 changesets with 19 changes to 19 files
+ new changesets *:* (glob)
+ $ cd narrowdir
+ $ cat .hg/narrowspec
+ [includes]
+ path:dir/tests
+ [excludes]
+ path:dir/tests/t19
+ $ hg update
+ 19 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ find * | sort
+ dir
+ dir/tests
+ dir/tests/t1
+ dir/tests/t10
+ dir/tests/t11
+ dir/tests/t12
+ dir/tests/t13
+ dir/tests/t14
+ dir/tests/t15
+ dir/tests/t16
+ dir/tests/t17
+ dir/tests/t18
+ dir/tests/t2
+ dir/tests/t20
+ dir/tests/t3
+ dir/tests/t4
+ dir/tests/t5
+ dir/tests/t6
+ dir/tests/t7
+ dir/tests/t8
+ dir/tests/t9
+
+ $ cd ..
+
+narrow clone everything but a directory (tests/)
+
+ $ hg clone --narrow ssh://user@dummy/master narrowroot --noupdate --exclude "dir/tests"
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 40 changesets with 20 changes to 20 files
+ new changesets *:* (glob)
+ $ cd narrowroot
+ $ cat .hg/narrowspec
+ [includes]
+ path:.
+ [excludes]
+ path:dir/tests
+ $ hg update
+ 20 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ find * | sort
+ dir
+ dir/src
+ dir/src/f1
+ dir/src/f10
+ dir/src/f11
+ dir/src/f12
+ dir/src/f13
+ dir/src/f14
+ dir/src/f15
+ dir/src/f16
+ dir/src/f17
+ dir/src/f18
+ dir/src/f19
+ dir/src/f2
+ dir/src/f20
+ dir/src/f3
+ dir/src/f4
+ dir/src/f5
+ dir/src/f6
+ dir/src/f7
+ dir/src/f8
+ dir/src/f9
+
+ $ cd ..
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-narrow-clone-non-narrow-server.t Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,53 @@
+Test attempting a narrow clone against a server that doesn't support narrowhg.
+
+ $ . "$TESTDIR/narrow-library.sh"
+
+ $ hg init master
+ $ cd master
+
+ $ for x in `$TESTDIR/seq.py 10`; do
+ > echo $x > "f$x"
+ > hg add "f$x"
+ > hg commit -m "Add $x"
+ > done
+
+ $ hg serve -a localhost -p $HGPORT1 --config extensions.narrow=! -d \
+ > --pid-file=hg.pid
+ $ cat hg.pid >> "$DAEMON_PIDS"
+ $ hg serve -a localhost -p $HGPORT2 -d --pid-file=hg.pid
+ $ cat hg.pid >> "$DAEMON_PIDS"
+
+Verify that narrow is advertised in the bundle2 capabilities:
+ $ echo hello | hg -R . serve --stdio | \
+ > $PYTHON -c "from __future__ import print_function; import sys, urllib; print(urllib.unquote_plus(list(sys.stdin)[1]))" | grep narrow
+ narrow=v0
+
+ $ cd ..
+
+ $ hg clone --narrow --include f1 http://localhost:$HGPORT1/ narrowclone
+ requesting all changes
+ abort: server doesn't support narrow clones
+ [255]
+
+Make a narrow clone (via HGPORT2), then try to narrow and widen
+into it (from HGPORT1) to prove that narrowing is fine and widening fails
+gracefully:
+ $ hg clone -r 0 --narrow --include f1 http://localhost:$HGPORT2/ narrowclone
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 1 changes to 1 files
+ new changesets * (glob)
+ updating to branch default
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cd narrowclone
+ $ hg tracked --addexclude f2 http://localhost:$HGPORT1/
+ comparing with http://localhost:$HGPORT1/
+ searching for changes
+ looking for local changes to affected paths
+ $ hg tracked --addinclude f1 http://localhost:$HGPORT1/
+ comparing with http://localhost:$HGPORT1/
+ searching for changes
+ no changes found
+ abort: server doesn't support narrow clones
+ [255]
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-narrow-clone-nonlinear.t Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,148 @@
+Testing narrow clones when changesets modifying a matching file exist on
+multiple branches
+
+ $ . "$TESTDIR/narrow-library.sh"
+
+ $ hg init master
+ $ cd master
+ $ cat >> .hg/hgrc <<EOF
+ > [narrow]
+ > serveellipses=True
+ > EOF
+
+ $ hg branch default
+ marked working directory as branch default
+ (branches are permanent and global, did you want a bookmark?)
+ $ for x in `$TESTDIR/seq.py 10`; do
+ > echo $x > "f$x"
+ > hg add "f$x"
+ > hg commit -m "Add $x"
+ > done
+
+ $ hg branch release-v1
+ marked working directory as branch release-v1
+ (branches are permanent and global, did you want a bookmark?)
+ $ hg commit -m "Start release for v1"
+
+ $ hg update default
+ 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ for x in `$TESTDIR/seq.py 10`; do
+ > echo "$x v2" > "f$x"
+ > hg commit -m "Update $x to v2"
+ > done
+
+ $ hg update release-v1
+ 10 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg branch release-v1
+ marked working directory as branch release-v1
+ $ for x in `$TESTDIR/seq.py 1 5`; do
+ > echo "$x v1 hotfix" > "f$x"
+ > hg commit -m "Hotfix $x in v1"
+ > done
+
+ $ hg update default
+ 10 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg branch release-v2
+ marked working directory as branch release-v2
+ $ hg commit -m "Start release for v2"
+
+ $ hg update default
+ 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg branch default
+ marked working directory as branch default
+ $ for x in `$TESTDIR/seq.py 10`; do
+ > echo "$x v3" > "f$x"
+ > hg commit -m "Update $x to v3"
+ > done
+
+ $ hg update release-v2
+ 10 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg branch release-v2
+ marked working directory as branch release-v2
+ $ for x in `$TESTDIR/seq.py 4 9`; do
+ > echo "$x v2 hotfix" > "f$x"
+ > hg commit -m "Hotfix $x in v2"
+ > done
+
+ $ hg heads -T '{rev} <- {p1rev} ({branch}): {desc}\n'
+ 42 <- 41 (release-v2): Hotfix 9 in v2
+ 36 <- 35 (default): Update 10 to v3
+ 25 <- 24 (release-v1): Hotfix 5 in v1
+
+ $ cd ..
+
+We now have 3 branches: default, which has v3 of all files, release-v1 which
+has v1 of all files, and release-v2 with v2 of all files.
+
+Narrow clone which should get all branches
+
+ $ hg clone --narrow ssh://user@dummy/master narrow --include "f5"
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 12 changesets with 5 changes to 1 files (+2 heads)
+ new changesets *:* (glob)
+ updating to branch default
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cd narrow
+ $ hg log -G -T "{if(ellipsis, '...')}{node|short} ({branch}): {desc}\n"
+ o ...031f516143fe (release-v2): Hotfix 9 in v2
+ |
+ o 9cd7f7bb9ca1 (release-v2): Hotfix 5 in v2
+ |
+ o ...37bbc88f3ef0 (release-v2): Hotfix 4 in v2
+ |
+ | @ ...dae2f368ca07 (default): Update 10 to v3
+ | |
+ | o 9c224e89cb31 (default): Update 5 to v3
+ | |
+ | o ...04fb59c7c9dc (default): Update 4 to v3
+ |/
+ | o b2253e82401f (release-v1): Hotfix 5 in v1
+ | |
+ | o ...960ac37d74fd (release-v1): Hotfix 4 in v1
+ | |
+ o | 986298e3f347 (default): Update 5 to v2
+ | |
+ o | ...75d539c667ec (default): Update 4 to v2
+ |/
+ o 04c71bd5707f (default): Add 5
+ |
+ o ...881b3891d041 (default): Add 4
+
+
+Narrow clone the first file, hitting edge condition where unaligned
+changeset and manifest revnums cross branches.
+
+ $ hg clone --narrow ssh://user@dummy/master narrow --include "f1"
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 10 changesets with 4 changes to 1 files (+2 heads)
+ new changesets *:* (glob)
+ updating to branch default
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cd narrow
+ $ hg log -G -T "{if(ellipsis, '...')}{node|short} ({branch}): {desc}\n"
+ o ...031f516143fe (release-v2): Hotfix 9 in v2
+ |
+ | @ ...dae2f368ca07 (default): Update 10 to v3
+ | |
+ | o 1f5d184b8e96 (default): Update 1 to v3
+ |/
+ | o ...b2253e82401f (release-v1): Hotfix 5 in v1
+ | |
+ | o 133502f6b7e5 (release-v1): Hotfix 1 in v1
+ | |
+ o | ...79165c83d644 (default): Update 10 to v2
+ | |
+ o | c7b7a5f2f088 (default): Update 1 to v2
+ | |
+ | o ...f0531a3db7a9 (release-v1): Start release for v1
+ |/
+ o ...6a3f0f0abef3 (default): Add 10
+ |
+ o e012ac15eaaa (default): Add 1
+
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-narrow-clone.t Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,225 @@
+ $ . "$TESTDIR/narrow-library.sh"
+
+ $ hg init master
+ $ cd master
+ $ cat >> .hg/hgrc <<EOF
+ > [narrow]
+ > serveellipses=True
+ > EOF
+ $ mkdir dir
+ $ mkdir dir/src
+ $ cd dir/src
+ $ for x in `$TESTDIR/seq.py 20`; do echo $x > "f$x"; hg add "f$x"; hg commit -m "Commit src $x"; done
+ $ cd ..
+ $ mkdir tests
+ $ cd tests
+ $ for x in `$TESTDIR/seq.py 20`; do echo $x > "t$x"; hg add "t$x"; hg commit -m "Commit test $x"; done
+ $ cd ../../..
+
+narrow clone a file, f10
+
+ $ hg clone --narrow ssh://user@dummy/master narrow --noupdate --include "dir/src/f10"
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 3 changesets with 1 changes to 1 files
+ new changesets *:* (glob)
+ $ cd narrow
+ $ cat .hg/requires | grep -v generaldelta
+ dotencode
+ fncache
+ narrowhg-experimental
+ revlogv1
+ store
+
+ $ cat .hg/narrowspec
+ [includes]
+ path:dir/src/f10
+ [excludes]
+ $ hg tracked
+ I path:dir/src/f10
+ $ hg update
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ find * | sort
+ dir
+ dir/src
+ dir/src/f10
+ $ cat dir/src/f10
+ 10
+
+ $ cd ..
+
+narrow clone with a newline should fail
+
+ $ hg clone --narrow ssh://user@dummy/master narrow_fail --noupdate --include 'dir/src/f10
+ > '
+ requesting all changes
+ abort: newlines are not allowed in narrowspec paths
+ [255]
+
+narrow clone a directory, tests/, except tests/t19
+
+ $ hg clone --narrow ssh://user@dummy/master narrowdir --noupdate --include "dir/tests/" --exclude "dir/tests/t19"
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 21 changesets with 19 changes to 19 files
+ new changesets *:* (glob)
+ $ cd narrowdir
+ $ cat .hg/narrowspec
+ [includes]
+ path:dir/tests
+ [excludes]
+ path:dir/tests/t19
+ $ hg tracked
+ I path:dir/tests
+ X path:dir/tests/t19
+ $ hg update
+ 19 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ find * | sort
+ dir
+ dir/tests
+ dir/tests/t1
+ dir/tests/t10
+ dir/tests/t11
+ dir/tests/t12
+ dir/tests/t13
+ dir/tests/t14
+ dir/tests/t15
+ dir/tests/t16
+ dir/tests/t17
+ dir/tests/t18
+ dir/tests/t2
+ dir/tests/t20
+ dir/tests/t3
+ dir/tests/t4
+ dir/tests/t5
+ dir/tests/t6
+ dir/tests/t7
+ dir/tests/t8
+ dir/tests/t9
+
+ $ cd ..
+
+narrow clone everything but a directory (tests/)
+
+ $ hg clone --narrow ssh://user@dummy/master narrowroot --noupdate --exclude "dir/tests"
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 21 changesets with 20 changes to 20 files
+ new changesets *:* (glob)
+ $ cd narrowroot
+ $ cat .hg/narrowspec
+ [includes]
+ path:.
+ [excludes]
+ path:dir/tests
+ $ hg tracked
+ I path:.
+ X path:dir/tests
+ $ hg update
+ 20 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ find * | sort
+ dir
+ dir/src
+ dir/src/f1
+ dir/src/f10
+ dir/src/f11
+ dir/src/f12
+ dir/src/f13
+ dir/src/f14
+ dir/src/f15
+ dir/src/f16
+ dir/src/f17
+ dir/src/f18
+ dir/src/f19
+ dir/src/f2
+ dir/src/f20
+ dir/src/f3
+ dir/src/f4
+ dir/src/f5
+ dir/src/f6
+ dir/src/f7
+ dir/src/f8
+ dir/src/f9
+
+ $ cd ..
+
+narrow clone no paths at all
+
+ $ hg clone --narrow ssh://user@dummy/master narrowempty --noupdate
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 0 changes to 0 files
+ new changesets * (glob)
+ $ cd narrowempty
+ $ hg tracked
+ $ hg update
+ 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ ls
+
+ $ cd ..
+
+simple clone
+ $ hg clone ssh://user@dummy/master simpleclone
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 40 changesets with 40 changes to 40 files
+ new changesets * (glob)
+ updating to branch default
+ 40 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cd simpleclone
+ $ find * | sort
+ dir
+ dir/src
+ dir/src/f1
+ dir/src/f10
+ dir/src/f11
+ dir/src/f12
+ dir/src/f13
+ dir/src/f14
+ dir/src/f15
+ dir/src/f16
+ dir/src/f17
+ dir/src/f18
+ dir/src/f19
+ dir/src/f2
+ dir/src/f20
+ dir/src/f3
+ dir/src/f4
+ dir/src/f5
+ dir/src/f6
+ dir/src/f7
+ dir/src/f8
+ dir/src/f9
+ dir/tests
+ dir/tests/t1
+ dir/tests/t10
+ dir/tests/t11
+ dir/tests/t12
+ dir/tests/t13
+ dir/tests/t14
+ dir/tests/t15
+ dir/tests/t16
+ dir/tests/t17
+ dir/tests/t18
+ dir/tests/t19
+ dir/tests/t2
+ dir/tests/t20
+ dir/tests/t3
+ dir/tests/t4
+ dir/tests/t5
+ dir/tests/t6
+ dir/tests/t7
+ dir/tests/t8
+ dir/tests/t9
+
+ $ cd ..
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-narrow-commit.t Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,102 @@
+#testcases flat tree
+
+ $ . "$TESTDIR/narrow-library.sh"
+
+#if tree
+ $ cat << EOF >> $HGRCPATH
+ > [experimental]
+ > treemanifest = 1
+ > EOF
+#endif
+
+create full repo
+
+ $ hg init master
+ $ cd master
+
+ $ mkdir inside
+ $ echo inside > inside/f1
+ $ mkdir outside
+ $ echo outside > outside/f1
+ $ hg ci -Aqm 'initial'
+
+ $ echo modified > inside/f1
+ $ hg ci -qm 'modify inside'
+
+ $ echo modified > outside/f1
+ $ hg ci -qm 'modify outside'
+
+ $ cd ..
+
+ $ hg clone --narrow ssh://user@dummy/master narrow --include inside
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 3 changesets with 2 changes to 1 files
+ new changesets *:* (glob)
+ updating to branch default
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cd narrow
+
+ $ hg update -q 0
+
+Can not modify dirstate outside
+
+ $ mkdir outside
+ $ touch outside/f1
+ $ hg debugwalk -I 'relglob:f1'
+ matcher: <includematcher includes='(?:(?:|.*/)f1(?:/|$))'>
+ f inside/f1 inside/f1
+ $ hg add outside/f1
+ abort: cannot track 'outside/f1' - it is outside the narrow clone
+ [255]
+ $ touch outside/f3
+ $ hg add outside/f3
+ abort: cannot track 'outside/f3' - it is outside the narrow clone
+ [255]
+
+But adding a truly excluded file shouldn't count
+
+ $ hg add outside/f3 -X outside/f3
+
+ $ rm -r outside
+
+Can modify dirstate inside
+
+ $ echo modified > inside/f1
+ $ touch inside/f3
+ $ hg add inside/f3
+ $ hg status
+ M inside/f1
+ A inside/f3
+ $ hg revert -qC .
+ $ rm inside/f3
+
+Can commit changes inside. Leaves outside unchanged.
+
+ $ hg update -q 'desc("initial")'
+ $ echo modified2 > inside/f1
+ $ hg manifest --debug
+ 4d6a634d5ba06331a60c29ee0db8412490a54fcd 644 inside/f1
+ 7fb3bb6356d28d4dc352c5ba52d7350a81b6bd46 644 outside/f1 (flat !)
+ d0f2f706468ab0e8bec7af87446835fb1b13511b 755 d outside/ (tree !)
+ $ hg commit -m 'modify inside/f1'
+ created new head
+ $ hg files -r .
+ inside/f1
+ outside/f1 (flat !)
+ outside/ (tree !)
+ $ hg manifest --debug
+ 3f4197b4a11b9016e77ebc47fe566944885fd11b 644 inside/f1
+ 7fb3bb6356d28d4dc352c5ba52d7350a81b6bd46 644 outside/f1 (flat !)
+ d0f2f706468ab0e8bec7af87446835fb1b13511b 755 d outside/ (tree !)
+Some filesystems (notably FAT/exFAT) only store timestamps with 2
+seconds of precision, so by sleeping for 3 seconds, we can ensure that
+the timestamps of files stored by dirstate will appear older than the
+dirstate file, and therefore we'll be able to get stable output from
+debugdirstate. If we don't do this, the test can be slightly flaky.
+ $ sleep 3
+ $ hg status
+ $ hg debugdirstate --nodates
+ n 644 10 set inside/f1
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-narrow-copies.t Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,57 @@
+
+ $ . "$TESTDIR/narrow-library.sh"
+
+create full repo
+
+ $ hg init master
+ $ cd master
+
+ $ mkdir inside
+ $ echo inside > inside/f1
+ $ mkdir outside
+ $ echo outside > outside/f2
+ $ hg ci -Aqm 'initial'
+
+ $ hg mv outside/f2 inside/f2
+ $ hg ci -qm 'move f2 from outside'
+
+ $ echo modified > inside/f2
+ $ hg ci -qm 'modify inside/f2'
+
+ $ cd ..
+
+ $ hg clone --narrow ssh://user@dummy/master narrow --include inside
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 3 changesets with 3 changes to 2 files
+ new changesets *:* (glob)
+ updating to branch default
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cd narrow
+
+ $ hg co 'desc("move f2")'
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg status
+ $ hg diff
+ $ hg diff --change . --git
+ diff --git a/inside/f2 b/inside/f2
+ new file mode 100644
+ --- /dev/null
+ +++ b/inside/f2
+ @@ -0,0 +1,1 @@
+ +outside
+
+ $ hg log --follow inside/f2 -r tip
+ changeset: 2:bcfb756e0ca9
+ tag: tip
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: modify inside/f2
+
+ changeset: 1:5a016133b2bb
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: move f2 from outside
+
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-narrow-debugcommands.t Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,43 @@
+ $ . "$TESTDIR/narrow-library.sh"
+ $ hg init repo
+ $ cd repo
+ $ cat << EOF > .hg/narrowspec
+ > [includes]
+ > path:foo
+ > [excludes]
+ > EOF
+ $ echo treemanifest >> .hg/requires
+ $ echo narrowhg-experimental >> .hg/requires
+ $ mkdir -p foo/bar
+ $ echo b > foo/f
+ $ echo c > foo/bar/f
+ $ hg commit -Am hi
+ adding foo/bar/f
+ adding foo/f
+ $ hg debugindex -m
+ rev offset length delta linkrev nodeid p1 p2
+ 0 0 47 -1 0 14a5d056d75a 000000000000 000000000000
+ $ hg debugindex --dir foo
+ rev offset length delta linkrev nodeid p1 p2
+ 0 0 77 -1 0 e635c7857aef 000000000000 000000000000
+ $ hg debugindex --dir foo/
+ rev offset length delta linkrev nodeid p1 p2
+ 0 0 77 -1 0 e635c7857aef 000000000000 000000000000
+ $ hg debugindex --dir foo/bar
+ rev offset length delta linkrev nodeid p1 p2
+ 0 0 44 -1 0 e091d4224761 000000000000 000000000000
+ $ hg debugindex --dir foo/bar/
+ rev offset length delta linkrev nodeid p1 p2
+ 0 0 44 -1 0 e091d4224761 000000000000 000000000000
+ $ hg debugdata -m 0
+ foo\x00e635c7857aef92ac761ce5741a99da159abbbb24t (esc)
+ $ hg debugdata --dir foo 0
+ bar\x00e091d42247613adff5d41b67f15fe7189ee97b39t (esc)
+ f\x001e88685f5ddec574a34c70af492f95b6debc8741 (esc)
+ $ hg debugdata --dir foo/ 0
+ bar\x00e091d42247613adff5d41b67f15fe7189ee97b39t (esc)
+ f\x001e88685f5ddec574a34c70af492f95b6debc8741 (esc)
+ $ hg debugdata --dir foo/bar 0
+ f\x00149da44f2a4e14f488b7bd4157945a9837408c00 (esc)
+ $ hg debugdata --dir foo/bar/ 0
+ f\x00149da44f2a4e14f488b7bd4157945a9837408c00 (esc)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-narrow-debugrebuilddirstate.t Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,31 @@
+ $ . "$TESTDIR/narrow-library.sh"
+ $ hg init master
+ $ cd master
+ $ echo treemanifest >> .hg/requires
+ $ echo 'contents of file' > file
+ $ mkdir foo
+ $ echo 'contents of foo/bar' > foo/bar
+ $ hg ci -Am 'some change'
+ adding file
+ adding foo/bar
+
+ $ cd ..
+ $ hg clone --narrow ssh://user@dummy/master copy --include=foo
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 1 changes to 1 files
+ new changesets * (glob)
+ updating to branch default
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cd copy
+
+ $ hg debugdirstate
+ n * 20 unset foo/bar (glob)
+ $ mv .hg/dirstate .hg/old_dirstate
+ $ dd bs=40 count=1 if=.hg/old_dirstate of=.hg/dirstate 2>/dev/null
+ $ hg debugdirstate
+ $ hg debugrebuilddirstate
+ $ hg debugdirstate
+ n * * unset foo/bar (glob)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-narrow-exchange-merges.t Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,207 @@
+
+ $ . "$TESTDIR/narrow-library.sh"
+
+create full repo
+
+ $ hg init master
+ $ cd master
+ $ cat >> .hg/hgrc <<EOF
+ > [narrow]
+ > serveellipses=True
+ > EOF
+
+ $ mkdir inside
+ $ echo 1 > inside/f
+ $ hg commit -Aqm 'initial inside'
+
+ $ mkdir outside
+ $ echo 1 > outside/f
+ $ hg commit -Aqm 'initial outside'
+
+ $ echo 2a > outside/f
+ $ hg commit -Aqm 'outside 2a'
+ $ echo 3 > inside/f
+ $ hg commit -Aqm 'inside 3'
+ $ echo 4a > outside/f
+ $ hg commit -Aqm 'outside 4a'
+ $ hg update '.~3'
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+ $ echo 2b > outside/f
+ $ hg commit -Aqm 'outside 2b'
+ $ echo 3 > inside/f
+ $ hg commit -Aqm 'inside 3'
+ $ echo 4b > outside/f
+ $ hg commit -Aqm 'outside 4b'
+ $ hg update '.~3'
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+ $ echo 2c > outside/f
+ $ hg commit -Aqm 'outside 2c'
+ $ echo 3 > inside/f
+ $ hg commit -Aqm 'inside 3'
+ $ echo 4c > outside/f
+ $ hg commit -Aqm 'outside 4c'
+ $ hg update '.~3'
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+ $ echo 2d > outside/f
+ $ hg commit -Aqm 'outside 2d'
+ $ echo 3 > inside/f
+ $ hg commit -Aqm 'inside 3'
+ $ echo 4d > outside/f
+ $ hg commit -Aqm 'outside 4d'
+
+ $ hg update -r 'desc("outside 4a")'
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg merge -r 'desc("outside 4b")' 2>&1 | egrep -v '(warning:|incomplete!)'
+ merging outside/f
+ 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
+ use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
+ $ echo 5 > outside/f
+ $ rm outside/f.orig
+ $ hg resolve --mark outside/f
+ (no more unresolved files)
+ $ hg commit -m 'merge a/b 5'
+ $ echo 6 > outside/f
+ $ hg commit -Aqm 'outside 6'
+
+ $ hg merge -r 'desc("outside 4c")' 2>&1 | egrep -v '(warning:|incomplete!)'
+ merging outside/f
+ 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
+ use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
+ $ echo 7 > outside/f
+ $ rm outside/f.orig
+ $ hg resolve --mark outside/f
+ (no more unresolved files)
+ $ hg commit -Aqm 'merge a/b/c 7'
+ $ echo 8 > outside/f
+ $ hg commit -Aqm 'outside 8'
+
+ $ hg merge -r 'desc("outside 4d")' 2>&1 | egrep -v '(warning:|incomplete!)'
+ merging outside/f
+ 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
+ use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
+ $ echo 9 > outside/f
+ $ rm outside/f.orig
+ $ hg resolve --mark outside/f
+ (no more unresolved files)
+ $ hg commit -Aqm 'merge a/b/c/d 9'
+ $ echo 10 > outside/f
+ $ hg commit -Aqm 'outside 10'
+
+ $ echo 11 > inside/f
+ $ hg commit -Aqm 'inside 11'
+ $ echo 12 > outside/f
+ $ hg commit -Aqm 'outside 12'
+
+ $ hg log -G -T '{rev} {node|short} {desc}\n'
+ @ 21 8d874d57adea outside 12
+ |
+ o 20 7ef88b4dd4fa inside 11
+ |
+ o 19 2a20009de83e outside 10
+ |
+ o 18 3ac1f5779de3 merge a/b/c/d 9
+ |\
+ | o 17 38a9c2f7e546 outside 8
+ | |
+ | o 16 094aa62fc898 merge a/b/c 7
+ | |\
+ | | o 15 f29d083d32e4 outside 6
+ | | |
+ | | o 14 2dc11382541d merge a/b 5
+ | | |\
+ o | | | 13 27d07ef97221 outside 4d
+ | | | |
+ o | | | 12 465567bdfb2d inside 3
+ | | | |
+ o | | | 11 d1c61993ec83 outside 2d
+ | | | |
+ | o | | 10 56859a8e33b9 outside 4c
+ | | | |
+ | o | | 9 bb96a08b062a inside 3
+ | | | |
+ | o | | 8 b844052e7b3b outside 2c
+ |/ / /
+ | | o 7 9db2d8fcc2a6 outside 4b
+ | | |
+ | | o 6 6418167787a6 inside 3
+ | | |
+ +---o 5 77344f344d83 outside 2b
+ | |
+ | o 4 9cadde08dc9f outside 4a
+ | |
+ | o 3 019ef06f125b inside 3
+ | |
+ | o 2 75e40c075a19 outside 2a
+ |/
+ o 1 906d6c682641 initial outside
+ |
+ o 0 9f8e82b51004 initial inside
+
+
+Now narrow clone this and get a hopefully correct graph
+
+ $ cd ..
+ $ hg clone --narrow ssh://user@dummy/master narrow --include inside
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 14 changesets with 3 changes to 1 files
+ new changesets *:* (glob)
+ updating to branch default
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cd narrow
+
+To make updating the tests easier, we print the emitted nodes
+sorted. This makes it easier to identify when the same node structure
+has been emitted, just in a different order.
+
+ $ hg log -T '{if(ellipsis,"...")}{node|short} {p1node|short} {p2node|short} {desc}\n' | sort
+ ...094aa62fc898 6418167787a6 bb96a08b062a merge a/b/c 7
+ ...2a20009de83e 019ef06f125b 3ac1f5779de3 outside 10
+ ...3ac1f5779de3 465567bdfb2d 094aa62fc898 merge a/b/c/d 9
+ ...75e40c075a19 9f8e82b51004 000000000000 outside 2a
+ ...77344f344d83 9f8e82b51004 000000000000 outside 2b
+ ...8d874d57adea 7ef88b4dd4fa 000000000000 outside 12
+ ...b844052e7b3b 9f8e82b51004 000000000000 outside 2c
+ ...d1c61993ec83 9f8e82b51004 000000000000 outside 2d
+ 019ef06f125b 75e40c075a19 000000000000 inside 3
+ 465567bdfb2d d1c61993ec83 000000000000 inside 3
+ 6418167787a6 77344f344d83 000000000000 inside 3
+ 7ef88b4dd4fa 2a20009de83e 000000000000 inside 11
+ 9f8e82b51004 000000000000 000000000000 initial inside
+ bb96a08b062a b844052e7b3b 000000000000 inside 3
+
+But seeing the graph is also nice:
+ $ hg log -G -T '{if(ellipsis,"...")}{node|short} {desc}\n'
+ @ ...8d874d57adea outside 12
+ |
+ o 7ef88b4dd4fa inside 11
+ |
+ o ...2a20009de83e outside 10
+ |\
+ | o ...3ac1f5779de3 merge a/b/c/d 9
+ | |\
+ | | o ...094aa62fc898 merge a/b/c 7
+ | | |\
+ | o | | 465567bdfb2d inside 3
+ | | | |
+ | o | | ...d1c61993ec83 outside 2d
+ | | | |
+ | | | o bb96a08b062a inside 3
+ | | | |
+ | +---o ...b844052e7b3b outside 2c
+ | | |
+ | | o 6418167787a6 inside 3
+ | | |
+ | | o ...77344f344d83 outside 2b
+ | |/
+ o | 019ef06f125b inside 3
+ | |
+ o | ...75e40c075a19 outside 2a
+ |/
+ o 9f8e82b51004 initial inside
+
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-narrow-exchange.t Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,209 @@
+
+ $ . "$TESTDIR/narrow-library.sh"
+
+create full repo
+
+ $ hg init master
+ $ cd master
+ $ cat >> .hg/hgrc <<EOF
+ > [narrow]
+ > serveellipses=True
+ > EOF
+
+ $ mkdir inside
+ $ echo 1 > inside/f
+ $ mkdir inside2
+ $ echo 1 > inside2/f
+ $ mkdir outside
+ $ echo 1 > outside/f
+ $ hg ci -Aqm 'initial'
+
+ $ echo 2 > inside/f
+ $ hg ci -qm 'inside 2'
+
+ $ echo 2 > inside2/f
+ $ hg ci -qm 'inside2 2'
+
+ $ echo 2 > outside/f
+ $ hg ci -qm 'outside 2'
+
+ $ cd ..
+
+ $ hg clone --narrow ssh://user@dummy/master narrow --include inside
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 3 changesets with 2 changes to 1 files
+ new changesets *:* (glob)
+ updating to branch default
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+ $ hg clone --narrow ssh://user@dummy/master narrow2 --include inside --include inside2
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 4 changesets with 4 changes to 2 files
+ new changesets *:* (glob)
+ updating to branch default
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+Can push to wider repo if change does not affect paths in wider repo that are
+not also in narrower repo
+
+ $ cd narrow
+ $ echo 3 > inside/f
+ $ hg ci -m 'inside 3'
+ $ hg push ssh://user@dummy/narrow2
+ pushing to ssh://user@dummy/narrow2
+ searching for changes
+ remote: adding changesets
+ remote: adding manifests
+ remote: adding file changes
+ remote: added 1 changesets with 1 changes to 1 files
+
+Can push to narrower repo if change affects only paths within remote's
+narrow spec
+
+ $ cd ../narrow2
+ $ cat >> .hg/hgrc <<EOF
+ > [narrow]
+ > serveellipses=True
+ > EOF
+ $ hg co -r 'desc("inside 3")'
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ echo 4 > inside/f
+ $ hg ci -m 'inside 4'
+ $ hg push ssh://user@dummy/narrow
+ pushing to ssh://user@dummy/narrow
+ searching for changes
+ remote: adding changesets
+ remote: adding manifests
+ remote: adding file changes
+ remote: added 1 changesets with 1 changes to 1 files
+
+Should be able to push to narrow repo if a change affects only paths outside
+the remote's narrow spec (currently fails; see TODO below)
+
+ $ echo 3 > inside2/f
+ $ hg ci -m 'inside2 3'
+TODO: this should be successful
+ $ hg push ssh://user@dummy/narrow
+ pushing to ssh://user@dummy/narrow
+ searching for changes
+ remote: adding changesets
+ remote: adding manifests
+ remote: adding file changes
+ remote: transaction abort!
+ remote: rollback completed
+ remote: abort: data/inside2/f.i@4a1aa07735e6: unknown parent!
+ abort: stream ended unexpectedly (got 0 bytes, expected 4)
+ [255]
+
+Can pull from wider repo if change affects only paths outside remote's
+narrow spec
+ $ echo 4 > inside2/f
+ $ hg ci -m 'inside2 4'
+ $ hg log -G -T '{rev} {node|short} {files}\n'
+ @ 7 d78a96df731d inside2/f
+ |
+ o 6 8c26f5218962 inside2/f
+ |
+ o 5 ba3480e2f9de inside/f
+ |
+ o 4 4e5edd526618 inside/f
+ |
+ o 3 81e7e07b7ab0 outside/f
+ |
+ o 2 f3993b8c0c2b inside2/f
+ |
+ o 1 8cd66ca966b4 inside/f
+ |
+ o 0 c8057d6f53ab inside/f inside2/f outside/f
+
+ $ cd ../narrow
+ $ hg log -G -T '{rev} {node|short} {files}\n'
+ o 4 ba3480e2f9de inside/f
+ |
+ @ 3 4e5edd526618 inside/f
+ |
+ o 2 81e7e07b7ab0 outside/f
+ |
+ o 1 8cd66ca966b4 inside/f
+ |
+ o 0 c8057d6f53ab inside/f inside2/f outside/f
+
+ $ hg pull ssh://user@dummy/narrow2
+ pulling from ssh://user@dummy/narrow2
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 0 changes to 0 files
+ new changesets d78a96df731d
+ (run 'hg update' to get a working copy)
+
+Check that the resulting history is valid in the full repo
+
+ $ cd ../narrow2
+ $ hg push ssh://user@dummy/master
+ pushing to ssh://user@dummy/master
+ searching for changes
+ remote: adding changesets
+ remote: adding manifests
+ remote: adding file changes
+ remote: added 4 changesets with 4 changes to 2 files
+ $ cd ../master
+ $ hg verify
+ checking changesets
+ checking manifests
+ crosschecking files in changesets and manifests
+ checking files
+ 3 files, 8 changesets, 10 total revisions
+
+Cannot push to wider repo if change affects paths in wider repo that are
+not also in narrower repo
+ $ cd ../master
+ $ hg co -r 'desc("inside2 4")'
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ echo 5 > inside2/f
+ $ hg ci -m 'inside2 5'
+ $ hg log -G -T '{rev} {node|short} {files}\n'
+ @ 8 5970befb64ba inside2/f
+ |
+ o 7 d78a96df731d inside2/f
+ |
+ o 6 8c26f5218962 inside2/f
+ |
+ o 5 ba3480e2f9de inside/f
+ |
+ o 4 4e5edd526618 inside/f
+ |
+ o 3 81e7e07b7ab0 outside/f
+ |
+ o 2 f3993b8c0c2b inside2/f
+ |
+ o 1 8cd66ca966b4 inside/f
+ |
+ o 0 c8057d6f53ab inside/f inside2/f outside/f
+
+ $ cd ../narrow
+ $ hg pull
+ pulling from ssh://user@dummy/master
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 0 changes to 0 files
+ new changesets * (glob)
+ (run 'hg update' to get a working copy)
+TODO: this should tell the user that their narrow clone does not have the
+necessary content to be able to push to the target
+ $ hg push ssh://user@dummy/narrow2
+ pushing to ssh://user@dummy/narrow2
+ searching for changes
+ remote: adding changesets
+ remote: adding manifests
+ remote: adding file changes
+ remote: added 1 changesets with 0 changes to 0 files
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-narrow-expanddirstate.t Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,162 @@
+ $ . "$TESTDIR/narrow-library.sh"
+
+ $ hg init master
+ $ cd master
+
+ $ mkdir inside
+ $ echo inside > inside/f1
+ $ mkdir outside
+ $ echo outside > outside/f2
+ $ mkdir patchdir
+ $ echo patch_this > patchdir/f3
+ $ hg ci -Aqm 'initial'
+
+ $ cd ..
+
+ $ hg clone --narrow ssh://user@dummy/master narrow --include inside
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 1 changes to 1 files
+ new changesets dff6a2a6d433
+ updating to branch default
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+ $ cd narrow
+
+ $ mkdir outside
+ $ echo other_contents > outside/f2
+ $ grep outside .hg/narrowspec
+ [1]
+ $ grep outside .hg/dirstate
+ [1]
+ $ hg status
+
+`hg status` did not add outside.
+ $ grep outside .hg/narrowspec
+ [1]
+ $ grep outside .hg/dirstate
+ [1]
+
+Unfortunately this is not really a candidate for adding to narrowhg proper,
+since it depends on some other source for providing the manifests (when using
+treemanifests) and file contents. Something like a virtual filesystem and/or
+remotefilelog. We want to be useful when not using those systems, so we do not
+have this method available in narrowhg proper at the moment.
+ $ cat > "$TESTTMP/expand_extension.py" <<EOF
+ > import os
+ > import sys
+ >
+ > from mercurial import encoding
+ > from mercurial import extensions
+ > from mercurial import localrepo
+ > from mercurial import match as matchmod
+ > from mercurial import narrowspec
+ > from mercurial import patch
+ > from mercurial import util as hgutil
+ >
+ > def expandnarrowspec(ui, repo, newincludes=None):
+ > if not newincludes:
+ > return
+ > import sys
+ > newincludes = set([newincludes])
+ > includes, excludes = repo.narrowpats
+ > currentmatcher = narrowspec.match(repo.root, includes, excludes)
+ > includes = includes | newincludes
+ > if not repo.currenttransaction():
+ > ui.develwarn(b'expandnarrowspec called outside of transaction!')
+ > repo.setnarrowpats(includes, excludes)
+ > newmatcher = narrowspec.match(repo.root, includes, excludes)
+ > added = matchmod.differencematcher(newmatcher, currentmatcher)
+ > for f in repo[b'.'].manifest().walk(added):
+ > repo.dirstate.normallookup(f)
+ >
+ > def makeds(ui, repo):
+ > def wrapds(orig, self):
+ > ds = orig(self)
+ > class expandingdirstate(ds.__class__):
+ > @hgutil.propertycache
+ > def _map(self):
+ > ret = super(expandingdirstate, self)._map
+ > with repo.wlock(), repo.lock(), repo.transaction(
+ > b'expandnarrowspec'):
+ > expandnarrowspec(ui, repo,
+ > encoding.environ.get(b'DIRSTATEINCLUDES'))
+ > return ret
+ > ds.__class__ = expandingdirstate
+ > return ds
+ > return wrapds
+ >
+ > def reposetup(ui, repo):
+ > extensions.wrapfilecache(localrepo.localrepository, b'dirstate',
+ > makeds(ui, repo))
+ > def overridepatch(orig, *args, **kwargs):
+ > with repo.wlock():
+ > expandnarrowspec(ui, repo, encoding.environ.get(b'PATCHINCLUDES'))
+ > return orig(*args, **kwargs)
+ >
+ > extensions.wrapfunction(patch, b'patch', overridepatch)
+ > EOF
+ $ cat >> ".hg/hgrc" <<EOF
+ > [extensions]
+ > expand_extension = $TESTTMP/expand_extension.py
+ > EOF
+
+Since we do not have the ability to rely on a virtual filesystem or
+remotefilelog in the test, we just fake it by copying the data from the 'master'
+repo.
+ $ cp -a ../master/.hg/store/data/* .hg/store/data
+Do that for patchdir as well.
+ $ cp -a ../master/patchdir .
+
+`hg status` will now add outside, but not patchdir.
+ $ DIRSTATEINCLUDES=path:outside hg status
+ M outside/f2
+ $ grep outside .hg/narrowspec
+ path:outside
+ $ grep outside .hg/dirstate > /dev/null
+ $ grep patchdir .hg/narrowspec
+ [1]
+ $ grep patchdir .hg/dirstate
+ [1]
+
+Get rid of the modification to outside/f2.
+ $ hg update -C .
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+This patch will not apply cleanly at the moment, so `hg import` will break
+ $ cat > "$TESTTMP/foo.patch" <<EOF
+ > --- patchdir/f3
+ > +++ patchdir/f3
+ > @@ -1,1 +1,1 @@
+ > -this should be "patch_this", but its not, so patch fails
+ > +this text is irrelevant
+ > EOF
+ $ PATCHINCLUDES=path:patchdir hg import -p0 -e "$TESTTMP/foo.patch" -m ignored
+ applying $TESTTMP/foo.patch
+ patching file patchdir/f3
+ Hunk #1 FAILED at 0
+ 1 out of 1 hunks FAILED -- saving rejects to file patchdir/f3.rej
+ abort: patch failed to apply
+ [255]
+ $ grep patchdir .hg/narrowspec
+ [1]
+ $ grep patchdir .hg/dirstate > /dev/null
+ [1]
+
+Let's make it apply cleanly and see that it *did* expand properly
+ $ cat > "$TESTTMP/foo.patch" <<EOF
+ > --- patchdir/f3
+ > +++ patchdir/f3
+ > @@ -1,1 +1,1 @@
+ > -patch_this
+ > +patched_this
+ > EOF
+ $ PATCHINCLUDES=path:patchdir hg import -p0 -e "$TESTTMP/foo.patch" -m message
+ applying $TESTTMP/foo.patch
+ $ cat patchdir/f3
+ patched_this
+ $ grep patchdir .hg/narrowspec
+ path:patchdir
+ $ grep patchdir .hg/dirstate > /dev/null
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-narrow-merge.t Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,104 @@
+#testcases flat tree
+
+ $ . "$TESTDIR/narrow-library.sh"
+
+#if tree
+ $ cat << EOF >> $HGRCPATH
+ > [experimental]
+ > treemanifest = 1
+ > EOF
+#endif
+
+create full repo
+
+ $ hg init master
+ $ cd master
+ $ cat >> .hg/hgrc <<EOF
+ > [narrow]
+ > serveellipses=True
+ > EOF
+
+ $ mkdir inside
+ $ echo inside1 > inside/f1
+ $ echo inside2 > inside/f2
+ $ mkdir outside
+ $ echo outside1 > outside/f1
+ $ echo outside2 > outside/f2
+ $ hg ci -Aqm 'initial'
+
+ $ echo modified > inside/f1
+ $ hg ci -qm 'modify inside/f1'
+
+ $ hg update -q 0
+ $ echo modified > inside/f2
+ $ hg ci -qm 'modify inside/f2'
+
+ $ hg update -q 0
+ $ echo modified2 > inside/f1
+ $ hg ci -qm 'conflicting inside/f1'
+
+ $ hg update -q 0
+ $ echo modified > outside/f1
+ $ hg ci -qm 'modify outside/f1'
+
+ $ hg update -q 0
+ $ echo modified2 > outside/f1
+ $ hg ci -qm 'conflicting outside/f1'
+
+ $ cd ..
+
+ $ hg clone --narrow ssh://user@dummy/master narrow --include inside
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 6 changesets with 5 changes to 2 files (+4 heads)
+ new changesets *:* (glob)
+ updating to branch default
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cd narrow
+
+ $ hg update -q 0
+
+Can merge in when no files outside narrow spec are involved
+
+ $ hg update -q 'desc("modify inside/f1")'
+ $ hg merge 'desc("modify inside/f2")'
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ hg commit -m 'merge inside changes'
+
+Can merge conflicting changes inside narrow spec
+
+ $ hg update -q 'desc("modify inside/f1")'
+ $ hg merge 'desc("conflicting inside/f1")' 2>&1 | egrep -v '(warning:|incomplete!)'
+ merging inside/f1
+ 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
+ use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
+ $ echo modified3 > inside/f1
+ $ hg resolve -m
+ (no more unresolved files)
+ $ hg commit -m 'merge inside/f1'
+
+TODO: Can merge non-conflicting changes outside narrow spec
+
+ $ hg update -q 'desc("modify inside/f1")'
+ $ hg merge 'desc("modify outside/f1")'
+ abort: merge affects file 'outside/f1' outside narrow, which is not yet supported (flat !)
+ abort: merge affects file 'outside/' outside narrow, which is not yet supported (tree !)
+ (merging in the other direction may work)
+ [255]
+
+ $ hg update -q 'desc("modify outside/f1")'
+ $ hg merge 'desc("modify inside/f1")'
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ hg ci -m 'merge from inside to outside'
+
+Refuses merge of conflicting outside changes
+
+ $ hg update -q 'desc("modify outside/f1")'
+ $ hg merge 'desc("conflicting outside/f1")'
+ abort: conflict in file 'outside/f1' is outside narrow clone (flat !)
+ abort: conflict in file 'outside/' is outside narrow clone (tree !)
+ [255]
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-narrow-patch.t Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,84 @@
+#testcases flat tree
+
+ $ . "$TESTDIR/narrow-library.sh"
+
+#if tree
+ $ cat << EOF >> $HGRCPATH
+ > [experimental]
+ > treemanifest = 1
+ > EOF
+#endif
+
+create full repo
+
+ $ hg init master
+ $ cd master
+
+ $ mkdir inside
+ $ echo inside > inside/f1
+ $ mkdir outside
+ $ echo outside > outside/f1
+ $ hg ci -Aqm 'initial'
+
+ $ echo modified > inside/f1
+ $ hg ci -qm 'modify inside'
+
+ $ echo modified > outside/f1
+ $ hg ci -qm 'modify outside'
+
+ $ cd ..
+
+ $ hg clone --narrow ssh://user@dummy/master narrow --include inside
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 3 changesets with 2 changes to 1 files
+ new changesets *:* (glob)
+ updating to branch default
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cd narrow
+
+Can show patch touching paths outside
+
+ $ hg log -p
+ changeset: 2:* (glob)
+ tag: tip
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: modify outside
+
+
+ changeset: 1:* (glob)
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: modify inside
+
+ diff -r * -r * inside/f1 (glob)
+ --- a/inside/f1 Thu Jan 01 00:00:00 1970 +0000
+ +++ b/inside/f1 Thu Jan 01 00:00:00 1970 +0000
+ @@ -1,1 +1,1 @@
+ -inside
+ +modified
+
+ changeset: 0:* (glob)
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: initial
+
+ diff -r 000000000000 -r * inside/f1 (glob)
+ --- /dev/null Thu Jan 01 00:00:00 1970 +0000
+ +++ b/inside/f1 Thu Jan 01 00:00:00 1970 +0000
+ @@ -0,0 +1,1 @@
+ +inside
+
+
+ $ hg status --rev 1 --rev 2
+
+Can show copies inside the narrow clone
+
+ $ hg cp inside/f1 inside/f2
+ $ hg diff --git
+ diff --git a/inside/f1 b/inside/f2
+ copy from inside/f1
+ copy to inside/f2
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-narrow-patterns.t Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,435 @@
+ $ . "$TESTDIR/narrow-library.sh"
+
+initialize nested directories to validate complex include/exclude patterns
+
+ $ hg init master
+ $ cd master
+ $ cat >> .hg/hgrc <<EOF
+ > [narrow]
+ > serveellipses=True
+ > EOF
+
+ $ echo root > root
+ $ hg add root
+ $ hg commit -m 'add root'
+
+ $ for d in dir1 dir2 dir1/dirA dir1/dirB dir2/dirA dir2/dirB
+ > do
+ > mkdir -p $d
+ > echo $d/foo > $d/foo
+ > hg add $d/foo
+ > hg commit -m "add $d/foo"
+ > echo $d/bar > $d/bar
+ > hg add $d/bar
+ > hg commit -m "add $d/bar"
+ > done
+#if execbit
+ $ chmod +x dir1/dirA/foo
+ $ hg commit -m "make dir1/dirA/foo executable"
+#else
+ $ hg import --bypass - <<EOF
+ > # HG changeset patch
+ > make dir1/dirA/foo executable
+ >
+ > diff --git a/dir1/dirA/foo b/dir1/dirA/foo
+ > old mode 100644
+ > new mode 100755
+ > EOF
+ applying patch from stdin
+ $ hg update -qr tip
+#endif
+ $ hg log -G -T '{rev} {node|short} {files}\n'
+ @ 13 c87ca422d521 dir1/dirA/foo
+ |
+ o 12 951b8a83924e dir2/dirB/bar
+ |
+ o 11 01ae5a51b563 dir2/dirB/foo
+ |
+ o 10 5eababdf0ac5 dir2/dirA/bar
+ |
+ o 9 99d690663739 dir2/dirA/foo
+ |
+ o 8 8e80155d5445 dir1/dirB/bar
+ |
+ o 7 406760310428 dir1/dirB/foo
+ |
+ o 6 623466a5f475 dir1/dirA/bar
+ |
+ o 5 06ff3a5be997 dir1/dirA/foo
+ |
+ o 4 33227af02764 dir2/bar
+ |
+ o 3 5e1f9d8d7c69 dir2/foo
+ |
+ o 2 594bc4b13d4a dir1/bar
+ |
+ o 1 47f480a08324 dir1/foo
+ |
+ o 0 2a4f0c3b67da root
+
+ $ cd ..
+
+clone a narrow portion of the master, such that we can widen it later
+
+ $ hg clone --narrow ssh://user@dummy/master narrow \
+ > --include dir1 \
+ > --include dir2 \
+ > --exclude dir1/dirA \
+ > --exclude dir1/dirB \
+ > --exclude dir2/dirA \
+ > --exclude dir2/dirB
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 6 changesets with 4 changes to 4 files
+ new changesets *:* (glob)
+ updating to branch default
+ 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+ $ cd narrow
+ $ cat .hg/narrowspec
+ [includes]
+ path:dir1
+ path:dir2
+ [excludes]
+ path:dir1/dirA
+ path:dir1/dirB
+ path:dir2/dirA
+ path:dir2/dirB
+ $ hg manifest -r tip
+ dir1/bar
+ dir1/dirA/bar
+ dir1/dirA/foo
+ dir1/dirB/bar
+ dir1/dirB/foo
+ dir1/foo
+ dir2/bar
+ dir2/dirA/bar
+ dir2/dirA/foo
+ dir2/dirB/bar
+ dir2/dirB/foo
+ dir2/foo
+ root
+ $ find * | sort
+ dir1
+ dir1/bar
+ dir1/foo
+ dir2
+ dir2/bar
+ dir2/foo
+ $ hg log -G -T '{rev} {node|short}{if(ellipsis, "...")} {files}\n'
+ @ 5 c87ca422d521... dir1/dirA/foo
+ |
+ o 4 33227af02764 dir2/bar
+ |
+ o 3 5e1f9d8d7c69 dir2/foo
+ |
+ o 2 594bc4b13d4a dir1/bar
+ |
+ o 1 47f480a08324 dir1/foo
+ |
+ o 0 2a4f0c3b67da... root
+
+
+widen the narrow checkout
+
+ $ hg tracked --removeexclude dir1/dirA
+ comparing with ssh://user@dummy/master
+ searching for changes
+ no changes found
+ saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-widen.hg (glob)
+ adding changesets
+ adding manifests
+ adding file changes
+ added 9 changesets with 6 changes to 6 files
+ new changesets *:* (glob)
+ $ cat .hg/narrowspec
+ [includes]
+ path:dir1
+ path:dir2
+ [excludes]
+ path:dir1/dirB
+ path:dir2/dirA
+ path:dir2/dirB
+ $ find * | sort
+ dir1
+ dir1/bar
+ dir1/dirA
+ dir1/dirA/bar
+ dir1/dirA/foo
+ dir1/foo
+ dir2
+ dir2/bar
+ dir2/foo
+
+#if execbit
+ $ test -x dir1/dirA/foo && echo executable
+ executable
+ $ test -x dir1/dirA/bar || echo not executable
+ not executable
+#endif
+
+ $ hg log -G -T '{rev} {node|short}{if(ellipsis, "...")} {files}\n'
+ @ 8 c87ca422d521 dir1/dirA/foo
+ |
+ o 7 951b8a83924e... dir2/dirB/bar
+ |
+ o 6 623466a5f475 dir1/dirA/bar
+ |
+ o 5 06ff3a5be997 dir1/dirA/foo
+ |
+ o 4 33227af02764 dir2/bar
+ |
+ o 3 5e1f9d8d7c69 dir2/foo
+ |
+ o 2 594bc4b13d4a dir1/bar
+ |
+ o 1 47f480a08324 dir1/foo
+ |
+ o 0 2a4f0c3b67da... root
+
+
+widen narrow spec again, but exclude a file in previously included spec
+
+ $ hg tracked --removeexclude dir2/dirB --addexclude dir1/dirA/bar
+ comparing with ssh://user@dummy/master
+ searching for changes
+ looking for local changes to affected paths
+ deleting data/dir1/dirA/bar.i
+ no changes found
+ saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-widen.hg (glob)
+ adding changesets
+ adding manifests
+ adding file changes
+ added 11 changesets with 7 changes to 7 files
+ new changesets *:* (glob)
+ $ cat .hg/narrowspec
+ [includes]
+ path:dir1
+ path:dir2
+ [excludes]
+ path:dir1/dirA/bar
+ path:dir1/dirB
+ path:dir2/dirA
+ $ find * | sort
+ dir1
+ dir1/bar
+ dir1/dirA
+ dir1/dirA/foo
+ dir1/foo
+ dir2
+ dir2/bar
+ dir2/dirB
+ dir2/dirB/bar
+ dir2/dirB/foo
+ dir2/foo
+ $ hg log -G -T '{rev} {node|short}{if(ellipsis, "...")} {files}\n'
+ @ 10 c87ca422d521 dir1/dirA/foo
+ |
+ o 9 951b8a83924e dir2/dirB/bar
+ |
+ o 8 01ae5a51b563 dir2/dirB/foo
+ |
+ o 7 5eababdf0ac5... dir2/dirA/bar
+ |
+ o 6 623466a5f475... dir1/dirA/bar
+ |
+ o 5 06ff3a5be997 dir1/dirA/foo
+ |
+ o 4 33227af02764 dir2/bar
+ |
+ o 3 5e1f9d8d7c69 dir2/foo
+ |
+ o 2 594bc4b13d4a dir1/bar
+ |
+ o 1 47f480a08324 dir1/foo
+ |
+ o 0 2a4f0c3b67da... root
+
+
+widen narrow spec yet again, excluding a directory in previous spec
+
+ $ hg tracked --removeexclude dir2/dirA --addexclude dir1/dirA
+ comparing with ssh://user@dummy/master
+ searching for changes
+ looking for local changes to affected paths
+ deleting data/dir1/dirA/foo.i
+ no changes found
+ saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-widen.hg (glob)
+ adding changesets
+ adding manifests
+ adding file changes
+ added 13 changesets with 8 changes to 8 files
+ new changesets *:* (glob)
+ $ cat .hg/narrowspec
+ [includes]
+ path:dir1
+ path:dir2
+ [excludes]
+ path:dir1/dirA
+ path:dir1/dirA/bar
+ path:dir1/dirB
+ $ find * | sort
+ dir1
+ dir1/bar
+ dir1/foo
+ dir2
+ dir2/bar
+ dir2/dirA
+ dir2/dirA/bar
+ dir2/dirA/foo
+ dir2/dirB
+ dir2/dirB/bar
+ dir2/dirB/foo
+ dir2/foo
+ $ hg log -G -T '{rev} {node|short}{if(ellipsis, "...")} {files}\n'
+ @ 12 c87ca422d521... dir1/dirA/foo
+ |
+ o 11 951b8a83924e dir2/dirB/bar
+ |
+ o 10 01ae5a51b563 dir2/dirB/foo
+ |
+ o 9 5eababdf0ac5 dir2/dirA/bar
+ |
+ o 8 99d690663739 dir2/dirA/foo
+ |
+ o 7 8e80155d5445... dir1/dirB/bar
+ |
+ o 6 623466a5f475... dir1/dirA/bar
+ |
+ o 5 06ff3a5be997... dir1/dirA/foo
+ |
+ o 4 33227af02764 dir2/bar
+ |
+ o 3 5e1f9d8d7c69 dir2/foo
+ |
+ o 2 594bc4b13d4a dir1/bar
+ |
+ o 1 47f480a08324 dir1/foo
+ |
+ o 0 2a4f0c3b67da... root
+
+
+include a directory that was previously explicitly excluded
+
+ $ hg tracked --removeexclude dir1/dirA
+ comparing with ssh://user@dummy/master
+ searching for changes
+ no changes found
+ saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-widen.hg (glob)
+ adding changesets
+ adding manifests
+ adding file changes
+ added 13 changesets with 9 changes to 9 files
+ new changesets *:* (glob)
+ $ cat .hg/narrowspec
+ [includes]
+ path:dir1
+ path:dir2
+ [excludes]
+ path:dir1/dirA/bar
+ path:dir1/dirB
+ $ find * | sort
+ dir1
+ dir1/bar
+ dir1/dirA
+ dir1/dirA/foo
+ dir1/foo
+ dir2
+ dir2/bar
+ dir2/dirA
+ dir2/dirA/bar
+ dir2/dirA/foo
+ dir2/dirB
+ dir2/dirB/bar
+ dir2/dirB/foo
+ dir2/foo
+ $ hg log -G -T '{rev} {node|short}{if(ellipsis, "...")} {files}\n'
+ @ 12 c87ca422d521 dir1/dirA/foo
+ |
+ o 11 951b8a83924e dir2/dirB/bar
+ |
+ o 10 01ae5a51b563 dir2/dirB/foo
+ |
+ o 9 5eababdf0ac5 dir2/dirA/bar
+ |
+ o 8 99d690663739 dir2/dirA/foo
+ |
+ o 7 8e80155d5445... dir1/dirB/bar
+ |
+ o 6 623466a5f475... dir1/dirA/bar
+ |
+ o 5 06ff3a5be997 dir1/dirA/foo
+ |
+ o 4 33227af02764 dir2/bar
+ |
+ o 3 5e1f9d8d7c69 dir2/foo
+ |
+ o 2 594bc4b13d4a dir1/bar
+ |
+ o 1 47f480a08324 dir1/foo
+ |
+ o 0 2a4f0c3b67da... root
+
+
+ $ cd ..
+
+clone a narrow portion of the master, such that we can widen it later
+
+ $ hg clone --narrow ssh://user@dummy/master narrow2 --include dir1/dirA
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 5 changesets with 2 changes to 2 files
+ new changesets *:* (glob)
+ updating to branch default
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cd narrow2
+ $ find * | sort
+ dir1
+ dir1/dirA
+ dir1/dirA/bar
+ dir1/dirA/foo
+ $ hg tracked --addinclude dir1
+ comparing with ssh://user@dummy/master
+ searching for changes
+ no changes found
+ saved backup bundle to $TESTTMP/narrow2/.hg/strip-backup/*-widen.hg (glob)
+ adding changesets
+ adding manifests
+ adding file changes
+ added 10 changesets with 6 changes to 6 files
+ new changesets *:* (glob)
+ $ find * | sort
+ dir1
+ dir1/bar
+ dir1/dirA
+ dir1/dirA/bar
+ dir1/dirA/foo
+ dir1/dirB
+ dir1/dirB/bar
+ dir1/dirB/foo
+ dir1/foo
+ $ hg log -G -T '{rev} {node|short}{if(ellipsis, "...")} {files}\n'
+ @ 9 c87ca422d521 dir1/dirA/foo
+ |
+ o 8 951b8a83924e... dir2/dirB/bar
+ |
+ o 7 8e80155d5445 dir1/dirB/bar
+ |
+ o 6 406760310428 dir1/dirB/foo
+ |
+ o 5 623466a5f475 dir1/dirA/bar
+ |
+ o 4 06ff3a5be997 dir1/dirA/foo
+ |
+ o 3 33227af02764... dir2/bar
+ |
+ o 2 594bc4b13d4a dir1/bar
+ |
+ o 1 47f480a08324 dir1/foo
+ |
+ o 0 2a4f0c3b67da... root
+
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-narrow-pull.t Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,175 @@
+ $ . "$TESTDIR/narrow-library.sh"
+
+ $ hg init master
+ $ cd master
+ $ cat >> .hg/hgrc <<EOF
+ > [narrow]
+ > serveellipses=True
+ > EOF
+ $ for x in `$TESTDIR/seq.py 10`
+ > do
+ > echo $x > "f$x"
+ > hg add "f$x"
+ > hg commit -m "Commit f$x"
+ > done
+ $ cd ..
+
+narrow clone a couple of files, f2 and f8
+
+ $ hg clone --narrow ssh://user@dummy/master narrow --include "f2" --include "f8"
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 5 changesets with 2 changes to 2 files
+ new changesets *:* (glob)
+ updating to branch default
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cd narrow
+ $ ls
+ f2
+ f8
+ $ cat f2 f8
+ 2
+ 8
+
+ $ cd ..
+
+change every upstream file twice
+
+ $ cd master
+ $ for x in `$TESTDIR/seq.py 10`
+ > do
+ > echo "update#1 $x" >> "f$x"
+ > hg commit -m "Update#1 to f$x" "f$x"
+ > done
+ $ for x in `$TESTDIR/seq.py 10`
+ > do
+ > echo "update#2 $x" >> "f$x"
+ > hg commit -m "Update#2 to f$x" "f$x"
+ > done
+ $ cd ..
+
+look for incoming changes
+
+ $ cd narrow
+ $ hg incoming --limit 3
+ comparing with ssh://user@dummy/master
+ searching for changes
+ changeset: 5:ddc055582556
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: Update#1 to f1
+
+ changeset: 6:f66eb5ad621d
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: Update#1 to f2
+
+ changeset: 7:c42ecff04e99
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: Update#1 to f3
+
+
+Interrupting the pull is safe
+ $ hg --config hooks.pretxnchangegroup.bad=false pull -q
+ transaction abort!
+ rollback completed
+ abort: pretxnchangegroup.bad hook exited with status 1
+ [255]
+ $ hg id
+ 223311e70a6f tip
+
+pull new changes down to the narrow clone. Should get 9 new changesets: 4
+relevant to the narrow spec, and 5 ellipsis nodes gluing them all together.
+
+ $ hg pull
+ pulling from ssh://user@dummy/master
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 9 changesets with 4 changes to 2 files
+ new changesets *:* (glob)
+ (run 'hg update' to get a working copy)
+ $ hg log -T '{rev}: {desc}\n'
+ 13: Update#2 to f10
+ 12: Update#2 to f8
+ 11: Update#2 to f7
+ 10: Update#2 to f2
+ 9: Update#2 to f1
+ 8: Update#1 to f8
+ 7: Update#1 to f7
+ 6: Update#1 to f2
+ 5: Update#1 to f1
+ 4: Commit f10
+ 3: Commit f8
+ 2: Commit f7
+ 1: Commit f2
+ 0: Commit f1
+ $ hg update tip
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+add a change and push it
+
+ $ echo "update#3 2" >> f2
+ $ hg commit -m "Update#3 to f2" f2
+ $ hg log f2 -T '{rev}: {desc}\n'
+ 14: Update#3 to f2
+ 10: Update#2 to f2
+ 6: Update#1 to f2
+ 1: Commit f2
+ $ hg push
+ pushing to ssh://user@dummy/master
+ searching for changes
+ remote: adding changesets
+ remote: adding manifests
+ remote: adding file changes
+ remote: added 1 changesets with 1 changes to 1 files
+ $ cd ..
+
+ $ cd master
+ $ hg log f2 -T '{rev}: {desc}\n'
+ 30: Update#3 to f2
+ 21: Update#2 to f2
+ 11: Update#1 to f2
+ 1: Commit f2
+ $ hg log -l 3 -T '{rev}: {desc}\n'
+ 30: Update#3 to f2
+ 29: Update#2 to f10
+ 28: Update#2 to f9
+
+Can pull into repo with a single commit
+
+ $ cd ..
+ $ hg clone -q --narrow ssh://user@dummy/master narrow2 --include "f1" -r 0
+ $ cd narrow2
+ $ hg pull -q -r 1
+ transaction abort!
+ rollback completed
+ abort: pull failed on remote
+ [255]
+
+Can use 'hg share':
+ $ cat >> $HGRCPATH <<EOF
+ > [extensions]
+ > share=
+ > EOF
+
+ $ cd ..
+ $ hg share narrow2 narrow2-share
+ updating working directory
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cd narrow2-share
+ $ hg status
+
+We should also be able to unshare without breaking everything:
+ $ hg unshare
+ devel-warn: write with no wlock: "narrowspec" at: */hgext/narrow/narrowrepo.py:* (unsharenarrowspec) (glob)
+ $ hg verify
+ checking changesets
+ checking manifests
+ crosschecking files in changesets and manifests
+ checking files
+ 1 files, 1 changesets, 1 total revisions
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-narrow-rebase.t Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,93 @@
+
+ $ . "$TESTDIR/narrow-library.sh"
+
+create full repo
+
+ $ hg init master
+ $ cd master
+
+ $ mkdir inside
+ $ echo inside1 > inside/f1
+ $ echo inside2 > inside/f2
+ $ mkdir outside
+ $ echo outside1 > outside/f1
+ $ echo outside2 > outside/f2
+ $ hg ci -Aqm 'initial'
+
+ $ echo modified > inside/f1
+ $ hg ci -qm 'modify inside/f1'
+
+ $ hg update -q 0
+ $ echo modified2 > inside/f2
+ $ hg ci -qm 'modify inside/f2'
+
+ $ hg update -q 0
+ $ echo modified > outside/f1
+ $ hg ci -qm 'modify outside/f1'
+
+ $ hg update -q 0
+ $ echo modified2 > outside/f1
+ $ hg ci -qm 'conflicting outside/f1'
+
+ $ cd ..
+
+ $ hg clone --narrow ssh://user@dummy/master narrow --include inside
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 5 changesets with 4 changes to 2 files (+3 heads)
+ new changesets *:* (glob)
+ updating to branch default
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cd narrow
+ $ cat >> $HGRCPATH <<EOF
+ > [extensions]
+ > rebase=
+ > EOF
+
+ $ hg update -q 0
+
+Can rebase onto commit where no files outside narrow spec are involved
+
+ $ hg update -q 0
+ $ echo modified > inside/f2
+ $ hg ci -qm 'modify inside/f2'
+ $ hg rebase -d 'desc("modify inside/f1")'
+ rebasing 5:c2f36d04e05d "modify inside/f2" (tip)
+ saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-rebase.hg (glob)
+
+Can rebase onto conflicting changes inside narrow spec
+
+ $ hg update -q 0
+ $ echo conflicting > inside/f1
+ $ hg ci -qm 'conflicting inside/f1'
+ $ hg rebase -d 'desc("modify inside/f1")' 2>&1 | egrep -v '(warning:|incomplete!)'
+ rebasing 6:cdce97fbf653 "conflicting inside/f1" (tip)
+ merging inside/f1
+ unresolved conflicts (see hg resolve, then hg rebase --continue)
+ $ echo modified3 > inside/f1
+ $ hg resolve -m 2>&1 | grep -v continue:
+ (no more unresolved files)
+ $ hg rebase --continue
+ rebasing 6:cdce97fbf653 "conflicting inside/f1" (tip)
+ saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-rebase.hg (glob)
+
+Can rebase onto non-conflicting changes outside narrow spec
+
+ $ hg update -q 0
+ $ echo modified > inside/f2
+ $ hg ci -qm 'modify inside/f2'
+ $ hg rebase -d 'desc("modify outside/f1")'
+ rebasing 7:c2f36d04e05d "modify inside/f2" (tip)
+ saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-rebase.hg (glob)
+
+Rebase interrupts on conflicting changes outside narrow spec
+
+ $ hg update -q 'desc("conflicting outside/f1")'
+ $ hg phase -f -d .
+ no phases changed
+ $ hg rebase -d 'desc("modify outside/f1")'
+ rebasing 4:707c035aadb6 "conflicting outside/f1"
+ abort: conflict in file 'outside/f1' is outside narrow clone
+ [255]
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-narrow-shallow-merges.t Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,345 @@
+ $ . "$TESTDIR/narrow-library.sh"
+
+create full repo
+
+ $ hg init master
+ $ cd master
+ $ cat >> .hg/hgrc <<EOF
+ > [narrow]
+ > serveellipses=True
+ > EOF
+
+ $ mkdir inside
+ $ echo 1 > inside/f
+ $ hg commit -Aqm 'initial inside'
+
+ $ mkdir outside
+ $ echo 1 > outside/f
+ $ hg commit -Aqm 'initial outside'
+
+ $ echo 2a > outside/f
+ $ hg commit -Aqm 'outside 2a'
+ $ echo 3 > inside/f
+ $ hg commit -Aqm 'inside 3'
+ $ echo 4a > outside/f
+ $ hg commit -Aqm 'outside 4a'
+ $ hg update '.~3'
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+ $ echo 2b > outside/f
+ $ hg commit -Aqm 'outside 2b'
+ $ echo 3 > inside/f
+ $ hg commit -Aqm 'inside 3'
+ $ echo 4b > outside/f
+ $ hg commit -Aqm 'outside 4b'
+ $ hg update '.~3'
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+ $ echo 2c > outside/f
+ $ hg commit -Aqm 'outside 2c'
+ $ echo 3 > inside/f
+ $ hg commit -Aqm 'inside 3'
+ $ echo 4c > outside/f
+ $ hg commit -Aqm 'outside 4c'
+ $ hg update '.~3'
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+ $ echo 2d > outside/f
+ $ hg commit -Aqm 'outside 2d'
+ $ echo 3 > inside/f
+ $ hg commit -Aqm 'inside 3'
+ $ echo 4d > outside/f
+ $ hg commit -Aqm 'outside 4d'
+
+ $ hg update -r 'desc("outside 4a")'
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg merge -r 'desc("outside 4b")' 2>&1 | egrep -v '(warning:|incomplete!)'
+ merging outside/f
+ 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
+ use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
+ $ echo 5 > outside/f
+ $ rm outside/f.orig
+ $ hg resolve --mark outside/f
+ (no more unresolved files)
+ $ hg commit -m 'merge a/b 5'
+ $ echo 6 > outside/f
+ $ hg commit -Aqm 'outside 6'
+
+ $ hg merge -r 'desc("outside 4c")' 2>&1 | egrep -v '(warning:|incomplete!)'
+ merging outside/f
+ 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
+ use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
+ $ echo 7 > outside/f
+ $ rm outside/f.orig
+ $ hg resolve --mark outside/f
+ (no more unresolved files)
+ $ hg commit -Aqm 'merge a/b/c 7'
+ $ echo 8 > outside/f
+ $ hg commit -Aqm 'outside 8'
+
+ $ hg merge -r 'desc("outside 4d")' 2>&1 | egrep -v '(warning:|incomplete!)'
+ merging outside/f
+ 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
+ use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
+ $ echo 9 > outside/f
+ $ rm outside/f.orig
+ $ hg resolve --mark outside/f
+ (no more unresolved files)
+ $ hg commit -Aqm 'merge a/b/c/d 9'
+ $ echo 10 > outside/f
+ $ hg commit -Aqm 'outside 10'
+
+ $ echo 11 > inside/f
+ $ hg commit -Aqm 'inside 11'
+ $ echo 12 > outside/f
+ $ hg commit -Aqm 'outside 12'
+
+ $ hg log -G -T '{rev} {node|short} {desc}\n'
+ @ 21 8d874d57adea outside 12
+ |
+ o 20 7ef88b4dd4fa inside 11
+ |
+ o 19 2a20009de83e outside 10
+ |
+ o 18 3ac1f5779de3 merge a/b/c/d 9
+ |\
+ | o 17 38a9c2f7e546 outside 8
+ | |
+ | o 16 094aa62fc898 merge a/b/c 7
+ | |\
+ | | o 15 f29d083d32e4 outside 6
+ | | |
+ | | o 14 2dc11382541d merge a/b 5
+ | | |\
+ o | | | 13 27d07ef97221 outside 4d
+ | | | |
+ o | | | 12 465567bdfb2d inside 3
+ | | | |
+ o | | | 11 d1c61993ec83 outside 2d
+ | | | |
+ | o | | 10 56859a8e33b9 outside 4c
+ | | | |
+ | o | | 9 bb96a08b062a inside 3
+ | | | |
+ | o | | 8 b844052e7b3b outside 2c
+ |/ / /
+ | | o 7 9db2d8fcc2a6 outside 4b
+ | | |
+ | | o 6 6418167787a6 inside 3
+ | | |
+ +---o 5 77344f344d83 outside 2b
+ | |
+ | o 4 9cadde08dc9f outside 4a
+ | |
+ | o 3 019ef06f125b inside 3
+ | |
+ | o 2 75e40c075a19 outside 2a
+ |/
+ o 1 906d6c682641 initial outside
+ |
+ o 0 9f8e82b51004 initial inside
+
+
+Now narrow and shallow clone this and get a hopefully correct graph
+
+ $ cd ..
+ $ hg clone --narrow ssh://user@dummy/master narrow --include inside --depth 7
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 8 changesets with 3 changes to 1 files
+ new changesets *:* (glob)
+ updating to branch default
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cd narrow
+
+To make updating the tests easier, we print the emitted nodes
+sorted. This makes it easier to identify when the same node structure
+has been emitted, just in a different order.
+
+ $ hg log -G -T '{rev} {node|short}{if(ellipsis,"...")} {desc}\n'
+ @ 7 8d874d57adea... outside 12
+ |
+ o 6 7ef88b4dd4fa inside 11
+ |
+ o 5 2a20009de83e... outside 10
+ |
+ o 4 3ac1f5779de3... merge a/b/c/d 9
+ |\
+ | o 3 465567bdfb2d inside 3
+ | |
+ | o 2 d1c61993ec83... outside 2d
+ |
+ o 1 bb96a08b062a inside 3
+ |
+ o 0 b844052e7b3b... outside 2c
+
+
+ $ hg log -T '{if(ellipsis,"...")}{node|short} {p1node|short} {p2node|short} {desc}\n' | sort
+ ...2a20009de83e 000000000000 3ac1f5779de3 outside 10
+ ...3ac1f5779de3 bb96a08b062a 465567bdfb2d merge a/b/c/d 9
+ ...8d874d57adea 7ef88b4dd4fa 000000000000 outside 12
+ ...b844052e7b3b 000000000000 000000000000 outside 2c
+ ...d1c61993ec83 000000000000 000000000000 outside 2d
+ 465567bdfb2d d1c61993ec83 000000000000 inside 3
+ 7ef88b4dd4fa 2a20009de83e 000000000000 inside 11
+ bb96a08b062a b844052e7b3b 000000000000 inside 3
+
+ $ cd ..
+
+Incremental test case: show a pull can pull in a conflicted merge even if elided
+
+ $ hg init pullmaster
+ $ cd pullmaster
+ $ cat >> .hg/hgrc <<EOF
+ > [narrow]
+ > serveellipses=True
+ > EOF
+ $ mkdir inside outside
+ $ echo v1 > inside/f
+ $ echo v1 > outside/f
+ $ hg add inside/f outside/f
+ $ hg commit -m init
+
+ $ for line in a b c d
+ > do
+ > hg update -r 0
+ > echo v2$line > outside/f
+ > hg commit -m "outside 2$line"
+ > echo v2$line > inside/f
+ > hg commit -m "inside 2$line"
+ > echo v3$line > outside/f
+ > hg commit -m "outside 3$line"
+ > echo v4$line > outside/f
+ > hg commit -m "outside 4$line"
+ > done
+ 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ created new head
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ created new head
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ created new head
+
+ $ cd ..
+ $ hg clone --narrow ssh://user@dummy/pullmaster pullshallow \
+ > --include inside --depth 3
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 12 changesets with 5 changes to 1 files (+3 heads)
+ new changesets *:* (glob)
+ updating to branch default
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cd pullshallow
+
+ $ hg log -G -T '{rev} {node|short}{if(ellipsis,"...")} {desc}\n'
+ @ 11 0ebbd712a0c8... outside 4d
+ |
+ o 10 0d4c867aeb23 inside 2d
+ |
+ o 9 e932969c3961... outside 2d
+
+ o 8 33d530345455... outside 4c
+ |
+ o 7 0ce6481bfe07 inside 2c
+ |
+ o 6 caa65c940632... outside 2c
+
+ o 5 3df233defecc... outside 4b
+ |
+ o 4 7162cc6d11a4 inside 2b
+ |
+ o 3 f2a632f0082d... outside 2b
+
+ o 2 b8a3da16ba49... outside 4a
+ |
+ o 1 53f543eb8e45 inside 2a
+ |
+ o 0 1be3e5221c6a... outside 2a
+
+ $ hg log -T '{if(ellipsis,"...")}{node|short} {p1node|short} {p2node|short} {desc}\n' | sort
+ ...0ebbd712a0c8 0d4c867aeb23 000000000000 outside 4d
+ ...1be3e5221c6a 000000000000 000000000000 outside 2a
+ ...33d530345455 0ce6481bfe07 000000000000 outside 4c
+ ...3df233defecc 7162cc6d11a4 000000000000 outside 4b
+ ...b8a3da16ba49 53f543eb8e45 000000000000 outside 4a
+ ...caa65c940632 000000000000 000000000000 outside 2c
+ ...e932969c3961 000000000000 000000000000 outside 2d
+ ...f2a632f0082d 000000000000 000000000000 outside 2b
+ 0ce6481bfe07 caa65c940632 000000000000 inside 2c
+ 0d4c867aeb23 e932969c3961 000000000000 inside 2d
+ 53f543eb8e45 1be3e5221c6a 000000000000 inside 2a
+ 7162cc6d11a4 f2a632f0082d 000000000000 inside 2b
+
+ $ cd ../pullmaster
+ $ hg update -r 'desc("outside 4a")'
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg merge -r 'desc("outside 4b")' 2>&1 | egrep -v '(warning:|incomplete!)'
+ merging inside/f
+ merging outside/f
+ 0 files updated, 0 files merged, 0 files removed, 2 files unresolved
+ use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
+ $ echo 3 > inside/f
+ $ echo 5 > outside/f
+ $ rm -f {in,out}side/f.orig
+ $ hg resolve --mark inside/f outside/f
+ (no more unresolved files)
+ $ hg commit -m 'merge a/b 5'
+
+ $ hg update -r 'desc("outside 4c")'
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg merge -r 'desc("outside 4d")' 2>&1 | egrep -v '(warning:|incomplete!)'
+ merging inside/f
+ merging outside/f
+ 0 files updated, 0 files merged, 0 files removed, 2 files unresolved
+ use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
+ $ echo 3 > inside/f
+ $ echo 5 > outside/f
+ $ rm -f {in,out}side/f.orig
+ $ hg resolve --mark inside/f outside/f
+ (no more unresolved files)
+ $ hg commit -m 'merge c/d 5'
+
+ $ hg update -r 'desc("merge a/b 5")'
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg merge -r 'desc("merge c/d 5")'
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ echo 6 > outside/f
+ $ hg commit -m 'outside 6'
+ $ echo 7 > outside/f
+ $ hg commit -m 'outside 7'
+ $ echo 8 > outside/f
+ $ hg commit -m 'outside 8'
+
+ $ cd ../pullshallow
+ $ hg pull --depth 3
+ pulling from ssh://user@dummy/pullmaster
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 4 changesets with 3 changes to 1 files (-3 heads)
+ new changesets *:* (glob)
+ (run 'hg update' to get a working copy)
+
+ $ hg log -T '{if(ellipsis,"...")}{node|short} {p1node|short} {p2node|short} {desc}\n' | sort
+ ...0ebbd712a0c8 0d4c867aeb23 000000000000 outside 4d
+ ...1be3e5221c6a 000000000000 000000000000 outside 2a
+ ...33d530345455 0ce6481bfe07 000000000000 outside 4c
+ ...3df233defecc 7162cc6d11a4 000000000000 outside 4b
+ ...b8a3da16ba49 53f543eb8e45 000000000000 outside 4a
+ ...bf545653453e 968003d40c60 000000000000 outside 8
+ ...caa65c940632 000000000000 000000000000 outside 2c
+ ...e932969c3961 000000000000 000000000000 outside 2d
+ ...f2a632f0082d 000000000000 000000000000 outside 2b
+ 0ce6481bfe07 caa65c940632 000000000000 inside 2c
+ 0d4c867aeb23 e932969c3961 000000000000 inside 2d
+ 53f543eb8e45 1be3e5221c6a 000000000000 inside 2a
+ 67d49c0bdbda b8a3da16ba49 3df233defecc merge a/b 5
+ 7162cc6d11a4 f2a632f0082d 000000000000 inside 2b
+ 968003d40c60 67d49c0bdbda e867021d52c2 outside 6
+ e867021d52c2 33d530345455 0ebbd712a0c8 merge c/d 5
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-narrow-shallow.t Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,122 @@
+ $ . "$TESTDIR/narrow-library.sh"
+
+ $ hg init master
+ $ cd master
+ $ cat >> .hg/hgrc <<EOF
+ > [narrow]
+ > serveellipses=True
+ > EOF
+ $ for x in `$TESTDIR/seq.py 10`
+ > do
+ > echo $x > "f$x"
+ > hg add "f$x"
+ > done
+ $ hg commit -m "Add root files"
+ $ mkdir d1 d2
+ $ for x in `$TESTDIR/seq.py 10`
+ > do
+ > echo d1/$x > "d1/f$x"
+ > hg add "d1/f$x"
+ > echo d2/$x > "d2/f$x"
+ > hg add "d2/f$x"
+ > done
+ $ hg commit -m "Add d1 and d2"
+ $ for x in `$TESTDIR/seq.py 10`
+ > do
+ > echo f$x rev2 > "f$x"
+ > echo d1/f$x rev2 > "d1/f$x"
+ > echo d2/f$x rev2 > "d2/f$x"
+ > hg commit -m "Commit rev2 of f$x, d1/f$x, d2/f$x"
+ > done
+ $ cd ..
+
+narrow and shallow clone the d2 directory
+
+ $ hg clone --narrow ssh://user@dummy/master shallow --include "d2" --depth 2
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 4 changesets with 13 changes to 10 files
+ new changesets *:* (glob)
+ updating to branch default
+ 10 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cd shallow
+ $ hg log -T '{rev}{if(ellipsis,"...")}: {desc}\n'
+ 3: Commit rev2 of f10, d1/f10, d2/f10
+ 2: Commit rev2 of f9, d1/f9, d2/f9
+ 1: Commit rev2 of f8, d1/f8, d2/f8
+ 0...: Commit rev2 of f7, d1/f7, d2/f7
+ $ hg update 0
+ 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cat d2/f7 d2/f8
+ d2/f7 rev2
+ d2/8
+
+ $ cd ..
+
+change every upstream file once
+
+ $ cd master
+ $ for x in `$TESTDIR/seq.py 10`
+ > do
+ > echo f$x rev3 > "f$x"
+ > echo d1/f$x rev3 > "d1/f$x"
+ > echo d2/f$x rev3 > "d2/f$x"
+ > hg commit -m "Commit rev3 of f$x, d1/f$x, d2/f$x"
+ > done
+ $ cd ..
+
+pull new changes with --depth specified. There were 10 changes to the d2
+directory but the shallow pull should only fetch 4 changesets (3 full plus
+one ellipsis node).
+
+ $ cd shallow
+ $ hg pull --depth 2
+ pulling from ssh://user@dummy/master
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 4 changesets with 10 changes to 10 files
+ new changesets *:* (glob)
+ (run 'hg update' to get a working copy)
+ $ hg log -T '{rev}{if(ellipsis,"...")}: {desc}\n'
+ 7: Commit rev3 of f10, d1/f10, d2/f10
+ 6: Commit rev3 of f9, d1/f9, d2/f9
+ 5: Commit rev3 of f8, d1/f8, d2/f8
+ 4...: Commit rev3 of f7, d1/f7, d2/f7
+ 3: Commit rev2 of f10, d1/f10, d2/f10
+ 2: Commit rev2 of f9, d1/f9, d2/f9
+ 1: Commit rev2 of f8, d1/f8, d2/f8
+ 0...: Commit rev2 of f7, d1/f7, d2/f7
+ $ hg update 4
+ merging d2/f1
+ merging d2/f2
+ merging d2/f3
+ merging d2/f4
+ merging d2/f5
+ merging d2/f6
+ merging d2/f7
+ 3 files updated, 7 files merged, 0 files removed, 0 files unresolved
+ $ cat d2/f7 d2/f8
+ d2/f7 rev3
+ d2/f8 rev2
+ $ hg update 7
+ 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cat d2/f10
+ d2/f10 rev3
+
+ $ cd ..
+
+cannot clone with zero or negative depth
+
+ $ hg clone --narrow ssh://user@dummy/master bad --include "d2" --depth 0
+ requesting all changes
+ remote: abort: depth must be positive, got 0
+ abort: pull failed on remote
+ [255]
+ $ hg clone --narrow ssh://user@dummy/master bad --include "d2" --depth -1
+ requesting all changes
+ remote: abort: depth must be positive, got -1
+ abort: pull failed on remote
+ [255]
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-narrow-strip.t Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,163 @@
+#testcases flat tree
+
+ $ . "$TESTDIR/narrow-library.sh"
+
+#if tree
+ $ cat << EOF >> $HGRCPATH
+ > [experimental]
+ > treemanifest = 1
+ > EOF
+#endif
+
+create full repo
+
+ $ hg init master
+ $ cd master
+ $ cat >> .hg/hgrc <<EOF
+ > [narrow]
+ > serveellipses=True
+ > EOF
+
+ $ mkdir inside
+ $ echo inside > inside/f1
+ $ mkdir outside
+ $ echo outside > outside/f1
+ $ hg ci -Aqm 'initial'
+
+ $ echo modified > inside/f1
+ $ hg ci -qm 'modify inside'
+
+ $ hg co -q 0
+ $ echo modified > outside/f1
+ $ hg ci -qm 'modify outside'
+
+ $ echo modified again >> outside/f1
+ $ hg ci -qm 'modify outside again'
+
+ $ cd ..
+
+ $ hg clone --narrow ssh://user@dummy/master narrow --include inside
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 3 changesets with 2 changes to 1 files (+1 heads)
+ new changesets *:* (glob)
+ updating to branch default
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cd narrow
+ $ cat >> $HGRCPATH <<EOF
+ > [extensions]
+ > strip=
+ > EOF
+
+Can strip and recover changesets affecting only files within narrow spec
+
+ $ hg co -r 'desc("modify inside")'
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ rm -f $TESTTMP/narrow/.hg/strip-backup/*-backup.hg
+ $ hg strip .
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-backup.hg (glob)
+ $ hg unbundle .hg/strip-backup/*-backup.hg
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 1 changes to 1 files (+1 heads)
+ new changesets * (glob)
+ (run 'hg heads' to see heads, 'hg merge' to merge)
+
+Can strip and recover changesets affecting files outside of narrow spec
+
+ $ hg co -r 'desc("modify outside")'
+ 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg log -G -T '{rev} {desc}\n'
+ o 2 modify inside
+ |
+ | @ 1 modify outside again
+ |/
+ o 0 initial
+
+ $ hg debugdata -m 1
+ inside/f1\x004d6a634d5ba06331a60c29ee0db8412490a54fcd (esc) (flat !)
+ outside/f1\x0084ba604d54dee1f13310ce3d4ac2e8a36636691a (esc) (flat !)
+ inside\x006a8bc41df94075d501f9740587a0c0e13c170dc5t (esc) (tree !)
+ outside\x00255c2627ebdd3c7dcaa6945246f9b9f02bd45a09t (esc) (tree !)
+
+ $ rm -f $TESTTMP/narrow/.hg/strip-backup/*-backup.hg
+ $ hg strip .
+ 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-backup.hg (glob)
+ $ hg unbundle .hg/strip-backup/*-backup.hg
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 0 changes to 0 files (+1 heads)
+ new changesets * (glob)
+ (run 'hg heads' to see heads, 'hg merge' to merge)
+ $ hg log -G -T '{rev} {desc}\n'
+ o 2 modify outside again
+ |
+ | o 1 modify inside
+ |/
+ @ 0 initial
+
+Check that hash of file outside narrow spec got restored
+ $ hg debugdata -m 2
+ inside/f1\x004d6a634d5ba06331a60c29ee0db8412490a54fcd (esc) (flat !)
+ outside/f1\x0084ba604d54dee1f13310ce3d4ac2e8a36636691a (esc) (flat !)
+ inside\x006a8bc41df94075d501f9740587a0c0e13c170dc5t (esc) (tree !)
+ outside\x00255c2627ebdd3c7dcaa6945246f9b9f02bd45a09t (esc) (tree !)
+
+Also verify we can apply the bundle with 'hg pull':
+ $ hg co -r 'desc("modify inside")'
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ rm .hg/strip-backup/*-backup.hg
+ $ hg strip .
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-backup.hg (glob)
+ $ hg pull .hg/strip-backup/*-backup.hg
+ pulling from .hg/strip-backup/*-backup.hg (glob)
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 1 changes to 1 files (+1 heads)
+ new changesets * (glob)
+ (run 'hg heads' to see heads, 'hg merge' to merge)
+
+ $ rm .hg/strip-backup/*-backup.hg
+ $ hg strip 0
+ 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-backup.hg (glob)
+ $ hg incoming .hg/strip-backup/*-backup.hg
+ comparing with .hg/strip-backup/*-backup.hg (glob)
+ changeset: 0:* (glob)
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: initial
+
+ changeset: 1:9e48d953700d (flat !)
+ changeset: 1:3888164bccf0 (tree !)
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: modify outside again
+
+ changeset: 2:f505d5e96aa8 (flat !)
+ changeset: 2:40b66f95a209 (tree !)
+ tag: tip
+ parent: 0:a99f4d53924d (flat !)
+ parent: 0:c2a5fabcca3c (tree !)
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: modify inside
+
+ $ hg pull .hg/strip-backup/*-backup.hg
+ pulling from .hg/strip-backup/*-backup.hg (glob)
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 3 changesets with 2 changes to 1 files (+1 heads)
+ new changesets *:* (glob)
+ (run 'hg heads' to see heads, 'hg merge' to merge)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-narrow-update.t Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,76 @@
+
+ $ . "$TESTDIR/narrow-library.sh"
+
+create full repo
+
+ $ hg init master
+ $ cd master
+ $ echo init > init
+ $ hg ci -Aqm 'initial'
+
+ $ mkdir inside
+ $ echo inside > inside/f1
+ $ mkdir outside
+ $ echo outside > outside/f1
+ $ hg ci -Aqm 'add inside and outside'
+
+ $ echo modified > inside/f1
+ $ hg ci -qm 'modify inside'
+
+ $ echo modified > outside/f1
+ $ hg ci -qm 'modify outside'
+
+ $ cd ..
+
+ $ hg clone --narrow ssh://user@dummy/master narrow --include inside
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 4 changesets with 2 changes to 1 files
+ new changesets *:* (glob)
+ updating to branch default
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cd narrow
+ $ hg debugindex -c
+ rev offset length base linkrev nodeid p1 p2
+ 0 0 64 0 0 9958b1af2add 000000000000 000000000000
+ 1 64 81 1 1 2db4ce2a3bfe 9958b1af2add 000000000000
+ 2 145 75 2 2 0980ee31a742 2db4ce2a3bfe 000000000000
+ 3 220 (76|77) 3 3 4410145019b7 0980ee31a742 000000000000 (re)
+
+ $ hg update -q 0
+
+Can update to revision with changes inside
+
+ $ hg update -q 'desc("add inside and outside")'
+ $ hg update -q 'desc("modify inside")'
+ $ find *
+ inside
+ inside/f1
+ $ cat inside/f1
+ modified
+
+Can update to revision with changes outside
+
+ $ hg update -q 'desc("modify outside")'
+ $ find *
+ inside
+ inside/f1
+ $ cat inside/f1
+ modified
+
+Can update with a deleted file inside
+
+ $ hg rm inside/f1
+ $ hg update -q 'desc("modify inside")'
+ $ hg update -q 'desc("modify outside")'
+ $ hg update -q 'desc("initial")'
+ $ hg update -q 'desc("modify inside")'
+
+Can update with a moved file inside
+
+ $ hg mv inside/f1 inside/f2
+ $ hg update -q 'desc("modify outside")'
+ $ hg update -q 'desc("initial")'
+ $ hg update -q 'desc("modify inside")'
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-narrow-widen.t Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,365 @@
+#testcases flat tree
+ $ . "$TESTDIR/narrow-library.sh"
+
+#if tree
+ $ cat << EOF >> $HGRCPATH
+ > [experimental]
+ > treemanifest = 1
+ > EOF
+#endif
+
+ $ hg init master
+ $ cd master
+ $ cat >> .hg/hgrc <<EOF
+ > [narrow]
+ > serveellipses=True
+ > EOF
+
+ $ mkdir inside
+ $ echo 'inside' > inside/f
+ $ hg add inside/f
+ $ hg commit -m 'add inside'
+
+ $ mkdir widest
+ $ echo 'widest' > widest/f
+ $ hg add widest/f
+ $ hg commit -m 'add widest'
+
+ $ mkdir outside
+ $ echo 'outside' > outside/f
+ $ hg add outside/f
+ $ hg commit -m 'add outside'
+
+ $ cd ..
+
+narrow clone the inside file
+
+ $ hg clone --narrow ssh://user@dummy/master narrow --include inside
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 2 changesets with 1 changes to 1 files
+ new changesets *:* (glob)
+ updating to branch default
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cd narrow
+ $ hg tracked
+ I path:inside
+ $ ls
+ inside
+ $ cat inside/f
+ inside
+ $ cd ..
+
+add more upstream files which we will include in a wider narrow spec
+
+ $ cd master
+
+ $ mkdir wider
+ $ echo 'wider' > wider/f
+ $ hg add wider/f
+ $ echo 'widest v2' > widest/f
+ $ hg commit -m 'add wider, update widest'
+
+ $ echo 'widest v3' > widest/f
+ $ hg commit -m 'update widest v3'
+
+ $ echo 'inside v2' > inside/f
+ $ hg commit -m 'update inside'
+
+ $ mkdir outside2
+ $ echo 'outside2' > outside2/f
+ $ hg add outside2/f
+ $ hg commit -m 'add outside2'
+
+ $ echo 'widest v4' > widest/f
+ $ hg commit -m 'update widest v4'
+
+ $ hg log -T "{if(ellipsis, '...')}{node|short}: {desc}\n"
+ *: update widest v4 (glob)
+ *: add outside2 (glob)
+ *: update inside (glob)
+ *: update widest v3 (glob)
+ *: add wider, update widest (glob)
+ *: add outside (glob)
+ *: add widest (glob)
+ *: add inside (glob)
+
+ $ cd ..
+
+Widen the narrow spec to see the wider file. This should not get the newly
+added upstream revisions.
+
+ $ cd narrow
+ $ hg tracked --addinclude wider/f
+ comparing with ssh://user@dummy/master
+ searching for changes
+ no changes found
+ saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-widen.hg (glob)
+ adding changesets
+ adding manifests
+ adding file changes
+ added 2 changesets with 1 changes to 1 files
+ new changesets *:* (glob)
+ $ hg tracked
+ I path:inside
+ I path:wider/f
+
+Pull down the newly added upstream revision.
+
+ $ hg pull
+ pulling from ssh://user@dummy/master
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 4 changesets with 2 changes to 2 files
+ new changesets *:* (glob)
+ (run 'hg update' to get a working copy)
+ $ hg update -r 'desc("add wider")'
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cat wider/f
+ wider
+
+ $ hg update -r 'desc("update inside")'
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cat wider/f
+ wider
+ $ cat inside/f
+ inside v2
+
+ $ hg log -T "{if(ellipsis, '...')}{node|short}: {desc}\n"
+ ...*: update widest v4 (glob)
+ *: update inside (glob)
+ ...*: update widest v3 (glob)
+ *: add wider, update widest (glob)
+ ...*: add outside (glob)
+ *: add inside (glob)
+
+Check that widening with a newline fails
+
+ $ hg tracked --addinclude 'widest
+ > '
+ abort: newlines are not allowed in narrowspec paths
+ [255]
+
+widen the narrow spec to include the widest file
+
+ $ hg tracked --addinclude widest
+ comparing with ssh://user@dummy/master
+ searching for changes
+ no changes found
+ saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-widen.hg (glob)
+ adding changesets
+ adding manifests
+ adding file changes
+ added 8 changesets with 7 changes to 3 files
+ new changesets *:* (glob)
+ $ hg tracked
+ I path:inside
+ I path:wider/f
+ I path:widest
+ $ hg update 'desc("add widest")'
+ 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ $ cat widest/f
+ widest
+ $ hg update 'desc("add wider, update widest")'
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cat wider/f
+ wider
+ $ cat widest/f
+ widest v2
+ $ hg update 'desc("update widest v3")'
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cat widest/f
+ widest v3
+ $ hg update 'desc("update widest v4")'
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cat widest/f
+ widest v4
+
+ $ hg log -T "{if(ellipsis, '...')}{node|short}: {desc}\n"
+ *: update widest v4 (glob)
+ ...*: add outside2 (glob)
+ *: update inside (glob)
+ *: update widest v3 (glob)
+ *: add wider, update widest (glob)
+ ...*: add outside (glob)
+ *: add widest (glob)
+ *: add inside (glob)
+
+separate suite of tests: files from 0-10 modified in changes 0-10. This allows
+more obvious precise tests tickling particular corner cases.
+
+ $ cd ..
+ $ hg init upstream
+ $ cd upstream
+ $ cat >> .hg/hgrc <<EOF
+ > [narrow]
+ > serveellipses=True
+ > EOF
+ $ for x in `$TESTDIR/seq.py 0 10`
+ > do
+ > mkdir d$x
+ > echo $x > d$x/f
+ > hg add d$x/f
+ > hg commit -m "add d$x/f"
+ > done
+ $ hg log -T "{node|short}: {desc}\n"
+ *: add d10/f (glob)
+ *: add d9/f (glob)
+ *: add d8/f (glob)
+ *: add d7/f (glob)
+ *: add d6/f (glob)
+ *: add d5/f (glob)
+ *: add d4/f (glob)
+ *: add d3/f (glob)
+ *: add d2/f (glob)
+ *: add d1/f (glob)
+ *: add d0/f (glob)
+
+make narrow clone with every third node.
+
+ $ cd ..
+ $ hg clone --narrow ssh://user@dummy/upstream narrow2 --include d0 --include d3 --include d6 --include d9
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 8 changesets with 4 changes to 4 files
+ new changesets *:* (glob)
+ updating to branch default
+ 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cd narrow2
+ $ hg tracked
+ I path:d0
+ I path:d3
+ I path:d6
+ I path:d9
+ $ hg verify
+ checking changesets
+ checking manifests
+ checking directory manifests (tree !)
+ crosschecking files in changesets and manifests
+ checking files
+ 4 files, 8 changesets, 4 total revisions
+ $ hg log -T "{if(ellipsis, '...')}{node|short}: {desc}\n"
+ ...*: add d10/f (glob)
+ *: add d9/f (glob)
+ ...*: add d8/f (glob)
+ *: add d6/f (glob)
+ ...*: add d5/f (glob)
+ *: add d3/f (glob)
+ ...*: add d2/f (glob)
+ *: add d0/f (glob)
+ $ hg tracked --addinclude d1
+ comparing with ssh://user@dummy/upstream
+ searching for changes
+ no changes found
+ saved backup bundle to $TESTTMP/narrow2/.hg/strip-backup/*-widen.hg (glob)
+ adding changesets
+ adding manifests
+ adding file changes
+ added 9 changesets with 5 changes to 5 files
+ new changesets *:* (glob)
+ $ hg tracked
+ I path:d0
+ I path:d1
+ I path:d3
+ I path:d6
+ I path:d9
+ $ hg log -T "{if(ellipsis, '...')}{node|short}: {desc}\n"
+ ...*: add d10/f (glob)
+ *: add d9/f (glob)
+ ...*: add d8/f (glob)
+ *: add d6/f (glob)
+ ...*: add d5/f (glob)
+ *: add d3/f (glob)
+ ...*: add d2/f (glob)
+ *: add d1/f (glob)
+ *: add d0/f (glob)
+
+Verify shouldn't claim the repo is corrupt after a widen.
+
+ $ hg verify
+ checking changesets
+ checking manifests
+ checking directory manifests (tree !)
+ crosschecking files in changesets and manifests
+ checking files
+ 5 files, 9 changesets, 5 total revisions
+
+Widening preserves parent of local commit
+
+ $ cd ..
+ $ hg clone -q --narrow ssh://user@dummy/upstream narrow3 --include d2 -r 2
+ $ cd narrow3
+ $ hg log -T "{if(ellipsis, '...')}{node|short}: {desc}\n"
+ *: add d2/f (glob)
+ ...*: add d1/f (glob)
+ $ hg pull -q -r 3
+ $ hg co -q tip
+ $ hg pull -q -r 4
+ $ echo local > d2/f
+ $ hg ci -m local
+ created new head
+ $ hg tracked -q --addinclude d0 --addinclude d9
+
+Widening preserves bookmarks
+
+ $ cd ..
+ $ hg clone -q --narrow ssh://user@dummy/upstream narrow-bookmarks --include d4
+ $ cd narrow-bookmarks
+ $ echo local > d4/f
+ $ hg ci -m local
+ $ hg bookmarks bookmark
+ $ hg bookmarks
+ * bookmark 3:* (glob)
+ $ hg -q tracked --addinclude d2
+ $ hg bookmarks
+ * bookmark 5:* (glob)
+ $ hg log -r bookmark -T '{desc}\n'
+ local
+
+Widening that fails can be recovered from
+
+ $ cd ..
+ $ hg clone -q --narrow ssh://user@dummy/upstream interrupted --include d0
+ $ cd interrupted
+ $ echo local > d0/f
+ $ hg ci -m local
+ $ hg log -T "{if(ellipsis, '...')}{rev}: {desc}\n"
+ 2: local
+ ...1: add d10/f
+ 0: add d0/f
+ $ hg bookmarks bookmark
+ $ hg --config hooks.pretxnchangegroup.bad=false tracked --addinclude d1
+ comparing with ssh://user@dummy/upstream
+ searching for changes
+ no changes found
+ saved backup bundle to $TESTTMP/interrupted/.hg/strip-backup/*-widen.hg (glob)
+ adding changesets
+ adding manifests
+ adding file changes
+ added 3 changesets with 2 changes to 2 files
+ transaction abort!
+ rollback completed
+ abort: pretxnchangegroup.bad hook exited with status 1
+ [255]
+ $ hg log -T "{if(ellipsis, '...')}{rev}: {desc}\n"
+ $ hg bookmarks
+ no bookmarks set
+ $ hg unbundle .hg/strip-backup/*-widen.hg
+ adding changesets
+ adding manifests
+ adding file changes
+ added 3 changesets with 2 changes to 1 files
+ new changesets *:* (glob)
+ (run 'hg update' to get a working copy)
+ $ hg log -T "{if(ellipsis, '...')}{rev}: {desc}\n"
+ 2: local
+ ...1: add d10/f
+ 0: add d0/f
+ $ hg bookmarks
+ * bookmark 2:* (glob)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-narrow.t Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,374 @@
+#testcases flat tree
+
+ $ . "$TESTDIR/narrow-library.sh"
+
+#if tree
+ $ cat << EOF >> $HGRCPATH
+ > [experimental]
+ > treemanifest = 1
+ > EOF
+#endif
+
+ $ hg init master
+ $ cd master
+ $ cat >> .hg/hgrc <<EOF
+ > [narrow]
+ > serveellipses=True
+ > EOF
+ $ for x in `$TESTDIR/seq.py 0 10`
+ > do
+ > mkdir d$x
+ > echo $x > d$x/f
+ > hg add d$x/f
+ > hg commit -m "add d$x/f"
+ > done
+ $ hg log -T "{node|short}: {desc}\n"
+ *: add d10/f (glob)
+ *: add d9/f (glob)
+ *: add d8/f (glob)
+ *: add d7/f (glob)
+ *: add d6/f (glob)
+ *: add d5/f (glob)
+ *: add d4/f (glob)
+ *: add d3/f (glob)
+ *: add d2/f (glob)
+ *: add d1/f (glob)
+ *: add d0/f (glob)
+ $ cd ..
+
+Error if '.' or '..' are in the directory to track.
+ $ hg clone --narrow ssh://user@dummy/master foo --include ./asdf
+ requesting all changes
+ abort: "." and ".." are not allowed in narrowspec paths
+ [255]
+ $ hg clone --narrow ssh://user@dummy/master foo --include asdf/..
+ requesting all changes
+ abort: "." and ".." are not allowed in narrowspec paths
+ [255]
+ $ hg clone --narrow ssh://user@dummy/master foo --include a/./c
+ requesting all changes
+ abort: "." and ".." are not allowed in narrowspec paths
+ [255]
+
+Names with '.' in them are OK.
+ $ hg clone --narrow ssh://user@dummy/master should-work --include a/.b/c
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 0 changes to 0 files
+ new changesets * (glob)
+ updating to branch default
+ 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+Test repo with local changes
+ $ hg clone --narrow ssh://user@dummy/master narrow-local-changes --include d0 --include d3 --include d6
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 6 changesets with 3 changes to 3 files
+ new changesets *:* (glob)
+ updating to branch default
+ 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cd narrow-local-changes
+ $ cat >> $HGRCPATH << EOF
+ > [experimental]
+ > evolution=createmarkers
+ > EOF
+ $ echo local change >> d0/f
+ $ hg ci -m 'local change to d0'
+ $ hg co '.^'
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ echo local change >> d3/f
+ $ hg ci -m 'local hidden change to d3'
+ created new head
+ $ hg ci --amend -m 'local change to d3'
+ $ hg tracked --removeinclude d0
+ comparing with ssh://user@dummy/master
+ searching for changes
+ looking for local changes to affected paths
+ The following changeset(s) or their ancestors have local changes not on the remote:
+ * (glob)
+ abort: local changes found
+ (use --force-delete-local-changes to ignore)
+ [255]
+Check that nothing was removed by the failed attempts
+ $ hg tracked
+ I path:d0
+ I path:d3
+ I path:d6
+ $ hg files
+ d0/f
+ d3/f
+ d6/f
+ $ find *
+ d0
+ d0/f
+ d3
+ d3/f
+ d6
+ d6/f
+ $ hg verify -q
+Force deletion of local changes
+ $ hg log -T "{node|short}: {desc} {outsidenarrow}\n"
+ *: local change to d3 (glob)
+ *: local change to d0 (glob)
+ *: add d10/f outsidenarrow (glob)
+ *: add d6/f (glob)
+ *: add d5/f outsidenarrow (glob)
+ *: add d3/f (glob)
+ *: add d2/f outsidenarrow (glob)
+ *: add d0/f (glob)
+ $ hg tracked --removeinclude d0 --force-delete-local-changes
+ comparing with ssh://user@dummy/master
+ searching for changes
+ looking for local changes to affected paths
+ The following changeset(s) or their ancestors have local changes not on the remote:
+ * (glob)
+ saved backup bundle to $TESTTMP/narrow-local-changes/.hg/strip-backup/*-narrow.hg (glob)
+ deleting data/d0/f.i
+ deleting meta/d0/00manifest.i (tree !)
+ $ hg log -T "{node|short}: {desc} {outsidenarrow}\n"
+ *: local change to d3 (glob)
+ *: add d10/f outsidenarrow (glob)
+ *: add d6/f (glob)
+ *: add d5/f outsidenarrow (glob)
+ *: add d3/f (glob)
+ *: add d2/f outsidenarrow (glob)
+ *: add d0/f outsidenarrow (glob)
+Can restore stripped local changes after widening
+ $ hg tracked --addinclude d0 -q
+ $ hg unbundle .hg/strip-backup/*-narrow.hg -q
+ $ hg --hidden co -r 'desc("local change to d0")' -q
+ $ cat d0/f
+ 0
+ local change
+Pruned commits affecting removed paths should not prevent narrowing
+ $ hg co '.^'
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg debugobsolete `hg log -T '{node}' -r 'desc("local change to d0")'`
+ obsoleted 1 changesets
+ $ hg tracked --removeinclude d0
+ comparing with ssh://user@dummy/master
+ searching for changes
+ looking for local changes to affected paths
+ saved backup bundle to $TESTTMP/narrow-local-changes/.hg/strip-backup/*-narrow.hg (glob)
+ deleting data/d0/f.i
+ deleting meta/d0/00manifest.i (tree !)
+Updates off of stripped commit if necessary
+ $ hg co -r 'desc("local change to d3")' -q
+ $ echo local change >> d6/f
+ $ hg ci -m 'local change to d6'
+ $ hg tracked --removeinclude d3 --force-delete-local-changes
+ comparing with ssh://user@dummy/master
+ searching for changes
+ looking for local changes to affected paths
+ The following changeset(s) or their ancestors have local changes not on the remote:
+ * (glob)
+ * (glob)
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ saved backup bundle to $TESTTMP/narrow-local-changes/.hg/strip-backup/*-narrow.hg (glob)
+ deleting data/d3/f.i
+ deleting meta/d3/00manifest.i (tree !)
+ $ hg log -T '{desc}\n' -r .
+ add d10/f
+Updates to nullid if necessary
+ $ hg tracked --addinclude d3 -q
+ $ hg co null -q
+ $ mkdir d3
+ $ echo local change > d3/f
+ $ hg add d3/f
+ $ hg ci -m 'local change to d3'
+ created new head
+ $ hg tracked --removeinclude d3 --force-delete-local-changes
+ comparing with ssh://user@dummy/master
+ searching for changes
+ looking for local changes to affected paths
+ The following changeset(s) or their ancestors have local changes not on the remote:
+ * (glob)
+ 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ saved backup bundle to $TESTTMP/narrow-local-changes/.hg/strip-backup/*-narrow.hg (glob)
+ deleting data/d3/f.i
+ deleting meta/d3/00manifest.i (tree !)
+ $ hg id
+ 000000000000
+ $ cd ..
+
+Can remove last include, making repo empty
+ $ hg clone --narrow ssh://user@dummy/master narrow-empty --include d0 -r 5
+ adding changesets
+ adding manifests
+ adding file changes
+ added 2 changesets with 1 changes to 1 files
+ new changesets *:* (glob)
+ updating to branch default
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cd narrow-empty
+ $ hg tracked --removeinclude d0
+ comparing with ssh://user@dummy/master
+ searching for changes
+ looking for local changes to affected paths
+ deleting data/d0/f.i
+ deleting meta/d0/00manifest.i (tree !)
+ $ hg tracked
+ $ hg files
+ [1]
+ $ test -d d0
+ [1]
+Do some work in the empty clone
+ $ hg diff --change .
+ $ hg branch foo
+ marked working directory as branch foo
+ (branches are permanent and global, did you want a bookmark?)
+ $ hg ci -m empty
+ $ hg pull -q
+Can widen the empty clone
+ $ hg tracked --addinclude d0
+ comparing with ssh://user@dummy/master
+ searching for changes
+ no changes found
+ saved backup bundle to $TESTTMP/narrow-empty/.hg/strip-backup/*-widen.hg (glob)
+ adding changesets
+ adding manifests
+ adding file changes
+ added 3 changesets with 1 changes to 1 files
+ new changesets *:* (glob)
+ $ hg tracked
+ I path:d0
+ $ hg files
+ d0/f
+ $ find *
+ d0
+ d0/f
+ $ cd ..
+
+TODO(martinvonz): test including e.g. d3/g and then removing it once
+https://bitbucket.org/Google/narrowhg/issues/6 is fixed
+
+ $ hg clone --narrow ssh://user@dummy/master narrow --include d0 --include d3 --include d6 --include d9
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 8 changesets with 4 changes to 4 files
+ new changesets *:* (glob)
+ updating to branch default
+ 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cd narrow
+ $ hg tracked
+ I path:d0
+ I path:d3
+ I path:d6
+ I path:d9
+ $ hg tracked --removeinclude d6
+ comparing with ssh://user@dummy/master
+ searching for changes
+ looking for local changes to affected paths
+ deleting data/d6/f.i
+ deleting meta/d6/00manifest.i (tree !)
+ $ hg tracked
+ I path:d0
+ I path:d3
+ I path:d9
+ $ hg debugrebuildfncache
+ fncache already up to date
+ $ find *
+ d0
+ d0/f
+ d3
+ d3/f
+ d9
+ d9/f
+ $ hg verify -q
+ $ hg tracked --addexclude d3/f
+ comparing with ssh://user@dummy/master
+ searching for changes
+ looking for local changes to affected paths
+ deleting data/d3/f.i
+ $ hg tracked
+ I path:d0
+ I path:d3
+ I path:d9
+ X path:d3/f
+ $ hg debugrebuildfncache
+ fncache already up to date
+ $ find *
+ d0
+ d0/f
+ d9
+ d9/f
+ $ hg verify -q
+ $ hg tracked --addexclude d0
+ comparing with ssh://user@dummy/master
+ searching for changes
+ looking for local changes to affected paths
+ deleting data/d0/f.i
+ deleting meta/d0/00manifest.i (tree !)
+ $ hg tracked
+ I path:d3
+ I path:d9
+ X path:d0
+ X path:d3/f
+ $ hg debugrebuildfncache
+ fncache already up to date
+ $ find *
+ d9
+ d9/f
+
+Make 15 changes to d9 to test the path without --verbose
+(Note: using regexes instead of "* (glob)" because if the test fails, it
+produces more sensible diffs)
+ $ hg tracked
+ I path:d3
+ I path:d9
+ X path:d0
+ X path:d3/f
+ $ for x in `$TESTDIR/seq.py 1 15`
+ > do
+ > echo local change >> d9/f
+ > hg commit -m "change $x to d9/f"
+ > done
+ $ hg tracked --removeinclude d9
+ comparing with ssh://user@dummy/master
+ searching for changes
+ looking for local changes to affected paths
+ The following changeset(s) or their ancestors have local changes not on the remote:
+ ^[0-9a-f]{12}$ (re)
+ ^[0-9a-f]{12}$ (re)
+ ^[0-9a-f]{12}$ (re)
+ ^[0-9a-f]{12}$ (re)
+ ^[0-9a-f]{12}$ (re)
+ ^[0-9a-f]{12}$ (re)
+ ^[0-9a-f]{12}$ (re)
+ ^[0-9a-f]{12}$ (re)
+ ^[0-9a-f]{12}$ (re)
+ ^[0-9a-f]{12}$ (re)
+ ...and 5 more, use --verbose to list all
+ abort: local changes found
+ (use --force-delete-local-changes to ignore)
+ [255]
+Now test it *with* verbose.
+ $ hg tracked --removeinclude d9 --verbose
+ comparing with ssh://user@dummy/master
+ searching for changes
+ looking for local changes to affected paths
+ The following changeset(s) or their ancestors have local changes not on the remote:
+ ^[0-9a-f]{12}$ (re)
+ ^[0-9a-f]{12}$ (re)
+ ^[0-9a-f]{12}$ (re)
+ ^[0-9a-f]{12}$ (re)
+ ^[0-9a-f]{12}$ (re)
+ ^[0-9a-f]{12}$ (re)
+ ^[0-9a-f]{12}$ (re)
+ ^[0-9a-f]{12}$ (re)
+ ^[0-9a-f]{12}$ (re)
+ ^[0-9a-f]{12}$ (re)
+ ^[0-9a-f]{12}$ (re)
+ ^[0-9a-f]{12}$ (re)
+ ^[0-9a-f]{12}$ (re)
+ ^[0-9a-f]{12}$ (re)
+ ^[0-9a-f]{12}$ (re)
+ abort: local changes found
+ (use --force-delete-local-changes to ignore)
+ [255]
--- a/tests/test-notify.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-notify.t Mon Mar 19 08:07:18 2018 -0700
@@ -421,7 +421,7 @@
> test = False
> mbox = mbox
> EOF
- $ $PYTHON -c 'file("a/a", "ab").write("no" * 500 + "\xd1\x84" + "\n")'
+ $ $PYTHON -c 'open("a/a", "ab").write("no" * 500 + "\xd1\x84" + "\n")'
$ hg --cwd a commit -A -m "long line"
$ hg --traceback --cwd b pull ../a
pulling from ../a
--- a/tests/test-obsolete-changeset-exchange.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-obsolete-changeset-exchange.t Mon Mar 19 08:07:18 2018 -0700
@@ -95,10 +95,12 @@
Stream params: {Compression: BZ}
changegroup -- {nbchanges: 1, version: 02}
f89bcc95eba5174b1ccc3e33a82e84c96e8338ee
+ cache:rev-branch-cache -- {}
$ hg debugbundle ../f89bcc95eba5-obs.hg
Stream params: {Compression: BZ}
changegroup -- {nbchanges: 1, version: 02}
f89bcc95eba5174b1ccc3e33a82e84c96e8338ee
+ cache:rev-branch-cache -- {}
obsmarkers -- {}
version: 1 (70 bytes)
9d73aac1b2ed7d53835eaeec212ed41ea47da53a f89bcc95eba5174b1ccc3e33a82e84c96e8338ee 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
@@ -153,10 +155,11 @@
list of changesets:
bec0734cd68e84477ba7fc1d13e6cff53ab70129
listing keys for "bookmarks"
- bundle2-output-bundle: "HG20", 3 parts total
+ bundle2-output-bundle: "HG20", 4 parts total
bundle2-output-part: "changegroup" (params: 1 mandatory 1 advisory) streamed payload
bundle2-output-part: "listkeys" (params: 1 mandatory) empty payload
bundle2-output-part: "phase-heads" 24 bytes payload
+ bundle2-output-part: "cache:rev-branch-cache" streamed payload
bundle2-input-bundle: with-transaction
bundle2-input-part: "changegroup" (params: 1 mandatory 1 advisory) supported
adding changesets
@@ -169,7 +172,9 @@
bundle2-input-part: "listkeys" (params: 1 mandatory) supported
bundle2-input-part: "phase-heads" supported
bundle2-input-part: total payload size 24
- bundle2-input-bundle: 2 parts total
+ bundle2-input-part: "cache:rev-branch-cache" supported
+ bundle2-input-part: total payload size 39
+ bundle2-input-bundle: 3 parts total
checking for updated bookmarks
updating the branch cache
new changesets bec0734cd68e
--- a/tests/test-obsolete-divergent.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-obsolete-divergent.t Mon Mar 19 08:07:18 2018 -0700
@@ -621,6 +621,34 @@
a139f71be9da
$ hg log -r 'contentdivergent()'
+#if serve
+
+ $ hg serve -n test -p $HGPORT -d --pid-file=hg.pid --config web.view=all \
+ > -A access.log -E errors.log
+ $ cat hg.pid >> $DAEMON_PIDS
+
+check an obsolete changeset that was rewritten and also split
+
+ $ get-with-headers.py localhost:$HGPORT 'rev/e442cfc57690?style=paper' | egrep 'rewritten|split'
+ <td>rewritten as <a href="/rev/bed64f5d2f5a?style=paper">bed64f5d2f5a</a> by test <span class="age">Thu, 01 Jan 1970 00:00:00 +0000</span><br>
+ split as <a href="/rev/7ae126973a96?style=paper">7ae126973a96</a> <a href="/rev/14608b260df8?style=paper">14608b260df8</a> by test <span class="age">Thu, 01 Jan 1970 00:00:00 +0000</span></td>
+ $ get-with-headers.py localhost:$HGPORT 'rev/e442cfc57690?style=coal' | egrep 'rewritten|split'
+ <td>rewritten as <a href="/rev/bed64f5d2f5a?style=coal">bed64f5d2f5a</a> by test <span class="age">Thu, 01 Jan 1970 00:00:00 +0000</span><br>
+ split as <a href="/rev/7ae126973a96?style=coal">7ae126973a96</a> <a href="/rev/14608b260df8?style=coal">14608b260df8</a> by test <span class="age">Thu, 01 Jan 1970 00:00:00 +0000</span></td>
+ $ get-with-headers.py localhost:$HGPORT 'rev/e442cfc57690?style=gitweb' | egrep 'rewritten|split'
+ <td>rewritten as <a class="list" href="/rev/bed64f5d2f5a?style=gitweb">bed64f5d2f5a</a> by test <span class="age">Thu, 01 Jan 1970 00:00:00 +0000</span></td>
+ <td>split as <a class="list" href="/rev/7ae126973a96?style=gitweb">7ae126973a96</a> <a class="list" href="/rev/14608b260df8?style=gitweb">14608b260df8</a> by test <span class="age">Thu, 01 Jan 1970 00:00:00 +0000</span></td>
+ $ get-with-headers.py localhost:$HGPORT 'rev/e442cfc57690?style=monoblue' | egrep 'rewritten|split'
+ <dd>rewritten as <a href="/rev/bed64f5d2f5a?style=monoblue">bed64f5d2f5a</a> by test <span class="age">Thu, 01 Jan 1970 00:00:00 +0000</span></dd>
+ <dd>split as <a href="/rev/7ae126973a96?style=monoblue">7ae126973a96</a> <a href="/rev/14608b260df8?style=monoblue">14608b260df8</a> by test <span class="age">Thu, 01 Jan 1970 00:00:00 +0000</span></dd>
+ $ get-with-headers.py localhost:$HGPORT 'rev/e442cfc57690?style=spartan' | egrep 'rewritten|split'
+ <td class="obsolete">rewritten as <a href="/rev/bed64f5d2f5a?style=spartan">bed64f5d2f5a</a> by test <span class="age">Thu, 01 Jan 1970 00:00:00 +0000</span></td>
+ <td class="obsolete">split as <a href="/rev/7ae126973a96?style=spartan">7ae126973a96</a> <a href="/rev/14608b260df8?style=spartan">14608b260df8</a> by test <span class="age">Thu, 01 Jan 1970 00:00:00 +0000</span></td>
+
+ $ killdaemons.py
+
+#endif
+
$ cd ..
@@ -689,3 +717,22 @@
a178212c3433c4e77b573f6011e29affb8aefa33 1a2a9b5b0030632400aa78e00388c20f99d3ec44 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '1', 'operation': 'amend', 'user': 'test'}
a178212c3433c4e77b573f6011e29affb8aefa33 ad6478fb94ecec98b86daae98722865d494ac561 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '13', 'operation': 'test', 'user': 'test'}
ad6478fb94ecec98b86daae98722865d494ac561 70d5a63ca112acb3764bc1d7320ca90ea688d671 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '9', 'operation': 'test', 'user': 'test'}
+
+ $ hg debugwhyunstable 1a2a9b5b0030
+ content-divergent: 70d5a63ca112acb3764bc1d7320ca90ea688d671 (draft) predecessor a178212c3433c4e77b573f6011e29affb8aefa33
+
+#if serve
+
+ $ hg serve -n test -p $HGPORT -d --pid-file=hg.pid -A access.log -E errors.log
+ $ cat hg.pid >> $DAEMON_PIDS
+
+check explanation for a content-divergent changeset
+
+ $ get-with-headers.py localhost:$HGPORT 'rev/1a2a9b5b0030?style=paper' | grep divergent:
+ <td>content-divergent: <a href="/rev/70d5a63ca112?style=paper">70d5a63ca112</a> (draft) predecessor <a href="/rev/a178212c3433?style=paper">a178212c3433</a></td>
+ $ get-with-headers.py localhost:$HGPORT 'rev/1a2a9b5b0030?style=coal' | grep divergent:
+ <td>content-divergent: <a href="/rev/70d5a63ca112?style=coal">70d5a63ca112</a> (draft) predecessor <a href="/rev/a178212c3433?style=coal">a178212c3433</a></td>
+
+ $ killdaemons.py
+
+#endif
--- a/tests/test-obsolete.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-obsolete.t Mon Mar 19 08:07:18 2018 -0700
@@ -18,7 +18,7 @@
> def reposetup(ui, repo):
> class debugkeysrepo(repo.__class__):
> def listkeys(self, namespace):
- > ui.write('listkeys %s\n' % (namespace,))
+ > ui.write(b'listkeys %s\n' % (namespace,))
> return super(debugkeysrepo, self).listkeys(namespace)
>
> if repo.local():
@@ -1033,6 +1033,12 @@
orphan: 2 changesets
phase-divergent: 1 changesets
+test debugwhyunstable output
+
+ $ hg debugwhyunstable 50c51b361e60
+ orphan: obsolete parent 3de5eca88c00aa039da7399a220f4a5221faa585
+ phase-divergent: immutable predecessor 245bde4270cd1072a27757984f9cda8ba26f08ca
+
#if serve
$ hg serve -n test -p $HGPORT -d --pid-file=hg.pid -A access.log -E errors.log
@@ -1049,20 +1055,8 @@
$ get-with-headers.py localhost:$HGPORT 'log?rev=first(obsolete())&style=monoblue' | grep '<span class="logtags">'
<span class="logtags"><span class="phasetag" title="draft">draft</span> <span class="obsoletetag" title="obsolete">obsolete</span> </span>
$ get-with-headers.py localhost:$HGPORT 'log?rev=first(obsolete())&style=spartan' | grep 'class="obsolete"'
- <th class="obsolete">obsolete:</th>
- <td class="obsolete">pruned</td>
-
-check an obsolete changeset that has been rewritten
- $ get-with-headers.py localhost:$HGPORT 'rev/cda648ca50f5?style=paper' | grep rewritten
- <td>rewritten as <a href="/rev/3de5eca88c00?style=paper">3de5eca88c00</a> </td>
- $ get-with-headers.py localhost:$HGPORT 'rev/cda648ca50f5?style=coal' | grep rewritten
- <td>rewritten as <a href="/rev/3de5eca88c00?style=coal">3de5eca88c00</a> </td>
- $ get-with-headers.py localhost:$HGPORT 'rev/cda648ca50f5?style=gitweb' | grep rewritten
- <tr><td>obsolete</td><td>rewritten as <a class="list" href="/rev/3de5eca88c00?style=gitweb">3de5eca88c00</a> </td></tr>
- $ get-with-headers.py localhost:$HGPORT 'rev/cda648ca50f5?style=monoblue' | grep rewritten
- <dt>obsolete</dt><dd>rewritten as <a href="/rev/3de5eca88c00?style=monoblue">3de5eca88c00</a> </dd>
- $ get-with-headers.py localhost:$HGPORT 'rev/cda648ca50f5?style=spartan' | grep rewritten
- <td class="obsolete">rewritten as <a href="/rev/3de5eca88c00?style=spartan">3de5eca88c00</a> </td>
+ <th class="obsolete">obsolete:</th>
+ <td class="obsolete">pruned by test <span class="age">Thu, 01 Jan 1970 00:00:00 +0000</span></td>
check changeset with instabilities
@@ -1078,9 +1072,19 @@
<th class="instabilities">instabilities:</th>
<td class="instabilities">orphan phase-divergent </td>
+check explanation for an orphan and phase-divergent changeset
+
+ $ get-with-headers.py localhost:$HGPORT 'rev/50c51b361e60?style=paper' | egrep '(orphan|phase-divergent):'
+ <td>orphan: obsolete parent <a href="/rev/3de5eca88c00?style=paper">3de5eca88c00</a><br>
+ phase-divergent: immutable predecessor <a href="/rev/245bde4270cd?style=paper">245bde4270cd</a></td>
+ $ get-with-headers.py localhost:$HGPORT 'rev/50c51b361e60?style=coal' | egrep '(orphan|phase-divergent):'
+ <td>orphan: obsolete parent <a href="/rev/3de5eca88c00?style=coal">3de5eca88c00</a><br>
+ phase-divergent: immutable predecessor <a href="/rev/245bde4270cd?style=coal">245bde4270cd</a></td>
+
$ killdaemons.py
$ rm hg.pid access.log errors.log
+
#endif
Test incoming/outcoming with changesets obsoleted remotely, known locally
@@ -1291,12 +1295,12 @@
>
> cmdtable = {}
> command = registrar.command(cmdtable)
- > @command(b"amendtransient",[], _('hg amendtransient [rev]'))
+ > @command(b"amendtransient",[], _(b'hg amendtransient [rev]'))
> def amend(ui, repo, *pats, **opts):
> opts['message'] = 'Test'
> opts['logfile'] = None
> cmdutil.amend(ui, repo, repo['.'], {}, pats, opts)
- > ui.write('%s\n' % repo.changelog.headrevs())
+ > ui.write(b'%s\n' % repo.changelog.headrevs())
> EOF
$ cat >> $HGRCPATH << EOF
> [extensions]
@@ -1331,7 +1335,7 @@
> def trhook(tr):
> repo = reporef()
> hidden1 = repoview.computehidden(repo)
- > hidden = repoview.filterrevs(repo, 'visible')
+ > hidden = repoview.filterrevs(repo, b'visible')
> if sorted(hidden1) != sorted(hidden):
> print("cache inconsistency")
> bkmstoreinst._repo.currenttransaction().addpostclose('test_extension', trhook)
@@ -1423,6 +1427,7 @@
Stream params: {Compression: BZ}
changegroup -- {nbchanges: 1, version: 02}
e008cf2834908e5d6b0f792a9d4b0e2272260fb8
+ cache:rev-branch-cache -- {}
phase-heads -- {}
e008cf2834908e5d6b0f792a9d4b0e2272260fb8 draft
@@ -1464,6 +1469,7 @@
changegroup -- {nbchanges: 2, version: 02}
e016b03fd86fcccc54817d120b90b751aaf367d6
b0551702f918510f01ae838ab03a463054c67b46
+ cache:rev-branch-cache -- {}
obsmarkers -- {}
version: 1 (92 bytes)
e008cf2834908e5d6b0f792a9d4b0e2272260fb8 b0551702f918510f01ae838ab03a463054c67b46 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '8', 'operation': 'amend', 'user': 'test'}
--- a/tests/test-parseindex.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-parseindex.t Mon Mar 19 08:07:18 2018 -0700
@@ -41,10 +41,17 @@
> def __getattr__(self, key):
> return getattr(self.real, key)
>
+ > def __enter__(self):
+ > self.real.__enter__()
+ > return self
+ >
+ > def __exit__(self, *args, **kwargs):
+ > return self.real.__exit__(*args, **kwargs)
+ >
> def opener(*args):
> o = vfs.vfs(*args)
- > def wrapper(*a):
- > f = o(*a)
+ > def wrapper(*a, **kwargs):
+ > f = o(*a, **kwargs)
> return singlebyteread(f)
> return wrapper
>
--- a/tests/test-patch-offset.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-patch-offset.t Mon Mar 19 08:07:18 2018 -0700
@@ -5,7 +5,7 @@
> path = sys.argv[1]
> patterns = sys.argv[2:]
>
- > fp = file(path, 'wb')
+ > fp = open(path, 'wb')
> for pattern in patterns:
> count = int(pattern[0:-1])
> char = pattern[-1] + '\n'
--- a/tests/test-patchbomb.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-patchbomb.t Mon Mar 19 08:07:18 2018 -0700
@@ -390,14 +390,15 @@
Content-Disposition: attachment; filename="bundle.hg"
Content-Transfer-Encoding: base64
- SEcyMAAAAA5Db21wcmVzc2lvbj1CWkJaaDkxQVkmU1nYvy2xAAAJf//7vFYQXD1/4H7R09C/470I
- Ak0E4pe4SIIIgQSgGEQOcLAA5VBKqeppMxTI0YjQNBgQMQDI0GgMhtR6I0GI2p6I0yeSEVT9MiYn
- qjCYQwCBtARptARgBNDEwAGiDCMA40NGjQaNA0AAAAADIAAAA0BkABktCk6qObVxZ2A/33KHibLr
- UQ4BwkgcPcmuCUAQZCztIWgR1SpBS6IqbIij4UFwhnnMkElcFTqoucIWbsBPK3l+6c+xYaVBWsJo
- aT0OV/YAOvLrziifDQMJOMIaaYce9agtI2EwQBAq089UiRU+evFHSLRBT7Wa/D/YBaUtU5ezvtr3
- 6yrIS4Iyp9VWESdWPEi6VjRjdcEY4HvbmDIVEAEVJIUrHNIBx/MmnBBRkw8tSlCQ8ABZxf5ejgBI
- pP5TSQPLVMYbq1qbBPmWN0LYVlAvRbP4X512kDQZ9y4TQbvoZmhe+54sRsEJ8GW3hMJjERh0NNlg
- aB+3Cw/4u5IpwoSGxfltiA==
+ SEcyMAAAAA5Db21wcmVzc2lvbj1CWkJaaDkxQVkmU1kHdO0GAAAN////vFcSXL9/8H7R09C/578I
+ Ak0E4pe4SIIIgQSgGEQOcLABGYYNKiaaZGEyYjJhGTTRpiHogxGmTRiGRkNMIwhhPSbQJtpQiJkn
+ poyk9I0PUeoNNNBkeUAHqGgD0Ro0NNBoBoaMagNBoNCpNPUemp6QGmgyaPSGmQxGhkZDQbUaBkHp
+ MhoaANNMhkIyIauvSJPL4aUXjIQemQXkoaqOKqAQDIABsZALnf0yCLAyvmktzDWBCVHO6bb6kCqE
+ ZobVEhmMBjs0oQzekgs6PgZSyII8zy9mmG9To49ZlN6TaX5BxlS7cJiuICUdyjNQPIIdQs1Qqqqk
+ JZ2/BksYcU4HQyssZcpkoMco6gRc888KF9BO7BvuSuIPz7A4crBoaQB+euFU1ilz8yIBBmNBDgRX
+ pVh4zkmPiSKcqRJxcshMqh0vkKlgQDTcOujtdmnMVBZfQiPPemcHm2098VJyHBAOqOwluyIKyG92
+ JAR0CCu9SB5q9DyPHUdc5yB5CurIZHt3GM0dCiQRIN0EAcQNmTYTiHdi6B6Dc/ma0hrmSCQXBzfU
+ BEwthEg0YGaJf4u5IpwoSAO6doMA
--===============*==-- (glob)
with a specific bundle type
--- a/tests/test-pathencode.py Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-pathencode.py Mon Mar 19 08:07:18 2018 -0700
@@ -64,7 +64,7 @@
counts.pop(c, None)
t = sum(counts.itervalues()) / 100.0
fp.write('probtable = (')
- for i, (k, v) in enumerate(sorted(counts.iteritems(), key=lambda x: x[1],
+ for i, (k, v) in enumerate(sorted(counts.items(), key=lambda x: x[1],
reverse=True)):
if (i % 5) == 0:
fp.write('\n ')
--- a/tests/test-pending.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-pending.t Mon Mar 19 08:07:18 2018 -0700
@@ -44,7 +44,7 @@
> import os, time
> from mercurial import ui, localrepo
> def rejecthook(ui, repo, hooktype, node, **opts):
- > ui.write('hook %s\\n' % repo['tip'].hex())
+ > ui.write(b'hook %s\\n' % repo[b'tip'].hex())
> # create the notify file so caller knows we're running
> fpath = os.path.join('$d', 'notify')
> f = open(fpath, 'w')
--- a/tests/test-pull.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-pull.t Mon Mar 19 08:07:18 2018 -0700
@@ -1,5 +1,15 @@
#require serve
+#testcases sshv1 sshv2
+
+#if sshv2
+ $ cat >> $HGRCPATH << EOF
+ > [experimental]
+ > sshpeer.advertise-v2 = true
+ > sshserver.support-v2 = true
+ > EOF
+#endif
+
$ hg init test
$ cd test
--- a/tests/test-pushvars.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-pushvars.t Mon Mar 19 08:07:18 2018 -0700
@@ -11,8 +11,6 @@
$ cat >> $HGRCPATH << EOF
> [hooks]
> pretxnchangegroup = sh $TESTTMP/pretxnchangegroup.sh
- > [experimental]
- > bundle2-exp = true
> EOF
$ hg init repo
--- a/tests/test-rebase-conflicts.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-rebase-conflicts.t Mon Mar 19 08:07:18 2018 -0700
@@ -297,8 +297,9 @@
list of changesets:
e31216eec445e44352c5f01588856059466a24c9
2f2496ddf49d69b5ef23ad8cf9fb2e0e4faf0ac2
- bundle2-output-bundle: "HG20", (1 params) 2 parts total
+ bundle2-output-bundle: "HG20", (1 params) 3 parts total
bundle2-output-part: "changegroup" (params: 1 mandatory 1 advisory) streamed payload
+ bundle2-output-part: "cache:rev-branch-cache" streamed payload
bundle2-output-part: "phase-heads" 24 bytes payload
saved backup bundle to $TESTTMP/issue4041/.hg/strip-backup/e31216eec445-15f7a814-rebase.hg
3 changesets found
@@ -306,8 +307,9 @@
4c9fbe56a16f30c0d5dcc40ec1a97bbe3325209c
19c888675e133ab5dff84516926a65672eaf04d9
2a7f09cac94c7f4b73ebd5cd1a62d3b2e8e336bf
- bundle2-output-bundle: "HG20", 2 parts total
+ bundle2-output-bundle: "HG20", 3 parts total
bundle2-output-part: "changegroup" (params: 1 mandatory 1 advisory) streamed payload
+ bundle2-output-part: "cache:rev-branch-cache" streamed payload
bundle2-output-part: "phase-heads" 24 bytes payload
adding branch
bundle2-input-bundle: with-transaction
@@ -321,9 +323,12 @@
adding f1.txt revisions
added 2 changesets with 2 changes to 1 files
bundle2-input-part: total payload size 1686
+ bundle2-input-part: "cache:rev-branch-cache" supported
+ bundle2-input-part: total payload size 74
+ truncating cache/rbc-revs-v1 to 56
bundle2-input-part: "phase-heads" supported
bundle2-input-part: total payload size 24
- bundle2-input-bundle: 1 parts total
+ bundle2-input-bundle: 2 parts total
updating the branch cache
invalid branchheads cache (served): tip differs
rebase completed
--- a/tests/test-rebase-dest.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-rebase-dest.t Mon Mar 19 08:07:18 2018 -0700
@@ -85,20 +85,20 @@
> from mercurial import registrar, revset, revsetlang, smartset
> revsetpredicate = registrar.revsetpredicate()
> cache = {}
- > @revsetpredicate('map')
+ > @revsetpredicate(b'map')
> def map(repo, subset, x):
> """(set, mapping)"""
- > setarg, maparg = revsetlang.getargs(x, 2, 2, '')
+ > setarg, maparg = revsetlang.getargs(x, 2, 2, b'')
> rset = revset.getset(repo, smartset.fullreposet(repo), setarg)
- > mapstr = revsetlang.getstring(maparg, '')
- > map = dict(a.split(':') for a in mapstr.split(','))
+ > mapstr = revsetlang.getstring(maparg, b'')
+ > map = dict(a.split(b':') for a in mapstr.split(b','))
> rev = rset.first()
> desc = repo[rev].description()
> newdesc = map.get(desc)
- > if newdesc == 'null':
+ > if newdesc == b'null':
> revs = [-1]
> else:
- > query = revsetlang.formatspec('desc(%s)', newdesc)
+ > query = revsetlang.formatspec(b'desc(%s)', newdesc)
> revs = repo.revs(query)
> return smartset.baseset(revs)
> EOF
--- a/tests/test-rebase-inmemory.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-rebase-inmemory.t Mon Mar 19 08:07:18 2018 -0700
@@ -140,12 +140,11 @@
$ ls -l f | cut -c -10
-rwxr-xr-x
-Rebase the working copy parent, which should default to an on-disk merge even if
-we requested in-memory.
+Rebase the working copy parent
$ hg up -C 3
0 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg rebase -r 3 -d 0 --debug | grep rebasing
- rebasing on disk
+ rebasing in-memory
rebasing 3:753feb6fd12a "c" (tip)
$ hg tglog
@ 3: 844a7de3e617 'c'
--- a/tests/test-rebase-interruptions.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-rebase-interruptions.t Mon Mar 19 08:07:18 2018 -0700
@@ -461,5 +461,24 @@
note: rebase of 1:fdaca8533b86 created no changes to commit
saved backup bundle to $TESTTMP/repo/.hg/strip-backup/fdaca8533b86-7fd70513-rebase.hg
$ hg resolve --list
- $ test -f .hg/merge
+ $ test -d .hg/merge
+ [1]
+Now try again with --collapse
+ $ hg unbundle -q .hg/strip-backup/fdaca8533b86-7fd70513-rebase.hg
+ $ hg rebase -s 2 -d 1 --noninteractive --collapse
+ rebasing 2:fdaca8533b86 "b" (tip)
+ merging a
+ warning: conflicts while merging a! (edit, then use 'hg resolve --mark')
+ unresolved conflicts (see hg resolve, then hg rebase --continue)
[1]
+ $ echo a > a
+ $ echo c >> a
+ $ hg resolve --mark a
+ (no more unresolved files)
+ continue: hg rebase --continue
+ $ hg rebase --continue
+ rebasing 2:fdaca8533b86 "b" (tip)
+ saved backup bundle to $TESTTMP/repo/.hg/strip-backup/fdaca8533b86-7fd70513-rebase.hg
+ $ hg resolve --list
+ $ test -d .hg/merge
+ [1]
--- a/tests/test-rebase-obsolete.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-rebase-obsolete.t Mon Mar 19 08:07:18 2018 -0700
@@ -1218,6 +1218,46 @@
o 0:b173517d0057 a
+issue5782
+ $ hg strip -r 0:
+ $ hg debugdrawdag <<EOF
+ > d
+ > |
+ > c1 c # replace: c -> c1
+ > \ /
+ > b
+ > |
+ > a
+ > EOF
+ 1 new orphan changesets
+ $ hg debugobsolete `hg log -T "{node}" --hidden -r 'desc("c1")'`
+ obsoleted 1 changesets
+ $ hg log -G -r 'a': --hidden
+ * 4:76be324c128b d
+ |
+ | x 3:ef8a456de8fa c1 (pruned)
+ | |
+ x | 2:a82ac2b38757 c (rewritten using replace as 3:ef8a456de8fa)
+ |/
+ o 1:488e1b7e7341 b
+ |
+ o 0:b173517d0057 a
+
+ $ hg rebase -d 0 -r 2
+ rebasing 2:a82ac2b38757 "c" (c)
+ $ hg log -G -r 'a': --hidden
+ o 5:69ad416a4a26 c
+ |
+ | * 4:76be324c128b d
+ | |
+ | | x 3:ef8a456de8fa c1 (pruned)
+ | | |
+ | x | 2:a82ac2b38757 c (rewritten using replace as 3:ef8a456de8fa rewritten using rebase as 5:69ad416a4a26)
+ | |/
+ | o 1:488e1b7e7341 b
+ |/
+ o 0:b173517d0057 a
+
$ cd ..
Rebase merge where successor of one parent is equal to destination (issue5198)
--- a/tests/test-rebase-scenario-global.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-rebase-scenario-global.t Mon Mar 19 08:07:18 2018 -0700
@@ -954,14 +954,14 @@
> def _rebase(orig, ui, repo, *args, **kwargs):
> with repo.wlock():
> with repo.lock():
- > with repo.transaction('wrappedrebase'):
+ > with repo.transaction(b'wrappedrebase'):
> return orig(ui, repo, *args, **kwargs)
> def wraprebase(loaded):
> assert loaded
- > rebasemod = extensions.find('rebase')
- > extensions.wrapcommand(rebasemod.cmdtable, 'rebase', _rebase)
+ > rebasemod = extensions.find(b'rebase')
+ > extensions.wrapcommand(rebasemod.cmdtable, b'rebase', _rebase)
> def extsetup(ui):
- > extensions.afterloaded('rebase', wraprebase)
+ > extensions.afterloaded(b'rebase', wraprebase)
> EOF
$ cat >> .hg/hgrc <<EOF
--- a/tests/test-rebase-transaction.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-rebase-transaction.t Mon Mar 19 08:07:18 2018 -0700
@@ -1,8 +1,13 @@
+Rebasing using a single transaction
+
$ cat >> $HGRCPATH <<EOF
> [extensions]
> rebase=
> drawdag=$TESTDIR/drawdag.py
>
+ > [rebase]
+ > singletransaction=True
+ >
> [phases]
> publish=False
>
@@ -10,13 +15,9 @@
> tglog = log -G --template "{rev}: {desc}"
> EOF
-Rebasing using a single transaction
+Check that a simple rebase works
- $ hg init singletr && cd singletr
- $ cat >> .hg/hgrc <<EOF
- > [rebase]
- > singletransaction=True
- > EOF
+ $ hg init simple && cd simple
$ hg debugdrawdag <<'EOF'
> Z
> |
@@ -28,11 +29,9 @@
> |/
> A
> EOF
-- We should only see two status stored messages. One from the start, one from
-- the end.
+- We should only see one status stored message. It comes from the start.
$ hg rebase --debug -b D -d Z | grep 'status stored'
rebase status stored
- rebase status stored
$ hg tglog
o 5: D
|
@@ -47,3 +46,151 @@
o 0: A
$ cd ..
+
+Check that --collapse works
+
+ $ hg init collapse && cd collapse
+ $ hg debugdrawdag <<'EOF'
+ > Z
+ > |
+ > | D
+ > | |
+ > | C
+ > | |
+ > Y B
+ > |/
+ > A
+ > EOF
+- We should only see two status stored messages. One from the start, one from
+- cmdutil.commitforceeditor() which forces tr.writepending()
+ $ hg rebase --collapse --debug -b D -d Z | grep 'status stored'
+ rebase status stored
+ rebase status stored
+ $ hg tglog
+ o 3: Collapsed revision
+ | * B
+ | * C
+ | * D
+ o 2: Z
+ |
+ o 1: Y
+ |
+ o 0: A
+
+ $ cd ..
+
+With --collapse, check that conflicts can be resolved and rebase can then be
+continued
+
+ $ hg init collapse-conflict && cd collapse-conflict
+ $ hg debugdrawdag <<'EOF'
+ > Z # Z/conflict=Z
+ > |
+ > | D
+ > | |
+ > | C # C/conflict=C
+ > | |
+ > Y B
+ > |/
+ > A
+ > EOF
+ $ hg rebase --collapse -b D -d Z
+ rebasing 1:112478962961 "B" (B)
+ rebasing 3:c26739dbe603 "C" (C)
+ merging conflict
+ warning: conflicts while merging conflict! (edit, then use 'hg resolve --mark')
+ unresolved conflicts (see hg resolve, then hg rebase --continue)
+ [1]
+ $ hg tglog
+ o 5: D
+ |
+ | @ 4: Z
+ | |
+ @ | 3: C
+ | |
+ | o 2: Y
+ | |
+ o | 1: B
+ |/
+ o 0: A
+
+ $ hg st
+ M C
+ M conflict
+ A B
+ ? conflict.orig
+ $ echo resolved > conflict
+ $ hg resolve -m
+ (no more unresolved files)
+ continue: hg rebase --continue
+ $ hg rebase --continue
+ already rebased 1:112478962961 "B" (B) as 79bc8f4973ce
+ rebasing 3:c26739dbe603 "C" (C)
+ rebasing 5:d24bb333861c "D" (D tip)
+ saved backup bundle to $TESTTMP/collapse-conflict/.hg/strip-backup/112478962961-b5b34645-rebase.hg
+ $ hg tglog
+ o 3: Collapsed revision
+ | * B
+ | * C
+ | * D
+ o 2: Z
+ |
+ o 1: Y
+ |
+ o 0: A
+
+ $ cd ..
+
+With --collapse, check that the commit message editing can be canceled and
+rebase can then be continued
+
+ $ hg init collapse-cancel-editor && cd collapse-cancel-editor
+ $ hg debugdrawdag <<'EOF'
+ > Z
+ > |
+ > | D
+ > | |
+ > | C
+ > | |
+ > Y B
+ > |/
+ > A
+ > EOF
+ $ HGEDITOR=false hg --config ui.interactive=1 rebase --collapse -b D -d Z
+ rebasing 1:112478962961 "B" (B)
+ rebasing 3:26805aba1e60 "C" (C)
+ rebasing 5:f585351a92f8 "D" (D tip)
+ transaction abort!
+ rollback completed
+ abort: edit failed: false exited with status 1
+ [255]
+ $ hg tglog
+ o 5: D
+ |
+ | o 4: Z
+ | |
+ o | 3: C
+ | |
+ | o 2: Y
+ | |
+ o | 1: B
+ |/
+ o 0: A
+
+ $ hg rebase --continue
+ rebasing 1:112478962961 "B" (B)
+ rebasing 3:26805aba1e60 "C" (C)
+ rebasing 5:f585351a92f8 "D" (D tip)
+ saved backup bundle to $TESTTMP/collapse-cancel-editor/.hg/strip-backup/112478962961-cb2a9b47-rebase.hg
+ $ hg tglog
+ o 3: Collapsed revision
+ | * B
+ | * C
+ | * D
+ o 2: Z
+ |
+ o 1: Y
+ |
+ o 0: A
+
+ $ cd ..
--- a/tests/test-releasenotes-formatting.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-releasenotes-formatting.t Mon Mar 19 08:07:18 2018 -0700
@@ -457,3 +457,35 @@
------------------
First paragraph of fix 1.
+
+ $ cd ..
+
+Using multiple admonitions in same changeset
+
+ $ hg init relnotes-multiadmon
+ $ cd relnotes-multiadmon
+
+ $ touch file1
+ $ hg -q commit -A -l - << EOF
+ > commit 1
+ >
+ > .. feature::
+ >
+ > Details about new feature.
+ >
+ > .. perf::
+ >
+ > Improves the execution by 2x
+ > EOF
+
+ $ hg releasenotes -r . $TESTTMP/relnotes-multiple-admonitions
+ $ cat $TESTTMP/relnotes-multiple-admonitions
+ New Features
+ ============
+
+ * Details about new feature.
+
+ Performance Improvements
+ ========================
+
+ * Improves the execution by 2x
--- a/tests/test-releasenotes-parsing.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-releasenotes-parsing.t Mon Mar 19 08:07:18 2018 -0700
@@ -177,3 +177,26 @@
paragraph: Bullet item 1
bullet point:
paragraph: Bullet item 2
+
+Warn user in case of unexpected block while parsing
+
+ $ hg init relnotes-warn
+ $ cd relnotes-warn
+ $ touch feature1
+ $ hg -q commit -A -l - << EOF
+ > commit 1
+ >
+ > .. feature::
+ >
+ > new feature added.
+ > some words about the feature.
+ > EOF
+
+ $ hg releasenote -r .
+ changeset a4251905c440: unexpected block in release notes directive feature
+ New Features
+ ============
+
+ * new feature added. some words about the feature.
+
+ $ cd ..
--- a/tests/test-relink.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-relink.t Mon Mar 19 08:07:18 2018 -0700
@@ -49,7 +49,7 @@
Test files are read in binary mode
- $ $PYTHON -c "file('.hg/store/data/dummy.i', 'wb').write('a\r\nb\n')"
+ $ $PYTHON -c "open('.hg/store/data/dummy.i', 'wb').write(b'a\r\nb\n')"
$ cd ..
@@ -68,7 +68,7 @@
$ echo b >> b
$ hg ci -m changeb
created new head
- $ $PYTHON -c "file('.hg/store/data/dummy.i', 'wb').write('a\nb\r\n')"
+ $ $PYTHON -c "open('.hg/store/data/dummy.i', 'wb').write(b'a\nb\r\n')"
relink
--- a/tests/test-resolve.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-resolve.t Mon Mar 19 08:07:18 2018 -0700
@@ -85,24 +85,30 @@
$ cat > $TESTTMP/markdriver.py << EOF
> '''mark and unmark files as driver-resolved'''
- > from mercurial import merge, registrar, scmutil
+ > from mercurial import (
+ > merge,
+ > pycompat,
+ > registrar,
+ > scmutil,
+ > )
> cmdtable = {}
> command = registrar.command(cmdtable)
> @command(b'markdriver',
- > [('u', 'unmark', None, '')],
- > 'FILE...')
+ > [(b'u', b'unmark', None, b'')],
+ > b'FILE...')
> def markdriver(ui, repo, *pats, **opts):
> wlock = repo.wlock()
+ > opts = pycompat.byteskwargs(opts)
> try:
> ms = merge.mergestate.read(repo)
> m = scmutil.match(repo[None], pats, opts)
> for f in ms:
> if not m(f):
> continue
- > if not opts['unmark']:
- > ms.mark(f, 'd')
+ > if not opts[b'unmark']:
+ > ms.mark(f, b'd')
> else:
- > ms.mark(f, 'u')
+ > ms.mark(f, b'u')
> ms.commit()
> finally:
> wlock.release()
--- a/tests/test-revert-interactive.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-revert-interactive.t Mon Mar 19 08:07:18 2018 -0700
@@ -420,4 +420,13 @@
$ cat a
0
+When specified pattern does not exist, we should exit early (issue5789).
+
+ $ hg files
+ a
+ $ hg rev b
+ b: no such file in rev b40d1912accf
+ $ hg rev -i b
+ b: no such file in rev b40d1912accf
+
$ cd ..
--- a/tests/test-revlog-ancestry.py Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-revlog-ancestry.py Mon Mar 19 08:07:18 2018 -0700
@@ -8,15 +8,15 @@
u = uimod.ui.load()
-repo = hg.repository(u, 'test1', create=1)
+repo = hg.repository(u, b'test1', create=1)
os.chdir('test1')
def commit(text, time):
- repo.commit(text=text, date="%d 0" % time)
+ repo.commit(text=text, date=b"%d 0" % time)
def addcommit(name, time):
- f = open(name, 'w')
- f.write('%s\n' % name)
+ f = open(name, 'wb')
+ f.write(b'%s\n' % name)
f.close()
repo[None].add([name])
commit(name, time)
@@ -28,27 +28,27 @@
merge.update(repo, rev, True, False)
if __name__ == '__main__':
- addcommit("A", 0)
- addcommit("B", 1)
+ addcommit(b"A", 0)
+ addcommit(b"B", 1)
update(0)
- addcommit("C", 2)
+ addcommit(b"C", 2)
merge_(1)
- commit("D", 3)
+ commit(b"D", 3)
update(2)
- addcommit("E", 4)
- addcommit("F", 5)
+ addcommit(b"E", 4)
+ addcommit(b"F", 5)
update(3)
- addcommit("G", 6)
+ addcommit(b"G", 6)
merge_(5)
- commit("H", 7)
+ commit(b"H", 7)
update(5)
- addcommit("I", 8)
+ addcommit(b"I", 8)
# Ancestors
print('Ancestors of 5')
--- a/tests/test-revlog-v2.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-revlog-v2.t Mon Mar 19 08:07:18 2018 -0700
@@ -29,7 +29,7 @@
Unknown flags to revlog are rejected
>>> with open('.hg/store/00changelog.i', 'wb') as fh:
- ... fh.write('\x00\x04\xde\xad')
+ ... fh.write(b'\x00\x04\xde\xad')
$ hg log
abort: unknown flags (0x04) in version 57005 revlog 00changelog.i!
--- a/tests/test-revlog.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-revlog.t Mon Mar 19 08:07:18 2018 -0700
@@ -4,7 +4,7 @@
Flags on revlog version 0 are rejected
>>> with open('.hg/store/00changelog.i', 'wb') as fh:
- ... fh.write('\x00\x01\x00\x00')
+ ... fh.write(b'\x00\x01\x00\x00')
$ hg log
abort: unknown flags (0x01) in version 0 revlog 00changelog.i!
@@ -13,7 +13,7 @@
Unknown flags on revlog version 1 are rejected
>>> with open('.hg/store/00changelog.i', 'wb') as fh:
- ... fh.write('\x00\x04\x00\x01')
+ ... fh.write(b'\x00\x04\x00\x01')
$ hg log
abort: unknown flags (0x04) in version 1 revlog 00changelog.i!
@@ -22,7 +22,7 @@
Unknown version is rejected
>>> with open('.hg/store/00changelog.i', 'wb') as fh:
- ... fh.write('\x00\x00\x00\x02')
+ ... fh.write(b'\x00\x00\x00\x02')
$ hg log
abort: unknown version (2) in revlog 00changelog.i!
@@ -34,8 +34,8 @@
$ hg init
- >>> open("a.i", "w").write(
- ... """eJxjYGZgZIAAYQYGxhgom+k/FMx8YKx9ZUaKSOyqo4cnuKb8mbqHV5cBCVTMWb1Cwqkhe4Gsg9AD
+ >>> open("a.i", "wb").write(
+ ... b"""eJxjYGZgZIAAYQYGxhgom+k/FMx8YKx9ZUaKSOyqo4cnuKb8mbqHV5cBCVTMWb1Cwqkhe4Gsg9AD
... Joa3dYtcYYYBAQ8Qr4OqZAYRICPTSr5WKd/42rV36d+8/VmrNpv7NP1jQAXrQE4BqQUARngwVA=="""
... .decode("base64").decode("zlib"))
--- a/tests/test-revset.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-revset.t Mon Mar 19 08:07:18 2018 -0700
@@ -16,7 +16,7 @@
> return baseset()
> return baseset([3,3,2,2])
>
- > mercurial.revset.symbols['r3232'] = r3232
+ > mercurial.revset.symbols[b'r3232'] = r3232
> EOF
$ cat >> $HGRCPATH << EOF
> [extensions]
@@ -47,25 +47,25 @@
> cmdtable = {}
> command = registrar.command(cmdtable)
> @command(b'debugrevlistspec',
- > [('', 'optimize', None, 'print parsed tree after optimizing'),
- > ('', 'bin', None, 'unhexlify arguments')])
+ > [(b'', b'optimize', None, b'print parsed tree after optimizing'),
+ > (b'', b'bin', None, b'unhexlify arguments')])
> def debugrevlistspec(ui, repo, fmt, *args, **opts):
> if opts['bin']:
> args = map(nodemod.bin, args)
> expr = revsetlang.formatspec(fmt, list(args))
> if ui.verbose:
> tree = revsetlang.parse(expr, lookup=repo.__contains__)
- > ui.note(revsetlang.prettyformat(tree), "\n")
+ > ui.note(revsetlang.prettyformat(tree), b"\n")
> if opts["optimize"]:
> opttree = revsetlang.optimize(revsetlang.analyze(tree))
- > ui.note("* optimized:\n", revsetlang.prettyformat(opttree),
- > "\n")
+ > ui.note(b"* optimized:\n", revsetlang.prettyformat(opttree),
+ > b"\n")
> func = revset.match(ui, expr, repo)
> revs = func(repo)
> if ui.verbose:
- > ui.note("* set:\n", smartset.prettyformat(revs), "\n")
+ > ui.note(b"* set:\n", smartset.prettyformat(revs), b"\n")
> for c in revs:
- > ui.write("%s\n" % c)
+ > ui.write(b"%d\n" % c)
> EOF
$ cat <<EOF >> $HGRCPATH
> [extensions]
@@ -399,6 +399,8 @@
4
$ log 'date(this is a test)'
hg: parse error at 10: unexpected token: symbol
+ (date(this is a test)
+ ^ here)
[255]
$ log 'date()'
hg: parse error: date requires a string
@@ -408,9 +410,11 @@
[255]
$ log 'date('
hg: parse error at 5: not a prefix: end
+ (date(
+ ^ here)
[255]
$ log 'date("\xy")'
- hg: parse error: invalid \x escape
+ hg: parse error: invalid \x escape* (glob)
[255]
$ log 'date(tip)'
hg: parse error: invalid date: 'tip'
@@ -614,18 +618,28 @@
$ hg debugrevspec '[0]'
hg: parse error at 0: not a prefix: [
+ ([0]
+ ^ here)
[255]
$ hg debugrevspec '.#'
hg: parse error at 2: not a prefix: end
+ (.#
+ ^ here)
[255]
$ hg debugrevspec '#rel'
hg: parse error at 0: not a prefix: #
+ (#rel
+ ^ here)
[255]
$ hg debugrevspec '.#rel[0'
hg: parse error at 7: unexpected token: end
+ (.#rel[0
+ ^ here)
[255]
$ hg debugrevspec '.]'
hg: parse error at 1: invalid token
+ (.]
+ ^ here)
[255]
$ hg debugrevspec '.#generations[a]'
@@ -1309,7 +1323,7 @@
(func
(symbol 'grep')
(string '('))
- hg: parse error: invalid match pattern: unbalanced parenthesis
+ hg: parse error: invalid match pattern: (unbalanced parenthesis|missing \),.*) (re)
[255]
$ try 'grep("\bissue\d+")'
(func
@@ -1330,6 +1344,8 @@
6
$ try 'grep(r"\")'
hg: parse error at 7: unterminated string
+ (grep(r"\")
+ ^ here)
[255]
$ log 'head()'
0
@@ -2774,3 +2790,14 @@
$ cd ..
$ cd repo
+
+test multiline revset with errors
+
+ $ echo > multiline-revset
+ $ echo '. +' >> multiline-revset
+ $ echo '.^ +' >> multiline-revset
+ $ hg log -r "`cat multiline-revset`"
+ hg: parse error at 9: not a prefix: end
+ ( . + .^ +
+ ^ here)
+ [255]
--- a/tests/test-revset2.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-revset2.t Mon Mar 19 08:07:18 2018 -0700
@@ -420,7 +420,7 @@
test that repeated `-r` options never eat up stack (issue4565)
(uses `-r 0::1` to avoid possible optimization at old-style parser)
- $ hg log -T '{rev}\n' `$PYTHON -c "for i in xrange(500): print '-r 0::1 ',"`
+ $ hg log -T '{rev}\n' `$PYTHON -c "for i in range(500): print '-r 0::1 ',"`
0
1
@@ -690,6 +690,8 @@
$ log '1 OR 2'
hg: parse error at 2: invalid token
+ (1 OR 2
+ ^ here)
[255]
or operator should preserve ordering:
@@ -1562,6 +1564,8 @@
test error message of bad revset
$ hg log -r 'foo\\'
hg: parse error at 3: syntax error in revset 'foo\\'
+ (foo\\
+ ^ here)
[255]
$ cd ..
--- a/tests/test-rollback.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-rollback.t Mon Mar 19 08:07:18 2018 -0700
@@ -220,29 +220,29 @@
> import errno
> from mercurial.i18n import _
> from mercurial import (
+ > error,
> registrar,
- > error,
> ui as uimod,
> )
>
> configtable = {}
> configitem = registrar.configitem(configtable)
>
- > configitem('ui', 'ioerrors',
+ > configitem(b'ui', b'ioerrors',
> default=list,
> )
>
> def pretxncommit(ui, repo, **kwargs):
- > ui.warn('warn during pretxncommit\n')
+ > ui.warn(b'warn during pretxncommit\n')
>
> def pretxnclose(ui, repo, **kwargs):
- > ui.warn('warn during pretxnclose\n')
+ > ui.warn(b'warn during pretxnclose\n')
>
> def txnclose(ui, repo, **kwargs):
- > ui.warn('warn during txnclose\n')
+ > ui.warn(b'warn during txnclose\n')
>
> def txnabort(ui, repo, **kwargs):
- > ui.warn('warn during abort\n')
+ > ui.warn(b'warn during abort\n')
>
> class fdproxy(object):
> def __init__(self, ui, o):
@@ -253,25 +253,25 @@
> return getattr(self._o, attr)
>
> def write(self, msg):
- > errors = set(self._ui.configlist('ui', 'ioerrors'))
- > pretxncommit = msg == 'warn during pretxncommit\n'
- > pretxnclose = msg == 'warn during pretxnclose\n'
- > txnclose = msg == 'warn during txnclose\n'
- > txnabort = msg == 'warn during abort\n'
- > msgabort = msg == _('transaction abort!\n')
- > msgrollback = msg == _('rollback completed\n')
+ > errors = set(self._ui.configlist(b'ui', b'ioerrors'))
+ > pretxncommit = msg == b'warn during pretxncommit\n'
+ > pretxnclose = msg == b'warn during pretxnclose\n'
+ > txnclose = msg == b'warn during txnclose\n'
+ > txnabort = msg == b'warn during abort\n'
+ > msgabort = msg == _(b'transaction abort!\n')
+ > msgrollback = msg == _(b'rollback completed\n')
>
- > if pretxncommit and 'pretxncommit' in errors:
+ > if pretxncommit and b'pretxncommit' in errors:
> raise IOError(errno.EPIPE, 'simulated epipe')
- > if pretxnclose and 'pretxnclose' in errors:
+ > if pretxnclose and b'pretxnclose' in errors:
> raise IOError(errno.EIO, 'simulated eio')
- > if txnclose and 'txnclose' in errors:
+ > if txnclose and b'txnclose' in errors:
> raise IOError(errno.EBADF, 'simulated badf')
- > if txnabort and 'txnabort' in errors:
+ > if txnabort and b'txnabort' in errors:
> raise IOError(errno.EPIPE, 'simulated epipe')
- > if msgabort and 'msgabort' in errors:
+ > if msgabort and b'msgabort' in errors:
> raise IOError(errno.EBADF, 'simulated ebadf')
- > if msgrollback and 'msgrollback' in errors:
+ > if msgrollback and b'msgrollback' in errors:
> raise IOError(errno.EIO, 'simulated eio')
>
> return self._o.write(msg)
@@ -289,10 +289,10 @@
> ui.__class__ = badui
>
> def reposetup(ui, repo):
- > ui.setconfig('hooks', 'pretxnclose.badui', pretxnclose, 'badui')
- > ui.setconfig('hooks', 'txnclose.badui', txnclose, 'badui')
- > ui.setconfig('hooks', 'pretxncommit.badui', pretxncommit, 'badui')
- > ui.setconfig('hooks', 'txnabort.badui', txnabort, 'badui')
+ > ui.setconfig(b'hooks', b'pretxnclose.badui', pretxnclose, b'badui')
+ > ui.setconfig(b'hooks', b'txnclose.badui', txnclose, b'badui')
+ > ui.setconfig(b'hooks', b'pretxncommit.badui', pretxncommit, b'badui')
+ > ui.setconfig(b'hooks', b'txnabort.badui', txnabort, b'badui')
> EOF
$ cat >> $HGRCPATH << EOF
--- a/tests/test-run-tests.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-run-tests.t Mon Mar 19 08:07:18 2018 -0700
@@ -374,6 +374,7 @@
</testsuite>
$ cat .testtimes
+ test-empty.t * (glob)
test-failure-unicode.t * (glob)
test-failure.t * (glob)
test-success.t * (glob)
@@ -541,6 +542,12 @@
> EOF
$ rt test-serve-fail.t
+ --- $TESTTMP/test-serve-fail.t
+ +++ $TESTTMP/test-serve-fail.t.err
+ @@ -1* +1,2 @@ (glob)
+ $ echo 'abort: child process failed to start blah'
+ + abort: child process failed to start blah
+
ERROR: test-serve-fail.t output changed
!
Failed test-serve-fail.t: server failed to start (HGPORT=*) (glob)
@@ -914,16 +921,24 @@
================
$ cat > test-skip.t <<EOF
> $ echo xyzzy
+ > #if true
> #require false
+ > #end
+ > EOF
+ $ cat > test-noskip.t <<EOF
+ > #if false
+ > #require false
+ > #endif
> EOF
$ rt --nodiff
- !.s
+ !.s.
Skipped test-skip.t: missing feature: nail clipper
Failed test-failure.t: output changed
- # Ran 2 tests, 1 skipped, 1 failed.
+ # Ran 3 tests, 1 skipped, 1 failed.
python hash seed: * (glob)
[1]
+ $ rm test-noskip.t
$ rt --keyword xyzzy
.s
Skipped test-skip.t: missing feature: nail clipper
--- a/tests/test-sparse-clone.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-sparse-clone.t Mon Mar 19 08:07:18 2018 -0700
@@ -2,7 +2,7 @@
$ cat >> $HGRCPATH << EOF
> [ui]
- > ssh = python "$RUNTESTDIR/dummyssh"
+ > ssh = $PYTHON "$RUNTESTDIR/dummyssh"
> username = nobody <no.reply@fb.com>
> [extensions]
> sparse=
--- a/tests/test-sparse.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-sparse.t Mon Mar 19 08:07:18 2018 -0700
@@ -129,6 +129,10 @@
(include file with `hg debugsparse --include <pattern>` or use `hg add -s <file>` to include file directory while adding)
[255]
+But adding a truly excluded file shouldn't count
+
+ $ hg add hide3 -X hide3
+
Verify deleting sparseness while a file has changes fails
$ hg debugsparse --delete 'show*'
--- a/tests/test-split.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-split.t Mon Mar 19 08:07:18 2018 -0700
@@ -2,7 +2,8 @@
$ cat > $TESTTMP/editor.py <<EOF
> #!$PYTHON
- > import os, sys
+ > import os
+ > import sys
> path = os.path.join(os.environ['TESTTMP'], 'messages')
> messages = open(path).read().split('--\n')
> prompt = open(sys.argv[1]).read()
--- a/tests/test-ssh-bundle1.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-ssh-bundle1.t Mon Mar 19 08:07:18 2018 -0700
@@ -1,6 +1,16 @@
This test is a duplicate of 'test-http.t' feel free to factor out
parts that are not bundle1/bundle2 specific.
+#testcases sshv1 sshv2
+
+#if sshv2
+ $ cat >> $HGRCPATH << EOF
+ > [experimental]
+ > sshpeer.advertise-v2 = true
+ > sshserver.support-v2 = true
+ > EOF
+#endif
+
$ cat << EOF >> $HGRCPATH
> [devel]
> # This test is dedicated to interaction through old bundle
@@ -465,11 +475,13 @@
$ hg pull --debug ssh://user@dummy/remote
pulling from ssh://user@dummy/remote
running .* ".*/dummyssh" ['"]user@dummy['"] ('|")hg -R remote serve --stdio('|") (re)
+ sending upgrade request: * proto=exp-ssh-v2-0001 (glob) (sshv2 !)
sending hello command
sending between command
- remote: 384
- remote: capabilities: lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
- remote: 1
+ remote: 403 (sshv1 !)
+ protocol upgraded to exp-ssh-v2-0001 (sshv2 !)
+ remote: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN (sshv2 !)
+ remote: 1 (sshv1 !)
preparing listkeys for "bookmarks"
sending listkeys command
received listkey for "bookmarks": 45 bytes
--- a/tests/test-ssh-clone-r.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-ssh-clone-r.t Mon Mar 19 08:07:18 2018 -0700
@@ -1,5 +1,15 @@
This test tries to exercise the ssh functionality with a dummy script
+#testcases sshv1 sshv2
+
+#if sshv2
+ $ cat >> $HGRCPATH << EOF
+ > [experimental]
+ > sshpeer.advertise-v2 = true
+ > sshserver.support-v2 = true
+ > EOF
+#endif
+
creating 'remote' repo
$ hg init remote
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-ssh-proto-unbundle.t Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,2064 @@
+ $ cat > hgrc-sshv2 << EOF
+ > %include $HGRCPATH
+ > [experimental]
+ > sshpeer.advertise-v2 = true
+ > sshserver.support-v2 = true
+ > EOF
+
+ $ debugwireproto() {
+ > commands=`cat -`
+ > echo 'testing ssh1'
+ > tip=`hg log -r tip -T '{node}'`
+ > echo "${commands}" | hg --verbose debugwireproto --localssh --noreadstderr
+ > if [ -n "$1" ]; then
+ > hg --config extensions.strip= strip --no-backup -r "all() - ::${tip}"
+ > fi
+ > echo ""
+ > echo 'testing ssh2'
+ > echo "${commands}" | HGRCPATH=$TESTTMP/hgrc-sshv2 hg --verbose debugwireproto --localssh --noreadstderr
+ > if [ -n "$1" ]; then
+ > hg --config extensions.strip= strip --no-backup -r "all() - ::${tip}"
+ > fi
+ > }
+
+Generate some bundle files
+
+ $ hg init repo
+ $ cd repo
+ $ echo 0 > foo
+ $ hg -q commit -A -m initial
+ $ hg bundle --all -t none-v1 ../initial.v1.hg
+ 1 changesets found
+ $ cd ..
+
+Test pushing bundle1 payload to a server with bundle1 disabled
+
+ $ hg init no-bundle1
+ $ cd no-bundle1
+ $ cat > .hg/hgrc << EOF
+ > [server]
+ > bundle1 = false
+ > EOF
+
+ $ debugwireproto << EOF
+ > command unbundle
+ > # This is "force" in hex.
+ > heads 666f726365
+ > PUSHFILE ../initial.v1.hg
+ > readavailable
+ > EOF
+ testing ssh1
+ creating ssh peer from handshake results
+ i> write(104) -> 104:
+ i> hello\n
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ i> flush() -> None
+ o> readline() -> 4:
+ o> 403\n
+ o> readline() -> 403:
+ o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n
+ o> readline() -> 2:
+ o> 1\n
+ o> readline() -> 1:
+ o> \n
+ sending unbundle command
+ i> write(9) -> 9:
+ i> unbundle\n
+ i> write(9) -> 9:
+ i> heads 10\n
+ i> write(10) -> 10: 666f726365
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ i> write(4) -> 4:
+ i> 426\n
+ i> write(426) -> 426:
+ i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n
+ i> test\n
+ i> 0 0\n
+ i> foo\n
+ i> \n
+ i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x00
+ i> write(2) -> 2:
+ i> 0\n
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ o> readline() -> 2:
+ o> 1\n
+ o> read(1) -> 1: 0
+ result: 0
+ remote output:
+ o> read(-1) -> 0:
+ e> read(-1) -> 115:
+ e> abort: incompatible Mercurial client; bundle2 required\n
+ e> (see https://www.mercurial-scm.org/wiki/IncompatibleClient)\n
+
+ testing ssh2
+ creating ssh peer from handshake results
+ i> write(171) -> 171:
+ i> upgrade * proto=exp-ssh-v2-0001\n (glob)
+ i> hello\n
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ i> flush() -> None
+ o> readline() -> 62:
+ o> upgraded * exp-ssh-v2-0001\n (glob)
+ o> readline() -> 4:
+ o> 402\n
+ o> read(402) -> 402: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ o> read(1) -> 1:
+ o> \n
+ sending unbundle command
+ i> write(9) -> 9:
+ i> unbundle\n
+ i> write(9) -> 9:
+ i> heads 10\n
+ i> write(10) -> 10: 666f726365
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ i> write(4) -> 4:
+ i> 426\n
+ i> write(426) -> 426:
+ i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n
+ i> test\n
+ i> 0 0\n
+ i> foo\n
+ i> \n
+ i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x00
+ i> write(2) -> 2:
+ i> 0\n
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ o> readline() -> 2:
+ o> 1\n
+ o> read(1) -> 1: 0
+ result: 0
+ remote output:
+ o> read(-1) -> 0:
+ e> read(-1) -> 115:
+ e> abort: incompatible Mercurial client; bundle2 required\n
+ e> (see https://www.mercurial-scm.org/wiki/IncompatibleClient)\n
+
+ $ cd ..
+
+Create a pretxnchangegroup hook that fails. Give it multiple modes of printing
+output so we can test I/O capture and behavior.
+
+Test pushing to a server that has a pretxnchangegroup Python hook that fails
+
+ $ cat > $TESTTMP/failhook << EOF
+ > from __future__ import print_function
+ > import sys
+ > def hook1line(ui, repo, **kwargs):
+ > ui.write(b'ui.write 1 line\n')
+ > return 1
+ > def hook2lines(ui, repo, **kwargs):
+ > ui.write(b'ui.write 2 lines 1\n')
+ > ui.write(b'ui.write 2 lines 2\n')
+ > return 1
+ > def hook1lineflush(ui, repo, **kwargs):
+ > ui.write(b'ui.write 1 line flush\n')
+ > ui.flush()
+ > return 1
+ > def hookmultiflush(ui, repo, **kwargs):
+ > ui.write(b'ui.write 1st\n')
+ > ui.flush()
+ > ui.write(b'ui.write 2nd\n')
+ > ui.flush()
+ > return 1
+ > def hookwriteandwriteerr(ui, repo, **kwargs):
+ > ui.write(b'ui.write 1\n')
+ > ui.write_err(b'ui.write_err 1\n')
+ > ui.write(b'ui.write 2\n')
+ > ui.write_err(b'ui.write_err 2\n')
+ > return 1
+ > def hookprintstdout(ui, repo, **kwargs):
+ > print('printed line')
+ > return 1
+ > def hookprintandwrite(ui, repo, **kwargs):
+ > print('print 1')
+ > ui.write(b'ui.write 1\n')
+ > print('print 2')
+ > ui.write(b'ui.write 2\n')
+ > return 1
+ > def hookprintstderrandstdout(ui, repo, **kwargs):
+ > print('stdout 1')
+ > print('stderr 1', file=sys.stderr)
+ > print('stdout 2')
+ > print('stderr 2', file=sys.stderr)
+ > return 1
+ > EOF
+
+ $ hg init failrepo
+ $ cd failrepo
+
+ui.write() in hook is redirected to stderr
+
+ $ cat > .hg/hgrc << EOF
+ > [hooks]
+ > pretxnchangegroup.fail = python:$TESTTMP/failhook:hook1line
+ > EOF
+
+ $ debugwireproto << EOF
+ > command unbundle
+ > # This is "force" in hex.
+ > heads 666f726365
+ > PUSHFILE ../initial.v1.hg
+ > readavailable
+ > EOF
+ testing ssh1
+ creating ssh peer from handshake results
+ i> write(104) -> 104:
+ i> hello\n
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ i> flush() -> None
+ o> readline() -> 4:
+ o> 403\n
+ o> readline() -> 403:
+ o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n
+ o> readline() -> 2:
+ o> 1\n
+ o> readline() -> 1:
+ o> \n
+ sending unbundle command
+ i> write(9) -> 9:
+ i> unbundle\n
+ i> write(9) -> 9:
+ i> heads 10\n
+ i> write(10) -> 10: 666f726365
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ i> write(4) -> 4:
+ i> 426\n
+ i> write(426) -> 426:
+ i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n
+ i> test\n
+ i> 0 0\n
+ i> foo\n
+ i> \n
+ i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x00
+ i> write(2) -> 2:
+ i> 0\n
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ o> readline() -> 2:
+ o> 1\n
+ o> read(1) -> 1: 0
+ result: 0
+ remote output:
+ o> read(-1) -> 0:
+ e> read(-1) -> 196:
+ e> adding changesets\n
+ e> adding manifests\n
+ e> adding file changes\n
+ e> added 1 changesets with 1 changes to 1 files\n
+ e> ui.write 1 line\n
+ e> transaction abort!\n
+ e> rollback completed\n
+ e> abort: pretxnchangegroup.fail hook failed\n
+
+ testing ssh2
+ creating ssh peer from handshake results
+ i> write(171) -> 171:
+ i> upgrade * proto=exp-ssh-v2-0001\n (glob)
+ i> hello\n
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ i> flush() -> None
+ o> readline() -> 62:
+ o> upgraded * exp-ssh-v2-0001\n (glob)
+ o> readline() -> 4:
+ o> 402\n
+ o> read(402) -> 402: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ o> read(1) -> 1:
+ o> \n
+ sending unbundle command
+ i> write(9) -> 9:
+ i> unbundle\n
+ i> write(9) -> 9:
+ i> heads 10\n
+ i> write(10) -> 10: 666f726365
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ i> write(4) -> 4:
+ i> 426\n
+ i> write(426) -> 426:
+ i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n
+ i> test\n
+ i> 0 0\n
+ i> foo\n
+ i> \n
+ i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x00
+ i> write(2) -> 2:
+ i> 0\n
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ o> readline() -> 2:
+ o> 1\n
+ o> read(1) -> 1: 0
+ result: 0
+ remote output:
+ o> read(-1) -> 0:
+ e> read(-1) -> 196:
+ e> adding changesets\n
+ e> adding manifests\n
+ e> adding file changes\n
+ e> added 1 changesets with 1 changes to 1 files\n
+ e> ui.write 1 line\n
+ e> transaction abort!\n
+ e> rollback completed\n
+ e> abort: pretxnchangegroup.fail hook failed\n
+
+And a variation that writes multiple lines using ui.write
+
+ $ cat > .hg/hgrc << EOF
+ > [hooks]
+ > pretxnchangegroup.fail = python:$TESTTMP/failhook:hook2lines
+ > EOF
+
+ $ debugwireproto << EOF
+ > command unbundle
+ > # This is "force" in hex.
+ > heads 666f726365
+ > PUSHFILE ../initial.v1.hg
+ > readavailable
+ > EOF
+ testing ssh1
+ creating ssh peer from handshake results
+ i> write(104) -> 104:
+ i> hello\n
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ i> flush() -> None
+ o> readline() -> 4:
+ o> 403\n
+ o> readline() -> 403:
+ o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n
+ o> readline() -> 2:
+ o> 1\n
+ o> readline() -> 1:
+ o> \n
+ sending unbundle command
+ i> write(9) -> 9:
+ i> unbundle\n
+ i> write(9) -> 9:
+ i> heads 10\n
+ i> write(10) -> 10: 666f726365
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ i> write(4) -> 4:
+ i> 426\n
+ i> write(426) -> 426:
+ i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n
+ i> test\n
+ i> 0 0\n
+ i> foo\n
+ i> \n
+ i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x00
+ i> write(2) -> 2:
+ i> 0\n
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ o> readline() -> 2:
+ o> 1\n
+ o> read(1) -> 1: 0
+ result: 0
+ remote output:
+ o> read(-1) -> 0:
+ e> read(-1) -> 218:
+ e> adding changesets\n
+ e> adding manifests\n
+ e> adding file changes\n
+ e> added 1 changesets with 1 changes to 1 files\n
+ e> ui.write 2 lines 1\n
+ e> ui.write 2 lines 2\n
+ e> transaction abort!\n
+ e> rollback completed\n
+ e> abort: pretxnchangegroup.fail hook failed\n
+
+ testing ssh2
+ creating ssh peer from handshake results
+ i> write(171) -> 171:
+ i> upgrade * proto=exp-ssh-v2-0001\n (glob)
+ i> hello\n
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ i> flush() -> None
+ o> readline() -> 62:
+ o> upgraded * exp-ssh-v2-0001\n (glob)
+ o> readline() -> 4:
+ o> 402\n
+ o> read(402) -> 402: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ o> read(1) -> 1:
+ o> \n
+ sending unbundle command
+ i> write(9) -> 9:
+ i> unbundle\n
+ i> write(9) -> 9:
+ i> heads 10\n
+ i> write(10) -> 10: 666f726365
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ i> write(4) -> 4:
+ i> 426\n
+ i> write(426) -> 426:
+ i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n
+ i> test\n
+ i> 0 0\n
+ i> foo\n
+ i> \n
+ i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x00
+ i> write(2) -> 2:
+ i> 0\n
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ o> readline() -> 2:
+ o> 1\n
+ o> read(1) -> 1: 0
+ result: 0
+ remote output:
+ o> read(-1) -> 0:
+ e> read(-1) -> 218:
+ e> adding changesets\n
+ e> adding manifests\n
+ e> adding file changes\n
+ e> added 1 changesets with 1 changes to 1 files\n
+ e> ui.write 2 lines 1\n
+ e> ui.write 2 lines 2\n
+ e> transaction abort!\n
+ e> rollback completed\n
+ e> abort: pretxnchangegroup.fail hook failed\n
+
+And a variation that does a ui.flush() after writing output
+
+ $ cat > .hg/hgrc << EOF
+ > [hooks]
+ > pretxnchangegroup.fail = python:$TESTTMP/failhook:hook1lineflush
+ > EOF
+
+ $ debugwireproto << EOF
+ > command unbundle
+ > # This is "force" in hex.
+ > heads 666f726365
+ > PUSHFILE ../initial.v1.hg
+ > readavailable
+ > EOF
+ testing ssh1
+ creating ssh peer from handshake results
+ i> write(104) -> 104:
+ i> hello\n
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ i> flush() -> None
+ o> readline() -> 4:
+ o> 403\n
+ o> readline() -> 403:
+ o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n
+ o> readline() -> 2:
+ o> 1\n
+ o> readline() -> 1:
+ o> \n
+ sending unbundle command
+ i> write(9) -> 9:
+ i> unbundle\n
+ i> write(9) -> 9:
+ i> heads 10\n
+ i> write(10) -> 10: 666f726365
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ i> write(4) -> 4:
+ i> 426\n
+ i> write(426) -> 426:
+ i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n
+ i> test\n
+ i> 0 0\n
+ i> foo\n
+ i> \n
+ i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x00
+ i> write(2) -> 2:
+ i> 0\n
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ o> readline() -> 2:
+ o> 1\n
+ o> read(1) -> 1: 0
+ result: 0
+ remote output:
+ o> read(-1) -> 0:
+ e> read(-1) -> 202:
+ e> adding changesets\n
+ e> adding manifests\n
+ e> adding file changes\n
+ e> added 1 changesets with 1 changes to 1 files\n
+ e> ui.write 1 line flush\n
+ e> transaction abort!\n
+ e> rollback completed\n
+ e> abort: pretxnchangegroup.fail hook failed\n
+
+ testing ssh2
+ creating ssh peer from handshake results
+ i> write(171) -> 171:
+ i> upgrade * proto=exp-ssh-v2-0001\n (glob)
+ i> hello\n
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ i> flush() -> None
+ o> readline() -> 62:
+ o> upgraded * exp-ssh-v2-0001\n (glob)
+ o> readline() -> 4:
+ o> 402\n
+ o> read(402) -> 402: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ o> read(1) -> 1:
+ o> \n
+ sending unbundle command
+ i> write(9) -> 9:
+ i> unbundle\n
+ i> write(9) -> 9:
+ i> heads 10\n
+ i> write(10) -> 10: 666f726365
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ i> write(4) -> 4:
+ i> 426\n
+ i> write(426) -> 426:
+ i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n
+ i> test\n
+ i> 0 0\n
+ i> foo\n
+ i> \n
+ i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x00
+ i> write(2) -> 2:
+ i> 0\n
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ o> readline() -> 2:
+ o> 1\n
+ o> read(1) -> 1: 0
+ result: 0
+ remote output:
+ o> read(-1) -> 0:
+ e> read(-1) -> 202:
+ e> adding changesets\n
+ e> adding manifests\n
+ e> adding file changes\n
+ e> added 1 changesets with 1 changes to 1 files\n
+ e> ui.write 1 line flush\n
+ e> transaction abort!\n
+ e> rollback completed\n
+ e> abort: pretxnchangegroup.fail hook failed\n
+
+Multiple writes + flush
+
+ $ cat > .hg/hgrc << EOF
+ > [hooks]
+ > pretxnchangegroup.fail = python:$TESTTMP/failhook:hookmultiflush
+ > EOF
+
+ $ debugwireproto << EOF
+ > command unbundle
+ > # This is "force" in hex.
+ > heads 666f726365
+ > PUSHFILE ../initial.v1.hg
+ > readavailable
+ > EOF
+ testing ssh1
+ creating ssh peer from handshake results
+ i> write(104) -> 104:
+ i> hello\n
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ i> flush() -> None
+ o> readline() -> 4:
+ o> 403\n
+ o> readline() -> 403:
+ o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n
+ o> readline() -> 2:
+ o> 1\n
+ o> readline() -> 1:
+ o> \n
+ sending unbundle command
+ i> write(9) -> 9:
+ i> unbundle\n
+ i> write(9) -> 9:
+ i> heads 10\n
+ i> write(10) -> 10: 666f726365
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ i> write(4) -> 4:
+ i> 426\n
+ i> write(426) -> 426:
+ i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n
+ i> test\n
+ i> 0 0\n
+ i> foo\n
+ i> \n
+ i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x00
+ i> write(2) -> 2:
+ i> 0\n
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ o> readline() -> 2:
+ o> 1\n
+ o> read(1) -> 1: 0
+ result: 0
+ remote output:
+ o> read(-1) -> 0:
+ e> read(-1) -> 206:
+ e> adding changesets\n
+ e> adding manifests\n
+ e> adding file changes\n
+ e> added 1 changesets with 1 changes to 1 files\n
+ e> ui.write 1st\n
+ e> ui.write 2nd\n
+ e> transaction abort!\n
+ e> rollback completed\n
+ e> abort: pretxnchangegroup.fail hook failed\n
+
+ testing ssh2
+ creating ssh peer from handshake results
+ i> write(171) -> 171:
+ i> upgrade * proto=exp-ssh-v2-0001\n (glob)
+ i> hello\n
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ i> flush() -> None
+ o> readline() -> 62:
+ o> upgraded * exp-ssh-v2-0001\n (glob)
+ o> readline() -> 4:
+ o> 402\n
+ o> read(402) -> 402: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ o> read(1) -> 1:
+ o> \n
+ sending unbundle command
+ i> write(9) -> 9:
+ i> unbundle\n
+ i> write(9) -> 9:
+ i> heads 10\n
+ i> write(10) -> 10: 666f726365
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ i> write(4) -> 4:
+ i> 426\n
+ i> write(426) -> 426:
+ i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n
+ i> test\n
+ i> 0 0\n
+ i> foo\n
+ i> \n
+ i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x00
+ i> write(2) -> 2:
+ i> 0\n
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ o> readline() -> 2:
+ o> 1\n
+ o> read(1) -> 1: 0
+ result: 0
+ remote output:
+ o> read(-1) -> 0:
+ e> read(-1) -> 206:
+ e> adding changesets\n
+ e> adding manifests\n
+ e> adding file changes\n
+ e> added 1 changesets with 1 changes to 1 files\n
+ e> ui.write 1st\n
+ e> ui.write 2nd\n
+ e> transaction abort!\n
+ e> rollback completed\n
+ e> abort: pretxnchangegroup.fail hook failed\n
+
+ui.write() + ui.write_err() output is captured
+
+ $ cat > .hg/hgrc << EOF
+ > [hooks]
+ > pretxnchangegroup.fail = python:$TESTTMP/failhook:hookwriteandwriteerr
+ > EOF
+
+ $ debugwireproto << EOF
+ > command unbundle
+ > # This is "force" in hex.
+ > heads 666f726365
+ > PUSHFILE ../initial.v1.hg
+ > readavailable
+ > EOF
+ testing ssh1
+ creating ssh peer from handshake results
+ i> write(104) -> 104:
+ i> hello\n
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ i> flush() -> None
+ o> readline() -> 4:
+ o> 403\n
+ o> readline() -> 403:
+ o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n
+ o> readline() -> 2:
+ o> 1\n
+ o> readline() -> 1:
+ o> \n
+ sending unbundle command
+ i> write(9) -> 9:
+ i> unbundle\n
+ i> write(9) -> 9:
+ i> heads 10\n
+ i> write(10) -> 10: 666f726365
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ i> write(4) -> 4:
+ i> 426\n
+ i> write(426) -> 426:
+ i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n
+ i> test\n
+ i> 0 0\n
+ i> foo\n
+ i> \n
+ i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x00
+ i> write(2) -> 2:
+ i> 0\n
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ o> readline() -> 2:
+ o> 1\n
+ o> read(1) -> 1: 0
+ result: 0
+ remote output:
+ o> read(-1) -> 0:
+ e> read(-1) -> 232:
+ e> adding changesets\n
+ e> adding manifests\n
+ e> adding file changes\n
+ e> added 1 changesets with 1 changes to 1 files\n
+ e> ui.write 1\n
+ e> ui.write_err 1\n
+ e> ui.write 2\n
+ e> ui.write_err 2\n
+ e> transaction abort!\n
+ e> rollback completed\n
+ e> abort: pretxnchangegroup.fail hook failed\n
+
+ testing ssh2
+ creating ssh peer from handshake results
+ i> write(171) -> 171:
+ i> upgrade * proto=exp-ssh-v2-0001\n (glob)
+ i> hello\n
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ i> flush() -> None
+ o> readline() -> 62:
+ o> upgraded * exp-ssh-v2-0001\n (glob)
+ o> readline() -> 4:
+ o> 402\n
+ o> read(402) -> 402: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ o> read(1) -> 1:
+ o> \n
+ sending unbundle command
+ i> write(9) -> 9:
+ i> unbundle\n
+ i> write(9) -> 9:
+ i> heads 10\n
+ i> write(10) -> 10: 666f726365
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ i> write(4) -> 4:
+ i> 426\n
+ i> write(426) -> 426:
+ i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n
+ i> test\n
+ i> 0 0\n
+ i> foo\n
+ i> \n
+ i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x00
+ i> write(2) -> 2:
+ i> 0\n
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ o> readline() -> 2:
+ o> 1\n
+ o> read(1) -> 1: 0
+ result: 0
+ remote output:
+ o> read(-1) -> 0:
+ e> read(-1) -> 232:
+ e> adding changesets\n
+ e> adding manifests\n
+ e> adding file changes\n
+ e> added 1 changesets with 1 changes to 1 files\n
+ e> ui.write 1\n
+ e> ui.write_err 1\n
+ e> ui.write 2\n
+ e> ui.write_err 2\n
+ e> transaction abort!\n
+ e> rollback completed\n
+ e> abort: pretxnchangegroup.fail hook failed\n
+
+print() output is captured
+
+ $ cat > .hg/hgrc << EOF
+ > [hooks]
+ > pretxnchangegroup.fail = python:$TESTTMP/failhook:hookprintstdout
+ > EOF
+
+ $ debugwireproto << EOF
+ > command unbundle
+ > # This is "force" in hex.
+ > heads 666f726365
+ > PUSHFILE ../initial.v1.hg
+ > readavailable
+ > EOF
+ testing ssh1
+ creating ssh peer from handshake results
+ i> write(104) -> 104:
+ i> hello\n
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ i> flush() -> None
+ o> readline() -> 4:
+ o> 403\n
+ o> readline() -> 403:
+ o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n
+ o> readline() -> 2:
+ o> 1\n
+ o> readline() -> 1:
+ o> \n
+ sending unbundle command
+ i> write(9) -> 9:
+ i> unbundle\n
+ i> write(9) -> 9:
+ i> heads 10\n
+ i> write(10) -> 10: 666f726365
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ i> write(4) -> 4:
+ i> 426\n
+ i> write(426) -> 426:
+ i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n
+ i> test\n
+ i> 0 0\n
+ i> foo\n
+ i> \n
+ i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x00
+ i> write(2) -> 2:
+ i> 0\n
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ o> readline() -> 2:
+ o> 1\n
+ o> read(1) -> 1: 0
+ result: 0
+ remote output:
+ o> read(-1) -> 0:
+ e> read(-1) -> 193:
+ e> adding changesets\n
+ e> adding manifests\n
+ e> adding file changes\n
+ e> added 1 changesets with 1 changes to 1 files\n
+ e> printed line\n
+ e> transaction abort!\n
+ e> rollback completed\n
+ e> abort: pretxnchangegroup.fail hook failed\n
+
+ testing ssh2
+ creating ssh peer from handshake results
+ i> write(171) -> 171:
+ i> upgrade * proto=exp-ssh-v2-0001\n (glob)
+ i> hello\n
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ i> flush() -> None
+ o> readline() -> 62:
+ o> upgraded * exp-ssh-v2-0001\n (glob)
+ o> readline() -> 4:
+ o> 402\n
+ o> read(402) -> 402: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ o> read(1) -> 1:
+ o> \n
+ sending unbundle command
+ i> write(9) -> 9:
+ i> unbundle\n
+ i> write(9) -> 9:
+ i> heads 10\n
+ i> write(10) -> 10: 666f726365
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ i> write(4) -> 4:
+ i> 426\n
+ i> write(426) -> 426:
+ i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n
+ i> test\n
+ i> 0 0\n
+ i> foo\n
+ i> \n
+ i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x00
+ i> write(2) -> 2:
+ i> 0\n
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ o> readline() -> 2:
+ o> 1\n
+ o> read(1) -> 1: 0
+ result: 0
+ remote output:
+ o> read(-1) -> 0:
+ e> read(-1) -> 193:
+ e> adding changesets\n
+ e> adding manifests\n
+ e> adding file changes\n
+ e> added 1 changesets with 1 changes to 1 files\n
+ e> printed line\n
+ e> transaction abort!\n
+ e> rollback completed\n
+ e> abort: pretxnchangegroup.fail hook failed\n
+
+Mixed print() and ui.write() are both captured
+
+ $ cat > .hg/hgrc << EOF
+ > [hooks]
+ > pretxnchangegroup.fail = python:$TESTTMP/failhook:hookprintandwrite
+ > EOF
+
+ $ debugwireproto << EOF
+ > command unbundle
+ > # This is "force" in hex.
+ > heads 666f726365
+ > PUSHFILE ../initial.v1.hg
+ > readavailable
+ > EOF
+ testing ssh1
+ creating ssh peer from handshake results
+ i> write(104) -> 104:
+ i> hello\n
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ i> flush() -> None
+ o> readline() -> 4:
+ o> 403\n
+ o> readline() -> 403:
+ o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n
+ o> readline() -> 2:
+ o> 1\n
+ o> readline() -> 1:
+ o> \n
+ sending unbundle command
+ i> write(9) -> 9:
+ i> unbundle\n
+ i> write(9) -> 9:
+ i> heads 10\n
+ i> write(10) -> 10: 666f726365
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ i> write(4) -> 4:
+ i> 426\n
+ i> write(426) -> 426:
+ i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n
+ i> test\n
+ i> 0 0\n
+ i> foo\n
+ i> \n
+ i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x00
+ i> write(2) -> 2:
+ i> 0\n
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ o> readline() -> 2:
+ o> 1\n
+ o> read(1) -> 1: 0
+ result: 0
+ remote output:
+ o> read(-1) -> 0:
+ e> read(-1) -> 218:
+ e> adding changesets\n
+ e> adding manifests\n
+ e> adding file changes\n
+ e> added 1 changesets with 1 changes to 1 files\n
+ e> ui.write 1\n
+ e> ui.write 2\n
+ e> print 1\n
+ e> print 2\n
+ e> transaction abort!\n
+ e> rollback completed\n
+ e> abort: pretxnchangegroup.fail hook failed\n
+
+ testing ssh2
+ creating ssh peer from handshake results
+ i> write(171) -> 171:
+ i> upgrade * proto=exp-ssh-v2-0001\n (glob)
+ i> hello\n
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ i> flush() -> None
+ o> readline() -> 62:
+ o> upgraded * exp-ssh-v2-0001\n (glob)
+ o> readline() -> 4:
+ o> 402\n
+ o> read(402) -> 402: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ o> read(1) -> 1:
+ o> \n
+ sending unbundle command
+ i> write(9) -> 9:
+ i> unbundle\n
+ i> write(9) -> 9:
+ i> heads 10\n
+ i> write(10) -> 10: 666f726365
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ i> write(4) -> 4:
+ i> 426\n
+ i> write(426) -> 426:
+ i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n
+ i> test\n
+ i> 0 0\n
+ i> foo\n
+ i> \n
+ i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x00
+ i> write(2) -> 2:
+ i> 0\n
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ o> readline() -> 2:
+ o> 1\n
+ o> read(1) -> 1: 0
+ result: 0
+ remote output:
+ o> read(-1) -> 0:
+ e> read(-1) -> 218:
+ e> adding changesets\n
+ e> adding manifests\n
+ e> adding file changes\n
+ e> added 1 changesets with 1 changes to 1 files\n
+ e> ui.write 1\n
+ e> ui.write 2\n
+ e> print 1\n
+ e> print 2\n
+ e> transaction abort!\n
+ e> rollback completed\n
+ e> abort: pretxnchangegroup.fail hook failed\n
+
+print() to stdout and stderr both get captured
+
+ $ cat > .hg/hgrc << EOF
+ > [hooks]
+ > pretxnchangegroup.fail = python:$TESTTMP/failhook:hookprintstderrandstdout
+ > EOF
+
+ $ debugwireproto << EOF
+ > command unbundle
+ > # This is "force" in hex.
+ > heads 666f726365
+ > PUSHFILE ../initial.v1.hg
+ > readavailable
+ > EOF
+ testing ssh1
+ creating ssh peer from handshake results
+ i> write(104) -> 104:
+ i> hello\n
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ i> flush() -> None
+ o> readline() -> 4:
+ o> 403\n
+ o> readline() -> 403:
+ o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n
+ o> readline() -> 2:
+ o> 1\n
+ o> readline() -> 1:
+ o> \n
+ sending unbundle command
+ i> write(9) -> 9:
+ i> unbundle\n
+ i> write(9) -> 9:
+ i> heads 10\n
+ i> write(10) -> 10: 666f726365
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ i> write(4) -> 4:
+ i> 426\n
+ i> write(426) -> 426:
+ i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n
+ i> test\n
+ i> 0 0\n
+ i> foo\n
+ i> \n
+ i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x00
+ i> write(2) -> 2:
+ i> 0\n
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ o> readline() -> 2:
+ o> 1\n
+ o> read(1) -> 1: 0
+ result: 0
+ remote output:
+ o> read(-1) -> 0:
+ e> read(-1) -> 216:
+ e> adding changesets\n
+ e> adding manifests\n
+ e> adding file changes\n
+ e> added 1 changesets with 1 changes to 1 files\n
+ e> stderr 1\n
+ e> stderr 2\n
+ e> stdout 1\n
+ e> stdout 2\n
+ e> transaction abort!\n
+ e> rollback completed\n
+ e> abort: pretxnchangegroup.fail hook failed\n
+
+ testing ssh2
+ creating ssh peer from handshake results
+ i> write(171) -> 171:
+ i> upgrade * proto=exp-ssh-v2-0001\n (glob)
+ i> hello\n
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ i> flush() -> None
+ o> readline() -> 62:
+ o> upgraded * exp-ssh-v2-0001\n (glob)
+ o> readline() -> 4:
+ o> 402\n
+ o> read(402) -> 402: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ o> read(1) -> 1:
+ o> \n
+ sending unbundle command
+ i> write(9) -> 9:
+ i> unbundle\n
+ i> write(9) -> 9:
+ i> heads 10\n
+ i> write(10) -> 10: 666f726365
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ i> write(4) -> 4:
+ i> 426\n
+ i> write(426) -> 426:
+ i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n
+ i> test\n
+ i> 0 0\n
+ i> foo\n
+ i> \n
+ i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x00
+ i> write(2) -> 2:
+ i> 0\n
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ o> readline() -> 2:
+ o> 1\n
+ o> read(1) -> 1: 0
+ result: 0
+ remote output:
+ o> read(-1) -> 0:
+ e> read(-1) -> 216:
+ e> adding changesets\n
+ e> adding manifests\n
+ e> adding file changes\n
+ e> added 1 changesets with 1 changes to 1 files\n
+ e> stderr 1\n
+ e> stderr 2\n
+ e> stdout 1\n
+ e> stdout 2\n
+ e> transaction abort!\n
+ e> rollback completed\n
+ e> abort: pretxnchangegroup.fail hook failed\n
+
+Shell hook writing to stdout has output captured
+
+ $ cat > $TESTTMP/hook.sh << EOF
+ > echo 'stdout 1'
+ > echo 'stdout 2'
+ > exit 1
+ > EOF
+
+ $ cat > .hg/hgrc << EOF
+ > [hooks]
+ > pretxnchangegroup.fail = sh $TESTTMP/hook.sh
+ > EOF
+
+ $ debugwireproto << EOF
+ > command unbundle
+ > # This is "force" in hex.
+ > heads 666f726365
+ > PUSHFILE ../initial.v1.hg
+ > readavailable
+ > EOF
+ testing ssh1
+ creating ssh peer from handshake results
+ i> write(104) -> 104:
+ i> hello\n
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ i> flush() -> None
+ o> readline() -> 4:
+ o> 403\n
+ o> readline() -> 403:
+ o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n
+ o> readline() -> 2:
+ o> 1\n
+ o> readline() -> 1:
+ o> \n
+ sending unbundle command
+ i> write(9) -> 9:
+ i> unbundle\n
+ i> write(9) -> 9:
+ i> heads 10\n
+ i> write(10) -> 10: 666f726365
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ i> write(4) -> 4:
+ i> 426\n
+ i> write(426) -> 426:
+ i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n
+ i> test\n
+ i> 0 0\n
+ i> foo\n
+ i> \n
+ i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x00
+ i> write(2) -> 2:
+ i> 0\n
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ o> readline() -> 2:
+ o> 1\n
+ o> read(1) -> 1: 0
+ result: 0
+ remote output:
+ o> read(-1) -> 0:
+ e> read(-1) -> 212:
+ e> adding changesets\n
+ e> adding manifests\n
+ e> adding file changes\n
+ e> added 1 changesets with 1 changes to 1 files\n
+ e> stdout 1\n
+ e> stdout 2\n
+ e> transaction abort!\n
+ e> rollback completed\n
+ e> abort: pretxnchangegroup.fail hook exited with status 1\n
+
+ testing ssh2
+ creating ssh peer from handshake results
+ i> write(171) -> 171:
+ i> upgrade * proto=exp-ssh-v2-0001\n (glob)
+ i> hello\n
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ i> flush() -> None
+ o> readline() -> 62:
+ o> upgraded * exp-ssh-v2-0001\n (glob)
+ o> readline() -> 4:
+ o> 402\n
+ o> read(402) -> 402: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ o> read(1) -> 1:
+ o> \n
+ sending unbundle command
+ i> write(9) -> 9:
+ i> unbundle\n
+ i> write(9) -> 9:
+ i> heads 10\n
+ i> write(10) -> 10: 666f726365
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ i> write(4) -> 4:
+ i> 426\n
+ i> write(426) -> 426:
+ i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n
+ i> test\n
+ i> 0 0\n
+ i> foo\n
+ i> \n
+ i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x00
+ i> write(2) -> 2:
+ i> 0\n
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ o> readline() -> 2:
+ o> 1\n
+ o> read(1) -> 1: 0
+ result: 0
+ remote output:
+ o> read(-1) -> 0:
+ e> read(-1) -> 212:
+ e> adding changesets\n
+ e> adding manifests\n
+ e> adding file changes\n
+ e> added 1 changesets with 1 changes to 1 files\n
+ e> stdout 1\n
+ e> stdout 2\n
+ e> transaction abort!\n
+ e> rollback completed\n
+ e> abort: pretxnchangegroup.fail hook exited with status 1\n
+
+Shell hook writing to stderr has output captured
+
+ $ cat > $TESTTMP/hook.sh << EOF
+ > echo 'stderr 1' 1>&2
+ > echo 'stderr 2' 1>&2
+ > exit 1
+ > EOF
+
+ $ debugwireproto << EOF
+ > command unbundle
+ > # This is "force" in hex.
+ > heads 666f726365
+ > PUSHFILE ../initial.v1.hg
+ > readavailable
+ > EOF
+ testing ssh1
+ creating ssh peer from handshake results
+ i> write(104) -> 104:
+ i> hello\n
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ i> flush() -> None
+ o> readline() -> 4:
+ o> 403\n
+ o> readline() -> 403:
+ o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n
+ o> readline() -> 2:
+ o> 1\n
+ o> readline() -> 1:
+ o> \n
+ sending unbundle command
+ i> write(9) -> 9:
+ i> unbundle\n
+ i> write(9) -> 9:
+ i> heads 10\n
+ i> write(10) -> 10: 666f726365
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ i> write(4) -> 4:
+ i> 426\n
+ i> write(426) -> 426:
+ i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n
+ i> test\n
+ i> 0 0\n
+ i> foo\n
+ i> \n
+ i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x00
+ i> write(2) -> 2:
+ i> 0\n
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ o> readline() -> 2:
+ o> 1\n
+ o> read(1) -> 1: 0
+ result: 0
+ remote output:
+ o> read(-1) -> 0:
+ e> read(-1) -> 212:
+ e> adding changesets\n
+ e> adding manifests\n
+ e> adding file changes\n
+ e> added 1 changesets with 1 changes to 1 files\n
+ e> stderr 1\n
+ e> stderr 2\n
+ e> transaction abort!\n
+ e> rollback completed\n
+ e> abort: pretxnchangegroup.fail hook exited with status 1\n
+
+ testing ssh2
+ creating ssh peer from handshake results
+ i> write(171) -> 171:
+ i> upgrade * proto=exp-ssh-v2-0001\n (glob)
+ i> hello\n
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ i> flush() -> None
+ o> readline() -> 62:
+ o> upgraded * exp-ssh-v2-0001\n (glob)
+ o> readline() -> 4:
+ o> 402\n
+ o> read(402) -> 402: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ o> read(1) -> 1:
+ o> \n
+ sending unbundle command
+ i> write(9) -> 9:
+ i> unbundle\n
+ i> write(9) -> 9:
+ i> heads 10\n
+ i> write(10) -> 10: 666f726365
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ i> write(4) -> 4:
+ i> 426\n
+ i> write(426) -> 426:
+ i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n
+ i> test\n
+ i> 0 0\n
+ i> foo\n
+ i> \n
+ i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x00
+ i> write(2) -> 2:
+ i> 0\n
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ o> readline() -> 2:
+ o> 1\n
+ o> read(1) -> 1: 0
+ result: 0
+ remote output:
+ o> read(-1) -> 0:
+ e> read(-1) -> 212:
+ e> adding changesets\n
+ e> adding manifests\n
+ e> adding file changes\n
+ e> added 1 changesets with 1 changes to 1 files\n
+ e> stderr 1\n
+ e> stderr 2\n
+ e> transaction abort!\n
+ e> rollback completed\n
+ e> abort: pretxnchangegroup.fail hook exited with status 1\n
+
+Shell hook writing to stdout and stderr has output captured
+
+ $ cat > $TESTTMP/hook.sh << EOF
+ > echo 'stdout 1'
+ > echo 'stderr 1' 1>&2
+ > echo 'stdout 2'
+ > echo 'stderr 2' 1>&2
+ > exit 1
+ > EOF
+
+ $ debugwireproto << EOF
+ > command unbundle
+ > # This is "force" in hex.
+ > heads 666f726365
+ > PUSHFILE ../initial.v1.hg
+ > readavailable
+ > EOF
+ testing ssh1
+ creating ssh peer from handshake results
+ i> write(104) -> 104:
+ i> hello\n
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ i> flush() -> None
+ o> readline() -> 4:
+ o> 403\n
+ o> readline() -> 403:
+ o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n
+ o> readline() -> 2:
+ o> 1\n
+ o> readline() -> 1:
+ o> \n
+ sending unbundle command
+ i> write(9) -> 9:
+ i> unbundle\n
+ i> write(9) -> 9:
+ i> heads 10\n
+ i> write(10) -> 10: 666f726365
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ i> write(4) -> 4:
+ i> 426\n
+ i> write(426) -> 426:
+ i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n
+ i> test\n
+ i> 0 0\n
+ i> foo\n
+ i> \n
+ i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x00
+ i> write(2) -> 2:
+ i> 0\n
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ o> readline() -> 2:
+ o> 1\n
+ o> read(1) -> 1: 0
+ result: 0
+ remote output:
+ o> read(-1) -> 0:
+ e> read(-1) -> 230:
+ e> adding changesets\n
+ e> adding manifests\n
+ e> adding file changes\n
+ e> added 1 changesets with 1 changes to 1 files\n
+ e> stdout 1\n
+ e> stderr 1\n
+ e> stdout 2\n
+ e> stderr 2\n
+ e> transaction abort!\n
+ e> rollback completed\n
+ e> abort: pretxnchangegroup.fail hook exited with status 1\n
+
+ testing ssh2
+ creating ssh peer from handshake results
+ i> write(171) -> 171:
+ i> upgrade * proto=exp-ssh-v2-0001\n (glob)
+ i> hello\n
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ i> flush() -> None
+ o> readline() -> 62:
+ o> upgraded * exp-ssh-v2-0001\n (glob)
+ o> readline() -> 4:
+ o> 402\n
+ o> read(402) -> 402: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ o> read(1) -> 1:
+ o> \n
+ sending unbundle command
+ i> write(9) -> 9:
+ i> unbundle\n
+ i> write(9) -> 9:
+ i> heads 10\n
+ i> write(10) -> 10: 666f726365
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ i> write(4) -> 4:
+ i> 426\n
+ i> write(426) -> 426:
+ i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n
+ i> test\n
+ i> 0 0\n
+ i> foo\n
+ i> \n
+ i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x00
+ i> write(2) -> 2:
+ i> 0\n
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ o> readline() -> 2:
+ o> 1\n
+ o> read(1) -> 1: 0
+ result: 0
+ remote output:
+ o> read(-1) -> 0:
+ e> read(-1) -> 230:
+ e> adding changesets\n
+ e> adding manifests\n
+ e> adding file changes\n
+ e> added 1 changesets with 1 changes to 1 files\n
+ e> stdout 1\n
+ e> stderr 1\n
+ e> stdout 2\n
+ e> stderr 2\n
+ e> transaction abort!\n
+ e> rollback completed\n
+ e> abort: pretxnchangegroup.fail hook exited with status 1\n
+
+Shell and Python hooks writing to stdout and stderr have output captured
+
+ $ cat > $TESTTMP/hook.sh << EOF
+ > echo 'shell stdout 1'
+ > echo 'shell stderr 1' 1>&2
+ > echo 'shell stdout 2'
+ > echo 'shell stderr 2' 1>&2
+ > exit 0
+ > EOF
+
+ $ cat > .hg/hgrc << EOF
+ > [hooks]
+ > pretxnchangegroup.a = sh $TESTTMP/hook.sh
+ > pretxnchangegroup.b = python:$TESTTMP/failhook:hookprintstderrandstdout
+ > EOF
+
+ $ debugwireproto << EOF
+ > command unbundle
+ > # This is "force" in hex.
+ > heads 666f726365
+ > PUSHFILE ../initial.v1.hg
+ > readavailable
+ > EOF
+ testing ssh1
+ creating ssh peer from handshake results
+ i> write(104) -> 104:
+ i> hello\n
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ i> flush() -> None
+ o> readline() -> 4:
+ o> 403\n
+ o> readline() -> 403:
+ o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n
+ o> readline() -> 2:
+ o> 1\n
+ o> readline() -> 1:
+ o> \n
+ sending unbundle command
+ i> write(9) -> 9:
+ i> unbundle\n
+ i> write(9) -> 9:
+ i> heads 10\n
+ i> write(10) -> 10: 666f726365
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ i> write(4) -> 4:
+ i> 426\n
+ i> write(426) -> 426:
+ i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n
+ i> test\n
+ i> 0 0\n
+ i> foo\n
+ i> \n
+ i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x00
+ i> write(2) -> 2:
+ i> 0\n
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ o> readline() -> 2:
+ o> 1\n
+ o> read(1) -> 1: 0
+ result: 0
+ remote output:
+ o> read(-1) -> 0:
+ e> read(-1) -> 273:
+ e> adding changesets\n
+ e> adding manifests\n
+ e> adding file changes\n
+ e> added 1 changesets with 1 changes to 1 files\n
+ e> shell stdout 1\n
+ e> shell stderr 1\n
+ e> shell stdout 2\n
+ e> shell stderr 2\n
+ e> stderr 1\n
+ e> stderr 2\n
+ e> stdout 1\n
+ e> stdout 2\n
+ e> transaction abort!\n
+ e> rollback completed\n
+ e> abort: pretxnchangegroup.b hook failed\n
+
+ testing ssh2
+ creating ssh peer from handshake results
+ i> write(171) -> 171:
+ i> upgrade * proto=exp-ssh-v2-0001\n (glob)
+ i> hello\n
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ i> flush() -> None
+ o> readline() -> 62:
+ o> upgraded * exp-ssh-v2-0001\n (glob)
+ o> readline() -> 4:
+ o> 402\n
+ o> read(402) -> 402: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ o> read(1) -> 1:
+ o> \n
+ sending unbundle command
+ i> write(9) -> 9:
+ i> unbundle\n
+ i> write(9) -> 9:
+ i> heads 10\n
+ i> write(10) -> 10: 666f726365
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ i> write(4) -> 4:
+ i> 426\n
+ i> write(426) -> 426:
+ i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n
+ i> test\n
+ i> 0 0\n
+ i> foo\n
+ i> \n
+ i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x00
+ i> write(2) -> 2:
+ i> 0\n
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ o> readline() -> 2:
+ o> 1\n
+ o> read(1) -> 1: 0
+ result: 0
+ remote output:
+ o> read(-1) -> 0:
+ e> read(-1) -> 273:
+ e> adding changesets\n
+ e> adding manifests\n
+ e> adding file changes\n
+ e> added 1 changesets with 1 changes to 1 files\n
+ e> shell stdout 1\n
+ e> shell stderr 1\n
+ e> shell stdout 2\n
+ e> shell stderr 2\n
+ e> stderr 1\n
+ e> stderr 2\n
+ e> stdout 1\n
+ e> stdout 2\n
+ e> transaction abort!\n
+ e> rollback completed\n
+ e> abort: pretxnchangegroup.b hook failed\n
+
+ $ cd ..
+
+Pushing a bundle1 with no output
+
+ $ hg init simplerepo
+ $ cd simplerepo
+
+ $ debugwireproto 1 << EOF
+ > command unbundle
+ > # This is "force" in hex.
+ > heads 666f726365
+ > PUSHFILE ../initial.v1.hg
+ > readavailable
+ > EOF
+ testing ssh1
+ creating ssh peer from handshake results
+ i> write(104) -> 104:
+ i> hello\n
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ i> flush() -> None
+ o> readline() -> 4:
+ o> 403\n
+ o> readline() -> 403:
+ o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n
+ o> readline() -> 2:
+ o> 1\n
+ o> readline() -> 1:
+ o> \n
+ sending unbundle command
+ i> write(9) -> 9:
+ i> unbundle\n
+ i> write(9) -> 9:
+ i> heads 10\n
+ i> write(10) -> 10: 666f726365
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ i> write(4) -> 4:
+ i> 426\n
+ i> write(426) -> 426:
+ i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n
+ i> test\n
+ i> 0 0\n
+ i> foo\n
+ i> \n
+ i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x00
+ i> write(2) -> 2:
+ i> 0\n
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ o> readline() -> 2:
+ o> 1\n
+ o> read(1) -> 1: 1
+ result: 1
+ remote output:
+ o> read(-1) -> 0:
+ e> read(-1) -> 100:
+ e> adding changesets\n
+ e> adding manifests\n
+ e> adding file changes\n
+ e> added 1 changesets with 1 changes to 1 files\n
+
+ testing ssh2
+ creating ssh peer from handshake results
+ i> write(171) -> 171:
+ i> upgrade * proto=exp-ssh-v2-0001\n (glob)
+ i> hello\n
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ i> flush() -> None
+ o> readline() -> 62:
+ o> upgraded * exp-ssh-v2-0001\n (glob)
+ o> readline() -> 4:
+ o> 402\n
+ o> read(402) -> 402: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ o> read(1) -> 1:
+ o> \n
+ sending unbundle command
+ i> write(9) -> 9:
+ i> unbundle\n
+ i> write(9) -> 9:
+ i> heads 10\n
+ i> write(10) -> 10: 666f726365
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ i> write(4) -> 4:
+ i> 426\n
+ i> write(426) -> 426:
+ i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n
+ i> test\n
+ i> 0 0\n
+ i> foo\n
+ i> \n
+ i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x00
+ i> write(2) -> 2:
+ i> 0\n
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ o> readline() -> 2:
+ o> 1\n
+ o> read(1) -> 1: 1
+ result: 1
+ remote output:
+ o> read(-1) -> 0:
+ e> read(-1) -> 100:
+ e> adding changesets\n
+ e> adding manifests\n
+ e> adding file changes\n
+ e> added 1 changesets with 1 changes to 1 files\n
+
+ $ cd ..
+
+Pushing a bundle1 with ui.write() and ui.write_err()
+
+ $ cat > $TESTTMP/hook << EOF
+ > def hookuiwrite(ui, repo, **kwargs):
+ > ui.write(b'ui.write 1\n')
+ > ui.write_err(b'ui.write_err 1\n')
+ > ui.write(b'ui.write 2\n')
+ > ui.write_err(b'ui.write_err 2\n')
+ > EOF
+
+ $ hg init uiwriterepo
+ $ cd uiwriterepo
+ $ cat > .hg/hgrc << EOF
+ > [hooks]
+ > pretxnchangegroup.hook = python:$TESTTMP/hook:hookuiwrite
+ > EOF
+
+ $ debugwireproto 1 << EOF
+ > command unbundle
+ > # This is "force" in hex.
+ > heads 666f726365
+ > PUSHFILE ../initial.v1.hg
+ > readavailable
+ > EOF
+ testing ssh1
+ creating ssh peer from handshake results
+ i> write(104) -> 104:
+ i> hello\n
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ i> flush() -> None
+ o> readline() -> 4:
+ o> 403\n
+ o> readline() -> 403:
+ o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n
+ o> readline() -> 2:
+ o> 1\n
+ o> readline() -> 1:
+ o> \n
+ sending unbundle command
+ i> write(9) -> 9:
+ i> unbundle\n
+ i> write(9) -> 9:
+ i> heads 10\n
+ i> write(10) -> 10: 666f726365
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ i> write(4) -> 4:
+ i> 426\n
+ i> write(426) -> 426:
+ i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n
+ i> test\n
+ i> 0 0\n
+ i> foo\n
+ i> \n
+ i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x00
+ i> write(2) -> 2:
+ i> 0\n
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ o> readline() -> 2:
+ o> 1\n
+ o> read(1) -> 1: 1
+ result: 1
+ remote output:
+ o> read(-1) -> 0:
+ e> read(-1) -> 152:
+ e> adding changesets\n
+ e> adding manifests\n
+ e> adding file changes\n
+ e> added 1 changesets with 1 changes to 1 files\n
+ e> ui.write 1\n
+ e> ui.write_err 1\n
+ e> ui.write 2\n
+ e> ui.write_err 2\n
+
+ testing ssh2
+ creating ssh peer from handshake results
+ i> write(171) -> 171:
+ i> upgrade * proto=exp-ssh-v2-0001\n (glob)
+ i> hello\n
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ i> flush() -> None
+ o> readline() -> 62:
+ o> upgraded * exp-ssh-v2-0001\n (glob)
+ o> readline() -> 4:
+ o> 402\n
+ o> read(402) -> 402: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ o> read(1) -> 1:
+ o> \n
+ sending unbundle command
+ i> write(9) -> 9:
+ i> unbundle\n
+ i> write(9) -> 9:
+ i> heads 10\n
+ i> write(10) -> 10: 666f726365
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ i> write(4) -> 4:
+ i> 426\n
+ i> write(426) -> 426:
+ i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n
+ i> test\n
+ i> 0 0\n
+ i> foo\n
+ i> \n
+ i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n
+ i> \x00\x00\x00\x00\x00\x00\x00\x00
+ i> write(2) -> 2:
+ i> 0\n
+ i> flush() -> None
+ o> readline() -> 2:
+ o> 0\n
+ o> readline() -> 2:
+ o> 1\n
+ o> read(1) -> 1: 1
+ result: 1
+ remote output:
+ o> read(-1) -> 0:
+ e> read(-1) -> 152:
+ e> adding changesets\n
+ e> adding manifests\n
+ e> adding file changes\n
+ e> added 1 changesets with 1 changes to 1 files\n
+ e> ui.write 1\n
+ e> ui.write_err 1\n
+ e> ui.write 2\n
+ e> ui.write_err 2\n
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-ssh-proto.t Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,2143 @@
+ $ cat > hgrc-sshv2 << EOF
+ > %include $HGRCPATH
+ > [experimental]
+ > sshpeer.advertise-v2 = true
+ > sshserver.support-v2 = true
+ > EOF
+
+Helper function to run protocol tests against multiple protocol versions.
+This is easier than using #testcases because managing differences between
+protocols with inline conditional output is hard to read.
+
+ $ debugwireproto() {
+ > commands=`cat -`
+ > echo 'testing ssh1'
+ > echo "${commands}" | hg --verbose debugwireproto --localssh
+ > echo ""
+ > echo 'testing ssh2'
+ > echo "${commands}" | HGRCPATH=$TESTTMP/hgrc-sshv2 hg --verbose debugwireproto --localssh
+ > }
+
+ $ cat >> $HGRCPATH << EOF
+ > [ui]
+ > ssh = $PYTHON "$TESTDIR/dummyssh"
+ > [devel]
+ > debug.peer-request = true
+ > [extensions]
+ > sshprotoext = $TESTDIR/sshprotoext.py
+ > EOF
+
+ $ hg init server
+ $ cd server
+ $ echo 0 > foo
+ $ hg -q add foo
+ $ hg commit -m initial
+
+A no-op connection performs a handshake
+
+ $ hg debugwireproto --localssh << EOF
+ > EOF
+ creating ssh peer from handshake results
+
+Raw peers don't perform any activity
+
+ $ hg debugwireproto --localssh --peer raw << EOF
+ > EOF
+ using raw connection to peer
+ $ hg debugwireproto --localssh --peer ssh1 << EOF
+ > EOF
+ creating ssh peer for wire protocol version 1
+ $ hg debugwireproto --localssh --peer ssh2 << EOF
+ > EOF
+ creating ssh peer for wire protocol version 2
+
+Test a normal behaving server, for sanity
+
+ $ cd ..
+
+ $ hg --debug debugpeer ssh://user@dummy/server
+ running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !)
+ running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !)
+ devel-peer-request: hello
+ sending hello command
+ devel-peer-request: between
+ devel-peer-request: pairs: 81 bytes
+ sending between command
+ remote: 403
+ remote: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ remote: 1
+ url: ssh://user@dummy/server
+ local: no
+ pushable: yes
+
+Server should answer the "hello" command in isolation
+
+ $ hg -R server debugwireproto --localssh --peer raw << EOF
+ > raw
+ > hello\n
+ > readline
+ > readline
+ > EOF
+ using raw connection to peer
+ i> write(6) -> 6:
+ i> hello\n
+ o> readline() -> 4:
+ o> 403\n
+ o> readline() -> 403:
+ o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n
+
+`hg debugserve --sshstdio` works
+
+ $ cd server
+ $ hg debugserve --sshstdio << EOF
+ > hello
+ > EOF
+ 403
+ capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+
+I/O logging works
+
+ $ hg debugserve --sshstdio --logiofd 1 << EOF
+ > hello
+ > EOF
+ o> write(4) -> 4:
+ o> 403\n
+ o> write(403) -> 403:
+ o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n
+ 403
+ capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ o> flush() -> None
+
+ $ hg debugserve --sshstdio --logiofile $TESTTMP/io << EOF
+ > hello
+ > EOF
+ 403
+ capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+
+ $ cat $TESTTMP/io
+ o> write(4) -> 4:
+ o> 403\n
+ o> write(403) -> 403:
+ o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n
+ o> flush() -> None
+
+ $ cd ..
+
+>=0.9.1 clients send a "hello" + "between" for the null range as part of handshake.
+Server should reply with capabilities and should send "1\n\n" as a successful
+reply with empty response to the "between".
+
+ $ hg -R server debugwireproto --localssh --peer raw << EOF
+ > raw
+ > hello\n
+ > readline
+ > readline
+ > raw
+ > between\n
+ > pairs 81\n
+ > 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ > readline
+ > readline
+ > EOF
+ using raw connection to peer
+ i> write(6) -> 6:
+ i> hello\n
+ o> readline() -> 4:
+ o> 403\n
+ o> readline() -> 403:
+ o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n
+ i> write(98) -> 98:
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ o> readline() -> 2:
+ o> 1\n
+ o> readline() -> 1:
+ o> \n
+
+SSH banner is not printed by default, ignored by clients
+
+ $ SSHSERVERMODE=banner hg debugpeer ssh://user@dummy/server
+ url: ssh://user@dummy/server
+ local: no
+ pushable: yes
+
+--debug will print the banner
+
+ $ SSHSERVERMODE=banner hg --debug debugpeer ssh://user@dummy/server
+ running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !)
+ running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !)
+ devel-peer-request: hello
+ sending hello command
+ devel-peer-request: between
+ devel-peer-request: pairs: 81 bytes
+ sending between command
+ remote: banner: line 0
+ remote: banner: line 1
+ remote: banner: line 2
+ remote: banner: line 3
+ remote: banner: line 4
+ remote: banner: line 5
+ remote: banner: line 6
+ remote: banner: line 7
+ remote: banner: line 8
+ remote: banner: line 9
+ remote: 403
+ remote: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ remote: 1
+ url: ssh://user@dummy/server
+ local: no
+ pushable: yes
+
+And test the banner with the raw protocol
+
+ $ SSHSERVERMODE=banner hg -R server debugwireproto --localssh --peer raw << EOF
+ > raw
+ > hello\n
+ > readline
+ > readline
+ > readline
+ > readline
+ > readline
+ > readline
+ > readline
+ > readline
+ > readline
+ > readline
+ > readline
+ > readline
+ > raw
+ > between\n
+ > pairs 81\n
+ > 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ > readline
+ > readline
+ > EOF
+ using raw connection to peer
+ i> write(6) -> 6:
+ i> hello\n
+ o> readline() -> 15:
+ o> banner: line 0\n
+ o> readline() -> 15:
+ o> banner: line 1\n
+ o> readline() -> 15:
+ o> banner: line 2\n
+ o> readline() -> 15:
+ o> banner: line 3\n
+ o> readline() -> 15:
+ o> banner: line 4\n
+ o> readline() -> 15:
+ o> banner: line 5\n
+ o> readline() -> 15:
+ o> banner: line 6\n
+ o> readline() -> 15:
+ o> banner: line 7\n
+ o> readline() -> 15:
+ o> banner: line 8\n
+ o> readline() -> 15:
+ o> banner: line 9\n
+ o> readline() -> 4:
+ o> 403\n
+ o> readline() -> 403:
+ o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n
+ i> write(98) -> 98:
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ o> readline() -> 2:
+ o> 1\n
+ o> readline() -> 1:
+ o> \n
+
+Connecting to a <0.9.1 server that doesn't support the hello command.
+The client should refuse, as we dropped support for connecting to such
+servers.
+
+ $ SSHSERVERMODE=no-hello hg --debug debugpeer ssh://user@dummy/server
+ running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !)
+ running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !)
+ devel-peer-request: hello
+ sending hello command
+ devel-peer-request: between
+ devel-peer-request: pairs: 81 bytes
+ sending between command
+ remote: 0
+ remote: 1
+ abort: no suitable response from remote hg!
+ [255]
+
+Sending an unknown command to the server results in an empty response to that command
+
+ $ hg -R server debugwireproto --localssh --peer raw << EOF
+ > raw
+ > pre-hello\n
+ > readline
+ > raw
+ > hello\n
+ > readline
+ > raw
+ > between\n
+ > pairs 81\n
+ > 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ > readline
+ > readline
+ > EOF
+ using raw connection to peer
+ i> write(10) -> 10:
+ i> pre-hello\n
+ o> readline() -> 2:
+ o> 0\n
+ i> write(6) -> 6:
+ i> hello\n
+ o> readline() -> 4:
+ o> 403\n
+ i> write(98) -> 98:
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ o> readline() -> 403:
+ o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n
+ o> readline() -> 2:
+ o> 1\n
+
+ $ hg --config sshpeer.mode=extra-handshake-commands --config sshpeer.handshake-mode=pre-no-args --debug debugpeer ssh://user@dummy/server
+ running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !)
+ running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !)
+ sending no-args command
+ devel-peer-request: hello
+ sending hello command
+ devel-peer-request: between
+ devel-peer-request: pairs: 81 bytes
+ sending between command
+ remote: 0
+ remote: 403
+ remote: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ remote: 1
+ url: ssh://user@dummy/server
+ local: no
+ pushable: yes
+
+Send multiple unknown commands before hello
+
+ $ hg -R server debugwireproto --localssh --peer raw << EOF
+ > raw
+ > unknown1\n
+ > readline
+ > raw
+ > unknown2\n
+ > readline
+ > raw
+ > unknown3\n
+ > readline
+ > raw
+ > hello\n
+ > readline
+ > readline
+ > raw
+ > between\n
+ > pairs 81\n
+ > 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ > readline
+ > readline
+ > EOF
+ using raw connection to peer
+ i> write(9) -> 9:
+ i> unknown1\n
+ o> readline() -> 2:
+ o> 0\n
+ i> write(9) -> 9:
+ i> unknown2\n
+ o> readline() -> 2:
+ o> 0\n
+ i> write(9) -> 9:
+ i> unknown3\n
+ o> readline() -> 2:
+ o> 0\n
+ i> write(6) -> 6:
+ i> hello\n
+ o> readline() -> 4:
+ o> 403\n
+ o> readline() -> 403:
+ o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n
+ i> write(98) -> 98:
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ o> readline() -> 2:
+ o> 1\n
+ o> readline() -> 1:
+ o> \n
+
+ $ hg --config sshpeer.mode=extra-handshake-commands --config sshpeer.handshake-mode=pre-multiple-no-args --debug debugpeer ssh://user@dummy/server
+ running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !)
+ running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !)
+ sending unknown1 command
+ sending unknown2 command
+ sending unknown3 command
+ devel-peer-request: hello
+ sending hello command
+ devel-peer-request: between
+ devel-peer-request: pairs: 81 bytes
+ sending between command
+ remote: 0
+ remote: 0
+ remote: 0
+ remote: 403
+ remote: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ remote: 1
+ url: ssh://user@dummy/server
+ local: no
+ pushable: yes
+
+Send an unknown command before hello that has arguments
+
+ $ cd server
+
+ $ hg debugwireproto --localssh --peer raw << EOF
+ > raw
+ > with-args\n
+ > foo 13\n
+ > value for foo\n
+ > bar 13\n
+ > value for bar\n
+ > readline
+ > readline
+ > readline
+ > readline
+ > readline
+ > raw
+ > hello\n
+ > readline
+ > readline
+ > raw
+ > between\n
+ > pairs 81\n
+ > 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ > readline
+ > readline
+ > EOF
+ using raw connection to peer
+ i> write(52) -> 52:
+ i> with-args\n
+ i> foo 13\n
+ i> value for foo\n
+ i> bar 13\n
+ i> value for bar\n
+ o> readline() -> 2:
+ o> 0\n
+ o> readline() -> 2:
+ o> 0\n
+ o> readline() -> 2:
+ o> 0\n
+ o> readline() -> 2:
+ o> 0\n
+ o> readline() -> 2:
+ o> 0\n
+ i> write(6) -> 6:
+ i> hello\n
+ o> readline() -> 4:
+ o> 403\n
+ o> readline() -> 403:
+ o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n
+ i> write(98) -> 98:
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ o> readline() -> 2:
+ o> 1\n
+ o> readline() -> 1:
+ o> \n
+
+Send an unknown command having an argument that looks numeric
+
+ $ hg debugwireproto --localssh --peer raw << EOF
+ > raw
+ > unknown\n
+ > foo 1\n
+ > 0\n
+ > readline
+ > readline
+ > readline
+ > raw
+ > hello\n
+ > readline
+ > readline
+ > raw
+ > between\n
+ > pairs 81\n
+ > 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ > readline
+ > readline
+ > EOF
+ using raw connection to peer
+ i> write(16) -> 16:
+ i> unknown\n
+ i> foo 1\n
+ i> 0\n
+ o> readline() -> 2:
+ o> 0\n
+ o> readline() -> 2:
+ o> 0\n
+ o> readline() -> 2:
+ o> 0\n
+ i> write(6) -> 6:
+ i> hello\n
+ o> readline() -> 4:
+ o> 403\n
+ o> readline() -> 403:
+ o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n
+ i> write(98) -> 98:
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ o> readline() -> 2:
+ o> 1\n
+ o> readline() -> 1:
+ o> \n
+
+ $ hg debugwireproto --localssh --peer raw << EOF
+ > raw
+ > unknown\n
+ > foo 1\n
+ > 1\n
+ > readline
+ > readline
+ > readline
+ > raw
+ > hello\n
+ > readline
+ > readline
+ > raw
+ > between\n
+ > pairs 81\n
+ > 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ > readline
+ > readline
+ > EOF
+ using raw connection to peer
+ i> write(16) -> 16:
+ i> unknown\n
+ i> foo 1\n
+ i> 1\n
+ o> readline() -> 2:
+ o> 0\n
+ o> readline() -> 2:
+ o> 0\n
+ o> readline() -> 2:
+ o> 0\n
+ i> write(6) -> 6:
+ i> hello\n
+ o> readline() -> 4:
+ o> 403\n
+ o> readline() -> 403:
+ o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n
+ i> write(98) -> 98:
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ o> readline() -> 2:
+ o> 1\n
+ o> readline() -> 1:
+ o> \n
+
+When sending a dict argument value, it is serialized to
+"<arg> <item count>" followed by "<key> <len>\n<value>" for each item
+in the dict.
+
+Dictionary value for unknown command
+
+ $ hg debugwireproto --localssh --peer raw << EOF
+ > raw
+ > unknown\n
+ > dict 3\n
+ > key1 3\n
+ > foo\n
+ > key2 3\n
+ > bar\n
+ > key3 3\n
+ > baz\n
+ > readline
+ > readline
+ > readline
+ > readline
+ > readline
+ > readline
+ > readline
+ > readline
+ > raw
+ > hello\n
+ > readline
+ > readline
+ > EOF
+ using raw connection to peer
+ i> write(48) -> 48:
+ i> unknown\n
+ i> dict 3\n
+ i> key1 3\n
+ i> foo\n
+ i> key2 3\n
+ i> bar\n
+ i> key3 3\n
+ i> baz\n
+ o> readline() -> 2:
+ o> 0\n
+ o> readline() -> 2:
+ o> 0\n
+ o> readline() -> 2:
+ o> 0\n
+ o> readline() -> 2:
+ o> 0\n
+ o> readline() -> 2:
+ o> 0\n
+ o> readline() -> 2:
+ o> 0\n
+ o> readline() -> 2:
+ o> 0\n
+ o> readline() -> 2:
+ o> 0\n
+ i> write(6) -> 6:
+ i> hello\n
+ o> readline() -> 4:
+ o> 403\n
+ o> readline() -> 403:
+ o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n
+
+Incomplete dictionary send
+
+ $ hg debugwireproto --localssh --peer raw << EOF
+ > raw
+ > unknown\n
+ > dict 3\n
+ > key1 3\n
+ > foo\n
+ > readline
+ > readline
+ > readline
+ > readline
+ > EOF
+ using raw connection to peer
+ i> write(26) -> 26:
+ i> unknown\n
+ i> dict 3\n
+ i> key1 3\n
+ i> foo\n
+ o> readline() -> 2:
+ o> 0\n
+ o> readline() -> 2:
+ o> 0\n
+ o> readline() -> 2:
+ o> 0\n
+ o> readline() -> 2:
+ o> 0\n
+
+Incomplete value send
+
+ $ hg debugwireproto --localssh --peer raw << EOF
+ > raw
+ > unknown\n
+ > dict 3\n
+ > key1 3\n
+ > fo
+ > readline
+ > readline
+ > readline
+ > EOF
+ using raw connection to peer
+ i> write(24) -> 24:
+ i> unknown\n
+ i> dict 3\n
+ i> key1 3\n
+ i> fo
+ o> readline() -> 2:
+ o> 0\n
+ o> readline() -> 2:
+ o> 0\n
+ o> readline() -> 2:
+ o> 0\n
+
+Send a command line with spaces
+
+ $ hg debugwireproto --localssh --peer raw << EOF
+ > raw
+ > unknown withspace\n
+ > readline
+ > raw
+ > hello\n
+ > readline
+ > readline
+ > raw
+ > between\n
+ > pairs 81\n
+ > 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ > readline
+ > readline
+ > EOF
+ using raw connection to peer
+ i> write(18) -> 18:
+ i> unknown withspace\n
+ o> readline() -> 2:
+ o> 0\n
+ i> write(6) -> 6:
+ i> hello\n
+ o> readline() -> 4:
+ o> 403\n
+ o> readline() -> 403:
+ o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n
+ i> write(98) -> 98:
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ o> readline() -> 2:
+ o> 1\n
+ o> readline() -> 1:
+ o> \n
+
+ $ hg debugwireproto --localssh --peer raw << EOF
+ > raw
+ > unknown with multiple spaces\n
+ > readline
+ > raw
+ > hello\n
+ > readline
+ > readline
+ > raw
+ > between\n
+ > pairs 81\n
+ > 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ > readline
+ > EOF
+ using raw connection to peer
+ i> write(29) -> 29:
+ i> unknown with multiple spaces\n
+ o> readline() -> 2:
+ o> 0\n
+ i> write(6) -> 6:
+ i> hello\n
+ o> readline() -> 4:
+ o> 403\n
+ o> readline() -> 403:
+ o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n
+ i> write(98) -> 98:
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ o> readline() -> 2:
+ o> 1\n
+
+ $ hg debugwireproto --localssh --peer raw << EOF
+ > raw
+ > unknown with spaces\n
+ > key 10\n
+ > some value\n
+ > readline
+ > readline
+ > readline
+ > raw
+ > hello\n
+ > readline
+ > readline
+ > raw
+ > between\n
+ > pairs 81\n
+ > 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ > readline
+ > readline
+ > EOF
+ using raw connection to peer
+ i> write(38) -> 38:
+ i> unknown with spaces\n
+ i> key 10\n
+ i> some value\n
+ o> readline() -> 2:
+ o> 0\n
+ o> readline() -> 2:
+ o> 0\n
+ o> readline() -> 2:
+ o> 0\n
+ i> write(6) -> 6:
+ i> hello\n
+ o> readline() -> 4:
+ o> 403\n
+ o> readline() -> 403:
+ o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n
+ i> write(98) -> 98:
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ o> readline() -> 2:
+ o> 1\n
+ o> readline() -> 1:
+ o> \n
+
+Send an unknown command after the "between"
+
+ $ hg debugwireproto --localssh --peer raw << EOF
+ > raw
+ > hello\n
+ > readline
+ > readline
+ > raw
+ > between\n
+ > pairs 81\n
+ > 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000unknown
+ > readline
+ > readline
+ > EOF
+ using raw connection to peer
+ i> write(6) -> 6:
+ i> hello\n
+ o> readline() -> 4:
+ o> 403\n
+ o> readline() -> 403:
+ o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n
+ i> write(105) -> 105:
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000unknown
+ o> readline() -> 2:
+ o> 1\n
+ o> readline() -> 1:
+ o> \n
+
+And one with arguments
+
+ $ hg debugwireproto --localssh --peer raw << EOF
+ > raw
+ > hello\n
+ > between\n
+ > pairs 81\n
+ > 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ > readline
+ > readline
+ > readline
+ > readline
+ > raw
+ > unknown\n
+ > foo 5\n
+ > \nvalue\n
+ > bar 3\n
+ > baz\n
+ > readline
+ > readline
+ > readline
+ > EOF
+ using raw connection to peer
+ i> write(104) -> 104:
+ i> hello\n
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ o> readline() -> 4:
+ o> 403\n
+ o> readline() -> 403:
+ o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n
+ o> readline() -> 2:
+ o> 1\n
+ o> readline() -> 1:
+ o> \n
+ i> write(31) -> 31:
+ i> unknown\n
+ i> foo 5\n
+ i> \n
+ i> value\n
+ i> bar 3\n
+ i> baz\n
+ o> readline() -> 2:
+ o> 0\n
+ o> readline() -> 2:
+ o> 0\n
+ o> readline() -> 0:
+
+Send a valid command before the handshake
+
+ $ hg debugwireproto --localssh --peer raw << EOF
+ > raw
+ > heads\n
+ > readline
+ > raw
+ > hello\n
+ > between\n
+ > pairs 81\n
+ > 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ > readline
+ > readline
+ > readline
+ > readline
+ > EOF
+ using raw connection to peer
+ i> write(6) -> 6:
+ i> heads\n
+ o> readline() -> 3:
+ o> 41\n
+ i> write(104) -> 104:
+ i> hello\n
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ o> readline() -> 41:
+ o> 68986213bd4485ea51533535e3fc9e78007a711f\n
+ o> readline() -> 4:
+ o> 403\n
+ o> readline() -> 403:
+ o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n
+ o> readline() -> 2:
+ o> 1\n
+
+And a variation that doesn't send the between command
+
+ $ hg debugwireproto --localssh --peer raw << EOF
+ > raw
+ > heads\n
+ > readline
+ > raw
+ > hello\n
+ > readline
+ > readline
+ > EOF
+ using raw connection to peer
+ i> write(6) -> 6:
+ i> heads\n
+ o> readline() -> 3:
+ o> 41\n
+ i> write(6) -> 6:
+ i> hello\n
+ o> readline() -> 41:
+ o> 68986213bd4485ea51533535e3fc9e78007a711f\n
+ o> readline() -> 4:
+ o> 403\n
+
+Send an upgrade request to a server that doesn't support that command
+
+ $ hg debugwireproto --localssh --peer raw << EOF
+ > raw
+ > upgrade 2e82ab3f-9ce3-4b4e-8f8c-6fd1c0e9e23a proto=irrelevant1%2Cirrelevant2\n
+ > readline
+ > raw
+ > hello\n
+ > between\n
+ > pairs 81\n
+ > 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ > readline
+ > readline
+ > readline
+ > readline
+ > EOF
+ using raw connection to peer
+ i> write(77) -> 77:
+ i> upgrade 2e82ab3f-9ce3-4b4e-8f8c-6fd1c0e9e23a proto=irrelevant1%2Cirrelevant2\n
+ o> readline() -> 2:
+ o> 0\n
+ i> write(104) -> 104:
+ i> hello\n
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ o> readline() -> 4:
+ o> 403\n
+ o> readline() -> 403:
+ o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n
+ o> readline() -> 2:
+ o> 1\n
+ o> readline() -> 1:
+ o> \n
+
+ $ cd ..
+
+ $ hg --config experimental.sshpeer.advertise-v2=true --debug debugpeer ssh://user@dummy/server
+ running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !)
+ running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !)
+ sending upgrade request: * proto=exp-ssh-v2-0001 (glob)
+ devel-peer-request: hello
+ sending hello command
+ devel-peer-request: between
+ devel-peer-request: pairs: 81 bytes
+ sending between command
+ remote: 0
+ remote: 403
+ remote: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ remote: 1
+ url: ssh://user@dummy/server
+ local: no
+ pushable: yes
+
+Enable version 2 support on server. We need to do this in hgrc because we can't
+use --config with `hg serve --stdio`.
+
+ $ cat >> server/.hg/hgrc << EOF
+ > [experimental]
+ > sshserver.support-v2 = true
+ > EOF
+
+Send an upgrade request to a server that supports upgrade
+
+ $ cd server
+
+ $ hg debugwireproto --localssh --peer raw << EOF
+ > raw
+ > upgrade this-is-some-token proto=exp-ssh-v2-0001\n
+ > hello\n
+ > between\n
+ > pairs 81\n
+ > 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ > readline
+ > readline
+ > readline
+ > EOF
+ using raw connection to peer
+ i> write(153) -> 153:
+ i> upgrade this-is-some-token proto=exp-ssh-v2-0001\n
+ i> hello\n
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ o> readline() -> 44:
+ o> upgraded this-is-some-token exp-ssh-v2-0001\n
+ o> readline() -> 4:
+ o> 402\n
+ o> readline() -> 403:
+ o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n
+
+ $ cd ..
+
+ $ hg --config experimental.sshpeer.advertise-v2=true --debug debugpeer ssh://user@dummy/server
+ running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !)
+ running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !)
+ sending upgrade request: * proto=exp-ssh-v2-0001 (glob)
+ devel-peer-request: hello
+ sending hello command
+ devel-peer-request: between
+ devel-peer-request: pairs: 81 bytes
+ sending between command
+ protocol upgraded to exp-ssh-v2-0001
+ remote: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ url: ssh://user@dummy/server
+ local: no
+ pushable: yes
+
+Verify the peer has capabilities
+
+ $ hg --config experimental.sshpeer.advertise-v2=true --debug debugcapabilities ssh://user@dummy/server
+ running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !)
+ running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !)
+ sending upgrade request: * proto=exp-ssh-v2-0001 (glob)
+ devel-peer-request: hello
+ sending hello command
+ devel-peer-request: between
+ devel-peer-request: pairs: 81 bytes
+ sending between command
+ protocol upgraded to exp-ssh-v2-0001
+ remote: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ Main capabilities:
+ batch
+ branchmap
+ $USUAL_BUNDLE2_CAPS_SERVER$
+ changegroupsubset
+ getbundle
+ known
+ lookup
+ pushkey
+ streamreqs=generaldelta,revlogv1
+ unbundle=HG10GZ,HG10BZ,HG10UN
+ unbundlehash
+ Bundle2 capabilities:
+ HG20
+ bookmarks
+ changegroup
+ 01
+ 02
+ digests
+ md5
+ sha1
+ sha512
+ error
+ abort
+ unsupportedcontent
+ pushraced
+ pushkey
+ hgtagsfnodes
+ listkeys
+ phases
+ heads
+ pushkey
+ remote-changegroup
+ http
+ https
+ rev-branch-cache
+
+Command after upgrade to version 2 is processed
+
+ $ cd server
+
+ $ hg debugwireproto --localssh --peer raw << EOF
+ > raw
+ > upgrade this-is-some-token proto=exp-ssh-v2-0001\n
+ > hello\n
+ > between\n
+ > pairs 81\n
+ > 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ > readline
+ > readline
+ > readline
+ > raw
+ > hello\n
+ > readline
+ > readline
+ > EOF
+ using raw connection to peer
+ i> write(153) -> 153:
+ i> upgrade this-is-some-token proto=exp-ssh-v2-0001\n
+ i> hello\n
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ o> readline() -> 44:
+ o> upgraded this-is-some-token exp-ssh-v2-0001\n
+ o> readline() -> 4:
+ o> 402\n
+ o> readline() -> 403:
+ o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n
+ i> write(6) -> 6:
+ i> hello\n
+ o> readline() -> 4:
+ o> 385\n
+ o> readline() -> 385:
+ o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n
+
+Multiple upgrades are not allowed
+
+ $ hg debugwireproto --localssh --peer raw << EOF
+ > raw
+ > upgrade this-is-some-token proto=exp-ssh-v2-0001\n
+ > hello\n
+ > between\n
+ > pairs 81\n
+ > 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ > readline
+ > readline
+ > readline
+ > raw
+ > upgrade another-token proto=irrelevant\n
+ > hello\n
+ > readline
+ > readavailable
+ > EOF
+ using raw connection to peer
+ i> write(153) -> 153:
+ i> upgrade this-is-some-token proto=exp-ssh-v2-0001\n
+ i> hello\n
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ o> readline() -> 44:
+ o> upgraded this-is-some-token exp-ssh-v2-0001\n
+ o> readline() -> 4:
+ o> 402\n
+ o> readline() -> 403:
+ o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n
+ i> write(45) -> 45:
+ i> upgrade another-token proto=irrelevant\n
+ i> hello\n
+ o> readline() -> 1:
+ o> \n
+ o> read(-1) -> 0:
+ e> read(-1) -> 42:
+ e> cannot upgrade protocols multiple times\n
+ e> -\n
+
+Malformed upgrade request line (not exactly 3 space delimited tokens)
+
+ $ hg debugwireproto --localssh --peer raw << EOF
+ > raw
+ > upgrade\n
+ > readline
+ > EOF
+ using raw connection to peer
+ i> write(8) -> 8:
+ i> upgrade\n
+ o> readline() -> 2:
+ o> 0\n
+
+ $ hg debugwireproto --localssh --peer raw << EOF
+ > raw
+ > upgrade token\n
+ > readline
+ > EOF
+ using raw connection to peer
+ i> write(14) -> 14:
+ i> upgrade token\n
+ o> readline() -> 2:
+ o> 0\n
+
+ $ hg debugwireproto --localssh --peer raw << EOF
+ > raw
+ > upgrade token foo=bar extra-token\n
+ > readline
+ > EOF
+ using raw connection to peer
+ i> write(34) -> 34:
+ i> upgrade token foo=bar extra-token\n
+ o> readline() -> 2:
+ o> 0\n
+
+Upgrade request to unsupported protocol is ignored
+
+ $ hg debugwireproto --localssh --peer raw << EOF
+ > raw
+ > upgrade this-is-some-token proto=unknown1,unknown2\n
+ > readline
+ > raw
+ > hello\n
+ > readline
+ > readline
+ > raw
+ > between\n
+ > pairs 81\n
+ > 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ > readline
+ > readline
+ > EOF
+ using raw connection to peer
+ i> write(51) -> 51:
+ i> upgrade this-is-some-token proto=unknown1,unknown2\n
+ o> readline() -> 2:
+ o> 0\n
+ i> write(6) -> 6:
+ i> hello\n
+ o> readline() -> 4:
+ o> 403\n
+ o> readline() -> 403:
+ o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n
+ i> write(98) -> 98:
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ o> readline() -> 2:
+ o> 1\n
+ o> readline() -> 1:
+ o> \n
+
+Upgrade request must be followed by hello + between
+
+ $ hg debugwireproto --localssh --peer raw << EOF
+ > raw
+ > upgrade token proto=exp-ssh-v2-0001\n
+ > invalid\n
+ > readline
+ > readavailable
+ > EOF
+ using raw connection to peer
+ i> write(44) -> 44:
+ i> upgrade token proto=exp-ssh-v2-0001\n
+ i> invalid\n
+ o> readline() -> 1:
+ o> \n
+ o> read(-1) -> 0:
+ e> read(-1) -> 46:
+ e> malformed handshake protocol: missing hello\n
+ e> -\n
+
+ $ hg debugwireproto --localssh --peer raw << EOF
+ > raw
+ > upgrade token proto=exp-ssh-v2-0001\n
+ > hello\n
+ > invalid\n
+ > readline
+ > readavailable
+ > EOF
+ using raw connection to peer
+ i> write(50) -> 50:
+ i> upgrade token proto=exp-ssh-v2-0001\n
+ i> hello\n
+ i> invalid\n
+ o> readline() -> 1:
+ o> \n
+ o> read(-1) -> 0:
+ e> read(-1) -> 48:
+ e> malformed handshake protocol: missing between\n
+ e> -\n
+
+ $ hg debugwireproto --localssh --peer raw << EOF
+ > raw
+ > upgrade token proto=exp-ssh-v2-0001\n
+ > hello\n
+ > between\n
+ > invalid\n
+ > readline
+ > readavailable
+ > EOF
+ using raw connection to peer
+ i> write(58) -> 58:
+ i> upgrade token proto=exp-ssh-v2-0001\n
+ i> hello\n
+ i> between\n
+ i> invalid\n
+ o> readline() -> 1:
+ o> \n
+ o> read(-1) -> 0:
+ e> read(-1) -> 49:
+ e> malformed handshake protocol: missing pairs 81\n
+ e> -\n
+
+Legacy commands are not exposed to version 2 of protocol
+
+ $ hg --config experimental.sshpeer.advertise-v2=true debugwireproto --localssh << EOF
+ > command branches
+ > nodes 0000000000000000000000000000000000000000
+ > EOF
+ creating ssh peer from handshake results
+ sending branches command
+ response:
+
+ $ hg --config experimental.sshpeer.advertise-v2=true debugwireproto --localssh << EOF
+ > command changegroup
+ > roots 0000000000000000000000000000000000000000
+ > EOF
+ creating ssh peer from handshake results
+ sending changegroup command
+ response:
+
+ $ hg --config experimental.sshpeer.advertise-v2=true debugwireproto --localssh << EOF
+ > command changegroupsubset
+ > bases 0000000000000000000000000000000000000000
+ > heads 0000000000000000000000000000000000000000
+ > EOF
+ creating ssh peer from handshake results
+ sending changegroupsubset command
+ response:
+
+ $ cd ..
+
+Test listkeys for listing namespaces
+
+ $ hg init empty
+ $ cd empty
+ $ debugwireproto << EOF
+ > command listkeys
+ > namespace namespaces
+ > EOF
+ testing ssh1
+ creating ssh peer from handshake results
+ i> write(104) -> 104:
+ i> hello\n
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ i> flush() -> None
+ o> readline() -> 4:
+ o> 403\n
+ o> readline() -> 403:
+ o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n
+ o> readline() -> 2:
+ o> 1\n
+ o> readline() -> 1:
+ o> \n
+ sending listkeys command
+ i> write(9) -> 9:
+ i> listkeys\n
+ i> write(13) -> 13:
+ i> namespace 10\n
+ i> write(10) -> 10: namespaces
+ i> flush() -> None
+ o> bufferedreadline() -> 3:
+ o> 30\n
+ o> bufferedread(30) -> 30:
+ o> bookmarks \n
+ o> namespaces \n
+ o> phases
+ response: bookmarks \nnamespaces \nphases
+
+ testing ssh2
+ creating ssh peer from handshake results
+ i> write(171) -> 171:
+ i> upgrade * proto=exp-ssh-v2-0001\n (glob)
+ i> hello\n
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ i> flush() -> None
+ o> readline() -> 62:
+ o> upgraded * exp-ssh-v2-0001\n (glob)
+ o> readline() -> 4:
+ o> 402\n
+ o> read(402) -> 402: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ o> read(1) -> 1:
+ o> \n
+ sending listkeys command
+ i> write(9) -> 9:
+ i> listkeys\n
+ i> write(13) -> 13:
+ i> namespace 10\n
+ i> write(10) -> 10: namespaces
+ i> flush() -> None
+ o> bufferedreadline() -> 3:
+ o> 30\n
+ o> bufferedread(30) -> 30:
+ o> bookmarks \n
+ o> namespaces \n
+ o> phases
+ response: bookmarks \nnamespaces \nphases
+
+ $ cd ..
+
+Test listkeys for bookmarks
+
+ $ hg init bookmarkrepo
+ $ cd bookmarkrepo
+ $ echo 0 > foo
+ $ hg add foo
+ $ hg -q commit -m initial
+ $ echo 1 > foo
+ $ hg commit -m second
+
+With no bookmarks set
+
+ $ debugwireproto << EOF
+ > command listkeys
+ > namespace bookmarks
+ > EOF
+ testing ssh1
+ creating ssh peer from handshake results
+ i> write(104) -> 104:
+ i> hello\n
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ i> flush() -> None
+ o> readline() -> 4:
+ o> 403\n
+ o> readline() -> 403:
+ o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n
+ o> readline() -> 2:
+ o> 1\n
+ o> readline() -> 1:
+ o> \n
+ sending listkeys command
+ i> write(9) -> 9:
+ i> listkeys\n
+ i> write(12) -> 12:
+ i> namespace 9\n
+ i> write(9) -> 9: bookmarks
+ i> flush() -> None
+ o> bufferedreadline() -> 2:
+ o> 0\n
+ response:
+
+ testing ssh2
+ creating ssh peer from handshake results
+ i> write(171) -> 171:
+ i> upgrade * proto=exp-ssh-v2-0001\n (glob)
+ i> hello\n
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ i> flush() -> None
+ o> readline() -> 62:
+ o> upgraded * exp-ssh-v2-0001\n (glob)
+ o> readline() -> 4:
+ o> 402\n
+ o> read(402) -> 402: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ o> read(1) -> 1:
+ o> \n
+ sending listkeys command
+ i> write(9) -> 9:
+ i> listkeys\n
+ i> write(12) -> 12:
+ i> namespace 9\n
+ i> write(9) -> 9: bookmarks
+ i> flush() -> None
+ o> bufferedreadline() -> 2:
+ o> 0\n
+ response:
+
+With a single bookmark set
+
+ $ hg book -r 0 bookA
+ $ debugwireproto << EOF
+ > command listkeys
+ > namespace bookmarks
+ > EOF
+ testing ssh1
+ creating ssh peer from handshake results
+ i> write(104) -> 104:
+ i> hello\n
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ i> flush() -> None
+ o> readline() -> 4:
+ o> 403\n
+ o> readline() -> 403:
+ o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n
+ o> readline() -> 2:
+ o> 1\n
+ o> readline() -> 1:
+ o> \n
+ sending listkeys command
+ i> write(9) -> 9:
+ i> listkeys\n
+ i> write(12) -> 12:
+ i> namespace 9\n
+ i> write(9) -> 9: bookmarks
+ i> flush() -> None
+ o> bufferedreadline() -> 3:
+ o> 46\n
+ o> bufferedread(46) -> 46: bookA 68986213bd4485ea51533535e3fc9e78007a711f
+ response: bookA 68986213bd4485ea51533535e3fc9e78007a711f
+
+ testing ssh2
+ creating ssh peer from handshake results
+ i> write(171) -> 171:
+ i> upgrade * proto=exp-ssh-v2-0001\n (glob)
+ i> hello\n
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ i> flush() -> None
+ o> readline() -> 62:
+ o> upgraded * exp-ssh-v2-0001\n (glob)
+ o> readline() -> 4:
+ o> 402\n
+ o> read(402) -> 402: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ o> read(1) -> 1:
+ o> \n
+ sending listkeys command
+ i> write(9) -> 9:
+ i> listkeys\n
+ i> write(12) -> 12:
+ i> namespace 9\n
+ i> write(9) -> 9: bookmarks
+ i> flush() -> None
+ o> bufferedreadline() -> 3:
+ o> 46\n
+ o> bufferedread(46) -> 46: bookA 68986213bd4485ea51533535e3fc9e78007a711f
+ response: bookA 68986213bd4485ea51533535e3fc9e78007a711f
+
+With multiple bookmarks set
+
+ $ hg book -r 1 bookB
+ $ debugwireproto << EOF
+ > command listkeys
+ > namespace bookmarks
+ > EOF
+ testing ssh1
+ creating ssh peer from handshake results
+ i> write(104) -> 104:
+ i> hello\n
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ i> flush() -> None
+ o> readline() -> 4:
+ o> 403\n
+ o> readline() -> 403:
+ o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n
+ o> readline() -> 2:
+ o> 1\n
+ o> readline() -> 1:
+ o> \n
+ sending listkeys command
+ i> write(9) -> 9:
+ i> listkeys\n
+ i> write(12) -> 12:
+ i> namespace 9\n
+ i> write(9) -> 9: bookmarks
+ i> flush() -> None
+ o> bufferedreadline() -> 3:
+ o> 93\n
+ o> bufferedread(93) -> 93:
+ o> bookA 68986213bd4485ea51533535e3fc9e78007a711f\n
+ o> bookB 1880f3755e2e52e3199e0ee5638128b08642f34d
+ response: bookA 68986213bd4485ea51533535e3fc9e78007a711f\nbookB 1880f3755e2e52e3199e0ee5638128b08642f34d
+
+ testing ssh2
+ creating ssh peer from handshake results
+ i> write(171) -> 171:
+ i> upgrade * proto=exp-ssh-v2-0001\n (glob)
+ i> hello\n
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ i> flush() -> None
+ o> readline() -> 62:
+ o> upgraded * exp-ssh-v2-0001\n (glob)
+ o> readline() -> 4:
+ o> 402\n
+ o> read(402) -> 402: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ o> read(1) -> 1:
+ o> \n
+ sending listkeys command
+ i> write(9) -> 9:
+ i> listkeys\n
+ i> write(12) -> 12:
+ i> namespace 9\n
+ i> write(9) -> 9: bookmarks
+ i> flush() -> None
+ o> bufferedreadline() -> 3:
+ o> 93\n
+ o> bufferedread(93) -> 93:
+ o> bookA 68986213bd4485ea51533535e3fc9e78007a711f\n
+ o> bookB 1880f3755e2e52e3199e0ee5638128b08642f34d
+ response: bookA 68986213bd4485ea51533535e3fc9e78007a711f\nbookB 1880f3755e2e52e3199e0ee5638128b08642f34d
+
+Test pushkey for bookmarks
+
+ $ debugwireproto << EOF
+ > command pushkey
+ > namespace bookmarks
+ > key remote
+ > old
+ > new 68986213bd4485ea51533535e3fc9e78007a711f
+ > EOF
+ testing ssh1
+ creating ssh peer from handshake results
+ i> write(104) -> 104:
+ i> hello\n
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ i> flush() -> None
+ o> readline() -> 4:
+ o> 403\n
+ o> readline() -> 403:
+ o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n
+ o> readline() -> 2:
+ o> 1\n
+ o> readline() -> 1:
+ o> \n
+ sending pushkey command
+ i> write(8) -> 8:
+ i> pushkey\n
+ i> write(6) -> 6:
+ i> key 6\n
+ i> write(6) -> 6: remote
+ i> write(12) -> 12:
+ i> namespace 9\n
+ i> write(9) -> 9: bookmarks
+ i> write(7) -> 7:
+ i> new 40\n
+ i> write(40) -> 40: 68986213bd4485ea51533535e3fc9e78007a711f
+ i> write(6) -> 6:
+ i> old 0\n
+ i> flush() -> None
+ o> bufferedreadline() -> 2:
+ o> 2\n
+ o> bufferedread(2) -> 2:
+ o> 1\n
+ response: 1\n
+
+ testing ssh2
+ creating ssh peer from handshake results
+ i> write(171) -> 171:
+ i> upgrade * proto=exp-ssh-v2-0001\n (glob)
+ i> hello\n
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ i> flush() -> None
+ o> readline() -> 62:
+ o> upgraded * exp-ssh-v2-0001\n (glob)
+ o> readline() -> 4:
+ o> 402\n
+ o> read(402) -> 402: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ o> read(1) -> 1:
+ o> \n
+ sending pushkey command
+ i> write(8) -> 8:
+ i> pushkey\n
+ i> write(6) -> 6:
+ i> key 6\n
+ i> write(6) -> 6: remote
+ i> write(12) -> 12:
+ i> namespace 9\n
+ i> write(9) -> 9: bookmarks
+ i> write(7) -> 7:
+ i> new 40\n
+ i> write(40) -> 40: 68986213bd4485ea51533535e3fc9e78007a711f
+ i> write(6) -> 6:
+ i> old 0\n
+ i> flush() -> None
+ o> bufferedreadline() -> 2:
+ o> 2\n
+ o> bufferedread(2) -> 2:
+ o> 1\n
+ response: 1\n
+
+ $ hg bookmarks
+ bookA 0:68986213bd44
+ bookB 1:1880f3755e2e
+ remote 0:68986213bd44
+
+ $ cd ..
+
+Test listkeys for phases
+
+ $ hg init phasesrepo
+ $ cd phasesrepo
+
+Phases on empty repo
+
+ $ debugwireproto << EOF
+ > command listkeys
+ > namespace phases
+ > EOF
+ testing ssh1
+ creating ssh peer from handshake results
+ i> write(104) -> 104:
+ i> hello\n
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ i> flush() -> None
+ o> readline() -> 4:
+ o> 403\n
+ o> readline() -> 403:
+ o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n
+ o> readline() -> 2:
+ o> 1\n
+ o> readline() -> 1:
+ o> \n
+ sending listkeys command
+ i> write(9) -> 9:
+ i> listkeys\n
+ i> write(12) -> 12:
+ i> namespace 6\n
+ i> write(6) -> 6: phases
+ i> flush() -> None
+ o> bufferedreadline() -> 3:
+ o> 15\n
+ o> bufferedread(15) -> 15: publishing True
+ response: publishing True
+
+ testing ssh2
+ creating ssh peer from handshake results
+ i> write(171) -> 171:
+ i> upgrade * proto=exp-ssh-v2-0001\n (glob)
+ i> hello\n
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ i> flush() -> None
+ o> readline() -> 62:
+ o> upgraded * exp-ssh-v2-0001\n (glob)
+ o> readline() -> 4:
+ o> 402\n
+ o> read(402) -> 402: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ o> read(1) -> 1:
+ o> \n
+ sending listkeys command
+ i> write(9) -> 9:
+ i> listkeys\n
+ i> write(12) -> 12:
+ i> namespace 6\n
+ i> write(6) -> 6: phases
+ i> flush() -> None
+ o> bufferedreadline() -> 3:
+ o> 15\n
+ o> bufferedread(15) -> 15: publishing True
+ response: publishing True
+
+Create some commits
+
+ $ echo 0 > foo
+ $ hg add foo
+ $ hg -q commit -m initial
+ $ hg phase --public
+ $ echo 1 > foo
+ $ hg commit -m 'head 1 commit 1'
+ $ echo 2 > foo
+ $ hg commit -m 'head 1 commit 2'
+ $ hg -q up 0
+ $ echo 1a > foo
+ $ hg commit -m 'head 2 commit 1'
+ created new head
+ $ echo 2a > foo
+ $ hg commit -m 'head 2 commit 2'
+
+Two draft heads
+
+ $ debugwireproto << EOF
+ > command listkeys
+ > namespace phases
+ > EOF
+ testing ssh1
+ creating ssh peer from handshake results
+ i> write(104) -> 104:
+ i> hello\n
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ i> flush() -> None
+ o> readline() -> 4:
+ o> 403\n
+ o> readline() -> 403:
+ o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n
+ o> readline() -> 2:
+ o> 1\n
+ o> readline() -> 1:
+ o> \n
+ sending listkeys command
+ i> write(9) -> 9:
+ i> listkeys\n
+ i> write(12) -> 12:
+ i> namespace 6\n
+ i> write(6) -> 6: phases
+ i> flush() -> None
+ o> bufferedreadline() -> 4:
+ o> 101\n
+ o> bufferedread(101) -> 101:
+ o> 20b8a89289d80036e6c4e87c2083e3bea1586637 1\n
+ o> c4750011d906c18ea2f0527419cbc1a544435150 1\n
+ o> publishing True
+ response: 20b8a89289d80036e6c4e87c2083e3bea1586637 1\nc4750011d906c18ea2f0527419cbc1a544435150 1\npublishing True
+
+ testing ssh2
+ creating ssh peer from handshake results
+ i> write(171) -> 171:
+ i> upgrade * proto=exp-ssh-v2-0001\n (glob)
+ i> hello\n
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ i> flush() -> None
+ o> readline() -> 62:
+ o> upgraded * exp-ssh-v2-0001\n (glob)
+ o> readline() -> 4:
+ o> 402\n
+ o> read(402) -> 402: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ o> read(1) -> 1:
+ o> \n
+ sending listkeys command
+ i> write(9) -> 9:
+ i> listkeys\n
+ i> write(12) -> 12:
+ i> namespace 6\n
+ i> write(6) -> 6: phases
+ i> flush() -> None
+ o> bufferedreadline() -> 4:
+ o> 101\n
+ o> bufferedread(101) -> 101:
+ o> 20b8a89289d80036e6c4e87c2083e3bea1586637 1\n
+ o> c4750011d906c18ea2f0527419cbc1a544435150 1\n
+ o> publishing True
+ response: 20b8a89289d80036e6c4e87c2083e3bea1586637 1\nc4750011d906c18ea2f0527419cbc1a544435150 1\npublishing True
+
+Single draft head
+
+ $ hg phase --public -r 2
+ $ debugwireproto << EOF
+ > command listkeys
+ > namespace phases
+ > EOF
+ testing ssh1
+ creating ssh peer from handshake results
+ i> write(104) -> 104:
+ i> hello\n
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ i> flush() -> None
+ o> readline() -> 4:
+ o> 403\n
+ o> readline() -> 403:
+ o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n
+ o> readline() -> 2:
+ o> 1\n
+ o> readline() -> 1:
+ o> \n
+ sending listkeys command
+ i> write(9) -> 9:
+ i> listkeys\n
+ i> write(12) -> 12:
+ i> namespace 6\n
+ i> write(6) -> 6: phases
+ i> flush() -> None
+ o> bufferedreadline() -> 3:
+ o> 58\n
+ o> bufferedread(58) -> 58:
+ o> c4750011d906c18ea2f0527419cbc1a544435150 1\n
+ o> publishing True
+ response: c4750011d906c18ea2f0527419cbc1a544435150 1\npublishing True
+
+ testing ssh2
+ creating ssh peer from handshake results
+ i> write(171) -> 171:
+ i> upgrade * proto=exp-ssh-v2-0001\n (glob)
+ i> hello\n
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ i> flush() -> None
+ o> readline() -> 62:
+ o> upgraded * exp-ssh-v2-0001\n (glob)
+ o> readline() -> 4:
+ o> 402\n
+ o> read(402) -> 402: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ o> read(1) -> 1:
+ o> \n
+ sending listkeys command
+ i> write(9) -> 9:
+ i> listkeys\n
+ i> write(12) -> 12:
+ i> namespace 6\n
+ i> write(6) -> 6: phases
+ i> flush() -> None
+ o> bufferedreadline() -> 3:
+ o> 58\n
+ o> bufferedread(58) -> 58:
+ o> c4750011d906c18ea2f0527419cbc1a544435150 1\n
+ o> publishing True
+ response: c4750011d906c18ea2f0527419cbc1a544435150 1\npublishing True
+
+All public heads
+
+ $ hg phase --public -r 4
+ $ debugwireproto << EOF
+ > command listkeys
+ > namespace phases
+ > EOF
+ testing ssh1
+ creating ssh peer from handshake results
+ i> write(104) -> 104:
+ i> hello\n
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ i> flush() -> None
+ o> readline() -> 4:
+ o> 403\n
+ o> readline() -> 403:
+ o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n
+ o> readline() -> 2:
+ o> 1\n
+ o> readline() -> 1:
+ o> \n
+ sending listkeys command
+ i> write(9) -> 9:
+ i> listkeys\n
+ i> write(12) -> 12:
+ i> namespace 6\n
+ i> write(6) -> 6: phases
+ i> flush() -> None
+ o> bufferedreadline() -> 3:
+ o> 15\n
+ o> bufferedread(15) -> 15: publishing True
+ response: publishing True
+
+ testing ssh2
+ creating ssh peer from handshake results
+ i> write(171) -> 171:
+ i> upgrade * proto=exp-ssh-v2-0001\n (glob)
+ i> hello\n
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ i> flush() -> None
+ o> readline() -> 62:
+ o> upgraded * exp-ssh-v2-0001\n (glob)
+ o> readline() -> 4:
+ o> 402\n
+ o> read(402) -> 402: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ o> read(1) -> 1:
+ o> \n
+ sending listkeys command
+ i> write(9) -> 9:
+ i> listkeys\n
+ i> write(12) -> 12:
+ i> namespace 6\n
+ i> write(6) -> 6: phases
+ i> flush() -> None
+ o> bufferedreadline() -> 3:
+ o> 15\n
+ o> bufferedread(15) -> 15: publishing True
+ response: publishing True
+
+Setting public phase via pushkey
+
+ $ hg phase --draft --force -r .
+
+ $ debugwireproto << EOF
+ > command pushkey
+ > namespace phases
+ > key 7127240a084fd9dc86fe8d1f98e26229161ec82b
+ > old 1
+ > new 0
+ > EOF
+ testing ssh1
+ creating ssh peer from handshake results
+ i> write(104) -> 104:
+ i> hello\n
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ i> flush() -> None
+ o> readline() -> 4:
+ o> 403\n
+ o> readline() -> 403:
+ o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n
+ o> readline() -> 2:
+ o> 1\n
+ o> readline() -> 1:
+ o> \n
+ sending pushkey command
+ i> write(8) -> 8:
+ i> pushkey\n
+ i> write(7) -> 7:
+ i> key 40\n
+ i> write(40) -> 40: 7127240a084fd9dc86fe8d1f98e26229161ec82b
+ i> write(12) -> 12:
+ i> namespace 6\n
+ i> write(6) -> 6: phases
+ i> write(6) -> 6:
+ i> new 1\n
+ i> write(1) -> 1: 0
+ i> write(6) -> 6:
+ i> old 1\n
+ i> write(1) -> 1: 1
+ i> flush() -> None
+ o> bufferedreadline() -> 2:
+ o> 2\n
+ o> bufferedread(2) -> 2:
+ o> 1\n
+ response: 1\n
+
+ testing ssh2
+ creating ssh peer from handshake results
+ i> write(171) -> 171:
+ i> upgrade * proto=exp-ssh-v2-0001\n (glob)
+ i> hello\n
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ i> flush() -> None
+ o> readline() -> 62:
+ o> upgraded * exp-ssh-v2-0001\n (glob)
+ o> readline() -> 4:
+ o> 402\n
+ o> read(402) -> 402: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ o> read(1) -> 1:
+ o> \n
+ sending pushkey command
+ i> write(8) -> 8:
+ i> pushkey\n
+ i> write(7) -> 7:
+ i> key 40\n
+ i> write(40) -> 40: 7127240a084fd9dc86fe8d1f98e26229161ec82b
+ i> write(12) -> 12:
+ i> namespace 6\n
+ i> write(6) -> 6: phases
+ i> write(6) -> 6:
+ i> new 1\n
+ i> write(1) -> 1: 0
+ i> write(6) -> 6:
+ i> old 1\n
+ i> write(1) -> 1: 1
+ i> flush() -> None
+ o> bufferedreadline() -> 2:
+ o> 2\n
+ o> bufferedread(2) -> 2:
+ o> 1\n
+ response: 1\n
+
+ $ hg phase .
+ 4: public
+
+ $ cd ..
+
+Test batching of requests
+
+ $ hg init batching
+ $ cd batching
+ $ echo 0 > foo
+ $ hg add foo
+ $ hg -q commit -m initial
+ $ hg phase --public
+ $ echo 1 > foo
+ $ hg commit -m 'commit 1'
+ $ hg -q up 0
+ $ echo 2 > foo
+ $ hg commit -m 'commit 2'
+ created new head
+ $ hg book -r 1 bookA
+ $ hg book -r 2 bookB
+
+ $ debugwireproto << EOF
+ > batchbegin
+ > command heads
+ > command listkeys
+ > namespace bookmarks
+ > command listkeys
+ > namespace phases
+ > batchsubmit
+ > EOF
+ testing ssh1
+ creating ssh peer from handshake results
+ i> write(104) -> 104:
+ i> hello\n
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ i> flush() -> None
+ o> readline() -> 4:
+ o> 403\n
+ o> readline() -> 403:
+ o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n
+ o> readline() -> 2:
+ o> 1\n
+ o> readline() -> 1:
+ o> \n
+ sending batch with 3 sub-commands
+ i> write(6) -> 6:
+ i> batch\n
+ i> write(4) -> 4:
+ i> * 0\n
+ i> write(8) -> 8:
+ i> cmds 61\n
+ i> write(61) -> 61: heads ;listkeys namespace=bookmarks;listkeys namespace=phases
+ i> flush() -> None
+ o> bufferedreadline() -> 4:
+ o> 278\n
+ o> bufferedread(278) -> 278:
+ o> bfebe6bd38eebc6f8202e419c1171268987ea6a6 4ee3fcef1c800fa2bf23e20af7c83ff111d9c7ab\n
+ o> ;bookA 4ee3fcef1c800fa2bf23e20af7c83ff111d9c7ab\n
+ o> bookB bfebe6bd38eebc6f8202e419c1171268987ea6a6;4ee3fcef1c800fa2bf23e20af7c83ff111d9c7ab 1\n
+ o> bfebe6bd38eebc6f8202e419c1171268987ea6a6 1\n
+ o> publishing True
+ response #0: bfebe6bd38eebc6f8202e419c1171268987ea6a6 4ee3fcef1c800fa2bf23e20af7c83ff111d9c7ab\n
+ response #1: bookA 4ee3fcef1c800fa2bf23e20af7c83ff111d9c7ab\nbookB bfebe6bd38eebc6f8202e419c1171268987ea6a6
+ response #2: 4ee3fcef1c800fa2bf23e20af7c83ff111d9c7ab 1\nbfebe6bd38eebc6f8202e419c1171268987ea6a6 1\npublishing True
+
+ testing ssh2
+ creating ssh peer from handshake results
+ i> write(171) -> 171:
+ i> upgrade * proto=exp-ssh-v2-0001\n (glob)
+ i> hello\n
+ i> between\n
+ i> pairs 81\n
+ i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ i> flush() -> None
+ o> readline() -> 62:
+ o> upgraded * exp-ssh-v2-0001\n (glob)
+ o> readline() -> 4:
+ o> 402\n
+ o> read(402) -> 402: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ o> read(1) -> 1:
+ o> \n
+ sending batch with 3 sub-commands
+ i> write(6) -> 6:
+ i> batch\n
+ i> write(4) -> 4:
+ i> * 0\n
+ i> write(8) -> 8:
+ i> cmds 61\n
+ i> write(61) -> 61: heads ;listkeys namespace=bookmarks;listkeys namespace=phases
+ i> flush() -> None
+ o> bufferedreadline() -> 4:
+ o> 278\n
+ o> bufferedread(278) -> 278:
+ o> bfebe6bd38eebc6f8202e419c1171268987ea6a6 4ee3fcef1c800fa2bf23e20af7c83ff111d9c7ab\n
+ o> ;bookA 4ee3fcef1c800fa2bf23e20af7c83ff111d9c7ab\n
+ o> bookB bfebe6bd38eebc6f8202e419c1171268987ea6a6;4ee3fcef1c800fa2bf23e20af7c83ff111d9c7ab 1\n
+ o> bfebe6bd38eebc6f8202e419c1171268987ea6a6 1\n
+ o> publishing True
+ response #0: bfebe6bd38eebc6f8202e419c1171268987ea6a6 4ee3fcef1c800fa2bf23e20af7c83ff111d9c7ab\n
+ response #1: bookA 4ee3fcef1c800fa2bf23e20af7c83ff111d9c7ab\nbookB bfebe6bd38eebc6f8202e419c1171268987ea6a6
+ response #2: 4ee3fcef1c800fa2bf23e20af7c83ff111d9c7ab 1\nbfebe6bd38eebc6f8202e419c1171268987ea6a6 1\npublishing True
--- a/tests/test-ssh.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-ssh.t Mon Mar 19 08:07:18 2018 -0700
@@ -1,3 +1,12 @@
+#testcases sshv1 sshv2
+
+#if sshv2
+ $ cat >> $HGRCPATH << EOF
+ > [experimental]
+ > sshpeer.advertise-v2 = true
+ > sshserver.support-v2 = true
+ > EOF
+#endif
This test tries to exercise the ssh functionality with a dummy script
@@ -481,15 +490,20 @@
$ hg pull --debug ssh://user@dummy/remote --config devel.debug.peer-request=yes
pulling from ssh://user@dummy/remote
running .* ".*/dummyssh" ['"]user@dummy['"] ('|")hg -R remote serve --stdio('|") (re)
+ sending upgrade request: * proto=exp-ssh-v2-0001 (glob) (sshv2 !)
devel-peer-request: hello
sending hello command
devel-peer-request: between
devel-peer-request: pairs: 81 bytes
sending between command
- remote: 384
- remote: capabilities: lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
- remote: 1
+ remote: 403 (sshv1 !)
+ protocol upgraded to exp-ssh-v2-0001 (sshv2 !)
+ remote: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ remote: 1 (sshv1 !)
query 1; heads
+ devel-peer-request: batched-content
+ devel-peer-request: - heads (0 arguments)
+ devel-peer-request: - known (1 arguments)
devel-peer-request: batch
devel-peer-request: cmds: 141 bytes
sending batch command
@@ -498,7 +512,7 @@
no changes found
devel-peer-request: getbundle
devel-peer-request: bookmarks: 1 bytes
- devel-peer-request: bundlecaps: 247 bytes
+ devel-peer-request: bundlecaps: 266 bytes
devel-peer-request: cg: 1 bytes
devel-peer-request: common: 122 bytes
devel-peer-request: heads: 122 bytes
--- a/tests/test-sshserver.py Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-sshserver.py Mon Mar 19 08:07:18 2018 -0700
@@ -6,30 +6,33 @@
import silenttestrunner
from mercurial import (
- sshserver,
util,
wireproto,
+ wireprotoserver,
)
class SSHServerGetArgsTests(unittest.TestCase):
def testparseknown(self):
tests = [
- ('* 0\nnodes 0\n', ['', {}]),
- ('* 0\nnodes 40\n1111111111111111111111111111111111111111\n',
- ['1111111111111111111111111111111111111111', {}]),
+ (b'* 0\nnodes 0\n', [b'', {}]),
+ (b'* 0\nnodes 40\n1111111111111111111111111111111111111111\n',
+ [b'1111111111111111111111111111111111111111', {}]),
]
for input, expected in tests:
- self.assertparse('known', input, expected)
+ self.assertparse(b'known', input, expected)
def assertparse(self, cmd, input, expected):
server = mockserver(input)
+ proto = wireprotoserver.sshv1protocolhandler(server._ui,
+ server._fin,
+ server._fout)
_func, spec = wireproto.commands[cmd]
- self.assertEqual(server.getargs(spec), expected)
+ self.assertEqual(proto.getargs(spec), expected)
def mockserver(inbytes):
ui = mockui(inbytes)
repo = mockrepo(ui)
- return sshserver.sshserver(ui, repo)
+ return wireprotoserver.sshserver(ui, repo)
class mockrepo(object):
def __init__(self, ui):
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-stack.t Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,253 @@
+
+This test tests the low-level definition of stack, agnostic of all formatting
+
+Initial setup
+
+ $ cat << EOF >> $HGRCPATH
+ > [ui]
+ > logtemplate = {rev} {branch} {phase} {desc|firstline}\n
+ > [extensions]
+ > rebase=
+ > [experimental]
+ > evolution=createmarkers,exchange,allowunstable
+ > EOF
+
+ $ hg init main
+ $ cd main
+ $ hg branch other
+ marked working directory as branch other
+ (branches are permanent and global, did you want a bookmark?)
+ $ echo aaa > aaa
+ $ hg add aaa
+ $ hg commit -m c_a
+ $ echo aaa > bbb
+ $ hg add bbb
+ $ hg commit -m c_b
+ $ hg branch foo
+ marked working directory as branch foo
+ $ echo aaa > ccc
+ $ hg add ccc
+ $ hg commit -m c_c
+ $ echo aaa > ddd
+ $ hg add ddd
+ $ hg commit -m c_d
+ $ echo aaa > eee
+ $ hg add eee
+ $ hg commit -m c_e
+ $ echo aaa > fff
+ $ hg add fff
+ $ hg commit -m c_f
+ $ hg log -G
+ @ 5 foo draft c_f
+ |
+ o 4 foo draft c_e
+ |
+ o 3 foo draft c_d
+ |
+ o 2 foo draft c_c
+ |
+ o 1 other draft c_b
+ |
+ o 0 other draft c_a
+
+
+Check that stack doesn't include public changesets
+--------------------------------------------------
+
+ $ hg up other
+ 0 files updated, 0 files merged, 4 files removed, 0 files unresolved
+ $ hg log -G -r "stack()"
+ @ 1 other draft c_b
+ |
+ o 0 other draft c_a
+
+ $ hg phase --public 'branch("other")'
+ $ hg log -G -r "stack()"
+ $ hg up foo
+ 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+Simple test
+-----------
+
+'stack()' lists all changesets in the branch
+
+ $ hg branch
+ foo
+ $ hg log -G -r "stack()"
+ @ 5 foo draft c_f
+ |
+ o 4 foo draft c_e
+ |
+ o 3 foo draft c_d
+ |
+ o 2 foo draft c_c
+ |
+ ~
+
+Case with some of the branch unstable
+------------------------------------
+
+ $ hg up 3
+ 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
+ $ echo bbb > ddd
+ $ hg commit --amend
+ 2 new orphan changesets
+ $ hg log -G
+ @ 6 foo draft c_d
+ |
+ | * 5 foo draft c_f
+ | |
+ | * 4 foo draft c_e
+ | |
+ | x 3 foo draft c_d
+ |/
+ o 2 foo draft c_c
+ |
+ o 1 other public c_b
+ |
+ o 0 other public c_a
+
+ $ hg log -G -r "stack()"
+ @ 6 foo draft c_d
+ |
+ ~
+ $ hg up -r "desc(c_e)"
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg log -G -r "stack()"
+ @ 4 foo draft c_e
+ |
+ x 3 foo draft c_d
+ |
+ ~
+ $ hg up -r "desc(c_d)"
+ 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+
+ $ hg log -G -r "stack()"
+ @ 6 foo draft c_d
+ |
+ ~
+
+Case with multiple topological heads
+------------------------------------
+
+Make things linear again
+
+ $ hg rebase -s 'desc(c_e)' -d 'desc(c_d) - obsolete()'
+ rebasing 4:4f2a69f6d380 "c_e"
+ rebasing 5:913c298d8b0a "c_f"
+ $ hg log -G
+ o 8 foo draft c_f
+ |
+ o 7 foo draft c_e
+ |
+ @ 6 foo draft c_d
+ |
+ o 2 foo draft c_c
+ |
+ o 1 other public c_b
+ |
+ o 0 other public c_a
+
+
+Create the second branch
+
+ $ hg up 'desc(c_d)'
+ 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ echo aaa > ggg
+ $ hg add ggg
+ $ hg commit -m c_g
+ created new head
+ $ echo aaa > hhh
+ $ hg add hhh
+ $ hg commit -m c_h
+ $ hg log -G
+ @ 10 foo draft c_h
+ |
+ o 9 foo draft c_g
+ |
+ | o 8 foo draft c_f
+ | |
+ | o 7 foo draft c_e
+ |/
+ o 6 foo draft c_d
+ |
+ o 2 foo draft c_c
+ |
+ o 1 other public c_b
+ |
+ o 0 other public c_a
+
+
+Test output
+
+ $ hg log -G -r "stack(10)"
+ @ 10 foo draft c_h
+ |
+ o 9 foo draft c_g
+ |
+ ~
+ $ hg log -G -r "stack(8)"
+ o 8 foo draft c_f
+ |
+ o 7 foo draft c_e
+ |
+ ~
+ $ hg log -G -r "stack(head())"
+ @ 10 foo draft c_h
+ |
+ o 9 foo draft c_g
+ |
+ ~
+ o 8 foo draft c_f
+ |
+ o 7 foo draft c_e
+ |
+ ~
+Check the stack order
+ $ hg log -r "first(stack())"
+ 9 foo draft c_g
+ $ hg log -r "first(stack(10))"
+ 9 foo draft c_g
+ $ hg log -r "first(stack(8))"
+ 7 foo draft c_e
+ $ hg log -r "first(stack(head()))"
+ 7 foo draft c_e
+
+Case with multiple heads with unstability involved
+--------------------------------------------------
+
+We amend the message to make sure the display base pick the right changeset
+
+ $ hg up 'desc(c_d)'
+ 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
+ $ echo ccc > ddd
+ $ hg commit --amend -m 'c_D'
+ 4 new orphan changesets
+ $ hg rebase -d . -s 'desc(c_g)'
+ rebasing 9:2ebb6e48ab8a "c_g"
+ rebasing 10:634f38e27a1d "c_h"
+ $ hg log -G
+ o 13 foo draft c_h
+ |
+ o 12 foo draft c_g
+ |
+ @ 11 foo draft c_D
+ |
+ | * 8 foo draft c_f
+ | |
+ | * 7 foo draft c_e
+ | |
+ | x 6 foo draft c_d
+ |/
+ o 2 foo draft c_c
+ |
+ o 1 other public c_b
+ |
+ o 0 other public c_a
+
+
+We should improve stack definition to also show 12 and 13 here
+ $ hg log -G -r "stack()"
+ @ 11 foo draft c_D
+ |
+ ~
--- a/tests/test-static-http.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-static-http.t Mon Mar 19 08:07:18 2018 -0700
@@ -222,6 +222,8 @@
/.hg/bookmarks
/.hg/bookmarks.current
/.hg/cache/hgtagsfnodes1
+ /.hg/cache/rbc-names-v1
+ /.hg/cache/rbc-revs-v1
/.hg/requires
/.hg/store/00changelog.i
/.hg/store/00manifest.i
@@ -234,6 +236,8 @@
/remote-with-names/.hg/bookmarks.current
/remote-with-names/.hg/cache/branch2-served
/remote-with-names/.hg/cache/hgtagsfnodes1
+ /remote-with-names/.hg/cache/rbc-names-v1
+ /remote-with-names/.hg/cache/rbc-revs-v1
/remote-with-names/.hg/cache/tags2-served
/remote-with-names/.hg/localtags
/remote-with-names/.hg/requires
@@ -248,6 +252,7 @@
/remote/.hg/cache/branch2-served
/remote/.hg/cache/hgtagsfnodes1
/remote/.hg/cache/rbc-names-v1
+ /remote/.hg/cache/rbc-revs-v1
/remote/.hg/cache/tags2-served
/remote/.hg/localtags
/remote/.hg/requires
@@ -265,6 +270,8 @@
/sub/.hg/bookmarks
/sub/.hg/bookmarks.current
/sub/.hg/cache/hgtagsfnodes1
+ /sub/.hg/cache/rbc-names-v1
+ /sub/.hg/cache/rbc-revs-v1
/sub/.hg/requires
/sub/.hg/store/00changelog.i
/sub/.hg/store/00manifest.i
--- a/tests/test-status.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-status.t Mon Mar 19 08:07:18 2018 -0700
@@ -465,12 +465,12 @@
$ hg init repo5
$ cd repo5
- >>> open("010a", "wb").write("\1\nfoo")
+ >>> open("010a", r"wb").write(b"\1\nfoo")
$ hg ci -q -A -m 'initial checkin'
$ hg status -A
C 010a
- >>> open("010a", "wb").write("\1\nbar")
+ >>> open("010a", r"wb").write(b"\1\nbar")
$ hg status -A
M 010a
$ hg ci -q -m 'modify 010a'
--- a/tests/test-strip.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-strip.t Mon Mar 19 08:07:18 2018 -0700
@@ -214,6 +214,7 @@
Stream params: {Compression: BZ}
changegroup -- {nbchanges: 1, version: 02}
264128213d290d868c54642d13aeaa3675551a78
+ cache:rev-branch-cache -- {}
phase-heads -- {}
264128213d290d868c54642d13aeaa3675551a78 draft
$ hg pull .hg/strip-backup/*
@@ -843,13 +844,13 @@
list of changesets:
6625a516847449b6f0fa3737b9ba56e9f0f3032c
d8db9d1372214336d2b5570f20ee468d2c72fa8b
- bundle2-output-bundle: "HG20", (1 params) 2 parts total
+ bundle2-output-bundle: "HG20", (1 params) 3 parts total
bundle2-output-part: "changegroup" (params: 1 mandatory 1 advisory) streamed payload
+ bundle2-output-part: "cache:rev-branch-cache" streamed payload
bundle2-output-part: "phase-heads" 24 bytes payload
saved backup bundle to $TESTTMP/issue4736/.hg/strip-backup/6625a5168474-345bb43d-backup.hg
updating the branch cache
invalid branchheads cache (served): tip differs
- truncating cache/rbc-revs-v1 to 24
$ hg log -G
o changeset: 2:5c51d8d6557d
| tag: tip
@@ -893,17 +894,17 @@
> def test(transaction):
> # observe cache inconsistency
> try:
- > [repo.changelog.node(r) for r in repo.revs("not public()")]
+ > [repo.changelog.node(r) for r in repo.revs(b"not public()")]
> except IndexError:
- > repo.ui.status("Index error!\n")
+ > repo.ui.status(b"Index error!\n")
> transaction = orig(repo, desc, *args, **kwargs)
> # warm up the phase cache
- > list(repo.revs("not public()"))
- > if desc != 'strip':
- > transaction.addpostclose("phase invalidation test", test)
+ > list(repo.revs(b"not public()"))
+ > if desc != b'strip':
+ > transaction.addpostclose(b"phase invalidation test", test)
> return transaction
> def extsetup(ui):
- > extensions.wrapfunction(localrepo.localrepository, "transaction",
+ > extensions.wrapfunction(localrepo.localrepository, b"transaction",
> transactioncallback)
> EOF
$ hg up -C 2
@@ -930,9 +931,9 @@
> class crashstriprepo(repo.__class__):
> def transaction(self, desc, *args, **kwargs):
> tr = super(crashstriprepo, self).transaction(desc, *args, **kwargs)
- > if desc == 'strip':
- > def crash(tra): raise error.Abort('boom')
- > tr.addpostclose('crash', crash)
+ > if desc == b'strip':
+ > def crash(tra): raise error.Abort(b'boom')
+ > tr.addpostclose(b'crash', crash)
> return tr
> repo.__class__ = crashstriprepo
> EOF
@@ -1175,16 +1176,16 @@
> from mercurial import commands, registrar, repair
> cmdtable = {}
> command = registrar.command(cmdtable)
- > @command('testdelayedstrip')
+ > @command(b'testdelayedstrip')
> def testdelayedstrip(ui, repo):
> def getnodes(expr):
> return [repo.changelog.node(r) for r in repo.revs(expr)]
> with repo.wlock():
> with repo.lock():
- > with repo.transaction('delayedstrip'):
- > repair.delayedstrip(ui, repo, getnodes('B+I+Z+D+E'), 'J')
- > repair.delayedstrip(ui, repo, getnodes('G+H+Z'), 'I')
- > commands.commit(ui, repo, message='J', date='0 0')
+ > with repo.transaction(b'delayedstrip'):
+ > repair.delayedstrip(ui, repo, getnodes(b'B+I+Z+D+E'), b'J')
+ > repair.delayedstrip(ui, repo, getnodes(b'G+H+Z'), b'I')
+ > commands.commit(ui, repo, message=b'J', date=b'0 0')
> EOF
$ hg testdelayedstrip --config extensions.t=$TESTTMP/delayedstrip.py
warning: orphaned descendants detected, not stripping 08ebfeb61bac, 112478962961, 7fb047a69f22
@@ -1225,7 +1226,7 @@
> from mercurial import registrar, scmutil
> cmdtable = {}
> command = registrar.command(cmdtable)
- > @command('testnodescleanup')
+ > @command(b'testnodescleanup')
> def testnodescleanup(ui, repo):
> def nodes(expr):
> return [repo.changelog.node(r) for r in repo.revs(expr)]
@@ -1233,12 +1234,13 @@
> return nodes(expr)[0]
> with repo.wlock():
> with repo.lock():
- > with repo.transaction('delayedstrip'):
- > mapping = {node('F'): [node('F2')],
- > node('D'): [node('D2')],
- > node('G'): [node('G2')]}
- > scmutil.cleanupnodes(repo, mapping, 'replace')
- > scmutil.cleanupnodes(repo, nodes('((B::)+I+Z)-D2'), 'replace')
+ > with repo.transaction(b'delayedstrip'):
+ > mapping = {node(b'F'): [node(b'F2')],
+ > node(b'D'): [node(b'D2')],
+ > node(b'G'): [node(b'G2')]}
+ > scmutil.cleanupnodes(repo, mapping, b'replace')
+ > scmutil.cleanupnodes(repo, nodes(b'((B::)+I+Z)-D2'),
+ > b'replace')
> EOF
$ hg testnodescleanup --config extensions.t=$TESTTMP/scmutilcleanup.py
warning: orphaned descendants detected, not stripping 112478962961, 1fc8102cda62, 26805aba1e60
--- a/tests/test-subrepo-deep-nested-change.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-subrepo-deep-nested-change.t Mon Mar 19 08:07:18 2018 -0700
@@ -114,6 +114,7 @@
* "GET /?cmd=batch HTTP/1.1" 200 - * (glob)
* "GET /?cmd=getbundle HTTP/1.1" 200 - * (glob)
* "GET /../sub1?cmd=capabilities HTTP/1.1" 404 - (glob)
+ $ cat error.log
$ killdaemons.py
$ rm hg1.pid error.log access.log
--- a/tests/test-subrepo-missing.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-subrepo-missing.t Mon Mar 19 08:07:18 2018 -0700
@@ -14,7 +14,7 @@
ignore blanklines in .hgsubstate
- >>> file('.hgsubstate', 'wb').write('\n\n \t \n \n')
+ >>> open('.hgsubstate', 'wb').write(b'\n\n \t \n \n')
$ hg st --subrepos
M .hgsubstate
$ hg revert -qC .hgsubstate
@@ -22,7 +22,7 @@
abort more gracefully on .hgsubstate parsing error
$ cp .hgsubstate .hgsubstate.old
- >>> file('.hgsubstate', 'wb').write('\ninvalid')
+ >>> open('.hgsubstate', 'wb').write(b'\ninvalid')
$ hg st --subrepos --cwd $TESTTMP -R $TESTTMP/repo
abort: invalid subrepository revision specifier in 'repo/.hgsubstate' line 2
[255]
--- a/tests/test-tag.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-tag.t Mon Mar 19 08:07:18 2018 -0700
@@ -231,8 +231,8 @@
doesn't end with EOL
$ $PYTHON << EOF
- > f = file('.hg/localtags'); last = f.readlines()[-1][:-1]; f.close()
- > f = file('.hg/localtags', 'w'); f.write(last); f.close()
+ > f = open('.hg/localtags'); last = f.readlines()[-1][:-1]; f.close()
+ > f = open('.hg/localtags', 'w'); f.write(last); f.close()
> EOF
$ cat .hg/localtags; echo
acb14030fe0a21b60322c440ad2d20cf7685a376 localblah
@@ -243,8 +243,8 @@
$ $PYTHON << EOF
- > f = file('.hgtags'); last = f.readlines()[-1][:-1]; f.close()
- > f = file('.hgtags', 'w'); f.write(last); f.close()
+ > f = open('.hgtags'); last = f.readlines()[-1][:-1]; f.close()
+ > f = open('.hgtags', 'w'); f.write(last); f.close()
> EOF
$ hg ci -m'broken manual edit of .hgtags'
$ cat .hgtags; echo
--- a/tests/test-tags.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-tags.t Mon Mar 19 08:07:18 2018 -0700
@@ -681,6 +681,8 @@
checklink (symlink !)
checklink-target (symlink !)
hgtagsfnodes1
+ rbc-names-v1
+ rbc-revs-v1
Cache should contain the head only, even though other nodes have tags data
@@ -706,6 +708,8 @@
checklink (symlink !)
checklink-target (symlink !)
hgtagsfnodes1
+ rbc-names-v1
+ rbc-revs-v1
tags2-visible
$ f --size --hexdump tagsclient/.hg/cache/hgtagsfnodes1
@@ -729,6 +733,7 @@
f63cc8fe54e4d326f8d692805d70e092f851ddb1
40f0358cb314c824a5929ee527308d90e023bc10
hgtagsfnodes -- {}
+ cache:rev-branch-cache -- {}
Check that local clone includes cache data
--- a/tests/test-template-engine.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-template-engine.t Mon Mar 19 08:07:18 2018 -0700
@@ -1,30 +1,34 @@
$ cat > engine.py << EOF
>
- > from mercurial import templater
+ > from mercurial import (
+ > pycompat,
+ > templater,
+ > templateutil,
+ > )
>
- > class mytemplater(object):
- > def __init__(self, loader, filters, defaults, resources, aliases):
- > self.loader = loader
- > self._defaults = defaults
- > self._resources = resources
+ > class mytemplater(templater.engine):
+ > def _load(self, t):
+ > return self._loader(t)
>
> def process(self, t, map):
- > tmpl = self.loader(t)
+ > tmpl = self._load(t)
> props = self._defaults.copy()
> props.update(map)
- > for k, v in props.iteritems():
- > if k in ('templ', 'ctx', 'repo', 'revcache', 'cache', 'troubles'):
+ > for k, v in props.items():
+ > if b'{{%s}}' % k not in tmpl:
> continue
- > if hasattr(v, '__call__'):
+ > if callable(v) and getattr(v, '_requires', None) is None:
> props = self._resources.copy()
> props.update(map)
- > v = v(**props)
- > v = templater.stringify(v)
- > tmpl = tmpl.replace('{{%s}}' % k, v)
+ > v = v(**pycompat.strkwargs(props))
+ > elif callable(v):
+ > v = v(self, props)
+ > v = templateutil.stringify(v)
+ > tmpl = tmpl.replace(b'{{%s}}' % k, v)
> yield tmpl
>
- > templater.engines['my'] = mytemplater
+ > templater.engines[b'my'] = mytemplater
> EOF
$ hg init test
$ echo '[extensions]' > test/.hg/hgrc
--- a/tests/test-transplant.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-transplant.t Mon Mar 19 08:07:18 2018 -0700
@@ -760,7 +760,7 @@
$ cd twin2
$ echo '[patch]' >> .hg/hgrc
$ echo 'eol = crlf' >> .hg/hgrc
- $ $PYTHON -c "file('b', 'wb').write('b\r\nb\r\n')"
+ $ $PYTHON -c "open('b', 'wb').write(b'b\r\nb\r\n')"
$ hg ci -Am addb
adding b
$ hg transplant -s ../twin1 tip
@@ -838,9 +838,9 @@
$ cd binarysource
$ echo a > a
$ hg ci -Am adda a
- >>> file('b', 'wb').write('\0b1')
+ >>> open('b', 'wb').write(b'\0b1')
$ hg ci -Am addb b
- >>> file('b', 'wb').write('\0b2')
+ >>> open('b', 'wb').write(b'\0b2')
$ hg ci -m changeb b
$ cd ..
@@ -891,14 +891,14 @@
> # emulate that patch.patch() is aborted at patching on "abort" file
> from mercurial import error, extensions, patch as patchmod
> def patch(orig, ui, repo, patchname,
- > strip=1, prefix='', files=None,
- > eolmode='strict', similarity=0):
+ > strip=1, prefix=b'', files=None,
+ > eolmode=b'strict', similarity=0):
> if files is None:
> files = set()
> r = orig(ui, repo, patchname,
> strip=strip, prefix=prefix, files=files,
> eolmode=eolmode, similarity=similarity)
- > if 'abort' in files:
+ > if b'abort' in files:
> raise error.PatchError('intentional error while patching')
> return r
> def extsetup(ui):
--- a/tests/test-ui-color.py Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-ui-color.py Mon Mar 19 08:07:18 2018 -0700
@@ -9,27 +9,27 @@
# ensure errors aren't buffered
testui = uimod.ui()
testui.pushbuffer()
-testui.write(('buffered\n'))
-testui.warn(('warning\n'))
-testui.write_err('error\n')
+testui.write((b'buffered\n'))
+testui.warn((b'warning\n'))
+testui.write_err(b'error\n')
print(repr(testui.popbuffer()))
# test dispatch.dispatch with the same ui object
-hgrc = open(os.environ["HGRCPATH"], 'w')
-hgrc.write('[extensions]\n')
-hgrc.write('color=\n')
+hgrc = open(os.environ["HGRCPATH"], 'wb')
+hgrc.write(b'[extensions]\n')
+hgrc.write(b'color=\n')
hgrc.close()
ui_ = uimod.ui.load()
-ui_.setconfig('ui', 'formatted', 'True')
+ui_.setconfig(b'ui', b'formatted', b'True')
# we're not interested in the output, so write that to devnull
-ui_.fout = open(os.devnull, 'w')
+ui_.fout = open(os.devnull, 'wb')
# call some arbitrary command just so we go through
# color's wrapped _runcommand twice.
def runcmd():
- dispatch.dispatch(dispatch.request(['version', '-q'], ui_))
+ dispatch.dispatch(dispatch.request([b'version', b'-q'], ui_))
runcmd()
print("colored? %s" % (ui_._colormode is not None))
--- a/tests/test-ui-verbosity.py Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-ui-verbosity.py Mon Mar 19 08:07:18 2018 -0700
@@ -2,9 +2,13 @@
import os
from mercurial import (
+ pycompat,
ui as uimod,
)
+if pycompat.ispy3:
+ xrange = range
+
hgrc = os.environ['HGRCPATH']
f = open(hgrc)
basehgrc = f.read()
--- a/tests/test-uncommit.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-uncommit.t Mon Mar 19 08:07:18 2018 -0700
@@ -27,6 +27,9 @@
deleted in the changeset will be left unchanged, and so will remain
modified in the working directory.
+ If no files are specified, the commit will be pruned, unless --keep is
+ given.
+
(use 'hg help -e uncommit' to show help for the uncommit extension)
options ([+] can be repeated):
@@ -158,7 +161,7 @@
$ cat files
abcde
foo
- $ hg commit -m "files abcde + foo"
+ $ hg commit --amend -m "files abcde + foo"
Testing the 'experimental.uncommitondirtywdir' config
@@ -188,16 +191,16 @@
+abc
$ hg bookmark
- foo 9:48e5bd7cd583
+ foo 10:48e5bd7cd583
$ hg uncommit
3 new orphan changesets
$ hg status
M files
A file-abc
$ hg heads -T '{rev}:{node} {desc}'
- 9:48e5bd7cd583eb24164ef8b89185819c84c96ed7 files abcde + foo (no-eol)
+ 10:48e5bd7cd583eb24164ef8b89185819c84c96ed7 files abcde + foo (no-eol)
$ hg bookmark
- foo 9:48e5bd7cd583
+ foo 10:48e5bd7cd583
$ hg commit -m 'new abc'
created new head
@@ -219,36 +222,38 @@
+ab
$ hg bookmark
- foo 9:48e5bd7cd583
+ foo 10:48e5bd7cd583
$ hg uncommit file-ab
1 new orphan changesets
$ hg status
A file-ab
$ hg heads -T '{rev}:{node} {desc}\n'
- 11:8eb87968f2edb7f27f27fe676316e179de65fff6 added file-ab
- 10:5dc89ca4486f8a88716c5797fa9f498d13d7c2e1 new abc
- 9:48e5bd7cd583eb24164ef8b89185819c84c96ed7 files abcde + foo
+ 12:8eb87968f2edb7f27f27fe676316e179de65fff6 added file-ab
+ 11:5dc89ca4486f8a88716c5797fa9f498d13d7c2e1 new abc
+ 10:48e5bd7cd583eb24164ef8b89185819c84c96ed7 files abcde + foo
$ hg bookmark
- foo 9:48e5bd7cd583
+ foo 10:48e5bd7cd583
$ hg commit -m 'update ab'
$ hg status
$ hg heads -T '{rev}:{node} {desc}\n'
- 12:f21039c59242b085491bb58f591afc4ed1c04c09 update ab
- 10:5dc89ca4486f8a88716c5797fa9f498d13d7c2e1 new abc
- 9:48e5bd7cd583eb24164ef8b89185819c84c96ed7 files abcde + foo
+ 13:f21039c59242b085491bb58f591afc4ed1c04c09 update ab
+ 11:5dc89ca4486f8a88716c5797fa9f498d13d7c2e1 new abc
+ 10:48e5bd7cd583eb24164ef8b89185819c84c96ed7 files abcde + foo
$ hg log -G -T '{rev}:{node} {desc}' --hidden
- @ 12:f21039c59242b085491bb58f591afc4ed1c04c09 update ab
+ @ 13:f21039c59242b085491bb58f591afc4ed1c04c09 update ab
|
- o 11:8eb87968f2edb7f27f27fe676316e179de65fff6 added file-ab
+ o 12:8eb87968f2edb7f27f27fe676316e179de65fff6 added file-ab
|
- | * 10:5dc89ca4486f8a88716c5797fa9f498d13d7c2e1 new abc
+ | * 11:5dc89ca4486f8a88716c5797fa9f498d13d7c2e1 new abc
| |
- | | * 9:48e5bd7cd583eb24164ef8b89185819c84c96ed7 files abcde + foo
+ | | * 10:48e5bd7cd583eb24164ef8b89185819c84c96ed7 files abcde + foo
| | |
- | | | x 8:83815831694b1271e9f207cb1b79b2b19275edcb files abcde + foo
+ | | | x 9:8a6b58c173ca6a2e3745d8bd86698718d664bc6c files abcde + foo
+ | | |/
+ | | | x 8:39ad452c7f684a55d161c574340c5766c4569278 update files for abcde
| | |/
| | | x 7:0977fa602c2fd7d8427ed4e7ee15ea13b84c9173 update files for abcde
| | |/
@@ -270,7 +275,7 @@
$ hg uncommit
$ hg phase -r .
- 11: draft
+ 12: draft
$ hg commit -m 'update ab again'
Uncommit with public parent
@@ -278,7 +283,7 @@
$ hg phase -p "::.^"
$ hg uncommit
$ hg phase -r .
- 11: public
+ 12: public
Partial uncommit with public parent
@@ -289,9 +294,9 @@
$ hg status
A xyz
$ hg phase -r .
- 15: draft
+ 16: draft
$ hg phase -r ".^"
- 11: public
+ 12: public
Uncommit leaving an empty changeset
--- a/tests/test-upgrade-repo.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-upgrade-repo.t Mon Mar 19 08:07:18 2018 -0700
@@ -31,23 +31,18 @@
abort: cannot upgrade repository; unsupported source requirement: shared
[255]
-Do not yet support upgrading manifestv2 and treemanifest repos
-
- $ hg --config experimental.manifestv2=true init manifestv2
- $ hg -R manifestv2 debugupgraderepo
- abort: cannot upgrade repository; unsupported source requirement: manifestv2
- [255]
+Do not yet support upgrading treemanifest repos
$ hg --config experimental.treemanifest=true init treemanifest
$ hg -R treemanifest debugupgraderepo
abort: cannot upgrade repository; unsupported source requirement: treemanifest
[255]
-Cannot add manifestv2 or treemanifest requirement during upgrade
+Cannot add treemanifest requirement during upgrade
$ hg init disallowaddedreq
- $ hg -R disallowaddedreq --config experimental.manifestv2=true --config experimental.treemanifest=true debugupgraderepo
- abort: cannot upgrade repository; do not support adding requirement: manifestv2, treemanifest
+ $ hg -R disallowaddedreq --config experimental.treemanifest=true debugupgraderepo
+ abort: cannot upgrade repository; do not support adding requirement: treemanifest
[255]
An upgrade of a repository created with recommended settings only suggests optimizations
--- a/tests/test-walk.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-walk.t Mon Mar 19 08:07:18 2018 -0700
@@ -304,12 +304,10 @@
f beans/turtle beans/turtle
$ hg debugwalk -Xbeans/black beans/black
matcher: <differencematcher m1=<patternmatcher patterns='(?:beans\\/black(?:/|$))'>, m2=<includematcher includes='(?:beans\\/black(?:/|$))'>>
- f beans/black beans/black exact
$ hg debugwalk -Xbeans/black -Ibeans/black
matcher: <differencematcher m1=<includematcher includes='(?:beans\\/black(?:/|$))'>, m2=<includematcher includes='(?:beans\\/black(?:/|$))'>>
$ hg debugwalk -Xbeans beans/black
matcher: <differencematcher m1=<patternmatcher patterns='(?:beans\\/black(?:/|$))'>, m2=<includematcher includes='(?:beans(?:/|$))'>>
- f beans/black beans/black exact
$ hg debugwalk -Xbeans -Ibeans/black
matcher: <differencematcher m1=<includematcher includes='(?:beans\\/black(?:/|$))'>, m2=<includematcher includes='(?:beans(?:/|$))'>>
$ hg debugwalk 'glob:mammals/../beans/b*'
@@ -345,17 +343,13 @@
[255]
Test explicit paths and excludes:
-(BROKEN: nothing should be included, but wctx.walk() does)
$ hg debugwalk fennel -X fennel
matcher: <differencematcher m1=<patternmatcher patterns='(?:fennel(?:/|$))'>, m2=<includematcher includes='(?:fennel(?:/|$))'>>
- f fennel fennel exact
$ hg debugwalk fennel -X 'f*'
matcher: <differencematcher m1=<patternmatcher patterns='(?:fennel(?:/|$))'>, m2=<includematcher includes='(?:f[^/]*(?:/|$))'>>
- f fennel fennel exact
$ hg debugwalk beans/black -X 'path:beans'
matcher: <differencematcher m1=<patternmatcher patterns='(?:beans\\/black(?:/|$))'>, m2=<includematcher includes='(?:beans(?:/|$))'>>
- f beans/black beans/black exact
$ hg debugwalk -I 'path:beans/black' -X 'path:beans'
matcher: <differencematcher m1=<includematcher includes='(?:beans\\/black(?:/|$))'>, m2=<includematcher includes='(?:beans(?:/|$))'>>
@@ -494,12 +488,12 @@
Test listfile and listfile0
- $ $PYTHON -c "file('listfile0', 'wb').write('fenugreek\0new\0')"
+ $ $PYTHON -c "open('listfile0', 'wb').write(b'fenugreek\0new\0')"
$ hg debugwalk -I 'listfile0:listfile0'
matcher: <includematcher includes='(?:fenugreek(?:/|$)|new(?:/|$))'>
f fenugreek fenugreek
f new new
- $ $PYTHON -c "file('listfile', 'wb').write('fenugreek\nnew\r\nmammals/skunk\n')"
+ $ $PYTHON -c "open('listfile', 'wb').write(b'fenugreek\nnew\r\nmammals/skunk\n')"
$ hg debugwalk -I 'listfile:listfile'
matcher: <includematcher includes='(?:fenugreek(?:/|$)|new(?:/|$)|mammals\\/skunk(?:/|$))'>
f fenugreek fenugreek
@@ -525,7 +519,7 @@
$ cd t
$ echo fennel > overflow.list
- $ $PYTHON -c "for i in xrange(20000 / 100): print 'x' * 100" >> overflow.list
  + $ $PYTHON -c "for i in range(20000 // 100): print('x' * 100)" >> overflow.list
$ echo fenugreek >> overflow.list
$ hg debugwalk 'listfile:overflow.list' 2>&1 | egrep -v '(^matcher: |^xxx)'
f fennel fennel exact
--- a/tests/test-win32text.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-win32text.t Mon Mar 19 08:07:18 2018 -0700
@@ -5,9 +5,9 @@
> import sys
>
> for path in sys.argv[1:]:
- > data = file(path, 'rb').read()
- > data = data.replace('\n', '\r\n')
- > file(path, 'wb').write(data)
+ > data = open(path, 'rb').read()
+ > data = data.replace(b'\n', b'\r\n')
+ > open(path, 'wb').write(data)
> EOF
$ echo '[hooks]' >> .hg/hgrc
$ echo 'pretxncommit.crlf = python:hgext.win32text.forbidcrlf' >> .hg/hgrc
@@ -118,7 +118,7 @@
$ hg rem f
$ hg ci -m 4
- $ $PYTHON -c 'file("bin", "wb").write("hello\x00\x0D\x0A")'
+ $ $PYTHON -c 'open("bin", "wb").write(b"hello\x00\x0D\x0A")'
$ hg add bin
$ hg ci -m 5
$ hg log -v
@@ -342,7 +342,7 @@
$ rm .hg/hgrc
$ (echo some; echo text) > f3
- $ $PYTHON -c 'file("f4.bat", "wb").write("rem empty\x0D\x0A")'
+ $ $PYTHON -c 'open("f4.bat", "wb").write(b"rem empty\x0D\x0A")'
$ hg add f3 f4.bat
$ hg ci -m 6
$ cat bin
@@ -395,7 +395,7 @@
$ cat f4.bat
rem empty\r (esc)
- $ $PYTHON -c 'file("f5.sh", "wb").write("# empty\x0D\x0A")'
+ $ $PYTHON -c 'open("f5.sh", "wb").write(b"# empty\x0D\x0A")'
$ hg add f5.sh
$ hg ci -m 7
$ cat f5.sh
--- a/tests/test-wireproto.py Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-wireproto.py Mon Mar 19 08:07:18 2018 -0700
@@ -1,8 +1,12 @@
from __future__ import absolute_import, print_function
from mercurial import (
+ error,
+ pycompat,
+ ui as uimod,
util,
wireproto,
+ wireprototypes,
)
stringio = util.stringio
@@ -11,20 +15,24 @@
self.args = args
def getargs(self, spec):
args = self.args
- args.setdefault('*', {})
+ args.setdefault(b'*', {})
names = spec.split()
return [args[n] for n in names]
+ def checkperm(self, perm):
+ pass
+
class clientpeer(wireproto.wirepeer):
- def __init__(self, serverrepo):
+ def __init__(self, serverrepo, ui):
self.serverrepo = serverrepo
+ self._ui = ui
@property
def ui(self):
- return self.serverrepo.ui
+ return self._ui
def url(self):
- return 'test'
+ return b'test'
def local(self):
return None
@@ -39,10 +47,17 @@
pass
def capabilities(self):
- return ['batch']
+ return [b'batch']
def _call(self, cmd, **args):
- return wireproto.dispatch(self.serverrepo, proto(args), cmd)
+ args = pycompat.byteskwargs(args)
+ res = wireproto.dispatch(self.serverrepo, proto(args), cmd)
+ if isinstance(res, wireprototypes.bytesresponse):
+ return res.data
+ elif isinstance(res, bytes):
+ return res
+ else:
  + raise error.Abort(b'dummy client does not support response type')
def _callstream(self, cmd, **args):
return stringio(self._call(cmd, **args))
@@ -50,31 +65,31 @@
@wireproto.batchable
def greet(self, name):
f = wireproto.future()
- yield {'name': mangle(name)}, f
+ yield {b'name': mangle(name)}, f
yield unmangle(f.value)
class serverrepo(object):
def greet(self, name):
- return "Hello, " + name
+ return b"Hello, " + name
def filtered(self, name):
return self
def mangle(s):
- return ''.join(chr(ord(c) + 1) for c in s)
+ return b''.join(pycompat.bytechr(ord(c) + 1) for c in pycompat.bytestr(s))
def unmangle(s):
- return ''.join(chr(ord(c) - 1) for c in s)
+ return b''.join(pycompat.bytechr(ord(c) - 1) for c in pycompat.bytestr(s))
def greet(repo, proto, name):
return mangle(repo.greet(unmangle(name)))
-wireproto.commands['greet'] = (greet, 'name',)
+wireproto.commands[b'greet'] = (greet, b'name',)
srv = serverrepo()
-clt = clientpeer(srv)
+clt = clientpeer(srv, uimod.ui())
-print(clt.greet("Foobar"))
+print(clt.greet(b"Foobar"))
b = clt.iterbatch()
-map(b.greet, ('Fo, =;:<o', 'Bar'))
+list(map(b.greet, (b'Fo, =;:<o', b'Bar')))
b.submit()
print([r for r in b.results()])
--- a/tests/test-worker.t Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/test-worker.t Mon Mar 19 08:07:18 2018 -0700
@@ -12,7 +12,7 @@
> def abort(ui, args):
> if args[0] == 0:
> # by first worker for test stability
- > raise error.Abort('known exception')
+ > raise error.Abort(b'known exception')
> return runme(ui, [])
> def exc(ui, args):
> if args[0] == 0:
@@ -21,25 +21,25 @@
> return runme(ui, [])
> def runme(ui, args):
> for arg in args:
- > ui.status('run\n')
+ > ui.status(b'run\n')
> yield 1, arg
> time.sleep(0.1) # easier to trigger killworkers code path
> functable = {
- > 'abort': abort,
- > 'exc': exc,
- > 'runme': runme,
+ > b'abort': abort,
+ > b'exc': exc,
+ > b'runme': runme,
> }
> cmdtable = {}
> command = registrar.command(cmdtable)
- > @command(b'test', [], 'hg test [COST] [FUNC]')
- > def t(ui, repo, cost=1.0, func='runme'):
+ > @command(b'test', [], b'hg test [COST] [FUNC]')
+ > def t(ui, repo, cost=1.0, func=b'runme'):
> cost = float(cost)
> func = functable[func]
- > ui.status('start\n')
+ > ui.status(b'start\n')
> runs = worker.worker(ui, cost, func, (ui,), range(8))
> for n, i in runs:
> pass
- > ui.status('done\n')
+ > ui.status(b'done\n')
> EOF
$ abspath=`pwd`/t.py
$ hg init
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-wsgirequest.py Mon Mar 19 08:07:18 2018 -0700
@@ -0,0 +1,416 @@
+from __future__ import absolute_import, print_function
+
+import unittest
+
+from mercurial.hgweb import (
+ request as requestmod,
+)
+from mercurial import (
+ error,
+)
+
+DEFAULT_ENV = {
+ r'REQUEST_METHOD': r'GET',
+ r'SERVER_NAME': r'testserver',
+ r'SERVER_PORT': r'80',
+ r'SERVER_PROTOCOL': r'http',
+ r'wsgi.version': (1, 0),
+ r'wsgi.url_scheme': r'http',
+ r'wsgi.input': None,
+ r'wsgi.errors': None,
+ r'wsgi.multithread': False,
+ r'wsgi.multiprocess': True,
+ r'wsgi.run_once': False,
+}
+
+def parse(env, reponame=None, altbaseurl=None, extra=None):
+ env = dict(env)
+ env.update(extra or {})
+
+ return requestmod.parserequestfromenv(env, reponame=reponame,
+ altbaseurl=altbaseurl)
+
+class ParseRequestTests(unittest.TestCase):
+ def testdefault(self):
+ r = parse(DEFAULT_ENV)
+ self.assertEqual(r.url, b'http://testserver')
+ self.assertEqual(r.baseurl, b'http://testserver')
+ self.assertEqual(r.advertisedurl, b'http://testserver')
+ self.assertEqual(r.advertisedbaseurl, b'http://testserver')
+ self.assertEqual(r.urlscheme, b'http')
+ self.assertEqual(r.method, b'GET')
+ self.assertIsNone(r.remoteuser)
+ self.assertIsNone(r.remotehost)
+ self.assertEqual(r.apppath, b'')
+ self.assertEqual(r.dispatchparts, [])
+ self.assertIsNone(r.dispatchpath)
+ self.assertIsNone(r.reponame)
+ self.assertEqual(r.querystring, b'')
+ self.assertEqual(len(r.qsparams), 0)
+ self.assertEqual(len(r.headers), 0)
+
+ def testcustomport(self):
+ r = parse(DEFAULT_ENV, extra={
+ r'SERVER_PORT': r'8000',
+ })
+
+ self.assertEqual(r.url, b'http://testserver:8000')
+ self.assertEqual(r.baseurl, b'http://testserver:8000')
+ self.assertEqual(r.advertisedurl, b'http://testserver:8000')
+ self.assertEqual(r.advertisedbaseurl, b'http://testserver:8000')
+
+ r = parse(DEFAULT_ENV, extra={
+ r'SERVER_PORT': r'4000',
+ r'wsgi.url_scheme': r'https',
+ })
+
+ self.assertEqual(r.url, b'https://testserver:4000')
+ self.assertEqual(r.baseurl, b'https://testserver:4000')
+ self.assertEqual(r.advertisedurl, b'https://testserver:4000')
+ self.assertEqual(r.advertisedbaseurl, b'https://testserver:4000')
+
+ def testhttphost(self):
+ r = parse(DEFAULT_ENV, extra={
+ r'HTTP_HOST': r'altserver',
+ })
+
+ self.assertEqual(r.url, b'http://altserver')
+ self.assertEqual(r.baseurl, b'http://altserver')
+ self.assertEqual(r.advertisedurl, b'http://testserver')
+ self.assertEqual(r.advertisedbaseurl, b'http://testserver')
+
+ def testscriptname(self):
+ r = parse(DEFAULT_ENV, extra={
+ r'SCRIPT_NAME': r'',
+ })
+
+ self.assertEqual(r.url, b'http://testserver')
+ self.assertEqual(r.baseurl, b'http://testserver')
+ self.assertEqual(r.advertisedurl, b'http://testserver')
+ self.assertEqual(r.advertisedbaseurl, b'http://testserver')
+ self.assertEqual(r.apppath, b'')
+ self.assertEqual(r.dispatchparts, [])
+ self.assertIsNone(r.dispatchpath)
+
+ r = parse(DEFAULT_ENV, extra={
+ r'SCRIPT_NAME': r'/script',
+ })
+
+ self.assertEqual(r.url, b'http://testserver/script')
+ self.assertEqual(r.baseurl, b'http://testserver')
+ self.assertEqual(r.advertisedurl, b'http://testserver/script')
+ self.assertEqual(r.advertisedbaseurl, b'http://testserver')
+ self.assertEqual(r.apppath, b'/script')
+ self.assertEqual(r.dispatchparts, [])
+ self.assertIsNone(r.dispatchpath)
+
+ r = parse(DEFAULT_ENV, extra={
+ r'SCRIPT_NAME': r'/multiple words',
+ })
+
+ self.assertEqual(r.url, b'http://testserver/multiple%20words')
+ self.assertEqual(r.baseurl, b'http://testserver')
+ self.assertEqual(r.advertisedurl, b'http://testserver/multiple%20words')
+ self.assertEqual(r.advertisedbaseurl, b'http://testserver')
+ self.assertEqual(r.apppath, b'/multiple words')
+ self.assertEqual(r.dispatchparts, [])
+ self.assertIsNone(r.dispatchpath)
+
+ def testpathinfo(self):
+ r = parse(DEFAULT_ENV, extra={
+ r'PATH_INFO': r'',
+ })
+
+ self.assertEqual(r.url, b'http://testserver')
+ self.assertEqual(r.baseurl, b'http://testserver')
+ self.assertEqual(r.advertisedurl, b'http://testserver')
+ self.assertEqual(r.advertisedbaseurl, b'http://testserver')
+ self.assertEqual(r.apppath, b'')
+ self.assertEqual(r.dispatchparts, [])
+ self.assertEqual(r.dispatchpath, b'')
+
+ r = parse(DEFAULT_ENV, extra={
+ r'PATH_INFO': r'/pathinfo',
+ })
+
+ self.assertEqual(r.url, b'http://testserver/pathinfo')
+ self.assertEqual(r.baseurl, b'http://testserver')
+ self.assertEqual(r.advertisedurl, b'http://testserver/pathinfo')
+ self.assertEqual(r.advertisedbaseurl, b'http://testserver')
+ self.assertEqual(r.apppath, b'')
+ self.assertEqual(r.dispatchparts, [b'pathinfo'])
+ self.assertEqual(r.dispatchpath, b'pathinfo')
+
+ r = parse(DEFAULT_ENV, extra={
+ r'PATH_INFO': r'/one/two/',
+ })
+
+ self.assertEqual(r.url, b'http://testserver/one/two/')
+ self.assertEqual(r.baseurl, b'http://testserver')
+ self.assertEqual(r.advertisedurl, b'http://testserver/one/two/')
+ self.assertEqual(r.advertisedbaseurl, b'http://testserver')
+ self.assertEqual(r.apppath, b'')
+ self.assertEqual(r.dispatchparts, [b'one', b'two'])
+ self.assertEqual(r.dispatchpath, b'one/two')
+
+ def testscriptandpathinfo(self):
+ r = parse(DEFAULT_ENV, extra={
+ r'SCRIPT_NAME': r'/script',
+ r'PATH_INFO': r'/pathinfo',
+ })
+
+ self.assertEqual(r.url, b'http://testserver/script/pathinfo')
+ self.assertEqual(r.baseurl, b'http://testserver')
+ self.assertEqual(r.advertisedurl, b'http://testserver/script/pathinfo')
+ self.assertEqual(r.advertisedbaseurl, b'http://testserver')
+ self.assertEqual(r.apppath, b'/script')
+ self.assertEqual(r.dispatchparts, [b'pathinfo'])
+ self.assertEqual(r.dispatchpath, b'pathinfo')
+
+ r = parse(DEFAULT_ENV, extra={
+ r'SCRIPT_NAME': r'/script1/script2',
+ r'PATH_INFO': r'/path1/path2',
+ })
+
+ self.assertEqual(r.url,
+ b'http://testserver/script1/script2/path1/path2')
+ self.assertEqual(r.baseurl, b'http://testserver')
+ self.assertEqual(r.advertisedurl,
+ b'http://testserver/script1/script2/path1/path2')
+ self.assertEqual(r.advertisedbaseurl, b'http://testserver')
+ self.assertEqual(r.apppath, b'/script1/script2')
+ self.assertEqual(r.dispatchparts, [b'path1', b'path2'])
+ self.assertEqual(r.dispatchpath, b'path1/path2')
+
+ r = parse(DEFAULT_ENV, extra={
+ r'HTTP_HOST': r'hostserver',
+ r'SCRIPT_NAME': r'/script',
+ r'PATH_INFO': r'/pathinfo',
+ })
+
+ self.assertEqual(r.url, b'http://hostserver/script/pathinfo')
+ self.assertEqual(r.baseurl, b'http://hostserver')
+ self.assertEqual(r.advertisedurl, b'http://testserver/script/pathinfo')
+ self.assertEqual(r.advertisedbaseurl, b'http://testserver')
+ self.assertEqual(r.apppath, b'/script')
+ self.assertEqual(r.dispatchparts, [b'pathinfo'])
+ self.assertEqual(r.dispatchpath, b'pathinfo')
+
+ def testreponame(self):
+ """repository path components get stripped from URL."""
+
+ with self.assertRaisesRegexp(error.ProgrammingError,
+ b'reponame requires PATH_INFO'):
+ parse(DEFAULT_ENV, reponame=b'repo')
+
+ with self.assertRaisesRegexp(error.ProgrammingError,
+ b'PATH_INFO does not begin with repo '
+ b'name'):
+ parse(DEFAULT_ENV, reponame=b'repo', extra={
+ r'PATH_INFO': r'/pathinfo',
+ })
+
+ with self.assertRaisesRegexp(error.ProgrammingError,
+ b'reponame prefix of PATH_INFO'):
+ parse(DEFAULT_ENV, reponame=b'repo', extra={
+ r'PATH_INFO': r'/repoextra/path',
+ })
+
+ r = parse(DEFAULT_ENV, reponame=b'repo', extra={
+ r'PATH_INFO': r'/repo/path1/path2',
+ })
+
+ self.assertEqual(r.url, b'http://testserver/repo/path1/path2')
+ self.assertEqual(r.baseurl, b'http://testserver')
+ self.assertEqual(r.advertisedurl, b'http://testserver/repo/path1/path2')
+ self.assertEqual(r.advertisedbaseurl, b'http://testserver')
+ self.assertEqual(r.apppath, b'/repo')
+ self.assertEqual(r.dispatchparts, [b'path1', b'path2'])
+ self.assertEqual(r.dispatchpath, b'path1/path2')
+ self.assertEqual(r.reponame, b'repo')
+
+ r = parse(DEFAULT_ENV, reponame=b'prefix/repo', extra={
+ r'PATH_INFO': r'/prefix/repo/path1/path2',
+ })
+
+ self.assertEqual(r.url, b'http://testserver/prefix/repo/path1/path2')
+ self.assertEqual(r.baseurl, b'http://testserver')
+ self.assertEqual(r.advertisedurl,
+ b'http://testserver/prefix/repo/path1/path2')
+ self.assertEqual(r.advertisedbaseurl, b'http://testserver')
+ self.assertEqual(r.apppath, b'/prefix/repo')
+ self.assertEqual(r.dispatchparts, [b'path1', b'path2'])
+ self.assertEqual(r.dispatchpath, b'path1/path2')
+ self.assertEqual(r.reponame, b'prefix/repo')
+
+ def testaltbaseurl(self):
+ # Simple hostname remap.
+ r = parse(DEFAULT_ENV, altbaseurl='http://altserver')
+
+ self.assertEqual(r.url, b'http://testserver')
+ self.assertEqual(r.baseurl, b'http://testserver')
+ self.assertEqual(r.advertisedurl, b'http://altserver')
+ self.assertEqual(r.advertisedbaseurl, b'http://altserver')
+ self.assertEqual(r.urlscheme, b'http')
+ self.assertEqual(r.apppath, b'')
+ self.assertEqual(r.dispatchparts, [])
+ self.assertIsNone(r.dispatchpath)
+ self.assertIsNone(r.reponame)
+
+ # With a custom port.
+ r = parse(DEFAULT_ENV, altbaseurl='http://altserver:8000')
+ self.assertEqual(r.url, b'http://testserver')
+ self.assertEqual(r.baseurl, b'http://testserver')
+ self.assertEqual(r.advertisedurl, b'http://altserver:8000')
+ self.assertEqual(r.advertisedbaseurl, b'http://altserver:8000')
+ self.assertEqual(r.urlscheme, b'http')
+ self.assertEqual(r.apppath, b'')
+ self.assertEqual(r.dispatchparts, [])
+ self.assertIsNone(r.dispatchpath)
+ self.assertIsNone(r.reponame)
+
+ # With a changed protocol.
+ r = parse(DEFAULT_ENV, altbaseurl='https://altserver')
+ self.assertEqual(r.url, b'http://testserver')
+ self.assertEqual(r.baseurl, b'http://testserver')
+ self.assertEqual(r.advertisedurl, b'https://altserver')
+ self.assertEqual(r.advertisedbaseurl, b'https://altserver')
+ # URL scheme is defined as the actual scheme, not advertised.
+ self.assertEqual(r.urlscheme, b'http')
+ self.assertEqual(r.apppath, b'')
+ self.assertEqual(r.dispatchparts, [])
+ self.assertIsNone(r.dispatchpath)
+ self.assertIsNone(r.reponame)
+
+ # Need to specify explicit port number for proper https:// alt URLs.
+ r = parse(DEFAULT_ENV, altbaseurl='https://altserver:443')
+ self.assertEqual(r.url, b'http://testserver')
+ self.assertEqual(r.baseurl, b'http://testserver')
+ self.assertEqual(r.advertisedurl, b'https://altserver')
+ self.assertEqual(r.advertisedbaseurl, b'https://altserver')
+ self.assertEqual(r.urlscheme, b'http')
+ self.assertEqual(r.apppath, b'')
+ self.assertEqual(r.dispatchparts, [])
+ self.assertIsNone(r.dispatchpath)
+ self.assertIsNone(r.reponame)
+
+ # With only PATH_INFO defined.
+ r = parse(DEFAULT_ENV, altbaseurl='http://altserver', extra={
+ r'PATH_INFO': r'/path1/path2',
+ })
+ self.assertEqual(r.url, b'http://testserver/path1/path2')
+ self.assertEqual(r.baseurl, b'http://testserver')
+ self.assertEqual(r.advertisedurl, b'http://altserver/path1/path2')
+ self.assertEqual(r.advertisedbaseurl, b'http://altserver')
+ self.assertEqual(r.urlscheme, b'http')
+ self.assertEqual(r.apppath, b'')
+ self.assertEqual(r.dispatchparts, [b'path1', b'path2'])
+ self.assertEqual(r.dispatchpath, b'path1/path2')
+ self.assertIsNone(r.reponame)
+
+ # Path on alt URL.
+ r = parse(DEFAULT_ENV, altbaseurl='http://altserver/altpath')
+ self.assertEqual(r.url, b'http://testserver')
+ self.assertEqual(r.baseurl, b'http://testserver')
+ self.assertEqual(r.advertisedurl, b'http://altserver/altpath')
+ self.assertEqual(r.advertisedbaseurl, b'http://altserver')
+ self.assertEqual(r.urlscheme, b'http')
+ self.assertEqual(r.apppath, b'/altpath')
+ self.assertEqual(r.dispatchparts, [])
+ self.assertIsNone(r.dispatchpath)
+ self.assertIsNone(r.reponame)
+
+ # With a trailing slash.
+ r = parse(DEFAULT_ENV, altbaseurl='http://altserver/altpath/')
+ self.assertEqual(r.url, b'http://testserver')
+ self.assertEqual(r.baseurl, b'http://testserver')
+ self.assertEqual(r.advertisedurl, b'http://altserver/altpath/')
+ self.assertEqual(r.advertisedbaseurl, b'http://altserver')
+ self.assertEqual(r.urlscheme, b'http')
+ self.assertEqual(r.apppath, b'/altpath/')
+ self.assertEqual(r.dispatchparts, [])
+ self.assertIsNone(r.dispatchpath)
+ self.assertIsNone(r.reponame)
+
+ # PATH_INFO + path on alt URL.
+ r = parse(DEFAULT_ENV, altbaseurl='http://altserver/altpath', extra={
+ r'PATH_INFO': r'/path1/path2',
+ })
+ self.assertEqual(r.url, b'http://testserver/path1/path2')
+ self.assertEqual(r.baseurl, b'http://testserver')
+ self.assertEqual(r.advertisedurl,
+ b'http://altserver/altpath/path1/path2')
+ self.assertEqual(r.advertisedbaseurl, b'http://altserver')
+ self.assertEqual(r.urlscheme, b'http')
+ self.assertEqual(r.apppath, b'/altpath')
+ self.assertEqual(r.dispatchparts, [b'path1', b'path2'])
+ self.assertEqual(r.dispatchpath, b'path1/path2')
+ self.assertIsNone(r.reponame)
+
+ # PATH_INFO + path on alt URL with trailing slash.
+ r = parse(DEFAULT_ENV, altbaseurl='http://altserver/altpath/', extra={
+ r'PATH_INFO': r'/path1/path2',
+ })
+ self.assertEqual(r.url, b'http://testserver/path1/path2')
+ self.assertEqual(r.baseurl, b'http://testserver')
+ self.assertEqual(r.advertisedurl,
+ b'http://altserver/altpath//path1/path2')
+ self.assertEqual(r.advertisedbaseurl, b'http://altserver')
+ self.assertEqual(r.urlscheme, b'http')
+ self.assertEqual(r.apppath, b'/altpath/')
+ self.assertEqual(r.dispatchparts, [b'path1', b'path2'])
+ self.assertEqual(r.dispatchpath, b'path1/path2')
+ self.assertIsNone(r.reponame)
+
+ # Local SCRIPT_NAME is ignored.
+ r = parse(DEFAULT_ENV, altbaseurl='http://altserver', extra={
+ r'SCRIPT_NAME': r'/script',
+ r'PATH_INFO': r'/path1/path2',
+ })
+ self.assertEqual(r.url, b'http://testserver/script/path1/path2')
+ self.assertEqual(r.baseurl, b'http://testserver')
+ self.assertEqual(r.advertisedurl, b'http://altserver/path1/path2')
+ self.assertEqual(r.advertisedbaseurl, b'http://altserver')
+ self.assertEqual(r.urlscheme, b'http')
+ self.assertEqual(r.apppath, b'')
+ self.assertEqual(r.dispatchparts, [b'path1', b'path2'])
+ self.assertEqual(r.dispatchpath, b'path1/path2')
+ self.assertIsNone(r.reponame)
+
+ # Use remote's path for script name, app path
+ r = parse(DEFAULT_ENV, altbaseurl='http://altserver/altroot', extra={
+ r'SCRIPT_NAME': r'/script',
+ r'PATH_INFO': r'/path1/path2',
+ })
+ self.assertEqual(r.url, b'http://testserver/script/path1/path2')
+ self.assertEqual(r.baseurl, b'http://testserver')
+ self.assertEqual(r.advertisedurl,
+ b'http://altserver/altroot/path1/path2')
+ self.assertEqual(r.advertisedbaseurl, b'http://altserver')
+ self.assertEqual(r.urlscheme, b'http')
+ self.assertEqual(r.apppath, b'/altroot')
+ self.assertEqual(r.dispatchparts, [b'path1', b'path2'])
+ self.assertEqual(r.dispatchpath, b'path1/path2')
+ self.assertIsNone(r.reponame)
+
+ # reponame is factored in properly.
+ r = parse(DEFAULT_ENV, reponame=b'repo',
+ altbaseurl='http://altserver/altroot',
+ extra={
+ r'SCRIPT_NAME': r'/script',
+ r'PATH_INFO': r'/repo/path1/path2',
+ })
+
+ self.assertEqual(r.url, b'http://testserver/script/repo/path1/path2')
+ self.assertEqual(r.baseurl, b'http://testserver')
+ self.assertEqual(r.advertisedurl,
+ b'http://altserver/altroot/repo/path1/path2')
+ self.assertEqual(r.advertisedbaseurl, b'http://altserver')
+ self.assertEqual(r.apppath, b'/altroot/repo')
+ self.assertEqual(r.dispatchparts, [b'path1', b'path2'])
+ self.assertEqual(r.dispatchpath, b'path1/path2')
+ self.assertEqual(r.reponame, b'repo')
+
+if __name__ == '__main__':
+ import silenttestrunner
+ silenttestrunner.main(__name__)
--- a/tests/testlib/ext-phase-report.py Thu Mar 15 22:35:07 2018 -0700
+++ b/tests/testlib/ext-phase-report.py Mon Mar 19 08:07:18 2018 -0700
@@ -5,18 +5,18 @@
def reposetup(ui, repo):
def reportphasemove(tr):
- for rev, move in sorted(tr.changes['phases'].iteritems()):
+ for rev, move in sorted(tr.changes[b'phases'].items()):
if move[0] is None:
- ui.write(('test-debug-phase: new rev %d: x -> %d\n'
+ ui.write((b'test-debug-phase: new rev %d: x -> %d\n'
% (rev, move[1])))
else:
- ui.write(('test-debug-phase: move rev %d: %s -> %d\n'
+ ui.write((b'test-debug-phase: move rev %d: %d -> %d\n'
% (rev, move[0], move[1])))
class reportphaserepo(repo.__class__):
def transaction(self, *args, **kwargs):
tr = super(reportphaserepo, self).transaction(*args, **kwargs)
- tr.addpostclose('report-phase', reportphasemove)
+ tr.addpostclose(b'report-phase', reportphasemove)
return tr
repo.__class__ = reportphaserepo