--- a/contrib/dirstatenonnormalcheck.py Fri Aug 27 13:51:44 2021 -0700
+++ b/contrib/dirstatenonnormalcheck.py Mon Aug 30 12:25:57 2021 +0200
@@ -59,11 +59,13 @@
if paranoid:
# We don't do all these checks when paranoid is disable as it would
# make the extension run very slowly on large repos
- extensions.wrapfunction(dirstatecl, 'normallookup', _checkdirstate)
- extensions.wrapfunction(dirstatecl, 'otherparent', _checkdirstate)
- extensions.wrapfunction(dirstatecl, 'normal', _checkdirstate)
extensions.wrapfunction(dirstatecl, 'write', _checkdirstate)
- extensions.wrapfunction(dirstatecl, 'add', _checkdirstate)
- extensions.wrapfunction(dirstatecl, 'remove', _checkdirstate)
- extensions.wrapfunction(dirstatecl, 'merge', _checkdirstate)
- extensions.wrapfunction(dirstatecl, 'drop', _checkdirstate)
+ extensions.wrapfunction(dirstatecl, 'set_tracked', _checkdirstate)
+ extensions.wrapfunction(dirstatecl, 'set_untracked', _checkdirstate)
+ extensions.wrapfunction(
+ dirstatecl, 'set_possibly_dirty', _checkdirstate
+ )
+ extensions.wrapfunction(
+ dirstatecl, 'update_file_p1', _checkdirstate
+ )
+ extensions.wrapfunction(dirstatecl, 'update_file', _checkdirstate)
--- a/hgext/fastannotate/protocol.py Fri Aug 27 13:51:44 2021 -0700
+++ b/hgext/fastannotate/protocol.py Mon Aug 30 12:25:57 2021 +0200
@@ -140,12 +140,10 @@
def getannotate(self, path, lastnode=None):
if not self.capable(b'getannotate'):
ui.warn(_(b'remote peer cannot provide annotate cache\n'))
- yield None, None
+ return None, None
else:
args = {b'path': path, b'lastnode': lastnode or b''}
- f = wireprotov1peer.future()
- yield args, f
- yield _parseresponse(f.value)
+ return args, _parseresponse
peer.__class__ = fastannotatepeer
--- a/hgext/infinitepush/__init__.py Fri Aug 27 13:51:44 2021 -0700
+++ b/hgext/infinitepush/__init__.py Mon Aug 30 12:25:57 2021 +0200
@@ -431,18 +431,19 @@
@wireprotov1peer.batchable
def listkeyspatterns(self, namespace, patterns):
if not self.capable(b'pushkey'):
- yield {}, None
- f = wireprotov1peer.future()
+ return {}, None
self.ui.debug(b'preparing listkeys for "%s"\n' % namespace)
- yield {
+
+ def decode(d):
+ self.ui.debug(
+ b'received listkey for "%s": %i bytes\n' % (namespace, len(d))
+ )
+ return pushkey.decodekeys(d)
+
+ return {
b'namespace': encoding.fromlocal(namespace),
b'patterns': wireprototypes.encodelist(patterns),
- }, f
- d = f.value
- self.ui.debug(
- b'received listkey for "%s": %i bytes\n' % (namespace, len(d))
- )
- yield pushkey.decodekeys(d)
+ }, decode
def _readbundlerevs(bundlerepo):
--- a/hgext/largefiles/proto.py Fri Aug 27 13:51:44 2021 -0700
+++ b/hgext/largefiles/proto.py Mon Aug 30 12:25:57 2021 +0200
@@ -184,17 +184,18 @@
@wireprotov1peer.batchable
def statlfile(self, sha):
- f = wireprotov1peer.future()
+ def decode(d):
+ try:
+ return int(d)
+ except (ValueError, urlerr.httperror):
+ # If the server returns anything but an integer followed by a
+            # newline, it's not speaking our language; if we get
+ # an HTTP error, we can't be sure the largefile is present;
+ # either way, consider it missing.
+ return 2
+
result = {b'sha': sha}
- yield result, f
- try:
- yield int(f.value)
- except (ValueError, urlerr.httperror):
- # If the server returns anything but an integer followed by a
- # newline, newline, it's not speaking our language; if we get
- # an HTTP error, we can't be sure the largefile is present;
- # either way, consider it missing.
- yield 2
+ return result, decode
repo.__class__ = lfileswirerepository
--- a/hgext/narrow/narrowcommands.py Fri Aug 27 13:51:44 2021 -0700
+++ b/hgext/narrow/narrowcommands.py Mon Aug 30 12:25:57 2021 +0200
@@ -289,7 +289,7 @@
repair.strip(ui, unfi, tostrip, topic=b'narrow', backup=backup)
todelete = []
- for t, f, f2, size in repo.store.datafiles():
+ for t, f, size in repo.store.datafiles():
if f.startswith(b'data/'):
file = f[5:-2]
if not newmatch(file):
--- a/hgext/remotefilelog/contentstore.py Fri Aug 27 13:51:44 2021 -0700
+++ b/hgext/remotefilelog/contentstore.py Mon Aug 30 12:25:57 2021 +0200
@@ -378,7 +378,7 @@
ledger.markdataentry(self, treename, node)
ledger.markhistoryentry(self, treename, node)
- for t, path, encoded, size in self._store.datafiles():
+ for t, path, size in self._store.datafiles():
if path[:5] != b'meta/' or path[-2:] != b'.i':
continue
--- a/hgext/remotefilelog/fileserverclient.py Fri Aug 27 13:51:44 2021 -0700
+++ b/hgext/remotefilelog/fileserverclient.py Mon Aug 30 12:25:57 2021 +0200
@@ -63,12 +63,14 @@
raise error.Abort(
b'configured remotefile server does not support getfile'
)
- f = wireprotov1peer.future()
- yield {b'file': file, b'node': node}, f
- code, data = f.value.split(b'\0', 1)
- if int(code):
- raise error.LookupError(file, node, data)
- yield data
+
+ def decode(d):
+ code, data = d.split(b'\0', 1)
+ if int(code):
+ raise error.LookupError(file, node, data)
+ return data
+
+ return {b'file': file, b'node': node}, decode
@wireprotov1peer.batchable
def x_rfl_getflogheads(self, path):
@@ -77,10 +79,11 @@
b'configured remotefile server does not '
b'support getflogheads'
)
- f = wireprotov1peer.future()
- yield {b'path': path}, f
- heads = f.value.split(b'\n') if f.value else []
- yield heads
+
+ def decode(d):
+ return d.split(b'\n') if d else []
+
+ return {b'path': path}, decode
def _updatecallstreamopts(self, command, opts):
if command != b'getbundle':
--- a/hgext/remotefilelog/remotefilelogserver.py Fri Aug 27 13:51:44 2021 -0700
+++ b/hgext/remotefilelog/remotefilelogserver.py Mon Aug 30 12:25:57 2021 +0200
@@ -166,24 +166,24 @@
n = util.pconvert(fp[striplen:])
d = store.decodedir(n)
t = store.FILETYPE_OTHER
- yield (t, d, n, st.st_size)
+ yield (t, d, st.st_size)
if kind == stat.S_IFDIR:
visit.append(fp)
if scmutil.istreemanifest(repo):
- for (t, u, e, s) in repo.store.datafiles():
+ for (t, u, s) in repo.store.datafiles():
if u.startswith(b'meta/') and (
u.endswith(b'.i') or u.endswith(b'.d')
):
- yield (t, u, e, s)
+ yield (t, u, s)
# Return .d and .i files that do not match the shallow pattern
match = state.match
if match and not match.always():
- for (t, u, e, s) in repo.store.datafiles():
+ for (t, u, s) in repo.store.datafiles():
f = u[5:-2] # trim data/... and .i/.d
if not state.match(f):
- yield (t, u, e, s)
+ yield (t, u, s)
for x in repo.store.topfiles():
if state.noflatmf and x[1][:11] == b'00manifest.':
--- a/hgext/sparse.py Fri Aug 27 13:51:44 2021 -0700
+++ b/hgext/sparse.py Mon Aug 30 12:25:57 2021 +0200
@@ -255,14 +255,9 @@
# Prevent adding files that are outside the sparse checkout
editfuncs = [
- b'normal',
b'set_tracked',
b'set_untracked',
- b'add',
- b'normallookup',
b'copy',
- b'remove',
- b'merge',
]
hint = _(
b'include file with `hg debugsparse --include <pattern>` or use '
--- a/mercurial/cext/parsers.c Fri Aug 27 13:51:44 2021 -0700
+++ b/mercurial/cext/parsers.c Mon Aug 30 12:25:57 2021 +0200
@@ -65,21 +65,100 @@
/* We do all the initialization here and not a tp_init function because
* dirstate_item is immutable. */
dirstateItemObject *t;
- char state;
- int size, mode, mtime;
- if (!PyArg_ParseTuple(args, "ciii", &state, &mode, &size, &mtime)) {
+ int wc_tracked;
+ int p1_tracked;
+ int p2_tracked;
+ int merged;
+ int clean_p1;
+ int clean_p2;
+ int possibly_dirty;
+ PyObject *parentfiledata;
+ static char *keywords_name[] = {
+ "wc_tracked", "p1_tracked", "p2_tracked",
+ "merged", "clean_p1", "clean_p2",
+ "possibly_dirty", "parentfiledata", NULL,
+ };
+ wc_tracked = 0;
+ p1_tracked = 0;
+ p2_tracked = 0;
+ merged = 0;
+ clean_p1 = 0;
+ clean_p2 = 0;
+ possibly_dirty = 0;
+ parentfiledata = Py_None;
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "iiiiiiiO", keywords_name,
+ &wc_tracked, &p1_tracked, &p2_tracked,
+ &merged, &clean_p1, &clean_p2,
+ &possibly_dirty, &parentfiledata
+
+ )) {
return NULL;
}
-
+ if (merged && (clean_p1 || clean_p2)) {
+ PyErr_SetString(PyExc_RuntimeError,
+ "`merged` argument incompatible with "
+ "`clean_p1`/`clean_p2`");
+ return NULL;
+ }
t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1);
if (!t) {
return NULL;
}
- t->state = state;
- t->mode = mode;
- t->size = size;
- t->mtime = mtime;
-
+ t->state = 'r';
+ t->mode = 0;
+ t->size = dirstate_v1_nonnormal;
+ t->mtime = ambiguous_time;
+ if (!(p1_tracked || p2_tracked || wc_tracked)) {
+ /* Nothing special to do, file is untracked */
+ } else if (merged) {
+ t->state = 'm';
+ t->size = dirstate_v1_from_p2;
+ t->mtime = ambiguous_time;
+ } else if (!(p1_tracked || p2_tracked) && wc_tracked) {
+ t->state = 'a';
+ t->size = dirstate_v1_nonnormal;
+ t->mtime = ambiguous_time;
+ } else if ((p1_tracked || p2_tracked) && !wc_tracked) {
+ t->state = 'r';
+ t->size = 0;
+ t->mtime = 0;
+ } else if (clean_p2 && wc_tracked) {
+ t->state = 'n';
+ t->size = dirstate_v1_from_p2;
+ t->mtime = ambiguous_time;
+ } else if (!p1_tracked && p2_tracked && wc_tracked) {
+ t->state = 'n';
+ t->size = dirstate_v1_from_p2;
+ t->mtime = ambiguous_time;
+ } else if (possibly_dirty) {
+ t->state = 'n';
+ t->size = dirstate_v1_nonnormal;
+ t->mtime = ambiguous_time;
+ } else if (wc_tracked) {
+ /* this is a "normal" file */
+ if (parentfiledata == Py_None) {
+ PyErr_SetString(
+ PyExc_RuntimeError,
+ "failed to pass parentfiledata for a normal file");
+ return NULL;
+ }
+ if (!PyTuple_CheckExact(parentfiledata)) {
+ PyErr_SetString(
+ PyExc_TypeError,
+ "parentfiledata should be a Tuple or None");
+ return NULL;
+ }
+ t->state = 'n';
+ t->mode =
+ (int)PyLong_AsLong(PyTuple_GetItem(parentfiledata, 0));
+ t->size =
+ (int)PyLong_AsLong(PyTuple_GetItem(parentfiledata, 1));
+ t->mtime =
+ (int)PyLong_AsLong(PyTuple_GetItem(parentfiledata, 2));
+ } else {
+ PyErr_SetString(PyExc_RuntimeError, "unreachable");
+ return NULL;
+ }
return (PyObject *)t;
}
@@ -142,23 +221,6 @@
return PyInt_FromLong(self->mtime);
};
-static PyObject *dm_nonnormal(dirstateItemObject *self)
-{
- if (self->state != 'n' || self->mtime == ambiguous_time) {
- Py_RETURN_TRUE;
- } else {
- Py_RETURN_FALSE;
- }
-};
-static PyObject *dm_otherparent(dirstateItemObject *self)
-{
- if (self->size == dirstate_v1_from_p2) {
- Py_RETURN_TRUE;
- } else {
- Py_RETURN_FALSE;
- }
-};
-
static PyObject *dirstate_item_need_delay(dirstateItemObject *self,
PyObject *value)
{
@@ -215,6 +277,102 @@
return (PyObject *)t;
};
+/* constructor to help legacy API to build a new "added" item
+
+Should eventually be removed */
+static PyObject *dirstate_item_new_added(PyTypeObject *subtype)
+{
+ dirstateItemObject *t;
+ t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1);
+ if (!t) {
+ return NULL;
+ }
+ t->state = 'a';
+ t->mode = 0;
+ t->size = dirstate_v1_nonnormal;
+ t->mtime = ambiguous_time;
+ return (PyObject *)t;
+};
+
+/* constructor to help legacy API to build a new "merged" item
+
+Should eventually be removed */
+static PyObject *dirstate_item_new_merged(PyTypeObject *subtype)
+{
+ dirstateItemObject *t;
+ t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1);
+ if (!t) {
+ return NULL;
+ }
+ t->state = 'm';
+ t->mode = 0;
+ t->size = dirstate_v1_from_p2;
+ t->mtime = ambiguous_time;
+ return (PyObject *)t;
+};
+
+/* constructor to help legacy API to build a new "from_p2" item
+
+Should eventually be removed */
+static PyObject *dirstate_item_new_from_p2(PyTypeObject *subtype)
+{
+ /* We do all the initialization here and not a tp_init function because
+ * dirstate_item is immutable. */
+ dirstateItemObject *t;
+ t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1);
+ if (!t) {
+ return NULL;
+ }
+ t->state = 'n';
+ t->mode = 0;
+ t->size = dirstate_v1_from_p2;
+ t->mtime = ambiguous_time;
+ return (PyObject *)t;
+};
+
+/* constructor to help legacy API to build a new "possibly_dirty" item
+
+Should eventually be removed */
+static PyObject *dirstate_item_new_possibly_dirty(PyTypeObject *subtype)
+{
+ /* We do all the initialization here and not a tp_init function because
+ * dirstate_item is immutable. */
+ dirstateItemObject *t;
+ t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1);
+ if (!t) {
+ return NULL;
+ }
+ t->state = 'n';
+ t->mode = 0;
+ t->size = dirstate_v1_nonnormal;
+ t->mtime = ambiguous_time;
+ return (PyObject *)t;
+};
+
+/* constructor to help legacy API to build a new "normal" item
+
+Should eventually be removed */
+static PyObject *dirstate_item_new_normal(PyTypeObject *subtype, PyObject *args)
+{
+ /* We do all the initialization here and not a tp_init function because
+ * dirstate_item is immutable. */
+ dirstateItemObject *t;
+ int size, mode, mtime;
+ if (!PyArg_ParseTuple(args, "iii", &mode, &size, &mtime)) {
+ return NULL;
+ }
+
+ t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1);
+ if (!t) {
+ return NULL;
+ }
+ t->state = 'n';
+ t->mode = mode;
+ t->size = size;
+ t->mtime = mtime;
+ return (PyObject *)t;
+};
+
/* This means the next status call will have to actually check its content
to make sure it is correct. */
static PyObject *dirstate_item_set_possibly_dirty(dirstateItemObject *self)
@@ -223,6 +381,21 @@
Py_RETURN_NONE;
}
+static PyObject *dirstate_item_set_untracked(dirstateItemObject *self)
+{
+ if (self->state == 'm') {
+ self->size = dirstate_v1_nonnormal;
+ } else if (self->state == 'n' && self->size == dirstate_v1_from_p2) {
+ self->size = dirstate_v1_from_p2;
+ } else {
+ self->size = 0;
+ }
+ self->state = 'r';
+ self->mode = 0;
+ self->mtime = 0;
+ Py_RETURN_NONE;
+}
+
static PyMethodDef dirstate_item_methods[] = {
{"v1_state", (PyCFunction)dirstate_item_v1_state, METH_NOARGS,
"return a \"state\" suitable for v1 serialization"},
@@ -234,14 +407,27 @@
"return a \"mtime\" suitable for v1 serialization"},
{"need_delay", (PyCFunction)dirstate_item_need_delay, METH_O,
"True if the stored mtime would be ambiguous with the current time"},
- {"from_v1_data", (PyCFunction)dirstate_item_from_v1_meth, METH_O,
- "build a new DirstateItem object from V1 data"},
+ {"from_v1_data", (PyCFunction)dirstate_item_from_v1_meth,
+ METH_VARARGS | METH_CLASS, "build a new DirstateItem object from V1 data"},
+ {"new_added", (PyCFunction)dirstate_item_new_added,
+ METH_NOARGS | METH_CLASS,
+ "constructor to help legacy API to build a new \"added\" item"},
+ {"new_merged", (PyCFunction)dirstate_item_new_merged,
+ METH_NOARGS | METH_CLASS,
+ "constructor to help legacy API to build a new \"merged\" item"},
+ {"new_from_p2", (PyCFunction)dirstate_item_new_from_p2,
+ METH_NOARGS | METH_CLASS,
+ "constructor to help legacy API to build a new \"from_p2\" item"},
+ {"new_possibly_dirty", (PyCFunction)dirstate_item_new_possibly_dirty,
+ METH_NOARGS | METH_CLASS,
+ "constructor to help legacy API to build a new \"possibly_dirty\" item"},
+ {"new_normal", (PyCFunction)dirstate_item_new_normal,
+ METH_VARARGS | METH_CLASS,
+ "constructor to help legacy API to build a new \"normal\" item"},
{"set_possibly_dirty", (PyCFunction)dirstate_item_set_possibly_dirty,
METH_NOARGS, "mark a file as \"possibly dirty\""},
- {"dm_nonnormal", (PyCFunction)dm_nonnormal, METH_NOARGS,
- "True is the entry is non-normal in the dirstatemap sense"},
- {"dm_otherparent", (PyCFunction)dm_otherparent, METH_NOARGS,
- "True is the entry is `otherparent` in the dirstatemap sense"},
+ {"set_untracked", (PyCFunction)dirstate_item_set_untracked, METH_NOARGS,
+ "mark a file as \"untracked\""},
{NULL} /* Sentinel */
};
@@ -328,6 +514,23 @@
}
};
+static PyObject *dm_nonnormal(dirstateItemObject *self)
+{
+ if (self->state != 'n' || self->mtime == ambiguous_time) {
+ Py_RETURN_TRUE;
+ } else {
+ Py_RETURN_FALSE;
+ }
+};
+static PyObject *dm_otherparent(dirstateItemObject *self)
+{
+ if (self->size == dirstate_v1_from_p2) {
+ Py_RETURN_TRUE;
+ } else {
+ Py_RETURN_FALSE;
+ }
+};
+
static PyGetSetDef dirstate_item_getset[] = {
{"mode", (getter)dirstate_item_get_mode, NULL, "mode", NULL},
{"size", (getter)dirstate_item_get_size, NULL, "size", NULL},
@@ -342,6 +545,8 @@
"from_p2_removed", NULL},
{"from_p2", (getter)dirstate_item_get_from_p2, NULL, "from_p2", NULL},
{"removed", (getter)dirstate_item_get_removed, NULL, "removed", NULL},
+ {"dm_nonnormal", (getter)dm_nonnormal, NULL, "dm_nonnormal", NULL},
+ {"dm_otherparent", (getter)dm_otherparent, NULL, "dm_otherparent", NULL},
{NULL} /* Sentinel */
};
--- a/mercurial/configitems.py Fri Aug 27 13:51:44 2021 -0700
+++ b/mercurial/configitems.py Mon Aug 30 12:25:57 2021 +0200
@@ -1266,6 +1266,11 @@
)
coreconfigitem(
b'experimental',
+ b'web.full-garbage-collection-rate',
+ default=1, # still forcing a full collection on each request
+)
+coreconfigitem(
+ b'experimental',
b'worker.wdir-get-thread-safe',
default=False,
)
--- a/mercurial/debugcommands.py Fri Aug 27 13:51:44 2021 -0700
+++ b/mercurial/debugcommands.py Mon Aug 30 12:25:57 2021 +0200
@@ -2987,10 +2987,22 @@
dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles)
-@command(b'debugrebuildfncache', [], b'')
-def debugrebuildfncache(ui, repo):
+@command(
+ b'debugrebuildfncache',
+ [
+ (
+ b'',
+ b'only-data',
+ False,
+ _(b'only look for wrong .d files (much faster)'),
+ )
+ ],
+ b'',
+)
+def debugrebuildfncache(ui, repo, **opts):
"""rebuild the fncache file"""
- repair.rebuildfncache(ui, repo)
+ opts = pycompat.byteskwargs(opts)
+ repair.rebuildfncache(ui, repo, opts.get(b"only_data"))
@command(
--- a/mercurial/dirstate.py Fri Aug 27 13:51:44 2021 -0700
+++ b/mercurial/dirstate.py Mon Aug 30 12:25:57 2021 +0200
@@ -500,7 +500,9 @@
self._drop(filename)
return True
else:
- self._remove(filename)
+ self._dirty = True
+ self._updatedfiles.add(filename)
+ self._map.set_untracked(filename)
return True
@requires_no_parents_change
@@ -508,7 +510,19 @@
"""record that the current state of the file on disk is known to be clean"""
self._dirty = True
self._updatedfiles.add(filename)
- self._normal(filename, parentfiledata=parentfiledata)
+ if parentfiledata:
+ (mode, size, mtime) = parentfiledata
+ else:
+ (mode, size, mtime) = self._get_filedata(filename)
+ self._addpath(filename, mode=mode, size=size, mtime=mtime)
+ self._map.copymap.pop(filename, None)
+ if filename in self._map.nonnormalset:
+ self._map.nonnormalset.remove(filename)
+ if mtime > self._lastnormaltime:
+ # Remember the most recent modification timeslot for status(),
+ # to make sure we won't miss future size-preserving file content
+ # modifications that happen within the same timeslot.
+ self._lastnormaltime = mtime
@requires_no_parents_change
def set_possibly_dirty(self, filename):
@@ -703,65 +717,6 @@
mtime = s[stat.ST_MTIME]
return (mode, size, mtime)
- def normal(self, f, parentfiledata=None):
- """Mark a file normal and clean.
-
- parentfiledata: (mode, size, mtime) of the clean file
-
- parentfiledata should be computed from memory (for mode,
- size), as or close as possible from the point where we
- determined the file was clean, to limit the risk of the
- file having been changed by an external process between the
- moment where the file was determined to be clean and now."""
- if self.pendingparentchange():
- util.nouideprecwarn(
- b"do not use `normal` inside of update/merge context."
- b" Use `update_file` or `update_file_p1`",
- b'6.0',
- stacklevel=2,
- )
- else:
- util.nouideprecwarn(
- b"do not use `normal` outside of update/merge context."
- b" Use `set_tracked`",
- b'6.0',
- stacklevel=2,
- )
- self._normal(f, parentfiledata=parentfiledata)
-
- def _normal(self, f, parentfiledata=None):
- if parentfiledata:
- (mode, size, mtime) = parentfiledata
- else:
- (mode, size, mtime) = self._get_filedata(f)
- self._addpath(f, mode=mode, size=size, mtime=mtime)
- self._map.copymap.pop(f, None)
- if f in self._map.nonnormalset:
- self._map.nonnormalset.remove(f)
- if mtime > self._lastnormaltime:
- # Remember the most recent modification timeslot for status(),
- # to make sure we won't miss future size-preserving file content
- # modifications that happen within the same timeslot.
- self._lastnormaltime = mtime
-
- def normallookup(self, f):
- '''Mark a file normal, but possibly dirty.'''
- if self.pendingparentchange():
- util.nouideprecwarn(
- b"do not use `normallookup` inside of update/merge context."
- b" Use `update_file` or `update_file_p1`",
- b'6.0',
- stacklevel=2,
- )
- else:
- util.nouideprecwarn(
- b"do not use `normallookup` outside of update/merge context."
- b" Use `set_possibly_dirty` or `set_tracked`",
- b'6.0',
- stacklevel=2,
- )
- self._normallookup(f)
-
def _normallookup(self, f):
'''Mark a file normal, but possibly dirty.'''
if self.in_merge:
@@ -774,10 +729,8 @@
# (see `merged_removed` and `from_p2_removed`)
if entry.merged_removed or entry.from_p2_removed:
source = self._map.copymap.get(f)
- if entry.merged_removed:
- self._merge(f)
- elif entry.from_p2_removed:
- self._otherparent(f)
+ self._addpath(f, from_p2=True)
+ self._map.copymap.pop(f, None)
if source is not None:
self.copy(source, f)
return
@@ -786,125 +739,11 @@
self._addpath(f, possibly_dirty=True)
self._map.copymap.pop(f, None)
- def otherparent(self, f):
- '''Mark as coming from the other parent, always dirty.'''
- if self.pendingparentchange():
- util.nouideprecwarn(
- b"do not use `otherparent` inside of update/merge context."
- b" Use `update_file` or `update_file_p1`",
- b'6.0',
- stacklevel=2,
- )
- else:
- util.nouideprecwarn(
- b"do not use `otherparent` outside of update/merge context."
- b"It should have been set by the update/merge code",
- b'6.0',
- stacklevel=2,
- )
- self._otherparent(f)
-
- def _otherparent(self, f):
- if not self.in_merge:
- msg = _(b"setting %r to other parent only allowed in merges") % f
- raise error.Abort(msg)
- entry = self._map.get(f)
- if entry is not None and entry.tracked:
- # merge-like
- self._addpath(f, merged=True)
- else:
- # add-like
- self._addpath(f, from_p2=True)
- self._map.copymap.pop(f, None)
-
- def add(self, f):
- '''Mark a file added.'''
- if self.pendingparentchange():
- util.nouideprecwarn(
- b"do not use `add` inside of update/merge context."
- b" Use `update_file`",
- b'6.0',
- stacklevel=2,
- )
- else:
- util.nouideprecwarn(
- b"do not use `add` outside of update/merge context."
- b" Use `set_tracked`",
- b'6.0',
- stacklevel=2,
- )
- self._add(f)
-
def _add(self, filename):
"""internal function to mark a file as added"""
self._addpath(filename, added=True)
self._map.copymap.pop(filename, None)
- def remove(self, f):
- '''Mark a file removed'''
- if self.pendingparentchange():
- util.nouideprecwarn(
- b"do not use `remove` insde of update/merge context."
- b" Use `update_file` or `update_file_p1`",
- b'6.0',
- stacklevel=2,
- )
- else:
- util.nouideprecwarn(
- b"do not use `remove` outside of update/merge context."
- b" Use `set_untracked`",
- b'6.0',
- stacklevel=2,
- )
- self._remove(f)
-
- def _remove(self, filename):
- """internal function to mark a file removed"""
- self._dirty = True
- self._updatedfiles.add(filename)
- self._map.removefile(filename, in_merge=self.in_merge)
-
- def merge(self, f):
- '''Mark a file merged.'''
- if self.pendingparentchange():
- util.nouideprecwarn(
- b"do not use `merge` inside of update/merge context."
- b" Use `update_file`",
- b'6.0',
- stacklevel=2,
- )
- else:
- util.nouideprecwarn(
- b"do not use `merge` outside of update/merge context."
- b"It should have been set by the update/merge code",
- b'6.0',
- stacklevel=2,
- )
- self._merge(f)
-
- def _merge(self, f):
- if not self.in_merge:
- return self._normallookup(f)
- return self._otherparent(f)
-
- def drop(self, f):
- '''Drop a file from the dirstate'''
- if self.pendingparentchange():
- util.nouideprecwarn(
- b"do not use `drop` inside of update/merge context."
- b" Use `update_file`",
- b'6.0',
- stacklevel=2,
- )
- else:
- util.nouideprecwarn(
- b"do not use `drop` outside of update/merge context."
- b" Use `set_untracked`",
- b'6.0',
- stacklevel=2,
- )
- self._drop(f)
-
def _drop(self, filename):
"""internal function to drop a file from the dirstate"""
if self._map.dropfile(filename):
--- a/mercurial/dirstatemap.py Fri Aug 27 13:51:44 2021 -0700
+++ b/mercurial/dirstatemap.py Mon Aug 30 12:25:57 2021 +0200
@@ -29,16 +29,6 @@
DirstateItem = parsers.DirstateItem
-
-# a special value used internally for `size` if the file come from the other parent
-FROM_P2 = -2
-
-# a special value used internally for `size` if the file is modified/merged/added
-NONNORMAL = -1
-
-# a special value used internally for `time` if the time is ambigeous
-AMBIGUOUS_TIME = -1
-
rangemask = 0x7FFFFFFF
@@ -188,43 +178,33 @@
assert not merged
assert not possibly_dirty
assert not from_p2
- state = b'a'
- size = NONNORMAL
- mtime = AMBIGUOUS_TIME
+ new_entry = DirstateItem.new_added()
elif merged:
assert not possibly_dirty
assert not from_p2
- state = b'm'
- size = FROM_P2
- mtime = AMBIGUOUS_TIME
+ new_entry = DirstateItem.new_merged()
elif from_p2:
assert not possibly_dirty
- state = b'n'
- size = FROM_P2
- mtime = AMBIGUOUS_TIME
+ new_entry = DirstateItem.new_from_p2()
elif possibly_dirty:
- state = b'n'
- size = NONNORMAL
- mtime = AMBIGUOUS_TIME
+ new_entry = DirstateItem.new_possibly_dirty()
else:
- assert size != FROM_P2
- assert size != NONNORMAL
assert size is not None
assert mtime is not None
-
- state = b'n'
size = size & rangemask
mtime = mtime & rangemask
- assert state is not None
- assert size is not None
- assert mtime is not None
+ new_entry = DirstateItem.new_normal(mode, size, mtime)
old_entry = self.get(f)
self._dirs_incr(f, old_entry)
- e = self._map[f] = DirstateItem(state, mode, size, mtime)
- if e.dm_nonnormal:
+ self._map[f] = new_entry
+ if new_entry.dm_nonnormal:
self.nonnormalset.add(f)
- if e.dm_otherparent:
+ else:
+ self.nonnormalset.discard(f)
+ if new_entry.dm_otherparent:
self.otherparentset.add(f)
+ else:
+ self.otherparentset.discard(f)
def reset_state(
self,
@@ -256,25 +236,21 @@
if not (p1_tracked or p2_tracked or wc_tracked):
self.dropfile(filename)
+ return
elif merged:
# XXX might be merged and removed ?
entry = self.get(filename)
- if entry is not None and entry.tracked:
+ if entry is None or not entry.tracked:
# XXX mostly replicate dirstate.other parent. We should get
# the higher layer to pass us more reliable data where `merged`
- # actually mean merged. Dropping the else clause will show
- # failure in `test-graft.t`
- self.addfile(filename, merged=True)
- else:
- self.addfile(filename, from_p2=True)
+            # actually means merged. Dropping this clause will show failure
+ # in `test-graft.t`
+ merged = False
+ clean_p2 = True
elif not (p1_tracked or p2_tracked) and wc_tracked:
- self.addfile(filename, added=True, possibly_dirty=possibly_dirty)
+ pass # file is added, nothing special to adjust
elif (p1_tracked or p2_tracked) and not wc_tracked:
- # XXX might be merged and removed ?
- old_entry = self._map.get(filename)
- self._dirs_decr(filename, old_entry=old_entry, remove_variant=True)
- self._map[filename] = DirstateItem(b'r', 0, 0, 0)
- self.nonnormalset.add(filename)
+ pass
elif clean_p2 and wc_tracked:
if p1_tracked or self.get(filename) is not None:
# XXX the `self.get` call is catching some case in
@@ -284,51 +260,52 @@
# In addition, this seems to be a case where the file is marked
# as merged without actually being the result of a merge
# action. So thing are not ideal here.
- self.addfile(filename, merged=True)
- else:
- self.addfile(filename, from_p2=True)
+ merged = True
+ clean_p2 = False
elif not p1_tracked and p2_tracked and wc_tracked:
- self.addfile(filename, from_p2=True, possibly_dirty=possibly_dirty)
+ clean_p2 = True
elif possibly_dirty:
- self.addfile(filename, possibly_dirty=possibly_dirty)
+ pass
elif wc_tracked:
# this is a "normal" file
if parentfiledata is None:
msg = b'failed to pass parentfiledata for a normal file: %s'
msg %= filename
raise error.ProgrammingError(msg)
- mode, size, mtime = parentfiledata
- self.addfile(filename, mode=mode, size=size, mtime=mtime)
- self.nonnormalset.discard(filename)
else:
assert False, 'unreachable'
- def removefile(self, f, in_merge=False):
- """
- Mark a file as removed in the dirstate.
+ old_entry = self._map.get(filename)
+ self._dirs_incr(filename, old_entry)
+ entry = DirstateItem(
+ wc_tracked=wc_tracked,
+ p1_tracked=p1_tracked,
+ p2_tracked=p2_tracked,
+ merged=merged,
+ clean_p1=clean_p1,
+ clean_p2=clean_p2,
+ possibly_dirty=possibly_dirty,
+ parentfiledata=parentfiledata,
+ )
+ if entry.dm_nonnormal:
+ self.nonnormalset.add(filename)
+ else:
+ self.nonnormalset.discard(filename)
+ if entry.dm_otherparent:
+ self.otherparentset.add(filename)
+ else:
+ self.otherparentset.discard(filename)
+ self._map[filename] = entry
- The `size` parameter is used to store sentinel values that indicate
- the file's previous state. In the future, we should refactor this
- to be more explicit about what that state is.
- """
- entry = self.get(f)
- size = 0
- if in_merge:
- # XXX we should not be able to have 'm' state and 'FROM_P2' if not
- # during a merge. So I (marmoute) am not sure we need the
- # conditionnal at all. Adding double checking this with assert
- # would be nice.
- if entry is not None:
- # backup the previous state
- if entry.merged: # merge
- size = NONNORMAL
- elif entry.from_p2:
- size = FROM_P2
- self.otherparentset.add(f)
- if entry is not None and not (entry.merged or entry.from_p2):
+ def set_untracked(self, f):
+ """Mark a file as no longer tracked in the dirstate map"""
+ entry = self[f]
+ self._dirs_decr(f, old_entry=entry, remove_variant=True)
+ if entry.from_p2:
+ self.otherparentset.add(f)
+ elif not entry.merged:
self.copymap.pop(f, None)
- self._dirs_decr(f, old_entry=entry, remove_variant=True)
- self._map[f] = DirstateItem(b'r', 0, size, 0)
+ entry.set_untracked()
self.nonnormalset.add(f)
def dropfile(self, f):
@@ -632,7 +609,7 @@
)
elif (p1_tracked or p2_tracked) and not wc_tracked:
# XXX might be merged and removed ?
- self[filename] = DirstateItem(b'r', 0, 0, 0)
+ self[filename] = DirstateItem.from_v1_data(b'r', 0, 0, 0)
self.nonnormalset.add(filename)
elif clean_p2 and wc_tracked:
if p1_tracked or self.get(filename) is not None:
@@ -664,6 +641,14 @@
else:
assert False, 'unreachable'
+ def set_untracked(self, f):
+ """Mark a file as no longer tracked in the dirstate map"""
+        # being in a merge only triggers more logic, so it is "fine" to pass it.
+ #
+        # the inner rust dirstate map code needs to be adjusted once the API
+ # for dirstate/dirstatemap/DirstateItem is a bit more settled
+ self._rustmap.removefile(f, in_merge=True)
+
def removefile(self, *args, **kwargs):
return self._rustmap.removefile(*args, **kwargs)
--- a/mercurial/hgweb/hgwebdir_mod.py Fri Aug 27 13:51:44 2021 -0700
+++ b/mercurial/hgweb/hgwebdir_mod.py Mon Aug 30 12:25:57 2021 +0200
@@ -285,6 +285,7 @@
self.lastrefresh = 0
self.motd = None
self.refresh()
+ self.requests_count = 0
if not baseui:
# set up environment for new ui
extensions.loadall(self.ui)
@@ -341,6 +342,10 @@
self.repos = repos
self.ui = u
+ self.gc_full_collect_rate = self.ui.configint(
+ b'experimental', b'web.full-garbage-collection-rate'
+ )
+ self.gc_full_collections_done = 0
encoding.encoding = self.ui.config(b'web', b'encoding')
self.style = self.ui.config(b'web', b'style')
self.templatepath = self.ui.config(
@@ -383,12 +388,27 @@
finally:
# There are known cycles in localrepository that prevent
# those objects (and tons of held references) from being
- # collected through normal refcounting. We mitigate those
- # leaks by performing an explicit GC on every request.
- # TODO remove this once leaks are fixed.
- # TODO only run this on requests that create localrepository
- # instances instead of every request.
- gc.collect()
+ # collected through normal refcounting.
+ # In some cases, the resulting memory consumption can
+ # be tamed by performing explicit garbage collections.
+ # In presence of actual leaks or big long-lived caches, the
+ # impact on performance of such collections can become a
+ # problem, hence the rate shouldn't be set too low.
+ # See "Collecting the oldest generation" in
+ # https://devguide.python.org/garbage_collector
+ # for more about such trade-offs.
+ rate = self.gc_full_collect_rate
+
+ # this is not thread safe, but the consequence (skipping
+ # a garbage collection) is arguably better than risking
+ # to have several threads perform a collection in parallel
+ # (long useless wait on all threads).
+ self.requests_count += 1
+ if rate > 0 and self.requests_count % rate == 0:
+ gc.collect()
+ self.gc_full_collections_done += 1
+ else:
+ gc.collect(generation=1)
def _runwsgi(self, req, res):
try:
--- a/mercurial/interfaces/dirstate.py Fri Aug 27 13:51:44 2021 -0700
+++ b/mercurial/interfaces/dirstate.py Mon Aug 30 12:25:57 2021 +0200
@@ -132,36 +132,6 @@
def copies():
pass
- def normal(f, parentfiledata=None):
- """Mark a file normal and clean.
-
- parentfiledata: (mode, size, mtime) of the clean file
-
- parentfiledata should be computed from memory (for mode,
- size), as or close as possible from the point where we
- determined the file was clean, to limit the risk of the
- file having been changed by an external process between the
- moment where the file was determined to be clean and now."""
- pass
-
- def normallookup(f):
- '''Mark a file normal, but possibly dirty.'''
-
- def otherparent(f):
- '''Mark as coming from the other parent, always dirty.'''
-
- def add(f):
- '''Mark a file added.'''
-
- def remove(f):
- '''Mark a file removed.'''
-
- def merge(f):
- '''Mark a file merged.'''
-
- def drop(f):
- '''Drop a file from the dirstate'''
-
def normalize(path, isknown=False, ignoremissing=False):
"""
normalize the case of a pathname when on a casefolding filesystem
--- a/mercurial/pure/parsers.py Fri Aug 27 13:51:44 2021 -0700
+++ b/mercurial/pure/parsers.py Mon Aug 30 12:25:57 2021 +0200
@@ -61,11 +61,129 @@
_size = attr.ib()
_mtime = attr.ib()
- def __init__(self, state, mode, size, mtime):
- self._state = state
- self._mode = mode
- self._size = size
- self._mtime = mtime
+ def __init__(
+ self,
+ wc_tracked=False,
+ p1_tracked=False,
+ p2_tracked=False,
+ merged=False,
+ clean_p1=False,
+ clean_p2=False,
+ possibly_dirty=False,
+ parentfiledata=None,
+ ):
+ if merged and (clean_p1 or clean_p2):
+ msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
+ raise error.ProgrammingError(msg)
+
+ self._state = None
+ self._mode = 0
+ self._size = NONNORMAL
+ self._mtime = AMBIGUOUS_TIME
+ if not (p1_tracked or p2_tracked or wc_tracked):
+ pass # the object has no state to record
+ elif merged:
+ self._state = b'm'
+ self._size = FROM_P2
+ self._mtime = AMBIGUOUS_TIME
+ elif not (p1_tracked or p2_tracked) and wc_tracked:
+ self._state = b'a'
+ self._size = NONNORMAL
+ self._mtime = AMBIGUOUS_TIME
+ elif (p1_tracked or p2_tracked) and not wc_tracked:
+ self._state = b'r'
+ self._size = 0
+ self._mtime = 0
+ elif clean_p2 and wc_tracked:
+ self._state = b'n'
+ self._size = FROM_P2
+ self._mtime = AMBIGUOUS_TIME
+ elif not p1_tracked and p2_tracked and wc_tracked:
+ self._state = b'n'
+ self._size = FROM_P2
+ self._mtime = AMBIGUOUS_TIME
+ elif possibly_dirty:
+ self._state = b'n'
+ self._size = NONNORMAL
+ self._mtime = AMBIGUOUS_TIME
+ elif wc_tracked:
+ # this is a "normal" file
+ if parentfiledata is None:
+ msg = b'failed to pass parentfiledata for a normal file'
+ raise error.ProgrammingError(msg)
+ self._state = b'n'
+ self._mode = parentfiledata[0]
+ self._size = parentfiledata[1]
+ self._mtime = parentfiledata[2]
+ else:
+ assert False, 'unreachable'
+
+ @classmethod
+ def new_added(cls):
+ """constructor to help legacy API to build a new "added" item
+
+ Should eventually be removed
+ """
+ instance = cls()
+ instance._state = b'a'
+ instance._mode = 0
+ instance._size = NONNORMAL
+ instance._mtime = AMBIGUOUS_TIME
+ return instance
+
+ @classmethod
+ def new_merged(cls):
+ """constructor to help legacy API to build a new "merged" item
+
+ Should eventually be removed
+ """
+ instance = cls()
+ instance._state = b'm'
+ instance._mode = 0
+ instance._size = FROM_P2
+ instance._mtime = AMBIGUOUS_TIME
+ return instance
+
+ @classmethod
+ def new_from_p2(cls):
+ """constructor to help legacy API to build a new "from_p2" item
+
+ Should eventually be removed
+ """
+ instance = cls()
+ instance._state = b'n'
+ instance._mode = 0
+ instance._size = FROM_P2
+ instance._mtime = AMBIGUOUS_TIME
+ return instance
+
+ @classmethod
+ def new_possibly_dirty(cls):
+ """constructor to help legacy API to build a new "possibly_dirty" item
+
+ Should eventually be removed
+ """
+ instance = cls()
+ instance._state = b'n'
+ instance._mode = 0
+ instance._size = NONNORMAL
+ instance._mtime = AMBIGUOUS_TIME
+ return instance
+
+ @classmethod
+ def new_normal(cls, mode, size, mtime):
+ """constructor to help legacy API to build a new "normal" item
+
+ Should eventually be removed
+ """
+ assert size != FROM_P2
+ assert size != NONNORMAL
+ instance = cls()
+ instance._state = b'n'
+ instance._mode = mode
+ instance._size = size
+ instance._mtime = mtime
+ return instance
@classmethod
def from_v1_data(cls, state, mode, size, mtime):
@@ -74,12 +192,12 @@
Since the dirstate-v1 format is frozen, the signature of this function
is not expected to change, unlike the __init__ one.
"""
- return cls(
- state=state,
- mode=mode,
- size=size,
- mtime=mtime,
- )
+ instance = cls()
+ instance._state = state
+ instance._mode = mode
+ instance._size = size
+ instance._mtime = mtime
+ return instance
def set_possibly_dirty(self):
"""Mark a file as "possibly dirty"
@@ -89,6 +207,22 @@
"""
self._mtime = AMBIGUOUS_TIME
+ def set_untracked(self):
+ """mark a file as untracked in the working copy
+
+        This will ultimately be called by commands like `hg remove`.
+ """
+ # backup the previous state (useful for merge)
+ size = 0
+ if self.merged: # merge
+ size = NONNORMAL
+ elif self.from_p2:
+ size = FROM_P2
+ self._state = b'r'
+ self._mode = 0
+ self._size = size
+ self._mtime = 0
+
def __getitem__(self, idx):
if idx == 0 or idx == -4:
msg = b"do not use item[x], use item.state"
--- a/mercurial/repair.py Fri Aug 27 13:51:44 2021 -0700
+++ b/mercurial/repair.py Mon Aug 30 12:25:57 2021 +0200
@@ -433,7 +433,7 @@
if scmutil.istreemanifest(repo):
# This logic is safe if treemanifest isn't enabled, but also
# pointless, so we skip it if treemanifest isn't enabled.
- for t, unencoded, encoded, size in repo.store.datafiles():
+ for t, unencoded, size in repo.store.datafiles():
if unencoded.startswith(b'meta/') and unencoded.endswith(
b'00manifest.i'
):
@@ -441,7 +441,7 @@
yield repo.manifestlog.getstorage(dir)
-def rebuildfncache(ui, repo):
+def rebuildfncache(ui, repo, only_data=False):
"""Rebuilds the fncache file from repo history.
Missing entries will be added. Extra entries will be removed.
@@ -465,28 +465,40 @@
newentries = set()
seenfiles = set()
- progress = ui.makeprogress(
- _(b'rebuilding'), unit=_(b'changesets'), total=len(repo)
- )
- for rev in repo:
- progress.update(rev)
+ if only_data:
+ # Trust the listing of .i from the fncache, but not the .d. This is
+    # much faster, because we only need to stat every possible .d file,
+ # instead of reading the full changelog
+ for f in fnc:
+ if f[:5] == b'data/' and f[-2:] == b'.i':
+ seenfiles.add(f[5:-2])
+ newentries.add(f)
+ dataf = f[:-2] + b'.d'
+ if repo.store._exists(dataf):
+ newentries.add(dataf)
+ else:
+ progress = ui.makeprogress(
+ _(b'rebuilding'), unit=_(b'changesets'), total=len(repo)
+ )
+ for rev in repo:
+ progress.update(rev)
- ctx = repo[rev]
- for f in ctx.files():
- # This is to minimize I/O.
- if f in seenfiles:
- continue
- seenfiles.add(f)
+ ctx = repo[rev]
+ for f in ctx.files():
+ # This is to minimize I/O.
+ if f in seenfiles:
+ continue
+ seenfiles.add(f)
- i = b'data/%s.i' % f
- d = b'data/%s.d' % f
+ i = b'data/%s.i' % f
+ d = b'data/%s.d' % f
- if repo.store._exists(i):
- newentries.add(i)
- if repo.store._exists(d):
- newentries.add(d)
+ if repo.store._exists(i):
+ newentries.add(i)
+ if repo.store._exists(d):
+ newentries.add(d)
- progress.complete()
+ progress.complete()
if requirements.TREEMANIFEST_REQUIREMENT in repo.requirements:
# This logic is safe if treemanifest isn't enabled, but also
--- a/mercurial/revlogutils/rewrite.py Fri Aug 27 13:51:44 2021 -0700
+++ b/mercurial/revlogutils/rewrite.py Mon Aug 30 12:25:57 2021 +0200
@@ -824,7 +824,7 @@
with context():
files = list(
(file_type, path)
- for (file_type, path, _e, _s) in repo.store.datafiles()
+ for (file_type, path, _s) in repo.store.datafiles()
if path.endswith(b'.i') and file_type & store.FILEFLAGS_FILELOG
)
--- a/mercurial/store.py Fri Aug 27 13:51:44 2021 -0700
+++ b/mercurial/store.py Mon Aug 30 12:25:57 2021 +0200
@@ -472,7 +472,7 @@
return self.path + b'/' + encodedir(f)
def _walk(self, relpath, recurse):
- '''yields (unencoded, encoded, size)'''
+ '''yields (revlog_type, unencoded, size)'''
path = self.path
if relpath:
path += b'/' + relpath
@@ -488,7 +488,7 @@
rl_type = is_revlog(f, kind, st)
if rl_type is not None:
n = util.pconvert(fp[striplen:])
- l.append((rl_type, decodedir(n), n, st.st_size))
+ l.append((rl_type, decodedir(n), st.st_size))
elif kind == stat.S_IFDIR and recurse:
visit.append(fp)
l.sort()
@@ -505,26 +505,32 @@
rootstore = manifest.manifestrevlog(repo.nodeconstants, self.vfs)
return manifest.manifestlog(self.vfs, repo, rootstore, storenarrowmatch)
- def datafiles(self, matcher=None):
+ def datafiles(self, matcher=None, undecodable=None):
+ """Like walk, but excluding the changelog and root manifest.
+
+ When [undecodable] is None, revlogs names that can't be
+ decoded cause an exception. When it is provided, it should
+ be a list and the filenames that can't be decoded are added
+ to it instead. This is very rarely needed."""
files = self._walk(b'data', True) + self._walk(b'meta', True)
- for (t, u, e, s) in files:
- yield (FILEFLAGS_FILELOG | t, u, e, s)
+ for (t, u, s) in files:
+ yield (FILEFLAGS_FILELOG | t, u, s)
def topfiles(self):
# yield manifest before changelog
files = reversed(self._walk(b'', False))
- for (t, u, e, s) in files:
+ for (t, u, s) in files:
if u.startswith(b'00changelog'):
- yield (FILEFLAGS_CHANGELOG | t, u, e, s)
+ yield (FILEFLAGS_CHANGELOG | t, u, s)
elif u.startswith(b'00manifest'):
- yield (FILEFLAGS_MANIFESTLOG | t, u, e, s)
+ yield (FILEFLAGS_MANIFESTLOG | t, u, s)
else:
- yield (FILETYPE_OTHER | t, u, e, s)
+ yield (FILETYPE_OTHER | t, u, s)
def walk(self, matcher=None):
"""return file related to data storage (ie: revlogs)
- yields (file_type, unencoded, encoded, size)
+ yields (file_type, unencoded, size)
if a matcher is passed, storage files of only those tracked paths
are passed with matches the matcher
@@ -574,15 +580,20 @@
# However that might change so we should probably add a test and encoding
# decoding for it too. see issue6548
- def datafiles(self, matcher=None):
- for t, a, b, size in super(encodedstore, self).datafiles():
+ def datafiles(self, matcher=None, undecodable=None):
+ for t, f1, size in super(encodedstore, self).datafiles():
try:
- a = decodefilename(a)
+ f2 = decodefilename(f1)
except KeyError:
- a = None
- if a is not None and not _matchtrackedpath(a, matcher):
+ if undecodable is None:
+ msg = _(b'undecodable revlog name %s') % f1
+ raise error.StorageError(msg)
+ else:
+ undecodable.append(f1)
+ continue
+ if not _matchtrackedpath(f2, matcher):
continue
- yield t, a, b, size
+ yield t, f2, size
def join(self, f):
return self.path + b'/' + encodefilename(f)
@@ -770,7 +781,7 @@
def getsize(self, path):
return self.rawvfs.stat(path).st_size
- def datafiles(self, matcher=None):
+ def datafiles(self, matcher=None, undecodable=None):
for f in sorted(self.fncache):
if not _matchtrackedpath(f, matcher):
continue
@@ -779,7 +790,7 @@
t = revlog_type(f)
assert t is not None, f
t |= FILEFLAGS_FILELOG
- yield t, f, ef, self.getsize(ef)
+ yield t, f, self.getsize(ef)
except OSError as err:
if err.errno != errno.ENOENT:
raise
--- a/mercurial/streamclone.py Fri Aug 27 13:51:44 2021 -0700
+++ b/mercurial/streamclone.py Mon Aug 30 12:25:57 2021 +0200
@@ -248,7 +248,7 @@
# Get consistent snapshot of repo, lock during scan.
with repo.lock():
repo.ui.debug(b'scanning\n')
- for file_type, name, ename, size in _walkstreamfiles(repo):
+ for file_type, name, size in _walkstreamfiles(repo):
if size:
entries.append((name, size))
total_bytes += size
@@ -650,7 +650,7 @@
if includes or excludes:
matcher = narrowspec.match(repo.root, includes, excludes)
- for rl_type, name, ename, size in _walkstreamfiles(repo, matcher):
+ for rl_type, name, size in _walkstreamfiles(repo, matcher):
if size:
ft = _fileappend
if rl_type & store.FILEFLAGS_VOLATILE:
--- a/mercurial/upgrade_utils/engine.py Fri Aug 27 13:51:44 2021 -0700
+++ b/mercurial/upgrade_utils/engine.py Mon Aug 30 12:25:57 2021 +0200
@@ -201,7 +201,7 @@
# Perform a pass to collect metadata. This validates we can open all
# source files and allows a unified progress bar to be displayed.
- for rl_type, unencoded, encoded, size in alldatafiles:
+ for rl_type, unencoded, size in alldatafiles:
if not rl_type & store.FILEFLAGS_REVLOG_MAIN:
continue
--- a/mercurial/utils/resourceutil.py Fri Aug 27 13:51:44 2021 -0700
+++ b/mercurial/utils/resourceutil.py Mon Aug 30 12:25:57 2021 +0200
@@ -59,28 +59,9 @@
# further down
from importlib import resources
- from .. import encoding
-
# Force loading of the resources module
resources.open_binary # pytype: disable=module-attr
- def open_resource(package, name):
- return resources.open_binary( # pytype: disable=module-attr
- pycompat.sysstr(package), pycompat.sysstr(name)
- )
-
- def is_resource(package, name):
- return resources.is_resource( # pytype: disable=module-attr
- pycompat.sysstr(package), encoding.strfromlocal(name)
- )
-
- def contents(package):
- # pytype: disable=module-attr
- for r in resources.contents(pycompat.sysstr(package)):
- # pytype: enable=module-attr
- yield encoding.strtolocal(r)
-
-
except (ImportError, AttributeError):
# importlib.resources was not found (almost definitely because we're on a
# Python version before 3.7)
@@ -102,3 +83,23 @@
for p in os.listdir(path):
yield pycompat.fsencode(p)
+
+
+else:
+ from .. import encoding
+
+ def open_resource(package, name):
+ return resources.open_binary( # pytype: disable=module-attr
+ pycompat.sysstr(package), pycompat.sysstr(name)
+ )
+
+ def is_resource(package, name):
+ return resources.is_resource( # pytype: disable=module-attr
+ pycompat.sysstr(package), encoding.strfromlocal(name)
+ )
+
+ def contents(package):
+ # pytype: disable=module-attr
+ for r in resources.contents(pycompat.sysstr(package)):
+ # pytype: enable=module-attr
+ yield encoding.strtolocal(r)
--- a/mercurial/verify.py Fri Aug 27 13:51:44 2021 -0700
+++ b/mercurial/verify.py Mon Aug 30 12:25:57 2021 +0200
@@ -395,12 +395,13 @@
storefiles = set()
subdirs = set()
revlogv1 = self.revlogv1
- for t, f, f2, size in repo.store.datafiles():
- if not f:
- self._err(None, _(b"cannot decode filename '%s'") % f2)
- elif (size > 0 or not revlogv1) and f.startswith(b'meta/'):
+ undecodable = []
+ for t, f, size in repo.store.datafiles(undecodable=undecodable):
+ if (size > 0 or not revlogv1) and f.startswith(b'meta/'):
storefiles.add(_normpath(f))
subdirs.add(os.path.dirname(f))
+ for f in undecodable:
+ self._err(None, _(b"cannot decode filename '%s'") % f)
subdirprogress = ui.makeprogress(
_(b'checking'), unit=_(b'manifests'), total=len(subdirs)
)
@@ -459,11 +460,12 @@
ui.status(_(b"checking files\n"))
storefiles = set()
- for rl_type, f, f2, size in repo.store.datafiles():
- if not f:
- self._err(None, _(b"cannot decode filename '%s'") % f2)
- elif (size > 0 or not revlogv1) and f.startswith(b'data/'):
+ undecodable = []
+ for t, f, size in repo.store.datafiles(undecodable=undecodable):
+ if (size > 0 or not revlogv1) and f.startswith(b'data/'):
storefiles.add(_normpath(f))
+ for f in undecodable:
+ self._err(None, _(b"cannot decode filename '%s'") % f)
state = {
# TODO this assumes revlog storage for changelog.
--- a/mercurial/wireprotov1peer.py Fri Aug 27 13:51:44 2021 -0700
+++ b/mercurial/wireprotov1peer.py Mon Aug 30 12:25:57 2021 +0200
@@ -44,13 +44,9 @@
def sample(self, one, two=None):
# Build list of encoded arguments suitable for your wire protocol:
encoded_args = [('one', encode(one),), ('two', encode(two),)]
- # Create future for injection of encoded result:
- encoded_res_future = future()
- # Return encoded arguments and future:
- yield encoded_args, encoded_res_future
- # Assuming the future to be filled with the result from the batched
- # request now. Decode it:
- yield decode(encoded_res_future.value)
+ # Return it, along with a function that will receive the result
+ # from the batched request.
+ return encoded_args, decode
The decorator returns a function which wraps this coroutine as a plain
method, but adds the original method as an attribute called "batchable",
@@ -59,29 +55,19 @@
"""
def plain(*args, **opts):
- batchable = f(*args, **opts)
- encoded_args_or_res, encoded_res_future = next(batchable)
- if not encoded_res_future:
+ encoded_args_or_res, decode = f(*args, **opts)
+ if not decode:
return encoded_args_or_res # a local result in this case
self = args[0]
cmd = pycompat.bytesurl(f.__name__) # ensure cmd is ascii bytestr
- encoded_res_future.set(self._submitone(cmd, encoded_args_or_res))
- return next(batchable)
+ encoded_res = self._submitone(cmd, encoded_args_or_res)
+ return decode(encoded_res)
setattr(plain, 'batchable', f)
setattr(plain, '__name__', f.__name__)
return plain
-class future(object):
- '''placeholder for a value to be set later'''
-
- def set(self, value):
- if util.safehasattr(self, b'value'):
- raise error.RepoError(b"future is already set")
- self.value = value
-
-
def encodebatchcmds(req):
"""Return a ``cmds`` argument value for the ``batch`` command."""
escapearg = wireprototypes.escapebatcharg
@@ -248,25 +234,18 @@
continue
try:
- batchable = fn.batchable(
+ encoded_args_or_res, decode = fn.batchable(
fn.__self__, **pycompat.strkwargs(args)
)
except Exception:
pycompat.future_set_exception_info(f, sys.exc_info()[1:])
return
- # Encoded arguments and future holding remote result.
- try:
- encoded_args_or_res, fremote = next(batchable)
- except Exception:
- pycompat.future_set_exception_info(f, sys.exc_info()[1:])
- return
-
- if not fremote:
+ if not decode:
f.set_result(encoded_args_or_res)
else:
requests.append((command, encoded_args_or_res))
- states.append((command, f, batchable, fremote))
+            states.append((command, f, batchable, decode))  # `batchable` is now the module-level decorator (local removed above); unused by readers
if not requests:
return
@@ -319,7 +298,7 @@
def _readbatchresponse(self, states, wireresults):
# Executes in a thread to read data off the wire.
- for command, f, batchable, fremote in states:
+ for command, f, batchable, decode in states:
# Grab raw result off the wire and teach the internal future
# about it.
try:
@@ -334,11 +313,8 @@
)
)
else:
- fremote.set(remoteresult)
-
- # And ask the coroutine to decode that value.
try:
- result = next(batchable)
+ result = decode(remoteresult)
except Exception:
pycompat.future_set_exception_info(f, sys.exc_info()[1:])
else:
@@ -369,87 +345,90 @@
@batchable
def lookup(self, key):
self.requirecap(b'lookup', _(b'look up remote revision'))
- f = future()
- yield {b'key': encoding.fromlocal(key)}, f
- d = f.value
- success, data = d[:-1].split(b" ", 1)
- if int(success):
- yield bin(data)
- else:
- self._abort(error.RepoError(data))
+
+ def decode(d):
+ success, data = d[:-1].split(b" ", 1)
+ if int(success):
+ return bin(data)
+ else:
+ self._abort(error.RepoError(data))
+
+ return {b'key': encoding.fromlocal(key)}, decode
@batchable
def heads(self):
- f = future()
- yield {}, f
- d = f.value
- try:
- yield wireprototypes.decodelist(d[:-1])
- except ValueError:
- self._abort(error.ResponseError(_(b"unexpected response:"), d))
+ def decode(d):
+ try:
+ return wireprototypes.decodelist(d[:-1])
+ except ValueError:
+ self._abort(error.ResponseError(_(b"unexpected response:"), d))
+
+ return {}, decode
@batchable
def known(self, nodes):
- f = future()
- yield {b'nodes': wireprototypes.encodelist(nodes)}, f
- d = f.value
- try:
- yield [bool(int(b)) for b in pycompat.iterbytestr(d)]
- except ValueError:
- self._abort(error.ResponseError(_(b"unexpected response:"), d))
+ def decode(d):
+ try:
+ return [bool(int(b)) for b in pycompat.iterbytestr(d)]
+ except ValueError:
+ self._abort(error.ResponseError(_(b"unexpected response:"), d))
+
+ return {b'nodes': wireprototypes.encodelist(nodes)}, decode
@batchable
def branchmap(self):
- f = future()
- yield {}, f
- d = f.value
- try:
- branchmap = {}
- for branchpart in d.splitlines():
- branchname, branchheads = branchpart.split(b' ', 1)
- branchname = encoding.tolocal(urlreq.unquote(branchname))
- branchheads = wireprototypes.decodelist(branchheads)
- branchmap[branchname] = branchheads
- yield branchmap
- except TypeError:
- self._abort(error.ResponseError(_(b"unexpected response:"), d))
+ def decode(d):
+ try:
+ branchmap = {}
+ for branchpart in d.splitlines():
+ branchname, branchheads = branchpart.split(b' ', 1)
+ branchname = encoding.tolocal(urlreq.unquote(branchname))
+ branchheads = wireprototypes.decodelist(branchheads)
+ branchmap[branchname] = branchheads
+ return branchmap
+ except TypeError:
+ self._abort(error.ResponseError(_(b"unexpected response:"), d))
+
+ return {}, decode
@batchable
def listkeys(self, namespace):
if not self.capable(b'pushkey'):
- yield {}, None
- f = future()
+ return {}, None
self.ui.debug(b'preparing listkeys for "%s"\n' % namespace)
- yield {b'namespace': encoding.fromlocal(namespace)}, f
- d = f.value
- self.ui.debug(
- b'received listkey for "%s": %i bytes\n' % (namespace, len(d))
- )
- yield pushkeymod.decodekeys(d)
+
+ def decode(d):
+ self.ui.debug(
+ b'received listkey for "%s": %i bytes\n' % (namespace, len(d))
+ )
+ return pushkeymod.decodekeys(d)
+
+ return {b'namespace': encoding.fromlocal(namespace)}, decode
@batchable
def pushkey(self, namespace, key, old, new):
if not self.capable(b'pushkey'):
- yield False, None
- f = future()
+ return False, None
self.ui.debug(b'preparing pushkey for "%s:%s"\n' % (namespace, key))
- yield {
+
+ def decode(d):
+ d, output = d.split(b'\n', 1)
+ try:
+ d = bool(int(d))
+ except ValueError:
+ raise error.ResponseError(
+ _(b'push failed (unexpected response):'), d
+ )
+ for l in output.splitlines(True):
+ self.ui.status(_(b'remote: '), l)
+ return d
+
+ return {
b'namespace': encoding.fromlocal(namespace),
b'key': encoding.fromlocal(key),
b'old': encoding.fromlocal(old),
b'new': encoding.fromlocal(new),
- }, f
- d = f.value
- d, output = d.split(b'\n', 1)
- try:
- d = bool(int(d))
- except ValueError:
- raise error.ResponseError(
- _(b'push failed (unexpected response):'), d
- )
- for l in output.splitlines(True):
- self.ui.status(_(b'remote: '), l)
- yield d
+ }, decode
def stream_out(self):
return self._callstream(b'stream_out')
--- a/mercurial/wireprotov2server.py Fri Aug 27 13:51:44 2021 -0700
+++ b/mercurial/wireprotov2server.py Mon Aug 30 12:25:57 2021 +0200
@@ -1579,7 +1579,7 @@
# TODO this is a bunch of storage layer interface abstractions because
# it assumes revlogs.
- for rl_type, name, encodedname, size in topfiles:
+ for rl_type, name, size in topfiles:
# XXX use the `rl_type` for that
if b'changelog' in files and name.startswith(b'00changelog'):
pass
--- a/tests/library-infinitepush.sh Fri Aug 27 13:51:44 2021 -0700
+++ b/tests/library-infinitepush.sh Mon Aug 30 12:25:57 2021 +0200
@@ -14,8 +14,6 @@
cat >> $HGRCPATH << EOF
[extensions]
infinitepush=
-[ui]
-ssh = "$PYTHON" "$TESTDIR/dummyssh"
[infinitepush]
branchpattern=re:scratch/.*
EOF
--- a/tests/narrow-library.sh Fri Aug 27 13:51:44 2021 -0700
+++ b/tests/narrow-library.sh Mon Aug 30 12:25:57 2021 +0200
@@ -1,8 +1,6 @@
cat >> $HGRCPATH <<EOF
[extensions]
narrow=
-[ui]
-ssh="$PYTHON" "$RUNTESTDIR/dummyssh"
[experimental]
changegroup3 = True
EOF
--- a/tests/remotefilelog-library.sh Fri Aug 27 13:51:44 2021 -0700
+++ b/tests/remotefilelog-library.sh Mon Aug 30 12:25:57 2021 +0200
@@ -7,8 +7,6 @@
remotefilelog=
rebase=
strip=
-[ui]
-ssh="$PYTHON" "$TESTDIR/dummyssh"
[server]
preferuncompressed=True
[experimental]
--- a/tests/run-tests.py Fri Aug 27 13:51:44 2021 -0700
+++ b/tests/run-tests.py Mon Aug 30 12:25:57 2021 +0200
@@ -1554,6 +1554,8 @@
hgrc.write(b'merge = internal:merge\n')
hgrc.write(b'mergemarkers = detailed\n')
hgrc.write(b'promptecho = True\n')
+ dummyssh = os.path.join(self._testdir, b'dummyssh')
+ hgrc.write(b'ssh = "%s" "%s"\n' % (PYTHON, dummyssh))
hgrc.write(b'timeout.warn=15\n')
hgrc.write(b'[chgserver]\n')
hgrc.write(b'idletimeout=60\n')
--- a/tests/simplestorerepo.py Fri Aug 27 13:51:44 2021 -0700
+++ b/tests/simplestorerepo.py Mon Aug 30 12:25:57 2021 +0200
@@ -665,20 +665,24 @@
class simplestore(store.encodedstore):
- def datafiles(self):
+ def datafiles(self, undecodable=None):
for x in super(simplestore, self).datafiles():
yield x
# Supplement with non-revlog files.
extrafiles = self._walk('data', True, filefilter=issimplestorefile)
- for unencoded, encoded, size in extrafiles:
+ for f1, size in extrafiles:
try:
- unencoded = store.decodefilename(unencoded)
+ f2 = store.decodefilename(f1)
except KeyError:
- unencoded = None
+ if undecodable is None:
+ raise error.StorageError(b'undecodable revlog name %s' % f1)
+ else:
+ undecodable.append(f1)
+ continue
- yield unencoded, encoded, size
+ yield f2, size
def reposetup(ui, repo):
--- a/tests/test-basic.t Fri Aug 27 13:51:44 2021 -0700
+++ b/tests/test-basic.t Mon Aug 30 12:25:57 2021 +0200
@@ -15,6 +15,7 @@
ui.merge=internal:merge
ui.mergemarkers=detailed
ui.promptecho=True
+ ui.ssh=* (glob)
ui.timeout.warn=15
web.address=localhost
web\.ipv6=(?:True|False) (re)
--- a/tests/test-batching.py Fri Aug 27 13:51:44 2021 -0700
+++ b/tests/test-batching.py Mon Aug 30 12:25:57 2021 +0200
@@ -214,14 +214,11 @@
mangle(two),
),
]
- encoded_res_future = wireprotov1peer.future()
- yield encoded_args, encoded_res_future
- yield unmangle(encoded_res_future.value)
+ return encoded_args, unmangle
@wireprotov1peer.batchable
def bar(self, b, a):
- encresref = wireprotov1peer.future()
- yield [
+ return [
(
b'b',
mangle(b),
@@ -230,8 +227,7 @@
b'a',
mangle(a),
),
- ], encresref
- yield unmangle(encresref.value)
+ ], unmangle
# greet is coded directly. It therefore does not support batching. If it
# does appear in a batch, the batch is split around greet, and the call to
--- a/tests/test-bookmarks-corner-case.t Fri Aug 27 13:51:44 2021 -0700
+++ b/tests/test-bookmarks-corner-case.t Mon Aug 30 12:25:57 2021 +0200
@@ -12,16 +12,6 @@
node known to the changelog. If the cache invalidation between these two bits
goes wrong, bookmark can be dropped.
-global setup
-------------
-
- $ cat >> $HGRCPATH << EOF
- > [ui]
- > ssh = "$PYTHON" "$TESTDIR/dummyssh"
- > [server]
- > concurrent-push-mode=check-related
- > EOF
-
Setup
-----
--- a/tests/test-bookmarks-pushpull.t Fri Aug 27 13:51:44 2021 -0700
+++ b/tests/test-bookmarks-pushpull.t Mon Aug 30 12:25:57 2021 +0200
@@ -1142,8 +1142,6 @@
> local=../issue4455-dest/
> ssh=ssh://user@dummy/issue4455-dest
> http=http://localhost:$HGPORT/
- > [ui]
- > ssh="$PYTHON" "$TESTDIR/dummyssh"
> EOF
$ cat >> ../issue4455-dest/.hg/hgrc << EOF
> [hooks]
@@ -1270,7 +1268,6 @@
$ cat << EOF >> $HGRCPATH
> [ui]
- > ssh="$PYTHON" "$TESTDIR/dummyssh"
> [server]
> bookmarks-pushkey-compat = yes
> EOF
--- a/tests/test-bundle2-exchange.t Fri Aug 27 13:51:44 2021 -0700
+++ b/tests/test-bundle2-exchange.t Mon Aug 30 12:25:57 2021 +0200
@@ -28,8 +28,6 @@
> evolution.createmarkers=True
> evolution.exchange=True
> bundle2-output-capture=True
- > [ui]
- > ssh="$PYTHON" "$TESTDIR/dummyssh"
> [command-templates]
> log={rev}:{node|short} {phase} {author} {bookmarks} {desc|firstline}
> [web]
@@ -922,10 +920,6 @@
Test lazily acquiring the lock during unbundle
$ cp $TESTTMP/hgrc.orig $HGRCPATH
- $ cat >> $HGRCPATH <<EOF
- > [ui]
- > ssh="$PYTHON" "$TESTDIR/dummyssh"
- > EOF
$ cat >> $TESTTMP/locktester.py <<EOF
> import os
--- a/tests/test-bundle2-format.t Fri Aug 27 13:51:44 2021 -0700
+++ b/tests/test-bundle2-format.t Mon Aug 30 12:25:57 2021 +0200
@@ -233,8 +233,6 @@
> bundle2=$TESTTMP/bundle2.py
> [experimental]
> evolution.createmarkers=True
- > [ui]
- > ssh="$PYTHON" "$TESTDIR/dummyssh"
> [command-templates]
> log={rev}:{node|short} {phase} {author} {bookmarks} {desc|firstline}
> [web]
--- a/tests/test-bundle2-pushback.t Fri Aug 27 13:51:44 2021 -0700
+++ b/tests/test-bundle2-pushback.t Mon Aug 30 12:25:57 2021 +0200
@@ -37,7 +37,6 @@
$ cat >> $HGRCPATH <<EOF
> [ui]
- > ssh = "$PYTHON" "$TESTDIR/dummyssh"
> username = nobody <no.reply@example.com>
>
> [alias]
--- a/tests/test-bundle2-remote-changegroup.t Fri Aug 27 13:51:44 2021 -0700
+++ b/tests/test-bundle2-remote-changegroup.t Mon Aug 30 12:25:57 2021 +0200
@@ -94,8 +94,6 @@
$ cat dumb.pid >> $DAEMON_PIDS
$ cat >> $HGRCPATH << EOF
- > [ui]
- > ssh="$PYTHON" "$TESTDIR/dummyssh"
> [command-templates]
> log={rev}:{node|short} {phase} {author} {bookmarks} {desc|firstline}
> EOF
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-clone-stream.t Mon Aug 30 12:25:57 2021 +0200
@@ -0,0 +1,904 @@
+#require serve no-reposimplestore no-chg
+
+#testcases stream-legacy stream-bundle2
+
+#if stream-legacy
+ $ cat << EOF >> $HGRCPATH
+ > [server]
+ > bundle2.stream = no
+ > EOF
+#endif
+
+Initialize repository
+the status call is to check for issue5130
+
+ $ hg init server
+ $ cd server
+ $ touch foo
+ $ hg -q commit -A -m initial
+ >>> for i in range(1024):
+ ... with open(str(i), 'wb') as fh:
+ ... fh.write(b"%d" % i) and None
+ $ hg -q commit -A -m 'add a lot of files'
+ $ hg st
+
+add files with "tricky" name:
+
+ $ echo foo > 00changelog.i
+ $ echo foo > 00changelog.d
+ $ echo foo > 00changelog.n
+ $ echo foo > 00changelog-ab349180a0405010.nd
+ $ echo foo > 00manifest.i
+ $ echo foo > 00manifest.d
+ $ echo foo > foo.i
+ $ echo foo > foo.d
+ $ echo foo > foo.n
+ $ echo foo > undo.py
+ $ echo foo > undo.i
+ $ echo foo > undo.d
+ $ echo foo > undo.n
+ $ echo foo > undo.foo.i
+ $ echo foo > undo.foo.d
+ $ echo foo > undo.foo.n
+ $ echo foo > undo.babar
+ $ mkdir savanah
+ $ echo foo > savanah/foo.i
+ $ echo foo > savanah/foo.d
+ $ echo foo > savanah/foo.n
+ $ echo foo > savanah/undo.py
+ $ echo foo > savanah/undo.i
+ $ echo foo > savanah/undo.d
+ $ echo foo > savanah/undo.n
+ $ echo foo > savanah/undo.foo.i
+ $ echo foo > savanah/undo.foo.d
+ $ echo foo > savanah/undo.foo.n
+ $ echo foo > savanah/undo.babar
+ $ mkdir data
+ $ echo foo > data/foo.i
+ $ echo foo > data/foo.d
+ $ echo foo > data/foo.n
+ $ echo foo > data/undo.py
+ $ echo foo > data/undo.i
+ $ echo foo > data/undo.d
+ $ echo foo > data/undo.n
+ $ echo foo > data/undo.foo.i
+ $ echo foo > data/undo.foo.d
+ $ echo foo > data/undo.foo.n
+ $ echo foo > data/undo.babar
+ $ mkdir meta
+ $ echo foo > meta/foo.i
+ $ echo foo > meta/foo.d
+ $ echo foo > meta/foo.n
+ $ echo foo > meta/undo.py
+ $ echo foo > meta/undo.i
+ $ echo foo > meta/undo.d
+ $ echo foo > meta/undo.n
+ $ echo foo > meta/undo.foo.i
+ $ echo foo > meta/undo.foo.d
+ $ echo foo > meta/undo.foo.n
+ $ echo foo > meta/undo.babar
+ $ mkdir store
+ $ echo foo > store/foo.i
+ $ echo foo > store/foo.d
+ $ echo foo > store/foo.n
+ $ echo foo > store/undo.py
+ $ echo foo > store/undo.i
+ $ echo foo > store/undo.d
+ $ echo foo > store/undo.n
+ $ echo foo > store/undo.foo.i
+ $ echo foo > store/undo.foo.d
+ $ echo foo > store/undo.foo.n
+ $ echo foo > store/undo.babar
+
+Name with special characters
+
+ $ echo foo > store/CélesteVille_is_a_Capital_City
+
+name causing issue6581
+
+ $ mkdir --parents container/isam-build-centos7/
+ $ touch container/isam-build-centos7/bazel-coverage-generator-sandboxfs-compatibility-0758e3e4f6057904d44399bd666faba9e7f40686.patch
+
+Add all that
+
+ $ hg add .
+ adding 00changelog-ab349180a0405010.nd
+ adding 00changelog.d
+ adding 00changelog.i
+ adding 00changelog.n
+ adding 00manifest.d
+ adding 00manifest.i
+ adding container/isam-build-centos7/bazel-coverage-generator-sandboxfs-compatibility-0758e3e4f6057904d44399bd666faba9e7f40686.patch
+ adding data/foo.d
+ adding data/foo.i
+ adding data/foo.n
+ adding data/undo.babar
+ adding data/undo.d
+ adding data/undo.foo.d
+ adding data/undo.foo.i
+ adding data/undo.foo.n
+ adding data/undo.i
+ adding data/undo.n
+ adding data/undo.py
+ adding foo.d
+ adding foo.i
+ adding foo.n
+ adding meta/foo.d
+ adding meta/foo.i
+ adding meta/foo.n
+ adding meta/undo.babar
+ adding meta/undo.d
+ adding meta/undo.foo.d
+ adding meta/undo.foo.i
+ adding meta/undo.foo.n
+ adding meta/undo.i
+ adding meta/undo.n
+ adding meta/undo.py
+ adding savanah/foo.d
+ adding savanah/foo.i
+ adding savanah/foo.n
+ adding savanah/undo.babar
+ adding savanah/undo.d
+ adding savanah/undo.foo.d
+ adding savanah/undo.foo.i
+ adding savanah/undo.foo.n
+ adding savanah/undo.i
+ adding savanah/undo.n
+ adding savanah/undo.py
+ adding store/C\xc3\xa9lesteVille_is_a_Capital_City (esc)
+ adding store/foo.d
+ adding store/foo.i
+ adding store/foo.n
+ adding store/undo.babar
+ adding store/undo.d
+ adding store/undo.foo.d
+ adding store/undo.foo.i
+ adding store/undo.foo.n
+ adding store/undo.i
+ adding store/undo.n
+ adding store/undo.py
+ adding undo.babar
+ adding undo.d
+ adding undo.foo.d
+ adding undo.foo.i
+ adding undo.foo.n
+ adding undo.i
+ adding undo.n
+ adding undo.py
+ $ hg ci -m 'add files with "tricky" name'
+ $ hg --config server.uncompressed=false serve -p $HGPORT -d --pid-file=hg.pid
+ $ cat hg.pid > $DAEMON_PIDS
+ $ cd ..
+
+Check local clone
+==================
+
+The logic is close enough to the uncompressed case.
+This is present here to reuse the testing around files with "special" names.
+
+ $ hg clone server local-clone
+ updating to branch default
+ 1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+Check that the clone went well
+
+ $ hg verify -R local-clone
+ checking changesets
+ checking manifests
+ crosschecking files in changesets and manifests
+ checking files
+ checked 3 changesets with 1088 changes to 1088 files
+
+Check uncompressed
+==================
+
+Cannot stream clone when server.uncompressed is set
+
+ $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=stream_out'
+ 200 Script output follows
+
+ 1
+
+#if stream-legacy
+ $ hg debugcapabilities http://localhost:$HGPORT
+ Main capabilities:
+ batch
+ branchmap
+ $USUAL_BUNDLE2_CAPS_SERVER$
+ changegroupsubset
+ compression=$BUNDLE2_COMPRESSIONS$
+ getbundle
+ httpheader=1024
+ httpmediatype=0.1rx,0.1tx,0.2tx
+ known
+ lookup
+ pushkey
+ unbundle=HG10GZ,HG10BZ,HG10UN
+ unbundlehash
+ Bundle2 capabilities:
+ HG20
+ bookmarks
+ changegroup
+ 01
+ 02
+ checkheads
+ related
+ digests
+ md5
+ sha1
+ sha512
+ error
+ abort
+ unsupportedcontent
+ pushraced
+ pushkey
+ hgtagsfnodes
+ listkeys
+ phases
+ heads
+ pushkey
+ remote-changegroup
+ http
+ https
+
+ $ hg clone --stream -U http://localhost:$HGPORT server-disabled
+ warning: stream clone requested but server has them disabled
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 3 changesets with 1088 changes to 1088 files
+ new changesets 96ee1d7354c4:5223b5e3265f
+
+ $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto 0.2 --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
+ 200 Script output follows
+ content-type: application/mercurial-0.2
+
+
+ $ f --size body --hexdump --bytes 100
+ body: size=232
+ 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
+ 0010: cf 0b 45 52 52 4f 52 3a 41 42 4f 52 54 00 00 00 |..ERROR:ABORT...|
+ 0020: 00 01 01 07 3c 04 72 6d 65 73 73 61 67 65 73 74 |....<.rmessagest|
+ 0030: 72 65 61 6d 20 64 61 74 61 20 72 65 71 75 65 73 |ream data reques|
+ 0040: 74 65 64 20 62 75 74 20 73 65 72 76 65 72 20 64 |ted but server d|
+ 0050: 6f 65 73 20 6e 6f 74 20 61 6c 6c 6f 77 20 74 68 |oes not allow th|
+ 0060: 69 73 20 66 |is f|
+
+#endif
+#if stream-bundle2
+ $ hg debugcapabilities http://localhost:$HGPORT
+ Main capabilities:
+ batch
+ branchmap
+ $USUAL_BUNDLE2_CAPS_SERVER$
+ changegroupsubset
+ compression=$BUNDLE2_COMPRESSIONS$
+ getbundle
+ httpheader=1024
+ httpmediatype=0.1rx,0.1tx,0.2tx
+ known
+ lookup
+ pushkey
+ unbundle=HG10GZ,HG10BZ,HG10UN
+ unbundlehash
+ Bundle2 capabilities:
+ HG20
+ bookmarks
+ changegroup
+ 01
+ 02
+ checkheads
+ related
+ digests
+ md5
+ sha1
+ sha512
+ error
+ abort
+ unsupportedcontent
+ pushraced
+ pushkey
+ hgtagsfnodes
+ listkeys
+ phases
+ heads
+ pushkey
+ remote-changegroup
+ http
+ https
+
+ $ hg clone --stream -U http://localhost:$HGPORT server-disabled
+ warning: stream clone requested but server has them disabled
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 3 changesets with 1088 changes to 1088 files
+ new changesets 96ee1d7354c4:5223b5e3265f
+
+ $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto 0.2 --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
+ 200 Script output follows
+ content-type: application/mercurial-0.2
+
+
+ $ f --size body --hexdump --bytes 100
+ body: size=232
+ 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
+ 0010: cf 0b 45 52 52 4f 52 3a 41 42 4f 52 54 00 00 00 |..ERROR:ABORT...|
+ 0020: 00 01 01 07 3c 04 72 6d 65 73 73 61 67 65 73 74 |....<.rmessagest|
+ 0030: 72 65 61 6d 20 64 61 74 61 20 72 65 71 75 65 73 |ream data reques|
+ 0040: 74 65 64 20 62 75 74 20 73 65 72 76 65 72 20 64 |ted but server d|
+ 0050: 6f 65 73 20 6e 6f 74 20 61 6c 6c 6f 77 20 74 68 |oes not allow th|
+ 0060: 69 73 20 66 |is f|
+
+#endif
+
+ $ killdaemons.py
+ $ cd server
+ $ hg serve -p $HGPORT -d --pid-file=hg.pid --error errors.txt
+ $ cat hg.pid > $DAEMON_PIDS
+ $ cd ..
+
+Basic clone
+
+#if stream-legacy
+ $ hg clone --stream -U http://localhost:$HGPORT clone1
+ streaming all changes
+ 1090 files to transfer, 102 KB of data (no-zstd !)
+ transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
+ 1090 files to transfer, 98.8 KB of data (zstd !)
+ transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !)
+ searching for changes
+ no changes found
+ $ cat server/errors.txt
+#endif
+#if stream-bundle2
+ $ hg clone --stream -U http://localhost:$HGPORT clone1
+ streaming all changes
+ 1093 files to transfer, 102 KB of data (no-zstd !)
+ transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
+ 1093 files to transfer, 98.9 KB of data (zstd !)
+ transferred 98.9 KB in * seconds (* */sec) (glob) (zstd !)
+
+ $ ls -1 clone1/.hg/cache
+ branch2-base
+ branch2-immutable
+ branch2-served
+ branch2-served.hidden
+ branch2-visible
+ branch2-visible-hidden
+ rbc-names-v1
+ rbc-revs-v1
+ tags2
+ tags2-served
+ $ cat server/errors.txt
+#endif
+
+getbundle requests with stream=1 are uncompressed
+
+ $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto '0.1 0.2 comp=zlib,none' --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
+ 200 Script output follows
+ content-type: application/mercurial-0.2
+
+
+#if no-zstd no-rust
+ $ f --size --hex --bytes 256 body
+ body: size=119153
+ 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
+ 0010: 80 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......|
+ 0020: 06 09 04 0c 44 62 79 74 65 63 6f 75 6e 74 31 30 |....Dbytecount10|
+ 0030: 34 31 31 35 66 69 6c 65 63 6f 75 6e 74 31 30 39 |4115filecount109|
+ 0040: 33 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 |3requirementsdot|
+ 0050: 65 6e 63 6f 64 65 25 32 43 66 6e 63 61 63 68 65 |encode%2Cfncache|
+ 0060: 25 32 43 67 65 6e 65 72 61 6c 64 65 6c 74 61 25 |%2Cgeneraldelta%|
+ 0070: 32 43 72 65 76 6c 6f 67 76 31 25 32 43 73 70 61 |2Crevlogv1%2Cspa|
+ 0080: 72 73 65 72 65 76 6c 6f 67 25 32 43 73 74 6f 72 |rserevlog%2Cstor|
+ 0090: 65 00 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 |e....s.Bdata/0.i|
+ 00a0: 00 03 00 01 00 00 00 00 00 00 00 02 00 00 00 01 |................|
+ 00b0: 00 00 00 00 00 00 00 01 ff ff ff ff ff ff ff ff |................|
+ 00c0: 80 29 63 a0 49 d3 23 87 bf ce fe 56 67 92 67 2c |.)c.I.#....Vg.g,|
+ 00d0: 69 d1 ec 39 00 00 00 00 00 00 00 00 00 00 00 00 |i..9............|
+ 00e0: 75 30 73 26 45 64 61 74 61 2f 30 30 63 68 61 6e |u0s&Edata/00chan|
+ 00f0: 67 65 6c 6f 67 2d 61 62 33 34 39 31 38 30 61 30 |gelog-ab349180a0|
+#endif
+#if zstd no-rust
+ $ f --size --hex --bytes 256 body
+ body: size=116340
+ 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
+ 0010: 9a 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......|
+ 0020: 06 09 04 0c 5e 62 79 74 65 63 6f 75 6e 74 31 30 |....^bytecount10|
+ 0030: 31 32 37 36 66 69 6c 65 63 6f 75 6e 74 31 30 39 |1276filecount109|
+ 0040: 33 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 |3requirementsdot|
+ 0050: 65 6e 63 6f 64 65 25 32 43 66 6e 63 61 63 68 65 |encode%2Cfncache|
+ 0060: 25 32 43 67 65 6e 65 72 61 6c 64 65 6c 74 61 25 |%2Cgeneraldelta%|
+ 0070: 32 43 72 65 76 6c 6f 67 2d 63 6f 6d 70 72 65 73 |2Crevlog-compres|
+ 0080: 73 69 6f 6e 2d 7a 73 74 64 25 32 43 72 65 76 6c |sion-zstd%2Crevl|
+ 0090: 6f 67 76 31 25 32 43 73 70 61 72 73 65 72 65 76 |ogv1%2Csparserev|
+ 00a0: 6c 6f 67 25 32 43 73 74 6f 72 65 00 00 80 00 73 |log%2Cstore....s|
+ 00b0: 08 42 64 61 74 61 2f 30 2e 69 00 03 00 01 00 00 |.Bdata/0.i......|
+ 00c0: 00 00 00 00 00 02 00 00 00 01 00 00 00 00 00 00 |................|
+ 00d0: 00 01 ff ff ff ff ff ff ff ff 80 29 63 a0 49 d3 |...........)c.I.|
+ 00e0: 23 87 bf ce fe 56 67 92 67 2c 69 d1 ec 39 00 00 |#....Vg.g,i..9..|
+ 00f0: 00 00 00 00 00 00 00 00 00 00 75 30 73 26 45 64 |..........u0s&Ed|
+#endif
+#if zstd rust no-dirstate-v2
+ $ f --size --hex --bytes 256 body
+ body: size=116361
+ 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
+ 0010: af 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......|
+ 0020: 06 09 04 0c 73 62 79 74 65 63 6f 75 6e 74 31 30 |....sbytecount10|
+ 0030: 31 32 37 36 66 69 6c 65 63 6f 75 6e 74 31 30 39 |1276filecount109|
+ 0040: 33 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 |3requirementsdot|
+ 0050: 65 6e 63 6f 64 65 25 32 43 66 6e 63 61 63 68 65 |encode%2Cfncache|
+ 0060: 25 32 43 67 65 6e 65 72 61 6c 64 65 6c 74 61 25 |%2Cgeneraldelta%|
+ 0070: 32 43 70 65 72 73 69 73 74 65 6e 74 2d 6e 6f 64 |2Cpersistent-nod|
+ 0080: 65 6d 61 70 25 32 43 72 65 76 6c 6f 67 2d 63 6f |emap%2Crevlog-co|
+ 0090: 6d 70 72 65 73 73 69 6f 6e 2d 7a 73 74 64 25 32 |mpression-zstd%2|
+ 00a0: 43 72 65 76 6c 6f 67 76 31 25 32 43 73 70 61 72 |Crevlogv1%2Cspar|
+ 00b0: 73 65 72 65 76 6c 6f 67 25 32 43 73 74 6f 72 65 |serevlog%2Cstore|
+ 00c0: 00 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 00 |....s.Bdata/0.i.|
+ 00d0: 03 00 01 00 00 00 00 00 00 00 02 00 00 00 01 00 |................|
+ 00e0: 00 00 00 00 00 00 01 ff ff ff ff ff ff ff ff 80 |................|
+ 00f0: 29 63 a0 49 d3 23 87 bf ce fe 56 67 92 67 2c 69 |)c.I.#....Vg.g,i|
+#endif
+#if zstd dirstate-v2
+ $ f --size --hex --bytes 256 body
+ body: size=109549
+ 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
+ 0010: c0 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......|
+ 0020: 05 09 04 0c 85 62 79 74 65 63 6f 75 6e 74 39 35 |.....bytecount95|
+ 0030: 38 39 37 66 69 6c 65 63 6f 75 6e 74 31 30 33 30 |897filecount1030|
+ 0040: 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 65 |requirementsdote|
+ 0050: 6e 63 6f 64 65 25 32 43 65 78 70 2d 64 69 72 73 |ncode%2Cexp-dirs|
+ 0060: 74 61 74 65 2d 76 32 25 32 43 66 6e 63 61 63 68 |tate-v2%2Cfncach|
+ 0070: 65 25 32 43 67 65 6e 65 72 61 6c 64 65 6c 74 61 |e%2Cgeneraldelta|
+ 0080: 25 32 43 70 65 72 73 69 73 74 65 6e 74 2d 6e 6f |%2Cpersistent-no|
+ 0090: 64 65 6d 61 70 25 32 43 72 65 76 6c 6f 67 2d 63 |demap%2Crevlog-c|
+ 00a0: 6f 6d 70 72 65 73 73 69 6f 6e 2d 7a 73 74 64 25 |ompression-zstd%|
+ 00b0: 32 43 72 65 76 6c 6f 67 76 31 25 32 43 73 70 61 |2Crevlogv1%2Cspa|
+ 00c0: 72 73 65 72 65 76 6c 6f 67 25 32 43 73 74 6f 72 |rserevlog%2Cstor|
+ 00d0: 65 00 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 |e....s.Bdata/0.i|
+ 00e0: 00 03 00 01 00 00 00 00 00 00 00 02 00 00 00 01 |................|
+ 00f0: 00 00 00 00 00 00 00 01 ff ff ff ff ff ff ff ff |................|
+#endif
+
+--uncompressed is an alias to --stream
+
+#if stream-legacy
+ $ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed
+ streaming all changes
+ 1090 files to transfer, 102 KB of data (no-zstd !)
+ transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
+ 1090 files to transfer, 98.8 KB of data (zstd !)
+ transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !)
+ searching for changes
+ no changes found
+#endif
+#if stream-bundle2
+ $ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed
+ streaming all changes
+ 1093 files to transfer, 102 KB of data (no-zstd !)
+ transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
+ 1093 files to transfer, 98.9 KB of data (zstd !)
+ transferred 98.9 KB in * seconds (* */sec) (glob) (zstd !)
+#endif
+
+Clone with background file closing enabled
+
+#if stream-legacy
+ $ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep -v adding
+ using http://localhost:$HGPORT/
+ sending capabilities command
+ sending branchmap command
+ streaming all changes
+ sending stream_out command
+ 1090 files to transfer, 102 KB of data (no-zstd !)
+ 1090 files to transfer, 98.8 KB of data (zstd !)
+ starting 4 threads for background file closing
+ updating the branch cache
+ transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
+ transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !)
+ query 1; heads
+ sending batch command
+ searching for changes
+ all remote heads known locally
+ no changes found
+ sending getbundle command
+ bundle2-input-bundle: with-transaction
+ bundle2-input-part: "listkeys" (params: 1 mandatory) supported
+ bundle2-input-part: "phase-heads" supported
+ bundle2-input-part: total payload size 24
+ bundle2-input-bundle: 2 parts total
+ checking for updated bookmarks
+ updating the branch cache
+ (sent 5 HTTP requests and * bytes; received * bytes in responses) (glob)
+#endif
+#if stream-bundle2
+ $ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep -v adding
+ using http://localhost:$HGPORT/
+ sending capabilities command
+ query 1; heads
+ sending batch command
+ streaming all changes
+ sending getbundle command
+ bundle2-input-bundle: with-transaction
+ bundle2-input-part: "stream2" (params: 3 mandatory) supported
+ applying stream bundle
+ 1093 files to transfer, 102 KB of data (no-zstd !)
+ 1093 files to transfer, 98.9 KB of data (zstd !)
+ starting 4 threads for background file closing
+ starting 4 threads for background file closing
+ updating the branch cache
+ transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
+ bundle2-input-part: total payload size 118984 (no-zstd !)
+ transferred 98.9 KB in * seconds (* */sec) (glob) (zstd !)
+ bundle2-input-part: total payload size 116145 (zstd !)
+ bundle2-input-part: "listkeys" (params: 1 mandatory) supported
+ bundle2-input-bundle: 2 parts total
+ checking for updated bookmarks
+ updating the branch cache
+ (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob)
+#endif
+
+Cannot stream clone when there are secret changesets
+
+ $ hg -R server phase --force --secret -r tip
+ $ hg clone --stream -U http://localhost:$HGPORT secret-denied
+ warning: stream clone requested but server has them disabled
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 2 changesets with 1025 changes to 1025 files
+ new changesets 96ee1d7354c4:c17445101a72
+
+ $ killdaemons.py
+
+Streaming of secrets can be overridden by server config
+
+ $ cd server
+ $ hg serve --config server.uncompressedallowsecret=true -p $HGPORT -d --pid-file=hg.pid
+ $ cat hg.pid > $DAEMON_PIDS
+ $ cd ..
+
+#if stream-legacy
+ $ hg clone --stream -U http://localhost:$HGPORT secret-allowed
+ streaming all changes
+ 1090 files to transfer, 102 KB of data (no-zstd !)
+ transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
+ 1090 files to transfer, 98.8 KB of data (zstd !)
+ transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !)
+ searching for changes
+ no changes found
+#endif
+#if stream-bundle2
+ $ hg clone --stream -U http://localhost:$HGPORT secret-allowed
+ streaming all changes
+ 1093 files to transfer, 102 KB of data (no-zstd !)
+ transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
+ 1093 files to transfer, 98.9 KB of data (zstd !)
+ transferred 98.9 KB in * seconds (* */sec) (glob) (zstd !)
+#endif
+
+ $ killdaemons.py
+
+Verify interaction between preferuncompressed and secret presence
+
+ $ cd server
+ $ hg serve --config server.preferuncompressed=true -p $HGPORT -d --pid-file=hg.pid
+ $ cat hg.pid > $DAEMON_PIDS
+ $ cd ..
+
+ $ hg clone -U http://localhost:$HGPORT preferuncompressed-secret
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 2 changesets with 1025 changes to 1025 files
+ new changesets 96ee1d7354c4:c17445101a72
+
+ $ killdaemons.py
+
+Clone not allowed when full bundles disabled and can't serve secrets
+
+ $ cd server
+ $ hg serve --config server.disablefullbundle=true -p $HGPORT -d --pid-file=hg.pid
+ $ cat hg.pid > $DAEMON_PIDS
+ $ cd ..
+
+ $ hg clone --stream http://localhost:$HGPORT secret-full-disabled
+ warning: stream clone requested but server has them disabled
+ requesting all changes
+ remote: abort: server has pull-based clones disabled
+ abort: pull failed on remote
+ (remove --pull if specified or upgrade Mercurial)
+ [100]
+
+Local stream clone with secrets involved
+(This is just a test over behavior: if you have access to the repo's files,
+there is no security so it isn't important to prevent a clone here.)
+
+ $ hg clone -U --stream server local-secret
+ warning: stream clone requested but server has them disabled
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 2 changesets with 1025 changes to 1025 files
+ new changesets 96ee1d7354c4:c17445101a72
+
+Stream clone while repo is changing:
+
+ $ mkdir changing
+ $ cd changing
+
+extension for delaying the server process so we can reliably modify the repo
+while cloning
+
+ $ cat > stream_steps.py <<EOF
+ > import os
+ > import sys
+ > from mercurial import (
+ > encoding,
+ > extensions,
+ > streamclone,
+ > testing,
+ > )
+ > WALKED_FILE_1 = encoding.environ[b'HG_TEST_STREAM_WALKED_FILE_1']
+ > WALKED_FILE_2 = encoding.environ[b'HG_TEST_STREAM_WALKED_FILE_2']
+ >
+ > def _test_sync_point_walk_1(orig, repo):
+ > testing.write_file(WALKED_FILE_1)
+ >
+ > def _test_sync_point_walk_2(orig, repo):
+ > assert repo._currentlock(repo._lockref) is None
+ > testing.wait_file(WALKED_FILE_2)
+ >
+ > extensions.wrapfunction(
+ > streamclone,
+ > '_test_sync_point_walk_1',
+ > _test_sync_point_walk_1
+ > )
+ > extensions.wrapfunction(
+ > streamclone,
+ > '_test_sync_point_walk_2',
+ > _test_sync_point_walk_2
+ > )
+ > EOF
+
+prepare repo with small and big file to cover both code paths in emitrevlogdata
+
+ $ hg init repo
+ $ touch repo/f1
+ $ $TESTDIR/seq.py 50000 > repo/f2
+ $ hg -R repo ci -Aqm "0"
+ $ HG_TEST_STREAM_WALKED_FILE_1="$TESTTMP/sync_file_walked_1"
+ $ export HG_TEST_STREAM_WALKED_FILE_1
+ $ HG_TEST_STREAM_WALKED_FILE_2="$TESTTMP/sync_file_walked_2"
+ $ export HG_TEST_STREAM_WALKED_FILE_2
+ $ HG_TEST_STREAM_WALKED_FILE_3="$TESTTMP/sync_file_walked_3"
+ $ export HG_TEST_STREAM_WALKED_FILE_3
+# $ cat << EOF >> $HGRCPATH
+# > [hooks]
+# > pre-clone=rm -f "$TESTTMP/sync_file_walked_*"
+# > EOF
+ $ hg serve -R repo -p $HGPORT1 -d --error errors.log --pid-file=hg.pid --config extensions.stream_steps="$RUNTESTDIR/testlib/ext-stream-clone-steps.py"
+ $ cat hg.pid >> $DAEMON_PIDS
+
+clone while modifying the repo between stat'ing the files under the write lock
+and actually serving the file content
+
+ $ (hg clone -q --stream -U http://localhost:$HGPORT1 clone; touch "$HG_TEST_STREAM_WALKED_FILE_3") &
+ $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_1
+ $ echo >> repo/f1
+ $ echo >> repo/f2
+ $ hg -R repo ci -m "1" --config ui.timeout.warn=-1
+ $ touch $HG_TEST_STREAM_WALKED_FILE_2
+ $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_3
+ $ hg -R clone id
+ 000000000000
+ $ cat errors.log
+ $ cd ..
+
+Stream repository with bookmarks
+--------------------------------
+
+(revert introduction of secret changeset)
+
+ $ hg -R server phase --draft 'secret()'
+
+add a bookmark
+
+ $ hg -R server bookmark -r tip some-bookmark
+
+clone it
+
+#if stream-legacy
+ $ hg clone --stream http://localhost:$HGPORT with-bookmarks
+ streaming all changes
+ 1090 files to transfer, 102 KB of data (no-zstd !)
+ transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
+ 1090 files to transfer, 98.8 KB of data (zstd !)
+ transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !)
+ searching for changes
+ no changes found
+ updating to branch default
+ 1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
+#endif
+#if stream-bundle2
+ $ hg clone --stream http://localhost:$HGPORT with-bookmarks
+ streaming all changes
+ 1096 files to transfer, 102 KB of data (no-zstd !)
+ transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
+ 1096 files to transfer, 99.1 KB of data (zstd !)
+ transferred 99.1 KB in * seconds (* */sec) (glob) (zstd !)
+ updating to branch default
+ 1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
+#endif
+ $ hg verify -R with-bookmarks
+ checking changesets
+ checking manifests
+ crosschecking files in changesets and manifests
+ checking files
+ checked 3 changesets with 1088 changes to 1088 files
+ $ hg -R with-bookmarks bookmarks
+ some-bookmark 2:5223b5e3265f
+
+Stream repository with phases
+-----------------------------
+
+Clone as publishing
+
+ $ hg -R server phase -r 'all()'
+ 0: draft
+ 1: draft
+ 2: draft
+
+#if stream-legacy
+ $ hg clone --stream http://localhost:$HGPORT phase-publish
+ streaming all changes
+ 1090 files to transfer, 102 KB of data (no-zstd !)
+ transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
+ 1090 files to transfer, 98.8 KB of data (zstd !)
+ transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !)
+ searching for changes
+ no changes found
+ updating to branch default
+ 1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
+#endif
+#if stream-bundle2
+ $ hg clone --stream http://localhost:$HGPORT phase-publish
+ streaming all changes
+ 1096 files to transfer, 102 KB of data (no-zstd !)
+ transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
+ 1096 files to transfer, 99.1 KB of data (zstd !)
+ transferred 99.1 KB in * seconds (* */sec) (glob) (zstd !)
+ updating to branch default
+ 1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
+#endif
+ $ hg verify -R phase-publish
+ checking changesets
+ checking manifests
+ crosschecking files in changesets and manifests
+ checking files
+ checked 3 changesets with 1088 changes to 1088 files
+ $ hg -R phase-publish phase -r 'all()'
+ 0: public
+ 1: public
+ 2: public
+
+Clone as non publishing
+
+ $ cat << EOF >> server/.hg/hgrc
+ > [phases]
+ > publish = False
+ > EOF
+ $ killdaemons.py
+ $ hg -R server serve -p $HGPORT -d --pid-file=hg.pid
+ $ cat hg.pid > $DAEMON_PIDS
+
+#if stream-legacy
+
+With v1 of the stream protocol, changesets are always cloned as public. This
+makes stream v1 unsuitable for non-publishing repositories.
+
+ $ hg clone --stream http://localhost:$HGPORT phase-no-publish
+ streaming all changes
+ 1090 files to transfer, 102 KB of data (no-zstd !)
+ transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
+ 1090 files to transfer, 98.8 KB of data (zstd !)
+ transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !)
+ searching for changes
+ no changes found
+ updating to branch default
+ 1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg -R phase-no-publish phase -r 'all()'
+ 0: public
+ 1: public
+ 2: public
+#endif
+#if stream-bundle2
+ $ hg clone --stream http://localhost:$HGPORT phase-no-publish
+ streaming all changes
+ 1097 files to transfer, 102 KB of data (no-zstd !)
+ transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
+ 1097 files to transfer, 99.1 KB of data (zstd !)
+ transferred 99.1 KB in * seconds (* */sec) (glob) (zstd !)
+ updating to branch default
+ 1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg -R phase-no-publish phase -r 'all()'
+ 0: draft
+ 1: draft
+ 2: draft
+#endif
+ $ hg verify -R phase-no-publish
+ checking changesets
+ checking manifests
+ crosschecking files in changesets and manifests
+ checking files
+ checked 3 changesets with 1088 changes to 1088 files
+
+ $ killdaemons.py
+
+#if stream-legacy
+
+With v1 of the stream protocol, changesets are always cloned as public. There
+is no obsolescence marker exchange in stream v1.
+
+#endif
+#if stream-bundle2
+
+Stream repository with obsolescence
+-----------------------------------
+
+Clone non-publishing with obsolescence
+
+ $ cat >> $HGRCPATH << EOF
+ > [experimental]
+ > evolution=all
+ > EOF
+
+ $ cd server
+ $ echo foo > foo
+ $ hg -q commit -m 'about to be pruned'
+ $ hg debugobsolete `hg log -r . -T '{node}'` -d '0 0' -u test --record-parents
+ 1 new obsolescence markers
+ obsoleted 1 changesets
+ $ hg up null -q
+ $ hg log -T '{rev}: {phase}\n'
+ 2: draft
+ 1: draft
+ 0: draft
+ $ hg serve -p $HGPORT -d --pid-file=hg.pid
+ $ cat hg.pid > $DAEMON_PIDS
+ $ cd ..
+
+ $ hg clone -U --stream http://localhost:$HGPORT with-obsolescence
+ streaming all changes
+ 1098 files to transfer, 102 KB of data (no-zstd !)
+ transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
+ 1098 files to transfer, 99.5 KB of data (zstd !)
+ transferred 99.5 KB in * seconds (* */sec) (glob) (zstd !)
+ $ hg -R with-obsolescence log -T '{rev}: {phase}\n'
+ 2: draft
+ 1: draft
+ 0: draft
+ $ hg debugobsolete -R with-obsolescence
+ 8c206a663911c1f97f2f9d7382e417ae55872cfa 0 {5223b5e3265f0df40bb743da62249413d74ac70f} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+ $ hg verify -R with-obsolescence
+ checking changesets
+ checking manifests
+ crosschecking files in changesets and manifests
+ checking files
+ checked 4 changesets with 1089 changes to 1088 files
+
+ $ hg clone -U --stream --config experimental.evolution=0 http://localhost:$HGPORT with-obsolescence-no-evolution
+ streaming all changes
+ remote: abort: server has obsolescence markers, but client cannot receive them via stream clone
+ abort: pull failed on remote
+ [100]
+
+ $ killdaemons.py
+
+#endif
--- a/tests/test-clone-uncompressed.t Fri Aug 27 13:51:44 2021 -0700
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,904 +0,0 @@
-#require serve no-reposimplestore no-chg
-
-#testcases stream-legacy stream-bundle2
-
-#if stream-legacy
- $ cat << EOF >> $HGRCPATH
- > [server]
- > bundle2.stream = no
- > EOF
-#endif
-
-Initialize repository
-the status call is to check for issue5130
-
- $ hg init server
- $ cd server
- $ touch foo
- $ hg -q commit -A -m initial
- >>> for i in range(1024):
- ... with open(str(i), 'wb') as fh:
- ... fh.write(b"%d" % i) and None
- $ hg -q commit -A -m 'add a lot of files'
- $ hg st
-
-add files with "tricky" name:
-
- $ echo foo > 00changelog.i
- $ echo foo > 00changelog.d
- $ echo foo > 00changelog.n
- $ echo foo > 00changelog-ab349180a0405010.nd
- $ echo foo > 00manifest.i
- $ echo foo > 00manifest.d
- $ echo foo > foo.i
- $ echo foo > foo.d
- $ echo foo > foo.n
- $ echo foo > undo.py
- $ echo foo > undo.i
- $ echo foo > undo.d
- $ echo foo > undo.n
- $ echo foo > undo.foo.i
- $ echo foo > undo.foo.d
- $ echo foo > undo.foo.n
- $ echo foo > undo.babar
- $ mkdir savanah
- $ echo foo > savanah/foo.i
- $ echo foo > savanah/foo.d
- $ echo foo > savanah/foo.n
- $ echo foo > savanah/undo.py
- $ echo foo > savanah/undo.i
- $ echo foo > savanah/undo.d
- $ echo foo > savanah/undo.n
- $ echo foo > savanah/undo.foo.i
- $ echo foo > savanah/undo.foo.d
- $ echo foo > savanah/undo.foo.n
- $ echo foo > savanah/undo.babar
- $ mkdir data
- $ echo foo > data/foo.i
- $ echo foo > data/foo.d
- $ echo foo > data/foo.n
- $ echo foo > data/undo.py
- $ echo foo > data/undo.i
- $ echo foo > data/undo.d
- $ echo foo > data/undo.n
- $ echo foo > data/undo.foo.i
- $ echo foo > data/undo.foo.d
- $ echo foo > data/undo.foo.n
- $ echo foo > data/undo.babar
- $ mkdir meta
- $ echo foo > meta/foo.i
- $ echo foo > meta/foo.d
- $ echo foo > meta/foo.n
- $ echo foo > meta/undo.py
- $ echo foo > meta/undo.i
- $ echo foo > meta/undo.d
- $ echo foo > meta/undo.n
- $ echo foo > meta/undo.foo.i
- $ echo foo > meta/undo.foo.d
- $ echo foo > meta/undo.foo.n
- $ echo foo > meta/undo.babar
- $ mkdir store
- $ echo foo > store/foo.i
- $ echo foo > store/foo.d
- $ echo foo > store/foo.n
- $ echo foo > store/undo.py
- $ echo foo > store/undo.i
- $ echo foo > store/undo.d
- $ echo foo > store/undo.n
- $ echo foo > store/undo.foo.i
- $ echo foo > store/undo.foo.d
- $ echo foo > store/undo.foo.n
- $ echo foo > store/undo.babar
-
-Name with special characters
-
- $ echo foo > store/CélesteVille_is_a_Capital_City
-
-name causing issue6581
-
- $ mkdir --parents container/isam-build-centos7/
- $ touch container/isam-build-centos7/bazel-coverage-generator-sandboxfs-compatibility-0758e3e4f6057904d44399bd666faba9e7f40686.patch
-
-Add all that
-
- $ hg add .
- adding 00changelog-ab349180a0405010.nd
- adding 00changelog.d
- adding 00changelog.i
- adding 00changelog.n
- adding 00manifest.d
- adding 00manifest.i
- adding container/isam-build-centos7/bazel-coverage-generator-sandboxfs-compatibility-0758e3e4f6057904d44399bd666faba9e7f40686.patch
- adding data/foo.d
- adding data/foo.i
- adding data/foo.n
- adding data/undo.babar
- adding data/undo.d
- adding data/undo.foo.d
- adding data/undo.foo.i
- adding data/undo.foo.n
- adding data/undo.i
- adding data/undo.n
- adding data/undo.py
- adding foo.d
- adding foo.i
- adding foo.n
- adding meta/foo.d
- adding meta/foo.i
- adding meta/foo.n
- adding meta/undo.babar
- adding meta/undo.d
- adding meta/undo.foo.d
- adding meta/undo.foo.i
- adding meta/undo.foo.n
- adding meta/undo.i
- adding meta/undo.n
- adding meta/undo.py
- adding savanah/foo.d
- adding savanah/foo.i
- adding savanah/foo.n
- adding savanah/undo.babar
- adding savanah/undo.d
- adding savanah/undo.foo.d
- adding savanah/undo.foo.i
- adding savanah/undo.foo.n
- adding savanah/undo.i
- adding savanah/undo.n
- adding savanah/undo.py
- adding store/C\xc3\xa9lesteVille_is_a_Capital_City (esc)
- adding store/foo.d
- adding store/foo.i
- adding store/foo.n
- adding store/undo.babar
- adding store/undo.d
- adding store/undo.foo.d
- adding store/undo.foo.i
- adding store/undo.foo.n
- adding store/undo.i
- adding store/undo.n
- adding store/undo.py
- adding undo.babar
- adding undo.d
- adding undo.foo.d
- adding undo.foo.i
- adding undo.foo.n
- adding undo.i
- adding undo.n
- adding undo.py
- $ hg ci -m 'add files with "tricky" name'
- $ hg --config server.uncompressed=false serve -p $HGPORT -d --pid-file=hg.pid
- $ cat hg.pid > $DAEMON_PIDS
- $ cd ..
-
-Check local clone
-==================
-
-The logic is close enough of uncompressed.
-This is present here to reuse the testing around file with "special" names.
-
- $ hg clone server local-clone
- updating to branch default
- 1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
-
-Check that the clone went well
-
- $ hg verify -R local-clone
- checking changesets
- checking manifests
- crosschecking files in changesets and manifests
- checking files
- checked 3 changesets with 1088 changes to 1088 files
-
-Check uncompressed
-==================
-
-Cannot stream clone when server.uncompressed is set
-
- $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=stream_out'
- 200 Script output follows
-
- 1
-
-#if stream-legacy
- $ hg debugcapabilities http://localhost:$HGPORT
- Main capabilities:
- batch
- branchmap
- $USUAL_BUNDLE2_CAPS_SERVER$
- changegroupsubset
- compression=$BUNDLE2_COMPRESSIONS$
- getbundle
- httpheader=1024
- httpmediatype=0.1rx,0.1tx,0.2tx
- known
- lookup
- pushkey
- unbundle=HG10GZ,HG10BZ,HG10UN
- unbundlehash
- Bundle2 capabilities:
- HG20
- bookmarks
- changegroup
- 01
- 02
- checkheads
- related
- digests
- md5
- sha1
- sha512
- error
- abort
- unsupportedcontent
- pushraced
- pushkey
- hgtagsfnodes
- listkeys
- phases
- heads
- pushkey
- remote-changegroup
- http
- https
-
- $ hg clone --stream -U http://localhost:$HGPORT server-disabled
- warning: stream clone requested but server has them disabled
- requesting all changes
- adding changesets
- adding manifests
- adding file changes
- added 3 changesets with 1088 changes to 1088 files
- new changesets 96ee1d7354c4:5223b5e3265f
-
- $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto 0.2 --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
- 200 Script output follows
- content-type: application/mercurial-0.2
-
-
- $ f --size body --hexdump --bytes 100
- body: size=232
- 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
- 0010: cf 0b 45 52 52 4f 52 3a 41 42 4f 52 54 00 00 00 |..ERROR:ABORT...|
- 0020: 00 01 01 07 3c 04 72 6d 65 73 73 61 67 65 73 74 |....<.rmessagest|
- 0030: 72 65 61 6d 20 64 61 74 61 20 72 65 71 75 65 73 |ream data reques|
- 0040: 74 65 64 20 62 75 74 20 73 65 72 76 65 72 20 64 |ted but server d|
- 0050: 6f 65 73 20 6e 6f 74 20 61 6c 6c 6f 77 20 74 68 |oes not allow th|
- 0060: 69 73 20 66 |is f|
-
-#endif
-#if stream-bundle2
- $ hg debugcapabilities http://localhost:$HGPORT
- Main capabilities:
- batch
- branchmap
- $USUAL_BUNDLE2_CAPS_SERVER$
- changegroupsubset
- compression=$BUNDLE2_COMPRESSIONS$
- getbundle
- httpheader=1024
- httpmediatype=0.1rx,0.1tx,0.2tx
- known
- lookup
- pushkey
- unbundle=HG10GZ,HG10BZ,HG10UN
- unbundlehash
- Bundle2 capabilities:
- HG20
- bookmarks
- changegroup
- 01
- 02
- checkheads
- related
- digests
- md5
- sha1
- sha512
- error
- abort
- unsupportedcontent
- pushraced
- pushkey
- hgtagsfnodes
- listkeys
- phases
- heads
- pushkey
- remote-changegroup
- http
- https
-
- $ hg clone --stream -U http://localhost:$HGPORT server-disabled
- warning: stream clone requested but server has them disabled
- requesting all changes
- adding changesets
- adding manifests
- adding file changes
- added 3 changesets with 1088 changes to 1088 files
- new changesets 96ee1d7354c4:5223b5e3265f
-
- $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto 0.2 --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
- 200 Script output follows
- content-type: application/mercurial-0.2
-
-
- $ f --size body --hexdump --bytes 100
- body: size=232
- 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
- 0010: cf 0b 45 52 52 4f 52 3a 41 42 4f 52 54 00 00 00 |..ERROR:ABORT...|
- 0020: 00 01 01 07 3c 04 72 6d 65 73 73 61 67 65 73 74 |....<.rmessagest|
- 0030: 72 65 61 6d 20 64 61 74 61 20 72 65 71 75 65 73 |ream data reques|
- 0040: 74 65 64 20 62 75 74 20 73 65 72 76 65 72 20 64 |ted but server d|
- 0050: 6f 65 73 20 6e 6f 74 20 61 6c 6c 6f 77 20 74 68 |oes not allow th|
- 0060: 69 73 20 66 |is f|
-
-#endif
-
- $ killdaemons.py
- $ cd server
- $ hg serve -p $HGPORT -d --pid-file=hg.pid --error errors.txt
- $ cat hg.pid > $DAEMON_PIDS
- $ cd ..
-
-Basic clone
-
-#if stream-legacy
- $ hg clone --stream -U http://localhost:$HGPORT clone1
- streaming all changes
- 1090 files to transfer, 102 KB of data (no-zstd !)
- transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
- 1090 files to transfer, 98.8 KB of data (zstd !)
- transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !)
- searching for changes
- no changes found
- $ cat server/errors.txt
-#endif
-#if stream-bundle2
- $ hg clone --stream -U http://localhost:$HGPORT clone1
- streaming all changes
- 1093 files to transfer, 102 KB of data (no-zstd !)
- transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
- 1093 files to transfer, 98.9 KB of data (zstd !)
- transferred 98.9 KB in * seconds (* */sec) (glob) (zstd !)
-
- $ ls -1 clone1/.hg/cache
- branch2-base
- branch2-immutable
- branch2-served
- branch2-served.hidden
- branch2-visible
- branch2-visible-hidden
- rbc-names-v1
- rbc-revs-v1
- tags2
- tags2-served
- $ cat server/errors.txt
-#endif
-
-getbundle requests with stream=1 are uncompressed
-
- $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto '0.1 0.2 comp=zlib,none' --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
- 200 Script output follows
- content-type: application/mercurial-0.2
-
-
-#if no-zstd no-rust
- $ f --size --hex --bytes 256 body
- body: size=119153
- 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
- 0010: 80 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......|
- 0020: 06 09 04 0c 44 62 79 74 65 63 6f 75 6e 74 31 30 |....Dbytecount10|
- 0030: 34 31 31 35 66 69 6c 65 63 6f 75 6e 74 31 30 39 |4115filecount109|
- 0040: 33 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 |3requirementsdot|
- 0050: 65 6e 63 6f 64 65 25 32 43 66 6e 63 61 63 68 65 |encode%2Cfncache|
- 0060: 25 32 43 67 65 6e 65 72 61 6c 64 65 6c 74 61 25 |%2Cgeneraldelta%|
- 0070: 32 43 72 65 76 6c 6f 67 76 31 25 32 43 73 70 61 |2Crevlogv1%2Cspa|
- 0080: 72 73 65 72 65 76 6c 6f 67 25 32 43 73 74 6f 72 |rserevlog%2Cstor|
- 0090: 65 00 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 |e....s.Bdata/0.i|
- 00a0: 00 03 00 01 00 00 00 00 00 00 00 02 00 00 00 01 |................|
- 00b0: 00 00 00 00 00 00 00 01 ff ff ff ff ff ff ff ff |................|
- 00c0: 80 29 63 a0 49 d3 23 87 bf ce fe 56 67 92 67 2c |.)c.I.#....Vg.g,|
- 00d0: 69 d1 ec 39 00 00 00 00 00 00 00 00 00 00 00 00 |i..9............|
- 00e0: 75 30 73 26 45 64 61 74 61 2f 30 30 63 68 61 6e |u0s&Edata/00chan|
- 00f0: 67 65 6c 6f 67 2d 61 62 33 34 39 31 38 30 61 30 |gelog-ab349180a0|
-#endif
-#if zstd no-rust
- $ f --size --hex --bytes 256 body
- body: size=116340
- 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
- 0010: 9a 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......|
- 0020: 06 09 04 0c 5e 62 79 74 65 63 6f 75 6e 74 31 30 |....^bytecount10|
- 0030: 31 32 37 36 66 69 6c 65 63 6f 75 6e 74 31 30 39 |1276filecount109|
- 0040: 33 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 |3requirementsdot|
- 0050: 65 6e 63 6f 64 65 25 32 43 66 6e 63 61 63 68 65 |encode%2Cfncache|
- 0060: 25 32 43 67 65 6e 65 72 61 6c 64 65 6c 74 61 25 |%2Cgeneraldelta%|
- 0070: 32 43 72 65 76 6c 6f 67 2d 63 6f 6d 70 72 65 73 |2Crevlog-compres|
- 0080: 73 69 6f 6e 2d 7a 73 74 64 25 32 43 72 65 76 6c |sion-zstd%2Crevl|
- 0090: 6f 67 76 31 25 32 43 73 70 61 72 73 65 72 65 76 |ogv1%2Csparserev|
- 00a0: 6c 6f 67 25 32 43 73 74 6f 72 65 00 00 80 00 73 |log%2Cstore....s|
- 00b0: 08 42 64 61 74 61 2f 30 2e 69 00 03 00 01 00 00 |.Bdata/0.i......|
- 00c0: 00 00 00 00 00 02 00 00 00 01 00 00 00 00 00 00 |................|
- 00d0: 00 01 ff ff ff ff ff ff ff ff 80 29 63 a0 49 d3 |...........)c.I.|
- 00e0: 23 87 bf ce fe 56 67 92 67 2c 69 d1 ec 39 00 00 |#....Vg.g,i..9..|
- 00f0: 00 00 00 00 00 00 00 00 00 00 75 30 73 26 45 64 |..........u0s&Ed|
-#endif
-#if zstd rust no-dirstate-v2
- $ f --size --hex --bytes 256 body
- body: size=116361
- 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
- 0010: af 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......|
- 0020: 06 09 04 0c 73 62 79 74 65 63 6f 75 6e 74 31 30 |....sbytecount10|
- 0030: 31 32 37 36 66 69 6c 65 63 6f 75 6e 74 31 30 39 |1276filecount109|
- 0040: 33 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 |3requirementsdot|
- 0050: 65 6e 63 6f 64 65 25 32 43 66 6e 63 61 63 68 65 |encode%2Cfncache|
- 0060: 25 32 43 67 65 6e 65 72 61 6c 64 65 6c 74 61 25 |%2Cgeneraldelta%|
- 0070: 32 43 70 65 72 73 69 73 74 65 6e 74 2d 6e 6f 64 |2Cpersistent-nod|
- 0080: 65 6d 61 70 25 32 43 72 65 76 6c 6f 67 2d 63 6f |emap%2Crevlog-co|
- 0090: 6d 70 72 65 73 73 69 6f 6e 2d 7a 73 74 64 25 32 |mpression-zstd%2|
- 00a0: 43 72 65 76 6c 6f 67 76 31 25 32 43 73 70 61 72 |Crevlogv1%2Cspar|
- 00b0: 73 65 72 65 76 6c 6f 67 25 32 43 73 74 6f 72 65 |serevlog%2Cstore|
- 00c0: 00 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 00 |....s.Bdata/0.i.|
- 00d0: 03 00 01 00 00 00 00 00 00 00 02 00 00 00 01 00 |................|
- 00e0: 00 00 00 00 00 00 01 ff ff ff ff ff ff ff ff 80 |................|
- 00f0: 29 63 a0 49 d3 23 87 bf ce fe 56 67 92 67 2c 69 |)c.I.#....Vg.g,i|
-#endif
-#if zstd dirstate-v2
- $ f --size --hex --bytes 256 body
- body: size=109549
- 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
- 0010: c0 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......|
- 0020: 05 09 04 0c 85 62 79 74 65 63 6f 75 6e 74 39 35 |.....bytecount95|
- 0030: 38 39 37 66 69 6c 65 63 6f 75 6e 74 31 30 33 30 |897filecount1030|
- 0040: 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 65 |requirementsdote|
- 0050: 6e 63 6f 64 65 25 32 43 65 78 70 2d 64 69 72 73 |ncode%2Cexp-dirs|
- 0060: 74 61 74 65 2d 76 32 25 32 43 66 6e 63 61 63 68 |tate-v2%2Cfncach|
- 0070: 65 25 32 43 67 65 6e 65 72 61 6c 64 65 6c 74 61 |e%2Cgeneraldelta|
- 0080: 25 32 43 70 65 72 73 69 73 74 65 6e 74 2d 6e 6f |%2Cpersistent-no|
- 0090: 64 65 6d 61 70 25 32 43 72 65 76 6c 6f 67 2d 63 |demap%2Crevlog-c|
- 00a0: 6f 6d 70 72 65 73 73 69 6f 6e 2d 7a 73 74 64 25 |ompression-zstd%|
- 00b0: 32 43 72 65 76 6c 6f 67 76 31 25 32 43 73 70 61 |2Crevlogv1%2Cspa|
- 00c0: 72 73 65 72 65 76 6c 6f 67 25 32 43 73 74 6f 72 |rserevlog%2Cstor|
- 00d0: 65 00 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 |e....s.Bdata/0.i|
- 00e0: 00 03 00 01 00 00 00 00 00 00 00 02 00 00 00 01 |................|
- 00f0: 00 00 00 00 00 00 00 01 ff ff ff ff ff ff ff ff |................|
-#endif
-
---uncompressed is an alias to --stream
-
-#if stream-legacy
- $ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed
- streaming all changes
- 1090 files to transfer, 102 KB of data (no-zstd !)
- transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
- 1090 files to transfer, 98.8 KB of data (zstd !)
- transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !)
- searching for changes
- no changes found
-#endif
-#if stream-bundle2
- $ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed
- streaming all changes
- 1093 files to transfer, 102 KB of data (no-zstd !)
- transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
- 1093 files to transfer, 98.9 KB of data (zstd !)
- transferred 98.9 KB in * seconds (* */sec) (glob) (zstd !)
-#endif
-
-Clone with background file closing enabled
-
-#if stream-legacy
- $ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep -v adding
- using http://localhost:$HGPORT/
- sending capabilities command
- sending branchmap command
- streaming all changes
- sending stream_out command
- 1090 files to transfer, 102 KB of data (no-zstd !)
- 1090 files to transfer, 98.8 KB of data (zstd !)
- starting 4 threads for background file closing
- updating the branch cache
- transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
- transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !)
- query 1; heads
- sending batch command
- searching for changes
- all remote heads known locally
- no changes found
- sending getbundle command
- bundle2-input-bundle: with-transaction
- bundle2-input-part: "listkeys" (params: 1 mandatory) supported
- bundle2-input-part: "phase-heads" supported
- bundle2-input-part: total payload size 24
- bundle2-input-bundle: 2 parts total
- checking for updated bookmarks
- updating the branch cache
- (sent 5 HTTP requests and * bytes; received * bytes in responses) (glob)
-#endif
-#if stream-bundle2
- $ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep -v adding
- using http://localhost:$HGPORT/
- sending capabilities command
- query 1; heads
- sending batch command
- streaming all changes
- sending getbundle command
- bundle2-input-bundle: with-transaction
- bundle2-input-part: "stream2" (params: 3 mandatory) supported
- applying stream bundle
- 1093 files to transfer, 102 KB of data (no-zstd !)
- 1093 files to transfer, 98.9 KB of data (zstd !)
- starting 4 threads for background file closing
- starting 4 threads for background file closing
- updating the branch cache
- transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
- bundle2-input-part: total payload size 118984 (no-zstd !)
- transferred 98.9 KB in * seconds (* */sec) (glob) (zstd !)
- bundle2-input-part: total payload size 116145 (zstd !)
- bundle2-input-part: "listkeys" (params: 1 mandatory) supported
- bundle2-input-bundle: 2 parts total
- checking for updated bookmarks
- updating the branch cache
- (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob)
-#endif
-
-Cannot stream clone when there are secret changesets
-
- $ hg -R server phase --force --secret -r tip
- $ hg clone --stream -U http://localhost:$HGPORT secret-denied
- warning: stream clone requested but server has them disabled
- requesting all changes
- adding changesets
- adding manifests
- adding file changes
- added 2 changesets with 1025 changes to 1025 files
- new changesets 96ee1d7354c4:c17445101a72
-
- $ killdaemons.py
-
-Streaming of secrets can be overridden by server config
-
- $ cd server
- $ hg serve --config server.uncompressedallowsecret=true -p $HGPORT -d --pid-file=hg.pid
- $ cat hg.pid > $DAEMON_PIDS
- $ cd ..
-
-#if stream-legacy
- $ hg clone --stream -U http://localhost:$HGPORT secret-allowed
- streaming all changes
- 1090 files to transfer, 102 KB of data (no-zstd !)
- transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
- 1090 files to transfer, 98.8 KB of data (zstd !)
- transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !)
- searching for changes
- no changes found
-#endif
-#if stream-bundle2
- $ hg clone --stream -U http://localhost:$HGPORT secret-allowed
- streaming all changes
- 1093 files to transfer, 102 KB of data (no-zstd !)
- transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
- 1093 files to transfer, 98.9 KB of data (zstd !)
- transferred 98.9 KB in * seconds (* */sec) (glob) (zstd !)
-#endif
-
- $ killdaemons.py
-
-Verify interaction between preferuncompressed and secret presence
-
- $ cd server
- $ hg serve --config server.preferuncompressed=true -p $HGPORT -d --pid-file=hg.pid
- $ cat hg.pid > $DAEMON_PIDS
- $ cd ..
-
- $ hg clone -U http://localhost:$HGPORT preferuncompressed-secret
- requesting all changes
- adding changesets
- adding manifests
- adding file changes
- added 2 changesets with 1025 changes to 1025 files
- new changesets 96ee1d7354c4:c17445101a72
-
- $ killdaemons.py
-
-Clone not allowed when full bundles disabled and can't serve secrets
-
- $ cd server
- $ hg serve --config server.disablefullbundle=true -p $HGPORT -d --pid-file=hg.pid
- $ cat hg.pid > $DAEMON_PIDS
- $ cd ..
-
- $ hg clone --stream http://localhost:$HGPORT secret-full-disabled
- warning: stream clone requested but server has them disabled
- requesting all changes
- remote: abort: server has pull-based clones disabled
- abort: pull failed on remote
- (remove --pull if specified or upgrade Mercurial)
- [100]
-
-Local stream clone with secrets involved
-(This is just a test over behavior: if you have access to the repo's files,
-there is no security so it isn't important to prevent a clone here.)
-
- $ hg clone -U --stream server local-secret
- warning: stream clone requested but server has them disabled
- requesting all changes
- adding changesets
- adding manifests
- adding file changes
- added 2 changesets with 1025 changes to 1025 files
- new changesets 96ee1d7354c4:c17445101a72
-
-Stream clone while repo is changing:
-
- $ mkdir changing
- $ cd changing
-
-extension for delaying the server process so we reliably can modify the repo
-while cloning
-
- $ cat > stream_steps.py <<EOF
- > import os
- > import sys
- > from mercurial import (
- > encoding,
- > extensions,
- > streamclone,
- > testing,
- > )
- > WALKED_FILE_1 = encoding.environ[b'HG_TEST_STREAM_WALKED_FILE_1']
- > WALKED_FILE_2 = encoding.environ[b'HG_TEST_STREAM_WALKED_FILE_2']
- >
- > def _test_sync_point_walk_1(orig, repo):
- > testing.write_file(WALKED_FILE_1)
- >
- > def _test_sync_point_walk_2(orig, repo):
- > assert repo._currentlock(repo._lockref) is None
- > testing.wait_file(WALKED_FILE_2)
- >
- > extensions.wrapfunction(
- > streamclone,
- > '_test_sync_point_walk_1',
- > _test_sync_point_walk_1
- > )
- > extensions.wrapfunction(
- > streamclone,
- > '_test_sync_point_walk_2',
- > _test_sync_point_walk_2
- > )
- > EOF
-
-prepare repo with small and big file to cover both code paths in emitrevlogdata
-
- $ hg init repo
- $ touch repo/f1
- $ $TESTDIR/seq.py 50000 > repo/f2
- $ hg -R repo ci -Aqm "0"
- $ HG_TEST_STREAM_WALKED_FILE_1="$TESTTMP/sync_file_walked_1"
- $ export HG_TEST_STREAM_WALKED_FILE_1
- $ HG_TEST_STREAM_WALKED_FILE_2="$TESTTMP/sync_file_walked_2"
- $ export HG_TEST_STREAM_WALKED_FILE_2
- $ HG_TEST_STREAM_WALKED_FILE_3="$TESTTMP/sync_file_walked_3"
- $ export HG_TEST_STREAM_WALKED_FILE_3
-# $ cat << EOF >> $HGRCPATH
-# > [hooks]
-# > pre-clone=rm -f "$TESTTMP/sync_file_walked_*"
-# > EOF
- $ hg serve -R repo -p $HGPORT1 -d --error errors.log --pid-file=hg.pid --config extensions.stream_steps="$RUNTESTDIR/testlib/ext-stream-clone-steps.py"
- $ cat hg.pid >> $DAEMON_PIDS
-
-clone while modifying the repo between stating file with write lock and
-actually serving file content
-
- $ (hg clone -q --stream -U http://localhost:$HGPORT1 clone; touch "$HG_TEST_STREAM_WALKED_FILE_3") &
- $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_1
- $ echo >> repo/f1
- $ echo >> repo/f2
- $ hg -R repo ci -m "1" --config ui.timeout.warn=-1
- $ touch $HG_TEST_STREAM_WALKED_FILE_2
- $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_3
- $ hg -R clone id
- 000000000000
- $ cat errors.log
- $ cd ..
-
-Stream repository with bookmarks
---------------------------------
-
-(revert introduction of secret changeset)
-
- $ hg -R server phase --draft 'secret()'
-
-add a bookmark
-
- $ hg -R server bookmark -r tip some-bookmark
-
-clone it
-
-#if stream-legacy
- $ hg clone --stream http://localhost:$HGPORT with-bookmarks
- streaming all changes
- 1090 files to transfer, 102 KB of data (no-zstd !)
- transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
- 1090 files to transfer, 98.8 KB of data (zstd !)
- transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !)
- searching for changes
- no changes found
- updating to branch default
- 1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
-#endif
-#if stream-bundle2
- $ hg clone --stream http://localhost:$HGPORT with-bookmarks
- streaming all changes
- 1096 files to transfer, 102 KB of data (no-zstd !)
- transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
- 1096 files to transfer, 99.1 KB of data (zstd !)
- transferred 99.1 KB in * seconds (* */sec) (glob) (zstd !)
- updating to branch default
- 1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
-#endif
- $ hg verify -R with-bookmarks
- checking changesets
- checking manifests
- crosschecking files in changesets and manifests
- checking files
- checked 3 changesets with 1088 changes to 1088 files
- $ hg -R with-bookmarks bookmarks
- some-bookmark 2:5223b5e3265f
-
-Stream repository with phases
------------------------------
-
-Clone as publishing
-
- $ hg -R server phase -r 'all()'
- 0: draft
- 1: draft
- 2: draft
-
-#if stream-legacy
- $ hg clone --stream http://localhost:$HGPORT phase-publish
- streaming all changes
- 1090 files to transfer, 102 KB of data (no-zstd !)
- transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
- 1090 files to transfer, 98.8 KB of data (zstd !)
- transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !)
- searching for changes
- no changes found
- updating to branch default
- 1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
-#endif
-#if stream-bundle2
- $ hg clone --stream http://localhost:$HGPORT phase-publish
- streaming all changes
- 1096 files to transfer, 102 KB of data (no-zstd !)
- transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
- 1096 files to transfer, 99.1 KB of data (zstd !)
- transferred 99.1 KB in * seconds (* */sec) (glob) (zstd !)
- updating to branch default
- 1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
-#endif
- $ hg verify -R phase-publish
- checking changesets
- checking manifests
- crosschecking files in changesets and manifests
- checking files
- checked 3 changesets with 1088 changes to 1088 files
- $ hg -R phase-publish phase -r 'all()'
- 0: public
- 1: public
- 2: public
-
-Clone as non publishing
-
- $ cat << EOF >> server/.hg/hgrc
- > [phases]
- > publish = False
- > EOF
- $ killdaemons.py
- $ hg -R server serve -p $HGPORT -d --pid-file=hg.pid
- $ cat hg.pid > $DAEMON_PIDS
-
-#if stream-legacy
-
-With v1 of the stream protocol, changeset are always cloned as public. It make
-stream v1 unsuitable for non-publishing repository.
-
- $ hg clone --stream http://localhost:$HGPORT phase-no-publish
- streaming all changes
- 1090 files to transfer, 102 KB of data (no-zstd !)
- transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
- 1090 files to transfer, 98.8 KB of data (zstd !)
- transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !)
- searching for changes
- no changes found
- updating to branch default
- 1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
- $ hg -R phase-no-publish phase -r 'all()'
- 0: public
- 1: public
- 2: public
-#endif
-#if stream-bundle2
- $ hg clone --stream http://localhost:$HGPORT phase-no-publish
- streaming all changes
- 1097 files to transfer, 102 KB of data (no-zstd !)
- transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
- 1097 files to transfer, 99.1 KB of data (zstd !)
- transferred 99.1 KB in * seconds (* */sec) (glob) (zstd !)
- updating to branch default
- 1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
- $ hg -R phase-no-publish phase -r 'all()'
- 0: draft
- 1: draft
- 2: draft
-#endif
- $ hg verify -R phase-no-publish
- checking changesets
- checking manifests
- crosschecking files in changesets and manifests
- checking files
- checked 3 changesets with 1088 changes to 1088 files
-
- $ killdaemons.py
-
-#if stream-legacy
-
-With v1 of the stream protocol, changeset are always cloned as public. There's
-no obsolescence markers exchange in stream v1.
-
-#endif
-#if stream-bundle2
-
-Stream repository with obsolescence
------------------------------------
-
-Clone non-publishing with obsolescence
-
- $ cat >> $HGRCPATH << EOF
- > [experimental]
- > evolution=all
- > EOF
-
- $ cd server
- $ echo foo > foo
- $ hg -q commit -m 'about to be pruned'
- $ hg debugobsolete `hg log -r . -T '{node}'` -d '0 0' -u test --record-parents
- 1 new obsolescence markers
- obsoleted 1 changesets
- $ hg up null -q
- $ hg log -T '{rev}: {phase}\n'
- 2: draft
- 1: draft
- 0: draft
- $ hg serve -p $HGPORT -d --pid-file=hg.pid
- $ cat hg.pid > $DAEMON_PIDS
- $ cd ..
-
- $ hg clone -U --stream http://localhost:$HGPORT with-obsolescence
- streaming all changes
- 1098 files to transfer, 102 KB of data (no-zstd !)
- transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
- 1098 files to transfer, 99.5 KB of data (zstd !)
- transferred 99.5 KB in * seconds (* */sec) (glob) (zstd !)
- $ hg -R with-obsolescence log -T '{rev}: {phase}\n'
- 2: draft
- 1: draft
- 0: draft
- $ hg debugobsolete -R with-obsolescence
- 8c206a663911c1f97f2f9d7382e417ae55872cfa 0 {5223b5e3265f0df40bb743da62249413d74ac70f} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
- $ hg verify -R with-obsolescence
- checking changesets
- checking manifests
- crosschecking files in changesets and manifests
- checking files
- checked 4 changesets with 1089 changes to 1088 files
-
- $ hg clone -U --stream --config experimental.evolution=0 http://localhost:$HGPORT with-obsolescence-no-evolution
- streaming all changes
- remote: abort: server has obsolescence markers, but client cannot receive them via stream clone
- abort: pull failed on remote
- [100]
-
- $ killdaemons.py
-
-#endif
--- a/tests/test-clone.t Fri Aug 27 13:51:44 2021 -0700
+++ b/tests/test-clone.t Mon Aug 30 12:25:57 2021 +0200
@@ -1125,7 +1125,7 @@
$ hg id -R remote -r 0
abort: repository remote not found
[255]
- $ hg --config share.pool=share -q clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" a ssh://user@dummy/remote
+ $ hg --config share.pool=share -q clone a ssh://user@dummy/remote
$ hg -R remote id -r 0
acb14030fe0a
--- a/tests/test-clonebundles.t Fri Aug 27 13:51:44 2021 -0700
+++ b/tests/test-clonebundles.t Mon Aug 30 12:25:57 2021 +0200
@@ -208,7 +208,7 @@
Feature works over SSH
- $ hg clone -U -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/server ssh-full-clone
+ $ hg clone -U ssh://user@dummy/server ssh-full-clone
applying clone bundle from http://localhost:$HGPORT1/full.hg
adding changesets
adding manifests
--- a/tests/test-commandserver.t Fri Aug 27 13:51:44 2021 -0700
+++ b/tests/test-commandserver.t Mon Aug 30 12:25:57 2021 +0200
@@ -226,6 +226,7 @@
ui.detailed-exit-code=True
ui.merge=internal:merge
ui.mergemarkers=detailed
+ ui.ssh=* (glob)
ui.timeout.warn=15
ui.foo=bar
ui.nontty=true
@@ -239,6 +240,7 @@
ui.detailed-exit-code=True
ui.merge=internal:merge
ui.mergemarkers=detailed
+ ui.ssh=* (glob)
ui.timeout.warn=15
ui.nontty=true
#endif
--- a/tests/test-completion.t Fri Aug 27 13:51:44 2021 -0700
+++ b/tests/test-completion.t Mon Aug 30 12:25:57 2021 +0200
@@ -316,7 +316,7 @@
debugpushkey:
debugpvec:
debugrebuilddirstate: rev, minimal
- debugrebuildfncache:
+ debugrebuildfncache: only-data
debugrename: rev
debugrequires:
debugrevlog: changelog, manifest, dir, dump
--- a/tests/test-config.t Fri Aug 27 13:51:44 2021 -0700
+++ b/tests/test-config.t Mon Aug 30 12:25:57 2021 +0200
@@ -413,7 +413,7 @@
The feature is experimental and behavior may varies. This test exists to make sure the code is run. We grep it to avoid too much variability in its current experimental state.
- $ hg config --exp-all-known | grep commit
+ $ hg config --exp-all-known | grep commit | grep -v ssh
commands.commit.interactive.git=False
commands.commit.interactive.ignoreblanklines=False
commands.commit.interactive.ignorews=False
--- a/tests/test-debugcommands.t Fri Aug 27 13:51:44 2021 -0700
+++ b/tests/test-debugcommands.t Mon Aug 30 12:25:57 2021 +0200
@@ -644,14 +644,13 @@
Test debugpeer
- $ hg --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" debugpeer ssh://user@dummy/debugrevlog
+ $ hg debugpeer ssh://user@dummy/debugrevlog
url: ssh://user@dummy/debugrevlog
local: no
pushable: yes
- $ hg --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" --debug debugpeer ssh://user@dummy/debugrevlog
- running "*" "*/tests/dummyssh" 'user@dummy' 'hg -R debugrevlog serve --stdio' (glob) (no-windows !)
- running "*" "*\tests/dummyssh" "user@dummy" "hg -R debugrevlog serve --stdio" (glob) (windows !)
+ $ hg --debug debugpeer ssh://user@dummy/debugrevlog
+ running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R debugrevlog serve --stdio['"] (re)
devel-peer-request: hello+between
devel-peer-request: pairs: 81 bytes
sending hello command
--- a/tests/test-fastannotate-protocol.t Fri Aug 27 13:51:44 2021 -0700
+++ b/tests/test-fastannotate-protocol.t Mon Aug 30 12:25:57 2021 +0200
@@ -1,6 +1,4 @@
$ cat >> $HGRCPATH << EOF
- > [ui]
- > ssh = "$PYTHON" "$TESTDIR/dummyssh"
> [extensions]
> fastannotate=
> [fastannotate]
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-hgwebdir-gc.py Mon Aug 30 12:25:57 2021 +0200
@@ -0,0 +1,49 @@
+from __future__ import absolute_import
+
+import os
+from mercurial.hgweb import hgwebdir_mod
+
+hgwebdir = hgwebdir_mod.hgwebdir
+
+os.mkdir(b'webdir')
+os.chdir(b'webdir')
+
+webdir = os.path.realpath(b'.')
+
+
+def trivial_response(req, res):
+ return []
+
+
+def make_hgwebdir(gc_rate=None):
+ config = os.path.join(webdir, b'hgwebdir.conf')
+ with open(config, 'wb') as configfile:
+ configfile.write(b'[experimental]\n')
+ if gc_rate is not None:
+ configfile.write(b'web.full-garbage-collection-rate=%d\n' % gc_rate)
+ hg_wd = hgwebdir(config)
+ hg_wd._runwsgi = trivial_response
+ return hg_wd
+
+
+def process_requests(webdir_instance, number):
+ # we don't care for now about passing realistic arguments
+ for _ in range(number):
+ for chunk in webdir_instance.run_wsgi(None, None):
+ pass
+
+
+without_gc = make_hgwebdir(gc_rate=0)
+process_requests(without_gc, 5)
+assert without_gc.requests_count == 5
+assert without_gc.gc_full_collections_done == 0
+
+with_gc = make_hgwebdir(gc_rate=2)
+process_requests(with_gc, 5)
+assert with_gc.requests_count == 5
+assert with_gc.gc_full_collections_done == 2
+
+with_systematic_gc = make_hgwebdir() # default value of the setting
+process_requests(with_systematic_gc, 3)
+assert with_systematic_gc.requests_count == 3
+assert with_systematic_gc.gc_full_collections_done == 3
--- a/tests/test-infinitepush-ci.t Fri Aug 27 13:51:44 2021 -0700
+++ b/tests/test-infinitepush-ci.t Mon Aug 30 12:25:57 2021 +0200
@@ -9,8 +9,6 @@
$ . "$TESTDIR/library-infinitepush.sh"
$ cat >> $HGRCPATH <<EOF
- > [ui]
- > ssh = "$PYTHON" "$TESTDIR/dummyssh"
> [alias]
> glog = log -GT "{rev}:{node|short} {desc}\n{phase}"
> EOF
--- a/tests/test-init.t Fri Aug 27 13:51:44 2021 -0700
+++ b/tests/test-init.t Mon Aug 30 12:25:57 2021 +0200
@@ -123,7 +123,7 @@
init+push to remote2
- $ hg init -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote2
+ $ hg init ssh://user@dummy/remote2
$ hg incoming -R remote2 local
comparing with local
changeset: 0:08b9e9f63b32
@@ -133,7 +133,7 @@
summary: init
- $ hg push -R local -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote2
+ $ hg push -R local ssh://user@dummy/remote2
pushing to ssh://user@dummy/remote2
searching for changes
remote: adding changesets
@@ -143,7 +143,7 @@
clone to remote1
- $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" local ssh://user@dummy/remote1
+ $ hg clone local ssh://user@dummy/remote1
searching for changes
remote: adding changesets
remote: adding manifests
@@ -151,7 +151,7 @@
remote: added 1 changesets with 1 changes to 1 files
The largefiles extension doesn't crash
- $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" local ssh://user@dummy/remotelf --config extensions.largefiles=
+ $ hg clone local ssh://user@dummy/remotelf --config extensions.largefiles=
The fsmonitor extension is incompatible with the largefiles extension and has been disabled. (fsmonitor !)
The fsmonitor extension is incompatible with the largefiles extension and has been disabled. (fsmonitor !)
searching for changes
@@ -162,14 +162,14 @@
init to existing repo
- $ hg init -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote1
+ $ hg init ssh://user@dummy/remote1
abort: repository remote1 already exists
abort: could not create remote repo
[255]
clone to existing repo
- $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" local ssh://user@dummy/remote1
+ $ hg clone local ssh://user@dummy/remote1
abort: repository remote1 already exists
abort: could not create remote repo
[255]
@@ -283,7 +283,7 @@
$ hg -R local bookmark test
$ hg -R local bookmarks
* test 0:08b9e9f63b32
- $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" local ssh://user@dummy/remote-bookmarks
+ $ hg clone local ssh://user@dummy/remote-bookmarks
searching for changes
remote: adding changesets
remote: adding manifests
--- a/tests/test-largefiles-wireproto.t Fri Aug 27 13:51:44 2021 -0700
+++ b/tests/test-largefiles-wireproto.t Mon Aug 30 12:25:57 2021 +0200
@@ -124,7 +124,7 @@
#endif
vanilla clients locked out from largefiles ssh repos
- $ hg --config extensions.largefiles=! clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/r4 r5
+ $ hg --config extensions.largefiles=! clone ssh://user@dummy/r4 r5
remote:
remote: This repository uses the largefiles extension.
remote:
--- a/tests/test-log.t Fri Aug 27 13:51:44 2021 -0700
+++ b/tests/test-log.t Mon Aug 30 12:25:57 2021 +0200
@@ -2516,10 +2516,9 @@
is global. So we shouldn't expect the namespace always exists. Using
ssh:// makes sure a bundle repository is created from scratch. (issue6301)
- $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" \
- > -qr0 "ssh://user@dummy/`pwd`/a" a-clone
+ $ hg clone -qr0 "ssh://user@dummy/`pwd`/a" a-clone
$ hg incoming --config extensions.names=names.py -R a-clone \
- > -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" -T '{bars}\n' -l1
+ > -T '{bars}\n' -l1
comparing with ssh://user@dummy/$TESTTMP/a
searching for changes
--- a/tests/test-logexchange.t Fri Aug 27 13:51:44 2021 -0700
+++ b/tests/test-logexchange.t Mon Aug 30 12:25:57 2021 +0200
@@ -2,8 +2,6 @@
=============================================
$ cat >> $HGRCPATH << EOF
- > [ui]
- > ssh = "$PYTHON" "$TESTDIR/dummyssh"
> [alias]
> glog = log -G -T '{rev}:{node|short} {desc}'
> [extensions]
--- a/tests/test-missing-capability.t Fri Aug 27 13:51:44 2021 -0700
+++ b/tests/test-missing-capability.t Mon Aug 30 12:25:57 2021 +0200
@@ -24,10 +24,6 @@
> [extensions]
> disable-lookup = $TESTTMP/disable-lookup.py
> EOF
- $ cat >> .hg/hgrc <<EOF
- > [ui]
- > ssh = "$PYTHON" "$TESTDIR/dummyssh"
- > EOF
$ hg pull ssh://user@dummy/repo1 -r tip -B a
pulling from ssh://user@dummy/repo1
--- a/tests/test-persistent-nodemap.t Fri Aug 27 13:51:44 2021 -0700
+++ b/tests/test-persistent-nodemap.t Mon Aug 30 12:25:57 2021 +0200
@@ -868,7 +868,7 @@
No race condition
- $ hg clone -U --stream --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/test-repo stream-clone --debug | egrep '00(changelog|manifest)'
+ $ hg clone -U --stream ssh://user@dummy/test-repo stream-clone --debug | egrep '00(changelog|manifest)'
adding [s] 00manifest.n (62 bytes)
adding [s] 00manifest-*.nd (118 KB) (glob)
adding [s] 00changelog.n (62 bytes)
@@ -933,7 +933,7 @@
Do a mix of clone and commit at the same time so that the file listed on disk differ at actual transfer time.
- $ (hg clone -U --stream --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/test-repo stream-clone-race-1 --debug 2>> clone-output | egrep '00(changelog|manifest)' >> clone-output; touch $HG_TEST_STREAM_WALKED_FILE_3) &
+ $ (hg clone -U --stream ssh://user@dummy/test-repo stream-clone-race-1 --debug 2>> clone-output | egrep '00(changelog|manifest)' >> clone-output; touch $HG_TEST_STREAM_WALKED_FILE_3) &
$ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_1
$ hg -R test-repo/ commit -m foo
$ touch $HG_TEST_STREAM_WALKED_FILE_2
@@ -1030,7 +1030,7 @@
Performe the mix of clone and full refresh of the nodemap, so that the files
(and filenames) are different between listing time and actual transfer time.
- $ (hg clone -U --stream --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/test-repo stream-clone-race-2 --debug 2>> clone-output-2 | egrep '00(changelog|manifest)' >> clone-output-2; touch $HG_TEST_STREAM_WALKED_FILE_3) &
+ $ (hg clone -U --stream ssh://user@dummy/test-repo stream-clone-race-2 --debug 2>> clone-output-2 | egrep '00(changelog|manifest)' >> clone-output-2; touch $HG_TEST_STREAM_WALKED_FILE_3) &
$ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_1
$ rm test-repo/.hg/store/00changelog.n
$ rm test-repo/.hg/store/00changelog-*.nd
--- a/tests/test-push-race.t Fri Aug 27 13:51:44 2021 -0700
+++ b/tests/test-push-race.t Mon Aug 30 12:25:57 2021 +0200
@@ -102,7 +102,6 @@
$ cat >> $HGRCPATH << EOF
> [ui]
- > ssh = "$PYTHON" "$TESTDIR/dummyssh"
> # simplify output
> logtemplate = {node|short} {desc} ({branch})
> [phases]
--- a/tests/test-share.t Fri Aug 27 13:51:44 2021 -0700
+++ b/tests/test-share.t Mon Aug 30 12:25:57 2021 +0200
@@ -160,7 +160,7 @@
Cloning a shared repo via bundle2 results in a non-shared clone
$ cd ..
- $ hg clone -q --stream --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/`pwd`/repo2 cloned-via-bundle2
+ $ hg clone -q --stream ssh://user@dummy/`pwd`/repo2 cloned-via-bundle2
$ cat ./cloned-via-bundle2/.hg/requires | grep "shared"
[1]
$ hg id --cwd cloned-via-bundle2 -r tip
--- a/tests/test-sparse-clone.t Fri Aug 27 13:51:44 2021 -0700
+++ b/tests/test-sparse-clone.t Mon Aug 30 12:25:57 2021 +0200
@@ -2,7 +2,6 @@
$ cat >> $HGRCPATH << EOF
> [ui]
- > ssh = "$PYTHON" "$RUNTESTDIR/dummyssh"
> username = nobody <no.reply@fb.com>
> [extensions]
> sparse=
--- a/tests/test-ssh-batch.t Fri Aug 27 13:51:44 2021 -0700
+++ b/tests/test-ssh-batch.t Mon Aug 30 12:25:57 2021 +0200
@@ -9,7 +9,7 @@
fails (thus causing the sshpeer to be stopped), the errors from the
further lookups don't result in tracebacks.
- $ hg pull -r b0 -r nosuchbookmark $(for i in $($TESTDIR/seq.py 1 20); do echo -r b$i; done) -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/$(pwd)/../a
+ $ hg pull -r b0 -r nosuchbookmark $(for i in $($TESTDIR/seq.py 1 20); do echo -r b$i; done) ssh://user@dummy/$(pwd)/../a
pulling from ssh://user@dummy/$TESTTMP/b/../a
abort: unknown revision 'nosuchbookmark'
[255]
--- a/tests/test-ssh-bundle1.t Fri Aug 27 13:51:44 2021 -0700
+++ b/tests/test-ssh-bundle1.t Mon Aug 30 12:25:57 2021 +0200
@@ -52,7 +52,7 @@
repo not found error
- $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/nonexistent local
+ $ hg clone ssh://user@dummy/nonexistent local
remote: abort: repository nonexistent not found
abort: no suitable response from remote hg
[255]
@@ -60,7 +60,7 @@
non-existent absolute path
#if no-msys
- $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy//`pwd`/nonexistent local
+ $ hg clone ssh://user@dummy//`pwd`/nonexistent local
remote: abort: repository /$TESTTMP/nonexistent not found
abort: no suitable response from remote hg
[255]
@@ -70,7 +70,7 @@
#if no-reposimplestore
- $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" --stream ssh://user@dummy/remote local-stream
+ $ hg clone --stream ssh://user@dummy/remote local-stream
streaming all changes
4 files to transfer, 602 bytes of data (no-zstd !)
transferred 602 bytes in * seconds (*) (glob) (no-zstd !)
@@ -94,7 +94,7 @@
clone bookmarks via stream
$ hg -R local-stream book mybook
- $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" --stream ssh://user@dummy/local-stream stream2
+ $ hg clone --stream ssh://user@dummy/local-stream stream2
streaming all changes
4 files to transfer, 602 bytes of data (no-zstd !)
transferred 602 bytes in * seconds (*) (glob) (no-zstd !)
@@ -114,7 +114,7 @@
clone remote via pull
- $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote local
+ $ hg clone ssh://user@dummy/remote local
requesting all changes
adding changesets
adding manifests
@@ -142,14 +142,14 @@
$ hg paths
default = ssh://user@dummy/remote
- $ hg pull -e "\"$PYTHON\" \"$TESTDIR/dummyssh\""
+ $ hg pull
pulling from ssh://user@dummy/remote
searching for changes
no changes found
pull from wrong ssh URL
- $ hg pull -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/doesnotexist
+ $ hg pull ssh://user@dummy/doesnotexist
pulling from ssh://user@dummy/doesnotexist
remote: abort: repository doesnotexist not found
abort: no suitable response from remote hg
@@ -163,8 +163,6 @@
updating rc
$ echo "default-push = ssh://user@dummy/remote" >> .hg/hgrc
- $ echo "[ui]" >> .hg/hgrc
- $ echo "ssh = \"$PYTHON\" \"$TESTDIR/dummyssh\"" >> .hg/hgrc
find outgoing
@@ -181,7 +179,7 @@
find incoming on the remote side
- $ hg incoming -R ../remote -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/local
+ $ hg incoming -R ../remote ssh://user@dummy/local
comparing with ssh://user@dummy/local
searching for changes
changeset: 3:a28a9d1a809c
@@ -194,7 +192,7 @@
find incoming on the remote side (using absolute path)
- $ hg incoming -R ../remote -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/`pwd`"
+ $ hg incoming -R ../remote "ssh://user@dummy/`pwd`"
comparing with ssh://user@dummy/$TESTTMP/local
searching for changes
changeset: 3:a28a9d1a809c
@@ -241,7 +239,7 @@
test pushkeys and bookmarks
$ cd $TESTTMP/local
- $ hg debugpushkey --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote namespaces
+ $ hg debugpushkey ssh://user@dummy/remote namespaces
bookmarks
namespaces
phases
@@ -256,7 +254,7 @@
no changes found
exporting bookmark foo
[1]
- $ hg debugpushkey --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote bookmarks
+ $ hg debugpushkey ssh://user@dummy/remote bookmarks
foo 1160648e36cec0054048a7edc4110c6f84fde594
$ hg book -f foo
$ hg push --traceback
@@ -328,7 +326,7 @@
$ hg -R ../remote bookmark test
$ hg -R ../remote bookmarks
* test 4:6c0482d977a3
- $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote local-bookmarks
+ $ hg clone ssh://user@dummy/remote local-bookmarks
requesting all changes
adding changesets
adding manifests
@@ -356,21 +354,21 @@
Test remote paths with spaces (issue2983):
- $ hg init --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo"
+ $ hg init "ssh://user@dummy/a repo"
$ touch "$TESTTMP/a repo/test"
$ hg -R 'a repo' commit -A -m "test"
adding test
$ hg -R 'a repo' tag tag
- $ hg id --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo"
+ $ hg id "ssh://user@dummy/a repo"
73649e48688a
- $ hg id --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo#noNoNO"
+ $ hg id "ssh://user@dummy/a repo#noNoNO"
abort: unknown revision 'noNoNO'
[255]
Test (non-)escaping of remote paths with spaces when cloning (issue3145):
- $ hg clone --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo"
+ $ hg clone "ssh://user@dummy/a repo"
destination directory: a repo
abort: destination 'a repo' is not empty
[10]
@@ -462,8 +460,6 @@
$ cat >> .hg/hgrc << EOF
> [paths]
> default-push = ssh://user@dummy/remote
- > [ui]
- > ssh = "$PYTHON" "$TESTDIR/dummyssh"
> [extensions]
> localwrite = localwrite.py
> EOF
@@ -486,7 +482,7 @@
$ hg pull --debug ssh://user@dummy/remote
pulling from ssh://user@dummy/remote
- running .* ".*/dummyssh" ['"]user@dummy['"] ('|")hg -R remote serve --stdio('|") (re)
+ running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R remote serve --stdio['"] (re)
sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
sending hello command
sending between command
@@ -583,11 +579,11 @@
$ echo "pretxnchangegroup.fail = python:$TESTTMP/failhook:hook" >> remote/.hg/hgrc
- $ hg -q --config ui.ssh="\"$PYTHON\" $TESTDIR/dummyssh" clone ssh://user@dummy/remote hookout
+ $ hg -q clone ssh://user@dummy/remote hookout
$ cd hookout
$ touch hookfailure
$ hg -q commit -A -m 'remote hook failure'
- $ hg --config ui.ssh="\"$PYTHON\" $TESTDIR/dummyssh" push
+ $ hg push
pushing to ssh://user@dummy/remote
searching for changes
remote: adding changesets
@@ -607,7 +603,7 @@
> [extensions]
> crash = ${TESTDIR}/crashgetbundler.py
> EOF
- $ hg --config ui.ssh="\"$PYTHON\" $TESTDIR/dummyssh" pull
+ $ hg pull
pulling from ssh://user@dummy/remote
searching for changes
adding changesets
--- a/tests/test-ssh-clone-r.t Fri Aug 27 13:51:44 2021 -0700
+++ b/tests/test-ssh-clone-r.t Mon Aug 30 12:25:57 2021 +0200
@@ -28,7 +28,7 @@
clone remote via stream
$ for i in 0 1 2 3 4 5 6 7 8; do
- > hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" --stream -r "$i" ssh://user@dummy/remote test-"$i"
+ > hg clone --stream -r "$i" ssh://user@dummy/remote test-"$i"
> if cd test-"$i"; then
> hg verify
> cd ..
@@ -160,7 +160,7 @@
checked 9 changesets with 7 changes to 4 files
$ cd ..
$ cd test-1
- $ hg pull -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" -r 4 ssh://user@dummy/remote
+ $ hg pull -r 4 ssh://user@dummy/remote
pulling from ssh://user@dummy/remote
searching for changes
adding changesets
@@ -175,7 +175,7 @@
crosschecking files in changesets and manifests
checking files
checked 3 changesets with 2 changes to 1 files
- $ hg pull -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote
+ $ hg pull ssh://user@dummy/remote
pulling from ssh://user@dummy/remote
searching for changes
adding changesets
@@ -186,7 +186,7 @@
(run 'hg update' to get a working copy)
$ cd ..
$ cd test-2
- $ hg pull -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" -r 5 ssh://user@dummy/remote
+ $ hg pull -r 5 ssh://user@dummy/remote
pulling from ssh://user@dummy/remote
searching for changes
adding changesets
@@ -201,7 +201,7 @@
crosschecking files in changesets and manifests
checking files
checked 5 changesets with 3 changes to 1 files
- $ hg pull -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote
+ $ hg pull ssh://user@dummy/remote
pulling from ssh://user@dummy/remote
searching for changes
adding changesets
--- a/tests/test-ssh-proto.t Fri Aug 27 13:51:44 2021 -0700
+++ b/tests/test-ssh-proto.t Mon Aug 30 12:25:57 2021 +0200
@@ -28,8 +28,6 @@
> }
$ cat >> $HGRCPATH << EOF
- > [ui]
- > ssh = "$PYTHON" "$TESTDIR/dummyssh"
> [devel]
> debug.peer-request = true
> [extensions]
@@ -65,8 +63,7 @@
$ cd ..
$ hg --debug debugpeer ssh://user@dummy/server
- running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !)
- running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !)
+ running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R server serve --stdio['"] (re)
devel-peer-request: hello+between
devel-peer-request: pairs: 81 bytes
sending hello command
@@ -178,8 +175,7 @@
--debug will print the banner
$ SSHSERVERMODE=banner hg --debug debugpeer ssh://user@dummy/server
- running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !)
- running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !)
+ running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R server serve --stdio['"] (re)
devel-peer-request: hello+between
devel-peer-request: pairs: 81 bytes
sending hello command
@@ -269,8 +265,7 @@
servers.
$ SSHSERVERMODE=no-hello hg --debug debugpeer ssh://user@dummy/server
- running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !)
- running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !)
+ running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R server serve --stdio['"] (re)
devel-peer-request: hello+between
devel-peer-request: pairs: 81 bytes
sending hello command
@@ -315,8 +310,7 @@
o> 1\n
$ hg --config sshpeer.mode=extra-handshake-commands --config sshpeer.handshake-mode=pre-no-args --debug debugpeer ssh://user@dummy/server
- running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !)
- running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !)
+ running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R server serve --stdio['"] (re)
sending no-args command
devel-peer-request: hello+between
devel-peer-request: pairs: 81 bytes
@@ -385,8 +379,7 @@
o> \n
$ hg --config sshpeer.mode=extra-handshake-commands --config sshpeer.handshake-mode=pre-multiple-no-args --debug debugpeer ssh://user@dummy/server
- running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !)
- running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !)
+ running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R server serve --stdio['"] (re)
sending unknown1 command
sending unknown2 command
sending unknown3 command
@@ -961,8 +954,7 @@
$ cd ..
$ hg --config experimental.sshpeer.advertise-v2=true --debug debugpeer ssh://user@dummy/server
- running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !)
- running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !)
+ running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R server serve --stdio['"] (re)
sending upgrade request: * proto=exp-ssh-v2-0003 (glob)
devel-peer-request: hello+between
devel-peer-request: pairs: 81 bytes
@@ -1019,8 +1011,7 @@
$ cd ..
$ hg --config experimental.sshpeer.advertise-v2=true --debug debugpeer ssh://user@dummy/server
- running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !)
- running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !)
+ running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R server serve --stdio['"] (re)
sending upgrade request: * proto=exp-ssh-v2-0003 (glob)
devel-peer-request: hello+between
devel-peer-request: pairs: 81 bytes
@@ -1038,8 +1029,7 @@
Verify the peer has capabilities
$ hg --config experimental.sshpeer.advertise-v2=true --debug debugcapabilities ssh://user@dummy/server
- running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !)
- running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !)
+ running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R server serve --stdio['"] (re)
sending upgrade request: * proto=exp-ssh-v2-0003 (glob)
devel-peer-request: hello+between
devel-peer-request: pairs: 81 bytes
--- a/tests/test-ssh-repoerror.t Fri Aug 27 13:51:44 2021 -0700
+++ b/tests/test-ssh-repoerror.t Mon Aug 30 12:25:57 2021 +0200
@@ -4,13 +4,6 @@
`alias hg=rhg` by run-tests.py. With such alias removed, this test is revealed
buggy. This need to be resolved sooner than later.
-initial setup
-
- $ cat << EOF >> $HGRCPATH
- > [ui]
- > ssh="$PYTHON" "$TESTDIR/dummyssh"
- > EOF
-
repository itself is non-readable
---------------------------------
--- a/tests/test-ssh.t Fri Aug 27 13:51:44 2021 -0700
+++ b/tests/test-ssh.t Mon Aug 30 12:25:57 2021 +0200
@@ -42,18 +42,18 @@
repo not found error
- $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/nonexistent local
+ $ hg clone ssh://user@dummy/nonexistent local
remote: abort: repository nonexistent not found
abort: no suitable response from remote hg
[255]
- $ hg clone -q -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/nonexistent local
+ $ hg clone -q ssh://user@dummy/nonexistent local
remote: abort: repository nonexistent not found
abort: no suitable response from remote hg
[255]
non-existent absolute path
- $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/`pwd`/nonexistent local
+ $ hg clone ssh://user@dummy/`pwd`/nonexistent local
remote: abort: repository $TESTTMP/nonexistent not found
abort: no suitable response from remote hg
[255]
@@ -62,7 +62,7 @@
#if no-reposimplestore
- $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" --stream ssh://user@dummy/remote local-stream
+ $ hg clone --stream ssh://user@dummy/remote local-stream
streaming all changes
8 files to transfer, 827 bytes of data (no-zstd !)
transferred 827 bytes in * seconds (*) (glob) (no-zstd !)
@@ -84,7 +84,7 @@
clone bookmarks via stream
$ hg -R local-stream book mybook
- $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" --stream ssh://user@dummy/local-stream stream2
+ $ hg clone --stream ssh://user@dummy/local-stream stream2
streaming all changes
15 files to transfer, * of data (glob)
transferred * in * seconds (*) (glob)
@@ -100,7 +100,7 @@
clone remote via pull
- $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote local
+ $ hg clone ssh://user@dummy/remote local
requesting all changes
adding changesets
adding manifests
@@ -128,14 +128,14 @@
$ hg paths
default = ssh://user@dummy/remote
- $ hg pull -e "\"$PYTHON\" \"$TESTDIR/dummyssh\""
+ $ hg pull
pulling from ssh://user@dummy/remote
searching for changes
no changes found
pull from wrong ssh URL
- $ hg pull -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/doesnotexist
+ $ hg pull ssh://user@dummy/doesnotexist
pulling from ssh://user@dummy/doesnotexist
remote: abort: repository doesnotexist not found
abort: no suitable response from remote hg
@@ -149,8 +149,6 @@
updating rc
$ echo "default-push = ssh://user@dummy/remote" >> .hg/hgrc
- $ echo "[ui]" >> .hg/hgrc
- $ echo "ssh = \"$PYTHON\" \"$TESTDIR/dummyssh\"" >> .hg/hgrc
find outgoing
@@ -167,7 +165,7 @@
find incoming on the remote side
- $ hg incoming -R ../remote -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/local
+ $ hg incoming -R ../remote ssh://user@dummy/local
comparing with ssh://user@dummy/local
searching for changes
changeset: 3:a28a9d1a809c
@@ -180,7 +178,7 @@
find incoming on the remote side (using absolute path)
- $ hg incoming -R ../remote -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/`pwd`"
+ $ hg incoming -R ../remote "ssh://user@dummy/`pwd`"
comparing with ssh://user@dummy/$TESTTMP/local
searching for changes
changeset: 3:a28a9d1a809c
@@ -227,7 +225,7 @@
test pushkeys and bookmarks
$ cd $TESTTMP/local
- $ hg debugpushkey --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote namespaces
+ $ hg debugpushkey ssh://user@dummy/remote namespaces
bookmarks
namespaces
phases
@@ -242,7 +240,7 @@
no changes found
exporting bookmark foo
[1]
- $ hg debugpushkey --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote bookmarks
+ $ hg debugpushkey ssh://user@dummy/remote bookmarks
foo 1160648e36cec0054048a7edc4110c6f84fde594
$ hg book -f foo
$ hg push --traceback
@@ -347,7 +345,7 @@
$ hg -R ../remote bookmark test
$ hg -R ../remote bookmarks
* test 4:6c0482d977a3
- $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote local-bookmarks
+ $ hg clone ssh://user@dummy/remote local-bookmarks
requesting all changes
adding changesets
adding manifests
@@ -375,21 +373,21 @@
Test remote paths with spaces (issue2983):
- $ hg init --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo"
+ $ hg init "ssh://user@dummy/a repo"
$ touch "$TESTTMP/a repo/test"
$ hg -R 'a repo' commit -A -m "test"
adding test
$ hg -R 'a repo' tag tag
- $ hg id --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo"
+ $ hg id "ssh://user@dummy/a repo"
73649e48688a
- $ hg id --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo#noNoNO"
+ $ hg id "ssh://user@dummy/a repo#noNoNO"
abort: unknown revision 'noNoNO'
[255]
Test (non-)escaping of remote paths with spaces when cloning (issue3145):
- $ hg clone --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo"
+ $ hg clone "ssh://user@dummy/a repo"
destination directory: a repo
abort: destination 'a repo' is not empty
[10]
@@ -515,8 +513,6 @@
$ cat >> .hg/hgrc << EOF
> [paths]
> default-push = ssh://user@dummy/remote
- > [ui]
- > ssh = "$PYTHON" "$TESTDIR/dummyssh"
> [extensions]
> localwrite = localwrite.py
> EOF
@@ -540,7 +536,7 @@
$ hg pull --debug ssh://user@dummy/remote --config devel.debug.peer-request=yes
pulling from ssh://user@dummy/remote
- running .* ".*/dummyssh" ['"]user@dummy['"] ('|")hg -R remote serve --stdio('|") (re)
+ running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R remote serve --stdio['"] (re)
sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
devel-peer-request: hello+between
devel-peer-request: pairs: 81 bytes
@@ -670,11 +666,11 @@
$ echo "pretxnchangegroup.fail = python:$TESTTMP/failhook:hook" >> remote/.hg/hgrc
- $ hg -q --config ui.ssh="\"$PYTHON\" $TESTDIR/dummyssh" clone ssh://user@dummy/remote hookout
+ $ hg -q clone ssh://user@dummy/remote hookout
$ cd hookout
$ touch hookfailure
$ hg -q commit -A -m 'remote hook failure'
- $ hg --config ui.ssh="\"$PYTHON\" $TESTDIR/dummyssh" push
+ $ hg push
pushing to ssh://user@dummy/remote
searching for changes
remote: adding changesets
@@ -695,7 +691,7 @@
> [extensions]
> crash = ${TESTDIR}/crashgetbundler.py
> EOF
- $ hg --config ui.ssh="\"$PYTHON\" $TESTDIR/dummyssh" pull
+ $ hg pull
pulling from ssh://user@dummy/remote
searching for changes
remote: abort: this is an exercise
@@ -704,14 +700,14 @@
abort with no error hint when there is a ssh problem when pulling
- $ hg pull ssh://brokenrepository -e "\"$PYTHON\" \"$TESTDIR/dummyssh\""
+ $ hg pull ssh://brokenrepository
pulling from ssh://brokenrepository/
abort: no suitable response from remote hg
[255]
abort with configured error hint when there is a ssh problem when pulling
- $ hg pull ssh://brokenrepository -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" \
+ $ hg pull ssh://brokenrepository \
> --config ui.ssherrorhint="Please see http://company/internalwiki/ssh.html"
pulling from ssh://brokenrepository/
abort: no suitable response from remote hg
--- a/tests/test-stream-bundle-v2.t Fri Aug 27 13:51:44 2021 -0700
+++ b/tests/test-stream-bundle-v2.t Mon Aug 30 12:25:57 2021 +0200
@@ -14,7 +14,6 @@
> evolution.exchange=True
> bundle2-output-capture=True
> [ui]
- > ssh="$PYTHON" "$TESTDIR/dummyssh"
> logtemplate={rev}:{node|short} {phase} {author} {bookmarks} {desc|firstline}
> [web]
> push_ssl = false
--- a/tests/test-subrepo-relative-path.t Fri Aug 27 13:51:44 2021 -0700
+++ b/tests/test-subrepo-relative-path.t Mon Aug 30 12:25:57 2021 +0200
@@ -186,7 +186,7 @@
subrepo paths with ssh urls
- $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/cloned sshclone
+ $ hg clone ssh://user@dummy/cloned sshclone
requesting all changes
adding changesets
adding manifests
@@ -203,7 +203,7 @@
new changesets 863c1745b441
3 files updated, 0 files merged, 0 files removed, 0 files unresolved
- $ hg -R sshclone push -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/`pwd`/cloned
+ $ hg -R sshclone push ssh://user@dummy/`pwd`/cloned
pushing to ssh://user@dummy/$TESTTMP/cloned
pushing subrepo sub to ssh://user@dummy/$TESTTMP/sub
searching for changes
--- a/tests/test-transaction-rollback-on-revlog-split.t Fri Aug 27 13:51:44 2021 -0700
+++ b/tests/test-transaction-rollback-on-revlog-split.t Mon Aug 30 12:25:57 2021 +0200
@@ -82,15 +82,14 @@
date: Thu Jan 01 00:00:00 1970 +0000
summary: _
- $ hg verify
- checking changesets
- checking manifests
- crosschecking files in changesets and manifests
- checking files
+ $ hg verify -q
warning: revlog 'data/file.d' not in fncache!
- checked 2 changesets with 2 changes to 1 files
1 warnings encountered!
hint: run "hg debugrebuildfncache" to recover from corrupt fncache
+ $ hg debugrebuildfncache --only-data
+ adding data/file.d
+ 1 items added, 0 removed from fncache
+ $ hg verify -q
$ cd ..
@@ -133,12 +132,7 @@
date: Thu Jan 01 00:00:00 1970 +0000
summary: _
- $ hg verify
- checking changesets
- checking manifests
- crosschecking files in changesets and manifests
- checking files
- checked 2 changesets with 2 changes to 1 files
+ $ hg verify -q
$ cd ..
@@ -170,13 +164,8 @@
date: Thu Jan 01 00:00:00 1970 +0000
summary: _
- $ hg verify
- checking changesets
- checking manifests
- crosschecking files in changesets and manifests
- checking files
+ $ hg verify -q
warning: revlog 'data/file.d' not in fncache!
- checked 2 changesets with 2 changes to 1 files
1 warnings encountered!
hint: run "hg debugrebuildfncache" to recover from corrupt fncache
$ cd ..
--- a/tests/test-transaction-rollback-on-sigpipe.t Fri Aug 27 13:51:44 2021 -0700
+++ b/tests/test-transaction-rollback-on-sigpipe.t Mon Aug 30 12:25:57 2021 +0200
@@ -2,7 +2,7 @@
the remote hg is able to successfully roll back the transaction.
$ hg init -q remote
- $ hg clone -e "\"$PYTHON\" \"$RUNTESTDIR/dummyssh\"" -q ssh://user@dummy/`pwd`/remote local
+ $ hg clone -q ssh://user@dummy/`pwd`/remote local
$ SIGPIPE_REMOTE_DEBUG_FILE="$TESTTMP/DEBUGFILE"
$ SYNCFILE1="$TESTTMP/SYNCFILE1"
$ SYNCFILE2="$TESTTMP/SYNCFILE2"
@@ -36,7 +36,7 @@
(use quiet to avoid flacky output from the server)
- $ hg push --quiet -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" --remotecmd "$remotecmd"
+ $ hg push --quiet --remotecmd "$remotecmd"
abort: stream ended unexpectedly (got 0 bytes, expected 4)
[255]
$ cat $SIGPIPE_REMOTE_DEBUG_FILE
--- a/tests/test-treemanifest.t Fri Aug 27 13:51:44 2021 -0700
+++ b/tests/test-treemanifest.t Mon Aug 30 12:25:57 2021 +0200
@@ -1,8 +1,3 @@
- $ cat << EOF >> $HGRCPATH
- > [ui]
- > ssh="$PYTHON" "$TESTDIR/dummyssh"
- > EOF
-
Set up repo
$ hg --config experimental.treemanifest=True init repo
--- a/tests/test-wireproto.py Fri Aug 27 13:51:44 2021 -0700
+++ b/tests/test-wireproto.py Mon Aug 30 12:25:57 2021 +0200
@@ -75,9 +75,7 @@
@wireprotov1peer.batchable
def greet(self, name):
- f = wireprotov1peer.future()
- yield {b'name': mangle(name)}, f
- yield unmangle(f.value)
+ return {b'name': mangle(name)}, unmangle
class serverrepo(object):
--- a/tests/test-wireproto.t Fri Aug 27 13:51:44 2021 -0700
+++ b/tests/test-wireproto.t Mon Aug 30 12:25:57 2021 +0200
@@ -142,13 +142,13 @@
SSH (try to exercise the ssh functionality with a dummy script):
- $ hg debugwireargs --ssh "\"$PYTHON\" $TESTDIR/dummyssh" ssh://user@dummy/repo uno due tre quattro
+ $ hg debugwireargs ssh://user@dummy/repo uno due tre quattro
uno due tre quattro None
- $ hg debugwireargs --ssh "\"$PYTHON\" $TESTDIR/dummyssh" ssh://user@dummy/repo eins zwei --four vier
+ $ hg debugwireargs ssh://user@dummy/repo eins zwei --four vier
eins zwei None vier None
- $ hg debugwireargs --ssh "\"$PYTHON\" $TESTDIR/dummyssh" ssh://user@dummy/repo eins zwei
+ $ hg debugwireargs ssh://user@dummy/repo eins zwei
eins zwei None None None
- $ hg debugwireargs --ssh "\"$PYTHON\" $TESTDIR/dummyssh" ssh://user@dummy/repo eins zwei --five fuenf
+ $ hg debugwireargs ssh://user@dummy/repo eins zwei --five fuenf
eins zwei None None None
Explicitly kill daemons to let the test exit on Windows