--- a/black.toml Wed Jan 27 00:54:57 2021 -0500
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,14 +0,0 @@
-[tool.black]
-line-length = 80
-exclude = '''
-build/
-| wheelhouse/
-| dist/
-| packages/
-| \.hg/
-| \.mypy_cache/
-| \.venv/
-| mercurial/thirdparty/
-'''
-skip-string-normalization = true
-quiet = true
--- a/contrib/clang-format-ignorelist Wed Jan 27 00:54:57 2021 -0500
+++ b/contrib/clang-format-ignorelist Fri Jan 29 17:32:09 2021 +0530
@@ -9,3 +9,4 @@
hgext/fsmonitor/pywatchman/**.c
mercurial/thirdparty/**.c
mercurial/thirdparty/**.h
+mercurial/pythoncapi_compat.h
--- a/contrib/examples/fix.hgrc Wed Jan 27 00:54:57 2021 -0500
+++ b/contrib/examples/fix.hgrc Fri Jan 29 17:32:09 2021 +0530
@@ -5,7 +5,7 @@
rustfmt:command = rustfmt +nightly
rustfmt:pattern = set:"**.rs" - "mercurial/thirdparty/**"
-black:command = black --config=black.toml -
+black:command = black
black:pattern = set:**.py - mercurial/thirdparty/**
# Mercurial doesn't have any Go code, but if we did this is how we
--- a/contrib/heptapod-ci.yml Wed Jan 27 00:54:57 2021 -0500
+++ b/contrib/heptapod-ci.yml Fri Jan 29 17:32:09 2021 +0530
@@ -7,6 +7,7 @@
variables:
PYTHON: python
TEST_HGMODULEPOLICY: "allow"
+ HG_CI_IMAGE_TAG: "latest"
.runtests_template: &runtests
stage: tests
@@ -17,6 +18,8 @@
- hg -R /tmp/mercurial-ci/ update `hg log --rev '.' --template '{node}'`
- cd /tmp/mercurial-ci/
- ls -1 tests/test-check-*.* > /tmp/check-tests.txt
+ - black --version
+ - clang-format --version
script:
- echo "python used, $PYTHON"
- echo "$RUNTEST_ARGS"
--- a/contrib/perf.py Wed Jan 27 00:54:57 2021 -0500
+++ b/contrib/perf.py Fri Jan 29 17:32:09 2021 +0530
@@ -744,7 +744,7 @@
# perf commands
-@command(b'perfwalk', formatteropts)
+@command(b'perf::walk|perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
opts = _byteskwargs(opts)
timer, fm = gettimer(ui, opts)
@@ -759,7 +759,7 @@
fm.end()
-@command(b'perfannotate', formatteropts)
+@command(b'perf::annotate|perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
opts = _byteskwargs(opts)
timer, fm = gettimer(ui, opts)
@@ -769,7 +769,7 @@
@command(
- b'perfstatus',
+ b'perf::status|perfstatus',
[
(b'u', b'unknown', False, b'ask status to look for unknown files'),
(b'', b'dirstate', False, b'benchmark the internal dirstate call'),
@@ -806,7 +806,7 @@
fm.end()
-@command(b'perfaddremove', formatteropts)
+@command(b'perf::addremove|perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
opts = _byteskwargs(opts)
timer, fm = gettimer(ui, opts)
@@ -837,7 +837,7 @@
cl._nodepos = None
-@command(b'perfheads', formatteropts)
+@command(b'perf::heads|perfheads', formatteropts)
def perfheads(ui, repo, **opts):
"""benchmark the computation of a changelog heads"""
opts = _byteskwargs(opts)
@@ -855,7 +855,7 @@
@command(
- b'perftags',
+ b'perf::tags|perftags',
formatteropts
+ [
(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
@@ -880,7 +880,7 @@
fm.end()
-@command(b'perfancestors', formatteropts)
+@command(b'perf::ancestors|perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
opts = _byteskwargs(opts)
timer, fm = gettimer(ui, opts)
@@ -894,7 +894,7 @@
fm.end()
-@command(b'perfancestorset', formatteropts)
+@command(b'perf::ancestorset|perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
opts = _byteskwargs(opts)
timer, fm = gettimer(ui, opts)
@@ -910,7 +910,7 @@
fm.end()
-@command(b'perfdiscovery', formatteropts, b'PATH')
+@command(b'perf::discovery|perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
"""benchmark discovery between local repo and the peer at given path"""
repos = [repo, None]
@@ -928,7 +928,7 @@
@command(
- b'perfbookmarks',
+ b'perf::bookmarks|perfbookmarks',
formatteropts
+ [
(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
@@ -953,7 +953,7 @@
fm.end()
-@command(b'perfbundleread', formatteropts, b'BUNDLE')
+@command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
"""Benchmark reading of bundle files.
@@ -1080,7 +1080,7 @@
@command(
- b'perfchangegroupchangelog',
+ b'perf::changegroupchangelog|perfchangegroupchangelog',
formatteropts
+ [
(b'', b'cgversion', b'02', b'changegroup version'),
@@ -1116,7 +1116,7 @@
fm.end()
-@command(b'perfdirs', formatteropts)
+@command(b'perf::dirs|perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
opts = _byteskwargs(opts)
timer, fm = gettimer(ui, opts)
@@ -1132,7 +1132,7 @@
@command(
- b'perfdirstate',
+ b'perf::dirstate|perfdirstate',
[
(
b'',
@@ -1195,7 +1195,7 @@
fm.end()
-@command(b'perfdirstatedirs', formatteropts)
+@command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
"""benchmap a 'dirstate.hasdir' call from an empty `dirs` cache"""
opts = _byteskwargs(opts)
@@ -1212,7 +1212,7 @@
fm.end()
-@command(b'perfdirstatefoldmap', formatteropts)
+@command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
"""benchmap a `dirstate._map.filefoldmap.get()` request
@@ -1233,7 +1233,7 @@
fm.end()
-@command(b'perfdirfoldmap', formatteropts)
+@command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
"""benchmap a `dirstate._map.dirfoldmap.get()` request
@@ -1255,7 +1255,7 @@
fm.end()
-@command(b'perfdirstatewrite', formatteropts)
+@command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
"""benchmap the time it take to write a dirstate on disk"""
opts = _byteskwargs(opts)
@@ -1297,7 +1297,7 @@
@command(
- b'perfmergecalculate',
+ b'perf::mergecalculate|perfmergecalculate',
[
(b'r', b'rev', b'.', b'rev to merge against'),
(b'', b'from', b'', b'rev to merge from'),
@@ -1330,7 +1330,7 @@
@command(
- b'perfmergecopies',
+ b'perf::mergecopies|perfmergecopies',
[
(b'r', b'rev', b'.', b'rev to merge against'),
(b'', b'from', b'', b'rev to merge from'),
@@ -1353,7 +1353,7 @@
fm.end()
-@command(b'perfpathcopies', [], b"REV REV")
+@command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
"""benchmark the copy tracing logic"""
opts = _byteskwargs(opts)
@@ -1369,7 +1369,7 @@
@command(
- b'perfphases',
+ b'perf::phases|perfphases',
[
(b'', b'full', False, b'include file reading time too'),
],
@@ -1394,7 +1394,7 @@
fm.end()
-@command(b'perfphasesremote', [], b"[DEST]")
+@command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
"""benchmark time needed to analyse phases of the remote server"""
from mercurial.node import bin
@@ -1455,7 +1455,7 @@
@command(
- b'perfmanifest',
+ b'perf::manifest|perfmanifest',
[
(b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
(b'', b'clear-disk', False, b'clear on-disk caches too'),
@@ -1499,7 +1499,7 @@
fm.end()
-@command(b'perfchangeset', formatteropts)
+@command(b'perf::changeset|perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
opts = _byteskwargs(opts)
timer, fm = gettimer(ui, opts)
@@ -1513,7 +1513,7 @@
fm.end()
-@command(b'perfignore', formatteropts)
+@command(b'perf::ignore|perfignore', formatteropts)
def perfignore(ui, repo, **opts):
"""benchmark operation related to computing ignore"""
opts = _byteskwargs(opts)
@@ -1532,7 +1532,7 @@
@command(
- b'perfindex',
+ b'perf::index|perfindex',
[
(b'', b'rev', [], b'revision to be looked up (default tip)'),
(b'', b'no-lookup', None, b'do not revision lookup post creation'),
@@ -1596,7 +1596,7 @@
@command(
- b'perfnodemap',
+ b'perf::nodemap|perfnodemap',
[
(b'', b'rev', [], b'revision to be looked up (default tip)'),
(b'', b'clear-caches', True, b'clear revlog cache between calls'),
@@ -1667,7 +1667,7 @@
fm.end()
-@command(b'perfstartup', formatteropts)
+@command(b'perf::startup|perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
opts = _byteskwargs(opts)
timer, fm = gettimer(ui, opts)
@@ -1685,7 +1685,7 @@
fm.end()
-@command(b'perfparents', formatteropts)
+@command(b'perf::parents|perfparents', formatteropts)
def perfparents(ui, repo, **opts):
"""benchmark the time necessary to fetch one changeset's parents.
@@ -1712,7 +1712,7 @@
fm.end()
-@command(b'perfctxfiles', formatteropts)
+@command(b'perf::ctxfiles|perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
opts = _byteskwargs(opts)
x = int(x)
@@ -1725,7 +1725,7 @@
fm.end()
-@command(b'perfrawfiles', formatteropts)
+@command(b'perf::rawfiles|perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
opts = _byteskwargs(opts)
x = int(x)
@@ -1739,7 +1739,7 @@
fm.end()
-@command(b'perflookup', formatteropts)
+@command(b'perf::lookup|perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
opts = _byteskwargs(opts)
timer, fm = gettimer(ui, opts)
@@ -1748,7 +1748,7 @@
@command(
- b'perflinelogedits',
+ b'perf::linelogedits|perflinelogedits',
[
(b'n', b'edits', 10000, b'number of edits'),
(b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
@@ -1786,7 +1786,7 @@
fm.end()
-@command(b'perfrevrange', formatteropts)
+@command(b'perf::revrange|perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
opts = _byteskwargs(opts)
timer, fm = gettimer(ui, opts)
@@ -1795,7 +1795,7 @@
fm.end()
-@command(b'perfnodelookup', formatteropts)
+@command(b'perf::nodelookup|perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
opts = _byteskwargs(opts)
timer, fm = gettimer(ui, opts)
@@ -1814,7 +1814,7 @@
@command(
- b'perflog',
+ b'perf::log|perflog',
[(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
)
def perflog(ui, repo, rev=None, **opts):
@@ -1832,7 +1832,7 @@
fm.end()
-@command(b'perfmoonwalk', formatteropts)
+@command(b'perf::moonwalk|perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
"""benchmark walking the changelog backwards
@@ -1851,7 +1851,7 @@
@command(
- b'perftemplating',
+ b'perf::templating|perftemplating',
[
(b'r', b'rev', [], b'revisions to run the template on'),
]
@@ -1941,7 +1941,7 @@
@command(
- b'perfhelper-mergecopies',
+ b'perf::helper-mergecopies|perfhelper-mergecopies',
formatteropts
+ [
(b'r', b'revs', [], b'restrict search to these revisions'),
@@ -2124,7 +2124,7 @@
@command(
- b'perfhelper-pathcopies',
+ b'perf::helper-pathcopies|perfhelper-pathcopies',
formatteropts
+ [
(b'r', b'revs', [], b'restrict search to these revisions'),
@@ -2263,7 +2263,7 @@
_displaystats(ui, opts, entries, alldata)
-@command(b'perfcca', formatteropts)
+@command(b'perf::cca|perfcca', formatteropts)
def perfcca(ui, repo, **opts):
opts = _byteskwargs(opts)
timer, fm = gettimer(ui, opts)
@@ -2271,7 +2271,7 @@
fm.end()
-@command(b'perffncacheload', formatteropts)
+@command(b'perf::fncacheload|perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
opts = _byteskwargs(opts)
timer, fm = gettimer(ui, opts)
@@ -2284,7 +2284,7 @@
fm.end()
-@command(b'perffncachewrite', formatteropts)
+@command(b'perf::fncachewrite|perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
opts = _byteskwargs(opts)
timer, fm = gettimer(ui, opts)
@@ -2304,7 +2304,7 @@
fm.end()
-@command(b'perffncacheencode', formatteropts)
+@command(b'perf::fncacheencode|perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
opts = _byteskwargs(opts)
timer, fm = gettimer(ui, opts)
@@ -2348,7 +2348,7 @@
@command(
- b'perfbdiff',
+ b'perf::bdiff|perfbdiff',
revlogopts
+ formatteropts
+ [
@@ -2464,7 +2464,7 @@
@command(
- b'perfunidiff',
+ b'perf::unidiff|perfunidiff',
revlogopts
+ formatteropts
+ [
@@ -2543,7 +2543,7 @@
fm.end()
-@command(b'perfdiffwd', formatteropts)
+@command(b'perf::diffwd|perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
"""Profile diff of working directory changes"""
opts = _byteskwargs(opts)
@@ -2568,7 +2568,11 @@
fm.end()
-@command(b'perfrevlogindex', revlogopts + formatteropts, b'-c|-m|FILE')
+@command(
+ b'perf::revlogindex|perfrevlogindex',
+ revlogopts + formatteropts,
+ b'-c|-m|FILE',
+)
def perfrevlogindex(ui, repo, file_=None, **opts):
"""Benchmark operations against a revlog index.
@@ -2704,7 +2708,7 @@
@command(
- b'perfrevlogrevisions',
+ b'perf::revlogrevisions|perfrevlogrevisions',
revlogopts
+ formatteropts
+ [
@@ -2754,7 +2758,7 @@
@command(
- b'perfrevlogwrite',
+ b'perf::revlogwrite|perfrevlogwrite',
revlogopts
+ formatteropts
+ [
@@ -3047,7 +3051,7 @@
@command(
- b'perfrevlogchunks',
+ b'perf::revlogchunks|perfrevlogchunks',
revlogopts
+ formatteropts
+ [
@@ -3176,7 +3180,7 @@
@command(
- b'perfrevlogrevision',
+ b'perf::revlogrevision|perfrevlogrevision',
revlogopts
+ formatteropts
+ [(b'', b'cache', False, b'use caches instead of clearing')],
@@ -3319,7 +3323,7 @@
@command(
- b'perfrevset',
+ b'perf::revset|perfrevset',
[
(b'C', b'clear', False, b'clear volatile cache between each call.'),
(b'', b'contexts', False, b'obtain changectx for each revision'),
@@ -3352,7 +3356,7 @@
@command(
- b'perfvolatilesets',
+ b'perf::volatilesets|perfvolatilesets',
[
(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
]
@@ -3401,7 +3405,7 @@
@command(
- b'perfbranchmap',
+ b'perf::branchmap|perfbranchmap',
[
(b'f', b'full', False, b'Includes build time of subset'),
(
@@ -3492,7 +3496,7 @@
@command(
- b'perfbranchmapupdate',
+ b'perf::branchmapupdate|perfbranchmapupdate',
[
(b'', b'base', [], b'subset of revision to start from'),
(b'', b'target', [], b'subset of revision to end with'),
@@ -3602,7 +3606,7 @@
@command(
- b'perfbranchmapload',
+ b'perf::branchmapload|perfbranchmapload',
[
(b'f', b'filter', b'', b'Specify repoview filter'),
(b'', b'list', False, b'List brachmap filter caches'),
@@ -3661,7 +3665,7 @@
fm.end()
-@command(b'perfloadmarkers')
+@command(b'perf::loadmarkers|perfloadmarkers')
def perfloadmarkers(ui, repo):
"""benchmark the time to parse the on-disk markers for a repo
@@ -3673,7 +3677,7 @@
@command(
- b'perflrucachedict',
+ b'perf::lrucachedict|perflrucachedict',
formatteropts
+ [
(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
@@ -3829,7 +3833,7 @@
@command(
- b'perfwrite',
+ b'perf::write|perfwrite',
formatteropts
+ [
(b'', b'write-method', b'write', b'ui write method'),
@@ -3892,7 +3896,7 @@
@command(
- b'perfprogress',
+ b'perf::progress|perfprogress',
formatteropts
+ [
(b'', b'topic', b'topic', b'topic for progress messages'),
--- a/contrib/python-zstandard/c-ext/bufferutil.c Wed Jan 27 00:54:57 2021 -0500
+++ b/contrib/python-zstandard/c-ext/bufferutil.c Fri Jan 29 17:32:09 2021 +0530
@@ -758,7 +758,7 @@
};
void bufferutil_module_init(PyObject* mod) {
- Py_TYPE(&ZstdBufferWithSegmentsType) = &PyType_Type;
+ Py_SET_TYPE(&ZstdBufferWithSegmentsType, &PyType_Type);
if (PyType_Ready(&ZstdBufferWithSegmentsType) < 0) {
return;
}
@@ -766,7 +766,7 @@
Py_INCREF(&ZstdBufferWithSegmentsType);
PyModule_AddObject(mod, "BufferWithSegments", (PyObject*)&ZstdBufferWithSegmentsType);
- Py_TYPE(&ZstdBufferSegmentsType) = &PyType_Type;
+ Py_SET_TYPE(&ZstdBufferSegmentsType, &PyType_Type);
if (PyType_Ready(&ZstdBufferSegmentsType) < 0) {
return;
}
@@ -774,7 +774,7 @@
Py_INCREF(&ZstdBufferSegmentsType);
PyModule_AddObject(mod, "BufferSegments", (PyObject*)&ZstdBufferSegmentsType);
- Py_TYPE(&ZstdBufferSegmentType) = &PyType_Type;
+ Py_SET_TYPE(&ZstdBufferSegmentType, &PyType_Type);
if (PyType_Ready(&ZstdBufferSegmentType) < 0) {
return;
}
@@ -782,7 +782,7 @@
Py_INCREF(&ZstdBufferSegmentType);
PyModule_AddObject(mod, "BufferSegment", (PyObject*)&ZstdBufferSegmentType);
- Py_TYPE(&ZstdBufferWithSegmentsCollectionType) = &PyType_Type;
+ Py_SET_TYPE(&ZstdBufferWithSegmentsCollectionType, &PyType_Type);
if (PyType_Ready(&ZstdBufferWithSegmentsCollectionType) < 0) {
return;
}
--- a/contrib/python-zstandard/c-ext/compressionchunker.c Wed Jan 27 00:54:57 2021 -0500
+++ b/contrib/python-zstandard/c-ext/compressionchunker.c Fri Jan 29 17:32:09 2021 +0530
@@ -348,12 +348,12 @@
};
void compressionchunker_module_init(PyObject* module) {
- Py_TYPE(&ZstdCompressionChunkerIteratorType) = &PyType_Type;
+ Py_SET_TYPE(&ZstdCompressionChunkerIteratorType, &PyType_Type);
if (PyType_Ready(&ZstdCompressionChunkerIteratorType) < 0) {
return;
}
- Py_TYPE(&ZstdCompressionChunkerType) = &PyType_Type;
+ Py_SET_TYPE(&ZstdCompressionChunkerType, &PyType_Type);
if (PyType_Ready(&ZstdCompressionChunkerType) < 0) {
return;
}
--- a/contrib/python-zstandard/c-ext/compressiondict.c Wed Jan 27 00:54:57 2021 -0500
+++ b/contrib/python-zstandard/c-ext/compressiondict.c Fri Jan 29 17:32:09 2021 +0530
@@ -400,7 +400,7 @@
};
void compressiondict_module_init(PyObject* mod) {
- Py_TYPE(&ZstdCompressionDictType) = &PyType_Type;
+ Py_SET_TYPE(&ZstdCompressionDictType, &PyType_Type);
if (PyType_Ready(&ZstdCompressionDictType) < 0) {
return;
}
--- a/contrib/python-zstandard/c-ext/compressionparams.c Wed Jan 27 00:54:57 2021 -0500
+++ b/contrib/python-zstandard/c-ext/compressionparams.c Fri Jan 29 17:32:09 2021 +0530
@@ -556,7 +556,7 @@
};
void compressionparams_module_init(PyObject* mod) {
- Py_TYPE(&ZstdCompressionParametersType) = &PyType_Type;
+ Py_SET_TYPE(&ZstdCompressionParametersType, &PyType_Type);
if (PyType_Ready(&ZstdCompressionParametersType) < 0) {
return;
}
--- a/contrib/python-zstandard/c-ext/compressionreader.c Wed Jan 27 00:54:57 2021 -0500
+++ b/contrib/python-zstandard/c-ext/compressionreader.c Fri Jan 29 17:32:09 2021 +0530
@@ -811,7 +811,7 @@
void compressionreader_module_init(PyObject* mod) {
/* TODO make reader a sub-class of io.RawIOBase */
- Py_TYPE(&ZstdCompressionReaderType) = &PyType_Type;
+ Py_SET_TYPE(&ZstdCompressionReaderType, &PyType_Type);
if (PyType_Ready(&ZstdCompressionReaderType) < 0) {
return;
}
--- a/contrib/python-zstandard/c-ext/compressionwriter.c Wed Jan 27 00:54:57 2021 -0500
+++ b/contrib/python-zstandard/c-ext/compressionwriter.c Fri Jan 29 17:32:09 2021 +0530
@@ -365,7 +365,7 @@
};
void compressionwriter_module_init(PyObject* mod) {
- Py_TYPE(&ZstdCompressionWriterType) = &PyType_Type;
+ Py_SET_TYPE(&ZstdCompressionWriterType, &PyType_Type);
if (PyType_Ready(&ZstdCompressionWriterType) < 0) {
return;
}
--- a/contrib/python-zstandard/c-ext/compressobj.c Wed Jan 27 00:54:57 2021 -0500
+++ b/contrib/python-zstandard/c-ext/compressobj.c Fri Jan 29 17:32:09 2021 +0530
@@ -249,7 +249,7 @@
};
void compressobj_module_init(PyObject* module) {
- Py_TYPE(&ZstdCompressionObjType) = &PyType_Type;
+ Py_SET_TYPE(&ZstdCompressionObjType, &PyType_Type);
if (PyType_Ready(&ZstdCompressionObjType) < 0) {
return;
}
--- a/contrib/python-zstandard/c-ext/compressor.c Wed Jan 27 00:54:57 2021 -0500
+++ b/contrib/python-zstandard/c-ext/compressor.c Fri Jan 29 17:32:09 2021 +0530
@@ -619,7 +619,7 @@
goto finally;
}
- Py_SIZE(output) = outBuffer.pos;
+ Py_SET_SIZE(output, outBuffer.pos);
finally:
PyBuffer_Release(&source);
@@ -1659,7 +1659,7 @@
};
void compressor_module_init(PyObject* mod) {
- Py_TYPE(&ZstdCompressorType) = &PyType_Type;
+ Py_SET_TYPE(&ZstdCompressorType, &PyType_Type);
if (PyType_Ready(&ZstdCompressorType) < 0) {
return;
}
--- a/contrib/python-zstandard/c-ext/compressoriterator.c Wed Jan 27 00:54:57 2021 -0500
+++ b/contrib/python-zstandard/c-ext/compressoriterator.c Fri Jan 29 17:32:09 2021 +0530
@@ -228,7 +228,7 @@
};
void compressoriterator_module_init(PyObject* mod) {
- Py_TYPE(&ZstdCompressorIteratorType) = &PyType_Type;
+ Py_SET_TYPE(&ZstdCompressorIteratorType, &PyType_Type);
if (PyType_Ready(&ZstdCompressorIteratorType) < 0) {
return;
}
--- a/contrib/python-zstandard/c-ext/decompressionreader.c Wed Jan 27 00:54:57 2021 -0500
+++ b/contrib/python-zstandard/c-ext/decompressionreader.c Fri Jan 29 17:32:09 2021 +0530
@@ -774,7 +774,7 @@
void decompressionreader_module_init(PyObject* mod) {
/* TODO make reader a sub-class of io.RawIOBase */
- Py_TYPE(&ZstdDecompressionReaderType) = &PyType_Type;
+ Py_SET_TYPE(&ZstdDecompressionReaderType, &PyType_Type);
if (PyType_Ready(&ZstdDecompressionReaderType) < 0) {
return;
}
--- a/contrib/python-zstandard/c-ext/decompressionwriter.c Wed Jan 27 00:54:57 2021 -0500
+++ b/contrib/python-zstandard/c-ext/decompressionwriter.c Fri Jan 29 17:32:09 2021 +0530
@@ -288,7 +288,7 @@
};
void decompressionwriter_module_init(PyObject* mod) {
- Py_TYPE(&ZstdDecompressionWriterType) = &PyType_Type;
+ Py_SET_TYPE(&ZstdDecompressionWriterType, &PyType_Type);
if (PyType_Ready(&ZstdDecompressionWriterType) < 0) {
return;
}
--- a/contrib/python-zstandard/c-ext/decompressobj.c Wed Jan 27 00:54:57 2021 -0500
+++ b/contrib/python-zstandard/c-ext/decompressobj.c Fri Jan 29 17:32:09 2021 +0530
@@ -195,7 +195,7 @@
};
void decompressobj_module_init(PyObject* module) {
- Py_TYPE(&ZstdDecompressionObjType) = &PyType_Type;
+ Py_SET_TYPE(&ZstdDecompressionObjType, &PyType_Type);
if (PyType_Ready(&ZstdDecompressionObjType) < 0) {
return;
}
--- a/contrib/python-zstandard/c-ext/decompressor.c Wed Jan 27 00:54:57 2021 -0500
+++ b/contrib/python-zstandard/c-ext/decompressor.c Fri Jan 29 17:32:09 2021 +0530
@@ -1811,7 +1811,7 @@
};
void decompressor_module_init(PyObject* mod) {
- Py_TYPE(&ZstdDecompressorType) = &PyType_Type;
+ Py_SET_TYPE(&ZstdDecompressorType, &PyType_Type);
if (PyType_Ready(&ZstdDecompressorType) < 0) {
return;
}
--- a/contrib/python-zstandard/c-ext/decompressoriterator.c Wed Jan 27 00:54:57 2021 -0500
+++ b/contrib/python-zstandard/c-ext/decompressoriterator.c Fri Jan 29 17:32:09 2021 +0530
@@ -242,7 +242,7 @@
};
void decompressoriterator_module_init(PyObject* mod) {
- Py_TYPE(&ZstdDecompressorIteratorType) = &PyType_Type;
+ Py_SET_TYPE(&ZstdDecompressorIteratorType, &PyType_Type);
if (PyType_Ready(&ZstdDecompressorIteratorType) < 0) {
return;
}
--- a/contrib/python-zstandard/c-ext/frameparams.c Wed Jan 27 00:54:57 2021 -0500
+++ b/contrib/python-zstandard/c-ext/frameparams.c Fri Jan 29 17:32:09 2021 +0530
@@ -128,7 +128,7 @@
};
void frameparams_module_init(PyObject* mod) {
- Py_TYPE(&FrameParametersType) = &PyType_Type;
+ Py_SET_TYPE(&FrameParametersType, &PyType_Type);
if (PyType_Ready(&FrameParametersType) < 0) {
return;
}
--- a/contrib/python-zstandard/c-ext/python-zstandard.h Wed Jan 27 00:54:57 2021 -0500
+++ b/contrib/python-zstandard/c-ext/python-zstandard.h Fri Jan 29 17:32:09 2021 +0530
@@ -9,6 +9,7 @@
#define PY_SSIZE_T_CLEAN
#include <Python.h>
#include "structmember.h"
+#include <pythoncapi_compat.h>
#define ZSTD_STATIC_LINKING_ONLY
#define ZDICT_STATIC_LINKING_ONLY
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/zstd/common/pythoncapi_compat.h Fri Jan 29 17:32:09 2021 +0530
@@ -0,0 +1,283 @@
+// Header file providing new functions of the Python C API to old Python
+// versions.
+//
+// File distributed under the MIT license.
+//
+// Homepage:
+// https://github.com/pythoncapi/pythoncapi_compat
+//
+// Latest version:
+// https://raw.githubusercontent.com/pythoncapi/pythoncapi_compat/master/pythoncapi_compat.h
+
+#ifndef PYTHONCAPI_COMPAT
+#define PYTHONCAPI_COMPAT
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <Python.h>
+#include "frameobject.h" // PyFrameObject, PyFrame_GetBack()
+
+
+/* VC 2008 doesn't know about the inline keyword. */
+#if defined(_MSC_VER) && _MSC_VER < 1900
+#define inline __forceinline
+#endif
+
+// Cast argument to PyObject* type.
+#ifndef _PyObject_CAST
+# define _PyObject_CAST(op) ((PyObject*)(op))
+#endif
+
+
+// bpo-42262 added Py_NewRef() to Python 3.10.0a3
+#if PY_VERSION_HEX < 0x030a00A3 && !defined(Py_NewRef)
+static inline PyObject* _Py_NewRef(PyObject *obj)
+{
+ Py_INCREF(obj);
+ return obj;
+}
+#define Py_NewRef(obj) _Py_NewRef(_PyObject_CAST(obj))
+#endif
+
+
+// bpo-42262 added Py_XNewRef() to Python 3.10.0a3
+#if PY_VERSION_HEX < 0x030a00A3 && !defined(Py_XNewRef)
+static inline PyObject* _Py_XNewRef(PyObject *obj)
+{
+ Py_XINCREF(obj);
+ return obj;
+}
+#define Py_XNewRef(obj) _Py_XNewRef(_PyObject_CAST(obj))
+#endif
+
+
+// bpo-39573 added Py_SET_REFCNT() to Python 3.9.0a4
+#if PY_VERSION_HEX < 0x030900A4 && !defined(Py_SET_REFCNT)
+static inline void _Py_SET_REFCNT(PyObject *ob, Py_ssize_t refcnt)
+{
+ ob->ob_refcnt = refcnt;
+}
+#define Py_SET_REFCNT(ob, refcnt) _Py_SET_REFCNT((PyObject*)(ob), refcnt)
+#endif
+
+
+// bpo-39573 added Py_SET_TYPE() to Python 3.9.0a4
+#if PY_VERSION_HEX < 0x030900A4 && !defined(Py_SET_TYPE)
+static inline void
+_Py_SET_TYPE(PyObject *ob, PyTypeObject *type)
+{
+ ob->ob_type = type;
+}
+#define Py_SET_TYPE(ob, type) _Py_SET_TYPE((PyObject*)(ob), type)
+#endif
+
+
+// bpo-39573 added Py_SET_SIZE() to Python 3.9.0a4
+#if PY_VERSION_HEX < 0x030900A4 && !defined(Py_SET_SIZE)
+static inline void
+_Py_SET_SIZE(PyVarObject *ob, Py_ssize_t size)
+{
+ ob->ob_size = size;
+}
+#define Py_SET_SIZE(ob, size) _Py_SET_SIZE((PyVarObject*)(ob), size)
+#endif
+
+
+// bpo-40421 added PyFrame_GetCode() to Python 3.9.0b1
+#if PY_VERSION_HEX < 0x030900B1
+static inline PyCodeObject*
+PyFrame_GetCode(PyFrameObject *frame)
+{
+ PyCodeObject *code;
+ assert(frame != NULL);
+ code = frame->f_code;
+ assert(code != NULL);
+ Py_INCREF(code);
+ return code;
+}
+#endif
+
+static inline PyCodeObject*
+_PyFrame_GetCodeBorrow(PyFrameObject *frame)
+{
+ PyCodeObject *code = PyFrame_GetCode(frame);
+ Py_DECREF(code);
+ return code; // borrowed reference
+}
+
+
+// bpo-40421 added PyFrame_GetBack() to Python 3.9.0b1
+#if PY_VERSION_HEX < 0x030900B1
+static inline PyFrameObject*
+PyFrame_GetBack(PyFrameObject *frame)
+{
+ PyFrameObject *back;
+ assert(frame != NULL);
+ back = frame->f_back;
+ Py_XINCREF(back);
+ return back;
+}
+#endif
+
+static inline PyFrameObject*
+_PyFrame_GetBackBorrow(PyFrameObject *frame)
+{
+ PyFrameObject *back = PyFrame_GetBack(frame);
+ Py_XDECREF(back);
+ return back; // borrowed reference
+}
+
+
+// bpo-39947 added PyThreadState_GetInterpreter() to Python 3.9.0a5
+#if PY_VERSION_HEX < 0x030900A5
+static inline PyInterpreterState *
+PyThreadState_GetInterpreter(PyThreadState *tstate)
+{
+ assert(tstate != NULL);
+ return tstate->interp;
+}
+#endif
+
+
+// bpo-40429 added PyThreadState_GetFrame() to Python 3.9.0b1
+#if PY_VERSION_HEX < 0x030900B1
+static inline PyFrameObject*
+PyThreadState_GetFrame(PyThreadState *tstate)
+{
+ PyFrameObject *frame;
+ assert(tstate != NULL);
+ frame = tstate->frame;
+ Py_XINCREF(frame);
+ return frame;
+}
+#endif
+
+static inline PyFrameObject*
+_PyThreadState_GetFrameBorrow(PyThreadState *tstate)
+{
+ PyFrameObject *frame = PyThreadState_GetFrame(tstate);
+ Py_XDECREF(frame);
+ return frame; // borrowed reference
+}
+
+
+// bpo-39947 added PyInterpreterState_Get() to Python 3.9.0a5
+#if PY_VERSION_HEX < 0x030900A5
+static inline PyInterpreterState *
+PyInterpreterState_Get(void)
+{
+ PyThreadState *tstate;
+ PyInterpreterState *interp;
+
+ tstate = PyThreadState_GET();
+ if (tstate == NULL) {
+ Py_FatalError("GIL released (tstate is NULL)");
+ }
+ interp = tstate->interp;
+ if (interp == NULL) {
+ Py_FatalError("no current interpreter");
+ }
+ return interp;
+}
+#endif
+
+
+// bpo-39947 added PyThreadState_GetID() to Python 3.9.0a6
+#if 0x030700A1 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x030900A6
+static inline uint64_t
+PyThreadState_GetID(PyThreadState *tstate)
+{
+ assert(tstate != NULL);
+ return tstate->id;
+}
+#endif
+
+
+// bpo-37194 added PyObject_CallNoArgs() to Python 3.9.0a1
+#if PY_VERSION_HEX < 0x030900A1
+static inline PyObject*
+PyObject_CallNoArgs(PyObject *func)
+{
+ return PyObject_CallFunctionObjArgs(func, NULL);
+}
+#endif
+
+
+// bpo-39245 made PyObject_CallOneArg() public (previously called
+// _PyObject_CallOneArg) in Python 3.9.0a4
+#if PY_VERSION_HEX < 0x030900A4
+static inline PyObject*
+PyObject_CallOneArg(PyObject *func, PyObject *arg)
+{
+ return PyObject_CallFunctionObjArgs(func, arg, NULL);
+}
+#endif
+
+
+// bpo-40024 added PyModule_AddType() to Python 3.9.0a5
+#if PY_VERSION_HEX < 0x030900A5
+static inline int
+PyModule_AddType(PyObject *module, PyTypeObject *type)
+{
+ const char *name, *dot;
+
+ if (PyType_Ready(type) < 0) {
+ return -1;
+ }
+
+ // inline _PyType_Name()
+ name = type->tp_name;
+ assert(name != NULL);
+ dot = strrchr(name, '.');
+ if (dot != NULL) {
+ name = dot + 1;
+ }
+
+ Py_INCREF(type);
+ if (PyModule_AddObject(module, name, (PyObject *)type) < 0) {
+ Py_DECREF(type);
+ return -1;
+ }
+
+ return 0;
+}
+#endif
+
+
+// bpo-40241 added PyObject_GC_IsTracked() to Python 3.9.0a6.
+// bpo-4688 added _PyObject_GC_IS_TRACKED() to Python 2.7.0a2.
+#if PY_VERSION_HEX < 0x030900A6
+static inline int
+PyObject_GC_IsTracked(PyObject* obj)
+{
+ return (PyObject_IS_GC(obj) && _PyObject_GC_IS_TRACKED(obj));
+}
+#endif
+
+// bpo-40241 added PyObject_GC_IsFinalized() to Python 3.9.0a6.
+// bpo-18112 added _PyGCHead_FINALIZED() to Python 3.4.0 final.
+#if PY_VERSION_HEX < 0x030900A6 && PY_VERSION_HEX >= 0x030400F0
+static inline int
+PyObject_GC_IsFinalized(PyObject *obj)
+{
+ return (PyObject_IS_GC(obj) && _PyGCHead_FINALIZED((PyGC_Head *)(obj)-1));
+}
+#endif
+
+
+// bpo-39573 added Py_IS_TYPE() to Python 3.9.0a4
+#if PY_VERSION_HEX < 0x030900A4 && !defined(Py_IS_TYPE)
+static inline int
+_Py_IS_TYPE(const PyObject *ob, const PyTypeObject *type) {
+ return ob->ob_type == type;
+}
+#define Py_IS_TYPE(ob, type) _Py_IS_TYPE((const PyObject*)(ob), type)
+#endif
+
+
+#ifdef __cplusplus
+}
+#endif
+#endif // PYTHONCAPI_COMPAT
--- a/hgext/convert/__init__.py Wed Jan 27 00:54:57 2021 -0500
+++ b/hgext/convert/__init__.py Fri Jan 29 17:32:09 2021 +0530
@@ -491,6 +491,22 @@
:convert.skiptags: does not convert tags from the source repo to the target
repo. The default is False.
+
+ Subversion Destination
+ ######################
+
+ Original commit dates are not preserved by default.
+
+ :convert.svn.dangerous-set-commit-dates: preserve original commit dates,
+ forcefully setting ``svn:date`` revision properties. This option is
+ DANGEROUS and may break some subversion functionality for the resulting
+ repository (e.g. filtering revisions with date ranges in ``svn log``),
+ as original commit dates are not guaranteed to be monotonically
+ increasing.
+
+      For commit date setting to work, the destination repository must have the
+ ``pre-revprop-change`` hook configured to allow setting of ``svn:date``
+ revision properties. See Subversion documentation for more details.
"""
return convcmd.convert(ui, src, dest, revmapfile, **opts)
--- a/hgext/convert/subversion.py Wed Jan 27 00:54:57 2021 -0500
+++ b/hgext/convert/subversion.py Fri Jan 29 17:32:09 2021 +0530
@@ -97,6 +97,17 @@
return s.decode(fsencoding).encode('utf-8')
+def formatsvndate(date):
+ return dateutil.datestr(date, b'%Y-%m-%dT%H:%M:%S.000000Z')
+
+
+def parsesvndate(s):
+ # Example SVN datetime. Includes microseconds.
+ # ISO-8601 conformant
+ # '2007-01-04T17:35:00.902377Z'
+ return dateutil.parsedate(s[:19] + b' UTC', [b'%Y-%m-%dT%H:%M:%S'])
+
+
class SvnPathNotFound(Exception):
pass
@@ -1158,12 +1169,7 @@
continue
paths.append((path, ent))
- # Example SVN datetime. Includes microseconds.
- # ISO-8601 conformant
- # '2007-01-04T17:35:00.902377Z'
- date = dateutil.parsedate(
- date[:19] + b" UTC", [b"%Y-%m-%dT%H:%M:%S"]
- )
+ date = parsesvndate(date)
if self.ui.configbool(b'convert', b'localtimezone'):
date = makedatetimestamp(date[0])
@@ -1380,7 +1386,7 @@
return logstream(stdout)
-pre_revprop_change = b'''#!/bin/sh
+pre_revprop_change_template = b'''#!/bin/sh
REPOS="$1"
REV="$2"
@@ -1388,15 +1394,26 @@
PROPNAME="$4"
ACTION="$5"
-if [ "$ACTION" = "M" -a "$PROPNAME" = "svn:log" ]; then exit 0; fi
-if [ "$ACTION" = "A" -a "$PROPNAME" = "hg:convert-branch" ]; then exit 0; fi
-if [ "$ACTION" = "A" -a "$PROPNAME" = "hg:convert-rev" ]; then exit 0; fi
+%(rules)s
echo "Changing prohibited revision property" >&2
exit 1
'''
+def gen_pre_revprop_change_hook(prop_actions_allowed):
+ rules = []
+ for action, propname in prop_actions_allowed:
+ rules.append(
+ (
+ b'if [ "$ACTION" = "%s" -a "$PROPNAME" = "%s" ]; '
+ b'then exit 0; fi'
+ )
+ % (action, propname)
+ )
+ return pre_revprop_change_template % {b'rules': b'\n'.join(rules)}
+
+
class svn_sink(converter_sink, commandline):
commit_re = re.compile(br'Committed revision (\d+).', re.M)
uuid_re = re.compile(br'Repository UUID:\s*(\S+)', re.M)
@@ -1470,9 +1487,20 @@
self.is_exec = None
if created:
+ prop_actions_allowed = [
+ (b'M', b'svn:log'),
+ (b'A', b'hg:convert-branch'),
+ (b'A', b'hg:convert-rev'),
+ ]
+
+ if self.ui.configbool(
+ b'convert', b'svn.dangerous-set-commit-dates'
+ ):
+ prop_actions_allowed.append((b'M', b'svn:date'))
+
hook = os.path.join(created, b'hooks', b'pre-revprop-change')
fp = open(hook, b'wb')
- fp.write(pre_revprop_change)
+ fp.write(gen_pre_revprop_change_hook(prop_actions_allowed))
fp.close()
util.setflags(hook, False, True)
@@ -1667,6 +1695,23 @@
revprop=True,
revision=rev,
)
+
+ if self.ui.configbool(
+ b'convert', b'svn.dangerous-set-commit-dates'
+ ):
+            # Subversion always uses UTC to represent date and time
+ date = dateutil.parsedate(commit.date)
+ date = (date[0], 0)
+
+ # The only way to set date and time for svn commit is to use propset after commit is done
+ self.run(
+ b'propset',
+ b'svn:date',
+ formatsvndate(date),
+ revprop=True,
+ revision=rev,
+ )
+
for parent in parents:
self.addchild(parent, rev)
return self.revid(rev)
--- a/hgext/fix.py Wed Jan 27 00:54:57 2021 -0500
+++ b/hgext/fix.py Fri Jan 29 17:32:09 2021 +0530
@@ -433,8 +433,9 @@
if not (len(revs) == 1 and wdirrev in revs):
cmdutil.checkunfinished(repo)
rewriteutil.precheck(repo, revs, b'fix')
- if wdirrev in revs and list(
- mergestatemod.mergestate.read(repo).unresolved()
+ if (
+ wdirrev in revs
+ and mergestatemod.mergestate.read(repo).unresolvedcount()
):
raise error.Abort(b'unresolved conflicts', hint=b"use 'hg resolve'")
if not revs:
--- a/hgext/histedit.py Wed Jan 27 00:54:57 2021 -0500
+++ b/hgext/histedit.py Fri Jan 29 17:32:09 2021 +0530
@@ -1581,10 +1581,19 @@
def layout(mode):
maxy, maxx = stdscr.getmaxyx()
helplen = len(helplines(mode))
+ mainlen = maxy - helplen - 12
+ if mainlen < 1:
+ raise error.Abort(
+ _(b"terminal dimensions %d by %d too small for curses histedit")
+ % (maxy, maxx),
+ hint=_(
+ b"enlarge your terminal or use --config ui.interface=text"
+ ),
+ )
return {
b'commit': (12, maxx),
b'help': (helplen, maxx),
- b'main': (maxy - helplen - 12, maxx),
+ b'main': (mainlen, maxx),
}
def drawvertwin(size, y, x):
@@ -1614,63 +1623,60 @@
stdscr.clear()
stdscr.refresh()
while True:
- try:
- oldmode, _ = state[b'mode']
- if oldmode == MODE_INIT:
- changemode(state, MODE_RULES)
- e = event(state, ch)
-
- if e == E_QUIT:
- return False
- if e == E_HISTEDIT:
- return state[b'rules']
+ oldmode, unused = state[b'mode']
+ if oldmode == MODE_INIT:
+ changemode(state, MODE_RULES)
+ e = event(state, ch)
+
+ if e == E_QUIT:
+ return False
+ if e == E_HISTEDIT:
+ return state[b'rules']
+ else:
+ if e == E_RESIZE:
+ size = screen_size()
+ if size != stdscr.getmaxyx():
+ curses.resizeterm(*size)
+
+ curmode, unused = state[b'mode']
+ sizes = layout(curmode)
+ if curmode != oldmode:
+ state[b'page_height'] = sizes[b'main'][0]
+ # Adjust the view to fit the current screen size.
+ movecursor(state, state[b'pos'], state[b'pos'])
+
+ # Pack the windows against the top, each pane spread across the
+ # full width of the screen.
+ y, x = (0, 0)
+ helpwin, y, x = drawvertwin(sizes[b'help'], y, x)
+ mainwin, y, x = drawvertwin(sizes[b'main'], y, x)
+ commitwin, y, x = drawvertwin(sizes[b'commit'], y, x)
+
+ if e in (E_PAGEDOWN, E_PAGEUP, E_LINEDOWN, E_LINEUP):
+ if e == E_PAGEDOWN:
+ changeview(state, +1, b'page')
+ elif e == E_PAGEUP:
+ changeview(state, -1, b'page')
+ elif e == E_LINEDOWN:
+ changeview(state, +1, b'line')
+ elif e == E_LINEUP:
+ changeview(state, -1, b'line')
+
+ # start rendering
+ commitwin.erase()
+ helpwin.erase()
+ mainwin.erase()
+ if curmode == MODE_PATCH:
+ renderpatch(mainwin, state)
+ elif curmode == MODE_HELP:
+ renderstring(mainwin, state, __doc__.strip().splitlines())
else:
- if e == E_RESIZE:
- size = screen_size()
- if size != stdscr.getmaxyx():
- curses.resizeterm(*size)
-
- curmode, _ = state[b'mode']
- sizes = layout(curmode)
- if curmode != oldmode:
- state[b'page_height'] = sizes[b'main'][0]
- # Adjust the view to fit the current screen size.
- movecursor(state, state[b'pos'], state[b'pos'])
-
- # Pack the windows against the top, each pane spread across the
- # full width of the screen.
- y, x = (0, 0)
- helpwin, y, x = drawvertwin(sizes[b'help'], y, x)
- mainwin, y, x = drawvertwin(sizes[b'main'], y, x)
- commitwin, y, x = drawvertwin(sizes[b'commit'], y, x)
-
- if e in (E_PAGEDOWN, E_PAGEUP, E_LINEDOWN, E_LINEUP):
- if e == E_PAGEDOWN:
- changeview(state, +1, b'page')
- elif e == E_PAGEUP:
- changeview(state, -1, b'page')
- elif e == E_LINEDOWN:
- changeview(state, +1, b'line')
- elif e == E_LINEUP:
- changeview(state, -1, b'line')
-
- # start rendering
- commitwin.erase()
- helpwin.erase()
- mainwin.erase()
- if curmode == MODE_PATCH:
- renderpatch(mainwin, state)
- elif curmode == MODE_HELP:
- renderstring(mainwin, state, __doc__.strip().splitlines())
- else:
- renderrules(mainwin, state)
- rendercommit(commitwin, state)
- renderhelp(helpwin, state)
- curses.doupdate()
- # done rendering
- ch = encoding.strtolocal(stdscr.getkey())
- except curses.error:
- pass
+ renderrules(mainwin, state)
+ rendercommit(commitwin, state)
+ renderhelp(helpwin, state)
+ curses.doupdate()
+ # done rendering
+ ch = encoding.strtolocal(stdscr.getkey())
def _chistedit(ui, repo, freeargs, opts):
--- a/hgext/largefiles/overrides.py Wed Jan 27 00:54:57 2021 -0500
+++ b/hgext/largefiles/overrides.py Fri Jan 29 17:32:09 2021 +0530
@@ -1567,7 +1567,7 @@
# Calling purge with --all will cause the largefiles to be deleted.
# Override repo.status to prevent this from happening.
-@eh.wrapcommand(b'purge', extension=b'purge')
+@eh.wrapcommand(b'purge')
def overridepurge(orig, ui, repo, *dirs, **opts):
# XXX Monkey patching a repoview will not work. The assigned attribute will
# be set on the unfiltered repo, but we will only lookup attributes in the
--- a/hgext/purge.py Wed Jan 27 00:54:57 2021 -0500
+++ b/hgext/purge.py Fri Jan 29 17:32:09 2021 +0530
@@ -22,115 +22,11 @@
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
-'''command to delete untracked files from the working directory'''
-from __future__ import absolute_import
-
-from mercurial.i18n import _
-from mercurial import (
- cmdutil,
- merge as mergemod,
- pycompat,
- registrar,
- scmutil,
-)
-
-cmdtable = {}
-command = registrar.command(cmdtable)
-# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
-# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
-# be specifying the version(s) of Mercurial they are tested with, or
-# leave the attribute unspecified.
-testedwith = b'ships-with-hg-core'
-
-
-@command(
- b'purge|clean',
- [
- (b'a', b'abort-on-err', None, _(b'abort if an error occurs')),
- (b'', b'all', None, _(b'purge ignored files too')),
- (b'i', b'ignored', None, _(b'purge only ignored files')),
- (b'', b'dirs', None, _(b'purge empty directories')),
- (b'', b'files', None, _(b'purge files')),
- (b'p', b'print', None, _(b'print filenames instead of deleting them')),
- (
- b'0',
- b'print0',
- None,
- _(
- b'end filenames with NUL, for use with xargs'
- b' (implies -p/--print)'
- ),
- ),
- ]
- + cmdutil.walkopts,
- _(b'hg purge [OPTION]... [DIR]...'),
- helpcategory=command.CATEGORY_WORKING_DIRECTORY,
-)
-def purge(ui, repo, *dirs, **opts):
- """removes files not tracked by Mercurial
-
- Delete files not known to Mercurial. This is useful to test local
- and uncommitted changes in an otherwise-clean source tree.
-
- This means that purge will delete the following by default:
-
- - Unknown files: files marked with "?" by :hg:`status`
- - Empty directories: in fact Mercurial ignores directories unless
- they contain files under source control management
+'''command to delete untracked files from the working directory (DEPRECATED)
- But it will leave untouched:
-
- - Modified and unmodified tracked files
- - Ignored files (unless -i or --all is specified)
- - New files added to the repository (with :hg:`add`)
-
- The --files and --dirs options can be used to direct purge to delete
- only files, only directories, or both. If neither option is given,
- both will be deleted.
-
- If directories are given on the command line, only files in these
- directories are considered.
-
- Be careful with purge, as you could irreversibly delete some files
- you forgot to add to the repository. If you only want to print the
- list of files that this program would delete, use the --print
- option.
- """
- opts = pycompat.byteskwargs(opts)
- cmdutil.check_at_most_one_arg(opts, b'all', b'ignored')
+The functionality of this extension has been included in core Mercurial since
+version 5.7. Please use :hg:`purge ...` instead. :hg:`purge --confirm` is now the default, unless the extension is enabled for backward compatibility.
+'''
- act = not opts.get(b'print')
- eol = b'\n'
- if opts.get(b'print0'):
- eol = b'\0'
- act = False # --print0 implies --print
- if opts.get(b'all', False):
- ignored = True
- unknown = True
- else:
- ignored = opts.get(b'ignored', False)
- unknown = not ignored
-
- removefiles = opts.get(b'files')
- removedirs = opts.get(b'dirs')
-
- if not removefiles and not removedirs:
- removefiles = True
- removedirs = True
-
- match = scmutil.match(repo[None], dirs, opts)
-
- paths = mergemod.purge(
- repo,
- match,
- unknown=unknown,
- ignored=ignored,
- removeemptydirs=removedirs,
- removefiles=removefiles,
- abortonerror=opts.get(b'abort_on_err'),
- noop=not act,
- )
-
- for path in paths:
- if not act:
- ui.write(b'%s%s' % (path, eol))
+# This empty extension looks pointless, but core mercurial checks if it's loaded
+# to implement the slightly different behavior documented above.
--- a/hgext/rebase.py Wed Jan 27 00:54:57 2021 -0500
+++ b/hgext/rebase.py Fri Jan 29 17:32:09 2021 +0530
@@ -67,6 +67,14 @@
cmdtable = {}
command = registrar.command(cmdtable)
+
+configtable = {}
+configitem = registrar.configitem(configtable)
+configitem(
+ b'devel',
+ b'rebase.force-in-memory-merge',
+ default=False,
+)
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
@@ -1112,6 +1120,8 @@
with ui.configoverride(overrides, b'rebase'):
return _dorebase(ui, repo, action, opts, inmemory=inmemory)
except error.InMemoryMergeConflictsError:
+ if ui.configbool(b'devel', b'rebase.force-in-memory-merge'):
+ raise
ui.warn(
_(
b'hit merge conflicts; re-running rebase without in-memory'
--- a/mercurial/branchmap.py Wed Jan 27 00:54:57 2021 -0500
+++ b/mercurial/branchmap.py Fri Jan 29 17:32:09 2021 +0530
@@ -566,6 +566,7 @@
# [4 byte hash prefix][4 byte branch name number with sign bit indicating open]
_rbcrecfmt = b'>4sI'
_rbcrecsize = calcsize(_rbcrecfmt)
+_rbcmininc = 64 * _rbcrecsize
_rbcnodelen = 4
_rbcbranchidxmask = 0x7FFFFFFF
_rbccloseflag = 0x80000000
@@ -705,8 +706,10 @@
self._setcachedata(rev, reponode, branchidx)
return b, close
- def setdata(self, branch, rev, node, close):
+ def setdata(self, rev, changelogrevision):
"""add new data information to the cache"""
+ branch, close = changelogrevision.branchinfo
+
if branch in self._namesreverse:
branchidx = self._namesreverse[branch]
else:
@@ -715,7 +718,7 @@
self._namesreverse[branch] = branchidx
if close:
branchidx |= _rbccloseflag
- self._setcachedata(rev, node, branchidx)
+ self._setcachedata(rev, self._repo.changelog.node(rev), branchidx)
# If no cache data were readable (non exists, bad permission, etc)
# the cache was bypassing itself by setting:
#
@@ -730,11 +733,15 @@
if rev == nullrev:
return
rbcrevidx = rev * _rbcrecsize
- if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
- self._rbcrevs.extend(
- b'\0'
- * (len(self._repo.changelog) * _rbcrecsize - len(self._rbcrevs))
- )
+ requiredsize = rbcrevidx + _rbcrecsize
+ rbccur = len(self._rbcrevs)
+ if rbccur < requiredsize:
+ # bytearray doesn't allocate extra space at least in Python 3.7.
+ # When multiple changesets are added in a row, precise resize would
+ # result in quadratic complexity. Overallocate to compensate by
+ # use the classic doubling technique for dynamic arrays instead.
+ # If there was a gap in the map before, less space will be reserved.
+ self._rbcrevs.extend(b'\0' * max(_rbcmininc, requiredsize))
pack_into(_rbcrecfmt, self._rbcrevs, rbcrevidx, node, branchidx)
self._rbcrevslen = min(self._rbcrevslen, rev)
--- a/mercurial/bundle2.py Wed Jan 27 00:54:57 2021 -0500
+++ b/mercurial/bundle2.py Fri Jan 29 17:32:09 2021 +0530
@@ -2478,35 +2478,10 @@
@parthandler(b'cache:rev-branch-cache')
def handlerbc(op, inpart):
- """receive a rev-branch-cache payload and update the local cache
-
- The payload is a series of data related to each branch
-
- 1) branch name length
- 2) number of open heads
- 3) number of closed heads
- 4) open heads nodes
- 5) closed heads nodes
- """
- total = 0
- rawheader = inpart.read(rbcstruct.size)
- cache = op.repo.revbranchcache()
- cl = op.repo.unfiltered().changelog
- while rawheader:
- header = rbcstruct.unpack(rawheader)
- total += header[1] + header[2]
- utf8branch = inpart.read(header[0])
- branch = encoding.tolocal(utf8branch)
- for x in pycompat.xrange(header[1]):
- node = inpart.read(20)
- rev = cl.rev(node)
- cache.setdata(branch, rev, node, False)
- for x in pycompat.xrange(header[2]):
- node = inpart.read(20)
- rev = cl.rev(node)
- cache.setdata(branch, rev, node, True)
- rawheader = inpart.read(rbcstruct.size)
- cache.write()
+ """Legacy part, ignored for compatibility with bundles from or
+ for Mercurial before 5.7. Newer Mercurial computes the cache
+ efficiently enough during unbundling that the additional transfer
+ is unnecessary."""
@parthandler(b'pushvars')
@@ -2561,8 +2536,6 @@
for r in repo.revs(b"::%ln", common):
commonnodes.add(cl.node(r))
if commonnodes:
- # XXX: we should only send the filelogs (and treemanifest). user
- # already has the changelog and manifest
packer = changegroup.getbundler(
cgversion,
repo,
--- a/mercurial/cext/osutil.c Wed Jan 27 00:54:57 2021 -0500
+++ b/mercurial/cext/osutil.c Fri Jan 29 17:32:09 2021 +0530
@@ -119,7 +119,7 @@
static void listdir_stat_dealloc(PyObject *o)
{
- o->ob_type->tp_free(o);
+ Py_TYPE(o)->tp_free(o);
}
static PyObject *listdir_stat_getitem(PyObject *self, PyObject *key)
--- a/mercurial/cext/pathencode.c Wed Jan 27 00:54:57 2021 -0500
+++ b/mercurial/cext/pathencode.c Fri Jan 29 17:32:09 2021 +0530
@@ -21,6 +21,7 @@
#include <ctype.h>
#include <stdlib.h>
#include <string.h>
+#include "pythoncapi_compat.h"
#include "util.h"
@@ -678,7 +679,7 @@
}
assert(PyBytes_Check(ret));
- Py_SIZE(ret) = destlen;
+ Py_SET_SIZE(ret, destlen);
return ret;
}
--- a/mercurial/changegroup.py Wed Jan 27 00:54:57 2021 -0500
+++ b/mercurial/changegroup.py Fri Jan 29 17:32:09 2021 +0530
@@ -323,7 +323,10 @@
cgnodes.append(node)
def onchangelog(cl, node):
- efilesset.update(cl.readfiles(node))
+ rev = cl.rev(node)
+ ctx = cl.changelogrevision(rev)
+ efilesset.update(ctx.files)
+ repo.register_changeset(rev, ctx)
self.changelogheader()
deltas = self.deltaiter()
@@ -331,6 +334,7 @@
deltas,
csmap,
trp,
+ alwayscache=True,
addrevisioncb=onchangelog,
duplicaterevisioncb=ondupchangelog,
):
--- a/mercurial/changelog.py Wed Jan 27 00:54:57 2021 -0500
+++ b/mercurial/changelog.py Fri Jan 29 17:32:09 2021 +0530
@@ -200,6 +200,7 @@
p1copies = attr.ib(default=None)
p2copies = attr.ib(default=None)
description = attr.ib(default=b'')
+ branchinfo = attr.ib(default=(_defaultextra[b'branch'], False))
class changelogrevision(object):
@@ -372,6 +373,11 @@
def description(self):
return encoding.tolocal(self._text[self._offsets[3] + 2 :])
+ @property
+ def branchinfo(self):
+ extra = self.extra
+ return encoding.tolocal(extra.get(b"branch")), b'close' in extra
+
class changelog(revlog.revlog):
def __init__(self, opener, trypending=False):
@@ -601,8 +607,7 @@
This function exists because creating a changectx object
just to access this is costly."""
- extra = self.changelogrevision(rev).extra
- return encoding.tolocal(extra.get(b"branch")), b'close' in extra
+ return self.changelogrevision(rev).branchinfo
def _nodeduplicatecallback(self, transaction, node):
# keep track of revisions that got "re-added", eg: unbunde of know rev.
--- a/mercurial/commands.py Wed Jan 27 00:54:57 2021 -0500
+++ b/mercurial/commands.py Fri Jan 29 17:32:09 2021 +0530
@@ -5447,6 +5447,108 @@
@command(
+ b'purge|clean',
+ [
+ (b'a', b'abort-on-err', None, _(b'abort if an error occurs')),
+ (b'', b'all', None, _(b'purge ignored files too')),
+ (b'i', b'ignored', None, _(b'purge only ignored files')),
+ (b'', b'dirs', None, _(b'purge empty directories')),
+ (b'', b'files', None, _(b'purge files')),
+ (b'p', b'print', None, _(b'print filenames instead of deleting them')),
+ (
+ b'0',
+ b'print0',
+ None,
+ _(
+ b'end filenames with NUL, for use with xargs'
+ b' (implies -p/--print)'
+ ),
+ ),
+ (b'', b'confirm', None, _(b'ask before permanently deleting files')),
+ ]
+ + cmdutil.walkopts,
+ _(b'hg purge [OPTION]... [DIR]...'),
+ helpcategory=command.CATEGORY_WORKING_DIRECTORY,
+)
+def purge(ui, repo, *dirs, **opts):
+ """removes files not tracked by Mercurial
+
+ Delete files not known to Mercurial. This is useful to test local
+ and uncommitted changes in an otherwise-clean source tree.
+
+ This means that purge will delete the following by default:
+
+ - Unknown files: files marked with "?" by :hg:`status`
+ - Empty directories: in fact Mercurial ignores directories unless
+ they contain files under source control management
+
+ But it will leave untouched:
+
+ - Modified and unmodified tracked files
+ - Ignored files (unless -i or --all is specified)
+ - New files added to the repository (with :hg:`add`)
+
+ The --files and --dirs options can be used to direct purge to delete
+ only files, only directories, or both. If neither option is given,
+ both will be deleted.
+
+ If directories are given on the command line, only files in these
+ directories are considered.
+
+ Be careful with purge, as you could irreversibly delete some files
+ you forgot to add to the repository. If you only want to print the
+ list of files that this program would delete, use the --print
+ option.
+ """
+ opts = pycompat.byteskwargs(opts)
+ cmdutil.check_at_most_one_arg(opts, b'all', b'ignored')
+
+ act = not opts.get(b'print')
+ eol = b'\n'
+ if opts.get(b'print0'):
+ eol = b'\0'
+ act = False # --print0 implies --print
+ if opts.get(b'all', False):
+ ignored = True
+ unknown = True
+ else:
+ ignored = opts.get(b'ignored', False)
+ unknown = not ignored
+
+ removefiles = opts.get(b'files')
+ removedirs = opts.get(b'dirs')
+ confirm = opts.get(b'confirm')
+ if confirm is None:
+ try:
+ extensions.find(b'purge')
+ confirm = False
+ except KeyError:
+ confirm = True
+
+ if not removefiles and not removedirs:
+ removefiles = True
+ removedirs = True
+
+ match = scmutil.match(repo[None], dirs, opts)
+
+ paths = mergemod.purge(
+ repo,
+ match,
+ unknown=unknown,
+ ignored=ignored,
+ removeemptydirs=removedirs,
+ removefiles=removefiles,
+ abortonerror=opts.get(b'abort_on_err'),
+ noop=not act,
+ confirm=confirm,
+ )
+
+ for path in paths:
+ if not act:
+ ui.write(b'%s%s' % (path, eol))
+
+
+@command(
b'push',
[
(b'f', b'force', None, _(b'force push')),
@@ -6082,7 +6184,7 @@
if hint:
ui.warn(hint)
- unresolvedf = list(ms.unresolved())
+ unresolvedf = ms.unresolvedcount()
if not unresolvedf:
ui.status(_(b'(no more unresolved files)\n'))
cmdutil.checkafterresolved(repo)
--- a/mercurial/commit.py Wed Jan 27 00:54:57 2021 -0500
+++ b/mercurial/commit.py Fri Jan 29 17:32:09 2021 +0530
@@ -96,6 +96,10 @@
ctx.date(),
extra,
)
+ rev = repo[n].rev()
+ if oldtip != repo.changelog.tiprev():
+ repo.register_changeset(rev, repo.changelog.changelogrevision(rev))
+
xp1, xp2 = p1.hex(), p2 and p2.hex() or b''
repo.hook(
b'pretxncommit',
@@ -108,7 +112,7 @@
targetphase = subrepoutil.newcommitphase(repo.ui, ctx)
# prevent unmarking changesets as public on recommit
- waspublic = oldtip == repo.changelog.tiprev() and not repo[n].phase()
+ waspublic = oldtip == repo.changelog.tiprev() and not repo[rev].phase()
if targetphase and not waspublic:
# retract boundary do not alter parent changeset.
@@ -116,7 +120,7 @@
# be compliant anyway
#
# if minimal phase was 0 we don't need to retract anything
- phases.registernew(repo, tr, targetphase, [repo[n].rev()])
+ phases.registernew(repo, tr, targetphase, [rev])
return n
--- a/mercurial/configitems.py Wed Jan 27 00:54:57 2021 -0500
+++ b/mercurial/configitems.py Fri Jan 29 17:32:09 2021 +0530
@@ -570,6 +570,11 @@
default=0,
)
coreconfigitem(
+ b'convert',
+ b'svn.dangerous-set-commit-dates',
+ default=False,
+)
+coreconfigitem(
b'debug',
b'dirstate.delaywrite',
default=0,
@@ -610,6 +615,12 @@
b'check-relroot',
default=False,
)
+# Track copy information for all file, not just "added" one (very slow)
+coreconfigitem(
+ b'devel',
+ b'copy-tracing.trace-all-files',
+ default=False,
+)
coreconfigitem(
b'devel',
b'default-date',
@@ -729,6 +740,18 @@
b'discovery.randomize',
default=True,
)
+# Control the initial size of the discovery sample
+coreconfigitem(
+ b'devel',
+ b'discovery.sample-size',
+ default=200,
+)
+# Control the initial size of the discovery for initial change
+coreconfigitem(
+ b'devel',
+ b'discovery.sample-size.initial',
+ default=100,
+)
_registerdiffopts(section=b'diff')
coreconfigitem(
b'email',
--- a/mercurial/copies.py Wed Jan 27 00:54:57 2021 -0500
+++ b/mercurial/copies.py Fri Jan 29 17:32:09 2021 +0530
@@ -59,14 +59,13 @@
# Cases 1, 3, and 5 are then removed by _filter().
for k, v in list(t.items()):
- # remove copies from files that didn't exist
- if v not in src:
+ if k == v: # case 3
del t[k]
- # remove criss-crossed copies
- elif k in src and v in dst:
+ elif v not in src: # case 5
+ # remove copies from files that didn't exist
del t[k]
- # remove copies to files that were then removed
- elif k not in dst:
+ elif k not in dst: # case 1
+ # remove copies to files that were then removed
del t[k]
@@ -153,13 +152,21 @@
if b.p1() == a and b.p2().node() == nullid:
filesmatcher = matchmod.exact(b.files())
forwardmissingmatch = matchmod.intersectmatchers(match, filesmatcher)
- missing = _computeforwardmissing(a, b, match=forwardmissingmatch)
+ if repo.ui.configbool(b'devel', b'copy-tracing.trace-all-files'):
+ missing = list(b.walk(match))
+ # _computeforwardmissing(a, b, match=forwardmissingmatch)
+ if debug:
+ dbg(b'debug.copies: searching all files: %d\n' % len(missing))
+ else:
+ missing = _computeforwardmissing(a, b, match=forwardmissingmatch)
+ if debug:
+ dbg(
+ b'debug.copies: missing files to search: %d\n'
+ % len(missing)
+ )
ancestrycontext = a._repo.changelog.ancestors([b.rev()], inclusive=True)
- if debug:
- dbg(b'debug.copies: missing files to search: %d\n' % len(missing))
-
for f in sorted(missing):
if debug:
dbg(b'debug.copies: tracing file: %s\n' % f)
@@ -1220,6 +1227,15 @@
by merge.update().
"""
new_copies = pathcopies(base, ctx)
- _filter(wctx.p1(), wctx, new_copies)
+ parent = wctx.p1()
+ _filter(parent, wctx, new_copies)
+ # Extra filtering to drop copy information for files that existed before
+ # the graft. This is to handle the case of grafting a rename onto a commit
+ # that already has the rename. Otherwise the presence of copy information
+ # would result in the creation of an empty commit where we would prefer to
+ # not create one.
+ for dest, __ in list(new_copies.items()):
+ if dest in parent:
+ del new_copies[dest]
for dst, src in pycompat.iteritems(new_copies):
wctx[dst].markcopied(src)
--- a/mercurial/debugcommands.py Wed Jan 27 00:54:57 2021 -0500
+++ b/mercurial/debugcommands.py Fri Jan 29 17:32:09 2021 +0530
@@ -3717,6 +3717,23 @@
ui.writenoi18n(b' revision %s\n' % v[1])
+@command(b'debugshell', optionalrepo=True)
+def debugshell(ui, repo):
+ """run an interactive Python interpreter
+
+ The local namespace is provided with a reference to the ui and
+ the repo instance (if available).
+ """
+ import code
+
+ imported_objects = {
+ 'ui': ui,
+ 'repo': repo,
+ }
+
+ code.interact(local=imported_objects)
+
+
@command(
b'debugsuccessorssets',
[(b'', b'closest', False, _(b'return closest successors sets only'))],
--- a/mercurial/exchangev2.py Wed Jan 27 00:54:57 2021 -0500
+++ b/mercurial/exchangev2.py Fri Jan 29 17:32:09 2021 +0530
@@ -364,12 +364,15 @@
def onchangeset(cl, node):
progress.increment()
- revision = cl.changelogrevision(node)
+ rev = cl.rev(node)
+ revision = cl.changelogrevision(rev)
added.append(node)
# We need to preserve the mapping of changelog revision to node
# so we can set the linkrev accordingly when manifests are added.
- manifestnodes[cl.rev(node)] = revision.manifest
+ manifestnodes[rev] = revision.manifest
+
+ repo.register_changeset(rev, revision)
nodesbyphase = {phase: set() for phase in phases.phasenames.values()}
remotebookmarks = {}
@@ -420,6 +423,7 @@
iterrevisions(),
linkrev,
weakref.proxy(tr),
+ alwayscache=True,
addrevisioncb=onchangeset,
duplicaterevisioncb=ondupchangeset,
)
--- a/mercurial/filemerge.py Wed Jan 27 00:54:57 2021 -0500
+++ b/mercurial/filemerge.py Fri Jan 29 17:32:09 2021 +0530
@@ -538,6 +538,25 @@
@internaltool(
+ b'merge3-lie-about-conflicts',
+ fullmerge,
+ b'',
+ precheck=_mergecheck,
+)
+def _imerge3alwaysgood(*args, **kwargs):
+ # Like merge3, but record conflicts as resolved with markers in place.
+ #
+ # This is used for `hg diff --merge` to show the differences between
+ # the auto-merge state and the committed merge state. It may be
+ # useful for other things.
+ b1, junk, b2 = _imerge3(*args, **kwargs)
+ # TODO is this right? I'm not sure what these return values mean,
+    # but as far as I can tell this will indicate to callers that the
+ # merge succeeded.
+ return b1, False, b2
+
+
+@internaltool(
b'mergediff',
fullmerge,
_(
@@ -1195,7 +1214,11 @@
def hasconflictmarkers(data):
return bool(
- re.search(b"^(<<<<<<< .*|=======|>>>>>>> .*)$", data, re.MULTILINE)
+ re.search(
+ br"^(<<<<<<<.*|=======.*|------- .*|\+\+\+\+\+\+\+ .*|>>>>>>>.*)$",
+ data,
+ re.MULTILINE,
+ )
)
--- a/mercurial/help.py Wed Jan 27 00:54:57 2021 -0500
+++ b/mercurial/help.py Fri Jan 29 17:32:09 2021 +0530
@@ -829,10 +829,11 @@
def appendcmds(cmds):
cmds = sorted(cmds)
for c in cmds:
+ display_cmd = c
if ui.verbose:
- rst.append(b" :%s: %s\n" % (b', '.join(syns[c]), h[c]))
- else:
- rst.append(b' :%s: %s\n' % (c, h[c]))
+ display_cmd = b', '.join(syns[c])
+ display_cmd = display_cmd.replace(b':', br'\:')
+ rst.append(b' :%s: %s\n' % (display_cmd, h[c]))
if name in (b'shortlist', b'debug'):
# List without categories.
--- a/mercurial/interfaces/repository.py Wed Jan 27 00:54:57 2021 -0500
+++ b/mercurial/interfaces/repository.py Fri Jan 29 17:32:09 2021 +0530
@@ -769,7 +769,13 @@
``nullid``, in which case the header from the delta can be ignored
and the delta used as the fulltext.
+ ``alwayscache`` instructs the lower layers to cache the content of the
+ newly added revision, even if it needs to be explicitly computed.
+ This used to be the default when ``addrevisioncb`` was provided up to
+ Mercurial 5.8.
+
``addrevisioncb`` should be called for each node as it is committed.
+ ``duplicaterevisioncb`` should be called for each pre-existing node.
``maybemissingparents`` is a bool indicating whether the incoming
data may reference parents/ancestor revisions that aren't present.
@@ -1641,6 +1647,14 @@
def revbranchcache():
pass
+ def register_changeset(rev, changelogrevision):
+ """Extension point for caches for new nodes.
+
+ Multiple consumers are expected to need parts of the changelogrevision,
+ so it is provided as optimization to avoid duplicate lookups. A simple
+ cache would be fragile when other revisions are accessed, too."""
+ pass
+
def branchtip(branchtip, ignoremissing=False):
"""Return the tip node for a given branch."""
--- a/mercurial/localrepo.py Wed Jan 27 00:54:57 2021 -0500
+++ b/mercurial/localrepo.py Fri Jan 29 17:32:09 2021 +0530
@@ -2059,6 +2059,9 @@
self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
return self._revbranchcache
+ def register_changeset(self, rev, changelogrevision):
+ self.revbranchcache().setdata(rev, changelogrevision)
+
def branchtip(self, branch, ignoremissing=False):
"""return the tip node for a given branch
@@ -3633,11 +3636,11 @@
# effectively locks out old clients and prevents them from
# mucking with a repo in an unknown format.
#
- # The revlog header has version 2, which won't be recognized by
+ # The revlog header has version 65535, which won't be recognized by
# such old clients.
hgvfs.append(
b'00changelog.i',
- b'\0\0\0\2 dummy changelog to prevent using the old repo '
+ b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
b'layout',
)
--- a/mercurial/manifest.py Wed Jan 27 00:54:57 2021 -0500
+++ b/mercurial/manifest.py Fri Jan 29 17:32:09 2021 +0530
@@ -1836,6 +1836,7 @@
deltas,
linkmapper,
transaction,
+ alwayscache=False,
addrevisioncb=None,
duplicaterevisioncb=None,
):
@@ -1843,6 +1844,7 @@
deltas,
linkmapper,
transaction,
+ alwayscache=alwayscache,
addrevisioncb=addrevisioncb,
duplicaterevisioncb=duplicaterevisioncb,
)
--- a/mercurial/merge.py Wed Jan 27 00:54:57 2021 -0500
+++ b/mercurial/merge.py Fri Jan 29 17:32:09 2021 +0530
@@ -1920,7 +1920,7 @@
if len(pl) > 1:
raise error.Abort(_(b"outstanding uncommitted merge"))
ms = wc.mergestate()
- if list(ms.unresolved()):
+ if ms.unresolvedcount():
raise error.Abort(
_(b"outstanding merge conflicts"),
hint=_(b"use 'hg resolve' to resolve"),
@@ -2324,6 +2324,7 @@
removefiles=True,
abortonerror=False,
noop=False,
+ confirm=False,
):
"""Purge the working directory of untracked files.
@@ -2344,6 +2345,8 @@
``noop`` controls whether to actually remove files. If not defined, actions
will be taken.
+ ``confirm`` ask confirmation before actually removing anything.
+
Returns an iterable of relative paths in the working directory that were
or would be removed.
"""
@@ -2371,6 +2374,35 @@
status = repo.status(match=matcher, ignored=ignored, unknown=unknown)
+ if confirm:
+ nb_ignored = len(status.ignored)
+        nb_unknown = len(status.unknown)
+        if nb_unknown and nb_ignored:
+            msg = _(b"permanently delete %d unknown and %d ignored files?")
+            msg %= (nb_unknown, nb_ignored)
+        elif nb_unknown:
+            msg = _(b"permanently delete %d unknown files?")
+            msg %= nb_unknown
+ elif nb_ignored:
+ msg = _(b"permanently delete %d ignored files?")
+ msg %= nb_ignored
+ elif removeemptydirs:
+ dir_count = 0
+ for f in directories:
+ if matcher(f) and not repo.wvfs.listdir(f):
+ dir_count += 1
+ if dir_count:
+ msg = _(
+ b"permanently delete at least %d empty directories?"
+ )
+ msg %= dir_count
+ else:
+                # XXX we might be missing directories there
+ return res
+ msg += b" (yN)$$ &Yes $$ &No"
+ if repo.ui.promptchoice(msg, default=1) == 1:
+ raise error.CanceledError(_(b'removal cancelled'))
+
if removefiles:
for f in sorted(status.unknown + status.ignored):
if not noop:
--- a/mercurial/mergeutil.py Wed Jan 27 00:54:57 2021 -0500
+++ b/mercurial/mergeutil.py Fri Jan 29 17:32:09 2021 +0530
@@ -13,7 +13,7 @@
def checkunresolved(ms):
- if list(ms.unresolved()):
+ if ms.unresolvedcount():
raise error.StateError(
_(b"unresolved merge conflicts (see 'hg help resolve')")
)
--- a/mercurial/minirst.py Wed Jan 27 00:54:57 2021 -0500
+++ b/mercurial/minirst.py Fri Jan 29 17:32:09 2021 +0530
@@ -158,7 +158,7 @@
_optionre = re.compile(
br'^(-([a-zA-Z0-9]), )?(--[a-z0-9-]+)' br'((.*) +)(.*)$'
)
-_fieldre = re.compile(br':(?![: ])([^:]*)(?<! ):[ ]+(.*)')
+_fieldre = re.compile(br':(?![: ])((?:\:|[^:])*)(?<! ):[ ]+(.*)')
_definitionre = re.compile(br'[^ ]')
_tablere = re.compile(br'(=+\s+)*=+')
@@ -229,7 +229,7 @@
m = _fieldre.match(blocks[j][b'lines'][0])
key, rest = m.groups()
blocks[j][b'lines'][0] = rest
- blocks[j][b'key'] = key
+ blocks[j][b'key'] = key.replace(br'\:', b':')
j += 1
i = j + 1
--- a/mercurial/pure/parsers.py Wed Jan 27 00:54:57 2021 -0500
+++ b/mercurial/pure/parsers.py Fri Jan 29 17:32:09 2021 +0530
@@ -33,13 +33,6 @@
return x
-indexformatng = b">Qiiiiii20s12x"
-indexfirst = struct.calcsize(b'Q')
-sizeint = struct.calcsize(b'i')
-indexsize = struct.calcsize(indexformatng)
-nullitem = (0, 0, 0, -1, -1, -1, -1, nullid)
-
-
def gettype(q):
return int(q & 0xFFFF)
@@ -49,6 +42,12 @@
class BaseIndexObject(object):
+ index_format = b">Qiiiiii20s12x"
+ big_int_size = struct.calcsize(b'Q')
+ int_size = struct.calcsize(b'i')
+ index_size = struct.calcsize(index_format)
+ null_item = (0, 0, 0, -1, -1, -1, -1, nullid)
+
@property
def nodemap(self):
msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
@@ -94,7 +93,7 @@
def append(self, tup):
if '_nodemap' in vars(self):
self._nodemap[tup[7]] = len(self)
- data = _pack(indexformatng, *tup)
+ data = _pack(self.index_format, *tup)
self._extra.append(data)
def _check_index(self, i):
@@ -105,14 +104,14 @@
def __getitem__(self, i):
if i == -1:
- return nullitem
+ return self.null_item
self._check_index(i)
if i >= self._lgt:
data = self._extra[i - self._lgt]
else:
index = self._calculate_index(i)
- data = self._data[index : index + indexsize]
- r = _unpack(indexformatng, data)
+ data = self._data[index : index + self.index_size]
+ r = _unpack(self.index_format, data)
if self._lgt and i == 0:
r = (offset_type(0, gettype(r[0])),) + r[1:]
return r
@@ -120,13 +119,13 @@
class IndexObject(BaseIndexObject):
def __init__(self, data):
- assert len(data) % indexsize == 0
+ assert len(data) % self.index_size == 0
self._data = data
- self._lgt = len(data) // indexsize
+ self._lgt = len(data) // self.index_size
self._extra = []
def _calculate_index(self, i):
- return i * indexsize
+ return i * self.index_size
def __delitem__(self, i):
if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
@@ -135,7 +134,7 @@
self._check_index(i)
self._stripnodes(i)
if i < self._lgt:
- self._data = self._data[: i * indexsize]
+ self._data = self._data[: i * self.index_size]
self._lgt = i
self._extra = []
else:
@@ -198,14 +197,16 @@
if lgt is not None:
self._offsets = [0] * lgt
count = 0
- while off <= len(self._data) - indexsize:
+ while off <= len(self._data) - self.index_size:
+ start = off + self.big_int_size
(s,) = struct.unpack(
- b'>i', self._data[off + indexfirst : off + sizeint + indexfirst]
+ b'>i',
+ self._data[start : start + self.int_size],
)
if lgt is not None:
self._offsets[count] = off
count += 1
- off += indexsize + s
+ off += self.index_size + s
if off != len(self._data):
raise ValueError(b"corrupted data")
return count
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/pythoncapi_compat.h Fri Jan 29 17:32:09 2021 +0530
@@ -0,0 +1,283 @@
+// Header file providing new functions of the Python C API to old Python
+// versions.
+//
+// File distributed under the MIT license.
+//
+// Homepage:
+// https://github.com/pythoncapi/pythoncapi_compat
+//
+// Latest version:
+// https://raw.githubusercontent.com/pythoncapi/pythoncapi_compat/master/pythoncapi_compat.h
+
+#ifndef PYTHONCAPI_COMPAT
+#define PYTHONCAPI_COMPAT
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <Python.h>
+#include "frameobject.h" // PyFrameObject, PyFrame_GetBack()
+
+
+/* VC 2008 doesn't know about the inline keyword. */
+#if defined(_MSC_VER) && _MSC_VER < 1900
+#define inline __forceinline
+#endif
+
+// Cast argument to PyObject* type.
+#ifndef _PyObject_CAST
+# define _PyObject_CAST(op) ((PyObject*)(op))
+#endif
+
+
+// bpo-42262 added Py_NewRef() to Python 3.10.0a3
+#if PY_VERSION_HEX < 0x030a00A3 && !defined(Py_NewRef)
+static inline PyObject* _Py_NewRef(PyObject *obj)
+{
+ Py_INCREF(obj);
+ return obj;
+}
+#define Py_NewRef(obj) _Py_NewRef(_PyObject_CAST(obj))
+#endif
+
+
+// bpo-42262 added Py_XNewRef() to Python 3.10.0a3
+#if PY_VERSION_HEX < 0x030a00A3 && !defined(Py_XNewRef)
+static inline PyObject* _Py_XNewRef(PyObject *obj)
+{
+ Py_XINCREF(obj);
+ return obj;
+}
+#define Py_XNewRef(obj) _Py_XNewRef(_PyObject_CAST(obj))
+#endif
+
+
+// bpo-39573 added Py_SET_REFCNT() to Python 3.9.0a4
+#if PY_VERSION_HEX < 0x030900A4 && !defined(Py_SET_REFCNT)
+static inline void _Py_SET_REFCNT(PyObject *ob, Py_ssize_t refcnt)
+{
+ ob->ob_refcnt = refcnt;
+}
+#define Py_SET_REFCNT(ob, refcnt) _Py_SET_REFCNT((PyObject*)(ob), refcnt)
+#endif
+
+
+// bpo-39573 added Py_SET_TYPE() to Python 3.9.0a4
+#if PY_VERSION_HEX < 0x030900A4 && !defined(Py_SET_TYPE)
+static inline void
+_Py_SET_TYPE(PyObject *ob, PyTypeObject *type)
+{
+ ob->ob_type = type;
+}
+#define Py_SET_TYPE(ob, type) _Py_SET_TYPE((PyObject*)(ob), type)
+#endif
+
+
+// bpo-39573 added Py_SET_SIZE() to Python 3.9.0a4
+#if PY_VERSION_HEX < 0x030900A4 && !defined(Py_SET_SIZE)
+static inline void
+_Py_SET_SIZE(PyVarObject *ob, Py_ssize_t size)
+{
+ ob->ob_size = size;
+}
+#define Py_SET_SIZE(ob, size) _Py_SET_SIZE((PyVarObject*)(ob), size)
+#endif
+
+
+// bpo-40421 added PyFrame_GetCode() to Python 3.9.0b1
+#if PY_VERSION_HEX < 0x030900B1
+static inline PyCodeObject*
+PyFrame_GetCode(PyFrameObject *frame)
+{
+ PyCodeObject *code;
+ assert(frame != NULL);
+ code = frame->f_code;
+ assert(code != NULL);
+ Py_INCREF(code);
+ return code;
+}
+#endif
+
+static inline PyCodeObject*
+_PyFrame_GetCodeBorrow(PyFrameObject *frame)
+{
+ PyCodeObject *code = PyFrame_GetCode(frame);
+ Py_DECREF(code);
+ return code; // borrowed reference
+}
+
+
+// bpo-40421 added PyFrame_GetBack() to Python 3.9.0b1
+#if PY_VERSION_HEX < 0x030900B1
+static inline PyFrameObject*
+PyFrame_GetBack(PyFrameObject *frame)
+{
+ PyFrameObject *back;
+ assert(frame != NULL);
+ back = frame->f_back;
+ Py_XINCREF(back);
+ return back;
+}
+#endif
+
+static inline PyFrameObject*
+_PyFrame_GetBackBorrow(PyFrameObject *frame)
+{
+ PyFrameObject *back = PyFrame_GetBack(frame);
+ Py_XDECREF(back);
+ return back; // borrowed reference
+}
+
+
+// bpo-39947 added PyThreadState_GetInterpreter() to Python 3.9.0a5
+#if PY_VERSION_HEX < 0x030900A5
+static inline PyInterpreterState *
+PyThreadState_GetInterpreter(PyThreadState *tstate)
+{
+ assert(tstate != NULL);
+ return tstate->interp;
+}
+#endif
+
+
+// bpo-40429 added PyThreadState_GetFrame() to Python 3.9.0b1
+#if PY_VERSION_HEX < 0x030900B1
+static inline PyFrameObject*
+PyThreadState_GetFrame(PyThreadState *tstate)
+{
+ PyFrameObject *frame;
+ assert(tstate != NULL);
+ frame = tstate->frame;
+ Py_XINCREF(frame);
+ return frame;
+}
+#endif
+
+static inline PyFrameObject*
+_PyThreadState_GetFrameBorrow(PyThreadState *tstate)
+{
+ PyFrameObject *frame = PyThreadState_GetFrame(tstate);
+ Py_XDECREF(frame);
+ return frame; // borrowed reference
+}
+
+
+// bpo-39947 added PyInterpreterState_Get() to Python 3.9.0a5
+#if PY_VERSION_HEX < 0x030900A5
+static inline PyInterpreterState *
+PyInterpreterState_Get(void)
+{
+ PyThreadState *tstate;
+ PyInterpreterState *interp;
+
+ tstate = PyThreadState_GET();
+ if (tstate == NULL) {
+ Py_FatalError("GIL released (tstate is NULL)");
+ }
+ interp = tstate->interp;
+ if (interp == NULL) {
+ Py_FatalError("no current interpreter");
+ }
+ return interp;
+}
+#endif
+
+
+// bpo-39947 added PyThreadState_GetID() to Python 3.9.0a6
+#if 0x030700A1 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x030900A6
+static inline uint64_t
+PyThreadState_GetID(PyThreadState *tstate)
+{
+ assert(tstate != NULL);
+ return tstate->id;
+}
+#endif
+
+
+// bpo-37194 added PyObject_CallNoArgs() to Python 3.9.0a1
+#if PY_VERSION_HEX < 0x030900A1
+static inline PyObject*
+PyObject_CallNoArgs(PyObject *func)
+{
+ return PyObject_CallFunctionObjArgs(func, NULL);
+}
+#endif
+
+
+// bpo-39245 made PyObject_CallOneArg() public (previously called
+// _PyObject_CallOneArg) in Python 3.9.0a4
+#if PY_VERSION_HEX < 0x030900A4
+static inline PyObject*
+PyObject_CallOneArg(PyObject *func, PyObject *arg)
+{
+ return PyObject_CallFunctionObjArgs(func, arg, NULL);
+}
+#endif
+
+
+// bpo-40024 added PyModule_AddType() to Python 3.9.0a5
+#if PY_VERSION_HEX < 0x030900A5
+static inline int
+PyModule_AddType(PyObject *module, PyTypeObject *type)
+{
+ const char *name, *dot;
+
+ if (PyType_Ready(type) < 0) {
+ return -1;
+ }
+
+ // inline _PyType_Name()
+ name = type->tp_name;
+ assert(name != NULL);
+ dot = strrchr(name, '.');
+ if (dot != NULL) {
+ name = dot + 1;
+ }
+
+ Py_INCREF(type);
+ if (PyModule_AddObject(module, name, (PyObject *)type) < 0) {
+ Py_DECREF(type);
+ return -1;
+ }
+
+ return 0;
+}
+#endif
+
+
+// bpo-40241 added PyObject_GC_IsTracked() to Python 3.9.0a6.
+// bpo-4688 added _PyObject_GC_IS_TRACKED() to Python 2.7.0a2.
+#if PY_VERSION_HEX < 0x030900A6
+static inline int
+PyObject_GC_IsTracked(PyObject* obj)
+{
+ return (PyObject_IS_GC(obj) && _PyObject_GC_IS_TRACKED(obj));
+}
+#endif
+
+// bpo-40241 added PyObject_GC_IsFinalized() to Python 3.9.0a6.
+// bpo-18112 added _PyGCHead_FINALIZED() to Python 3.4.0 final.
+#if PY_VERSION_HEX < 0x030900A6 && PY_VERSION_HEX >= 0x030400F0
+static inline int
+PyObject_GC_IsFinalized(PyObject *obj)
+{
+ return (PyObject_IS_GC(obj) && _PyGCHead_FINALIZED((PyGC_Head *)(obj)-1));
+}
+#endif
+
+
+// bpo-39573 added Py_IS_TYPE() to Python 3.9.0a4
+#if PY_VERSION_HEX < 0x030900A4 && !defined(Py_IS_TYPE)
+static inline int
+_Py_IS_TYPE(const PyObject *ob, const PyTypeObject *type) {
+ return ob->ob_type == type;
+}
+#define Py_IS_TYPE(ob, type) _Py_IS_TYPE((const PyObject*)(ob), type)
+#endif
+
+
+#ifdef __cplusplus
+}
+#endif
+#endif // PYTHONCAPI_COMPAT
--- a/mercurial/revlog.py Wed Jan 27 00:54:57 2021 -0500
+++ b/mercurial/revlog.py Fri Jan 29 17:32:09 2021 +0530
@@ -2375,6 +2375,7 @@
deltas,
linkmapper,
transaction,
+ alwayscache=False,
addrevisioncb=None,
duplicaterevisioncb=None,
):
@@ -2475,7 +2476,7 @@
(baserev, delta),
ifh,
dfh,
- alwayscache=bool(addrevisioncb),
+ alwayscache=alwayscache,
deltacomputer=deltacomputer,
)
--- a/mercurial/setdiscovery.py Wed Jan 27 00:54:57 2021 -0500
+++ b/mercurial/setdiscovery.py Fri Jan 29 17:32:09 2021 +0530
@@ -286,8 +286,6 @@
ui,
local,
remote,
- initialsamplesize=100,
- fullsamplesize=200,
abortwhenunrelated=True,
ancestorsof=None,
audit=None,
@@ -315,7 +313,8 @@
ownheads = [rev for rev in cl.headrevs() if rev != nullrev]
initial_head_exchange = ui.configbool(b'devel', b'discovery.exchange-heads')
-
+ initialsamplesize = ui.configint(b'devel', b'discovery.sample-size.initial')
+ fullsamplesize = ui.configint(b'devel', b'discovery.sample-size')
# We also ask remote about all the local heads. That set can be arbitrarily
# large, so we used to limit it size to `initialsamplesize`. We no longer
# do as it proved counter productive. The skipped heads could lead to a
--- a/mercurial/shelve.py Wed Jan 27 00:54:57 2021 -0500
+++ b/mercurial/shelve.py Fri Jan 29 17:32:09 2021 +0530
@@ -812,7 +812,7 @@
with repo.lock():
checkparents(repo, state)
ms = mergestatemod.mergestate.read(repo)
- if list(ms.unresolved()):
+ if ms.unresolvedcount():
raise error.Abort(
_(b"unresolved conflicts, can't continue"),
hint=_(b"see 'hg resolve', then 'hg unshelve --continue'"),
--- a/mercurial/simplemerge.py Wed Jan 27 00:54:57 2021 -0500
+++ b/mercurial/simplemerge.py Fri Jan 29 17:32:09 2021 +0530
@@ -402,31 +402,6 @@
return sl
- def find_unconflicted(self):
- """Return a list of ranges in base that are not conflicted."""
- am = mdiff.get_matching_blocks(self.basetext, self.atext)
- bm = mdiff.get_matching_blocks(self.basetext, self.btext)
-
- unc = []
-
- while am and bm:
- # there is an unconflicted block at i; how long does it
- # extend? until whichever one ends earlier.
- a1 = am[0][0]
- a2 = a1 + am[0][2]
- b1 = bm[0][0]
- b2 = b1 + bm[0][2]
- i = intersect((a1, a2), (b1, b2))
- if i:
- unc.append(i)
-
- if a2 < b2:
- del am[0]
- else:
- del bm[0]
-
- return unc
-
def _verifytext(text, path, ui, opts):
"""verifies that text is non-binary (unless opts[text] is passed,
--- a/mercurial/store.py Wed Jan 27 00:54:57 2021 -0500
+++ b/mercurial/store.py Fri Jan 29 17:32:09 2021 +0530
@@ -387,13 +387,13 @@
b'requires',
]
+REVLOG_FILES_EXT = (b'.i', b'.d', b'.n', b'.nd')
+
def isrevlog(f, kind, st):
if kind != stat.S_IFREG:
return False
- if f[-2:] in (b'.i', b'.d', b'.n'):
- return True
- return f[-3:] == b'.nd'
+ return f.endswith(REVLOG_FILES_EXT)
class basicstore(object):
--- a/mercurial/unionrepo.py Wed Jan 27 00:54:57 2021 -0500
+++ b/mercurial/unionrepo.py Fri Jan 29 17:32:09 2021 +0530
@@ -128,6 +128,7 @@
deltas,
linkmapper,
transaction,
+ alwayscache=False,
addrevisioncb=None,
duplicaterevisioncb=None,
maybemissingparents=False,
--- a/mercurial/upgrade.py Wed Jan 27 00:54:57 2021 -0500
+++ b/mercurial/upgrade.py Fri Jan 29 17:32:09 2021 +0530
@@ -118,6 +118,7 @@
up_actions,
removed_actions,
revlogs,
+ backup,
)
if not run:
@@ -215,12 +216,6 @@
backuppath = upgrade_engine.upgrade(
ui, repo, dstrepo, upgrade_op
)
- if not backup:
- ui.status(
- _(b'removing old repository content %s\n') % backuppath
- )
- repo.vfs.rmtree(backuppath, forcibly=True)
- backuppath = None
finally:
ui.status(_(b'removing temporary repository %s\n') % tmppath)
--- a/mercurial/upgrade_utils/actions.py Wed Jan 27 00:54:57 2021 -0500
+++ b/mercurial/upgrade_utils/actions.py Fri Jan 29 17:32:09 2021 +0530
@@ -66,6 +66,18 @@
postdowngrademessage
Message intended for humans which will be shown post an upgrade
operation in which this improvement was removed
+
+ touches_filelogs (bool)
+ Whether this improvement touches filelogs
+
+ touches_manifests (bool)
+ Whether this improvement touches manifests
+
+ touches_changelog (bool)
+ Whether this improvement touches changelog
+
+ touches_requirements (bool)
+ Whether this improvement changes repository requirements
"""
def __init__(self, name, type, description, upgrademessage):
@@ -75,6 +87,12 @@
self.upgrademessage = upgrademessage
self.postupgrademessage = None
self.postdowngrademessage = None
+ # By default for now, we assume every improvement touches
+ # all the things
+ self.touches_filelogs = True
+ self.touches_manifests = True
+ self.touches_changelog = True
+ self.touches_requirements = True
def __eq__(self, other):
if not isinstance(other, improvement):
@@ -128,6 +146,12 @@
# operation in which this improvement was removed
postdowngrademessage = None
+ # By default for now, we assume every improvement touches all the things
+ touches_filelogs = True
+ touches_manifests = True
+ touches_changelog = True
+ touches_requirements = True
+
def __init__(self):
raise NotImplementedError()
@@ -267,6 +291,12 @@
b' New shares will be created in safe mode.'
)
+ # upgrade only needs to change the requirements
+ touches_filelogs = False
+ touches_manifests = False
+ touches_changelog = False
+ touches_requirements = True
+
@registerformatvariant
class sparserevlog(requirementformatvariant):
@@ -626,6 +656,7 @@
upgrade_actions,
removed_actions,
revlogs_to_process,
+ backup_store,
):
self.ui = ui
self.new_requirements = new_requirements
@@ -670,6 +701,75 @@
b're-delta-multibase' in self._upgrade_actions_names
)
+ # should this operation create a backup of the store
+ self.backup_store = backup_store
+
+ # whether the operation touches different revlogs at all or not
+ self.touches_filelogs = self._touches_filelogs()
+ self.touches_manifests = self._touches_manifests()
+ self.touches_changelog = self._touches_changelog()
+ # whether the operation touches requirements file or not
+ self.touches_requirements = self._touches_requirements()
+ self.touches_store = (
+ self.touches_filelogs
+ or self.touches_manifests
+ or self.touches_changelog
+ )
+ # does the operation only touch repository requirements
+ self.requirements_only = (
+ self.touches_requirements and not self.touches_store
+ )
+
+ def _touches_filelogs(self):
+ for a in self.upgrade_actions:
+ # in optimisations, we re-process the revlogs again
+ if a.type == OPTIMISATION:
+ return True
+ elif a.touches_filelogs:
+ return True
+ for a in self.removed_actions:
+ if a.touches_filelogs:
+ return True
+ return False
+
+ def _touches_manifests(self):
+ for a in self.upgrade_actions:
+ # in optimisations, we re-process the revlogs again
+ if a.type == OPTIMISATION:
+ return True
+ elif a.touches_manifests:
+ return True
+ for a in self.removed_actions:
+ if a.touches_manifests:
+ return True
+ return False
+
+ def _touches_changelog(self):
+ for a in self.upgrade_actions:
+ # in optimisations, we re-process the revlogs again
+ if a.type == OPTIMISATION:
+ return True
+ elif a.touches_changelog:
+ return True
+ for a in self.removed_actions:
+ if a.touches_changelog:
+ return True
+ return False
+
+ def _touches_requirements(self):
+ for a in self.upgrade_actions:
+ # optimisations are used to re-process revlogs and do not result
+ # in a requirement being added or removed
+ if a.type == OPTIMISATION:
+ pass
+ elif a.touches_requirements:
+ return True
+ for a in self.removed_actions:
+ if a.touches_requirements:
+ return True
+
+ return False
+
def _write_labeled(self, l, label):
"""
Utility function to aid writing of a list under one label
--- a/mercurial/upgrade_utils/engine.py Wed Jan 27 00:54:57 2021 -0500
+++ b/mercurial/upgrade_utils/engine.py Fri Jan 29 17:32:09 2021 +0530
@@ -412,7 +412,10 @@
"""
# TODO: don't blindly rename everything in store
# There can be upgrades where store is not touched at all
- util.rename(currentrepo.spath, backupvfs.join(b'store'))
+ if upgrade_op.backup_store:
+ util.rename(currentrepo.spath, backupvfs.join(b'store'))
+ else:
+ currentrepo.vfs.rmtree(b'store', forcibly=True)
util.rename(upgradedrepo.spath, currentrepo.spath)
@@ -436,6 +439,8 @@
"""
assert srcrepo.currentwlock()
assert dstrepo.currentwlock()
+ backuppath = None
+ backupvfs = None
ui.status(
_(
@@ -444,78 +449,91 @@
)
)
- with dstrepo.transaction(b'upgrade') as tr:
- _clonerevlogs(
- ui,
- srcrepo,
- dstrepo,
- tr,
- upgrade_op,
+ if not upgrade_op.requirements_only:
+ with dstrepo.transaction(b'upgrade') as tr:
+ _clonerevlogs(
+ ui,
+ srcrepo,
+ dstrepo,
+ tr,
+ upgrade_op,
+ )
+
+ # Now copy other files in the store directory.
+ for p in _files_to_copy_post_revlog_clone(srcrepo):
+ srcrepo.ui.status(_(b'copying %s\n') % p)
+ src = srcrepo.store.rawvfs.join(p)
+ dst = dstrepo.store.rawvfs.join(p)
+ util.copyfile(src, dst, copystat=True)
+
+ finishdatamigration(ui, srcrepo, dstrepo, requirements)
+
+ ui.status(_(b'data fully upgraded in a temporary repository\n'))
+
+ if upgrade_op.backup_store:
+ backuppath = pycompat.mkdtemp(
+ prefix=b'upgradebackup.', dir=srcrepo.path
+ )
+ backupvfs = vfsmod.vfs(backuppath)
+
+ # Make a backup of requires file first, as it is the first to be modified.
+ util.copyfile(
+ srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires')
+ )
+
+ # We install an arbitrary requirement that clients must not support
+ # as a mechanism to lock out new clients during the data swap. This is
+ # better than allowing a client to continue while the repository is in
+ # an inconsistent state.
+ ui.status(
+ _(
+ b'marking source repository as being upgraded; clients will be '
+ b'unable to read from repository\n'
+ )
+ )
+ scmutil.writereporequirements(
+ srcrepo, srcrepo.requirements | {b'upgradeinprogress'}
)
- # Now copy other files in the store directory.
- for p in _files_to_copy_post_revlog_clone(srcrepo):
- srcrepo.ui.status(_(b'copying %s\n') % p)
- src = srcrepo.store.rawvfs.join(p)
- dst = dstrepo.store.rawvfs.join(p)
- util.copyfile(src, dst, copystat=True)
-
- finishdatamigration(ui, srcrepo, dstrepo, requirements)
-
- ui.status(_(b'data fully upgraded in a temporary repository\n'))
-
- backuppath = pycompat.mkdtemp(prefix=b'upgradebackup.', dir=srcrepo.path)
- backupvfs = vfsmod.vfs(backuppath)
-
- # Make a backup of requires file first, as it is the first to be modified.
- util.copyfile(srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires'))
-
- # We install an arbitrary requirement that clients must not support
- # as a mechanism to lock out new clients during the data swap. This is
- # better than allowing a client to continue while the repository is in
- # an inconsistent state.
- ui.status(
- _(
- b'marking source repository as being upgraded; clients will be '
- b'unable to read from repository\n'
- )
- )
- scmutil.writereporequirements(
- srcrepo, srcrepo.requirements | {b'upgradeinprogress'}
- )
+ ui.status(_(b'starting in-place swap of repository data\n'))
+ if upgrade_op.backup_store:
+ ui.status(
+ _(b'replaced files will be backed up at %s\n') % backuppath
+ )
- ui.status(_(b'starting in-place swap of repository data\n'))
- ui.status(_(b'replaced files will be backed up at %s\n') % backuppath)
-
- # Now swap in the new store directory. Doing it as a rename should make
- # the operation nearly instantaneous and atomic (at least in well-behaved
- # environments).
- ui.status(_(b'replacing store...\n'))
- tstart = util.timer()
- _replacestores(srcrepo, dstrepo, backupvfs, upgrade_op)
- elapsed = util.timer() - tstart
- ui.status(
- _(
- b'store replacement complete; repository was inconsistent for '
- b'%0.1fs\n'
+ # Now swap in the new store directory. Doing it as a rename should make
+ # the operation nearly instantaneous and atomic (at least in well-behaved
+ # environments).
+ ui.status(_(b'replacing store...\n'))
+ tstart = util.timer()
+ _replacestores(srcrepo, dstrepo, backupvfs, upgrade_op)
+ elapsed = util.timer() - tstart
+ ui.status(
+ _(
+ b'store replacement complete; repository was inconsistent for '
+ b'%0.1fs\n'
+ )
+ % elapsed
)
- % elapsed
- )
- # We first write the requirements file. Any new requirements will lock
- # out legacy clients.
- ui.status(
- _(
- b'finalizing requirements file and making repository readable '
- b'again\n'
+ # We first write the requirements file. Any new requirements will lock
+ # out legacy clients.
+ ui.status(
+ _(
+ b'finalizing requirements file and making repository readable '
+ b'again\n'
+ )
)
- )
- scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)
+ scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)
- # The lock file from the old store won't be removed because nothing has a
- # reference to its new location. So clean it up manually. Alternatively, we
- # could update srcrepo.svfs and other variables to point to the new
- # location. This is simpler.
- backupvfs.unlink(b'store/lock')
+ if upgrade_op.backup_store:
+ # The lock file from the old store won't be removed because nothing has a
+ # reference to its new location. So clean it up manually. Alternatively, we
+ # could update srcrepo.svfs and other variables to point to the new
+ # location. This is simpler.
+ backupvfs.unlink(b'store/lock')
+ else:
+ ui.status(_(b'upgrading repository requirements\n'))
+ scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)
return backuppath
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/pyproject.toml Fri Jan 29 17:32:09 2021 +0530
@@ -0,0 +1,18 @@
+[build-system]
+requires = ["setuptools", "wheel"]
+build-backend = "setuptools.build_meta"
+
+[tool.black]
+line-length = 80
+exclude = '''
+build/
+| wheelhouse/
+| dist/
+| packages/
+| \.hg/
+| \.mypy_cache/
+| \.venv/
+| mercurial/thirdparty/
+'''
+skip-string-normalization = true
+quiet = true
--- a/relnotes/next Wed Jan 27 00:54:57 2021 -0500
+++ b/relnotes/next Fri Jan 29 17:32:09 2021 +0530
@@ -17,6 +17,8 @@
can be e.g. `rebase`. As part of this effort, the default format
from `hg rebase` was reorganized a bit.
+ * `hg purge` is now a core command using `--confirm` by default.
+
* `hg strip`, from the strip extension, is now a core command, `hg
debugstrip`. The extension remains for compatibility.
@@ -42,6 +44,9 @@
* The `branchmap` cache is updated more intelligently and can be
significantly faster for repositories with many branches and changesets.
+ * The `rev-branch-cache` is now updated incrementally whenever changesets
+ are added.
+
== New Experimental Features ==
@@ -63,4 +68,5 @@
== Internal API Changes ==
-
+ * `changelog.branchinfo` is deprecated and will be removed after 5.8.
+ It is superseded by `changelogrevision.branchinfo`.
--- a/rust/Cargo.lock Wed Jan 27 00:54:57 2021 -0500
+++ b/rust/Cargo.lock Fri Jan 29 17:32:09 2021 +0530
@@ -55,6 +55,24 @@
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
+name = "bytes-cast"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "bytes-cast-derive 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "bytes-cast-derive"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "proc-macro2 1.0.24 (registry+https://github.com/rust-lang/crates.io-index)",
+ "quote 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "syn 1.0.54 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
name = "cc"
version = "1.0.66"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -277,6 +295,7 @@
version = "0.1.0"
dependencies = [
"byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "bytes-cast 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"clap 2.33.3 (registry+https://github.com/rust-lang/crates.io-index)",
"crossbeam-channel 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)",
"flate2 1.0.19 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -910,6 +929,8 @@
"checksum bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
"checksum bitmaps 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "031043d04099746d8db04daf1fa424b2bc8bd69d92b25962dcde24da39ab64a2"
"checksum byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de"
+"checksum bytes-cast 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3196ba300c7bc9282a4331e878496cb3e9603a898a8f1446601317163e16ca52"
+"checksum bytes-cast-derive 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "cb936af9de38476664d6b58e529aff30d482e4ce1c5e150293d00730b0d81fdb"
"checksum cc 1.0.66 (registry+https://github.com/rust-lang/crates.io-index)" = "4c0496836a84f8d0495758516b8621a622beb77c0fed418570e50764093ced48"
"checksum cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
"checksum cfg-if 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
--- a/rust/hg-core/Cargo.toml Wed Jan 27 00:54:57 2021 -0500
+++ b/rust/hg-core/Cargo.toml Fri Jan 29 17:32:09 2021 +0530
@@ -9,6 +9,7 @@
name = "hg"
[dependencies]
+bytes-cast = "0.1"
byteorder = "1.3.4"
hex = "0.4.2"
im-rc = "15.0.*"
--- a/rust/hg-core/src/lib.rs Wed Jan 27 00:54:57 2021 -0500
+++ b/rust/hg-core/src/lib.rs Fri Jan 29 17:32:09 2021 +0530
@@ -30,14 +30,6 @@
pub mod operations;
pub mod utils;
-// Remove this to see (potential) non-artificial compile failures. MacOS
-// *should* compile, but fail to compile tests for example as of 2020-03-06
-#[cfg(not(target_os = "linux"))]
-compile_error!(
- "`hg-core` has only been tested on Linux and will most \
- likely not behave correctly on other platforms."
-);
-
use crate::utils::hg_path::{HgPathBuf, HgPathError};
pub use filepatterns::{
parse_pattern_syntax, read_pattern_file, IgnorePattern,
--- a/rust/hg-core/src/revlog/node.rs Wed Jan 27 00:54:57 2021 -0500
+++ b/rust/hg-core/src/revlog/node.rs Fri Jan 29 17:32:09 2021 +0530
@@ -8,8 +8,9 @@
//! In Mercurial code base, it is customary to call "a node" the binary SHA
//! of a revision.
+use bytes_cast::BytesCast;
use hex::{self, FromHex, FromHexError};
-use std::convert::{TryFrom, TryInto};
+use std::convert::TryFrom;
/// The length in bytes of a `Node`
///
@@ -49,7 +50,7 @@
///
/// [`nybbles_len`]: #method.nybbles_len
/// [`ExactLengthRequired`]: struct.NodeError#variant.ExactLengthRequired
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, BytesCast)]
#[repr(transparent)]
pub struct Node {
data: NodeData,
@@ -68,14 +69,14 @@
/// Return an error if the slice has an unexpected length
impl<'a> TryFrom<&'a [u8]> for &'a Node {
- type Error = std::array::TryFromSliceError;
+ type Error = ();
#[inline]
fn try_from(bytes: &'a [u8]) -> Result<&'a Node, Self::Error> {
- let data = bytes.try_into()?;
- // Safety: `#[repr(transparent)]` makes it ok to "wrap" the target
- // of a reference to the type of the single field.
- Ok(unsafe { std::mem::transmute::<&NodeData, &Node>(data) })
+ match Node::from_bytes(bytes) {
+ Ok((node, rest)) if rest.is_empty() => Ok(node),
+ _ => Err(()),
+ }
}
}
--- a/rust/hg-core/src/revlog/nodemap.rs Wed Jan 27 00:54:57 2021 -0500
+++ b/rust/hg-core/src/revlog/nodemap.rs Fri Jan 29 17:32:09 2021 +0530
@@ -17,12 +17,12 @@
RevlogIndex, NULL_REVISION,
};
+use bytes_cast::{unaligned, BytesCast};
use std::cmp::max;
use std::fmt;
-use std::mem;
+use std::mem::{self, align_of, size_of};
use std::ops::Deref;
use std::ops::Index;
-use std::slice;
#[derive(Debug, PartialEq)]
pub enum NodeMapError {
@@ -149,7 +149,7 @@
/// Low level NodeTree [`Blocks`] elements
///
/// These are exactly as for instance on persistent storage.
-type RawElement = i32;
+type RawElement = unaligned::I32Be;
/// High level representation of values in NodeTree
/// [`Blocks`](struct.Block.html)
@@ -168,23 +168,24 @@
///
/// See [`Block`](struct.Block.html) for explanation about the encoding.
fn from(raw: RawElement) -> Element {
- if raw >= 0 {
- Element::Block(raw as usize)
- } else if raw == -1 {
+ let int = raw.get();
+ if int >= 0 {
+ Element::Block(int as usize)
+ } else if int == -1 {
Element::None
} else {
- Element::Rev(-raw - 2)
+ Element::Rev(-int - 2)
}
}
}
impl From<Element> for RawElement {
fn from(element: Element) -> RawElement {
- match element {
+ RawElement::from(match element {
Element::None => 0,
- Element::Block(i) => i as RawElement,
+ Element::Block(i) => i as i32,
Element::Rev(rev) => -rev - 2,
- }
+ })
}
}
@@ -212,42 +213,24 @@
/// represented at all, because we want an immutable empty nodetree
/// to be valid.
-#[derive(Copy, Clone)]
-pub struct Block([u8; BLOCK_SIZE]);
+const ELEMENTS_PER_BLOCK: usize = 16; // number of different values in a nybble
-/// Not derivable for arrays of length >32 until const generics are stable
-impl PartialEq for Block {
- fn eq(&self, other: &Self) -> bool {
- self.0[..] == other.0[..]
- }
-}
-
-pub const BLOCK_SIZE: usize = 64;
+#[derive(Copy, Clone, BytesCast, PartialEq)]
+#[repr(transparent)]
+pub struct Block([RawElement; ELEMENTS_PER_BLOCK]);
impl Block {
fn new() -> Self {
- // -1 in 2's complement to create an absent node
- let byte: u8 = 255;
- Block([byte; BLOCK_SIZE])
+ let absent_node = RawElement::from(-1);
+ Block([absent_node; ELEMENTS_PER_BLOCK])
}
fn get(&self, nybble: u8) -> Element {
- let index = nybble as usize * mem::size_of::<RawElement>();
- Element::from(RawElement::from_be_bytes([
- self.0[index],
- self.0[index + 1],
- self.0[index + 2],
- self.0[index + 3],
- ]))
+ self.0[nybble as usize].into()
}
fn set(&mut self, nybble: u8, element: Element) {
- let values = RawElement::to_be_bytes(element.into());
- let index = nybble as usize * mem::size_of::<RawElement>();
- self.0[index] = values[0];
- self.0[index + 1] = values[1];
- self.0[index + 2] = values[2];
- self.0[index + 3] = values[3];
+ self.0[nybble as usize] = element.into()
}
}
@@ -398,16 +381,17 @@
// Transmute the `Vec<Block>` to a `Vec<u8>`. Blocks are contiguous
// bytes, so this is perfectly safe.
let bytes = unsafe {
- // Assert that `Block` hasn't been changed and has no padding
- let _: [u8; 4 * BLOCK_SIZE] =
- std::mem::transmute([Block::new(); 4]);
+ // Check for compatible allocation layout.
+ // (Optimized away by constant-folding + dead code elimination.)
+ assert_eq!(size_of::<Block>(), 64);
+ assert_eq!(align_of::<Block>(), 1);
// /!\ Any use of `vec` after this is use-after-free.
// TODO: use `into_raw_parts` once stabilized
Vec::from_raw_parts(
vec.as_ptr() as *mut u8,
- vec.len() * BLOCK_SIZE,
- vec.capacity() * BLOCK_SIZE,
+ vec.len() * size_of::<Block>(),
+ vec.capacity() * size_of::<Block>(),
)
};
(readonly, bytes)
@@ -613,7 +597,7 @@
amount: usize,
) -> Self {
assert!(buffer.len() >= amount);
- let len_in_blocks = amount / BLOCK_SIZE;
+ let len_in_blocks = amount / size_of::<Block>();
NodeTreeBytes {
buffer,
len_in_blocks,
@@ -625,12 +609,11 @@
type Target = [Block];
fn deref(&self) -> &[Block] {
- unsafe {
- slice::from_raw_parts(
- (&self.buffer).as_ptr() as *const Block,
- self.len_in_blocks,
- )
- }
+ Block::slice_from_bytes(&self.buffer, self.len_in_blocks)
+ // `NodeTreeBytes::new` already asserted that `self.buffer` is
+ // large enough.
+ .unwrap()
+ .0
}
}
@@ -774,13 +757,13 @@
let mut raw = [255u8; 64];
let mut counter = 0;
- for val in [0, 15, -2, -1, -3].iter() {
- for byte in RawElement::to_be_bytes(*val).iter() {
+ for val in [0_i32, 15, -2, -1, -3].iter() {
+ for byte in val.to_be_bytes().iter() {
raw[counter] = *byte;
counter += 1;
}
}
- let block = Block(raw);
+ let (block, _) = Block::from_bytes(&raw).unwrap();
assert_eq!(block.get(0), Element::Block(0));
assert_eq!(block.get(1), Element::Block(15));
assert_eq!(block.get(3), Element::None);
@@ -1108,7 +1091,7 @@
let (_, bytes) = idx.nt.into_readonly_and_added_bytes();
// only the root block has been changed
- assert_eq!(bytes.len(), BLOCK_SIZE);
+ assert_eq!(bytes.len(), size_of::<Block>());
// big endian for -2
assert_eq!(&bytes[4..2 * 4], [255, 255, 255, 254]);
// big endian for -6
--- a/rust/hg-core/src/revlog/nodemap_docket.rs Wed Jan 27 00:54:57 2021 -0500
+++ b/rust/hg-core/src/revlog/nodemap_docket.rs Fri Jan 29 17:32:09 2021 +0530
@@ -1,5 +1,5 @@
+use bytes_cast::{unaligned, BytesCast};
use memmap::Mmap;
-use std::convert::TryInto;
use std::path::{Path, PathBuf};
use super::revlog::RevlogError;
@@ -13,6 +13,16 @@
// TODO: keep here more of the data from `parse()` when we need it
}
+#[derive(BytesCast)]
+#[repr(C)]
+struct DocketHeader {
+ uid_size: u8,
+ _tip_rev: unaligned::U64Be,
+ data_length: unaligned::U64Be,
+ _data_unused: unaligned::U64Be,
+ tip_node_size: unaligned::U64Be,
+}
+
impl NodeMapDocket {
/// Return `Ok(None)` when the caller should proceed without a persistent
/// nodemap:
@@ -36,25 +46,22 @@
Ok(bytes) => bytes,
};
- let mut input = if let Some((&ONDISK_VERSION, rest)) =
+ let input = if let Some((&ONDISK_VERSION, rest)) =
docket_bytes.split_first()
{
rest
} else {
return Ok(None);
};
- let input = &mut input;
- let uid_size = read_u8(input)? as usize;
- let _tip_rev = read_be_u64(input)?;
+ let (header, rest) = DocketHeader::from_bytes(input)?;
+ let uid_size = header.uid_size as usize;
// TODO: do we care about overflow for 4 GB+ nodemap files on 32-bit
// systems?
- let data_length = read_be_u64(input)? as usize;
- let _data_unused = read_be_u64(input)?;
- let tip_node_size = read_be_u64(input)? as usize;
- let uid = read_bytes(input, uid_size)?;
- let _tip_node = read_bytes(input, tip_node_size)?;
-
+ let tip_node_size = header.tip_node_size.get() as usize;
+ let data_length = header.data_length.get() as usize;
+ let (uid, rest) = u8::slice_from_bytes(rest, uid_size)?;
+ let (_tip_node, _rest) = u8::slice_from_bytes(rest, tip_node_size)?;
let uid =
std::str::from_utf8(uid).map_err(|_| RevlogError::Corrupted)?;
let docket = NodeMapDocket { data_length };
@@ -81,29 +88,6 @@
}
}
-fn read_bytes<'a>(
- input: &mut &'a [u8],
- count: usize,
-) -> Result<&'a [u8], RevlogError> {
- if let Some(start) = input.get(..count) {
- *input = &input[count..];
- Ok(start)
- } else {
- Err(RevlogError::Corrupted)
- }
-}
-
-fn read_u8<'a>(input: &mut &[u8]) -> Result<u8, RevlogError> {
- Ok(read_bytes(input, 1)?[0])
-}
-
-fn read_be_u64<'a>(input: &mut &[u8]) -> Result<u64, RevlogError> {
- let array = read_bytes(input, std::mem::size_of::<u64>())?
- .try_into()
- .unwrap();
- Ok(u64::from_be_bytes(array))
-}
-
fn rawdata_path(docket_path: &Path, uid: &str) -> PathBuf {
let docket_name = docket_path
.file_name()
--- a/rust/hg-core/src/revlog/revlog.rs Wed Jan 27 00:54:57 2021 -0500
+++ b/rust/hg-core/src/revlog/revlog.rs Fri Jan 29 17:32:09 2021 +0530
@@ -29,6 +29,12 @@
UnknowDataFormat(u8),
}
+impl From<bytes_cast::FromBytesError> for RevlogError {
+ fn from(_: bytes_cast::FromBytesError) -> Self {
+ RevlogError::Corrupted
+ }
+}
+
/// Read only implementation of revlog.
pub struct Revlog {
/// When index and data are not interleaved: bytes of the revlog index.
--- a/setup.py Wed Jan 27 00:54:57 2021 -0500
+++ b/setup.py Fri Jan 29 17:32:09 2021 +0530
@@ -609,6 +609,12 @@
# and its build is not explictely disabled (for external build
# as Linux distributions would do)
if self.distribution.rust and self.rust:
+ if not sys.platform.startswith('linux'):
+ self.warn(
+ "rust extensions have only been tested on Linux "
+ "and may not behave correctly on other platforms"
+ )
+
for rustext in ruststandalones:
rustext.build('' if self.inplace else self.build_lib)
--- a/tests/hghave.py Wed Jan 27 00:54:57 2021 -0500
+++ b/tests/hghave.py Fri Jan 29 17:32:09 2021 +0530
@@ -591,7 +591,7 @@
return matchoutput("pylint --help", br"Usage:[ ]+pylint", True)
-@check("clang-format", "clang-format C code formatter")
+@check("clang-format", "clang-format C code formatter (>= 11)")
def has_clang_format():
m = matchoutput('clang-format --version', br'clang-format version (\d+)')
# style changed somewhere between 10.x and 11.x
@@ -1034,7 +1034,7 @@
return matchoutput('sqlite3 -version', br'^3\.\d+')
-@check('vcr', 'vcr http mocking library')
+@check('vcr', 'vcr http mocking library (pytest-vcr)')
def has_vcr():
try:
import vcr
@@ -1054,7 +1054,7 @@
return matchoutput('emacs --version', b'GNU Emacs 2(4.4|4.5|5|6|7|8|9)')
-@check('black', 'the black formatter for python')
+@check('black', 'the black formatter for python (>= 20.8b1)')
def has_black():
blackcmd = 'black --version'
version_regex = b'black, version ([0-9a-b.]+)'
--- a/tests/run-tests.py Wed Jan 27 00:54:57 2021 -0500
+++ b/tests/run-tests.py Fri Jan 29 17:32:09 2021 +0530
@@ -2278,7 +2278,7 @@
if test.path.endswith(b'.t'):
rename(test.errpath, test.path)
else:
- rename(test.errpath, '%s.out' % test.path)
+ rename(test.errpath, b'%s.out' % test.path)
accepted = True
if not accepted:
self.faildata[test.name] = b''.join(lines)
--- a/tests/svnxml.py Wed Jan 27 00:54:57 2021 -0500
+++ b/tests/svnxml.py Fri Jan 29 17:32:09 2021 +0530
@@ -15,6 +15,7 @@
e['revision'] = entry.getAttribute('revision')
e['author'] = xmltext(entry.getElementsByTagName('author')[0])
e['msg'] = xmltext(entry.getElementsByTagName('msg')[0])
+ e['date'] = xmltext(entry.getElementsByTagName('date')[0])
e['paths'] = []
paths = entry.getElementsByTagName('paths')
if paths:
@@ -42,7 +43,7 @@
except AttributeError:
fp = sys.stdout
for e in entries:
- for k in ('revision', 'author', 'msg'):
+ for k in ('revision', 'author', 'date', 'msg'):
fp.write(('%s: %s\n' % (k, e[k])).encode('utf-8'))
for path, action, fpath, frev in sorted(e['paths']):
frominfo = b''
--- a/tests/test-acl.t Wed Jan 27 00:54:57 2021 -0500
+++ b/tests/test-acl.t Fri Jan 29 17:32:09 2021 +0530
@@ -204,6 +204,7 @@
bundle2-input-part: "phase-heads" supported
bundle2-input-part: total payload size 24
bundle2-input-bundle: 5 parts total
+ truncating cache/rbc-revs-v1 to 8
updating the branch cache
added 3 changesets with 3 changes to 3 files
bundle2-output-bundle: "HG20", 1 parts total
@@ -283,6 +284,7 @@
bundle2-input-part: "phase-heads" supported
bundle2-input-part: total payload size 24
bundle2-input-bundle: 5 parts total
+ truncating cache/rbc-revs-v1 to 8
updating the branch cache
added 3 changesets with 3 changes to 3 files
bundle2-output-bundle: "HG20", 1 parts total
@@ -806,6 +808,7 @@
acl: acl.deny.bookmarks not enabled
acl: bookmark access granted: "ef1ea85a6374b77d6da9dcda9541f498f2d17df7" on bookmark "moving-bookmark"
bundle2-input-bundle: 7 parts total
+ truncating cache/rbc-revs-v1 to 8
updating the branch cache
invalid branch cache (served.hidden): tip differs
added 1 changesets with 1 changes to 1 files
@@ -982,6 +985,7 @@
bundle2-input-part: "phase-heads" supported
bundle2-input-part: total payload size 24
bundle2-input-bundle: 5 parts total
+ truncating cache/rbc-revs-v1 to 8
updating the branch cache
added 3 changesets with 3 changes to 3 files
bundle2-output-bundle: "HG20", 1 parts total
@@ -1318,6 +1322,7 @@
bundle2-input-part: "phase-heads" supported
bundle2-input-part: total payload size 24
bundle2-input-bundle: 5 parts total
+ truncating cache/rbc-revs-v1 to 8
updating the branch cache
added 3 changesets with 3 changes to 3 files
bundle2-output-bundle: "HG20", 1 parts total
@@ -1408,6 +1413,7 @@
bundle2-input-part: "phase-heads" supported
bundle2-input-part: total payload size 24
bundle2-input-bundle: 5 parts total
+ truncating cache/rbc-revs-v1 to 8
updating the branch cache
added 3 changesets with 3 changes to 3 files
bundle2-output-bundle: "HG20", 1 parts total
@@ -1577,6 +1583,7 @@
bundle2-input-part: "phase-heads" supported
bundle2-input-part: total payload size 24
bundle2-input-bundle: 5 parts total
+ truncating cache/rbc-revs-v1 to 8
updating the branch cache
added 3 changesets with 3 changes to 3 files
bundle2-output-bundle: "HG20", 1 parts total
--- a/tests/test-check-code.t Wed Jan 27 00:54:57 2021 -0500
+++ b/tests/test-check-code.t Fri Jan 29 17:32:09 2021 +0530
@@ -11,6 +11,7 @@
> -X contrib/python-zstandard \
> -X hgext/fsmonitor/pywatchman \
> -X mercurial/thirdparty \
+ > -X mercurial/pythoncapi_compat.h \
> | sed 's-\\-/-g' | "$check_code" --warnings --per-file=0 - || false
Skipping contrib/automation/hgautomation/__init__.py it has no-che?k-code (glob)
Skipping contrib/automation/hgautomation/aws.py it has no-che?k-code (glob)
@@ -65,10 +66,10 @@
COPYING
Makefile
README.rst
- black.toml
hg
hgeditor
hgweb.cgi
+ pyproject.toml
rustfmt.toml
setup.py
--- a/tests/test-check-format.t Wed Jan 27 00:54:57 2021 -0500
+++ b/tests/test-check-format.t Fri Jan 29 17:32:09 2021 +0530
@@ -1,5 +1,5 @@
#require black test-repo
$ cd $RUNTESTDIR/..
- $ black --config=black.toml --check --diff `hg files 'set:(**.py + grep("^#!.*python")) - mercurial/thirdparty/**'`
+ $ black --check --diff `hg files 'set:(**.py + grep("^#!.*python")) - mercurial/thirdparty/**'`
--- a/tests/test-completion.t Wed Jan 27 00:54:57 2021 -0500
+++ b/tests/test-completion.t Fri Jan 29 17:32:09 2021 +0530
@@ -38,6 +38,7 @@
paths
phase
pull
+ purge
push
recover
remove
@@ -129,6 +130,7 @@
debugrevspec
debugserve
debugsetparents
+ debugshell
debugsidedata
debugssl
debugstrip
@@ -318,6 +320,7 @@
debugrevspec: optimize, show-revs, show-set, show-stage, no-optimized, verify-optimized
debugserve: sshstdio, logiofd, logiofile
debugsetparents:
+ debugshell:
debugsidedata: changelog, manifest, dir
debugssl:
debugstrip: rev, force, no-backup, nobackup, , keep, bookmark, soft
@@ -354,6 +357,7 @@
paths: template
phase: public, draft, secret, force, rev
pull: update, force, confirm, rev, bookmark, branch, ssh, remotecmd, insecure
+ purge: abort-on-err, all, ignored, dirs, files, print, print0, confirm, include, exclude
push: force, rev, bookmark, all-bookmarks, branch, new-branch, pushvars, publish, ssh, remotecmd, insecure
recover: verify
remove: after, force, subrepos, include, exclude, dry-run
--- a/tests/test-contrib-perf.t Wed Jan 27 00:54:57 2021 -0500
+++ b/tests/test-contrib-perf.t Fri Jan 29 17:32:09 2021 +0530
@@ -78,111 +78,137 @@
list of commands:
- perfaddremove
+ perf::addremove
+ (no help text available)
+ perf::ancestors
(no help text available)
- perfancestors
+ perf::ancestorset
(no help text available)
- perfancestorset
+ perf::annotate
(no help text available)
- perfannotate (no help text available)
- perfbdiff benchmark a bdiff between revisions
- perfbookmarks
+ perf::bdiff benchmark a bdiff between revisions
+ perf::bookmarks
benchmark parsing bookmarks from disk to memory
- perfbranchmap
+ perf::branchmap
benchmark the update of a branchmap
- perfbranchmapload
+ perf::branchmapload
benchmark reading the branchmap
- perfbranchmapupdate
+ perf::branchmapupdate
benchmark branchmap update from for <base> revs to <target>
revs
- perfbundleread
+ perf::bundleread
Benchmark reading of bundle files.
- perfcca (no help text available)
- perfchangegroupchangelog
+ perf::cca (no help text available)
+ perf::changegroupchangelog
Benchmark producing a changelog group for a changegroup.
- perfchangeset
+ perf::changeset
+ (no help text available)
+ perf::ctxfiles
(no help text available)
- perfctxfiles (no help text available)
- perfdiffwd Profile diff of working directory changes
- perfdirfoldmap
+ perf::diffwd Profile diff of working directory changes
+ perf::dirfoldmap
benchmap a 'dirstate._map.dirfoldmap.get()' request
- perfdirs (no help text available)
- perfdirstate benchmap the time of various distate operations
- perfdirstatedirs
+ perf::dirs (no help text available)
+ perf::dirstate
+ benchmap the time of various distate operations
+ perf::dirstatedirs
benchmap a 'dirstate.hasdir' call from an empty 'dirs' cache
- perfdirstatefoldmap
+ perf::dirstatefoldmap
benchmap a 'dirstate._map.filefoldmap.get()' request
- perfdirstatewrite
+ perf::dirstatewrite
benchmap the time it take to write a dirstate on disk
- perfdiscovery
+ perf::discovery
benchmark discovery between local repo and the peer at given
path
- perffncacheencode
+ perf::fncacheencode
(no help text available)
- perffncacheload
+ perf::fncacheload
(no help text available)
- perffncachewrite
+ perf::fncachewrite
(no help text available)
- perfheads benchmark the computation of a changelog heads
- perfhelper-mergecopies
+ perf::heads benchmark the computation of a changelog heads
+ perf::helper-mergecopies
find statistics about potential parameters for
'perfmergecopies'
- perfhelper-pathcopies
+ perf::helper-pathcopies
find statistic about potential parameters for the
'perftracecopies'
- perfignore benchmark operation related to computing ignore
- perfindex benchmark index creation time followed by a lookup
- perflinelogedits
+ perf::ignore benchmark operation related to computing ignore
+ perf::index benchmark index creation time followed by a lookup
+ perf::linelogedits
(no help text available)
- perfloadmarkers
+ perf::loadmarkers
benchmark the time to parse the on-disk markers for a repo
- perflog (no help text available)
- perflookup (no help text available)
- perflrucachedict
+ perf::log (no help text available)
+ perf::lookup (no help text available)
+ perf::lrucachedict
(no help text available)
- perfmanifest benchmark the time to read a manifest from disk and return a
+ perf::manifest
+ benchmark the time to read a manifest from disk and return a
usable
- perfmergecalculate
+ perf::mergecalculate
(no help text available)
- perfmergecopies
+ perf::mergecopies
measure runtime of 'copies.mergecopies'
- perfmoonwalk benchmark walking the changelog backwards
- perfnodelookup
+ perf::moonwalk
+ benchmark walking the changelog backwards
+ perf::nodelookup
(no help text available)
- perfnodemap benchmark the time necessary to look up revision from a cold
+ perf::nodemap
+ benchmark the time necessary to look up revision from a cold
nodemap
- perfparents benchmark the time necessary to fetch one changeset's parents.
- perfpathcopies
+ perf::parents
+ benchmark the time necessary to fetch one changeset's parents.
+ perf::pathcopies
benchmark the copy tracing logic
- perfphases benchmark phasesets computation
- perfphasesremote
+ perf::phases benchmark phasesets computation
+ perf::phasesremote
benchmark time needed to analyse phases of the remote server
- perfprogress printing of progress bars
- perfrawfiles (no help text available)
- perfrevlogchunks
+ perf::progress
+ printing of progress bars
+ perf::rawfiles
+ (no help text available)
+ perf::revlogchunks
Benchmark operations on revlog chunks.
- perfrevlogindex
+ perf::revlogindex
Benchmark operations against a revlog index.
- perfrevlogrevision
+ perf::revlogrevision
Benchmark obtaining a revlog revision.
- perfrevlogrevisions
+ perf::revlogrevisions
Benchmark reading a series of revisions from a revlog.
- perfrevlogwrite
+ perf::revlogwrite
Benchmark writing a series of revisions to a revlog.
- perfrevrange (no help text available)
- perfrevset benchmark the execution time of a revset
- perfstartup (no help text available)
- perfstatus benchmark the performance of a single status call
- perftags (no help text available)
- perftemplating
+ perf::revrange
+ (no help text available)
+ perf::revset benchmark the execution time of a revset
+ perf::startup
+ (no help text available)
+ perf::status benchmark the performance of a single status call
+ perf::tags (no help text available)
+ perf::templating
test the rendering time of a given template
- perfunidiff benchmark a unified diff between revisions
- perfvolatilesets
+ perf::unidiff
+ benchmark a unified diff between revisions
+ perf::volatilesets
benchmark the computation of various volatile set
- perfwalk (no help text available)
- perfwrite microbenchmark ui.write (and others)
+ perf::walk (no help text available)
+ perf::write microbenchmark ui.write (and others)
(use 'hg help -v perf' to show built-in aliases and global options)
+
+ $ hg help perfaddremove
+ hg perf::addremove
+
+ aliases: perfaddremove
+
+ (no help text available)
+
+ options:
+
+ -T --template TEMPLATE display with template
+
+ (some details hidden, use --verbose to show complete help)
+
$ hg perfaddremove
$ hg perfancestors
$ hg perfancestorset 2
--- a/tests/test-convert-svn-sink.t Wed Jan 27 00:54:57 2021 -0500
+++ b/tests/test-convert-svn-sink.t Fri Jan 29 17:32:09 2021 +0530
@@ -54,10 +54,12 @@
2 2 test a
revision: 2
author: test
+ date: * (glob)
msg: modify a file
M /a
revision: 1
author: test
+ date: * (glob)
msg: add a file
A /a
A /d1
@@ -95,6 +97,7 @@
3 3 test b
revision: 3
author: test
+ date: * (glob)
msg: rename a file
D /a
A /b (from /a@2)
@@ -131,6 +134,7 @@
4 4 test c
revision: 4
author: test
+ date: * (glob)
msg: copy a file
A /c (from /b@3)
$ ls a a-hg-wc
@@ -167,6 +171,7 @@
5 5 test .
revision: 5
author: test
+ date: * (glob)
msg: remove a file
D /b
$ ls a a-hg-wc
@@ -209,6 +214,7 @@
6 6 test c
revision: 6
author: test
+ date: * (glob)
msg: make a file executable
M /c
#if execbit
@@ -247,6 +253,7 @@
8 8 test newlink
revision: 8
author: test
+ date: * (glob)
msg: move symlink
D /link
A /newlink (from /link@7)
@@ -278,6 +285,7 @@
7 7 test f
revision: 7
author: test
+ date: * (glob)
msg: f
D /c
A /d
@@ -315,6 +323,7 @@
1 1 test d1/a
revision: 1
author: test
+ date: * (glob)
msg: add executable file in new directory
A /d1
A /d1/a
@@ -343,6 +352,7 @@
2 2 test d2/a
revision: 2
author: test
+ date: * (glob)
msg: copy file to new directory
A /d2
A /d2/a (from /d1/a@1)
@@ -416,21 +426,25 @@
4 4 test right-2
revision: 4
author: test
+ date: * (glob)
msg: merge
A /right-1
A /right-2
revision: 3
author: test
+ date: * (glob)
msg: left-2
M /b
A /left-2
revision: 2
author: test
+ date: * (glob)
msg: left-1
M /b
A /left-1
revision: 1
author: test
+ date: * (glob)
msg: base
A /b
@@ -459,10 +473,12 @@
2 2 test .hgtags
revision: 2
author: test
+ date: * (glob)
msg: Tagged as v1.0
A /.hgtags
revision: 1
author: test
+ date: * (glob)
msg: Add file a
A /a
$ rm -rf a a-hg a-hg-wc
@@ -494,10 +510,12 @@
2 2 test exec
revision: 2
author: test
+ date: * (glob)
msg: remove executable bit
M /exec
revision: 1
author: test
+ date: * (glob)
msg: create executable
A /exec
$ test ! -x a-hg-wc/exec
@@ -540,11 +558,77 @@
2 2 test b
revision: 2
author: test
+ date: * (glob)
msg: Another change
A /b
revision: 1
author: test
+ date: * (glob)
msg: Some change
A /a
$ rm -rf a a-hg a-hg-wc
+
+Commit dates conversion
+
+ $ hg init a
+
+ $ echo a >> a/a
+ $ hg add a
+ adding a/a
+ $ hg --cwd a ci -d '1 0' -A -m 'Change 1'
+
+ $ echo a >> a/a
+ $ hg --cwd a ci -d '2 0' -m 'Change 2'
+
+ $ echo a >> a/a
+ $ hg --cwd a ci -d '2 0' -m 'Change at the same time'
+
+ $ echo a >> a/a
+ $ hg --cwd a ci -d '1 0' -m 'Change in the past'
+
+ $ echo a >> a/a
+ $ hg --cwd a ci -d '3 0' -m 'Change in the future'
+
+ $ hg convert --config convert.svn.dangerous-set-commit-dates=true -d svn a
+ assuming destination a-hg
+ initializing svn repository 'a-hg'
+ initializing svn working copy 'a-hg-wc'
+ scanning source...
+ sorting...
+ converting...
+ 4 Change 1
+ 3 Change 2
+ 2 Change at the same time
+ 1 Change in the past
+ 0 Change in the future
+ $ svnupanddisplay a-hg-wc 0
+ 5 5 test .
+ 5 5 test a
+ revision: 5
+ author: test
+ date: 1970-01-01T00:00:03.000000Z
+ msg: Change in the future
+ M /a
+ revision: 4
+ author: test
+ date: 1970-01-01T00:00:01.000000Z
+ msg: Change in the past
+ M /a
+ revision: 3
+ author: test
+ date: 1970-01-01T00:00:02.000000Z
+ msg: Change at the same time
+ M /a
+ revision: 2
+ author: test
+ date: 1970-01-01T00:00:02.000000Z
+ msg: Change 2
+ M /a
+ revision: 1
+ author: test
+ date: 1970-01-01T00:00:01.000000Z
+ msg: Change 1
+ A /a
+
+ $ rm -rf a a-hg a-hg-wc
--- a/tests/test-convert.t Wed Jan 27 00:54:57 2021 -0500
+++ b/tests/test-convert.t Fri Jan 29 17:32:09 2021 +0530
@@ -388,6 +388,23 @@
does not convert tags from the source repo to the target
repo. The default is False.
+ Subversion Destination
+ ######################
+
+ Original commit dates are not preserved by default.
+
+ convert.svn.dangerous-set-commit-dates
+ preserve original commit dates, forcefully setting
+ "svn:date" revision properties. This option is DANGEROUS and
+ may break some subversion functionality for the resulting
+ repository (e.g. filtering revisions with date ranges in
+ "svn log"), as original commit dates are not guaranteed to
+ be monotonically increasing.
+
+ For commit dates setting to work destination repository must have "pre-
+ revprop-change" hook configured to allow setting of "svn:date" revision
+ properties. See Subversion documentation for more details.
+
options ([+] can be repeated):
-s --source-type TYPE source repository type
--- a/tests/test-copies-in-changeset.t Wed Jan 27 00:54:57 2021 -0500
+++ b/tests/test-copies-in-changeset.t Fri Jan 29 17:32:09 2021 +0530
@@ -345,7 +345,10 @@
$ hg co -q 0
$ hg mv a b
$ hg ci -qm 'rename a to b'
- $ hg rebase -d 1 --config rebase.experimental.inmemory=yes
+Not only do we want this to run in-memory, it shouldn't fall back to
+on-disk merge (no conflicts), so we force it to be in-memory
+with no fallback.
+ $ hg rebase -d 1 --config rebase.experimental.inmemory=yes --config devel.rebase.force-in-memory-merge=yes
rebasing 2:* tip "rename a to b" (glob)
merging a and b to b
saved backup bundle to $TESTTMP/rebase-rename/.hg/strip-backup/*-*-rebase.hg (glob)
--- a/tests/test-copies.t Wed Jan 27 00:54:57 2021 -0500
+++ b/tests/test-copies.t Fri Jan 29 17:32:09 2021 +0530
@@ -93,8 +93,10 @@
x y
$ hg debugp1copies -r 1
x -> y
-Incorrectly doesn't show the rename
$ hg debugpathcopies 0 1
+ x -> y (no-filelog !)
+ $ hg debugpathcopies 0 1 --config devel.copy-tracing.trace-all-files=yes
+ x -> y
Copy a file onto another file with same content. If metadata is stored in changeset, this does not
produce a new filelog entry. The changeset's "files" entry should still list the file.
@@ -111,8 +113,10 @@
x x2
$ hg debugp1copies -r 1
x -> x2
-Incorrectly doesn't show the rename
$ hg debugpathcopies 0 1
+ x -> x2 (no-filelog !)
+ $ hg debugpathcopies 0 1 --config devel.copy-tracing.trace-all-files=yes
+ x -> x2
Rename file in a loop: x->y->z->x
$ newrepo
--- a/tests/test-copy.t Wed Jan 27 00:54:57 2021 -0500
+++ b/tests/test-copy.t Fri Jan 29 17:32:09 2021 +0530
@@ -228,6 +228,17 @@
should show no copies
$ hg st -C
+note: since filelog based copy tracing only traces copies for new files, the copy information here is not displayed.
+
+ $ hg status --copies --change .
+ M bar
+
+There is a devel option to walk all files and find this information anyway.
+
+ $ hg status --copies --change . --config devel.copy-tracing.trace-all-files=yes
+ M bar
+ foo
+
copy --after on an added file
$ cp bar baz
$ hg add baz
--- a/tests/test-globalopts.t Wed Jan 27 00:54:57 2021 -0500
+++ b/tests/test-globalopts.t Fri Jan 29 17:32:09 2021 +0530
@@ -351,6 +351,7 @@
addremove add all new files, delete all missing files
files list tracked files
forget forget the specified files on the next commit
+ purge removes files not tracked by Mercurial
remove remove the specified files on the next commit
rename rename files; equivalent of copy + remove
resolve redo merges or set/view the merge status of files
@@ -483,6 +484,7 @@
addremove add all new files, delete all missing files
files list tracked files
forget forget the specified files on the next commit
+ purge removes files not tracked by Mercurial
remove remove the specified files on the next commit
rename rename files; equivalent of copy + remove
resolve redo merges or set/view the merge status of files
--- a/tests/test-help-hide.t Wed Jan 27 00:54:57 2021 -0500
+++ b/tests/test-help-hide.t Fri Jan 29 17:32:09 2021 +0530
@@ -55,6 +55,7 @@
addremove add all new files, delete all missing files
files list tracked files
forget forget the specified files on the next commit
+ purge removes files not tracked by Mercurial
remove remove the specified files on the next commit
rename rename files; equivalent of copy + remove
resolve redo merges or set/view the merge status of files
@@ -191,6 +192,7 @@
addremove add all new files, delete all missing files
files list tracked files
forget forget the specified files on the next commit
+ purge removes files not tracked by Mercurial
remove remove the specified files on the next commit
rename rename files; equivalent of copy + remove
resolve redo merges or set/view the merge status of files
--- a/tests/test-help.t Wed Jan 27 00:54:57 2021 -0500
+++ b/tests/test-help.t Fri Jan 29 17:32:09 2021 +0530
@@ -107,6 +107,7 @@
addremove add all new files, delete all missing files
files list tracked files
forget forget the specified files on the next commit
+ purge removes files not tracked by Mercurial
remove remove the specified files on the next commit
rename rename files; equivalent of copy + remove
resolve redo merges or set/view the merge status of files
@@ -235,6 +236,7 @@
addremove add all new files, delete all missing files
files list tracked files
forget forget the specified files on the next commit
+ purge removes files not tracked by Mercurial
remove remove the specified files on the next commit
rename rename files; equivalent of copy + remove
resolve redo merges or set/view the merge status of files
@@ -375,8 +377,6 @@
mq manage a stack of patches
notify hooks for sending email push notifications
patchbomb command to send changesets as (a series of) patch emails
- purge command to delete untracked files from the working
- directory
relink recreates hardlinks between repository clones
schemes extend schemes with shortcuts to repository swarms
share share a common history between several working directories
@@ -1069,6 +1069,7 @@
debugsetparents
manually set the parents of the current working directory
(DANGEROUS)
+ debugshell run an interactive Python interpreter
debugsidedata
dump the side data for a cl/manifest/file revision
debugssl test a secure connection to a server
@@ -2720,6 +2721,13 @@
set or show the current phase name
</td></tr>
<tr><td>
+ <a href="/help/purge">
+ purge
+ </a>
+ </td><td>
+ removes files not tracked by Mercurial
+ </td></tr>
+ <tr><td>
<a href="/help/recover">
recover
</a>
--- a/tests/test-hgweb-json.t Wed Jan 27 00:54:57 2021 -0500
+++ b/tests/test-hgweb-json.t Fri Jan 29 17:32:09 2021 +0530
@@ -2190,6 +2190,10 @@
"topic": "phase"
},
{
+ "summary": "removes files not tracked by Mercurial",
+ "topic": "purge"
+ },
+ {
"summary": "roll back an interrupted transaction",
"topic": "recover"
},
--- a/tests/test-inherit-mode.t Wed Jan 27 00:54:57 2021 -0500
+++ b/tests/test-inherit-mode.t Fri Jan 29 17:32:09 2021 +0530
@@ -134,6 +134,8 @@
00660 ../push/.hg/00changelog.i
00770 ../push/.hg/cache/
00660 ../push/.hg/cache/branch2-base
+ 00660 ../push/.hg/cache/rbc-names-v1
+ 00660 ../push/.hg/cache/rbc-revs-v1
00660 ../push/.hg/dirstate
00660 ../push/.hg/requires
00770 ../push/.hg/store/
--- a/tests/test-minirst.py Wed Jan 27 00:54:57 2021 -0500
+++ b/tests/test-minirst.py Fri Jan 29 17:32:09 2021 +0530
@@ -159,6 +159,8 @@
:a: First item.
:ab: Second item. Indentation and wrapping
is handled automatically.
+:c\:d: a key with colon
+:efg\:\:hh: a key with many colon
Next list:
--- a/tests/test-minirst.py.out Wed Jan 27 00:54:57 2021 -0500
+++ b/tests/test-minirst.py.out Fri Jan 29 17:32:09 2021 +0530
@@ -439,6 +439,8 @@
a First item.
ab Second item. Indentation and wrapping is
handled automatically.
+c:d a key with colon
+efg::hh a key with many colon
Next list:
@@ -456,6 +458,9 @@
wrapping is
handled
automatically.
+c:d a key with colon
+efg::hh a key with many
+ colon
Next list:
@@ -476,6 +481,10 @@
<dd>First item.
<dt>ab
<dd>Second item. Indentation and wrapping is handled automatically.
+ <dt>c:d
+ <dd>a key with colon
+ <dt>efg::hh
+ <dd>a key with many colon
</dl>
<p>
Next list:
--- a/tests/test-purge.t Wed Jan 27 00:54:57 2021 -0500
+++ b/tests/test-purge.t Fri Jan 29 17:32:09 2021 +0530
@@ -1,8 +1,3 @@
- $ cat <<EOF >> $HGRCPATH
- > [extensions]
- > purge =
- > EOF
-
init
$ hg init t
@@ -18,11 +13,35 @@
$ echo 'ignored' > .hgignore
$ hg ci -qAmr3 -d'2 0'
+purge without the extension
+
+ $ hg st
+ $ touch foo
+ $ hg purge
+ permanently delete 1 unkown files? (yN) n
+ abort: removal cancelled
+ [250]
+ $ hg st
+ ? foo
+ $ hg purge --no-confirm
+ $ hg st
+
+now enabling the extension
+
+ $ cat <<EOF >> $HGRCPATH
+ > [extensions]
+ > purge =
+ > EOF
+
delete an empty directory
$ mkdir empty_dir
$ hg purge -p -v
empty_dir
+ $ hg purge --confirm
+ permanently delete at least 1 empty directories? (yN) n
+ abort: removal cancelled
+ [250]
$ hg purge -v
removing directory empty_dir
$ ls -A
@@ -62,6 +81,10 @@
$ hg purge -p
untracked_file
untracked_file_readonly
+ $ hg purge --confirm
+ permanently delete 2 unkown files? (yN) n
+ abort: removal cancelled
+ [250]
$ hg purge -v
removing file untracked_file
removing file untracked_file_readonly
@@ -121,6 +144,10 @@
$ cd directory
$ hg purge -p ../untracked_directory
untracked_directory/nested_directory
+ $ hg purge --confirm
+ permanently delete 1 unkown files? (yN) n
+ abort: removal cancelled
+ [250]
$ hg purge -v ../untracked_directory
removing directory untracked_directory/nested_directory
removing directory untracked_directory
@@ -138,6 +165,7 @@
$ touch ignored
$ hg purge -p
+ $ hg purge --confirm
$ hg purge -v
$ touch untracked_file
$ ls
@@ -147,6 +175,10 @@
untracked_file
$ hg purge -p -i
ignored
+ $ hg purge --confirm -i
+ permanently delete 1 ignored files? (yN) n
+ abort: removal cancelled
+ [250]
$ hg purge -v -i
removing file ignored
$ ls -A
@@ -159,6 +191,10 @@
$ hg purge -p --all
ignored
untracked_file
+ $ hg purge --confirm --all
+ permanently delete 1 unkown and 1 ignored files? (yN) n
+ abort: removal cancelled
+ [250]
$ hg purge -v --all
removing file ignored
removing file untracked_file
--- a/tests/test-rebase-conflicts.t Wed Jan 27 00:54:57 2021 -0500
+++ b/tests/test-rebase-conflicts.t Fri Jan 29 17:32:09 2021 +0530
@@ -318,10 +318,10 @@
bundle2-input-part: total payload size 1686
bundle2-input-part: "cache:rev-branch-cache" (advisory) supported
bundle2-input-part: total payload size 74
- truncating cache/rbc-revs-v1 to 56
bundle2-input-part: "phase-heads" supported
bundle2-input-part: total payload size 24
bundle2-input-bundle: 3 parts total
+ truncating cache/rbc-revs-v1 to 72
added 2 changesets with 2 changes to 1 files
updating the branch cache
invalid branch cache (served): tip differs
--- a/tests/test-requires.t Wed Jan 27 00:54:57 2021 -0500
+++ b/tests/test-requires.t Fri Jan 29 17:32:09 2021 +0530
@@ -5,7 +5,7 @@
$ hg commit -m test
$ rm .hg/requires
$ hg tip
- abort: unknown version (2) in revlog 00changelog.i
+ abort: unknown version (65535) in revlog 00changelog.i
[50]
$ echo indoor-pool > .hg/requires
$ hg tip
--- a/tests/test-resolve.t Wed Jan 27 00:54:57 2021 -0500
+++ b/tests/test-resolve.t Fri Jan 29 17:32:09 2021 +0530
@@ -344,6 +344,24 @@
$ hg resolve -l
R file1
R file2
+Test with :mergediff conflict markers
+ $ hg resolve --unmark
+ $ hg resolve --re-merge -t :mergediff file2
+ merging file2
+ warning: conflicts while merging file2! (edit, then use 'hg resolve --mark')
+ [1]
+ $ hg resolve -l
+ U file1
+ U file2
+ $ hg --config commands.resolve.mark-check=abort resolve -m
+ warning: the following files still have conflict markers:
+ file2
+ abort: conflict markers detected
+ (use --all to mark anyway)
+ [20]
+ $ hg resolve -l
+ U file1
+ U file2
Test option value 'warn'
$ hg resolve --unmark
$ hg resolve -l
--- a/tests/test-setdiscovery.t Wed Jan 27 00:54:57 2021 -0500
+++ b/tests/test-setdiscovery.t Fri Jan 29 17:32:09 2021 +0530
@@ -1328,25 +1328,25 @@
updating to branch b
0 files updated, 0 files merged, 0 files removed, 0 files unresolved
- $ hg -R a debugdiscovery b --debug --verbose --config progress.debug=true --config devel.discovery.randomize=false
+ $ hg -R a debugdiscovery b --debug --verbose --config progress.debug=true --config devel.discovery.randomize=false --config devel.discovery.sample-size.initial=50
comparing with b
query 1; heads
searching for changes
taking quick initial sample
searching: 2 queries
- query 2; still undecided: 1080, sample size is: 100
+ query 2; still undecided: 1080, sample size is: 50
sampling from both directions
searching: 3 queries
- query 3; still undecided: 980, sample size is: 200
+ query 3; still undecided: 1030, sample size is: 200
sampling from both directions
searching: 4 queries
- query 4; still undecided: 497, sample size is: 210
+ query 4; still undecided: 547, sample size is: 210
sampling from both directions
searching: 5 queries
- query 5; still undecided: 285, sample size is: 220
+ query 5; still undecided: 336, sample size is: 220
sampling from both directions
searching: 6 queries
- query 6; still undecided: 63, sample size is: 63
+ query 6; still undecided: 114, sample size is: 114
6 total queries in *.????s (glob)
elapsed time: * seconds (glob)
round-trips: 6
@@ -1412,22 +1412,30 @@
missing: 1040
common heads: 3ee37d65064a
- $ hg -R a debugdiscovery b --debug --config devel.discovery.exchange-heads=false --config devel.discovery.randomize=false --config devel.discovery.grow-sample.rate=1.01
+ $ hg -R a debugdiscovery b --debug --config devel.discovery.exchange-heads=false --config devel.discovery.randomize=false --config devel.discovery.grow-sample.rate=1.20 --config devel.discovery.sample-size=50
comparing with b
searching for changes
sampling from both directions
- query 1; still undecided: 1340, sample size is: 200
+ query 1; still undecided: 1340, sample size is: 50
+ sampling from both directions
+ query 2; still undecided: 995, sample size is: 60
sampling from both directions
- query 2; still undecided: 795, sample size is: 202
+ query 3; still undecided: 913, sample size is: 72
sampling from both directions
- query 3; still undecided: 525, sample size is: 204
+ query 4; still undecided: 816, sample size is: 204
+ sampling from both directions
+ query 5; still undecided: 612, sample size is: 153
sampling from both directions
- query 4; still undecided: 252, sample size is: 206
+ query 6; still undecided: 456, sample size is: 123
+ sampling from both directions
+ query 7; still undecided: 332, sample size is: 147
sampling from both directions
- query 5; still undecided: 44, sample size is: 44
- 5 total queries in *s (glob)
- elapsed time: * seconds (glob)
- round-trips: 5
+ query 8; still undecided: 184, sample size is: 176
+ sampling from both directions
+ query 9; still undecided: 8, sample size is: 8
+ 9 total queries in *s (glob)
+ elapsed time: * seconds (glob)
+ round-trips: 9
heads summary:
total common heads: 1
also local heads: 0
--- a/tests/test-share-safe.t Wed Jan 27 00:54:57 2021 -0500
+++ b/tests/test-share-safe.t Fri Jan 29 17:32:09 2021 +0530
@@ -352,18 +352,27 @@
- changelog
- manifest
- $ hg debugupgraderepo --run -q
+ $ hg debugupgraderepo --run
upgrade will perform the following actions:
requirements
preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
added: share-safe
+ share-safe
+ Upgrades a repository to share-safe format so that future shares of this repository share its requirements and configs.
+
processed revlogs:
- all-filelogs
- changelog
- manifest
+ beginning upgrade...
+ repository locked and read-only
+ creating temporary repository to stage upgraded data: $TESTTMP/non-share-safe/.hg/upgrade.* (glob)
+ (it is safe to interrupt this process any time before data migration completes)
+ upgrading repository requirements
+ removing temporary repository $TESTTMP/non-share-safe/.hg/upgrade.* (glob)
repository upgraded to share safe mode, existing shares will still work in old non-safe mode. Re-share existing shares to use them in safe mode New shares will be created in safe mode.
$ hg debugrequirements
@@ -433,7 +442,7 @@
- changelog
- manifest
- $ hg debugupgraderepo -q --run
+ $ hg debugupgraderepo --run
upgrade will perform the following actions:
requirements
@@ -445,6 +454,12 @@
- changelog
- manifest
+ beginning upgrade...
+ repository locked and read-only
+ creating temporary repository to stage upgraded data: $TESTTMP/non-share-safe/.hg/upgrade.* (glob)
+ (it is safe to interrupt this process any time before data migration completes)
+ upgrading repository requirements
+ removing temporary repository $TESTTMP/non-share-safe/.hg/upgrade.* (glob)
repository downgraded to not use share safe mode, existing shares will not work and needs to be reshared.
$ hg debugrequirements
--- a/tests/test-simplemerge.py Wed Jan 27 00:54:57 2021 -0500
+++ b/tests/test-simplemerge.py Fri Jan 29 17:32:09 2021 +0530
@@ -141,8 +141,6 @@
"""No conflicts because nothing changed"""
m3 = Merge3([b'aaa', b'bbb'], [b'aaa', b'bbb'], [b'aaa', b'bbb'])
- self.assertEqual(m3.find_unconflicted(), [(0, 2)])
-
self.assertEqual(
list(m3.find_sync_regions()),
[(0, 2, 0, 2, 0, 2), (2, 2, 2, 2, 2, 2)],
@@ -189,8 +187,6 @@
[b'aaa', b'bbb'], [b'aaa', b'111', b'bbb'], [b'aaa', b'bbb']
)
- self.assertEqual(m3.find_unconflicted(), [(0, 1), (1, 2)])
-
self.assertEqual(
list(m3.find_sync_regions()),
[(0, 1, 0, 1, 0, 1), (1, 2, 2, 3, 1, 2), (2, 2, 3, 3, 2, 2)],
@@ -271,8 +267,6 @@
[b'aaa\n', b'222\n', b'bbb\n'],
)
- self.assertEqual(m3.find_unconflicted(), [(0, 1), (1, 2)])
-
self.assertEqual(
list(m3.find_sync_regions()),
[(0, 1, 0, 1, 0, 1), (1, 2, 2, 3, 2, 3), (2, 2, 3, 3, 3, 3)],
@@ -323,8 +317,6 @@
[b'aaa', b'222', b'bbb'],
)
- self.assertEqual(m3.find_unconflicted(), [(0, 1), (2, 3)])
-
self.assertEqual(
list(m3.find_sync_regions()),
[(0, 1, 0, 1, 0, 1), (2, 3, 2, 3, 2, 3), (3, 3, 3, 3, 3, 3)],
@@ -338,8 +330,6 @@
[b'aaa', b'222', b'222', b'222', b'222', b'bbb'],
)
- self.assertEqual(m3.find_unconflicted(), [(0, 1), (3, 4)])
-
self.assertEqual(
list(m3.find_sync_regions()),
[(0, 1, 0, 1, 0, 1), (3, 4, 4, 5, 5, 6), (4, 4, 5, 5, 6, 6)],
--- a/tests/test-upgrade-repo.t Wed Jan 27 00:54:57 2021 -0500
+++ b/tests/test-upgrade-repo.t Fri Jan 29 17:32:09 2021 +0530
@@ -632,11 +632,9 @@
data fully upgraded in a temporary repository
marking source repository as being upgraded; clients will be unable to read from repository
starting in-place swap of repository data
- replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
replacing store...
store replacement complete; repository was inconsistent for * (glob)
finalizing requirements file and making repository readable again
- removing old repository content $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
$ ls -1 .hg/ | grep upgradebackup
[1]
@@ -679,11 +677,9 @@
data fully upgraded in a temporary repository
marking source repository as being upgraded; clients will be unable to read from repository
starting in-place swap of repository data
- replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
replacing store...
store replacement complete; repository was inconsistent for *s (glob)
finalizing requirements file and making repository readable again
- removing old repository content $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
Check that the repo still works fine
@@ -759,11 +755,9 @@
data fully upgraded in a temporary repository
marking source repository as being upgraded; clients will be unable to read from repository
starting in-place swap of repository data
- replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
replacing store...
store replacement complete; repository was inconsistent for *s (glob)
finalizing requirements file and making repository readable again
- removing old repository content $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
$ hg verify
checking changesets
@@ -810,11 +804,9 @@
data fully upgraded in a temporary repository
marking source repository as being upgraded; clients will be unable to read from repository
starting in-place swap of repository data
- replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
replacing store...
store replacement complete; repository was inconsistent for *s (glob)
finalizing requirements file and making repository readable again
- removing old repository content $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
$ hg verify
checking changesets
@@ -861,11 +853,9 @@
data fully upgraded in a temporary repository
marking source repository as being upgraded; clients will be unable to read from repository
starting in-place swap of repository data
- replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
replacing store...
store replacement complete; repository was inconsistent for *s (glob)
finalizing requirements file and making repository readable again
- removing old repository content $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
$ hg verify
checking changesets
@@ -919,11 +909,9 @@
data fully upgraded in a temporary repository
marking source repository as being upgraded; clients will be unable to read from repository
starting in-place swap of repository data
- replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
replacing store...
store replacement complete; repository was inconsistent for *s (glob)
finalizing requirements file and making repository readable again
- removing old repository content $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
$ hg verify
checking changesets
@@ -978,11 +966,9 @@
data fully upgraded in a temporary repository
marking source repository as being upgraded; clients will be unable to read from repository
starting in-place swap of repository data
- replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
replacing store...
store replacement complete; repository was inconsistent for *s (glob)
finalizing requirements file and making repository readable again
- removing old repository content $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
$ hg verify
checking changesets