tests/generate-working-copy-states.py
author Pierre-Yves David <pierre-yves.david@octobus.net>
Fri, 05 Apr 2024 11:05:54 +0200
changeset 51576 de5bf3fe0233
parent 48875 6000f5b25c9b
permissions -rw-r--r--
revset: stop serializing node when using "%ln"

Turning hundreds of thousands of nodes from binary to hex and back can be
slow… how about we stop doing it? In many cases where we are using node ids,
we should be using revision ids instead. However, that is not a good reason
to keep a needlessly slow implementation of "%ln". This caught my attention
again because the phase discovery during push makes extensive use of "%ln"
on huge sets. In absolute terms, that phase discovery should probably use
"%ld" and needs its algorithmic complexity improved, but improving "%ln"
seems simple and long overdue.

This greatly speeds up `hg push` on repositories with many drafts. Here are
some relevant poulpe benchmarks:

### data-env-vars.name = mozilla-try-2023-03-22-zstd-sparse-revlog
# benchmark.name = hg.command.push
# bin-env-vars.hg.flavor = default
# bin-env-vars.hg.py-re2-module = default
# benchmark.variants.explicit-rev = all-out-heads
# benchmark.variants.issue6528 = disabled
# benchmark.variants.protocol = ssh
# benchmark.variants.reuse-external-delta-parent = default

## benchmark.variants.revs = any-1-extra-rev
before: 44.235070
after:  20.416329 (-53.85%, -23.82)

## benchmark.variants.revs = any-100-extra-rev
before: 49.234697
after:  26.519829 (-46.14%, -22.71)

### benchmark.name = hg.command.bundle
# bin-env-vars.hg.flavor = default
# bin-env-vars.hg.py-re2-module = default
# benchmark.variants.revs = all
# benchmark.variants.type = none-streamv2

## data-env-vars.name = heptapod-public-2024-03-25-zstd-sparse-revlog
before: 10.138396
after:   7.750458 (-23.55%, -2.39)

## data-env-vars.name = mercurial-public-2024-03-22-zstd-sparse-revlog
before: 1.263859
after:  0.700229 (-44.60%, -0.56)

## data-env-vars.name = mozilla-try-2023-03-22-zstd-sparse-revlog
before: 399.484481
after:  346.5089 (-13.26%, -52.98)

## data-env-vars.name = pypy-2024-03-22-zstd-sparse-revlog
before: 4.540080
after:  3.401700 (-25.07%, -1.14)

## data-env-vars.name = tryton-public-2024-03-22-zstd-sparse-revlog
before: 2.975765
after:  1.870798 (-37.13%, -1.10)
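
For context, "%ln" and "%ld" are list specifiers understood by
revsetlang.formatspec: "%ln" interpolates a list of binary node ids into a
revset expression (historically by converting each node to hex), while "%ld"
interpolates a list of integer revision numbers. A minimal caller-side
sketch, assuming `repo` is an in-process repository object and
`nodes`/`revs` are the corresponding lists:

    # "%ln" takes 20-byte binary node ids
    heads = repo.revs(b'heads(draft() and %ln)', nodes)
    # "%ld" takes integer revision numbers, with no node<->hex round-trip
    heads = repo.revs(b'heads(draft() and %ld)', revs)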

# Helper script used for generating history and working copy files and content.
# The file's name corresponds to its history. The number of changesets can
# be specified on the command line. With 2 changesets, files with names like
# content1_content2_content1-untracked are generated. The first two filename
# segments describe the contents in the two changesets. The third segment
# ("content1-untracked") describes the state in the working copy, i.e.
# the file has content "content1" and is untracked (since it was previously
# tracked, it has been forgotten).
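#
# For example, with 2 changesets the generated names include (an illustrative
# subset of the full list):
#
#   content1_content1_content1-tracked
#   content1_content2_missing-untracked
#   missing_content2_content2-tracked
#   missing_missing_missing-untracked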
#
# This script generates the filenames and their content, but it's up to the
# caller to tell hg about the state.
#
# There are two subcommands:
#   filelist <numchangesets>
#   state <numchangesets> (<changeset>|wc)
#
# Typical usage:
#
# $ python $TESTDIR/generate-working-copy-states.py state 2 1
# $ hg addremove --similarity 0
# $ hg commit -m 'first'
#
# $ python $TESTDIR/generate-working-copy-states.py state 2 2
# $ hg addremove --similarity 0
# $ hg commit -m 'second'
#
# $ python $TESTDIR/generate-working-copy-states.py state 2 wc
# $ hg addremove --similarity 0
# $ hg forget *_*_*-untracked
# $ rm *_*_missing-*


import os
import sys

# Generates pairs of (filename, contents), where 'contents' is a list
# describing the file's content at each revision (or in the working copy).
# At each revision, it is either None or the file's actual content. When not
# None, it may be either new content or the same content as an earlier
# revision, so all of (modified, clean, added, removed) can be tested.
def generatestates(maxchangesets, parentcontents):
    depth = len(parentcontents)
    if depth == maxchangesets + 1:
        for tracked in (b'untracked', b'tracked'):
            filename = (
                b"_".join(
                    [
                        (b'missing' if content is None else content)
                        for content in parentcontents
                    ]
                )
                + b"-"
                + tracked
            )
            yield (filename, parentcontents)
    else:
        for content in {None, b'content' + (b"%d" % (depth + 1))} | set(
            parentcontents
        ):
            for combination in generatestates(
                maxchangesets, parentcontents + [content]
            ):
                yield combination
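
# For illustration: because generatestates iterates over sets, its raw yield
# order is unstable, but the sorted output is deterministic. For instance:
#
#   >>> sorted(generatestates(1, []))[0]
#   (b'content1_content1-tracked', [b'content1', b'content1'])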


# retrieve the command line arguments
target = sys.argv[1]
maxchangesets = int(sys.argv[2])
if target == 'state':
    depth = sys.argv[3]

# sort to make sure we have stable output
combinations = sorted(generatestates(maxchangesets, []))

# compute file content
content = []
for filename, states in combinations:
    if target == 'filelist':
        print(filename.decode('ascii'))
    elif target == 'state':
        if depth == 'wc':
            # Make sure there is content so the file gets written and can be
            # tracked. It will be deleted outside of this script.
            content.append((filename, states[maxchangesets] or b'TOBEDELETED'))
        else:
            # <changeset> is 1-based on the command line; lists are 0-based
            content.append((filename, states[int(depth) - 1]))
    else:
        print("unknown target:", target, file=sys.stderr)
        sys.exit(1)
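
# For example, with `state 2 wc`, the file "content1_content1_missing-tracked"
# has no working-copy content (its last state is None), so it is queued with
# the b'TOBEDELETED' placeholder and written below; the caller later deletes
# it via `rm *_*_missing-*`.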

# write actual content
for filename, data in content:
    if data is not None:
        with open(filename, 'wb') as f:
            f.write(data + b'\n')
    elif os.path.exists(filename):
        os.remove(filename)