contrib/python-zstandard/make_cffi.py

# Copyright (c) 2016-present, Gregory Szorc
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.


import cffi
import distutils.ccompiler
import os
import re
import subprocess
import tempfile


HERE = os.path.abspath(os.path.dirname(__file__))

SOURCES = [
    "zstd/%s" % p
    for p in (
        "common/debug.c",
        "common/entropy_common.c",
        "common/error_private.c",
        "common/fse_decompress.c",
        "common/pool.c",
        "common/threading.c",
        "common/xxhash.c",
        "common/zstd_common.c",
        "compress/fse_compress.c",
        "compress/hist.c",
        "compress/huf_compress.c",
        "compress/zstd_compress.c",
        "compress/zstd_compress_literals.c",
        "compress/zstd_compress_sequences.c",
        "compress/zstd_double_fast.c",
        "compress/zstd_fast.c",
        "compress/zstd_lazy.c",
        "compress/zstd_ldm.c",
        "compress/zstd_opt.c",
        "compress/zstdmt_compress.c",
        "decompress/huf_decompress.c",
        "decompress/zstd_ddict.c",
        "decompress/zstd_decompress.c",
        "decompress/zstd_decompress_block.c",
        "dictBuilder/cover.c",
        "dictBuilder/fastcover.c",
        "dictBuilder/divsufsort.c",
        "dictBuilder/zdict.c",
    )
]

# Headers whose preprocessed output will be fed into cdef().
HEADERS = [
    os.path.join(HERE, "zstd", *p)
    for p in (
        ("zstd.h",),
        ("dictBuilder", "zdict.h"),
    )
]

INCLUDE_DIRS = [
    os.path.join(HERE, d)
    for d in (
        "zstd",
        "zstd/common",
        "zstd/compress",
        "zstd/decompress",
        "zstd/dictBuilder",
    )
]

# cffi can't parse some of the primitives in zstd.h. So we invoke the
# preprocessor and feed its output into cffi.
compiler = distutils.ccompiler.new_compiler()

# Needed for MSVC.
if hasattr(compiler, "initialize"):
    compiler.initialize()

# Distutils doesn't set compiler.preprocessor, so invoke the preprocessor
# manually.
if compiler.compiler_type == "unix":
    args = list(compiler.executables["compiler"])
    args.extend(
        [
            "-E",
            "-DZSTD_STATIC_LINKING_ONLY",
            "-DZDICT_STATIC_LINKING_ONLY",
        ]
    )
elif compiler.compiler_type == "msvc":
    args = [compiler.cc]
    args.extend(
        [
            "/EP",
            "/DZSTD_STATIC_LINKING_ONLY",
            "/DZDICT_STATIC_LINKING_ONLY",
        ]
    )
else:
    raise Exception("unsupported compiler type: %s" % compiler.compiler_type)
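
# On a typical Linux setup the resulting invocation looks something like
#   cc -E -DZSTD_STATIC_LINKING_ONLY -DZDICT_STATIC_LINKING_ONLY <header>
# (illustrative only; the actual executable and flags come from distutils'
# compiler configuration).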


def preprocess(path):
    with open(path, "rb") as fh:
        lines = []

        for l in fh:
            # zstd.h includes <stddef.h>, which is also included by cffi's
            # boilerplate. This can lead to duplicate declarations. So we strip
            # this include from the preprocessor invocation.
            #
            # The same thing happens when including zstd.h, so give it the
            # same treatment.
            #
            # We define ZSTD_STATIC_LINKING_ONLY, which is redundant with
            # the inline #define in zstdmt_compress.h and results in a
            # compiler warning. So drop the inline #define.
            if l.startswith(
                (
                    b"#include <stddef.h>",
                    b'#include "zstd.h"',
                    b"#define ZSTD_STATIC_LINKING_ONLY",
                )
            ):
                continue

            # The preprocessor environment on Windows doesn't define include
            # paths, so the #include of limits.h fails. We work around this
            # by removing that include and defining INT_MAX ourselves. This
            # is a bit hacky, but it gets the job done.
            # TODO make limits.h work on Windows so we ensure INT_MAX is
            # correct.
            if l.startswith(b"#include <limits.h>"):
                l = b"#define INT_MAX 2147483647\n"

            # ZSTDLIB_API may not be defined if we dropped zstd.h. It isn't
            # important, so just filter it out.
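            #
            # For example, a declaration such as
            #   b"ZSTDLIB_API size_t ZSTD_compressBound(size_t srcSize);"
            # would be rewritten to
            #   b"size_t ZSTD_compressBound(size_t srcSize);"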
            if l.startswith(b"ZSTDLIB_API"):
                l = l[len(b"ZSTDLIB_API ") :]

            lines.append(l)

    fd, input_file = tempfile.mkstemp(suffix=".h")
    os.write(fd, b"".join(lines))
    os.close(fd)

    try:
        env = dict(os.environ)
        if getattr(compiler, "_paths", None):
            env["PATH"] = compiler._paths
        process = subprocess.Popen(
            args + [input_file], stdout=subprocess.PIPE, env=env
        )
        output = process.communicate()[0]
        ret = process.poll()
        if ret:
            raise Exception("preprocessor exited with error")

        return output
    finally:
        os.unlink(input_file)


def normalize_output(output):
    lines = []
    for line in output.splitlines():
        # CFFI's parser doesn't like __attribute__ on UNIX compilers.
        if line.startswith(b'__attribute__ ((visibility ("default"))) '):
            line = line[len(b'__attribute__ ((visibility ("default"))) ') :]

        if line.startswith(b"__attribute__((deprecated("):
            continue
        elif b"__declspec(deprecated(" in line:
            continue

        lines.append(line)

    return b"\n".join(lines)


ffi = cffi.FFI()
# zstd.h uses a possibly undefined MIN(). Define it until
# https://github.com/facebook/zstd/issues/976 is fixed.
# *_DISABLE_DEPRECATE_WARNINGS prevents the compiler from emitting a warning
# when cffi uses the function. Since we statically link against zstd, even
# if we use the deprecated functions it shouldn't be a huge problem.
ffi.set_source(
    "_zstd_cffi",
    """
#define MIN(a,b) ((a)<(b) ? (a) : (b))
#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>
#define ZDICT_STATIC_LINKING_ONLY
#define ZDICT_DISABLE_DEPRECATE_WARNINGS
#include <zdict.h>
""",
    sources=SOURCES,
    include_dirs=INCLUDE_DIRS,
    extra_compile_args=["-DZSTD_MULTITHREAD"],
)

DEFINE = re.compile(rb"^#define ([a-zA-Z0-9_]+) ")
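# For example, a line like b"#define ZSTD_VERSION_MAJOR 1" matches (capturing
# the macro name), while function-like macros such as
# b"#define ZSTD_COMPRESSBOUND(srcSize)" do not, since "(" breaks the
# required space after the name.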

sources = []

# Feed normalized preprocessor output for headers into the cdef parser.
for header in HEADERS:
    preprocessed = preprocess(header)
    sources.append(normalize_output(preprocessed))

    # #define's are effectively erased as part of going through the
    # preprocessor. So perform a manual pass to re-add them to the cdef
    # source.
    with open(header, "rb") as fh:
        for line in fh:
            line = line.strip()
            m = DEFINE.match(line)
            if not m:
                continue

            if m.group(1) == b"ZSTD_STATIC_LINKING_ONLY":
                continue

            # The parser doesn't like some constants with complex values.
            if m.group(1) in (b"ZSTD_LIB_VERSION", b"ZSTD_VERSION_STRING"):
                continue

            # The ... is magic syntax by the cdef parser to resolve the
            # value at compile time.
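            # For example, ZSTD_VERSION_MAJOR would be emitted as
            #   #define ZSTD_VERSION_MAJOR ...
            # and cffi substitutes the real value when the module is
            # compiled.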
            sources.append(m.group(0) + b" ...")

cdeflines = b"\n".join(sources).splitlines()
cdeflines = [l for l in cdeflines if l.strip()]
ffi.cdef(b"\n".join(cdeflines).decode("latin1"))

if __name__ == "__main__":
    ffi.compile()
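
# A minimal usage sketch (assuming the build above succeeded): the generated
# module exposes `ffi` and `lib` objects, e.g.
#
#   from _zstd_cffi import ffi, lib
#   print(lib.ZSTD_versionNumber())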