python-zstandard: blacken at 80 characters
author Gregory Szorc <gregory.szorc@gmail.com>
date Wed, 22 Jan 2020 22:23:04 -0800
changeset 44147 5e84a96d865b
parent 44146 45ec64d93b3a
child 44148 00aaf11ec399
python-zstandard: blacken at 80 characters

I made this change upstream and it will make it into the next release
of python-zstandard. I figured I'd send it Mercurial's way because it
will allow us to drop this directory from the black exclusion list.

# skip-blame blackening

Differential Revision: https://phab.mercurial-scm.org/D7937
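A minimal sketch of reproducing the reformatting locally (assuming black is
installed and run from the repository root; the exact invocation is not
recorded in this changeset):

  $ black --config=black.toml contrib/python-zstandard/

With the updated black:pattern in contrib/examples/fix.hgrc, `hg fix` now
covers these files as well.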
black.toml
contrib/examples/fix.hgrc
contrib/python-zstandard/make_cffi.py
contrib/python-zstandard/setup.py
contrib/python-zstandard/setup_zstd.py
contrib/python-zstandard/tests/common.py
contrib/python-zstandard/tests/test_buffer_util.py
contrib/python-zstandard/tests/test_compressor.py
contrib/python-zstandard/tests/test_compressor_fuzzing.py
contrib/python-zstandard/tests/test_data_structures.py
contrib/python-zstandard/tests/test_data_structures_fuzzing.py
contrib/python-zstandard/tests/test_decompressor.py
contrib/python-zstandard/tests/test_decompressor_fuzzing.py
contrib/python-zstandard/tests/test_train_dictionary.py
contrib/python-zstandard/zstandard/cffi.py
tests/test-check-format.t
--- a/black.toml	Tue Jan 21 15:45:06 2020 -0800
+++ b/black.toml	Wed Jan 22 22:23:04 2020 -0800
@@ -9,7 +9,6 @@
 | \.mypy_cache/
 | \.venv/
 | mercurial/thirdparty/
-| contrib/python-zstandard/
 '''
 skip-string-normalization = true
 quiet = true
--- a/contrib/examples/fix.hgrc	Tue Jan 21 15:45:06 2020 -0800
+++ b/contrib/examples/fix.hgrc	Wed Jan 22 22:23:04 2020 -0800
@@ -6,7 +6,7 @@
 rustfmt:pattern = set:**.rs
 
 black:command = black --config=black.toml -
-black:pattern = set:**.py - mercurial/thirdparty/** - "contrib/python-zstandard/**"
+black:pattern = set:**.py - mercurial/thirdparty/**
 
 # Mercurial doesn't have any Go code, but if we did this is how we
 # would configure `hg fix` for Go:
--- a/contrib/python-zstandard/make_cffi.py	Tue Jan 21 15:45:06 2020 -0800
+++ b/contrib/python-zstandard/make_cffi.py	Wed Jan 22 22:23:04 2020 -0800
@@ -52,7 +52,8 @@
 
 # Headers whose preprocessed output will be fed into cdef().
 HEADERS = [
-    os.path.join(HERE, "zstd", *p) for p in (("zstd.h",), ("dictBuilder", "zdict.h"),)
+    os.path.join(HERE, "zstd", *p)
+    for p in (("zstd.h",), ("dictBuilder", "zdict.h"),)
 ]
 
 INCLUDE_DIRS = [
@@ -139,7 +140,9 @@
         env = dict(os.environ)
         if getattr(compiler, "_paths", None):
             env["PATH"] = compiler._paths
-        process = subprocess.Popen(args + [input_file], stdout=subprocess.PIPE, env=env)
+        process = subprocess.Popen(
+            args + [input_file], stdout=subprocess.PIPE, env=env
+        )
         output = process.communicate()[0]
         ret = process.poll()
         if ret:
--- a/contrib/python-zstandard/setup.py	Tue Jan 21 15:45:06 2020 -0800
+++ b/contrib/python-zstandard/setup.py	Wed Jan 22 22:23:04 2020 -0800
@@ -87,7 +87,9 @@
         break
 
 if not version:
-    raise Exception("could not resolve package version; " "this should never happen")
+    raise Exception(
+        "could not resolve package version; " "this should never happen"
+    )
 
 setup(
     name="zstandard",
--- a/contrib/python-zstandard/setup_zstd.py	Tue Jan 21 15:45:06 2020 -0800
+++ b/contrib/python-zstandard/setup_zstd.py	Wed Jan 22 22:23:04 2020 -0800
@@ -138,12 +138,16 @@
     if not system_zstd:
         sources.update([os.path.join(actual_root, p) for p in zstd_sources])
         if support_legacy:
-            sources.update([os.path.join(actual_root, p) for p in zstd_sources_legacy])
+            sources.update(
+                [os.path.join(actual_root, p) for p in zstd_sources_legacy]
+            )
     sources = list(sources)
 
     include_dirs = set([os.path.join(actual_root, d) for d in ext_includes])
     if not system_zstd:
-        include_dirs.update([os.path.join(actual_root, d) for d in zstd_includes])
+        include_dirs.update(
+            [os.path.join(actual_root, d) for d in zstd_includes]
+        )
         if support_legacy:
             include_dirs.update(
                 [os.path.join(actual_root, d) for d in zstd_includes_legacy]
--- a/contrib/python-zstandard/tests/common.py	Tue Jan 21 15:45:06 2020 -0800
+++ b/contrib/python-zstandard/tests/common.py	Wed Jan 22 22:23:04 2020 -0800
@@ -50,7 +50,9 @@
         os.environ.update(old_env)
 
     if mod.backend != "cffi":
-        raise Exception("got the zstandard %s backend instead of cffi" % mod.backend)
+        raise Exception(
+            "got the zstandard %s backend instead of cffi" % mod.backend
+        )
 
     # If CFFI version is available, dynamically construct test methods
     # that use it.
@@ -84,7 +86,9 @@
                 fn.__func__.func_defaults,
                 fn.__func__.func_closure,
             )
-            new_method = types.UnboundMethodType(new_fn, fn.im_self, fn.im_class)
+            new_method = types.UnboundMethodType(
+                new_fn, fn.im_self, fn.im_class
+            )
 
         setattr(cls, name, new_method)
 
@@ -194,4 +198,6 @@
     expensive_settings = hypothesis.settings(deadline=None, max_examples=10000)
     hypothesis.settings.register_profile("expensive", expensive_settings)
 
-    hypothesis.settings.load_profile(os.environ.get("HYPOTHESIS_PROFILE", "default"))
+    hypothesis.settings.load_profile(
+        os.environ.get("HYPOTHESIS_PROFILE", "default")
+    )
--- a/contrib/python-zstandard/tests/test_buffer_util.py	Tue Jan 21 15:45:06 2020 -0800
+++ b/contrib/python-zstandard/tests/test_buffer_util.py	Wed Jan 22 22:23:04 2020 -0800
@@ -67,7 +67,8 @@
             self.skipTest("BufferWithSegments not available")
 
         b = zstd.BufferWithSegments(
-            b"foofooxfooxy", b"".join([ss.pack(0, 3), ss.pack(3, 4), ss.pack(7, 5)])
+            b"foofooxfooxy",
+            b"".join([ss.pack(0, 3), ss.pack(3, 4), ss.pack(7, 5)]),
         )
         self.assertEqual(len(b), 3)
         self.assertEqual(b.size, 12)
@@ -83,17 +84,23 @@
         if not hasattr(zstd, "BufferWithSegmentsCollection"):
             self.skipTest("BufferWithSegmentsCollection not available")
 
-        with self.assertRaisesRegex(ValueError, "must pass at least 1 argument"):
+        with self.assertRaisesRegex(
+            ValueError, "must pass at least 1 argument"
+        ):
             zstd.BufferWithSegmentsCollection()
 
     def test_argument_validation(self):
         if not hasattr(zstd, "BufferWithSegmentsCollection"):
             self.skipTest("BufferWithSegmentsCollection not available")
 
-        with self.assertRaisesRegex(TypeError, "arguments must be BufferWithSegments"):
+        with self.assertRaisesRegex(
+            TypeError, "arguments must be BufferWithSegments"
+        ):
             zstd.BufferWithSegmentsCollection(None)
 
-        with self.assertRaisesRegex(TypeError, "arguments must be BufferWithSegments"):
+        with self.assertRaisesRegex(
+            TypeError, "arguments must be BufferWithSegments"
+        ):
             zstd.BufferWithSegmentsCollection(
                 zstd.BufferWithSegments(b"foo", ss.pack(0, 3)), None
             )
--- a/contrib/python-zstandard/tests/test_compressor.py	Tue Jan 21 15:45:06 2020 -0800
+++ b/contrib/python-zstandard/tests/test_compressor.py	Wed Jan 22 22:23:04 2020 -0800
@@ -24,7 +24,9 @@
 
 
 def multithreaded_chunk_size(level, source_size=0):
-    params = zstd.ZstdCompressionParameters.from_level(level, source_size=source_size)
+    params = zstd.ZstdCompressionParameters.from_level(
+        level, source_size=source_size
+    )
 
     return 1 << (params.window_log + 2)
 
@@ -86,7 +88,9 @@
 
         # This matches the test for read_to_iter() below.
         cctx = zstd.ZstdCompressor(level=1, write_content_size=False)
-        result = cctx.compress(b"f" * zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE + b"o")
+        result = cctx.compress(
+            b"f" * zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE + b"o"
+        )
         self.assertEqual(
             result,
             b"\x28\xb5\x2f\xfd\x00\x40\x54\x00\x00"
@@ -99,7 +103,9 @@
         result = cctx.compress(b"foo" * 256)
 
     def test_no_magic(self):
-        params = zstd.ZstdCompressionParameters.from_level(1, format=zstd.FORMAT_ZSTD1)
+        params = zstd.ZstdCompressionParameters.from_level(
+            1, format=zstd.FORMAT_ZSTD1
+        )
         cctx = zstd.ZstdCompressor(compression_params=params)
         magic = cctx.compress(b"foobar")
 
@@ -223,7 +229,8 @@
 
         self.assertEqual(
             result,
-            b"\x28\xb5\x2f\xfd\x23\x8f\x55\x0f\x70\x03\x19\x00\x00" b"\x66\x6f\x6f",
+            b"\x28\xb5\x2f\xfd\x23\x8f\x55\x0f\x70\x03\x19\x00\x00"
+            b"\x66\x6f\x6f",
         )
 
     def test_multithreaded_compression_params(self):
@@ -234,7 +241,9 @@
         params = zstd.get_frame_parameters(result)
         self.assertEqual(params.content_size, 3)
 
-        self.assertEqual(result, b"\x28\xb5\x2f\xfd\x20\x03\x19\x00\x00\x66\x6f\x6f")
+        self.assertEqual(
+            result, b"\x28\xb5\x2f\xfd\x20\x03\x19\x00\x00\x66\x6f\x6f"
+        )
 
 
 @make_cffi
@@ -347,7 +356,9 @@
         )
         self.assertEqual(cobj.compress(b"bar"), b"")
         # 3 byte header plus content.
-        self.assertEqual(cobj.flush(zstd.COMPRESSOBJ_FLUSH_BLOCK), b"\x18\x00\x00bar")
+        self.assertEqual(
+            cobj.flush(zstd.COMPRESSOBJ_FLUSH_BLOCK), b"\x18\x00\x00bar"
+        )
         self.assertEqual(cobj.flush(), b"\x01\x00\x00")
 
     def test_flush_empty_block(self):
@@ -445,7 +456,9 @@
         self.assertEqual(int(r), 0)
         self.assertEqual(w, 9)
 
-        self.assertEqual(dest.getvalue(), b"\x28\xb5\x2f\xfd\x00\x48\x01\x00\x00")
+        self.assertEqual(
+            dest.getvalue(), b"\x28\xb5\x2f\xfd\x00\x48\x01\x00\x00"
+        )
 
     def test_large_data(self):
         source = io.BytesIO()
@@ -478,7 +491,9 @@
         cctx = zstd.ZstdCompressor(level=1, write_checksum=True)
         cctx.copy_stream(source, with_checksum)
 
-        self.assertEqual(len(with_checksum.getvalue()), len(no_checksum.getvalue()) + 4)
+        self.assertEqual(
+            len(with_checksum.getvalue()), len(no_checksum.getvalue()) + 4
+        )
 
         no_params = zstd.get_frame_parameters(no_checksum.getvalue())
         with_params = zstd.get_frame_parameters(with_checksum.getvalue())
@@ -585,7 +600,9 @@
         cctx = zstd.ZstdCompressor()
 
         with cctx.stream_reader(b"foo") as reader:
-            with self.assertRaisesRegex(ValueError, "cannot __enter__ multiple times"):
+            with self.assertRaisesRegex(
+                ValueError, "cannot __enter__ multiple times"
+            ):
                 with reader as reader2:
                     pass
 
@@ -744,7 +761,9 @@
         source = io.BytesIO(b"foobar")
 
         with cctx.stream_reader(source, size=2) as reader:
-            with self.assertRaisesRegex(zstd.ZstdError, "Src size is incorrect"):
+            with self.assertRaisesRegex(
+                zstd.ZstdError, "Src size is incorrect"
+            ):
                 reader.read(10)
 
         # Try another compression operation.
@@ -1126,7 +1145,9 @@
         self.assertFalse(no_params.has_checksum)
         self.assertTrue(with_params.has_checksum)
 
-        self.assertEqual(len(with_checksum.getvalue()), len(no_checksum.getvalue()) + 4)
+        self.assertEqual(
+            len(with_checksum.getvalue()), len(no_checksum.getvalue()) + 4
+        )
 
     def test_write_content_size(self):
         no_size = NonClosingBytesIO()
@@ -1145,7 +1166,9 @@
 
         # Declaring size will write the header.
         with_size = NonClosingBytesIO()
-        with cctx.stream_writer(with_size, size=len(b"foobar" * 256)) as compressor:
+        with cctx.stream_writer(
+            with_size, size=len(b"foobar" * 256)
+        ) as compressor:
             self.assertEqual(compressor.write(b"foobar" * 256), 0)
 
         no_params = zstd.get_frame_parameters(no_size.getvalue())
@@ -1191,7 +1214,9 @@
         self.assertFalse(no_params.has_checksum)
         self.assertFalse(with_params.has_checksum)
 
-        self.assertEqual(len(with_dict_id.getvalue()), len(no_dict_id.getvalue()) + 4)
+        self.assertEqual(
+            len(with_dict_id.getvalue()), len(no_dict_id.getvalue()) + 4
+        )
 
     def test_memory_size(self):
         cctx = zstd.ZstdCompressor(level=3)
@@ -1337,7 +1362,9 @@
         for chunk in cctx.read_to_iter(b"foobar"):
             pass
 
-        with self.assertRaisesRegex(ValueError, "must pass an object with a read"):
+        with self.assertRaisesRegex(
+            ValueError, "must pass an object with a read"
+        ):
             for chunk in cctx.read_to_iter(True):
                 pass
 
@@ -1513,7 +1540,9 @@
 
         dctx = zstd.ZstdDecompressor()
 
-        self.assertEqual(dctx.decompress(b"".join(chunks)), (b"x" * 1000) + (b"y" * 24))
+        self.assertEqual(
+            dctx.decompress(b"".join(chunks)), (b"x" * 1000) + (b"y" * 24)
+        )
 
     def test_small_chunk_size(self):
         cctx = zstd.ZstdCompressor()
@@ -1533,7 +1562,8 @@
 
         dctx = zstd.ZstdDecompressor()
         self.assertEqual(
-            dctx.decompress(b"".join(chunks), max_output_size=10000), b"foo" * 1024
+            dctx.decompress(b"".join(chunks), max_output_size=10000),
+            b"foo" * 1024,
         )
 
     def test_input_types(self):
@@ -1602,7 +1632,8 @@
         list(chunker.finish())
 
         with self.assertRaisesRegex(
-            zstd.ZstdError, r"cannot call compress\(\) after compression finished"
+            zstd.ZstdError,
+            r"cannot call compress\(\) after compression finished",
         ):
             list(chunker.compress(b"foo"))
 
@@ -1644,7 +1675,9 @@
         with self.assertRaises(TypeError):
             cctx.multi_compress_to_buffer((1, 2))
 
-        with self.assertRaisesRegex(TypeError, "item 0 not a bytes like object"):
+        with self.assertRaisesRegex(
+            TypeError, "item 0 not a bytes like object"
+        ):
             cctx.multi_compress_to_buffer([u"foo"])
 
     def test_empty_input(self):
--- a/contrib/python-zstandard/tests/test_compressor_fuzzing.py	Tue Jan 21 15:45:06 2020 -0800
+++ b/contrib/python-zstandard/tests/test_compressor_fuzzing.py	Wed Jan 22 22:23:04 2020 -0800
@@ -28,9 +28,13 @@
         original=strategies.sampled_from(random_input_data()),
         level=strategies.integers(min_value=1, max_value=5),
         source_read_size=strategies.integers(1, 16384),
-        read_size=strategies.integers(-1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE),
+        read_size=strategies.integers(
+            -1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE
+        ),
     )
-    def test_stream_source_read(self, original, level, source_read_size, read_size):
+    def test_stream_source_read(
+        self, original, level, source_read_size, read_size
+    ):
         if read_size == 0:
             read_size = -1
 
@@ -58,9 +62,13 @@
         original=strategies.sampled_from(random_input_data()),
         level=strategies.integers(min_value=1, max_value=5),
         source_read_size=strategies.integers(1, 16384),
-        read_size=strategies.integers(-1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE),
+        read_size=strategies.integers(
+            -1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE
+        ),
     )
-    def test_buffer_source_read(self, original, level, source_read_size, read_size):
+    def test_buffer_source_read(
+        self, original, level, source_read_size, read_size
+    ):
         if read_size == 0:
             read_size = -1
 
@@ -155,9 +163,13 @@
         original=strategies.sampled_from(random_input_data()),
         level=strategies.integers(min_value=1, max_value=5),
         source_read_size=strategies.integers(1, 16384),
-        read_size=strategies.integers(1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE),
+        read_size=strategies.integers(
+            1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE
+        ),
     )
-    def test_stream_source_readinto(self, original, level, source_read_size, read_size):
+    def test_stream_source_readinto(
+        self, original, level, source_read_size, read_size
+    ):
         refctx = zstd.ZstdCompressor(level=level)
         ref_frame = refctx.compress(original)
 
@@ -184,9 +196,13 @@
         original=strategies.sampled_from(random_input_data()),
         level=strategies.integers(min_value=1, max_value=5),
         source_read_size=strategies.integers(1, 16384),
-        read_size=strategies.integers(1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE),
+        read_size=strategies.integers(
+            1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE
+        ),
     )
-    def test_buffer_source_readinto(self, original, level, source_read_size, read_size):
+    def test_buffer_source_readinto(
+        self, original, level, source_read_size, read_size
+    ):
 
         refctx = zstd.ZstdCompressor(level=level)
         ref_frame = refctx.compress(original)
@@ -285,9 +301,13 @@
         original=strategies.sampled_from(random_input_data()),
         level=strategies.integers(min_value=1, max_value=5),
         source_read_size=strategies.integers(1, 16384),
-        read_size=strategies.integers(-1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE),
+        read_size=strategies.integers(
+            -1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE
+        ),
     )
-    def test_stream_source_read1(self, original, level, source_read_size, read_size):
+    def test_stream_source_read1(
+        self, original, level, source_read_size, read_size
+    ):
         if read_size == 0:
             read_size = -1
 
@@ -315,9 +335,13 @@
         original=strategies.sampled_from(random_input_data()),
         level=strategies.integers(min_value=1, max_value=5),
         source_read_size=strategies.integers(1, 16384),
-        read_size=strategies.integers(-1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE),
+        read_size=strategies.integers(
+            -1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE
+        ),
     )
-    def test_buffer_source_read1(self, original, level, source_read_size, read_size):
+    def test_buffer_source_read1(
+        self, original, level, source_read_size, read_size
+    ):
         if read_size == 0:
             read_size = -1
 
@@ -412,7 +436,9 @@
         original=strategies.sampled_from(random_input_data()),
         level=strategies.integers(min_value=1, max_value=5),
         source_read_size=strategies.integers(1, 16384),
-        read_size=strategies.integers(1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE),
+        read_size=strategies.integers(
+            1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE
+        ),
     )
     def test_stream_source_readinto1(
         self, original, level, source_read_size, read_size
@@ -446,7 +472,9 @@
         original=strategies.sampled_from(random_input_data()),
         level=strategies.integers(min_value=1, max_value=5),
         source_read_size=strategies.integers(1, 16384),
-        read_size=strategies.integers(1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE),
+        read_size=strategies.integers(
+            1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE
+        ),
     )
     def test_buffer_source_readinto1(
         self, original, level, source_read_size, read_size
@@ -576,7 +604,9 @@
         read_size=strategies.integers(min_value=1, max_value=1048576),
         write_size=strategies.integers(min_value=1, max_value=1048576),
     )
-    def test_read_write_size_variance(self, original, level, read_size, write_size):
+    def test_read_write_size_variance(
+        self, original, level, read_size, write_size
+    ):
         refctx = zstd.ZstdCompressor(level=level)
         ref_frame = refctx.compress(original)
 
@@ -585,7 +615,11 @@
         dest = io.BytesIO()
 
         cctx.copy_stream(
-            source, dest, size=len(original), read_size=read_size, write_size=write_size
+            source,
+            dest,
+            size=len(original),
+            read_size=read_size,
+            write_size=write_size,
         )
 
         self.assertEqual(dest.getvalue(), ref_frame)
@@ -675,7 +709,9 @@
         decompressed_chunks.append(dobj.decompress(chunk))
 
         self.assertEqual(
-            dctx.decompress(b"".join(compressed_chunks), max_output_size=len(original)),
+            dctx.decompress(
+                b"".join(compressed_chunks), max_output_size=len(original)
+            ),
             original,
         )
         self.assertEqual(b"".join(decompressed_chunks), original)
@@ -690,7 +726,9 @@
         read_size=strategies.integers(min_value=1, max_value=4096),
         write_size=strategies.integers(min_value=1, max_value=4096),
     )
-    def test_read_write_size_variance(self, original, level, read_size, write_size):
+    def test_read_write_size_variance(
+        self, original, level, read_size, write_size
+    ):
         refcctx = zstd.ZstdCompressor(level=level)
         ref_frame = refcctx.compress(original)
 
@@ -699,7 +737,10 @@
         cctx = zstd.ZstdCompressor(level=level)
         chunks = list(
             cctx.read_to_iter(
-                source, size=len(original), read_size=read_size, write_size=write_size
+                source,
+                size=len(original),
+                read_size=read_size,
+                write_size=write_size,
             )
         )
 
@@ -710,7 +751,9 @@
 class TestCompressor_multi_compress_to_buffer_fuzzing(TestCase):
     @hypothesis.given(
         original=strategies.lists(
-            strategies.sampled_from(random_input_data()), min_size=1, max_size=1024
+            strategies.sampled_from(random_input_data()),
+            min_size=1,
+            max_size=1024,
         ),
         threads=strategies.integers(min_value=1, max_value=8),
         use_dict=strategies.booleans(),
@@ -776,7 +819,8 @@
         dctx = zstd.ZstdDecompressor()
 
         self.assertEqual(
-            dctx.decompress(b"".join(chunks), max_output_size=len(original)), original
+            dctx.decompress(b"".join(chunks), max_output_size=len(original)),
+            original,
         )
 
         self.assertTrue(all(len(chunk) == chunk_size for chunk in chunks[:-1]))
@@ -794,7 +838,9 @@
         input_sizes=strategies.data(),
         flushes=strategies.data(),
     )
-    def test_flush_block(self, original, level, chunk_size, input_sizes, flushes):
+    def test_flush_block(
+        self, original, level, chunk_size, input_sizes, flushes
+    ):
         cctx = zstd.ZstdCompressor(level=level)
         chunker = cctx.chunker(chunk_size=chunk_size)
 
@@ -830,7 +876,9 @@
         decompressed_chunks.append(dobj.decompress(b"".join(chunks)))
 
         self.assertEqual(
-            dctx.decompress(b"".join(compressed_chunks), max_output_size=len(original)),
+            dctx.decompress(
+                b"".join(compressed_chunks), max_output_size=len(original)
+            ),
             original,
         )
         self.assertEqual(b"".join(decompressed_chunks), original)
--- a/contrib/python-zstandard/tests/test_data_structures.py	Tue Jan 21 15:45:06 2020 -0800
+++ b/contrib/python-zstandard/tests/test_data_structures.py	Wed Jan 22 22:23:04 2020 -0800
@@ -65,7 +65,9 @@
         p = zstd.ZstdCompressionParameters(threads=4)
         self.assertEqual(p.threads, 4)
 
-        p = zstd.ZstdCompressionParameters(threads=2, job_size=1048576, overlap_log=6)
+        p = zstd.ZstdCompressionParameters(
+            threads=2, job_size=1048576, overlap_log=6
+        )
         self.assertEqual(p.threads, 2)
         self.assertEqual(p.job_size, 1048576)
         self.assertEqual(p.overlap_log, 6)
@@ -128,7 +130,9 @@
         with self.assertRaisesRegex(
             ValueError, "cannot specify both ldm_hash_rate_log"
         ):
-            zstd.ZstdCompressionParameters(ldm_hash_rate_log=8, ldm_hash_every_log=4)
+            zstd.ZstdCompressionParameters(
+                ldm_hash_rate_log=8, ldm_hash_every_log=4
+            )
 
         p = zstd.ZstdCompressionParameters(ldm_hash_rate_log=8)
         self.assertEqual(p.ldm_hash_every_log, 8)
@@ -137,7 +141,9 @@
         self.assertEqual(p.ldm_hash_every_log, 16)
 
     def test_overlap_log(self):
-        with self.assertRaisesRegex(ValueError, "cannot specify both overlap_log"):
+        with self.assertRaisesRegex(
+            ValueError, "cannot specify both overlap_log"
+        ):
             zstd.ZstdCompressionParameters(overlap_log=1, overlap_size_log=9)
 
         p = zstd.ZstdCompressionParameters(overlap_log=2)
@@ -169,10 +175,14 @@
                     zstd.get_frame_parameters(u"foobarbaz")
 
     def test_invalid_input_sizes(self):
-        with self.assertRaisesRegex(zstd.ZstdError, "not enough data for frame"):
+        with self.assertRaisesRegex(
+            zstd.ZstdError, "not enough data for frame"
+        ):
             zstd.get_frame_parameters(b"")
 
-        with self.assertRaisesRegex(zstd.ZstdError, "not enough data for frame"):
+        with self.assertRaisesRegex(
+            zstd.ZstdError, "not enough data for frame"
+        ):
             zstd.get_frame_parameters(zstd.FRAME_HEADER)
 
     def test_invalid_frame(self):
@@ -201,7 +211,9 @@
         self.assertTrue(params.has_checksum)
 
         # Upper 2 bits indicate content size.
-        params = zstd.get_frame_parameters(zstd.FRAME_HEADER + b"\x40\x00\xff\x00")
+        params = zstd.get_frame_parameters(
+            zstd.FRAME_HEADER + b"\x40\x00\xff\x00"
+        )
         self.assertEqual(params.content_size, 511)
         self.assertEqual(params.window_size, 1024)
         self.assertEqual(params.dict_id, 0)
@@ -215,7 +227,9 @@
         self.assertFalse(params.has_checksum)
 
         # Set multiple things.
-        params = zstd.get_frame_parameters(zstd.FRAME_HEADER + b"\x45\x40\x0f\x10\x00")
+        params = zstd.get_frame_parameters(
+            zstd.FRAME_HEADER + b"\x45\x40\x0f\x10\x00"
+        )
         self.assertEqual(params.content_size, 272)
         self.assertEqual(params.window_size, 262144)
         self.assertEqual(params.dict_id, 15)
--- a/contrib/python-zstandard/tests/test_data_structures_fuzzing.py	Tue Jan 21 15:45:06 2020 -0800
+++ b/contrib/python-zstandard/tests/test_data_structures_fuzzing.py	Wed Jan 22 22:23:04 2020 -0800
@@ -23,7 +23,9 @@
 s_chainlog = strategies.integers(
     min_value=zstd.CHAINLOG_MIN, max_value=zstd.CHAINLOG_MAX
 )
-s_hashlog = strategies.integers(min_value=zstd.HASHLOG_MIN, max_value=zstd.HASHLOG_MAX)
+s_hashlog = strategies.integers(
+    min_value=zstd.HASHLOG_MIN, max_value=zstd.HASHLOG_MAX
+)
 s_searchlog = strategies.integers(
     min_value=zstd.SEARCHLOG_MIN, max_value=zstd.SEARCHLOG_MAX
 )
@@ -61,7 +63,14 @@
         s_strategy,
     )
     def test_valid_init(
-        self, windowlog, chainlog, hashlog, searchlog, minmatch, targetlength, strategy
+        self,
+        windowlog,
+        chainlog,
+        hashlog,
+        searchlog,
+        minmatch,
+        targetlength,
+        strategy,
     ):
         zstd.ZstdCompressionParameters(
             window_log=windowlog,
@@ -83,7 +92,14 @@
         s_strategy,
     )
     def test_estimated_compression_context_size(
-        self, windowlog, chainlog, hashlog, searchlog, minmatch, targetlength, strategy
+        self,
+        windowlog,
+        chainlog,
+        hashlog,
+        searchlog,
+        minmatch,
+        targetlength,
+        strategy,
     ):
         if minmatch == zstd.MINMATCH_MIN and strategy in (
             zstd.STRATEGY_FAST,
--- a/contrib/python-zstandard/tests/test_decompressor.py	Tue Jan 21 15:45:06 2020 -0800
+++ b/contrib/python-zstandard/tests/test_decompressor.py	Wed Jan 22 22:23:04 2020 -0800
@@ -170,11 +170,15 @@
             dctx.decompress(compressed, max_output_size=len(source) - 1)
 
         # Input size + 1 works
-        decompressed = dctx.decompress(compressed, max_output_size=len(source) + 1)
+        decompressed = dctx.decompress(
+            compressed, max_output_size=len(source) + 1
+        )
         self.assertEqual(decompressed, source)
 
         # A much larger buffer works.
-        decompressed = dctx.decompress(compressed, max_output_size=len(source) * 64)
+        decompressed = dctx.decompress(
+            compressed, max_output_size=len(source) * 64
+        )
         self.assertEqual(decompressed, source)
 
     def test_stupidly_large_output_buffer(self):
@@ -237,7 +241,8 @@
         dctx = zstd.ZstdDecompressor(max_window_size=2 ** zstd.WINDOWLOG_MIN)
 
         with self.assertRaisesRegex(
-            zstd.ZstdError, "decompression error: Frame requires too much memory"
+            zstd.ZstdError,
+            "decompression error: Frame requires too much memory",
         ):
             dctx.decompress(frame, max_output_size=len(source))
 
@@ -291,7 +296,9 @@
         self.assertEqual(w, len(source.getvalue()))
 
     def test_read_write_size(self):
-        source = OpCountingBytesIO(zstd.ZstdCompressor().compress(b"foobarfoobar"))
+        source = OpCountingBytesIO(
+            zstd.ZstdCompressor().compress(b"foobarfoobar")
+        )
 
         dest = OpCountingBytesIO()
         dctx = zstd.ZstdDecompressor()
@@ -309,7 +316,9 @@
         dctx = zstd.ZstdDecompressor()
 
         with dctx.stream_reader(b"foo") as reader:
-            with self.assertRaisesRegex(ValueError, "cannot __enter__ multiple times"):
+            with self.assertRaisesRegex(
+                ValueError, "cannot __enter__ multiple times"
+            ):
                 with reader as reader2:
                     pass
 
@@ -474,7 +483,9 @@
         dctx = zstd.ZstdDecompressor()
 
         with dctx.stream_reader(frame) as reader:
-            with self.assertRaisesRegex(ValueError, "cannot seek to negative position"):
+            with self.assertRaisesRegex(
+                ValueError, "cannot seek to negative position"
+            ):
                 reader.seek(-1, os.SEEK_SET)
 
             reader.read(1)
@@ -490,7 +501,8 @@
                 reader.seek(-1, os.SEEK_CUR)
 
             with self.assertRaisesRegex(
-                ValueError, "zstd decompression streams cannot be seeked with SEEK_END"
+                ValueError,
+                "zstd decompression streams cannot be seeked with SEEK_END",
             ):
                 reader.seek(0, os.SEEK_END)
 
@@ -743,7 +755,9 @@
 
     def test_read_lines(self):
         cctx = zstd.ZstdCompressor()
-        source = b"\n".join(("line %d" % i).encode("ascii") for i in range(1024))
+        source = b"\n".join(
+            ("line %d" % i).encode("ascii") for i in range(1024)
+        )
 
         frame = cctx.compress(source)
 
@@ -821,7 +835,9 @@
         dobj = dctx.decompressobj()
         dobj.decompress(data)
 
-        with self.assertRaisesRegex(zstd.ZstdError, "cannot use a decompressobj"):
+        with self.assertRaisesRegex(
+            zstd.ZstdError, "cannot use a decompressobj"
+        ):
             dobj.decompress(data)
             self.assertIsNone(dobj.flush())
 
@@ -1124,7 +1140,9 @@
         # Buffer protocol works.
         dctx.read_to_iter(b"foobar")
 
-        with self.assertRaisesRegex(ValueError, "must pass an object with a read"):
+        with self.assertRaisesRegex(
+            ValueError, "must pass an object with a read"
+        ):
             b"".join(dctx.read_to_iter(True))
 
     def test_empty_input(self):
@@ -1226,7 +1244,9 @@
         decompressed = b"".join(chunks)
         self.assertEqual(decompressed, source.getvalue())
 
-    @unittest.skipUnless("ZSTD_SLOW_TESTS" in os.environ, "ZSTD_SLOW_TESTS not set")
+    @unittest.skipUnless(
+        "ZSTD_SLOW_TESTS" in os.environ, "ZSTD_SLOW_TESTS not set"
+    )
     def test_large_input(self):
         bytes = list(struct.Struct(">B").pack(i) for i in range(256))
         compressed = NonClosingBytesIO()
@@ -1241,13 +1261,16 @@
                     len(compressed.getvalue())
                     > zstd.DECOMPRESSION_RECOMMENDED_INPUT_SIZE
                 )
-                have_raw = input_size > zstd.DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE * 2
+                have_raw = (
+                    input_size > zstd.DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE * 2
+                )
                 if have_compressed and have_raw:
                     break
 
         compressed = io.BytesIO(compressed.getvalue())
         self.assertGreater(
-            len(compressed.getvalue()), zstd.DECOMPRESSION_RECOMMENDED_INPUT_SIZE
+            len(compressed.getvalue()),
+            zstd.DECOMPRESSION_RECOMMENDED_INPUT_SIZE,
         )
 
         dctx = zstd.ZstdDecompressor()
@@ -1303,7 +1326,9 @@
         self.assertEqual(streamed, source.getvalue())
 
     def test_read_write_size(self):
-        source = OpCountingBytesIO(zstd.ZstdCompressor().compress(b"foobarfoobar"))
+        source = OpCountingBytesIO(
+            zstd.ZstdCompressor().compress(b"foobarfoobar")
+        )
         dctx = zstd.ZstdDecompressor()
         for chunk in dctx.read_to_iter(source, read_size=1, write_size=1):
             self.assertEqual(len(chunk), 1)
@@ -1355,10 +1380,14 @@
         ):
             dctx.decompress_content_dict_chain([zstd.FRAME_HEADER])
 
-        with self.assertRaisesRegex(ValueError, "chunk 0 is not a valid zstd frame"):
+        with self.assertRaisesRegex(
+            ValueError, "chunk 0 is not a valid zstd frame"
+        ):
             dctx.decompress_content_dict_chain([b"foo" * 8])
 
-        no_size = zstd.ZstdCompressor(write_content_size=False).compress(b"foo" * 64)
+        no_size = zstd.ZstdCompressor(write_content_size=False).compress(
+            b"foo" * 64
+        )
 
         with self.assertRaisesRegex(
             ValueError, "chunk 0 missing content size in frame"
@@ -1389,10 +1418,14 @@
         ):
             dctx.decompress_content_dict_chain([initial, zstd.FRAME_HEADER])
 
-        with self.assertRaisesRegex(ValueError, "chunk 1 is not a valid zstd frame"):
+        with self.assertRaisesRegex(
+            ValueError, "chunk 1 is not a valid zstd frame"
+        ):
             dctx.decompress_content_dict_chain([initial, b"foo" * 8])
 
-        no_size = zstd.ZstdCompressor(write_content_size=False).compress(b"foo" * 64)
+        no_size = zstd.ZstdCompressor(write_content_size=False).compress(
+            b"foo" * 64
+        )
 
         with self.assertRaisesRegex(
             ValueError, "chunk 1 missing content size in frame"
@@ -1400,7 +1433,9 @@
             dctx.decompress_content_dict_chain([initial, no_size])
 
         # Corrupt second frame.
-        cctx = zstd.ZstdCompressor(dict_data=zstd.ZstdCompressionDict(b"foo" * 64))
+        cctx = zstd.ZstdCompressor(
+            dict_data=zstd.ZstdCompressionDict(b"foo" * 64)
+        )
         frame = cctx.compress(b"bar" * 64)
         frame = frame[0:12] + frame[15:]
 
@@ -1447,7 +1482,9 @@
         with self.assertRaises(TypeError):
             dctx.multi_decompress_to_buffer((1, 2))
 
-        with self.assertRaisesRegex(TypeError, "item 0 not a bytes like object"):
+        with self.assertRaisesRegex(
+            TypeError, "item 0 not a bytes like object"
+        ):
             dctx.multi_decompress_to_buffer([u"foo"])
 
         with self.assertRaisesRegex(
@@ -1491,7 +1528,9 @@
         if not hasattr(dctx, "multi_decompress_to_buffer"):
             self.skipTest("multi_decompress_to_buffer not available")
 
-        result = dctx.multi_decompress_to_buffer(frames, decompressed_sizes=sizes)
+        result = dctx.multi_decompress_to_buffer(
+            frames, decompressed_sizes=sizes
+        )
 
         self.assertEqual(len(result), len(frames))
         self.assertEqual(result.size(), sum(map(len, original)))
@@ -1582,10 +1621,15 @@
         # And a manual mode.
         b = b"".join([frames[0].tobytes(), frames[1].tobytes()])
         b1 = zstd.BufferWithSegments(
-            b, struct.pack("=QQQQ", 0, len(frames[0]), len(frames[0]), len(frames[1]))
+            b,
+            struct.pack(
+                "=QQQQ", 0, len(frames[0]), len(frames[0]), len(frames[1])
+            ),
         )
 
-        b = b"".join([frames[2].tobytes(), frames[3].tobytes(), frames[4].tobytes()])
+        b = b"".join(
+            [frames[2].tobytes(), frames[3].tobytes(), frames[4].tobytes()]
+        )
         b2 = zstd.BufferWithSegments(
             b,
             struct.pack(
--- a/contrib/python-zstandard/tests/test_decompressor_fuzzing.py	Tue Jan 21 15:45:06 2020 -0800
+++ b/contrib/python-zstandard/tests/test_decompressor_fuzzing.py	Wed Jan 22 22:23:04 2020 -0800
@@ -196,7 +196,9 @@
         streaming=strategies.booleans(),
         source_read_size=strategies.integers(1, 1048576),
     )
-    def test_stream_source_readall(self, original, level, streaming, source_read_size):
+    def test_stream_source_readall(
+        self, original, level, streaming, source_read_size
+    ):
         cctx = zstd.ZstdCompressor(level=level)
 
         if streaming:
@@ -398,7 +400,9 @@
         write_size=strategies.integers(min_value=1, max_value=8192),
         input_sizes=strategies.data(),
     )
-    def test_write_size_variance(self, original, level, write_size, input_sizes):
+    def test_write_size_variance(
+        self, original, level, write_size, input_sizes
+    ):
         cctx = zstd.ZstdCompressor(level=level)
         frame = cctx.compress(original)
 
@@ -433,7 +437,9 @@
         read_size=strategies.integers(min_value=1, max_value=8192),
         write_size=strategies.integers(min_value=1, max_value=8192),
     )
-    def test_read_write_size_variance(self, original, level, read_size, write_size):
+    def test_read_write_size_variance(
+        self, original, level, read_size, write_size
+    ):
         cctx = zstd.ZstdCompressor(level=level)
         frame = cctx.compress(original)
 
@@ -441,7 +447,9 @@
         dest = io.BytesIO()
 
         dctx = zstd.ZstdDecompressor()
-        dctx.copy_stream(source, dest, read_size=read_size, write_size=write_size)
+        dctx.copy_stream(
+            source, dest, read_size=read_size, write_size=write_size
+        )
 
         self.assertEqual(dest.getvalue(), original)
 
@@ -490,11 +498,14 @@
         original=strategies.sampled_from(random_input_data()),
         level=strategies.integers(min_value=1, max_value=5),
         write_size=strategies.integers(
-            min_value=1, max_value=4 * zstd.DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE
+            min_value=1,
+            max_value=4 * zstd.DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE,
         ),
         chunk_sizes=strategies.data(),
     )
-    def test_random_output_sizes(self, original, level, write_size, chunk_sizes):
+    def test_random_output_sizes(
+        self, original, level, write_size, chunk_sizes
+    ):
         cctx = zstd.ZstdCompressor(level=level)
         frame = cctx.compress(original)
 
@@ -524,7 +535,9 @@
         read_size=strategies.integers(min_value=1, max_value=4096),
         write_size=strategies.integers(min_value=1, max_value=4096),
     )
-    def test_read_write_size_variance(self, original, level, read_size, write_size):
+    def test_read_write_size_variance(
+        self, original, level, read_size, write_size
+    ):
         cctx = zstd.ZstdCompressor(level=level)
         frame = cctx.compress(original)
 
@@ -532,7 +545,9 @@
 
         dctx = zstd.ZstdDecompressor()
         chunks = list(
-            dctx.read_to_iter(source, read_size=read_size, write_size=write_size)
+            dctx.read_to_iter(
+                source, read_size=read_size, write_size=write_size
+            )
         )
 
         self.assertEqual(b"".join(chunks), original)
@@ -542,7 +557,9 @@
 class TestDecompressor_multi_decompress_to_buffer_fuzzing(TestCase):
     @hypothesis.given(
         original=strategies.lists(
-            strategies.sampled_from(random_input_data()), min_size=1, max_size=1024
+            strategies.sampled_from(random_input_data()),
+            min_size=1,
+            max_size=1024,
         ),
         threads=strategies.integers(min_value=1, max_value=8),
         use_dict=strategies.booleans(),
--- a/contrib/python-zstandard/tests/test_train_dictionary.py	Tue Jan 21 15:45:06 2020 -0800
+++ b/contrib/python-zstandard/tests/test_train_dictionary.py	Wed Jan 22 22:23:04 2020 -0800
@@ -51,11 +51,15 @@
         self.assertEqual(d.d, 16)
 
     def test_set_dict_id(self):
-        d = zstd.train_dictionary(8192, generate_samples(), k=64, d=16, dict_id=42)
+        d = zstd.train_dictionary(
+            8192, generate_samples(), k=64, d=16, dict_id=42
+        )
         self.assertEqual(d.dict_id(), 42)
 
     def test_optimize(self):
-        d = zstd.train_dictionary(8192, generate_samples(), threads=-1, steps=1, d=16)
+        d = zstd.train_dictionary(
+            8192, generate_samples(), threads=-1, steps=1, d=16
+        )
 
         # This varies by platform.
         self.assertIn(d.k, (50, 2000))
@@ -71,10 +75,14 @@
     def test_bad_precompute_compress(self):
         d = zstd.train_dictionary(8192, generate_samples(), k=64, d=16)
 
-        with self.assertRaisesRegex(ValueError, "must specify one of level or "):
+        with self.assertRaisesRegex(
+            ValueError, "must specify one of level or "
+        ):
             d.precompute_compress()
 
-        with self.assertRaisesRegex(ValueError, "must only specify one of level or "):
+        with self.assertRaisesRegex(
+            ValueError, "must only specify one of level or "
+        ):
             d.precompute_compress(
                 level=3, compression_params=zstd.CompressionParameters()
             )
@@ -88,5 +96,7 @@
         d = zstd.ZstdCompressionDict(
             b"dictcontent" * 64, dict_type=zstd.DICT_TYPE_FULLDICT
         )
-        with self.assertRaisesRegex(zstd.ZstdError, "unable to precompute dictionary"):
+        with self.assertRaisesRegex(
+            zstd.ZstdError, "unable to precompute dictionary"
+        ):
             d.precompute_compress(level=1)
--- a/contrib/python-zstandard/zstandard/cffi.py	Tue Jan 21 15:45:06 2020 -0800
+++ b/contrib/python-zstandard/zstandard/cffi.py	Wed Jan 22 22:23:04 2020 -0800
@@ -299,10 +299,14 @@
         _set_compression_parameter(params, lib.ZSTD_c_chainLog, chain_log)
         _set_compression_parameter(params, lib.ZSTD_c_searchLog, search_log)
         _set_compression_parameter(params, lib.ZSTD_c_minMatch, min_match)
-        _set_compression_parameter(params, lib.ZSTD_c_targetLength, target_length)
+        _set_compression_parameter(
+            params, lib.ZSTD_c_targetLength, target_length
+        )
 
         if strategy != -1 and compression_strategy != -1:
-            raise ValueError("cannot specify both compression_strategy and strategy")
+            raise ValueError(
+                "cannot specify both compression_strategy and strategy"
+            )
 
         if compression_strategy != -1:
             strategy = compression_strategy
@@ -313,12 +317,16 @@
         _set_compression_parameter(
             params, lib.ZSTD_c_contentSizeFlag, write_content_size
         )
-        _set_compression_parameter(params, lib.ZSTD_c_checksumFlag, write_checksum)
+        _set_compression_parameter(
+            params, lib.ZSTD_c_checksumFlag, write_checksum
+        )
         _set_compression_parameter(params, lib.ZSTD_c_dictIDFlag, write_dict_id)
         _set_compression_parameter(params, lib.ZSTD_c_jobSize, job_size)
 
         if overlap_log != -1 and overlap_size_log != -1:
-            raise ValueError("cannot specify both overlap_log and overlap_size_log")
+            raise ValueError(
+                "cannot specify both overlap_log and overlap_size_log"
+            )
 
         if overlap_size_log != -1:
             overlap_log = overlap_size_log
@@ -326,12 +334,16 @@
             overlap_log = 0
 
         _set_compression_parameter(params, lib.ZSTD_c_overlapLog, overlap_log)
-        _set_compression_parameter(params, lib.ZSTD_c_forceMaxWindow, force_max_window)
+        _set_compression_parameter(
+            params, lib.ZSTD_c_forceMaxWindow, force_max_window
+        )
         _set_compression_parameter(
             params, lib.ZSTD_c_enableLongDistanceMatching, enable_ldm
         )
         _set_compression_parameter(params, lib.ZSTD_c_ldmHashLog, ldm_hash_log)
-        _set_compression_parameter(params, lib.ZSTD_c_ldmMinMatch, ldm_min_match)
+        _set_compression_parameter(
+            params, lib.ZSTD_c_ldmMinMatch, ldm_min_match
+        )
         _set_compression_parameter(
             params, lib.ZSTD_c_ldmBucketSizeLog, ldm_bucket_size_log
         )
@@ -346,7 +358,9 @@
         elif ldm_hash_rate_log == -1:
             ldm_hash_rate_log = 0
 
-        _set_compression_parameter(params, lib.ZSTD_c_ldmHashRateLog, ldm_hash_rate_log)
+        _set_compression_parameter(
+            params, lib.ZSTD_c_ldmHashRateLog, ldm_hash_rate_log
+        )
 
     @property
     def format(self):
@@ -354,7 +368,9 @@
 
     @property
     def compression_level(self):
-        return _get_compression_parameter(self._params, lib.ZSTD_c_compressionLevel)
+        return _get_compression_parameter(
+            self._params, lib.ZSTD_c_compressionLevel
+        )
 
     @property
     def window_log(self):
@@ -386,7 +402,9 @@
 
     @property
     def write_content_size(self):
-        return _get_compression_parameter(self._params, lib.ZSTD_c_contentSizeFlag)
+        return _get_compression_parameter(
+            self._params, lib.ZSTD_c_contentSizeFlag
+        )
 
     @property
     def write_checksum(self):
@@ -410,7 +428,9 @@
 
     @property
     def force_max_window(self):
-        return _get_compression_parameter(self._params, lib.ZSTD_c_forceMaxWindow)
+        return _get_compression_parameter(
+            self._params, lib.ZSTD_c_forceMaxWindow
+        )
 
     @property
     def enable_ldm(self):
@@ -428,11 +448,15 @@
 
     @property
     def ldm_bucket_size_log(self):
-        return _get_compression_parameter(self._params, lib.ZSTD_c_ldmBucketSizeLog)
+        return _get_compression_parameter(
+            self._params, lib.ZSTD_c_ldmBucketSizeLog
+        )
 
     @property
     def ldm_hash_rate_log(self):
-        return _get_compression_parameter(self._params, lib.ZSTD_c_ldmHashRateLog)
+        return _get_compression_parameter(
+            self._params, lib.ZSTD_c_ldmHashRateLog
+        )
 
     @property
     def ldm_hash_every_log(self):
@@ -457,7 +481,8 @@
     zresult = lib.ZSTD_CCtxParams_setParameter(params, param, value)
     if lib.ZSTD_isError(zresult):
         raise ZstdError(
-            "unable to set compression context parameter: %s" % _zstd_error(zresult)
+            "unable to set compression context parameter: %s"
+            % _zstd_error(zresult)
         )
 
 
@@ -467,14 +492,17 @@
     zresult = lib.ZSTD_CCtxParams_getParameter(params, param, result)
     if lib.ZSTD_isError(zresult):
         raise ZstdError(
-            "unable to get compression context parameter: %s" % _zstd_error(zresult)
+            "unable to get compression context parameter: %s"
+            % _zstd_error(zresult)
         )
 
     return result[0]
 
 
 class ZstdCompressionWriter(object):
-    def __init__(self, compressor, writer, source_size, write_size, write_return_read):
+    def __init__(
+        self, compressor, writer, source_size, write_size, write_return_read
+    ):
         self._compressor = compressor
         self._writer = writer
         self._write_size = write_size
@@ -491,7 +519,9 @@
 
         zresult = lib.ZSTD_CCtx_setPledgedSrcSize(compressor._cctx, source_size)
         if lib.ZSTD_isError(zresult):
-            raise ZstdError("error setting source size: %s" % _zstd_error(zresult))
+            raise ZstdError(
+                "error setting source size: %s" % _zstd_error(zresult)
+            )
 
     def __enter__(self):
         if self._closed:
@@ -595,13 +625,20 @@
 
         while in_buffer.pos < in_buffer.size:
             zresult = lib.ZSTD_compressStream2(
-                self._compressor._cctx, out_buffer, in_buffer, lib.ZSTD_e_continue
+                self._compressor._cctx,
+                out_buffer,
+                in_buffer,
+                lib.ZSTD_e_continue,
             )
             if lib.ZSTD_isError(zresult):
-                raise ZstdError("zstd compress error: %s" % _zstd_error(zresult))
+                raise ZstdError(
+                    "zstd compress error: %s" % _zstd_error(zresult)
+                )
 
             if out_buffer.pos:
-                self._writer.write(ffi.buffer(out_buffer.dst, out_buffer.pos)[:])
+                self._writer.write(
+                    ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
+                )
                 total_write += out_buffer.pos
                 self._bytes_compressed += out_buffer.pos
                 out_buffer.pos = 0
@@ -637,10 +674,14 @@
                 self._compressor._cctx, out_buffer, in_buffer, flush
             )
             if lib.ZSTD_isError(zresult):
-                raise ZstdError("zstd compress error: %s" % _zstd_error(zresult))
+                raise ZstdError(
+                    "zstd compress error: %s" % _zstd_error(zresult)
+                )
 
             if out_buffer.pos:
-                self._writer.write(ffi.buffer(out_buffer.dst, out_buffer.pos)[:])
+                self._writer.write(
+                    ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
+                )
                 total_write += out_buffer.pos
                 self._bytes_compressed += out_buffer.pos
                 out_buffer.pos = 0
@@ -672,7 +713,9 @@
                 self._compressor._cctx, self._out, source, lib.ZSTD_e_continue
             )
             if lib.ZSTD_isError(zresult):
-                raise ZstdError("zstd compress error: %s" % _zstd_error(zresult))
+                raise ZstdError(
+                    "zstd compress error: %s" % _zstd_error(zresult)
+                )
 
             if self._out.pos:
                 chunks.append(ffi.buffer(self._out.dst, self._out.pos)[:])
@@ -681,7 +724,10 @@
         return b"".join(chunks)
 
     def flush(self, flush_mode=COMPRESSOBJ_FLUSH_FINISH):
-        if flush_mode not in (COMPRESSOBJ_FLUSH_FINISH, COMPRESSOBJ_FLUSH_BLOCK):
+        if flush_mode not in (
+            COMPRESSOBJ_FLUSH_FINISH,
+            COMPRESSOBJ_FLUSH_BLOCK,
+        ):
             raise ValueError("flush mode not recognized")
 
         if self._finished:
@@ -768,7 +814,9 @@
                 self._in.pos = 0
 
             if lib.ZSTD_isError(zresult):
-                raise ZstdError("zstd compress error: %s" % _zstd_error(zresult))
+                raise ZstdError(
+                    "zstd compress error: %s" % _zstd_error(zresult)
+                )
 
             if self._out.pos == self._out.size:
                 yield ffi.buffer(self._out.dst, self._out.pos)[:]
@@ -780,7 +828,8 @@
 
         if self._in.src != ffi.NULL:
             raise ZstdError(
-                "cannot call flush() before consuming output from " "previous operation"
+                "cannot call flush() before consuming output from "
+                "previous operation"
             )
 
         while True:
@@ -788,7 +837,9 @@
                 self._compressor._cctx, self._out, self._in, lib.ZSTD_e_flush
             )
             if lib.ZSTD_isError(zresult):
-                raise ZstdError("zstd compress error: %s" % _zstd_error(zresult))
+                raise ZstdError(
+                    "zstd compress error: %s" % _zstd_error(zresult)
+                )
 
             if self._out.pos:
                 yield ffi.buffer(self._out.dst, self._out.pos)[:]
@@ -812,7 +863,9 @@
                 self._compressor._cctx, self._out, self._in, lib.ZSTD_e_end
             )
             if lib.ZSTD_isError(zresult):
-                raise ZstdError("zstd compress error: %s" % _zstd_error(zresult))
+                raise ZstdError(
+                    "zstd compress error: %s" % _zstd_error(zresult)
+                )
 
             if self._out.pos:
                 yield ffi.buffer(self._out.dst, self._out.pos)[:]
@@ -939,7 +992,10 @@
         old_pos = out_buffer.pos
 
         zresult = lib.ZSTD_compressStream2(
-            self._compressor._cctx, out_buffer, self._in_buffer, lib.ZSTD_e_continue
+            self._compressor._cctx,
+            out_buffer,
+            self._in_buffer,
+            lib.ZSTD_e_continue,
         )
 
         self._bytes_compressed += out_buffer.pos - old_pos
@@ -997,7 +1053,9 @@
         self._bytes_compressed += out_buffer.pos - old_pos
 
         if lib.ZSTD_isError(zresult):
-            raise ZstdError("error ending compression stream: %s", _zstd_error(zresult))
+            raise ZstdError(
+                "error ending compression stream: %s", _zstd_error(zresult)
+            )
 
         if zresult == 0:
             self._finished_output = True
@@ -1102,7 +1160,9 @@
         self._bytes_compressed += out_buffer.pos - old_pos
 
         if lib.ZSTD_isError(zresult):
-            raise ZstdError("error ending compression stream: %s", _zstd_error(zresult))
+            raise ZstdError(
+                "error ending compression stream: %s", _zstd_error(zresult)
+            )
 
         if zresult == 0:
             self._finished_output = True
@@ -1170,13 +1230,17 @@
         threads=0,
     ):
         if level > lib.ZSTD_maxCLevel():
-            raise ValueError("level must be less than %d" % lib.ZSTD_maxCLevel())
+            raise ValueError(
+                "level must be less than %d" % lib.ZSTD_maxCLevel()
+            )
 
         if threads < 0:
             threads = _cpu_count()
 
         if compression_params and write_checksum is not None:
-            raise ValueError("cannot define compression_params and " "write_checksum")
+            raise ValueError(
+                "cannot define compression_params and " "write_checksum"
+            )
 
         if compression_params and write_content_size is not None:
             raise ValueError(
@@ -1184,7 +1248,9 @@
             )
 
         if compression_params and write_dict_id is not None:
-            raise ValueError("cannot define compression_params and " "write_dict_id")
+            raise ValueError(
+                "cannot define compression_params and " "write_dict_id"
+            )
 
         if compression_params and threads:
             raise ValueError("cannot define compression_params and threads")
@@ -1201,7 +1267,9 @@
 
             self._params = ffi.gc(params, lib.ZSTD_freeCCtxParams)
 
-            _set_compression_parameter(self._params, lib.ZSTD_c_compressionLevel, level)
+            _set_compression_parameter(
+                self._params, lib.ZSTD_c_compressionLevel, level
+            )
 
             _set_compression_parameter(
                 self._params,
@@ -1210,7 +1278,9 @@
             )
 
             _set_compression_parameter(
-                self._params, lib.ZSTD_c_checksumFlag, 1 if write_checksum else 0
+                self._params,
+                lib.ZSTD_c_checksumFlag,
+                1 if write_checksum else 0,
             )
 
             _set_compression_parameter(
@@ -1218,7 +1288,9 @@
             )
 
             if threads:
-                _set_compression_parameter(self._params, lib.ZSTD_c_nbWorkers, threads)
+                _set_compression_parameter(
+                    self._params, lib.ZSTD_c_nbWorkers, threads
+                )
 
         cctx = lib.ZSTD_createCCtx()
         if cctx == ffi.NULL:
@@ -1237,10 +1309,13 @@
             )
 
     def _setup_cctx(self):
-        zresult = lib.ZSTD_CCtx_setParametersUsingCCtxParams(self._cctx, self._params)
+        zresult = lib.ZSTD_CCtx_setParametersUsingCCtxParams(
+            self._cctx, self._params
+        )
         if lib.ZSTD_isError(zresult):
             raise ZstdError(
-                "could not set compression parameters: %s" % _zstd_error(zresult)
+                "could not set compression parameters: %s"
+                % _zstd_error(zresult)
             )
 
         dict_data = self._dict_data
@@ -1259,7 +1334,8 @@
 
             if lib.ZSTD_isError(zresult):
                 raise ZstdError(
-                    "could not load compression dictionary: %s" % _zstd_error(zresult)
+                    "could not load compression dictionary: %s"
+                    % _zstd_error(zresult)
                 )
 
     def memory_size(self):
@@ -1275,7 +1351,9 @@
 
         zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, len(data_buffer))
         if lib.ZSTD_isError(zresult):
-            raise ZstdError("error setting source size: %s" % _zstd_error(zresult))
+            raise ZstdError(
+                "error setting source size: %s" % _zstd_error(zresult)
+            )
 
         out_buffer = ffi.new("ZSTD_outBuffer *")
         in_buffer = ffi.new("ZSTD_inBuffer *")
@@ -1307,11 +1385,15 @@
 
         zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, size)
         if lib.ZSTD_isError(zresult):
-            raise ZstdError("error setting source size: %s" % _zstd_error(zresult))
+            raise ZstdError(
+                "error setting source size: %s" % _zstd_error(zresult)
+            )
 
         cobj = ZstdCompressionObj()
         cobj._out = ffi.new("ZSTD_outBuffer *")
-        cobj._dst_buffer = ffi.new("char[]", COMPRESSION_RECOMMENDED_OUTPUT_SIZE)
+        cobj._dst_buffer = ffi.new(
+            "char[]", COMPRESSION_RECOMMENDED_OUTPUT_SIZE
+        )
         cobj._out.dst = cobj._dst_buffer
         cobj._out.size = COMPRESSION_RECOMMENDED_OUTPUT_SIZE
         cobj._out.pos = 0
@@ -1328,7 +1410,9 @@
 
         zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, size)
         if lib.ZSTD_isError(zresult):
-            raise ZstdError("error setting source size: %s" % _zstd_error(zresult))
+            raise ZstdError(
+                "error setting source size: %s" % _zstd_error(zresult)
+            )
 
         return ZstdCompressionChunker(self, chunk_size=chunk_size)
 
@@ -1353,7 +1437,9 @@
 
         zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, size)
         if lib.ZSTD_isError(zresult):
-            raise ZstdError("error setting source size: %s" % _zstd_error(zresult))
+            raise ZstdError(
+                "error setting source size: %s" % _zstd_error(zresult)
+            )
 
         in_buffer = ffi.new("ZSTD_inBuffer *")
         out_buffer = ffi.new("ZSTD_outBuffer *")
@@ -1381,7 +1467,9 @@
                     self._cctx, out_buffer, in_buffer, lib.ZSTD_e_continue
                 )
                 if lib.ZSTD_isError(zresult):
-                    raise ZstdError("zstd compress error: %s" % _zstd_error(zresult))
+                    raise ZstdError(
+                        "zstd compress error: %s" % _zstd_error(zresult)
+                    )
 
                 if out_buffer.pos:
                     ofh.write(ffi.buffer(out_buffer.dst, out_buffer.pos))
@@ -1423,7 +1511,9 @@
 
         zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, size)
         if lib.ZSTD_isError(zresult):
-            raise ZstdError("error setting source size: %s" % _zstd_error(zresult))
+            raise ZstdError(
+                "error setting source size: %s" % _zstd_error(zresult)
+            )
 
         return ZstdCompressionReader(self, source, read_size)
 
@@ -1443,7 +1533,9 @@
         if size < 0:
             size = lib.ZSTD_CONTENTSIZE_UNKNOWN
 
-        return ZstdCompressionWriter(self, writer, size, write_size, write_return_read)
+        return ZstdCompressionWriter(
+            self, writer, size, write_size, write_return_read
+        )
 
     write_to = stream_writer
 
@@ -1473,7 +1565,9 @@
 
         zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, size)
         if lib.ZSTD_isError(zresult):
-            raise ZstdError("error setting source size: %s" % _zstd_error(zresult))
+            raise ZstdError(
+                "error setting source size: %s" % _zstd_error(zresult)
+            )
 
         in_buffer = ffi.new("ZSTD_inBuffer *")
         out_buffer = ffi.new("ZSTD_outBuffer *")
@@ -1517,7 +1611,9 @@
                     self._cctx, out_buffer, in_buffer, lib.ZSTD_e_continue
                 )
                 if lib.ZSTD_isError(zresult):
-                    raise ZstdError("zstd compress error: %s" % _zstd_error(zresult))
+                    raise ZstdError(
+                        "zstd compress error: %s" % _zstd_error(zresult)
+                    )
 
                 if out_buffer.pos:
                     data = ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
@@ -1596,10 +1692,14 @@
     data_buffer = ffi.from_buffer(data)
     zresult = lib.ZSTD_getFrameHeader(params, data_buffer, len(data_buffer))
     if lib.ZSTD_isError(zresult):
-        raise ZstdError("cannot get frame parameters: %s" % _zstd_error(zresult))
+        raise ZstdError(
+            "cannot get frame parameters: %s" % _zstd_error(zresult)
+        )
 
     if zresult:
-        raise ZstdError("not enough data for frame parameters; need %d bytes" % zresult)
+        raise ZstdError(
+            "not enough data for frame parameters; need %d bytes" % zresult
+        )
 
     return FrameParameters(params[0])
 
@@ -1611,9 +1711,14 @@
         self.k = k
         self.d = d
 
-        if dict_type not in (DICT_TYPE_AUTO, DICT_TYPE_RAWCONTENT, DICT_TYPE_FULLDICT):
+        if dict_type not in (
+            DICT_TYPE_AUTO,
+            DICT_TYPE_RAWCONTENT,
+            DICT_TYPE_FULLDICT,
+        ):
             raise ValueError(
-                "invalid dictionary load mode: %d; must use " "DICT_TYPE_* constants"
+                "invalid dictionary load mode: %d; must use "
+                "DICT_TYPE_* constants"
             )
 
         self._dict_type = dict_type
@@ -1630,7 +1735,9 @@
 
     def precompute_compress(self, level=0, compression_params=None):
         if level and compression_params:
-            raise ValueError("must only specify one of level or " "compression_params")
+            raise ValueError(
+                "must only specify one of level or " "compression_params"
+            )
 
         if not level and not compression_params:
             raise ValueError("must specify one of level or compression_params")
@@ -1675,7 +1782,9 @@
         if ddict == ffi.NULL:
             raise ZstdError("could not create decompression dict")
 
-        ddict = ffi.gc(ddict, lib.ZSTD_freeDDict, size=lib.ZSTD_sizeof_DDict(ddict))
+        ddict = ffi.gc(
+            ddict, lib.ZSTD_freeDDict, size=lib.ZSTD_sizeof_DDict(ddict)
+        )
         self.__dict__["_ddict"] = ddict
 
         return ddict
@@ -1805,7 +1914,9 @@
                 self._decompressor._dctx, out_buffer, in_buffer
             )
             if lib.ZSTD_isError(zresult):
-                raise ZstdError("zstd decompressor error: %s" % _zstd_error(zresult))
+                raise ZstdError(
+                    "zstd decompressor error: %s" % _zstd_error(zresult)
+                )
 
             if zresult == 0:
                 self._finished = True
@@ -2105,16 +2216,22 @@
 
         if whence == os.SEEK_SET:
             if pos < 0:
-                raise ValueError("cannot seek to negative position with SEEK_SET")
+                raise ValueError(
+                    "cannot seek to negative position with SEEK_SET"
+                )
 
             if pos < self._bytes_decompressed:
-                raise ValueError("cannot seek zstd decompression stream " "backwards")
+                raise ValueError(
+                    "cannot seek zstd decompression stream " "backwards"
+                )
 
             read_amount = pos - self._bytes_decompressed
 
         elif whence == os.SEEK_CUR:
             if pos < 0:
-                raise ValueError("cannot seek zstd decompression stream " "backwards")
+                raise ValueError(
+                    "cannot seek zstd decompression stream " "backwards"
+                )
 
             read_amount = pos
         elif whence == os.SEEK_END:
@@ -2123,7 +2240,9 @@
             )
 
         while read_amount:
-            result = self.read(min(read_amount, DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE))
+            result = self.read(
+                min(read_amount, DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE)
+            )
 
             if not result:
                 break
@@ -2257,10 +2376,14 @@
         while in_buffer.pos < in_buffer.size:
             zresult = lib.ZSTD_decompressStream(dctx, out_buffer, in_buffer)
             if lib.ZSTD_isError(zresult):
-                raise ZstdError("zstd decompress error: %s" % _zstd_error(zresult))
+                raise ZstdError(
+                    "zstd decompress error: %s" % _zstd_error(zresult)
+                )
 
             if out_buffer.pos:
-                self._writer.write(ffi.buffer(out_buffer.dst, out_buffer.pos)[:])
+                self._writer.write(
+                    ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
+                )
                 total_write += out_buffer.pos
                 out_buffer.pos = 0
 
@@ -2299,7 +2422,9 @@
 
         data_buffer = ffi.from_buffer(data)
 
-        output_size = lib.ZSTD_getFrameContentSize(data_buffer, len(data_buffer))
+        output_size = lib.ZSTD_getFrameContentSize(
+            data_buffer, len(data_buffer)
+        )
 
         if output_size == lib.ZSTD_CONTENTSIZE_ERROR:
             raise ZstdError("error determining content size from frame header")
@@ -2307,7 +2432,9 @@
             return b""
         elif output_size == lib.ZSTD_CONTENTSIZE_UNKNOWN:
             if not max_output_size:
-                raise ZstdError("could not determine content size in frame header")
+                raise ZstdError(
+                    "could not determine content size in frame header"
+                )
 
             result_buffer = ffi.new("char[]", max_output_size)
             result_size = max_output_size
@@ -2330,7 +2457,9 @@
         if lib.ZSTD_isError(zresult):
             raise ZstdError("decompression error: %s" % _zstd_error(zresult))
         elif zresult:
-            raise ZstdError("decompression error: did not decompress full frame")
+            raise ZstdError(
+                "decompression error: did not decompress full frame"
+            )
         elif output_size and out_buffer.pos != output_size:
             raise ZstdError(
                 "decompression error: decompressed %d bytes; expected %d"
@@ -2346,7 +2475,9 @@
         read_across_frames=False,
     ):
         self._ensure_dctx()
-        return ZstdDecompressionReader(self, source, read_size, read_across_frames)
+        return ZstdDecompressionReader(
+            self, source, read_size, read_across_frames
+        )
 
     def decompressobj(self, write_size=DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE):
         if write_size < 1:
@@ -2421,9 +2552,13 @@
             while in_buffer.pos < in_buffer.size:
                 assert out_buffer.pos == 0
 
-                zresult = lib.ZSTD_decompressStream(self._dctx, out_buffer, in_buffer)
+                zresult = lib.ZSTD_decompressStream(
+                    self._dctx, out_buffer, in_buffer
+                )
                 if lib.ZSTD_isError(zresult):
-                    raise ZstdError("zstd decompress error: %s" % _zstd_error(zresult))
+                    raise ZstdError(
+                        "zstd decompress error: %s" % _zstd_error(zresult)
+                    )
 
                 if out_buffer.pos:
                     data = ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
@@ -2449,7 +2584,9 @@
         if not hasattr(writer, "write"):
             raise ValueError("must pass an object with a write() method")
 
-        return ZstdDecompressionWriter(self, writer, write_size, write_return_read)
+        return ZstdDecompressionWriter(
+            self, writer, write_size, write_return_read
+        )
 
     write_to = stream_writer
 
@@ -2491,7 +2628,9 @@
 
             # Flush all read data to output.
             while in_buffer.pos < in_buffer.size:
-                zresult = lib.ZSTD_decompressStream(self._dctx, out_buffer, in_buffer)
+                zresult = lib.ZSTD_decompressStream(
+                    self._dctx, out_buffer, in_buffer
+                )
                 if lib.ZSTD_isError(zresult):
                     raise ZstdError(
                         "zstd decompressor error: %s" % _zstd_error(zresult)
@@ -2521,7 +2660,9 @@
         # All chunks should be zstd frames and should have content size set.
         chunk_buffer = ffi.from_buffer(chunk)
         params = ffi.new("ZSTD_frameHeader *")
-        zresult = lib.ZSTD_getFrameHeader(params, chunk_buffer, len(chunk_buffer))
+        zresult = lib.ZSTD_getFrameHeader(
+            params, chunk_buffer, len(chunk_buffer)
+        )
         if lib.ZSTD_isError(zresult):
             raise ValueError("chunk 0 is not a valid zstd frame")
         elif zresult:
@@ -2546,7 +2687,9 @@
 
         zresult = lib.ZSTD_decompressStream(self._dctx, out_buffer, in_buffer)
         if lib.ZSTD_isError(zresult):
-            raise ZstdError("could not decompress chunk 0: %s" % _zstd_error(zresult))
+            raise ZstdError(
+                "could not decompress chunk 0: %s" % _zstd_error(zresult)
+            )
         elif zresult:
             raise ZstdError("chunk 0 did not decompress full frame")
 
@@ -2561,11 +2704,15 @@
                 raise ValueError("chunk %d must be bytes" % i)
 
             chunk_buffer = ffi.from_buffer(chunk)
-            zresult = lib.ZSTD_getFrameHeader(params, chunk_buffer, len(chunk_buffer))
+            zresult = lib.ZSTD_getFrameHeader(
+                params, chunk_buffer, len(chunk_buffer)
+            )
             if lib.ZSTD_isError(zresult):
                 raise ValueError("chunk %d is not a valid zstd frame" % i)
             elif zresult:
-                raise ValueError("chunk %d is too small to contain a zstd frame" % i)
+                raise ValueError(
+                    "chunk %d is too small to contain a zstd frame" % i
+                )
 
             if params.frameContentSize == lib.ZSTD_CONTENTSIZE_UNKNOWN:
                 raise ValueError("chunk %d missing content size in frame" % i)
@@ -2580,7 +2727,9 @@
             in_buffer.size = len(chunk_buffer)
             in_buffer.pos = 0
 
-            zresult = lib.ZSTD_decompressStream(self._dctx, out_buffer, in_buffer)
+            zresult = lib.ZSTD_decompressStream(
+                self._dctx, out_buffer, in_buffer
+            )
             if lib.ZSTD_isError(zresult):
                 raise ZstdError(
                     "could not decompress chunk %d: %s" % _zstd_error(zresult)
@@ -2597,7 +2746,9 @@
         lib.ZSTD_DCtx_reset(self._dctx, lib.ZSTD_reset_session_only)
 
         if self._max_window_size:
-            zresult = lib.ZSTD_DCtx_setMaxWindowSize(self._dctx, self._max_window_size)
+            zresult = lib.ZSTD_DCtx_setMaxWindowSize(
+                self._dctx, self._max_window_size
+            )
             if lib.ZSTD_isError(zresult):
                 raise ZstdError(
                     "unable to set max window size: %s" % _zstd_error(zresult)
@@ -2605,11 +2756,14 @@
 
         zresult = lib.ZSTD_DCtx_setFormat(self._dctx, self._format)
         if lib.ZSTD_isError(zresult):
-            raise ZstdError("unable to set decoding format: %s" % _zstd_error(zresult))
+            raise ZstdError(
+                "unable to set decoding format: %s" % _zstd_error(zresult)
+            )
 
         if self._dict_data and load_dict:
             zresult = lib.ZSTD_DCtx_refDDict(self._dctx, self._dict_data._ddict)
             if lib.ZSTD_isError(zresult):
                 raise ZstdError(
-                    "unable to reference prepared dictionary: %s" % _zstd_error(zresult)
+                    "unable to reference prepared dictionary: %s"
+                    % _zstd_error(zresult)
                 )
--- a/tests/test-check-format.t	Tue Jan 21 15:45:06 2020 -0800
+++ b/tests/test-check-format.t	Wed Jan 22 22:23:04 2020 -0800
@@ -1,5 +1,5 @@
 #require black
 
   $ cd $RUNTESTDIR/..
-  $ black --config=black.toml --check --diff `hg files 'set:(**.py + grep("^#!.*python")) - mercurial/thirdparty/** - "contrib/python-zstandard/**"'`
+  $ black --config=black.toml --check --diff `hg files 'set:(**.py + grep("^#!.*python")) - mercurial/thirdparty/**'`