# HG changeset patch
# User Manuel Jacob
# Date 1653184234 -7200
# Node ID 395f28064826493f4fa6efbd7504b0912f11cedd
# Parent  7b0cf4517d82175930d4f55ce24c99b05fd8b2f6
worker: avoid potential partial write of pickled data

Previously, the code wrote the pickled data using os.write(). However,
os.write() can write fewer bytes than it is passed. To trigger the problem,
the pickled data had to be larger than 2147479552 bytes on my system.

Instead, open a file object and pass it to pickle.dump(). This also has the
advantage that it doesn’t buffer the whole pickled data in memory.

Note that the opened file must be buffered, because pickle doesn’t support
unbuffered streams: an unbuffered stream’s write() method can, like
os.write(), write fewer bytes than it is passed, but pickle.dump() relies on
all bytes being written (see https://github.com/python/cpython/issues/93050).

A side effect of using a file object and a with statement is that wfd is now
closed explicitly, whereas before it was apparently closed implicitly at
process exit.

diff -r 7b0cf4517d82 -r 395f28064826 mercurial/worker.py
--- a/mercurial/worker.py	Wed Jun 01 03:12:23 2022 +0200
+++ b/mercurial/worker.py	Sun May 22 03:50:34 2022 +0200
@@ -250,8 +250,10 @@
                 os.close(r)
                 os.close(w)
             os.close(rfd)
-            for result in func(*(staticargs + (pargs,))):
-                os.write(wfd, pickle.dumps(result))
+            with os.fdopen(wfd, 'wb') as wf:
+                for result in func(*(staticargs + (pargs,))):
+                    pickle.dump(result, wf)
+                    wf.flush()
             return 0
 
         ret = scmutil.callcatch(ui, workerfunc)
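
Below is a minimal, self-contained sketch (not part of the patch) of the
pattern the change adopts. The generator produce_results and its payloads are
hypothetical stand-ins for func's results; only the os.fdopen()/pickle.dump()/
flush() combination mirrors the actual change. The key point is that a
buffered file object retries short writes internally, so pickle.dump() always
sees a write() that consumes everything it is given.

    import os
    import pickle

    # Hypothetical stand-in for func(*(staticargs + (pargs,))); yields small
    # picklable results instead of real Mercurial worker output.
    def produce_results():
        yield (b'a.txt', 0)
        yield (b'b.txt', 0)

    rfd, wfd = os.pipe()

    # Fragile pre-patch pattern: a single os.write() may write fewer bytes
    # than requested, truncating the pickle stream for very large payloads:
    #     os.write(wfd, pickle.dumps(result))

    # Post-patch pattern: wrap the raw fd in a *buffered* file object; the
    # buffered layer loops over partial raw writes, so no data is lost.
    with os.fdopen(wfd, 'wb') as wf:
        for result in produce_results():
            pickle.dump(result, wf)
            wf.flush()  # push each record to the pipe promptly

    # Read the records back to show the stream round-trips intact.
    with os.fdopen(rfd, 'rb') as rf:
        while True:
            try:
                print(pickle.load(rf))
            except EOFError:
                break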