# HG changeset patch # User Phil Cohen # Date 1505160207 25200 # Node ID b90e5b2a9c822797b7fe6266af00923105deda2d # Parent 9c07cff039bc8f81f4c3b560315b11296e3d95b1 merge: flush any deferred writes before, and after, running any workers Since we fork to create workers, any changes they queue up will be lost after the worker terminates, so the easiest solution is to have each worker flush the writes they accumulate--we are close to the end of the merge in any case. To prevent duplicated writes, we also have the master process flush before forking. In an in-memory merge (M2), we'll instead disable the use of workers. Differential Revision: https://phab.mercurial-scm.org/D628 diff -r 9c07cff039bc -r b90e5b2a9c82 mercurial/merge.py --- a/mercurial/merge.py Mon Sep 11 13:03:27 2017 -0700 +++ b/mercurial/merge.py Mon Sep 11 13:03:27 2017 -0700 @@ -1126,6 +1126,10 @@ "(consider changing to repo root: %s)\n") % repo.root) + # It's necessary to flush here in case we're inside a worker fork and will + # quit after this function. + wctx.flushall() + def batchget(repo, mctx, wctx, actions): """apply gets to the working directory @@ -1161,6 +1165,10 @@ if i > 0: yield i, f + # It's necessary to flush here in case we're inside a worker fork and will + # quit after this function. + wctx.flushall() + def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None): """apply the merge action list to the working directory @@ -1229,6 +1237,10 @@ progress(_updating, z, item=item, total=numupdates, unit=_files) removed = len(actions['r']) + # We should flush before forking into worker processes, since those workers + # flush when they complete, and we don't want to duplicate work. + wctx.flushall() + # get in parallel prog = worker.worker(repo.ui, 0.001, batchget, (repo, mctx, wctx), actions['g'])