merge with crew
author Matt Mackall <mpm@selenic.com>
Fri, 22 Feb 2013 13:46:54 -0600
changeset 18716 963468e9f9e5
parent 18715 c4ff927b6f68 (current diff)
parent 18714 7790d69af6d6 (diff)
child 18717 fcc4b55876c3
merge with crew
--- a/hgext/largefiles/__init__.py	Fri Feb 22 13:45:46 2013 -0600
+++ b/hgext/largefiles/__init__.py	Fri Feb 22 13:46:54 2013 -0600
@@ -41,13 +41,17 @@
 enabled for this to work.
 
 When you pull a changeset that affects largefiles from a remote
-repository, the largefiles for the changeset usually won't be
-pulled down until you update to the revision (there is one exception
-to this case).  However, when you update to such a revision, any
-largefiles needed by that revision are downloaded and cached (if
-they have never been downloaded before).  This means that network
-access may be required to update to changesets you have no
-previously updated to.
+repository, the largefiles for the changeset won't be pulled down.
+Instead, when you later update to such a revision, any largefiles
+needed by that revision are downloaded and cached (if they have
+never been downloaded before).  This means that network access may
+be required to update to changesets you have not previously updated to.
+
+If you know you are pulling from a non-default location and want to
+ensure that you will have the largefiles needed to merge or rebase
+with new heads that you are pulling, then you can pull with the
+--cache-largefiles flag to pre-emptively download any largefiles
+that are new in the heads you are pulling.
 
 The one exception to the "largefiles won't be pulled until you update
 to a revision that changes them" rule is when you pull new heads.
--- a/hgext/largefiles/overrides.py	Fri Feb 22 13:45:46 2013 -0600
+++ b/hgext/largefiles/overrides.py	Fri Feb 22 13:46:54 2013 -0600
@@ -733,19 +733,21 @@
         repo.lfpullsource = source
         oldheads = lfutil.getcurrentheads(repo)
         result = orig(ui, repo, source, **opts)
-        # If we do not have the new largefiles for any new heads we pulled, we
-        # will run into a problem later if we try to merge or rebase with one of
-        # these heads, so cache the largefiles now directly into the system
-        # cache.
-        numcached = 0
-        heads = lfutil.getcurrentheads(repo)
-        newheads = set(heads).difference(set(oldheads))
-        if len(newheads) > 0:
-            ui.status(_("caching largefiles for %s heads\n") % len(newheads))
-        for head in newheads:
-            (cached, missing) = lfcommands.cachelfiles(ui, repo, head)
-            numcached += len(cached)
-        ui.status(_("%d largefiles cached\n") % numcached)
+        if opts.get('cache_largefiles'):
+            # If you are pulling from a remote location that is not your
+            # default location, you may want to cache largefiles for new heads
+            # that have been pulled, so you can easily merge or rebase with
+            # them later
+            numcached = 0
+            heads = lfutil.getcurrentheads(repo)
+            newheads = set(heads).difference(set(oldheads))
+            if len(newheads) > 0:
+                ui.status(_("caching largefiles for %s heads\n") %
+                          len(newheads))
+            for head in newheads:
+                (cached, missing) = lfcommands.cachelfiles(ui, repo, head)
+                numcached += len(cached)
+            ui.status(_("%d largefiles cached\n") % numcached)
     if opts.get('all_largefiles'):
         revspostpull = len(repo)
         revs = []
--- a/hgext/largefiles/uisetup.py	Fri Feb 22 13:45:46 2013 -0600
+++ b/hgext/largefiles/uisetup.py	Fri Feb 22 13:46:54 2013 -0600
@@ -79,7 +79,9 @@
     entry = extensions.wrapcommand(commands.table, 'pull',
                                    overrides.overridepull)
     pullopt = [('', 'all-largefiles', None,
-                 _('download all pulled versions of largefiles'))]
+                 _('download all pulled versions of largefiles')),
+               ('', 'cache-largefiles', None,
+                 _('caches new largefiles in all pulled heads'))]
     entry[1].extend(pullopt)
     entry = extensions.wrapcommand(commands.table, 'clone',
                                    overrides.overrideclone)
--- a/mercurial/cmdutil.py	Fri Feb 22 13:45:46 2013 -0600
+++ b/mercurial/cmdutil.py	Fri Feb 22 13:46:54 2013 -0600
@@ -1210,6 +1210,13 @@
             if ff.match(x):
                 wanted.discard(x)
 
+    # Choose a small initial window if we will probably only visit a
+    # few commits.
+    limit = loglimit(opts)
+    windowsize = 8
+    if limit:
+        windowsize = min(limit, windowsize)
+
     # Now that wanted is correctly initialized, we can iterate over the
     # revision range, yielding only revisions in wanted.
     def iterate():
@@ -1221,7 +1228,7 @@
             def want(rev):
                 return rev in wanted
 
-        for i, window in increasingwindows(0, len(revs)):
+        for i, window in increasingwindows(0, len(revs), windowsize):
             nrevs = [rev for rev in revs[i:i + window] if want(rev)]
             for rev in sorted(nrevs):
                 fns = fncache.get(rev)
--- a/mercurial/commands.py	Fri Feb 22 13:45:46 2013 -0600
+++ b/mercurial/commands.py	Fri Feb 22 13:46:54 2013 -0600
@@ -4215,10 +4215,10 @@
         displayer.show(ctx, copies=copies, matchfn=revmatchfn)
 
     for ctx in cmdutil.walkchangerevs(repo, matchfn, opts, prep):
+        if displayer.flush(ctx.rev()):
+            count += 1
         if count == limit:
             break
-        if displayer.flush(ctx.rev()):
-            count += 1
     displayer.close()
 
 @command('manifest',
--- a/mercurial/localrepo.py	Fri Feb 22 13:45:46 2013 -0600
+++ b/mercurial/localrepo.py	Fri Feb 22 13:46:54 2013 -0600
@@ -1532,12 +1532,12 @@
 
             modified, added, clean = [], [], []
             withflags = mf1.withflags() | mf2.withflags()
-            for fn in mf2:
+            for fn, mf2node in mf2.iteritems():
                 if fn in mf1:
                     if (fn not in deleted and
                         ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
-                         (mf1[fn] != mf2[fn] and
-                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
+                         (mf1[fn] != mf2node and
+                          (mf2node or ctx1[fn].cmp(ctx2[fn]))))):
                         modified.append(fn)
                     elif listclean:
                         clean.append(fn)
--- a/mercurial/match.py	Fri Feb 22 13:45:46 2013 -0600
+++ b/mercurial/match.py	Fri Feb 22 13:46:54 2013 -0600
@@ -62,6 +62,7 @@
         self._files = []
         self._anypats = bool(include or exclude)
         self._ctx = ctx
+        self._always = False
 
         if include:
             pats = _normalize(include, 'glob', root, cwd, auditor)
@@ -103,6 +104,7 @@
                     m = lambda f: not em(f)
                 else:
                     m = lambda f: True
+                    self._always = True
 
         self.matchfn = m
         self._fmap = set(self._files)
@@ -130,7 +132,7 @@
     def anypats(self):
         return self._anypats
     def always(self):
-        return False
+        return self._always
 
 class exact(match):
     def __init__(self, root, cwd, files):
@@ -139,8 +141,7 @@
 class always(match):
     def __init__(self, root, cwd):
         match.__init__(self, root, cwd, [])
-    def always(self):
-        return True
+        self._always = True
 
 class narrowmatcher(match):
     """Adapt a matcher to work on a subdirectory only.
@@ -175,6 +176,7 @@
         self._cwd = matcher._cwd
         self._path = path
         self._matcher = matcher
+        self._always = matcher._always
 
         self._files = [f[len(path) + 1:] for f in matcher._files
                        if f.startswith(path + "/")]
--- a/mercurial/scmwindows.py	Fri Feb 22 13:45:46 2013 -0600
+++ b/mercurial/scmwindows.py	Fri Feb 22 13:46:54 2013 -0600
@@ -1,5 +1,6 @@
 import os
 import osutil
+import util
 import _winreg
 
 def systemrcpath():
--- a/mercurial/worker.py	Fri Feb 22 13:45:46 2013 -0600
+++ b/mercurial/worker.py	Fri Feb 22 13:46:54 2013 -0600
@@ -6,7 +6,7 @@
 # GNU General Public License version 2 or any later version.
 
 from i18n import _
-import os, signal, sys, util
+import errno, os, signal, sys, threading, util
 
 def countcpus():
     '''try to count the number of CPUs on the system'''
@@ -75,9 +75,13 @@
 def _posixworker(ui, func, staticargs, args):
     rfd, wfd = os.pipe()
     workers = _numworkers(ui)
+    oldhandler = signal.getsignal(signal.SIGINT)
+    signal.signal(signal.SIGINT, signal.SIG_IGN)
+    pids, problem = [], [0]
     for pargs in partition(args, workers):
         pid = os.fork()
         if pid == 0:
+            signal.signal(signal.SIGINT, oldhandler)
             try:
                 os.close(rfd)
                 for i, item in func(*(staticargs + (pargs,))):
@@ -85,29 +89,57 @@
                 os._exit(0)
             except KeyboardInterrupt:
                 os._exit(255)
+        pids.append(pid)
+    pids.reverse()
     os.close(wfd)
     fp = os.fdopen(rfd, 'rb', 0)
-    oldhandler = signal.getsignal(signal.SIGINT)
-    signal.signal(signal.SIGINT, signal.SIG_IGN)
+    def killworkers():
+        # if one worker bails, there's no good reason to wait for the rest
+        for p in pids:
+            try:
+                os.kill(p, signal.SIGTERM)
+            except OSError, err:
+                if err.errno != errno.ESRCH:
+                    raise
+    def waitforworkers():
+        for _ in pids:
+            st = _exitstatus(os.wait()[1])
+            if st and not problem:
+                problem[0] = st
+                killworkers()
+    t = threading.Thread(target=waitforworkers)
+    t.start()
     def cleanup():
-        # python 2.4 is too dumb for try/yield/finally
         signal.signal(signal.SIGINT, oldhandler)
-        problems = 0
-        for i in xrange(workers):
-            problems |= os.wait()[1]
-        if problems:
-            sys.exit(1)
+        t.join()
+        status = problem[0]
+        if status:
+            if status < 0:
+                os.kill(os.getpid(), -status)
+            sys.exit(status)
     try:
         for line in fp:
             l = line.split(' ', 1)
             yield int(l[0]), l[1][:-1]
     except: # re-raises
+        killworkers()
         cleanup()
         raise
     cleanup()
 
+def _posixexitstatus(code):
+    '''convert a posix exit status into the same form returned by
+    os.spawnv
+
+    returns None if the process was stopped instead of exiting'''
+    if os.WIFEXITED(code):
+        return os.WEXITSTATUS(code)
+    elif os.WIFSIGNALED(code):
+        return -os.WTERMSIG(code)
+
 if os.name != 'nt':
     _platformworker = _posixworker
+    _exitstatus = _posixexitstatus
 
 def partition(lst, nslices):
     '''partition a list into N slices of equal size'''
--- a/tests/test-largefiles-cache.t	Fri Feb 22 13:45:46 2013 -0600
+++ b/tests/test-largefiles-cache.t	Fri Feb 22 13:46:54 2013 -0600
@@ -37,8 +37,6 @@
   adding file changes
   added 1 changesets with 1 changes to 1 files
   (run 'hg update' to get a working copy)
-  caching largefiles for 1 heads
-  0 largefiles cached
 
 Update working directory to "tip", which requires largefile("large"),
 but there is no cache file for it.  So, hg must treat it as
--- a/tests/test-largefiles.t	Fri Feb 22 13:45:46 2013 -0600
+++ b/tests/test-largefiles.t	Fri Feb 22 13:46:54 2013 -0600
@@ -883,9 +883,7 @@
   adding file changes
   added 6 changesets with 16 changes to 8 files
   (run 'hg update' to get a working copy)
-  caching largefiles for 1 heads
-  3 largefiles cached
-  3 additional largefiles cached
+  6 additional largefiles cached
   $ cd ..
 
 Rebasing between two repositories does not revert largefiles to old
@@ -974,8 +972,6 @@
   adding file changes
   added 1 changesets with 2 changes to 2 files (+1 heads)
   (run 'hg heads' to see heads, 'hg merge' to merge)
-  caching largefiles for 1 heads
-  0 largefiles cached
   $ hg rebase
   Invoking status precommit hook
   M sub/normal4
@@ -1265,7 +1261,8 @@
   $ hg commit -m "Modify large4 to test merge"
   Invoking status precommit hook
   M sub/large4
-  $ hg pull ../e
+# Test --cache-largefiles flag
+  $ hg pull --cache-largefiles ../e
   pulling from ../e
   searching for changes
   adding changesets