chunkiter: handle large reads more efficiently
author Matt Mackall <mpm@selenic.com>
date Thu, 11 Oct 2007 00:46:52 -0500
changeset 5449 17a4b20eda7b
parent 5448 e038738714fd
child 5450 c728424d44c6
chunkiter: handle large reads more efficiently

- for large reads, don't attempt to read more than necessary
- if we've gathered the exact number of bytes needed, avoid a string copy
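
The first bullet is a sizing fix: the old formula rounded the fill target up to the next multiple of targetsize, so the buffer always pulled at least one chunk more from the iterator than the caller asked for, while the new max(l, targetsize) collects exactly l bytes (never less than one full chunk). With the 65536-byte default visible in filechunkiter below, a 2**20-byte read used to gather 1114112 bytes instead of 1048576. A standalone sketch of the two formulas (illustration only, not part of the patch):

    # Compare old and new fill targets for chunkbuffer.read(l).
    # Assumes the 65536-byte default chunk size.
    targetsize = 65536

    def old_target(l):
        # Round up to the next multiple of targetsize; overshoots
        # even when l is already an exact multiple.
        return targetsize * ((l // targetsize) + 1)

    def new_target(l):
        # Read exactly l bytes, but at least one full chunk.
        return max(l, targetsize)

    for l in (100, 65536, 2 ** 20):
        print(l, old_target(l), new_target(l))
    # 100     -> old 65536,   new 65536   (small reads unchanged)
    # 65536   -> old 131072,  new 65536   (old reads a whole extra chunk)
    # 1048576 -> old 1114112, new 1048576
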
mercurial/util.py
--- a/mercurial/util.py	Thu Oct 11 00:46:51 2007 -0500
+++ b/mercurial/util.py	Thu Oct 11 00:46:52 2007 -0500
@@ -1408,7 +1408,7 @@
         Returns less than L bytes if the iterator runs dry."""
         if l > len(self.buf) and self.iter:
             # Clamp to a multiple of self.targetsize
-            targetsize = self.targetsize * ((l // self.targetsize) + 1)
+            targetsize = max(l, self.targetsize)
             collector = cStringIO.StringIO()
             collector.write(self.buf)
             collected = len(self.buf)
@@ -1420,7 +1420,10 @@
             if collected < targetsize:
                 self.iter = False
             self.buf = collector.getvalue()
-        s, self.buf = self.buf[:l], buffer(self.buf, l)
+        if len(self.buf) == l:
+            s, self.buf = self.buf, ''
+        else:
+            s, self.buf = self.buf[:l], buffer(self.buf, l)
         return s
 
 def filechunkiter(f, size=65536, limit=None):
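
To see both bullets in one place, here is a rough Python 3 reconstruction of the patched read() together with the filechunkiter whose signature closes the hunk above. io.BytesIO stands in for cStringIO and a plain slice for the zero-copy buffer() on the tail; the shape mirrors mercurial/util.py, but this is a sketch under those substitutions, not the actual source:

    # Approximate Python 3 rendition of chunkbuffer after this change
    # (a sketch, not a verbatim copy of mercurial/util.py).
    import io

    def filechunkiter(f, size=65536, limit=None):
        # Yield chunks of up to `size` bytes from f, up to optional `limit`.
        while True:
            nbytes = size if limit is None else min(limit, size)
            s = f.read(nbytes)
            if not s:
                break
            if limit is not None:
                limit -= len(s)
            yield s

    class chunkbuffer(object):
        def __init__(self, in_iter, targetsize=65536):
            self.iter = iter(in_iter)
            self.buf = b''
            self.targetsize = targetsize

        def read(self, l):
            """Read l bytes; returns fewer if the iterator runs dry."""
            if l > len(self.buf) and self.iter:
                # Post-patch: fill to max(l, targetsize) rather than the
                # next multiple of targetsize (the "clamp to a multiple"
                # comment left in the hunk predates this change).
                targetsize = max(l, self.targetsize)
                collector = io.BytesIO()
                collector.write(self.buf)
                collected = len(self.buf)
                for chunk in self.iter:
                    collector.write(chunk)
                    collected += len(chunk)
                    if collected >= targetsize:
                        break
                else:
                    # Iterator ran dry before reaching the target.
                    self.iter = None
                self.buf = collector.getvalue()
            if len(self.buf) == l:
                # Exact fit: hand back the whole buffer and skip the
                # slice copy (the second bullet of the commit message).
                s, self.buf = self.buf, b''
            else:
                s, self.buf = self.buf[:l], self.buf[l:]
            return s

    # Example: buffer a 200000-byte stream and take one large read.
    buf = chunkbuffer(filechunkiter(io.BytesIO(b'x' * 200000)))
    assert len(buf.read(150000)) == 150000
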