largefiles: drop limitreader, use filechunkiter limit
author Mads Kiilerich <madski@unity3d.com>
date Tue, 16 Apr 2013 01:55:57 +0200
changeset 19005 1b84047e7d16
parent 19004 6614e5e24e66
child 19006 0b3b84222a2d
largefiles: drop limitreader, use filechunkiter limit

limitreader.close was a noop.
hgext/largefiles/lfutil.py
hgext/largefiles/proto.py
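
For context, filechunkiter is Mercurial's generic chunked-read helper, and it
accepts an optional byte limit, which is what makes the limitreader wrapper
below redundant. A minimal sketch of that behavior (illustrative only, not
Mercurial's exact implementation):

    def filechunkiter(f, size=131072, limit=None):
        """Yield chunks of up to size bytes read from f, stopping
        after limit total bytes when a limit is given (sketch, not
        Mercurial's actual code)."""
        while limit is None or limit > 0:
            # never request more than the remaining limit
            nbytes = size if limit is None else min(limit, size)
            s = f.read(nbytes)
            if not s:
                break
            if limit is not None:
                limit -= len(s)
            yield s
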
--- a/hgext/largefiles/lfutil.py	Tue Apr 16 01:46:39 2013 +0200
+++ b/hgext/largefiles/lfutil.py	Tue Apr 16 01:55:57 2013 +0200
@@ -309,21 +309,6 @@
     fd.close()
     return hasher.hexdigest()
 
-class limitreader(object):
-    def __init__(self, f, limit):
-        self.f = f
-        self.limit = limit
-
-    def read(self, length):
-        if self.limit == 0:
-            return ''
-        length = length > self.limit and self.limit or length
-        self.limit -= length
-        return self.f.read(length)
-
-    def close(self):
-        pass
-
 def writehash(hash, filename, executable):
     util.makedirs(os.path.dirname(filename))
     util.writefile(filename, hash + '\n')
--- a/hgext/largefiles/proto.py	Tue Apr 16 01:46:39 2013 +0200
+++ b/hgext/largefiles/proto.py	Tue Apr 16 01:55:57 2013 +0200
@@ -123,11 +123,9 @@
                 self._abort(error.ResponseError(_("unexpected response:"),
                                                 length))
 
-            # Mercurial doesn't close SSH connections after writing a stream
-            infile = lfutil.limitreader(stream, length)
-            for chunk in util.filechunkiter(infile, 128 * 1024):
+            # SSH streams will block if reading more than length
+            for chunk in util.filechunkiter(stream, 128 * 1024, length):
                 yield chunk
-            infile.close()
 
         @batchable
         def statlfile(self, sha):
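
Why the limit matters in the proto.py hunk: the SSH connection stays open
after the payload, so a read past length bytes would block waiting for data
that belongs to the next response. Passing length as the limit means the
iterator never requests more than the remaining payload and leaves the
stream positioned at the next protocol message. A small usage illustration
of the sketch above, with a hypothetical in-memory stand-in for the stream:

    import io

    # Hypothetical stand-in: a 300000-byte payload followed by the
    # next protocol message on the same stream.
    stream = io.BytesIO(b'x' * 300000 + b'NEXT')

    payload = b''.join(filechunkiter(stream, 128 * 1024, 300000))
    assert len(payload) == 300000
    assert stream.read(4) == b'NEXT'  # positioned at the next message

The removed limitreader achieved the same capping by wrapping the stream
object; passing the limit straight to filechunkiter drops the wrapper and
its noop close().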