56 |
56 |
def peersetup(ui, peer):
    """Extend *peer*'s class in place with remotefilelog wire commands.

    Adds the ``x_rfl_getfile`` / ``x_rfl_getflogheads`` batchable commands
    and, for legacy ssh servers, rewrites getbundle options so a shallow
    client announces its include/exclude patterns and bundle2 capability.
    """

    class remotefilepeer(peer.__class__):
        @wireprotov1peer.batchable
        def x_rfl_getfile(self, file, node):
            if not self.capable(b'x_rfl_getfile'):
                raise error.Abort(
                    b'configured remotefile server does not support getfile'
                )
            f = wireprotov1peer.future()
            yield {b'file': file, b'node': node}, f
            # response is "<code>\0<payload>"; non-zero code means lookup
            # failure and the payload is the error message
            code, data = f.value.split(b'\0', 1)
            if int(code):
                raise error.LookupError(file, node, data)
            yield data

        @wireprotov1peer.batchable
        def x_rfl_getflogheads(self, path):
            if not self.capable(b'x_rfl_getflogheads'):
                raise error.Abort(
                    b'configured remotefile server does not '
                    b'support getflogheads'
                )
            f = wireprotov1peer.future()
            yield {b'path': path}, f
            # newline-separated list of filelog head hashes (may be empty)
            heads = f.value.split(b'\n') if f.value else []
            yield heads

        def _updatecallstreamopts(self, command, opts):
            """Inject shallow-repo bundlecaps into getbundle *opts*.

            No-op unless this is a getbundle call against a legacy ssh
            getfiles server from a shallow local repo.
            """
            if command != b'getbundle':
                return
            if (
                constants.NETWORK_CAP_LEGACY_SSH_GETFILES
                not in self.capabilities()
            ):
                return
            if not util.safehasattr(self, b'_localrepo'):
                return
            if (
                constants.SHALLOWREPO_REQUIREMENT
                not in self._localrepo.requirements
            ):
                return

            bundlecaps = opts.get(b'bundlecaps')
            if bundlecaps:
                bundlecaps = [bundlecaps]
            else:
                bundlecaps = []

            # shallow, includepattern, and excludepattern are a hacky way
            # of carrying over data from the local repo to this getbundle
            # command. We need to do it this way because bundle1 getbundle
            # doesn't provide any other place we can hook in to manipulate
            # getbundle args before it goes across the wire. Once we get rid
            # of bundle1, we can use bundle2's _pullbundle2extraprepare to
            # do this more cleanly.
            bundlecaps.append(constants.BUNDLE2_CAPABLITY)
            if self._localrepo.includepattern:
                patterns = b'\0'.join(self._localrepo.includepattern)
                includecap = b"includepattern=" + patterns
                bundlecaps.append(includecap)
            if self._localrepo.excludepattern:
                patterns = b'\0'.join(self._localrepo.excludepattern)
                excludecap = b"excludepattern=" + patterns
                bundlecaps.append(excludecap)
            opts[b'bundlecaps'] = b','.join(bundlecaps)

        def _sendrequest(self, command, args, **opts):
            self._updatecallstreamopts(command, args)
            return super(remotefilepeer, self)._sendrequest(
                command, args, **opts
            )

        def _callstream(self, command, **opts):
            # peers with _sendrequest already had opts rewritten there;
            # only patch here for peer classes lacking that hook
            supertype = super(remotefilepeer, self)
            if not util.safehasattr(supertype, b'_sendrequest'):
                self._updatecallstreamopts(command, pycompat.byteskwargs(opts))
            return super(remotefilepeer, self)._callstream(command, **opts)

    peer.__class__ = remotefilepeer
137 |
137 |
147 self.subprocess = None |
147 self.subprocess = None |
148 self.connected = False |
148 self.connected = False |
149 |
149 |
def connect(self, cachecommand):
    """Spawn the external cache process and attach its stdio pipes.

    Raises Abort if a connection is already open.
    """
    if self.pipeo:
        raise error.Abort(_(b"cache connection already open"))
    self.pipei, self.pipeo, self.pipee, self.subprocess = procutil.popen4(
        cachecommand
    )
    self.connected = True
157 |
157 |
221 with remote.commandexecutor() as e: |
221 with remote.commandexecutor() as e: |
222 futures = [] |
222 futures = [] |
223 for m in missed: |
223 for m in missed: |
224 futures.append( |
224 futures.append( |
225 e.callcommand( |
225 e.callcommand( |
226 'x_rfl_getfile', {'file': idmap[m], 'node': m[-40:]} |
226 b'x_rfl_getfile', {b'file': idmap[m], b'node': m[-40:]} |
227 ) |
227 ) |
228 ) |
228 ) |
229 |
229 |
230 for i, m in enumerate(missed): |
230 for i, m in enumerate(missed): |
231 r = futures[i].result() |
231 r = futures[i].result() |
232 futures[i] = None # release memory |
232 futures[i] = None # release memory |
233 file_ = idmap[m] |
233 file_ = idmap[m] |
234 node = m[-40:] |
234 node = m[-40:] |
235 receivemissing(io.BytesIO('%d\n%s' % (len(r), r)), file_, node) |
235 receivemissing(io.BytesIO(b'%d\n%s' % (len(r), r)), file_, node) |
236 progresstick() |
236 progresstick() |
237 |
237 |
238 |
238 |
239 def _getfiles_optimistic( |
239 def _getfiles_optimistic( |
240 remote, receivemissing, progresstick, missed, idmap, step |
240 remote, receivemissing, progresstick, missed, idmap, step |
241 ): |
241 ): |
242 remote._callstream("x_rfl_getfiles") |
242 remote._callstream(b"x_rfl_getfiles") |
243 i = 0 |
243 i = 0 |
244 pipeo = remote._pipeo |
244 pipeo = remote._pipeo |
245 pipei = remote._pipei |
245 pipei = remote._pipei |
246 while i < len(missed): |
246 while i < len(missed): |
247 # issue a batch of requests |
247 # issue a batch of requests |
250 i = end |
250 i = end |
251 for missingid in missed[start:end]: |
251 for missingid in missed[start:end]: |
252 # issue new request |
252 # issue new request |
253 versionid = missingid[-40:] |
253 versionid = missingid[-40:] |
254 file = idmap[missingid] |
254 file = idmap[missingid] |
255 sshrequest = "%s%s\n" % (versionid, file) |
255 sshrequest = b"%s%s\n" % (versionid, file) |
256 pipeo.write(sshrequest) |
256 pipeo.write(sshrequest) |
257 pipeo.flush() |
257 pipeo.flush() |
258 |
258 |
259 # receive batch results |
259 # receive batch results |
260 for missingid in missed[start:end]: |
260 for missingid in missed[start:end]: |
262 file = idmap[missingid] |
262 file = idmap[missingid] |
263 receivemissing(pipei, file, versionid) |
263 receivemissing(pipei, file, versionid) |
264 progresstick() |
264 progresstick() |
265 |
265 |
266 # End the command |
266 # End the command |
267 pipeo.write('\n') |
267 pipeo.write(b'\n') |
268 pipeo.flush() |
268 pipeo.flush() |
269 |
269 |
270 |
270 |
271 def _getfiles_threaded( |
271 def _getfiles_threaded( |
272 remote, receivemissing, progresstick, missed, idmap, step |
272 remote, receivemissing, progresstick, missed, idmap, step |
273 ): |
273 ): |
274 remote._callstream("getfiles") |
274 remote._callstream(b"getfiles") |
275 pipeo = remote._pipeo |
275 pipeo = remote._pipeo |
276 pipei = remote._pipei |
276 pipei = remote._pipei |
277 |
277 |
278 def writer(): |
278 def writer(): |
279 for missingid in missed: |
279 for missingid in missed: |
280 versionid = missingid[-40:] |
280 versionid = missingid[-40:] |
281 file = idmap[missingid] |
281 file = idmap[missingid] |
282 sshrequest = "%s%s\n" % (versionid, file) |
282 sshrequest = b"%s%s\n" % (versionid, file) |
283 pipeo.write(sshrequest) |
283 pipeo.write(sshrequest) |
284 pipeo.flush() |
284 pipeo.flush() |
285 |
285 |
286 writerthread = threading.Thread(target=writer) |
286 writerthread = threading.Thread(target=writer) |
287 writerthread.daemon = True |
287 writerthread.daemon = True |
305 |
305 |
def __init__(self, repo):
    """Create a file server client bound to *repo*.

    Reads the remotefilelog cache-process configuration and opens a
    (lazy) cache connection object.
    """
    ui = repo.ui
    self.repo = repo
    self.ui = ui
    # optional external command used as a shared file-content cache
    self.cacheprocess = ui.config(b"remotefilelog", b"cacheprocess")
    if self.cacheprocess:
        self.cacheprocess = util.expandpath(self.cacheprocess)

    # This option causes remotefilelog to pass the full file path to the
    # cacheprocess instead of a hashed key.
    self.cacheprocesspasspath = ui.configbool(
        b"remotefilelog", b"cacheprocess.includepath"
    )

    self.debugoutput = ui.configbool(b"remotefilelog", b"debug")

    self.remotecache = cacheconnection()
323 |
323 |
324 def setstore(self, datastore, historystore, writedata, writehistory): |
324 def setstore(self, datastore, historystore, writedata, writehistory): |
325 self.datastore = datastore |
325 self.datastore = datastore |
341 cache = self.remotecache |
341 cache = self.remotecache |
342 writedata = self.writedata |
342 writedata = self.writedata |
343 |
343 |
344 repo = self.repo |
344 repo = self.repo |
345 total = len(fileids) |
345 total = len(fileids) |
346 request = "get\n%d\n" % total |
346 request = b"get\n%d\n" % total |
347 idmap = {} |
347 idmap = {} |
348 reponame = repo.name |
348 reponame = repo.name |
349 for file, id in fileids: |
349 for file, id in fileids: |
350 fullid = getcachekey(reponame, file, id) |
350 fullid = getcachekey(reponame, file, id) |
351 if self.cacheprocesspasspath: |
351 if self.cacheprocesspasspath: |
352 request += file + '\0' |
352 request += file + b'\0' |
353 request += fullid + "\n" |
353 request += fullid + b"\n" |
354 idmap[fullid] = file |
354 idmap[fullid] = file |
355 |
355 |
356 cache.request(request) |
356 cache.request(request) |
357 |
357 |
358 progress = self.ui.makeprogress(_('downloading'), total=total) |
358 progress = self.ui.makeprogress(_(b'downloading'), total=total) |
359 progress.update(0) |
359 progress.update(0) |
360 |
360 |
361 missed = [] |
361 missed = [] |
362 while True: |
362 while True: |
363 missingid = cache.receiveline() |
363 missingid = cache.receiveline() |
366 for missingid in idmap: |
366 for missingid in idmap: |
367 if not missingid in missedset: |
367 if not missingid in missedset: |
368 missed.append(missingid) |
368 missed.append(missingid) |
369 self.ui.warn( |
369 self.ui.warn( |
370 _( |
370 _( |
371 "warning: cache connection closed early - " |
371 b"warning: cache connection closed early - " |
372 + "falling back to server\n" |
372 + b"falling back to server\n" |
373 ) |
373 ) |
374 ) |
374 ) |
375 break |
375 break |
376 if missingid == "0": |
376 if missingid == b"0": |
377 break |
377 break |
378 if missingid.startswith("_hits_"): |
378 if missingid.startswith(b"_hits_"): |
379 # receive progress reports |
379 # receive progress reports |
380 parts = missingid.split("_") |
380 parts = missingid.split(b"_") |
381 progress.increment(int(parts[2])) |
381 progress.increment(int(parts[2])) |
382 continue |
382 continue |
383 |
383 |
384 missed.append(missingid) |
384 missed.append(missingid) |
385 |
385 |
412 if remote.capable( |
412 if remote.capable( |
413 constants.NETWORK_CAP_LEGACY_SSH_GETFILES |
413 constants.NETWORK_CAP_LEGACY_SSH_GETFILES |
414 ): |
414 ): |
415 if not isinstance(remote, _sshv1peer): |
415 if not isinstance(remote, _sshv1peer): |
416 raise error.Abort( |
416 raise error.Abort( |
417 'remotefilelog requires ssh ' 'servers' |
417 b'remotefilelog requires ssh ' b'servers' |
418 ) |
418 ) |
419 step = self.ui.configint( |
419 step = self.ui.configint( |
420 'remotefilelog', 'getfilesstep' |
420 b'remotefilelog', b'getfilesstep' |
421 ) |
421 ) |
422 getfilestype = self.ui.config( |
422 getfilestype = self.ui.config( |
423 'remotefilelog', 'getfilestype' |
423 b'remotefilelog', b'getfilestype' |
424 ) |
424 ) |
425 if getfilestype == 'threaded': |
425 if getfilestype == b'threaded': |
426 _getfiles = _getfiles_threaded |
426 _getfiles = _getfiles_threaded |
427 else: |
427 else: |
428 _getfiles = _getfiles_optimistic |
428 _getfiles = _getfiles_optimistic |
429 _getfiles( |
429 _getfiles( |
430 remote, |
430 remote, |
454 idmap, |
454 idmap, |
455 batchsize, |
455 batchsize, |
456 ) |
456 ) |
457 else: |
457 else: |
458 raise error.Abort( |
458 raise error.Abort( |
459 "configured remotefilelog server" |
459 b"configured remotefilelog server" |
460 " does not support remotefilelog" |
460 b" does not support remotefilelog" |
461 ) |
461 ) |
462 |
462 |
463 self.ui.log( |
463 self.ui.log( |
464 "remotefilefetchlog", |
464 b"remotefilefetchlog", |
465 "Success\n", |
465 b"Success\n", |
466 fetched_files=progress.pos - fromcache, |
466 fetched_files=progress.pos - fromcache, |
467 total_to_fetch=total - fromcache, |
467 total_to_fetch=total - fromcache, |
468 ) |
468 ) |
469 except Exception: |
469 except Exception: |
470 self.ui.log( |
470 self.ui.log( |
471 "remotefilefetchlog", |
471 b"remotefilefetchlog", |
472 "Fail\n", |
472 b"Fail\n", |
473 fetched_files=progress.pos - fromcache, |
473 fetched_files=progress.pos - fromcache, |
474 total_to_fetch=total - fromcache, |
474 total_to_fetch=total - fromcache, |
475 ) |
475 ) |
476 raise |
476 raise |
477 finally: |
477 finally: |
478 self.ui.verbose = verbose |
478 self.ui.verbose = verbose |
479 # send to memcache |
479 # send to memcache |
480 request = "set\n%d\n%s\n" % (len(missed), "\n".join(missed)) |
480 request = b"set\n%d\n%s\n" % (len(missed), b"\n".join(missed)) |
481 cache.request(request) |
481 cache.request(request) |
482 |
482 |
483 progress.complete() |
483 progress.complete() |
484 |
484 |
485 # mark ourselves as a user of this cache |
485 # mark ourselves as a user of this cache |
489 |
489 |
def receivemissing(self, pipe, filename, node):
    """Read one getfile response from *pipe* and store the blob.

    Wire format: a decimal size line followed by that many bytes of
    zlib-compressed remotefilelog data. Raises ResponseError if the
    stream is closed early or short.
    """
    # strip the trailing newline from the size line
    line = pipe.readline()[:-1]
    if not line:
        raise error.ResponseError(
            _(b"error downloading file contents:"),
            _(b"connection closed early"),
        )
    size = int(line)
    data = pipe.read(size)
    if len(data) != size:
        raise error.ResponseError(
            _(b"error downloading file contents:"),
            _(b"only received %s of %s bytes") % (len(data), size),
        )

    self.writedata.addremotefilelognode(
        filename, bin(node), zlib.decompress(data)
    )
508 |
508 |
def connect(self):
    """Open the connection to the cache service.

    If no ``remotefilelog.cacheprocess`` is configured, install an
    in-memory stand-in that reports every requested id as a miss.
    """
    if self.cacheprocess:
        cmd = b"%s %s" % (self.cacheprocess, self.writedata._path)
        self.remotecache.connect(cmd)
    else:
        # If no cache process is specified, we fake one that always
        # returns cache misses. This enables tests to run easily
        # and may eventually allow us to be a drop in replacement
        # for the largefiles extension.
        class simplecache(object):
            def __init__(self):
                self.missingids = []
                self.connected = True

            def close(self):
                pass

            def request(self, value, flush=True):
                # only "get" requests produce misses; everything else
                # (e.g. "set") is silently accepted
                lines = value.split(b"\n")
                if lines[0] != b"get":
                    return
                self.missingids = lines[2:-1]
                self.missingids.append(b'0')

            def receiveline(self):
                if len(self.missingids) > 0:
                    return self.missingids.pop(0)
                return None

        self.remotecache = simplecache()
539 |
539 |
540 def close(self): |
540 def close(self): |
541 if fetches: |
541 if fetches: |
542 msg = ( |
542 msg = ( |
543 "%d files fetched over %d fetches - " |
543 b"%d files fetched over %d fetches - " |
544 + "(%d misses, %0.2f%% hit ratio) over %0.2fs\n" |
544 + b"(%d misses, %0.2f%% hit ratio) over %0.2fs\n" |
545 ) % ( |
545 ) % ( |
546 fetched, |
546 fetched, |
547 fetches, |
547 fetches, |
548 fetchmisses, |
548 fetchmisses, |
549 float(fetched - fetchmisses) / float(fetched) * 100.0, |
549 float(fetched - fetchmisses) / float(fetched) * 100.0, |
550 fetchcost, |
550 fetchcost, |
551 ) |
551 ) |
552 if self.debugoutput: |
552 if self.debugoutput: |
553 self.ui.warn(msg) |
553 self.ui.warn(msg) |
554 self.ui.log( |
554 self.ui.log( |
555 "remotefilelog.prefetch", |
555 b"remotefilelog.prefetch", |
556 msg.replace("%", "%%"), |
556 msg.replace(b"%", b"%%"), |
557 remotefilelogfetched=fetched, |
557 remotefilelogfetched=fetched, |
558 remotefilelogfetches=fetches, |
558 remotefilelogfetches=fetches, |
559 remotefilelogfetchmisses=fetchmisses, |
559 remotefilelogfetchmisses=fetchmisses, |
560 remotefilelogfetchtime=fetchcost * 1000, |
560 remotefilelogfetchtime=fetchcost * 1000, |
561 ) |
561 ) |
603 nullids = len([None for unused, id in missingids if id == nullid]) |
603 nullids = len([None for unused, id in missingids if id == nullid]) |
604 if nullids: |
604 if nullids: |
605 missingids = [(f, id) for f, id in missingids if id != nullid] |
605 missingids = [(f, id) for f, id in missingids if id != nullid] |
606 repo.ui.develwarn( |
606 repo.ui.develwarn( |
607 ( |
607 ( |
608 'remotefilelog not fetching %d null revs' |
608 b'remotefilelog not fetching %d null revs' |
609 ' - this is likely hiding bugs' % nullids |
609 b' - this is likely hiding bugs' % nullids |
610 ), |
610 ), |
611 config='remotefilelog-ext', |
611 config=b'remotefilelog-ext', |
612 ) |
612 ) |
613 if missingids: |
613 if missingids: |
614 global fetches, fetched, fetchcost |
614 global fetches, fetched, fetchcost |
615 fetches += 1 |
615 fetches += 1 |
616 |
616 |
617 # We want to be able to detect excess individual file downloads, so |
617 # We want to be able to detect excess individual file downloads, so |
618 # let's log that information for debugging. |
618 # let's log that information for debugging. |
619 if fetches >= 15 and fetches < 18: |
619 if fetches >= 15 and fetches < 18: |
620 if fetches == 15: |
620 if fetches == 15: |
621 fetchwarning = self.ui.config( |
621 fetchwarning = self.ui.config( |
622 'remotefilelog', 'fetchwarning' |
622 b'remotefilelog', b'fetchwarning' |
623 ) |
623 ) |
624 if fetchwarning: |
624 if fetchwarning: |
625 self.ui.warn(fetchwarning + '\n') |
625 self.ui.warn(fetchwarning + b'\n') |
626 self.logstacktrace() |
626 self.logstacktrace() |
627 missingids = [(file, hex(id)) for file, id in sorted(missingids)] |
627 missingids = [(file, hex(id)) for file, id in sorted(missingids)] |
628 fetched += len(missingids) |
628 fetched += len(missingids) |
629 start = time.time() |
629 start = time.time() |
630 missingids = self.request(missingids) |
630 missingids = self.request(missingids) |
631 if missingids: |
631 if missingids: |
632 raise error.Abort( |
632 raise error.Abort( |
633 _("unable to download %d files") % len(missingids) |
633 _(b"unable to download %d files") % len(missingids) |
634 ) |
634 ) |
635 fetchcost += time.time() - start |
635 fetchcost += time.time() - start |
636 self._lfsprefetch(fileids) |
636 self._lfsprefetch(fileids) |
637 |
637 |
638 def _lfsprefetch(self, fileids): |
638 def _lfsprefetch(self, fileids): |
639 if not _lfsmod or not util.safehasattr( |
639 if not _lfsmod or not util.safehasattr( |
640 self.repo.svfs, 'lfslocalblobstore' |
640 self.repo.svfs, b'lfslocalblobstore' |
641 ): |
641 ): |
642 return |
642 return |
643 if not _lfsmod.wrapper.candownload(self.repo): |
643 if not _lfsmod.wrapper.candownload(self.repo): |
644 return |
644 return |
645 pointers = [] |
645 pointers = [] |