@@ -37,26 +37,26 @@
 
 
 def backgroundrepack(
     repo, incremental=True, packsonly=False, ensurestart=False
 ):
-    cmd = [procutil.hgexecutable(), '-R', repo.origroot, 'repack']
-    msg = _("(running background repack)\n")
+    cmd = [procutil.hgexecutable(), b'-R', repo.origroot, b'repack']
+    msg = _(b"(running background repack)\n")
     if incremental:
-        cmd.append('--incremental')
-        msg = _("(running background incremental repack)\n")
+        cmd.append(b'--incremental')
+        msg = _(b"(running background incremental repack)\n")
     if packsonly:
-        cmd.append('--packsonly')
+        cmd.append(b'--packsonly')
     repo.ui.warn(msg)
     # We know this command will find a binary, so don't block on it starting.
     procutil.runbgcommand(cmd, encoding.environ, ensurestart=ensurestart)
 
 
 def fullrepack(repo, options=None):
     """If ``packsonly`` is True, stores creating only loose objects are skipped.
     """
-    if util.safehasattr(repo, 'shareddatastores'):
+    if util.safehasattr(repo, b'shareddatastores'):
         datasource = contentstore.unioncontentstore(*repo.shareddatastores)
         historysource = metadatastore.unionmetadatastore(
             *repo.sharedhistorystores, allowincomplete=True
         )
 
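For context on the hunk above: backgroundrepack only assembles an `hg repack` command line and hands it to procutil.runbgcommand, so the calling operation is never blocked on the repack itself. A minimal sketch of the resulting invocation, with purely illustrative executable and repository paths:

    # hypothetical paths; with incremental=True and packsonly=True the list becomes
    cmd = [b'/usr/bin/hg', b'-R', b'/repo', b'repack', b'--incremental', b'--packsonly']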
@@ -70,11 +70,11 @@
             packpath,
             constants.FILEPACK_CATEGORY,
             options=options,
         )
 
-    if util.safehasattr(repo.manifestlog, 'datastore'):
+    if util.safehasattr(repo.manifestlog, b'datastore'):
         localdata, shareddata = _getmanifeststores(repo)
         lpackpath, ldstores, lhstores = localdata
         spackpath, sdstores, shstores = shareddata
 
         # Repack the shared manifest store
@@ -110,24 +110,24 @@
 
 def incrementalrepack(repo, options=None):
     """This repacks the repo by looking at the distribution of pack files in the
     repo and performing the most minimal repack to keep the repo in good shape.
     """
-    if util.safehasattr(repo, 'shareddatastores'):
+    if util.safehasattr(repo, b'shareddatastores'):
         packpath = shallowutil.getcachepackpath(
             repo, constants.FILEPACK_CATEGORY
         )
         _incrementalrepack(
             repo,
             repo.shareddatastores,
             repo.sharedhistorystores,
             packpath,
             constants.FILEPACK_CATEGORY,
             options=options,
         )
 
-    if util.safehasattr(repo.manifestlog, 'datastore'):
+    if util.safehasattr(repo.manifestlog, b'datastore'):
         localdata, shareddata = _getmanifeststores(repo)
         lpackpath, ldstores, lhstores = localdata
         spackpath, sdstores, shstores = shareddata
 
         # Repack the shared manifest store
@@ -179,32 +179,32 @@
 
 def _deletebigpacks(repo, folder, files):
     """Deletes packfiles that are bigger than ``packs.maxpacksize``.
 
     Returns ``files` with the removed files omitted."""
-    maxsize = repo.ui.configbytes("packs", "maxpacksize")
+    maxsize = repo.ui.configbytes(b"packs", b"maxpacksize")
     if maxsize <= 0:
         return files
 
     # This only considers datapacks today, but we could broaden it to include
     # historypacks.
-    VALIDEXTS = [".datapack", ".dataidx"]
+    VALIDEXTS = [b".datapack", b".dataidx"]
 
     # Either an oversize index or datapack will trigger cleanup of the whole
     # pack:
     oversized = {
         os.path.splitext(path)[0]
         for path, ftype, stat in files
         if (stat.st_size > maxsize and (os.path.splitext(path)[1] in VALIDEXTS))
     }
 
     for rootfname in oversized:
         rootpath = os.path.join(folder, rootfname)
         for ext in VALIDEXTS:
             path = rootpath + ext
             repo.ui.debug(
-                'removing oversize packfile %s (%s)\n'
+                b'removing oversize packfile %s (%s)\n'
                 % (path, util.bytecount(os.stat(path).st_size))
             )
             os.unlink(path)
     return [row for row in files if os.path.basename(row[0]) not in oversized]
 
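A note on the hunk above: _deletebigpacks is effectively opt-in, because configbytes returns 0 when packs.maxpacksize is unset and the function then returns early. A small sketch of enabling it programmatically; the 4GB figure is an example value, not a default:

    from mercurial import ui as uimod

    u = uimod.ui.load()
    u.setconfig(b'packs', b'maxpacksize', b'4GB')      # illustrative value
    maxsize = u.configbytes(b'packs', b'maxpacksize')  # parsed to 4 * 2**30 bytes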
@@ -271,41 +271,45 @@
     )
 
 
 def _computeincrementaldatapack(ui, files):
     opts = {
-        'gencountlimit': ui.configint('remotefilelog', 'data.gencountlimit'),
-        'generations': ui.configlist('remotefilelog', 'data.generations'),
-        'maxrepackpacks': ui.configint('remotefilelog', 'data.maxrepackpacks'),
-        'repackmaxpacksize': ui.configbytes(
-            'remotefilelog', 'data.repackmaxpacksize'
-        ),
-        'repacksizelimit': ui.configbytes(
-            'remotefilelog', 'data.repacksizelimit'
-        ),
+        b'gencountlimit': ui.configint(b'remotefilelog', b'data.gencountlimit'),
+        b'generations': ui.configlist(b'remotefilelog', b'data.generations'),
+        b'maxrepackpacks': ui.configint(
+            b'remotefilelog', b'data.maxrepackpacks'
+        ),
+        b'repackmaxpacksize': ui.configbytes(
+            b'remotefilelog', b'data.repackmaxpacksize'
+        ),
+        b'repacksizelimit': ui.configbytes(
+            b'remotefilelog', b'data.repacksizelimit'
+        ),
     }
 
     packfiles = _allpackfileswithsuffix(
         files, datapack.PACKSUFFIX, datapack.INDEXSUFFIX
     )
     return _computeincrementalpack(packfiles, opts)
 
 
 def _computeincrementalhistorypack(ui, files):
     opts = {
-        'gencountlimit': ui.configint('remotefilelog', 'history.gencountlimit'),
-        'generations': ui.configlist(
-            'remotefilelog', 'history.generations', ['100MB']
-        ),
-        'maxrepackpacks': ui.configint(
-            'remotefilelog', 'history.maxrepackpacks'
-        ),
-        'repackmaxpacksize': ui.configbytes(
-            'remotefilelog', 'history.repackmaxpacksize', '400MB'
-        ),
-        'repacksizelimit': ui.configbytes(
-            'remotefilelog', 'history.repacksizelimit'
-        ),
+        b'gencountlimit': ui.configint(
+            b'remotefilelog', b'history.gencountlimit'
+        ),
+        b'generations': ui.configlist(
+            b'remotefilelog', b'history.generations', [b'100MB']
+        ),
+        b'maxrepackpacks': ui.configint(
+            b'remotefilelog', b'history.maxrepackpacks'
+        ),
+        b'repackmaxpacksize': ui.configbytes(
+            b'remotefilelog', b'history.repackmaxpacksize', b'400MB'
+        ),
+        b'repacksizelimit': ui.configbytes(
+            b'remotefilelog', b'history.repacksizelimit'
+        ),
     }
 
     packfiles = _allpackfileswithsuffix(
         files, historypack.PACKSUFFIX, historypack.INDEXSUFFIX
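In the options above, data.generations and history.generations are lists of human-readable size thresholds; _computeincrementalpack turns them into descending byte limits that define the generation buckets. A short illustration of that conversion, with example threshold values rather than the extension's registered defaults:

    from mercurial import util

    generations = [b'1GB', b'100MB', b'1MB']  # example value for data.generations
    limits = sorted((util.sizetoint(s) for s in generations), reverse=True)
    limits.append(0)
    # limits == [1073741824, 104857600, 1048576, 0]; a pack belongs to the first
    # generation whose limit its size exceeds.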
@@ -351,11 +355,11 @@
         generations.append([])
 
     sizes = {}
     for prefix, mode, stat in files:
         size = stat.st_size
-        if size > opts['repackmaxpacksize']:
+        if size > opts[b'repackmaxpacksize']:
             continue
 
         sizes[prefix] = size
         for i, limit in enumerate(limits):
             if size > limit:
@@ -368,11 +372,11 @@
     # 3. While total-size-of-packs < repacksizelimit: add another pack
 
     # Find the largest generation with more than gencountlimit packs
    genpacks = []
     for i, limit in enumerate(limits):
-        if len(generations[i]) > opts['gencountlimit']:
+        if len(generations[i]) > opts[b'gencountlimit']:
             # Sort to be smallest last, for easy popping later
             genpacks.extend(
                 sorted(generations[i], reverse=True, key=lambda x: sizes[x])
             )
             break
@@ -380,13 +384,13 @@
     # Take as many packs from the generation as we can
     chosenpacks = genpacks[-3:]
     genpacks = genpacks[:-3]
     repacksize = sum(sizes[n] for n in chosenpacks)
     while (
-        repacksize < opts['repacksizelimit']
+        repacksize < opts[b'repacksizelimit']
         and genpacks
-        and len(chosenpacks) < opts['maxrepackpacks']
+        and len(chosenpacks) < opts[b'maxrepackpacks']
     ):
         chosenpacks.append(genpacks.pop())
         repacksize += sizes[chosenpacks[-1]]
 
     return chosenpacks
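The hunks above implement the selection policy spelled out in the numbered comments: take the fullest generation, start from its three smallest packs, then keep adding the next-smallest pack while the running total stays under repacksizelimit and the pack count stays under maxrepackpacks. A simplified, self-contained sketch of that policy for a single generation, with hypothetical names and sizes:

    def choosepacks(sizes, gencountlimit=2, repacksizelimit=100, maxrepackpacks=50):
        # smallest-last ordering, mirroring the sort in the hunk above
        genpacks = sorted(sizes, reverse=True, key=lambda n: sizes[n])
        if len(genpacks) <= gencountlimit:
            return []
        chosen = genpacks[-3:]
        genpacks = genpacks[:-3]
        total = sum(sizes[n] for n in chosen)
        while total < repacksizelimit and genpacks and len(chosen) < maxrepackpacks:
            chosen.append(genpacks.pop())
            total += sizes[chosen[-1]]
        return chosen

    # choosepacks({'a': 10, 'b': 20, 'c': 30, 'd': 90}) == ['c', 'b', 'a', 'd']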
@@ -402,16 +406,16 @@
         Unless a limit is specified in the config the default limit is taken.
         """
         filectx = repo.filectx(filename, fileid=node)
         filetime = repo[filectx.linkrev()].date()
 
-        ttl = repo.ui.configint('remotefilelog', 'nodettl')
+        ttl = repo.ui.configint(b'remotefilelog', b'nodettl')
 
         limit = time.time() - ttl
         return filetime[0] < limit
 
-    garbagecollect = repo.ui.configbool('remotefilelog', 'gcrepack')
+    garbagecollect = repo.ui.configbool(b'remotefilelog', b'gcrepack')
     if not fullhistory:
         fullhistory = history
     packer = repacker(
         repo,
         data,
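The isold helper above compares the commit date of the filenode's linkrev against a remotefilelog.nodettl cutoff; anything older than the TTL becomes a garbage-collection candidate during a gc repack. A worked example with an illustrative TTL:

    import time

    ttl = 86400                            # hypothetical nodettl value: one day, in seconds
    limit = time.time() - ttl
    commitdate = time.time() - 2 * 86400   # a commit made two days ago
    isold = commitdate < limit             # True: eligible for collection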
@@ -427,11 +431,14 @@
         with historypack.mutablehistorypack(repo.ui, packpath) as hpack:
             try:
                 packer.run(dpack, hpack)
             except error.LockHeld:
                 raise RepackAlreadyRunning(
-                    _("skipping repack - another repack " "is already running")
+                    _(
+                        b"skipping repack - another repack "
+                        b"is already running"
+                    )
                 )
 
 
 def keepset(repo, keyfn, lastkeepkeys=None):
     """Computes a keepset which is not garbage collected.
@@ -447,20 +454,20 @@
     # We want to keep:
     # 1. Working copy parent
     # 2. Draft commits
     # 3. Parents of draft commits
     # 4. Pullprefetch and bgprefetchrevs revsets if specified
-    revs = ['.', 'draft()', 'parents(draft())']
-    prefetchrevs = repo.ui.config('remotefilelog', 'pullprefetch', None)
+    revs = [b'.', b'draft()', b'parents(draft())']
+    prefetchrevs = repo.ui.config(b'remotefilelog', b'pullprefetch', None)
     if prefetchrevs:
-        revs.append('(%s)' % prefetchrevs)
-    prefetchrevs = repo.ui.config('remotefilelog', 'bgprefetchrevs', None)
+        revs.append(b'(%s)' % prefetchrevs)
+    prefetchrevs = repo.ui.config(b'remotefilelog', b'bgprefetchrevs', None)
     if prefetchrevs:
-        revs.append('(%s)' % prefetchrevs)
-    revs = '+'.join(revs)
+        revs.append(b'(%s)' % prefetchrevs)
+    revs = b'+'.join(revs)
 
-    revs = ['sort((%s), "topo")' % revs]
+    revs = [b'sort((%s), "topo")' % revs]
     keep = scmutil.revrange(repo, revs)
 
     processed = set()
     lastmanifest = None
 
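As a worked example of the revset assembled in the hunk above: when neither remotefilelog.pullprefetch nor remotefilelog.bgprefetchrevs is configured, the value passed to scmutil.revrange is built like this:

    revs = [b'.', b'draft()', b'parents(draft())']
    revs = b'+'.join(revs)                  # b'.+draft()+parents(draft())'
    revs = [b'sort((%s), "topo")' % revs]
    # revs == [b'sort((.+draft()+parents(draft())), "topo")']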
@@ -518,22 +525,22 @@
         self.unit = constants.getunits(category)
         self.garbagecollect = gc
         self.options = options
         if self.garbagecollect:
             if not isold:
-                raise ValueError("Function 'isold' is not properly specified")
+                raise ValueError(b"Function 'isold' is not properly specified")
             # use (filename, node) tuple as a keepset key
             self.keepkeys = keepset(repo, lambda f, n: (f, n))
             self.isold = isold
 
     def run(self, targetdata, targethistory):
         ledger = repackledger()
 
         with lockmod.lock(
-            repacklockvfs(self.repo), "repacklock", desc=None, timeout=0
+            repacklockvfs(self.repo), b"repacklock", desc=None, timeout=0
         ):
-            self.repo.hook('prerepack')
+            self.repo.hook(b'prerepack')
 
             # Populate ledger from source
             self.data.markledger(ledger, options=self.options)
             self.history.markledger(ledger, options=self.options)
 
@@ -569,12 +576,12 @@
         # (Sort by node first to ensure the sort is stable.)
         orphans = sorted(orphans)
         orphans = list(sorted(orphans, key=getsize, reverse=True))
         if ui.debugflag:
             ui.debug(
-                "%s: orphan chain: %s\n"
-                % (filename, ", ".join([short(s) for s in orphans]))
+                b"%s: orphan chain: %s\n"
+                % (filename, b", ".join([short(s) for s in orphans]))
             )
 
         # Create one contiguous chain and reassign deltabases.
         for i, node in enumerate(orphans):
             if i == 0:
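The two-pass sort above relies on Python's sort being stable: ordering by node first and then by size descending means same-size orphans come out in node order. A tiny illustration with hypothetical nodes and sizes:

    sizes = {b'aa': 10, b'bb': 10, b'cc': 30}
    orphans = [b'cc', b'bb', b'aa']
    orphans = sorted(orphans)                       # [b'aa', b'bb', b'cc']
    orphans = list(sorted(orphans, key=lambda x: sizes[x], reverse=True))
    # [b'cc', b'aa', b'bb']: b'aa' and b'bb' keep their node order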
@@ -586,29 +593,29 @@
         nodes += orphans
         return nodes
 
     def repackdata(self, ledger, target):
         ui = self.repo.ui
-        maxchainlen = ui.configint('packs', 'maxchainlen', 1000)
+        maxchainlen = ui.configint(b'packs', b'maxchainlen', 1000)
 
         byfile = {}
         for entry in ledger.entries.itervalues():
             if entry.datasource:
                 byfile.setdefault(entry.filename, {})[entry.node] = entry
 
         count = 0
         repackprogress = ui.makeprogress(
-            _("repacking data"), unit=self.unit, total=len(byfile)
+            _(b"repacking data"), unit=self.unit, total=len(byfile)
         )
         for filename, entries in sorted(byfile.iteritems()):
             repackprogress.update(count)
 
             ancestors = {}
             nodes = list(node for node in entries)
             nohistory = []
             buildprogress = ui.makeprogress(
-                _("building history"), unit='nodes', total=len(nodes)
+                _(b"building history"), unit=b'nodes', total=len(nodes)
             )
             for i, node in enumerate(nodes):
                 if node in ancestors:
                     continue
                 buildprogress.update(i)
@@ -627,11 +634,11 @@
 
             # Order the nodes children first, so we can produce reverse deltas
             orderednodes = list(reversed(self._toposort(ancestors)))
             if len(nohistory) > 0:
                 ui.debug(
-                    'repackdata: %d nodes without history\n' % len(nohistory)
+                    b'repackdata: %d nodes without history\n' % len(nohistory)
                 )
                 orderednodes.extend(sorted(nohistory))
 
             # Filter orderednodes to just the nodes we want to serialize (it
             # currently also has the edge nodes' ancestors).
@@ -657,11 +664,11 @@
             deltabases = {}
             nobase = set()
             referenced = set()
             nodes = set(nodes)
             processprogress = ui.makeprogress(
-                _("processing nodes"), unit='nodes', total=len(orderednodes)
+                _(b"processing nodes"), unit=b'nodes', total=len(orderednodes)
             )
             for i, node in enumerate(orderednodes):
                 processprogress.update(i)
                 # Find delta base
                 # TODO: allow delta'ing against most recent descendant instead
@@ -696,11 +703,11 @@
                             deltabases[p1] = (node, chainlen + 1)
                         if p2 != nullid:
                             deltabases[p2] = (node, chainlen + 1)
 
             # experimental config: repack.chainorphansbysize
-            if ui.configbool('repack', 'chainorphansbysize'):
+            if ui.configbool(b'repack', b'chainorphansbysize'):
                 orphans = nobase - referenced
                 orderednodes = self._chainorphans(
                     ui, filename, orderednodes, orphans, deltabases
                 )
 
@@ -749,11 +756,11 @@
         for entry in ledger.entries.itervalues():
             if entry.historysource:
                 byfile.setdefault(entry.filename, {})[entry.node] = entry
 
         progress = ui.makeprogress(
-            _("repacking history"), unit=self.unit, total=len(byfile)
+            _(b"repacking history"), unit=self.unit, total=len(byfile)
         )
         for filename, entries in sorted(byfile.iteritems()):
             ancestors = {}
             nodes = list(node for node in entries)
 
@@ -892,11 +899,11 @@
         # If garbage collected
         self.gced = False
 
 
 def repacklockvfs(repo):
-    if util.safehasattr(repo, 'name'):
+    if util.safehasattr(repo, b'name'):
         # Lock in the shared cache so repacks across multiple copies of the same
         # repo are coordinated.
         sharedcachepath = shallowutil.getcachepackpath(
             repo, constants.FILEPACK_CATEGORY
         )