    @classmethod
    def fromconfig(cls, repo):
        """Return whether this variant would be enabled by ``repo``'s config.

        Checks whether this variant's requirement string appears in the set
        returned by ``cls._newreporequirements(repo.ui)`` (presumably the
        requirements a newly created repository would receive under the
        current configuration — confirm against the base class).
        """
        # Subclasses must set _requirement; the abstract base leaves it None.
        assert cls._requirement is not None
        return cls._requirement in cls._newreporequirements(repo.ui)
|
259 |
@registerformatvariant
class fncache(requirementformatvariant):
    """Format variant backed by the 'fncache' repository requirement."""

    name = 'fncache'

    _requirement = 'fncache'

    # Presumably the expected/default state for new repositories — confirm
    # against the formatvariant base class.
    default = True

    # Drawback of lacking this feature (user-facing, translatable).
    description = _(
        'long and reserved filenames may not work correctly; '
        'repository performance is sub-optimal'
    )

    # Benefit gained by upgrading (user-facing, translatable).
    upgrademessage = _(
        'repository will be more resilient to storing '
        'certain paths and performance of certain '
        'operations should be improved'
    )
260 |
279 |
@registerformatvariant
class dotencode(requirementformatvariant):
    """Format variant backed by the 'dotencode' repository requirement."""

    name = 'dotencode'

    _requirement = 'dotencode'

    # Presumably the expected/default state for new repositories — confirm
    # against the formatvariant base class.
    default = True

    # Drawback of lacking this feature (user-facing, translatable).
    description = _(
        'storage of filenames beginning with a period or '
        'space may not work correctly'
    )

    # Benefit gained by upgrading (user-facing, translatable).
    upgrademessage = _(
        'repository will be better able to store files '
        'beginning with a space or period'
    )
274 |
298 |
@registerformatvariant
class generaldelta(requirementformatvariant):
    """Format variant backed by the 'generaldelta' repository requirement."""

    name = 'generaldelta'

    _requirement = 'generaldelta'

    # Presumably the expected/default state for new repositories — confirm
    # against the formatvariant base class.
    default = True

    # Drawback of lacking this feature (user-facing, translatable).
    description = _(
        'deltas within internal storage are unable to '
        'choose optimal revisions; repository is larger and '
        'slower than it could be; interaction with other '
        'repositories may require extra network and CPU '
        'resources, making "hg push" and "hg pull" slower'
    )

    # Benefit gained by upgrading (user-facing, translatable).
    upgrademessage = _(
        'repository storage will be able to create '
        'optimal deltas; new repository data will be '
        'smaller and read times should decrease; '
        'interacting with other repositories using this '
        'storage model should require less network and '
        'CPU resources, making "hg push" and "hg pull" '
        'faster'
    )
296 |
325 |
@registerformatvariant
class sparserevlog(requirementformatvariant):
    """Format variant backed by the sparse-revlog repository requirement."""

    name = 'sparserevlog'

    _requirement = localrepo.SPARSEREVLOG_REQUIREMENT

    # Presumably the expected/default state for new repositories — confirm
    # against the formatvariant base class.
    default = True

    # Drawback of lacking this feature (user-facing, translatable).
    # Fixed typo in the message: "severly" -> "severely".
    description = _(
        'in order to limit disk reading and memory usage on older '
        'version, the span of a delta chain from its root to its '
        'end is limited, whatever the relevant data in this span. '
        'This can severely limit Mercurial ability to build good '
        'chain of delta resulting is much more storage space being '
        'taken and limit reusability of on disk delta during '
        'exchange.'
    )

    # Benefit gained by upgrading (user-facing, translatable).
    upgrademessage = _(
        'Revlog supports delta chain with more unused data '
        'between payload. These gaps will be skipped at read '
        'time. This allows for better delta chains, making a '
        'better compression and faster exchange with server.'
    )
318 |
351 |
@registerformatvariant
class sidedata(requirementformatvariant):
    """Format variant backed by the side-data repository requirement."""

    name = 'sidedata'

    _requirement = localrepo.SIDEDATA_REQUIREMENT

    # Disabled by default, unlike the other requirement variants above —
    # presumably because the feature is experimental; confirm against the
    # formatvariant base class.
    default = False

    # Drawback of lacking this feature (user-facing, translatable).
    description = _(
        'Allows storage of extra data alongside a revision, '
        'unlocking various caching options.'
    )

    # Benefit gained by upgrading (user-facing, translatable).
    upgrademessage = _('Allows storage of extra data alongside a revision.')
331 |
367 |
332 @registerformatvariant |
368 @registerformatvariant |
333 class removecldeltachain(formatvariant): |
369 class removecldeltachain(formatvariant): |
334 name = 'plain-cl-delta' |
370 name = 'plain-cl-delta' |
335 |
371 |
336 default = True |
372 default = True |
337 |
373 |
338 description = _('changelog storage is using deltas instead of ' |
374 description = _( |
339 'raw entries; changelog reading and any ' |
375 'changelog storage is using deltas instead of ' |
340 'operation relying on changelog data are slower ' |
376 'raw entries; changelog reading and any ' |
341 'than they could be') |
377 'operation relying on changelog data are slower ' |
342 |
378 'than they could be' |
343 upgrademessage = _('changelog storage will be reformated to ' |
379 ) |
344 'store raw entries; changelog reading will be ' |
380 |
345 'faster; changelog size may be reduced') |
381 upgrademessage = _( |
|
382 'changelog storage will be reformated to ' |
|
383 'store raw entries; changelog reading will be ' |
|
384 'faster; changelog size may be reduced' |
|
385 ) |
346 |
386 |
347 @staticmethod |
387 @staticmethod |
348 def fromrepo(repo): |
388 def fromrepo(repo): |
349 # Mercurial 4.0 changed changelogs to not use delta chains. Search for |
389 # Mercurial 4.0 changed changelogs to not use delta chains. Search for |
350 # changelogs with deltas. |
390 # changelogs with deltas. |
442 'redeltamultibase': 're-delta-multibase', |
489 'redeltamultibase': 're-delta-multibase', |
443 'redeltaall': 're-delta-all', |
490 'redeltaall': 're-delta-all', |
444 'redeltafulladd': 're-delta-fulladd', |
491 'redeltafulladd': 're-delta-fulladd', |
445 } |
492 } |
446 |
493 |
|
494 |
def findoptimizations(repo):
    """Determine optimisation that could be used during upgrade.

    Returns a list of ``improvement`` instances of type ``optimisation``.
    All known optimizations are returned unconditionally; logic elsewhere
    decides which ones actually apply to a given upgrade run.
    """
    # These are unconditionally added. There is logic later that figures out
    # which ones to apply.
    optimizations = []

    # re-delta-parent: recompute delta base revisions only where the current
    # choice is suboptimal.
    optimizations.append(
        improvement(
            name='re-delta-parent',
            type=optimisation,
            description=_(
                'deltas within internal storage will be recalculated to '
                'choose an optimal base revision where this was not '
                'already done; the size of the repository may shrink and '
                'various operations may become faster; the first time '
                'this optimization is performed could slow down upgrade '
                'execution considerably; subsequent invocations should '
                'not run noticeably slower'
            ),
            upgrademessage=_(
                'deltas within internal storage will choose a new '
                'base revision if needed'
            ),
        )
    )

    # re-delta-multibase: try several candidate bases per revision and keep
    # the smallest delta; costly in proportion to merges and file count.
    optimizations.append(
        improvement(
            name='re-delta-multibase',
            type=optimisation,
            description=_(
                'deltas within internal storage will be recalculated '
                'against multiple base revision and the smallest '
                'difference will be used; the size of the repository may '
                'shrink significantly when there are many merges; this '
                'optimization will slow down execution in proportion to '
                'the number of merges in the repository and the amount '
                'of files in the repository; this slow down should not '
                'be significant unless there are tens of thousands of '
                'files and thousands of merges'
            ),
            upgrademessage=_(
                'deltas within internal storage will choose an '
                'optimal delta by computing deltas against multiple '
                'parents; may slow down execution time '
                'significantly'
            ),
        )
    )

    # re-delta-all: recompute every delta from scratch, never reusing
    # existing ones; rarely needed and much slower.
    optimizations.append(
        improvement(
            name='re-delta-all',
            type=optimisation,
            description=_(
                'deltas within internal storage will always be '
                'recalculated without reusing prior deltas; this will '
                'likely make execution run several times slower; this '
                'optimization is typically not needed'
            ),
            upgrademessage=_(
                'deltas within internal storage will be fully '
                'recomputed; this will likely drastically slow down '
                'execution time'
            ),
        )
    )

    # re-delta-fulladd: re-add each revision through the full storage
    # pipeline so extensions (e.g. lfs) can process it; slowest option.
    optimizations.append(
        improvement(
            name='re-delta-fulladd',
            type=optimisation,
            description=_(
                'every revision will be re-added as if it was new '
                'content. It will go through the full storage '
                'mechanism giving extensions a chance to process it '
                '(eg. lfs). This is similar to "re-delta-all" but even '
                'slower since more logic is involved.'
            ),
            upgrademessage=_(
                'each revision will be added as new content to the '
                'internal storage; this will likely drastically slow '
                'down execution time, but some extensions might need '
                'it'
            ),
        )
    )

    return optimizations
|
584 |
508 |
585 |
509 def determineactions(repo, deficiencies, sourcereqs, destreqs): |
586 def determineactions(repo, deficiencies, sourcereqs, destreqs): |
510 """Determine upgrade actions that will be performed. |
587 """Determine upgrade actions that will be performed. |
511 |
588 |
512 Given a list of improvements as returned by ``finddeficiencies`` and |
589 Given a list of improvements as returned by ``finddeficiencies`` and |
535 |
612 |
536 # FUTURE consider adding some optimizations here for certain transitions. |
613 # FUTURE consider adding some optimizations here for certain transitions. |
537 # e.g. adding generaldelta could schedule parent redeltas. |
614 # e.g. adding generaldelta could schedule parent redeltas. |
538 |
615 |
539 return newactions |
616 return newactions |
|
617 |
540 |
618 |
def _revlogfrompath(repo, path):
    """Instantiate the revlog backing ``path`` within ``repo``'s store.

    Depending on the store path this is the changelog, a (tree) manifest
    revlog, or a plain filelog.
    """
    if path == '00changelog.i':
        return changelog.changelog(repo.svfs)

    if path.endswith('00manifest.i'):
        # Whatever precedes the trailing '00manifest.i' names the manifest
        # tree ('' for the root manifest).
        tree = path[: -len('00manifest.i')]
        return manifest.manifestrevlog(repo.svfs, tree=tree)

    # Strip the leading 'data/' and the trailing '.i' to recover the tracked
    # file name — reverse of "/".join(("data", path + ".i")).
    return filelog.filelog(repo.svfs, path[5:-2])
|
632 |
554 |
633 |
555 def _copyrevlog(tr, destrepo, oldrl, unencodedname): |
634 def _copyrevlog(tr, destrepo, oldrl, unencodedname): |
556 """copy all relevant files for `oldrl` into `destrepo` store |
635 """copy all relevant files for `oldrl` into `destrepo` store |
557 |
636 |
558 Files are copied "as is" without any transformation. The copy is performed |
637 Files are copied "as is" without any transformation. The copy is performed |
569 newindex = newvfs.join(newrl.indexfile) |
648 newindex = newvfs.join(newrl.indexfile) |
570 olddata = oldvfs.join(oldrl.datafile) |
649 olddata = oldvfs.join(oldrl.datafile) |
571 newdata = newvfs.join(newrl.datafile) |
650 newdata = newvfs.join(newrl.datafile) |
572 |
651 |
573 with newvfs(newrl.indexfile, 'w'): |
652 with newvfs(newrl.indexfile, 'w'): |
574 pass # create all the directories |
653 pass # create all the directories |
575 |
654 |
576 util.copyfile(oldindex, newindex) |
655 util.copyfile(oldindex, newindex) |
577 copydata = oldrl.opener.exists(oldrl.datafile) |
656 copydata = oldrl.opener.exists(oldrl.datafile) |
578 if copydata: |
657 if copydata: |
579 util.copyfile(olddata, newdata) |
658 util.copyfile(olddata, newdata) |
580 |
659 |
581 if not (unencodedname.endswith('00changelog.i') |
660 if not ( |
582 or unencodedname.endswith('00manifest.i')): |
661 unencodedname.endswith('00changelog.i') |
|
662 or unencodedname.endswith('00manifest.i') |
|
663 ): |
583 destrepo.svfs.fncache.add(unencodedname) |
664 destrepo.svfs.fncache.add(unencodedname) |
584 if copydata: |
665 if copydata: |
585 destrepo.svfs.fncache.add(unencodedname[:-2] + '.d') |
666 destrepo.svfs.fncache.add(unencodedname[:-2] + '.d') |
586 |
667 |
|
668 |
# Sentinel tokens identifying the classes of revlog an upgrade may target.
UPGRADE_CHANGELOG = object()
UPGRADE_MANIFEST = object()
UPGRADE_FILELOG = object()

# Default selection: operate on every class of revlog.
UPGRADE_ALL_REVLOGS = frozenset(
    [UPGRADE_CHANGELOG, UPGRADE_MANIFEST, UPGRADE_FILELOG]
)
|
676 |
594 |
677 |
595 def matchrevlog(revlogfilter, entry): |
678 def matchrevlog(revlogfilter, entry): |
596 """check is a revlog is selected for cloning |
679 """check is a revlog is selected for cloning |
597 |
680 |
598 The store entry is checked against the passed filter""" |
681 The store entry is checked against the passed filter""" |
684 continue |
786 continue |
685 |
787 |
686 oldrl = _revlogfrompath(srcrepo, unencoded) |
788 oldrl = _revlogfrompath(srcrepo, unencoded) |
687 |
789 |
688 if isinstance(oldrl, changelog.changelog) and 'c' not in seen: |
790 if isinstance(oldrl, changelog.changelog) and 'c' not in seen: |
689 ui.write(_('finished migrating %d manifest revisions across %d ' |
791 ui.write( |
690 'manifests; change in size: %s\n') % |
792 _( |
691 (mrevcount, mcount, util.bytecount(mdstsize - msrcsize))) |
793 'finished migrating %d manifest revisions across %d ' |
692 |
794 'manifests; change in size: %s\n' |
693 ui.write(_('migrating changelog containing %d revisions ' |
795 ) |
694 '(%s in store; %s tracked data)\n') % |
796 % (mrevcount, mcount, util.bytecount(mdstsize - msrcsize)) |
695 (crevcount, util.bytecount(csrcsize), |
797 ) |
696 util.bytecount(crawsize))) |
798 |
|
799 ui.write( |
|
800 _( |
|
801 'migrating changelog containing %d revisions ' |
|
802 '(%s in store; %s tracked data)\n' |
|
803 ) |
|
804 % ( |
|
805 crevcount, |
|
806 util.bytecount(csrcsize), |
|
807 util.bytecount(crawsize), |
|
808 ) |
|
809 ) |
697 seen.add('c') |
810 seen.add('c') |
698 progress = srcrepo.ui.makeprogress(_('changelog revisions'), |
811 progress = srcrepo.ui.makeprogress( |
699 total=crevcount) |
812 _('changelog revisions'), total=crevcount |
|
813 ) |
700 elif isinstance(oldrl, manifest.manifestrevlog) and 'm' not in seen: |
814 elif isinstance(oldrl, manifest.manifestrevlog) and 'm' not in seen: |
701 ui.write(_('finished migrating %d filelog revisions across %d ' |
815 ui.write( |
702 'filelogs; change in size: %s\n') % |
816 _( |
703 (frevcount, fcount, util.bytecount(fdstsize - fsrcsize))) |
817 'finished migrating %d filelog revisions across %d ' |
704 |
818 'filelogs; change in size: %s\n' |
705 ui.write(_('migrating %d manifests containing %d revisions ' |
819 ) |
706 '(%s in store; %s tracked data)\n') % |
820 % (frevcount, fcount, util.bytecount(fdstsize - fsrcsize)) |
707 (mcount, mrevcount, util.bytecount(msrcsize), |
821 ) |
708 util.bytecount(mrawsize))) |
822 |
|
823 ui.write( |
|
824 _( |
|
825 'migrating %d manifests containing %d revisions ' |
|
826 '(%s in store; %s tracked data)\n' |
|
827 ) |
|
828 % ( |
|
829 mcount, |
|
830 mrevcount, |
|
831 util.bytecount(msrcsize), |
|
832 util.bytecount(mrawsize), |
|
833 ) |
|
834 ) |
709 seen.add('m') |
835 seen.add('m') |
710 if progress: |
836 if progress: |
711 progress.complete() |
837 progress.complete() |
712 progress = srcrepo.ui.makeprogress(_('manifest revisions'), |
838 progress = srcrepo.ui.makeprogress( |
713 total=mrevcount) |
839 _('manifest revisions'), total=mrevcount |
|
840 ) |
714 elif 'f' not in seen: |
841 elif 'f' not in seen: |
715 ui.write(_('migrating %d filelogs containing %d revisions ' |
842 ui.write( |
716 '(%s in store; %s tracked data)\n') % |
843 _( |
717 (fcount, frevcount, util.bytecount(fsrcsize), |
844 'migrating %d filelogs containing %d revisions ' |
718 util.bytecount(frawsize))) |
845 '(%s in store; %s tracked data)\n' |
|
846 ) |
|
847 % ( |
|
848 fcount, |
|
849 frevcount, |
|
850 util.bytecount(fsrcsize), |
|
851 util.bytecount(frawsize), |
|
852 ) |
|
853 ) |
719 seen.add('f') |
854 seen.add('f') |
720 if progress: |
855 if progress: |
721 progress.complete() |
856 progress.complete() |
722 progress = srcrepo.ui.makeprogress(_('file revisions'), |
857 progress = srcrepo.ui.makeprogress( |
723 total=frevcount) |
858 _('file revisions'), total=frevcount |
|
859 ) |
724 |
860 |
725 if matchrevlog(revlogs, unencoded): |
861 if matchrevlog(revlogs, unencoded): |
726 ui.note(_('cloning %d revisions from %s\n') |
862 ui.note( |
727 % (len(oldrl), unencoded)) |
863 _('cloning %d revisions from %s\n') % (len(oldrl), unencoded) |
|
864 ) |
728 newrl = _revlogfrompath(dstrepo, unencoded) |
865 newrl = _revlogfrompath(dstrepo, unencoded) |
729 oldrl.clone(tr, newrl, addrevisioncb=oncopiedrevision, |
866 oldrl.clone( |
730 deltareuse=deltareuse, |
867 tr, |
731 forcedeltabothparents=forcedeltabothparents) |
868 newrl, |
|
869 addrevisioncb=oncopiedrevision, |
|
870 deltareuse=deltareuse, |
|
871 forcedeltabothparents=forcedeltabothparents, |
|
872 ) |
732 else: |
873 else: |
733 msg = _('blindly copying %s containing %i revisions\n') |
874 msg = _('blindly copying %s containing %i revisions\n') |
734 ui.note(msg % (unencoded, len(oldrl))) |
875 ui.note(msg % (unencoded, len(oldrl))) |
735 _copyrevlog(tr, dstrepo, oldrl, unencoded) |
876 _copyrevlog(tr, dstrepo, oldrl, unencoded) |
736 |
877 |
785 if path in ('lock', 'fncache'): |
934 if path in ('lock', 'fncache'): |
786 return False |
935 return False |
787 |
936 |
788 return True |
937 return True |
789 |
938 |
|
939 |
790 def _finishdatamigration(ui, srcrepo, dstrepo, requirements): |
940 def _finishdatamigration(ui, srcrepo, dstrepo, requirements): |
791 """Hook point for extensions to perform additional actions during upgrade. |
941 """Hook point for extensions to perform additional actions during upgrade. |
792 |
942 |
793 This function is called after revlogs and store files have been copied but |
943 This function is called after revlogs and store files have been copied but |
794 before the new store is swapped into the original location. |
944 before the new store is swapped into the original location. |
795 """ |
945 """ |
796 |
946 |
797 def _upgraderepo(ui, srcrepo, dstrepo, requirements, actions, |
947 |
798 revlogs=UPGRADE_ALL_REVLOGS): |
948 def _upgraderepo( |
|
949 ui, srcrepo, dstrepo, requirements, actions, revlogs=UPGRADE_ALL_REVLOGS |
|
950 ): |
799 """Do the low-level work of upgrading a repository. |
951 """Do the low-level work of upgrading a repository. |
800 |
952 |
801 The upgrade is effectively performed as a copy between a source |
953 The upgrade is effectively performed as a copy between a source |
802 repository and a temporary destination repository. |
954 repository and a temporary destination repository. |
803 |
955 |
821 deltareuse = revlog.revlog.DELTAREUSEFULLADD |
977 deltareuse = revlog.revlog.DELTAREUSEFULLADD |
822 else: |
978 else: |
823 deltareuse = revlog.revlog.DELTAREUSEALWAYS |
979 deltareuse = revlog.revlog.DELTAREUSEALWAYS |
824 |
980 |
825 with dstrepo.transaction('upgrade') as tr: |
981 with dstrepo.transaction('upgrade') as tr: |
826 _clonerevlogs(ui, srcrepo, dstrepo, tr, deltareuse, |
982 _clonerevlogs( |
827 're-delta-multibase' in actions, revlogs=revlogs) |
983 ui, |
|
984 srcrepo, |
|
985 dstrepo, |
|
986 tr, |
|
987 deltareuse, |
|
988 're-delta-multibase' in actions, |
|
989 revlogs=revlogs, |
|
990 ) |
828 |
991 |
829 # Now copy other files in the store directory. |
992 # Now copy other files in the store directory. |
830 # The sorted() makes execution deterministic. |
993 # The sorted() makes execution deterministic. |
831 for p, kind, st in sorted(srcrepo.store.vfs.readdir('', stat=True)): |
994 for p, kind, st in sorted(srcrepo.store.vfs.readdir('', stat=True)): |
832 if not _filterstorefile(srcrepo, dstrepo, requirements, |
995 if not _filterstorefile(srcrepo, dstrepo, requirements, p, kind, st): |
833 p, kind, st): |
|
834 continue |
996 continue |
835 |
997 |
836 srcrepo.ui.write(_('copying %s\n') % p) |
998 srcrepo.ui.write(_('copying %s\n') % p) |
837 src = srcrepo.store.rawvfs.join(p) |
999 src = srcrepo.store.rawvfs.join(p) |
838 dst = dstrepo.store.rawvfs.join(p) |
1000 dst = dstrepo.store.rawvfs.join(p) |
850 |
1012 |
851 # We install an arbitrary requirement that clients must not support |
1013 # We install an arbitrary requirement that clients must not support |
852 # as a mechanism to lock out new clients during the data swap. This is |
1014 # as a mechanism to lock out new clients during the data swap. This is |
853 # better than allowing a client to continue while the repository is in |
1015 # better than allowing a client to continue while the repository is in |
854 # an inconsistent state. |
1016 # an inconsistent state. |
855 ui.write(_('marking source repository as being upgraded; clients will be ' |
1017 ui.write( |
856 'unable to read from repository\n')) |
1018 _( |
857 scmutil.writerequires(srcrepo.vfs, |
1019 'marking source repository as being upgraded; clients will be ' |
858 srcrepo.requirements | {'upgradeinprogress'}) |
1020 'unable to read from repository\n' |
|
1021 ) |
|
1022 ) |
|
1023 scmutil.writerequires( |
|
1024 srcrepo.vfs, srcrepo.requirements | {'upgradeinprogress'} |
|
1025 ) |
859 |
1026 |
860 ui.write(_('starting in-place swap of repository data\n')) |
1027 ui.write(_('starting in-place swap of repository data\n')) |
861 ui.write(_('replaced files will be backed up at %s\n') % |
1028 ui.write(_('replaced files will be backed up at %s\n') % backuppath) |
862 backuppath) |
|
863 |
1029 |
864 # Now swap in the new store directory. Doing it as a rename should make |
1030 # Now swap in the new store directory. Doing it as a rename should make |
865 # the operation nearly instantaneous and atomic (at least in well-behaved |
1031 # the operation nearly instantaneous and atomic (at least in well-behaved |
866 # environments). |
1032 # environments). |
867 ui.write(_('replacing store...\n')) |
1033 ui.write(_('replacing store...\n')) |
868 tstart = util.timer() |
1034 tstart = util.timer() |
869 util.rename(srcrepo.spath, backupvfs.join('store')) |
1035 util.rename(srcrepo.spath, backupvfs.join('store')) |
870 util.rename(dstrepo.spath, srcrepo.spath) |
1036 util.rename(dstrepo.spath, srcrepo.spath) |
871 elapsed = util.timer() - tstart |
1037 elapsed = util.timer() - tstart |
872 ui.write(_('store replacement complete; repository was inconsistent for ' |
1038 ui.write( |
873 '%0.1fs\n') % elapsed) |
1039 _( |
|
1040 'store replacement complete; repository was inconsistent for ' |
|
1041 '%0.1fs\n' |
|
1042 ) |
|
1043 % elapsed |
|
1044 ) |
874 |
1045 |
875 # We first write the requirements file. Any new requirements will lock |
1046 # We first write the requirements file. Any new requirements will lock |
876 # out legacy clients. |
1047 # out legacy clients. |
877 ui.write(_('finalizing requirements file and making repository readable ' |
1048 ui.write( |
878 'again\n')) |
1049 _( |
|
1050 'finalizing requirements file and making repository readable ' |
|
1051 'again\n' |
|
1052 ) |
|
1053 ) |
879 scmutil.writerequires(srcrepo.vfs, requirements) |
1054 scmutil.writerequires(srcrepo.vfs, requirements) |
880 |
1055 |
881 # The lock file from the old store won't be removed because nothing has a |
1056 # The lock file from the old store won't be removed because nothing has a |
882 # reference to its new location. So clean it up manually. Alternatively, we |
1057 # reference to its new location. So clean it up manually. Alternatively, we |
883 # could update srcrepo.svfs and other variables to point to the new |
1058 # could update srcrepo.svfs and other variables to point to the new |
884 # location. This is simpler. |
1059 # location. This is simpler. |
885 backupvfs.unlink('store/lock') |
1060 backupvfs.unlink('store/lock') |
886 |
1061 |
887 return backuppath |
1062 return backuppath |
888 |
1063 |
889 def upgraderepo(ui, repo, run=False, optimize=None, backup=True, |
1064 |
890 manifest=None, changelog=None): |
1065 def upgraderepo( |
|
1066 ui, |
|
1067 repo, |
|
1068 run=False, |
|
1069 optimize=None, |
|
1070 backup=True, |
|
1071 manifest=None, |
|
1072 changelog=None, |
|
1073 ): |
891 """Upgrade a repository in place.""" |
1074 """Upgrade a repository in place.""" |
892 if optimize is None: |
1075 if optimize is None: |
893 optimize = [] |
1076 optimize = [] |
894 optimize = set(legacy_opts_map.get(o, o) for o in optimize) |
1077 optimize = set(legacy_opts_map.get(o, o) for o in optimize) |
895 repo = repo.unfiltered() |
1078 repo = repo.unfiltered() |
916 revlogs.discard(UPGRADE_MANIFEST) |
1099 revlogs.discard(UPGRADE_MANIFEST) |
917 |
1100 |
918 # Ensure the repository can be upgraded. |
1101 # Ensure the repository can be upgraded. |
919 missingreqs = requiredsourcerequirements(repo) - repo.requirements |
1102 missingreqs = requiredsourcerequirements(repo) - repo.requirements |
920 if missingreqs: |
1103 if missingreqs: |
921 raise error.Abort(_('cannot upgrade repository; requirement ' |
1104 raise error.Abort( |
922 'missing: %s') % _(', ').join(sorted(missingreqs))) |
1105 _('cannot upgrade repository; requirement ' 'missing: %s') |
|
1106 % _(', ').join(sorted(missingreqs)) |
|
1107 ) |
923 |
1108 |
924 blockedreqs = blocksourcerequirements(repo) & repo.requirements |
1109 blockedreqs = blocksourcerequirements(repo) & repo.requirements |
925 if blockedreqs: |
1110 if blockedreqs: |
926 raise error.Abort(_('cannot upgrade repository; unsupported source ' |
1111 raise error.Abort( |
927 'requirement: %s') % |
1112 _( |
928 _(', ').join(sorted(blockedreqs))) |
1113 'cannot upgrade repository; unsupported source ' |
|
1114 'requirement: %s' |
|
1115 ) |
|
1116 % _(', ').join(sorted(blockedreqs)) |
|
1117 ) |
929 |
1118 |
930 # FUTURE there is potentially a need to control the wanted requirements via |
1119 # FUTURE there is potentially a need to control the wanted requirements via |
931 # command arguments or via an extension hook point. |
1120 # command arguments or via an extension hook point. |
932 newreqs = localrepo.newreporequirements( |
1121 newreqs = localrepo.newreporequirements( |
933 repo.ui, localrepo.defaultcreateopts(repo.ui)) |
1122 repo.ui, localrepo.defaultcreateopts(repo.ui) |
|
1123 ) |
934 newreqs.update(preservedrequirements(repo)) |
1124 newreqs.update(preservedrequirements(repo)) |
935 |
1125 |
936 noremovereqs = (repo.requirements - newreqs - |
1126 noremovereqs = ( |
937 supportremovedrequirements(repo)) |
1127 repo.requirements - newreqs - supportremovedrequirements(repo) |
|
1128 ) |
938 if noremovereqs: |
1129 if noremovereqs: |
939 raise error.Abort(_('cannot upgrade repository; requirement would be ' |
1130 raise error.Abort( |
940 'removed: %s') % _(', ').join(sorted(noremovereqs))) |
1131 _('cannot upgrade repository; requirement would be ' 'removed: %s') |
941 |
1132 % _(', ').join(sorted(noremovereqs)) |
942 noaddreqs = (newreqs - repo.requirements - |
1133 ) |
943 allowednewrequirements(repo)) |
1134 |
|
1135 noaddreqs = newreqs - repo.requirements - allowednewrequirements(repo) |
944 if noaddreqs: |
1136 if noaddreqs: |
945 raise error.Abort(_('cannot upgrade repository; do not support adding ' |
1137 raise error.Abort( |
946 'requirement: %s') % |
1138 _( |
947 _(', ').join(sorted(noaddreqs))) |
1139 'cannot upgrade repository; do not support adding ' |
|
1140 'requirement: %s' |
|
1141 ) |
|
1142 % _(', ').join(sorted(noaddreqs)) |
|
1143 ) |
948 |
1144 |
949 unsupportedreqs = newreqs - supporteddestrequirements(repo) |
1145 unsupportedreqs = newreqs - supporteddestrequirements(repo) |
950 if unsupportedreqs: |
1146 if unsupportedreqs: |
951 raise error.Abort(_('cannot upgrade repository; do not support ' |
1147 raise error.Abort( |
952 'destination requirement: %s') % |
1148 _( |
953 _(', ').join(sorted(unsupportedreqs))) |
1149 'cannot upgrade repository; do not support ' |
|
1150 'destination requirement: %s' |
|
1151 ) |
|
1152 % _(', ').join(sorted(unsupportedreqs)) |
|
1153 ) |
954 |
1154 |
955 # Find and validate all improvements that can be made. |
1155 # Find and validate all improvements that can be made. |
956 alloptimizations = findoptimizations(repo) |
1156 alloptimizations = findoptimizations(repo) |
957 |
1157 |
958 # Apply and Validate arguments. |
1158 # Apply and Validate arguments. |
960 for o in alloptimizations: |
1160 for o in alloptimizations: |
961 if o.name in optimize: |
1161 if o.name in optimize: |
962 optimizations.append(o) |
1162 optimizations.append(o) |
963 optimize.discard(o.name) |
1163 optimize.discard(o.name) |
964 |
1164 |
965 if optimize: # anything left is unknown |
1165 if optimize: # anything left is unknown |
966 raise error.Abort(_('unknown optimization action requested: %s') % |
1166 raise error.Abort( |
967 ', '.join(sorted(optimize)), |
1167 _('unknown optimization action requested: %s') |
968 hint=_('run without arguments to see valid ' |
1168 % ', '.join(sorted(optimize)), |
969 'optimizations')) |
1169 hint=_('run without arguments to see valid ' 'optimizations'), |
|
1170 ) |
970 |
1171 |
971 deficiencies = finddeficiencies(repo) |
1172 deficiencies = finddeficiencies(repo) |
972 actions = determineactions(repo, deficiencies, repo.requirements, newreqs) |
1173 actions = determineactions(repo, deficiencies, repo.requirements, newreqs) |
973 actions.extend(o for o in sorted(optimizations) |
1174 actions.extend( |
974 # determineactions could have added optimisation |
1175 o |
975 if o not in actions) |
1176 for o in sorted(optimizations) |
|
1177 # determineactions could have added optimisation |
|
1178 if o not in actions |
|
1179 ) |
976 |
1180 |
977 removedreqs = repo.requirements - newreqs |
1181 removedreqs = repo.requirements - newreqs |
978 addedreqs = newreqs - repo.requirements |
1182 addedreqs = newreqs - repo.requirements |
979 |
1183 |
980 if revlogs != UPGRADE_ALL_REVLOGS: |
1184 if revlogs != UPGRADE_ALL_REVLOGS: |
981 incompatible = RECLONES_REQUIREMENTS & (removedreqs | addedreqs) |
1185 incompatible = RECLONES_REQUIREMENTS & (removedreqs | addedreqs) |
982 if incompatible: |
1186 if incompatible: |
983 msg = _('ignoring revlogs selection flags, format requirements ' |
1187 msg = _( |
984 'change: %s\n') |
1188 'ignoring revlogs selection flags, format requirements ' |
|
1189 'change: %s\n' |
|
1190 ) |
985 ui.warn(msg % ', '.join(sorted(incompatible))) |
1191 ui.warn(msg % ', '.join(sorted(incompatible))) |
986 revlogs = UPGRADE_ALL_REVLOGS |
1192 revlogs = UPGRADE_ALL_REVLOGS |
987 |
1193 |
988 def printrequirements(): |
1194 def printrequirements(): |
989 ui.write(_('requirements\n')) |
1195 ui.write(_('requirements\n')) |
990 ui.write(_(' preserved: %s\n') % |
1196 ui.write( |
991 _(', ').join(sorted(newreqs & repo.requirements))) |
1197 _(' preserved: %s\n') |
|
1198 % _(', ').join(sorted(newreqs & repo.requirements)) |
|
1199 ) |
992 |
1200 |
993 if repo.requirements - newreqs: |
1201 if repo.requirements - newreqs: |
994 ui.write(_(' removed: %s\n') % |
1202 ui.write( |
995 _(', ').join(sorted(repo.requirements - newreqs))) |
1203 _(' removed: %s\n') |
|
1204 % _(', ').join(sorted(repo.requirements - newreqs)) |
|
1205 ) |
996 |
1206 |
997 if newreqs - repo.requirements: |
1207 if newreqs - repo.requirements: |
998 ui.write(_(' added: %s\n') % |
1208 ui.write( |
999 _(', ').join(sorted(newreqs - repo.requirements))) |
1209 _(' added: %s\n') |
|
1210 % _(', ').join(sorted(newreqs - repo.requirements)) |
|
1211 ) |
1000 |
1212 |
1001 ui.write('\n') |
1213 ui.write('\n') |
1002 |
1214 |
1003 def printupgradeactions(): |
1215 def printupgradeactions(): |
1004 for a in actions: |
1216 for a in actions: |
1015 onlydefault.append(d) |
1227 onlydefault.append(d) |
1016 |
1228 |
1017 if fromconfig or onlydefault: |
1229 if fromconfig or onlydefault: |
1018 |
1230 |
1019 if fromconfig: |
1231 if fromconfig: |
1020 ui.write(_('repository lacks features recommended by ' |
1232 ui.write( |
1021 'current config options:\n\n')) |
1233 _( |
|
1234 'repository lacks features recommended by ' |
|
1235 'current config options:\n\n' |
|
1236 ) |
|
1237 ) |
1022 for i in fromconfig: |
1238 for i in fromconfig: |
1023 ui.write('%s\n %s\n\n' % (i.name, i.description)) |
1239 ui.write('%s\n %s\n\n' % (i.name, i.description)) |
1024 |
1240 |
1025 if onlydefault: |
1241 if onlydefault: |
1026 ui.write(_('repository lacks features used by the default ' |
1242 ui.write( |
1027 'config options:\n\n')) |
1243 _( |
|
1244 'repository lacks features used by the default ' |
|
1245 'config options:\n\n' |
|
1246 ) |
|
1247 ) |
1028 for i in onlydefault: |
1248 for i in onlydefault: |
1029 ui.write('%s\n %s\n\n' % (i.name, i.description)) |
1249 ui.write('%s\n %s\n\n' % (i.name, i.description)) |
1030 |
1250 |
1031 ui.write('\n') |
1251 ui.write('\n') |
1032 else: |
1252 else: |
1033 ui.write(_('(no feature deficiencies found in existing ' |
1253 ui.write( |
1034 'repository)\n')) |
1254 _('(no feature deficiencies found in existing ' 'repository)\n') |
1035 |
1255 ) |
1036 ui.write(_('performing an upgrade with "--run" will make the following ' |
1256 |
1037 'changes:\n\n')) |
1257 ui.write( |
|
1258 _( |
|
1259 'performing an upgrade with "--run" will make the following ' |
|
1260 'changes:\n\n' |
|
1261 ) |
|
1262 ) |
1038 |
1263 |
1039 printrequirements() |
1264 printrequirements() |
1040 printupgradeactions() |
1265 printupgradeactions() |
1041 |
1266 |
1042 unusedoptimize = [i for i in alloptimizations if i not in actions] |
1267 unusedoptimize = [i for i in alloptimizations if i not in actions] |
1043 |
1268 |
1044 if unusedoptimize: |
1269 if unusedoptimize: |
1045 ui.write(_('additional optimizations are available by specifying ' |
1270 ui.write( |
1046 '"--optimize <name>":\n\n')) |
1271 _( |
|
1272 'additional optimizations are available by specifying ' |
|
1273 '"--optimize <name>":\n\n' |
|
1274 ) |
|
1275 ) |
1047 for i in unusedoptimize: |
1276 for i in unusedoptimize: |
1048 ui.write(_('%s\n %s\n\n') % (i.name, i.description)) |
1277 ui.write(_('%s\n %s\n\n') % (i.name, i.description)) |
1049 return |
1278 return |
1050 |
1279 |
1051 # Else we're in the run=true case. |
1280 # Else we're in the run=true case. |
1064 # to create a new repo object than to instantiate all the components |
1293 # to create a new repo object than to instantiate all the components |
1065 # (like the store) separately. |
1294 # (like the store) separately. |
1066 tmppath = pycompat.mkdtemp(prefix='upgrade.', dir=repo.path) |
1295 tmppath = pycompat.mkdtemp(prefix='upgrade.', dir=repo.path) |
1067 backuppath = None |
1296 backuppath = None |
1068 try: |
1297 try: |
1069 ui.write(_('creating temporary repository to stage migrated ' |
1298 ui.write( |
1070 'data: %s\n') % tmppath) |
1299 _( |
|
1300 'creating temporary repository to stage migrated ' |
|
1301 'data: %s\n' |
|
1302 ) |
|
1303 % tmppath |
|
1304 ) |
1071 |
1305 |
1072 # clone ui without using ui.copy because repo.ui is protected |
1306 # clone ui without using ui.copy because repo.ui is protected |
1073 repoui = repo.ui.__class__(repo.ui) |
1307 repoui = repo.ui.__class__(repo.ui) |
1074 dstrepo = hg.repository(repoui, path=tmppath, create=True) |
1308 dstrepo = hg.repository(repoui, path=tmppath, create=True) |
1075 |
1309 |
1076 with dstrepo.wlock(), dstrepo.lock(): |
1310 with dstrepo.wlock(), dstrepo.lock(): |
1077 backuppath = _upgraderepo(ui, repo, dstrepo, newreqs, |
1311 backuppath = _upgraderepo( |
1078 upgradeactions, revlogs=revlogs) |
1312 ui, repo, dstrepo, newreqs, upgradeactions, revlogs=revlogs |
|
1313 ) |
1079 if not (backup or backuppath is None): |
1314 if not (backup or backuppath is None): |
1080 ui.write(_('removing old repository content%s\n') % backuppath) |
1315 ui.write(_('removing old repository content%s\n') % backuppath) |
1081 repo.vfs.rmtree(backuppath, forcibly=True) |
1316 repo.vfs.rmtree(backuppath, forcibly=True) |
1082 backuppath = None |
1317 backuppath = None |
1083 |
1318 |
1084 finally: |
1319 finally: |
1085 ui.write(_('removing temporary repository %s\n') % tmppath) |
1320 ui.write(_('removing temporary repository %s\n') % tmppath) |
1086 repo.vfs.rmtree(tmppath, forcibly=True) |
1321 repo.vfs.rmtree(tmppath, forcibly=True) |
1087 |
1322 |
1088 if backuppath: |
1323 if backuppath: |
1089 ui.warn(_('copy of old repository backed up at %s\n') % |
1324 ui.warn( |
1090 backuppath) |
1325 _('copy of old repository backed up at %s\n') % backuppath |
1091 ui.warn(_('the old repository will not be deleted; remove ' |
1326 ) |
1092 'it to free up disk space once the upgraded ' |
1327 ui.warn( |
1093 'repository is verified\n')) |
1328 _( |
|
1329 'the old repository will not be deleted; remove ' |
|
1330 'it to free up disk space once the upgraded ' |
|
1331 'repository is verified\n' |
|
1332 ) |
|
1333 ) |