mercurial/upgrade.py
changeset 43077 687b865b95ad
parent 43076 2372284d9457
child 43089 c59eb1560c44
comparing parent 43076:2372284d9457 with this changeset 43077:687b865b95ad; each hunk lists the parent's version of a line immediately followed by this changeset's version
    26 
    26 
    27 from .utils import compression
    27 from .utils import compression
    28 
    28 
    29 # list of requirements that request a clone of all revlog if added/removed
    29 # list of requirements that request a clone of all revlog if added/removed
    30 RECLONES_REQUIREMENTS = {
    30 RECLONES_REQUIREMENTS = {
    31     'generaldelta',
    31     b'generaldelta',
    32     localrepo.SPARSEREVLOG_REQUIREMENT,
    32     localrepo.SPARSEREVLOG_REQUIREMENT,
    33 }
    33 }
    34 
    34 
    35 
    35 
    36 def requiredsourcerequirements(repo):
    36 def requiredsourcerequirements(repo):
    39     An upgrade will not be allowed if the repository doesn't have the
    39     An upgrade will not be allowed if the repository doesn't have the
    40     requirements returned by this function.
    40     requirements returned by this function.
    41     """
    41     """
    42     return {
    42     return {
    43         # Introduced in Mercurial 0.9.2.
    43         # Introduced in Mercurial 0.9.2.
    44         'revlogv1',
    44         b'revlogv1',
    45         # Introduced in Mercurial 0.9.2.
    45         # Introduced in Mercurial 0.9.2.
    46         'store',
    46         b'store',
    47     }
    47     }
    48 
    48 
    49 
    49 
    50 def blocksourcerequirements(repo):
    50 def blocksourcerequirements(repo):
    51     """Obtain requirements that will prevent an upgrade from occurring.
    51     """Obtain requirements that will prevent an upgrade from occurring.
    54     requirements in the returned set.
    54     requirements in the returned set.
    55     """
    55     """
    56     return {
    56     return {
    57         # The upgrade code does not yet support these experimental features.
    57         # The upgrade code does not yet support these experimental features.
    58         # This is an artificial limitation.
    58         # This is an artificial limitation.
    59         'treemanifest',
    59         b'treemanifest',
    60         # This was a precursor to generaldelta and was never enabled by default.
    60         # This was a precursor to generaldelta and was never enabled by default.
    61         # It should (hopefully) not exist in the wild.
    61         # It should (hopefully) not exist in the wild.
    62         'parentdelta',
    62         b'parentdelta',
    63         # Upgrade should operate on the actual store, not the shared link.
    63         # Upgrade should operate on the actual store, not the shared link.
    64         'shared',
    64         b'shared',
    65     }
    65     }
    66 
    66 
    67 
    67 
    68 def supportremovedrequirements(repo):
    68 def supportremovedrequirements(repo):
    69     """Obtain requirements that can be removed during an upgrade.
    69     """Obtain requirements that can be removed during an upgrade.
    77     }
    77     }
    78     for name in compression.compengines:
    78     for name in compression.compengines:
    79         engine = compression.compengines[name]
    79         engine = compression.compengines[name]
    80         if engine.available() and engine.revlogheader():
    80         if engine.available() and engine.revlogheader():
    81             supported.add(b'exp-compression-%s' % name)
    81             supported.add(b'exp-compression-%s' % name)
    82             if engine.name() == 'zstd':
    82             if engine.name() == b'zstd':
    83                 supported.add(b'revlog-compression-zstd')
    83                 supported.add(b'revlog-compression-zstd')
    84     return supported
    84     return supported
    85 
    85 
    86 
    86 
    87 def supporteddestrequirements(repo):
    87 def supporteddestrequirements(repo):
    91     the upgrade is disallowed.
    91     the upgrade is disallowed.
    92 
    92 
    93     Extensions should monkeypatch this to add their custom requirements.
    93     Extensions should monkeypatch this to add their custom requirements.
    94     """
    94     """
    95     supported = {
    95     supported = {
    96         'dotencode',
    96         b'dotencode',
    97         'fncache',
    97         b'fncache',
    98         'generaldelta',
    98         b'generaldelta',
    99         'revlogv1',
    99         b'revlogv1',
   100         'store',
   100         b'store',
   101         localrepo.SPARSEREVLOG_REQUIREMENT,
   101         localrepo.SPARSEREVLOG_REQUIREMENT,
   102     }
   102     }
   103     for name in compression.compengines:
   103     for name in compression.compengines:
   104         engine = compression.compengines[name]
   104         engine = compression.compengines[name]
   105         if engine.available() and engine.revlogheader():
   105         if engine.available() and engine.revlogheader():
   106             supported.add(b'exp-compression-%s' % name)
   106             supported.add(b'exp-compression-%s' % name)
   107             if engine.name() == 'zstd':
   107             if engine.name() == b'zstd':
   108                 supported.add(b'revlog-compression-zstd')
   108                 supported.add(b'revlog-compression-zstd')
   109     return supported
   109     return supported
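
Illustration only, not part of the changeset: the docstring above says extensions should monkeypatch supporteddestrequirements() to declare their own requirements. A minimal sketch of an extension doing exactly that, with a made-up requirement name:

from mercurial import upgrade

_origsupported = upgrade.supporteddestrequirements

def _supporteddest(repo):
    # start from the stock set and add the extension's own requirement
    reqs = _origsupported(repo)
    reqs.add(b'exp-myextension-requirement')
    return reqs

def uisetup(ui):
    # plain monkeypatch, as the docstring suggests
    upgrade.supporteddestrequirements = _supporteddest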
   110 
   110 
   111 
   111 
   112 def allowednewrequirements(repo):
   112 def allowednewrequirements(repo):
   118     We use a list of allowed requirement additions instead of a list of known
   118     We use a list of allowed requirement additions instead of a list of known
   119     bad additions because the whitelist approach is safer and will prevent
   119     bad additions because the whitelist approach is safer and will prevent
   120     future, unknown requirements from accidentally being added.
   120     future, unknown requirements from accidentally being added.
   121     """
   121     """
   122     supported = {
   122     supported = {
   123         'dotencode',
   123         b'dotencode',
   124         'fncache',
   124         b'fncache',
   125         'generaldelta',
   125         b'generaldelta',
   126         localrepo.SPARSEREVLOG_REQUIREMENT,
   126         localrepo.SPARSEREVLOG_REQUIREMENT,
   127     }
   127     }
   128     for name in compression.compengines:
   128     for name in compression.compengines:
   129         engine = compression.compengines[name]
   129         engine = compression.compengines[name]
   130         if engine.available() and engine.revlogheader():
   130         if engine.available() and engine.revlogheader():
   131             supported.add(b'exp-compression-%s' % name)
   131             supported.add(b'exp-compression-%s' % name)
   132             if engine.name() == 'zstd':
   132             if engine.name() == b'zstd':
   133                 supported.add(b'revlog-compression-zstd')
   133                 supported.add(b'revlog-compression-zstd')
   134     return supported
   134     return supported
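
Illustration only, not part of the changeset: together, the functions above reduce the upgrade check to set arithmetic; requirements added must be whitelisted and requirements dropped must be explicitly supported. The real check lives in upgraderepo(), outside the hunks shown here; _vetrequirements below is a hypothetical name for the same idea:

def _vetrequirements(repo, newreqs):
    # requirements the proposed upgrade would add or drop
    added = newreqs - repo.requirements
    removed = repo.requirements - newreqs
    # both deltas must be covered by the whitelists defined above
    return (added <= allowednewrequirements(repo)
            and removed <= supportremovedrequirements(repo))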
   135 
   135 
   136 
   136 
   137 def preservedrequirements(repo):
   137 def preservedrequirements(repo):
   138     return set()
   138     return set()
   139 
   139 
   140 
   140 
   141 deficiency = 'deficiency'
   141 deficiency = b'deficiency'
   142 optimisation = 'optimization'
   142 optimisation = b'optimization'
   143 
   143 
   144 
   144 
   145 class improvement(object):
   145 class improvement(object):
   146     """Represents an improvement that can be made as part of an upgrade.
   146     """Represents an improvement that can be made as part of an upgrade.
   147 
   147 
   257         return cls._requirement in cls._newreporequirements(repo.ui)
   257         return cls._requirement in cls._newreporequirements(repo.ui)
   258 
   258 
   259 
   259 
   260 @registerformatvariant
   260 @registerformatvariant
   261 class fncache(requirementformatvariant):
   261 class fncache(requirementformatvariant):
   262     name = 'fncache'
   262     name = b'fncache'
   263 
   263 
   264     _requirement = 'fncache'
   264     _requirement = b'fncache'
   265 
   265 
   266     default = True
   266     default = True
   267 
   267 
   268     description = _(
   268     description = _(
   269         'long and reserved filenames may not work correctly; '
   269         b'long and reserved filenames may not work correctly; '
   270         'repository performance is sub-optimal'
   270         b'repository performance is sub-optimal'
   271     )
   271     )
   272 
   272 
   273     upgrademessage = _(
   273     upgrademessage = _(
   274         'repository will be more resilient to storing '
   274         b'repository will be more resilient to storing '
   275         'certain paths and performance of certain '
   275         b'certain paths and performance of certain '
   276         'operations should be improved'
   276         b'operations should be improved'
   277     )
   277     )
   278 
   278 
   279 
   279 
   280 @registerformatvariant
   280 @registerformatvariant
   281 class dotencode(requirementformatvariant):
   281 class dotencode(requirementformatvariant):
   282     name = 'dotencode'
   282     name = b'dotencode'
   283 
   283 
   284     _requirement = 'dotencode'
   284     _requirement = b'dotencode'
   285 
   285 
   286     default = True
   286     default = True
   287 
   287 
   288     description = _(
   288     description = _(
   289         'storage of filenames beginning with a period or '
   289         b'storage of filenames beginning with a period or '
   290         'space may not work correctly'
   290         b'space may not work correctly'
   291     )
   291     )
   292 
   292 
   293     upgrademessage = _(
   293     upgrademessage = _(
   294         'repository will be better able to store files '
   294         b'repository will be better able to store files '
   295         'beginning with a space or period'
   295         b'beginning with a space or period'
   296     )
   296     )
   297 
   297 
   298 
   298 
   299 @registerformatvariant
   299 @registerformatvariant
   300 class generaldelta(requirementformatvariant):
   300 class generaldelta(requirementformatvariant):
   301     name = 'generaldelta'
   301     name = b'generaldelta'
   302 
   302 
   303     _requirement = 'generaldelta'
   303     _requirement = b'generaldelta'
   304 
   304 
   305     default = True
   305     default = True
   306 
   306 
   307     description = _(
   307     description = _(
   308         'deltas within internal storage are unable to '
   308         b'deltas within internal storage are unable to '
   309         'choose optimal revisions; repository is larger and '
   309         b'choose optimal revisions; repository is larger and '
   310         'slower than it could be; interaction with other '
   310         b'slower than it could be; interaction with other '
   311         'repositories may require extra network and CPU '
   311         b'repositories may require extra network and CPU '
   312         'resources, making "hg push" and "hg pull" slower'
   312         b'resources, making "hg push" and "hg pull" slower'
   313     )
   313     )
   314 
   314 
   315     upgrademessage = _(
   315     upgrademessage = _(
   316         'repository storage will be able to create '
   316         b'repository storage will be able to create '
   317         'optimal deltas; new repository data will be '
   317         b'optimal deltas; new repository data will be '
   318         'smaller and read times should decrease; '
   318         b'smaller and read times should decrease; '
   319         'interacting with other repositories using this '
   319         b'interacting with other repositories using this '
   320         'storage model should require less network and '
   320         b'storage model should require less network and '
   321         'CPU resources, making "hg push" and "hg pull" '
   321         b'CPU resources, making "hg push" and "hg pull" '
   322         'faster'
   322         b'faster'
   323     )
   323     )
   324 
   324 
   325 
   325 
   326 @registerformatvariant
   326 @registerformatvariant
   327 class sparserevlog(requirementformatvariant):
   327 class sparserevlog(requirementformatvariant):
   328     name = 'sparserevlog'
   328     name = b'sparserevlog'
   329 
   329 
   330     _requirement = localrepo.SPARSEREVLOG_REQUIREMENT
   330     _requirement = localrepo.SPARSEREVLOG_REQUIREMENT
   331 
   331 
   332     default = True
   332     default = True
   333 
   333 
   334     description = _(
   334     description = _(
   335         'in order to limit disk reading and memory usage on older '
   335         b'in order to limit disk reading and memory usage on older '
   336         'version, the span of a delta chain from its root to its '
   336         b'version, the span of a delta chain from its root to its '
   337         'end is limited, whatever the relevant data in this span. '
   337         b'end is limited, whatever the relevant data in this span. '
   338         'This can severly limit Mercurial ability to build good '
   338         b'This can severly limit Mercurial ability to build good '
   339         'chain of delta resulting is much more storage space being '
   339         b'chain of delta resulting is much more storage space being '
   340         'taken and limit reusability of on disk delta during '
   340         b'taken and limit reusability of on disk delta during '
   341         'exchange.'
   341         b'exchange.'
   342     )
   342     )
   343 
   343 
   344     upgrademessage = _(
   344     upgrademessage = _(
   345         'Revlog supports delta chain with more unused data '
   345         b'Revlog supports delta chain with more unused data '
   346         'between payload. These gaps will be skipped at read '
   346         b'between payload. These gaps will be skipped at read '
   347         'time. This allows for better delta chains, making a '
   347         b'time. This allows for better delta chains, making a '
   348         'better compression and faster exchange with server.'
   348         b'better compression and faster exchange with server.'
   349     )
   349     )
   350 
   350 
   351 
   351 
   352 @registerformatvariant
   352 @registerformatvariant
   353 class sidedata(requirementformatvariant):
   353 class sidedata(requirementformatvariant):
   354     name = 'sidedata'
   354     name = b'sidedata'
   355 
   355 
   356     _requirement = localrepo.SIDEDATA_REQUIREMENT
   356     _requirement = localrepo.SIDEDATA_REQUIREMENT
   357 
   357 
   358     default = False
   358     default = False
   359 
   359 
   360     description = _(
   360     description = _(
   361         'Allows storage of extra data alongside a revision, '
   361         b'Allows storage of extra data alongside a revision, '
   362         'unlocking various caching options.'
   362         b'unlocking various caching options.'
   363     )
   363     )
   364 
   364 
   365     upgrademessage = _('Allows storage of extra data alongside a revision.')
   365     upgrademessage = _(b'Allows storage of extra data alongside a revision.')
   366 
   366 
   367 
   367 
   368 @registerformatvariant
   368 @registerformatvariant
   369 class removecldeltachain(formatvariant):
   369 class removecldeltachain(formatvariant):
   370     name = 'plain-cl-delta'
   370     name = b'plain-cl-delta'
   371 
   371 
   372     default = True
   372     default = True
   373 
   373 
   374     description = _(
   374     description = _(
   375         'changelog storage is using deltas instead of '
   375         b'changelog storage is using deltas instead of '
   376         'raw entries; changelog reading and any '
   376         b'raw entries; changelog reading and any '
   377         'operation relying on changelog data are slower '
   377         b'operation relying on changelog data are slower '
   378         'than they could be'
   378         b'than they could be'
   379     )
   379     )
   380 
   380 
   381     upgrademessage = _(
   381     upgrademessage = _(
   382         'changelog storage will be reformated to '
   382         b'changelog storage will be reformated to '
   383         'store raw entries; changelog reading will be '
   383         b'store raw entries; changelog reading will be '
   384         'faster; changelog size may be reduced'
   384         b'faster; changelog size may be reduced'
   385     )
   385     )
   386 
   386 
   387     @staticmethod
   387     @staticmethod
   388     def fromrepo(repo):
   388     def fromrepo(repo):
   389         # Mercurial 4.0 changed changelogs to not use delta chains. Search for
   389         # Mercurial 4.0 changed changelogs to not use delta chains. Search for
   397         return True
   397         return True
   398 
   398 
   399 
   399 
   400 @registerformatvariant
   400 @registerformatvariant
   401 class compressionengine(formatvariant):
   401 class compressionengine(formatvariant):
   402     name = 'compression'
   402     name = b'compression'
   403     default = 'zlib'
   403     default = b'zlib'
   404 
   404 
   405     description = _(
   405     description = _(
   406         'Compresion algorithm used to compress data. '
   406         b'Compresion algorithm used to compress data. '
   407         'Some engine are faster than other'
   407         b'Some engine are faster than other'
   408     )
   408     )
   409 
   409 
   410     upgrademessage = _(
   410     upgrademessage = _(
   411         'revlog content will be recompressed with the new ' 'algorithm.'
   411         b'revlog content will be recompressed with the new ' b'algorithm.'
   412     )
   412     )
   413 
   413 
   414     @classmethod
   414     @classmethod
   415     def fromrepo(cls, repo):
   415     def fromrepo(cls, repo):
   416         # we allow multiple compression engine requirement to co-exist because
   416         # we allow multiple compression engine requirement to co-exist because
   417         # strickly speaking, revlog seems to support mixed compression style.
   417         # strickly speaking, revlog seems to support mixed compression style.
   418         #
   418         #
   419         # The compression used for new entries will be "the last one"
   419         # The compression used for new entries will be "the last one"
   420         compression = 'zlib'
   420         compression = b'zlib'
   421         for req in repo.requirements:
   421         for req in repo.requirements:
   422             prefix = req.startswith
   422             prefix = req.startswith
   423             if prefix('revlog-compression-') or prefix('exp-compression-'):
   423             if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
   424                 compression = req.split('-', 2)[2]
   424                 compression = req.split(b'-', 2)[2]
   425         return compression
   425         return compression
   426 
   426 
   427     @classmethod
   427     @classmethod
   428     def fromconfig(cls, repo):
   428     def fromconfig(cls, repo):
   429         return repo.ui.config('format', 'revlog-compression')
   429         return repo.ui.config(b'format', b'revlog-compression')
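
Illustration only: fromrepo() above derives the engine purely from requirement names; the last b'revlog-compression-*' or b'exp-compression-*' entry wins, with zlib as the fallback. The same prefix parsing on a made-up requirements set:

reqs = [b'revlogv1', b'store', b'generaldelta', b'revlog-compression-zstd']
compression = b'zlib'  # fallback when no compression requirement is present
for req in reqs:
    if req.startswith(b'revlog-compression-') or req.startswith(b'exp-compression-'):
        compression = req.split(b'-', 2)[2]
assert compression == b'zstd'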
   430 
   430 
   431 
   431 
   432 @registerformatvariant
   432 @registerformatvariant
   433 class compressionlevel(formatvariant):
   433 class compressionlevel(formatvariant):
   434     name = 'compression-level'
   434     name = b'compression-level'
   435     default = 'default'
   435     default = b'default'
   436 
   436 
   437     description = _('compression level')
   437     description = _(b'compression level')
   438 
   438 
   439     upgrademessage = _('revlog content will be recompressed')
   439     upgrademessage = _(b'revlog content will be recompressed')
   440 
   440 
   441     @classmethod
   441     @classmethod
   442     def fromrepo(cls, repo):
   442     def fromrepo(cls, repo):
   443         comp = compressionengine.fromrepo(repo)
   443         comp = compressionengine.fromrepo(repo)
   444         level = None
   444         level = None
   445         if comp == 'zlib':
   445         if comp == b'zlib':
   446             level = repo.ui.configint('storage', 'revlog.zlib.level')
   446             level = repo.ui.configint(b'storage', b'revlog.zlib.level')
   447         elif comp == 'zstd':
   447         elif comp == b'zstd':
   448             level = repo.ui.configint('storage', 'revlog.zstd.level')
   448             level = repo.ui.configint(b'storage', b'revlog.zstd.level')
   449         if level is None:
   449         if level is None:
   450             return 'default'
   450             return b'default'
   451         return bytes(level)
   451         return bytes(level)
   452 
   452 
   453     @classmethod
   453     @classmethod
   454     def fromconfig(cls, repo):
   454     def fromconfig(cls, repo):
   455         comp = compressionengine.fromconfig(repo)
   455         comp = compressionengine.fromconfig(repo)
   456         level = None
   456         level = None
   457         if comp == 'zlib':
   457         if comp == b'zlib':
   458             level = repo.ui.configint('storage', 'revlog.zlib.level')
   458             level = repo.ui.configint(b'storage', b'revlog.zlib.level')
   459         elif comp == 'zstd':
   459         elif comp == b'zstd':
   460             level = repo.ui.configint('storage', 'revlog.zstd.level')
   460             level = repo.ui.configint(b'storage', b'revlog.zstd.level')
   461         if level is None:
   461         if level is None:
   462             return 'default'
   462             return b'default'
   463         return bytes(level)
   463         return bytes(level)
   464 
   464 
   465 
   465 
   466 def finddeficiencies(repo):
   466 def finddeficiencies(repo):
   467     """returns a list of deficiencies that the repo suffer from"""
   467     """returns a list of deficiencies that the repo suffer from"""
   483 # We don't enforce backward compatibility for debug command so this
   483 # We don't enforce backward compatibility for debug command so this
   484 # might eventually be dropped. However, having to use two different
   484 # might eventually be dropped. However, having to use two different
   485 # forms in script when comparing result is anoying enough to add
   485 # forms in script when comparing result is anoying enough to add
   486 # backward compatibility for a while.
   486 # backward compatibility for a while.
   487 legacy_opts_map = {
   487 legacy_opts_map = {
   488     'redeltaparent': 're-delta-parent',
   488     b'redeltaparent': b're-delta-parent',
   489     'redeltamultibase': 're-delta-multibase',
   489     b'redeltamultibase': b're-delta-multibase',
   490     'redeltaall': 're-delta-all',
   490     b'redeltaall': b're-delta-all',
   491     'redeltafulladd': 're-delta-fulladd',
   491     b'redeltafulladd': b're-delta-fulladd',
   492 }
   492 }
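
Illustration only: the map above merely normalizes spellings, so the legacy undashed names and the current dashed names request the same optimizations; upgraderepo() applies it further down in this file with essentially this expression:

requested = [b'redeltaparent', b're-delta-multibase']
# legacy names are translated, unknown or already-dashed names pass through
optimize = set(legacy_opts_map.get(o, o) for o in requested)
assert optimize == {b're-delta-parent', b're-delta-multibase'}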
   493 
   493 
   494 
   494 
   495 def findoptimizations(repo):
   495 def findoptimizations(repo):
   496     """Determine optimisation that could be used during upgrade"""
   496     """Determine optimisation that could be used during upgrade"""
   498     # which ones to apply.
   498     # which ones to apply.
   499     optimizations = []
   499     optimizations = []
   500 
   500 
   501     optimizations.append(
   501     optimizations.append(
   502         improvement(
   502         improvement(
   503             name='re-delta-parent',
   503             name=b're-delta-parent',
   504             type=optimisation,
   504             type=optimisation,
   505             description=_(
   505             description=_(
   506                 'deltas within internal storage will be recalculated to '
   506                 b'deltas within internal storage will be recalculated to '
   507                 'choose an optimal base revision where this was not '
   507                 b'choose an optimal base revision where this was not '
   508                 'already done; the size of the repository may shrink and '
   508                 b'already done; the size of the repository may shrink and '
   509                 'various operations may become faster; the first time '
   509                 b'various operations may become faster; the first time '
   510                 'this optimization is performed could slow down upgrade '
   510                 b'this optimization is performed could slow down upgrade '
   511                 'execution considerably; subsequent invocations should '
   511                 b'execution considerably; subsequent invocations should '
   512                 'not run noticeably slower'
   512                 b'not run noticeably slower'
   513             ),
   513             ),
   514             upgrademessage=_(
   514             upgrademessage=_(
   515                 'deltas within internal storage will choose a new '
   515                 b'deltas within internal storage will choose a new '
   516                 'base revision if needed'
   516                 b'base revision if needed'
   517             ),
   517             ),
   518         )
   518         )
   519     )
   519     )
   520 
   520 
   521     optimizations.append(
   521     optimizations.append(
   522         improvement(
   522         improvement(
   523             name='re-delta-multibase',
   523             name=b're-delta-multibase',
   524             type=optimisation,
   524             type=optimisation,
   525             description=_(
   525             description=_(
   526                 'deltas within internal storage will be recalculated '
   526                 b'deltas within internal storage will be recalculated '
   527                 'against multiple base revision and the smallest '
   527                 b'against multiple base revision and the smallest '
   528                 'difference will be used; the size of the repository may '
   528                 b'difference will be used; the size of the repository may '
   529                 'shrink significantly when there are many merges; this '
   529                 b'shrink significantly when there are many merges; this '
   530                 'optimization will slow down execution in proportion to '
   530                 b'optimization will slow down execution in proportion to '
   531                 'the number of merges in the repository and the amount '
   531                 b'the number of merges in the repository and the amount '
   532                 'of files in the repository; this slow down should not '
   532                 b'of files in the repository; this slow down should not '
   533                 'be significant unless there are tens of thousands of '
   533                 b'be significant unless there are tens of thousands of '
   534                 'files and thousands of merges'
   534                 b'files and thousands of merges'
   535             ),
   535             ),
   536             upgrademessage=_(
   536             upgrademessage=_(
   537                 'deltas within internal storage will choose an '
   537                 b'deltas within internal storage will choose an '
   538                 'optimal delta by computing deltas against multiple '
   538                 b'optimal delta by computing deltas against multiple '
   539                 'parents; may slow down execution time '
   539                 b'parents; may slow down execution time '
   540                 'significantly'
   540                 b'significantly'
   541             ),
   541             ),
   542         )
   542         )
   543     )
   543     )
   544 
   544 
   545     optimizations.append(
   545     optimizations.append(
   546         improvement(
   546         improvement(
   547             name='re-delta-all',
   547             name=b're-delta-all',
   548             type=optimisation,
   548             type=optimisation,
   549             description=_(
   549             description=_(
   550                 'deltas within internal storage will always be '
   550                 b'deltas within internal storage will always be '
   551                 'recalculated without reusing prior deltas; this will '
   551                 b'recalculated without reusing prior deltas; this will '
   552                 'likely make execution run several times slower; this '
   552                 b'likely make execution run several times slower; this '
   553                 'optimization is typically not needed'
   553                 b'optimization is typically not needed'
   554             ),
   554             ),
   555             upgrademessage=_(
   555             upgrademessage=_(
   556                 'deltas within internal storage will be fully '
   556                 b'deltas within internal storage will be fully '
   557                 'recomputed; this will likely drastically slow down '
   557                 b'recomputed; this will likely drastically slow down '
   558                 'execution time'
   558                 b'execution time'
   559             ),
   559             ),
   560         )
   560         )
   561     )
   561     )
   562 
   562 
   563     optimizations.append(
   563     optimizations.append(
   564         improvement(
   564         improvement(
   565             name='re-delta-fulladd',
   565             name=b're-delta-fulladd',
   566             type=optimisation,
   566             type=optimisation,
   567             description=_(
   567             description=_(
   568                 'every revision will be re-added as if it was new '
   568                 b'every revision will be re-added as if it was new '
   569                 'content. It will go through the full storage '
   569                 b'content. It will go through the full storage '
   570                 'mechanism giving extensions a chance to process it '
   570                 b'mechanism giving extensions a chance to process it '
   571                 '(eg. lfs). This is similar to "re-delta-all" but even '
   571                 b'(eg. lfs). This is similar to "re-delta-all" but even '
   572                 'slower since more logic is involved.'
   572                 b'slower since more logic is involved.'
   573             ),
   573             ),
   574             upgrademessage=_(
   574             upgrademessage=_(
   575                 'each revision will be added as new content to the '
   575                 b'each revision will be added as new content to the '
   576                 'internal storage; this will likely drastically slow '
   576                 b'internal storage; this will likely drastically slow '
   577                 'down execution time, but some extensions might need '
   577                 b'down execution time, but some extensions might need '
   578                 'it'
   578                 b'it'
   579             ),
   579             ),
   580         )
   580         )
   581     )
   581     )
   582 
   582 
   583     return optimizations
   583     return optimizations
   619 def _revlogfrompath(repo, path):
   619 def _revlogfrompath(repo, path):
   620     """Obtain a revlog from a repo path.
   620     """Obtain a revlog from a repo path.
   621 
   621 
   622     An instance of the appropriate class is returned.
   622     An instance of the appropriate class is returned.
   623     """
   623     """
   624     if path == '00changelog.i':
   624     if path == b'00changelog.i':
   625         return changelog.changelog(repo.svfs)
   625         return changelog.changelog(repo.svfs)
   626     elif path.endswith('00manifest.i'):
   626     elif path.endswith(b'00manifest.i'):
   627         mandir = path[: -len('00manifest.i')]
   627         mandir = path[: -len(b'00manifest.i')]
   628         return manifest.manifestrevlog(repo.svfs, tree=mandir)
   628         return manifest.manifestrevlog(repo.svfs, tree=mandir)
   629     else:
   629     else:
   630         # reverse of "/".join(("data", path + ".i"))
   630         # reverse of "/".join(("data", path + ".i"))
   631         return filelog.filelog(repo.svfs, path[5:-2])
   631         return filelog.filelog(repo.svfs, path[5:-2])
   632 
   632 
   647     oldindex = oldvfs.join(oldrl.indexfile)
   647     oldindex = oldvfs.join(oldrl.indexfile)
   648     newindex = newvfs.join(newrl.indexfile)
   648     newindex = newvfs.join(newrl.indexfile)
   649     olddata = oldvfs.join(oldrl.datafile)
   649     olddata = oldvfs.join(oldrl.datafile)
   650     newdata = newvfs.join(newrl.datafile)
   650     newdata = newvfs.join(newrl.datafile)
   651 
   651 
   652     with newvfs(newrl.indexfile, 'w'):
   652     with newvfs(newrl.indexfile, b'w'):
   653         pass  # create all the directories
   653         pass  # create all the directories
   654 
   654 
   655     util.copyfile(oldindex, newindex)
   655     util.copyfile(oldindex, newindex)
   656     copydata = oldrl.opener.exists(oldrl.datafile)
   656     copydata = oldrl.opener.exists(oldrl.datafile)
   657     if copydata:
   657     if copydata:
   658         util.copyfile(olddata, newdata)
   658         util.copyfile(olddata, newdata)
   659 
   659 
   660     if not (
   660     if not (
   661         unencodedname.endswith('00changelog.i')
   661         unencodedname.endswith(b'00changelog.i')
   662         or unencodedname.endswith('00manifest.i')
   662         or unencodedname.endswith(b'00manifest.i')
   663     ):
   663     ):
   664         destrepo.svfs.fncache.add(unencodedname)
   664         destrepo.svfs.fncache.add(unencodedname)
   665         if copydata:
   665         if copydata:
   666             destrepo.svfs.fncache.add(unencodedname[:-2] + '.d')
   666             destrepo.svfs.fncache.add(unencodedname[:-2] + b'.d')
   667 
   667 
   668 
   668 
   669 UPGRADE_CHANGELOG = object()
   669 UPGRADE_CHANGELOG = object()
   670 UPGRADE_MANIFEST = object()
   670 UPGRADE_MANIFEST = object()
   671 UPGRADE_FILELOG = object()
   671 UPGRADE_FILELOG = object()
   677 
   677 
   678 def matchrevlog(revlogfilter, entry):
   678 def matchrevlog(revlogfilter, entry):
   679     """check is a revlog is selected for cloning
   679     """check is a revlog is selected for cloning
   680 
   680 
   681     The store entry is checked against the passed filter"""
   681     The store entry is checked against the passed filter"""
   682     if entry.endswith('00changelog.i'):
   682     if entry.endswith(b'00changelog.i'):
   683         return UPGRADE_CHANGELOG in revlogfilter
   683         return UPGRADE_CHANGELOG in revlogfilter
   684     elif entry.endswith('00manifest.i'):
   684     elif entry.endswith(b'00manifest.i'):
   685         return UPGRADE_MANIFEST in revlogfilter
   685         return UPGRADE_MANIFEST in revlogfilter
   686     return UPGRADE_FILELOG in revlogfilter
   686     return UPGRADE_FILELOG in revlogfilter
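
Illustration only: matchrevlog() looks solely at the entry's suffix, so a filter restricted to the changelog and manifests skips every filelog. For example:

revlogfilter = {UPGRADE_CHANGELOG, UPGRADE_MANIFEST}
assert matchrevlog(revlogfilter, b'00changelog.i')          # changelog selected
assert matchrevlog(revlogfilter, b'meta/sub/00manifest.i')  # (tree) manifest selected
assert not matchrevlog(revlogfilter, b'data/foo.py.i')      # filelogs are filtered out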
   687 
   687 
   688 
   688 
   689 def _clonerevlogs(
   689 def _clonerevlogs(
   718     alldatafiles = list(srcrepo.store.walk())
   718     alldatafiles = list(srcrepo.store.walk())
   719 
   719 
   720     # Perform a pass to collect metadata. This validates we can open all
   720     # Perform a pass to collect metadata. This validates we can open all
   721     # source files and allows a unified progress bar to be displayed.
   721     # source files and allows a unified progress bar to be displayed.
   722     for unencoded, encoded, size in alldatafiles:
   722     for unencoded, encoded, size in alldatafiles:
   723         if unencoded.endswith('.d'):
   723         if unencoded.endswith(b'.d'):
   724             continue
   724             continue
   725 
   725 
   726         rl = _revlogfrompath(srcrepo, unencoded)
   726         rl = _revlogfrompath(srcrepo, unencoded)
   727 
   727 
   728         info = rl.storageinfo(
   728         info = rl.storageinfo(
   730             revisionscount=True,
   730             revisionscount=True,
   731             trackedsize=True,
   731             trackedsize=True,
   732             storedsize=True,
   732             storedsize=True,
   733         )
   733         )
   734 
   734 
   735         revcount += info['revisionscount'] or 0
   735         revcount += info[b'revisionscount'] or 0
   736         datasize = info['storedsize'] or 0
   736         datasize = info[b'storedsize'] or 0
   737         rawsize = info['trackedsize'] or 0
   737         rawsize = info[b'trackedsize'] or 0
   738 
   738 
   739         srcsize += datasize
   739         srcsize += datasize
   740         srcrawsize += rawsize
   740         srcrawsize += rawsize
   741 
   741 
   742         # This is for the separate progress bars.
   742         # This is for the separate progress bars.
   753             fcount += 1
   753             fcount += 1
   754             frevcount += len(rl)
   754             frevcount += len(rl)
   755             fsrcsize += datasize
   755             fsrcsize += datasize
   756             frawsize += rawsize
   756             frawsize += rawsize
   757         else:
   757         else:
   758             error.ProgrammingError('unknown revlog type')
   758             error.ProgrammingError(b'unknown revlog type')
   759 
   759 
   760     if not revcount:
   760     if not revcount:
   761         return
   761         return
   762 
   762 
   763     ui.write(
   763     ui.write(
   764         _(
   764         _(
   765             'migrating %d total revisions (%d in filelogs, %d in manifests, '
   765             b'migrating %d total revisions (%d in filelogs, %d in manifests, '
   766             '%d in changelog)\n'
   766             b'%d in changelog)\n'
   767         )
   767         )
   768         % (revcount, frevcount, mrevcount, crevcount)
   768         % (revcount, frevcount, mrevcount, crevcount)
   769     )
   769     )
   770     ui.write(
   770     ui.write(
   771         _('migrating %s in store; %s tracked data\n')
   771         _(b'migrating %s in store; %s tracked data\n')
   772         % ((util.bytecount(srcsize), util.bytecount(srcrawsize)))
   772         % ((util.bytecount(srcsize), util.bytecount(srcrawsize)))
   773     )
   773     )
   774 
   774 
   775     # Used to keep track of progress.
   775     # Used to keep track of progress.
   776     progress = None
   776     progress = None
   780 
   780 
   781     # Do the actual copying.
   781     # Do the actual copying.
   782     # FUTURE this operation can be farmed off to worker processes.
   782     # FUTURE this operation can be farmed off to worker processes.
   783     seen = set()
   783     seen = set()
   784     for unencoded, encoded, size in alldatafiles:
   784     for unencoded, encoded, size in alldatafiles:
   785         if unencoded.endswith('.d'):
   785         if unencoded.endswith(b'.d'):
   786             continue
   786             continue
   787 
   787 
   788         oldrl = _revlogfrompath(srcrepo, unencoded)
   788         oldrl = _revlogfrompath(srcrepo, unencoded)
   789 
   789 
   790         if isinstance(oldrl, changelog.changelog) and 'c' not in seen:
   790         if isinstance(oldrl, changelog.changelog) and b'c' not in seen:
   791             ui.write(
   791             ui.write(
   792                 _(
   792                 _(
   793                     'finished migrating %d manifest revisions across %d '
   793                     b'finished migrating %d manifest revisions across %d '
   794                     'manifests; change in size: %s\n'
   794                     b'manifests; change in size: %s\n'
   795                 )
   795                 )
   796                 % (mrevcount, mcount, util.bytecount(mdstsize - msrcsize))
   796                 % (mrevcount, mcount, util.bytecount(mdstsize - msrcsize))
   797             )
   797             )
   798 
   798 
   799             ui.write(
   799             ui.write(
   800                 _(
   800                 _(
   801                     'migrating changelog containing %d revisions '
   801                     b'migrating changelog containing %d revisions '
   802                     '(%s in store; %s tracked data)\n'
   802                     b'(%s in store; %s tracked data)\n'
   803                 )
   803                 )
   804                 % (
   804                 % (
   805                     crevcount,
   805                     crevcount,
   806                     util.bytecount(csrcsize),
   806                     util.bytecount(csrcsize),
   807                     util.bytecount(crawsize),
   807                     util.bytecount(crawsize),
   808                 )
   808                 )
   809             )
   809             )
   810             seen.add('c')
   810             seen.add(b'c')
   811             progress = srcrepo.ui.makeprogress(
   811             progress = srcrepo.ui.makeprogress(
   812                 _('changelog revisions'), total=crevcount
   812                 _(b'changelog revisions'), total=crevcount
   813             )
   813             )
   814         elif isinstance(oldrl, manifest.manifestrevlog) and 'm' not in seen:
   814         elif isinstance(oldrl, manifest.manifestrevlog) and b'm' not in seen:
   815             ui.write(
   815             ui.write(
   816                 _(
   816                 _(
   817                     'finished migrating %d filelog revisions across %d '
   817                     b'finished migrating %d filelog revisions across %d '
   818                     'filelogs; change in size: %s\n'
   818                     b'filelogs; change in size: %s\n'
   819                 )
   819                 )
   820                 % (frevcount, fcount, util.bytecount(fdstsize - fsrcsize))
   820                 % (frevcount, fcount, util.bytecount(fdstsize - fsrcsize))
   821             )
   821             )
   822 
   822 
   823             ui.write(
   823             ui.write(
   824                 _(
   824                 _(
   825                     'migrating %d manifests containing %d revisions '
   825                     b'migrating %d manifests containing %d revisions '
   826                     '(%s in store; %s tracked data)\n'
   826                     b'(%s in store; %s tracked data)\n'
   827                 )
   827                 )
   828                 % (
   828                 % (
   829                     mcount,
   829                     mcount,
   830                     mrevcount,
   830                     mrevcount,
   831                     util.bytecount(msrcsize),
   831                     util.bytecount(msrcsize),
   832                     util.bytecount(mrawsize),
   832                     util.bytecount(mrawsize),
   833                 )
   833                 )
   834             )
   834             )
   835             seen.add('m')
   835             seen.add(b'm')
   836             if progress:
   836             if progress:
   837                 progress.complete()
   837                 progress.complete()
   838             progress = srcrepo.ui.makeprogress(
   838             progress = srcrepo.ui.makeprogress(
   839                 _('manifest revisions'), total=mrevcount
   839                 _(b'manifest revisions'), total=mrevcount
   840             )
   840             )
   841         elif 'f' not in seen:
   841         elif b'f' not in seen:
   842             ui.write(
   842             ui.write(
   843                 _(
   843                 _(
   844                     'migrating %d filelogs containing %d revisions '
   844                     b'migrating %d filelogs containing %d revisions '
   845                     '(%s in store; %s tracked data)\n'
   845                     b'(%s in store; %s tracked data)\n'
   846                 )
   846                 )
   847                 % (
   847                 % (
   848                     fcount,
   848                     fcount,
   849                     frevcount,
   849                     frevcount,
   850                     util.bytecount(fsrcsize),
   850                     util.bytecount(fsrcsize),
   851                     util.bytecount(frawsize),
   851                     util.bytecount(frawsize),
   852                 )
   852                 )
   853             )
   853             )
   854             seen.add('f')
   854             seen.add(b'f')
   855             if progress:
   855             if progress:
   856                 progress.complete()
   856                 progress.complete()
   857             progress = srcrepo.ui.makeprogress(
   857             progress = srcrepo.ui.makeprogress(
   858                 _('file revisions'), total=frevcount
   858                 _(b'file revisions'), total=frevcount
   859             )
   859             )
   860 
   860 
   861         if matchrevlog(revlogs, unencoded):
   861         if matchrevlog(revlogs, unencoded):
   862             ui.note(
   862             ui.note(
   863                 _('cloning %d revisions from %s\n') % (len(oldrl), unencoded)
   863                 _(b'cloning %d revisions from %s\n') % (len(oldrl), unencoded)
   864             )
   864             )
   865             newrl = _revlogfrompath(dstrepo, unencoded)
   865             newrl = _revlogfrompath(dstrepo, unencoded)
   866             oldrl.clone(
   866             oldrl.clone(
   867                 tr,
   867                 tr,
   868                 newrl,
   868                 newrl,
   869                 addrevisioncb=oncopiedrevision,
   869                 addrevisioncb=oncopiedrevision,
   870                 deltareuse=deltareuse,
   870                 deltareuse=deltareuse,
   871                 forcedeltabothparents=forcedeltabothparents,
   871                 forcedeltabothparents=forcedeltabothparents,
   872             )
   872             )
   873         else:
   873         else:
   874             msg = _('blindly copying %s containing %i revisions\n')
   874             msg = _(b'blindly copying %s containing %i revisions\n')
   875             ui.note(msg % (unencoded, len(oldrl)))
   875             ui.note(msg % (unencoded, len(oldrl)))
   876             _copyrevlog(tr, dstrepo, oldrl, unencoded)
   876             _copyrevlog(tr, dstrepo, oldrl, unencoded)
   877 
   877 
   878             newrl = _revlogfrompath(dstrepo, unencoded)
   878             newrl = _revlogfrompath(dstrepo, unencoded)
   879 
   879 
   880         info = newrl.storageinfo(storedsize=True)
   880         info = newrl.storageinfo(storedsize=True)
   881         datasize = info['storedsize'] or 0
   881         datasize = info[b'storedsize'] or 0
   882 
   882 
   883         dstsize += datasize
   883         dstsize += datasize
   884 
   884 
   885         if isinstance(newrl, changelog.changelog):
   885         if isinstance(newrl, changelog.changelog):
   886             cdstsize += datasize
   886             cdstsize += datasize
   890             fdstsize += datasize
   890             fdstsize += datasize
   891 
   891 
   892     progress.complete()
   892     progress.complete()
   893 
   893 
   894     ui.write(
   894     ui.write(
    895         _('finished migrating %d changelog revisions; change in size: ' '%s\n')
    895         _(
    896             b'finished migrating %d changelog revisions; change in size: '
    897             b'%s\n'
    898         )
   896         % (crevcount, util.bytecount(cdstsize - csrcsize))
   899         % (crevcount, util.bytecount(cdstsize - csrcsize))
   897     )
   900     )
   898 
   901 
   899     ui.write(
   902     ui.write(
   900         _(
   903         _(
   901             'finished migrating %d total revisions; total change in store '
   904             b'finished migrating %d total revisions; total change in store '
   902             'size: %s\n'
   905             b'size: %s\n'
   903         )
   906         )
   904         % (revcount, util.bytecount(dstsize - srcsize))
   907         % (revcount, util.bytecount(dstsize - srcsize))
   905     )
   908     )
   906 
   909 
   907 
   910 
   920       st: ``stat`` data structure for ``path``
   923       st: ``stat`` data structure for ``path``
   921 
   924 
   922     Function should return ``True`` if the file is to be copied.
   925     Function should return ``True`` if the file is to be copied.
   923     """
   926     """
   924     # Skip revlogs.
   927     # Skip revlogs.
   925     if path.endswith(('.i', '.d')):
   928     if path.endswith((b'.i', b'.d')):
   926         return False
   929         return False
   927     # Skip transaction related files.
   930     # Skip transaction related files.
   928     if path.startswith('undo'):
   931     if path.startswith(b'undo'):
   929         return False
   932         return False
   930     # Only copy regular files.
   933     # Only copy regular files.
   931     if mode != stat.S_IFREG:
   934     if mode != stat.S_IFREG:
   932         return False
   935         return False
   933     # Skip other skipped files.
   936     # Skip other skipped files.
   934     if path in ('lock', 'fncache'):
   937     if path in (b'lock', b'fncache'):
   935         return False
   938         return False
   936 
   939 
   937     return True
   940     return True
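
Illustration only: in practice the rules above mean only auxiliary regular files (phaseroots, obsstore, and the like) are copied at this stage; revlogs were already handled by _clonerevlogs, and transaction or locking files must not leak into the new store. The same filtering, restated standalone (assuming every path is a regular file):

candidates = [b'00changelog.i', b'data/foo.py.d', b'undo.backupfiles',
              b'lock', b'fncache', b'phaseroots', b'obsstore']
copied = [p for p in candidates
          if not p.endswith((b'.i', b'.d'))      # revlogs already cloned
          and not p.startswith(b'undo')          # transaction leftovers
          and p not in (b'lock', b'fncache')]    # store-management files
assert copied == [b'phaseroots', b'obsstore']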
   938 
   941 
   939 
   942 
   960     assert srcrepo.currentwlock()
   963     assert srcrepo.currentwlock()
   961     assert dstrepo.currentwlock()
   964     assert dstrepo.currentwlock()
   962 
   965 
   963     ui.write(
   966     ui.write(
   964         _(
   967         _(
   965             '(it is safe to interrupt this process any time before '
   968             b'(it is safe to interrupt this process any time before '
   966             'data migration completes)\n'
   969             b'data migration completes)\n'
   967         )
   970         )
   968     )
   971     )
   969 
   972 
   970     if 're-delta-all' in actions:
   973     if b're-delta-all' in actions:
   971         deltareuse = revlog.revlog.DELTAREUSENEVER
   974         deltareuse = revlog.revlog.DELTAREUSENEVER
   972     elif 're-delta-parent' in actions:
   975     elif b're-delta-parent' in actions:
   973         deltareuse = revlog.revlog.DELTAREUSESAMEREVS
   976         deltareuse = revlog.revlog.DELTAREUSESAMEREVS
   974     elif 're-delta-multibase' in actions:
   977     elif b're-delta-multibase' in actions:
   975         deltareuse = revlog.revlog.DELTAREUSESAMEREVS
   978         deltareuse = revlog.revlog.DELTAREUSESAMEREVS
   976     elif 're-delta-fulladd' in actions:
   979     elif b're-delta-fulladd' in actions:
   977         deltareuse = revlog.revlog.DELTAREUSEFULLADD
   980         deltareuse = revlog.revlog.DELTAREUSEFULLADD
   978     else:
   981     else:
   979         deltareuse = revlog.revlog.DELTAREUSEALWAYS
   982         deltareuse = revlog.revlog.DELTAREUSEALWAYS
   980 
   983 
   981     with dstrepo.transaction('upgrade') as tr:
   984     with dstrepo.transaction(b'upgrade') as tr:
   982         _clonerevlogs(
   985         _clonerevlogs(
   983             ui,
   986             ui,
   984             srcrepo,
   987             srcrepo,
   985             dstrepo,
   988             dstrepo,
   986             tr,
   989             tr,
   987             deltareuse,
   990             deltareuse,
   988             're-delta-multibase' in actions,
   991             b're-delta-multibase' in actions,
   989             revlogs=revlogs,
   992             revlogs=revlogs,
   990         )
   993         )
   991 
   994 
   992     # Now copy other files in the store directory.
   995     # Now copy other files in the store directory.
   993     # The sorted() makes execution deterministic.
   996     # The sorted() makes execution deterministic.
   994     for p, kind, st in sorted(srcrepo.store.vfs.readdir('', stat=True)):
   997     for p, kind, st in sorted(srcrepo.store.vfs.readdir(b'', stat=True)):
   995         if not _filterstorefile(srcrepo, dstrepo, requirements, p, kind, st):
   998         if not _filterstorefile(srcrepo, dstrepo, requirements, p, kind, st):
   996             continue
   999             continue
   997 
  1000 
   998         srcrepo.ui.write(_('copying %s\n') % p)
  1001         srcrepo.ui.write(_(b'copying %s\n') % p)
   999         src = srcrepo.store.rawvfs.join(p)
  1002         src = srcrepo.store.rawvfs.join(p)
  1000         dst = dstrepo.store.rawvfs.join(p)
  1003         dst = dstrepo.store.rawvfs.join(p)
  1001         util.copyfile(src, dst, copystat=True)
  1004         util.copyfile(src, dst, copystat=True)
  1002 
  1005 
  1003     _finishdatamigration(ui, srcrepo, dstrepo, requirements)
  1006     _finishdatamigration(ui, srcrepo, dstrepo, requirements)
  1004 
  1007 
  1005     ui.write(_('data fully migrated to temporary repository\n'))
  1008     ui.write(_(b'data fully migrated to temporary repository\n'))
  1006 
  1009 
  1007     backuppath = pycompat.mkdtemp(prefix='upgradebackup.', dir=srcrepo.path)
  1010     backuppath = pycompat.mkdtemp(prefix=b'upgradebackup.', dir=srcrepo.path)
  1008     backupvfs = vfsmod.vfs(backuppath)
  1011     backupvfs = vfsmod.vfs(backuppath)
  1009 
  1012 
  1010     # Make a backup of requires file first, as it is the first to be modified.
  1013     # Make a backup of requires file first, as it is the first to be modified.
  1011     util.copyfile(srcrepo.vfs.join('requires'), backupvfs.join('requires'))
  1014     util.copyfile(srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires'))
  1012 
  1015 
  1013     # We install an arbitrary requirement that clients must not support
  1016     # We install an arbitrary requirement that clients must not support
  1014     # as a mechanism to lock out new clients during the data swap. This is
  1017     # as a mechanism to lock out new clients during the data swap. This is
  1015     # better than allowing a client to continue while the repository is in
  1018     # better than allowing a client to continue while the repository is in
  1016     # an inconsistent state.
  1019     # an inconsistent state.
  1017     ui.write(
  1020     ui.write(
  1018         _(
  1021         _(
  1019             'marking source repository as being upgraded; clients will be '
  1022             b'marking source repository as being upgraded; clients will be '
  1020             'unable to read from repository\n'
  1023             b'unable to read from repository\n'
  1021         )
  1024         )
  1022     )
  1025     )
  1023     scmutil.writerequires(
  1026     scmutil.writerequires(
  1024         srcrepo.vfs, srcrepo.requirements | {'upgradeinprogress'}
  1027         srcrepo.vfs, srcrepo.requirements | {b'upgradeinprogress'}
  1025     )
  1028     )
  1026 
  1029 
  1027     ui.write(_('starting in-place swap of repository data\n'))
  1030     ui.write(_(b'starting in-place swap of repository data\n'))
  1028     ui.write(_('replaced files will be backed up at %s\n') % backuppath)
  1031     ui.write(_(b'replaced files will be backed up at %s\n') % backuppath)
  1029 
  1032 
  1030     # Now swap in the new store directory. Doing it as a rename should make
  1033     # Now swap in the new store directory. Doing it as a rename should make
  1031     # the operation nearly instantaneous and atomic (at least in well-behaved
  1034     # the operation nearly instantaneous and atomic (at least in well-behaved
  1032     # environments).
  1035     # environments).
  1033     ui.write(_('replacing store...\n'))
  1036     ui.write(_(b'replacing store...\n'))
  1034     tstart = util.timer()
  1037     tstart = util.timer()
  1035     util.rename(srcrepo.spath, backupvfs.join('store'))
  1038     util.rename(srcrepo.spath, backupvfs.join(b'store'))
  1036     util.rename(dstrepo.spath, srcrepo.spath)
  1039     util.rename(dstrepo.spath, srcrepo.spath)
  1037     elapsed = util.timer() - tstart
  1040     elapsed = util.timer() - tstart
  1038     ui.write(
  1041     ui.write(
  1039         _(
  1042         _(
  1040             'store replacement complete; repository was inconsistent for '
  1043             b'store replacement complete; repository was inconsistent for '
  1041             '%0.1fs\n'
  1044             b'%0.1fs\n'
  1042         )
  1045         )
  1043         % elapsed
  1046         % elapsed
  1044     )
  1047     )
  1045 
  1048 
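           # Editorial note (derived from the code above, with an assumption
           # about recovery): the swap is just two directory renames, so the
           # inconsistency window reported above is the gap between them:
           #
           #     util.rename(srcrepo.spath, backupvfs.join(b'store'))  # live -> backup
           #     util.rename(dstrepo.spath, srcrepo.spath)             # staged -> live
           #
           # If the process were killed between the renames, the repository
           # could presumably be restored by moving <backuppath>/store back
           # into place by hand; nothing in this function automates that.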
  1046     # We first write the requirements file. Any new requirements will lock
  1049     # We first write the requirements file. Any new requirements will lock
  1047     # out legacy clients.
  1050     # out legacy clients.
  1048     ui.write(
  1051     ui.write(
  1049         _(
  1052         _(
  1050             'finalizing requirements file and making repository readable '
  1053             b'finalizing requirements file and making repository readable '
  1051             'again\n'
  1054             b'again\n'
  1052         )
  1055         )
  1053     )
  1056     )
  1054     scmutil.writerequires(srcrepo.vfs, requirements)
  1057     scmutil.writerequires(srcrepo.vfs, requirements)
  1055 
  1058 
  1056     # The lock file from the old store won't be removed because nothing has a
  1059     # The lock file from the old store won't be removed because nothing has a
  1057     # reference to its new location. So clean it up manually. Alternatively, we
  1060     # reference to its new location. So clean it up manually. Alternatively, we
  1058     # could update srcrepo.svfs and other variables to point to the new
  1061     # could update srcrepo.svfs and other variables to point to the new
  1059     # location. This is simpler.
  1062     # location. This is simpler.
  1060     backupvfs.unlink('store/lock')
  1063     backupvfs.unlink(b'store/lock')
  1061 
  1064 
  1062     return backuppath
  1065     return backuppath
  1063 
  1066 
  1064 
  1067 
  1065 def upgraderepo(
  1068 def upgraderepo(
  1076         optimize = []
  1079         optimize = []
  1077     optimize = set(legacy_opts_map.get(o, o) for o in optimize)
  1080     optimize = set(legacy_opts_map.get(o, o) for o in optimize)
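           # Editorial note (assumption: legacy_opts_map, defined elsewhere in
           # this module, maps old-style optimization names such as
           # b'redeltaparent' to their modern spellings such as
           # b're-delta-parent'). The normalization above means, e.g.
           #
           #     optimize = [b'redeltaparent', b're-delta-fulladd']
           #
           # becomes {b're-delta-parent', b're-delta-fulladd'} before the
           # names are matched against findoptimizations() further down.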
  1078     repo = repo.unfiltered()
  1081     repo = repo.unfiltered()
  1079 
  1082 
  1080     revlogs = set(UPGRADE_ALL_REVLOGS)
  1083     revlogs = set(UPGRADE_ALL_REVLOGS)
  1081     specentries = (('c', changelog), ('m', manifest))
  1084     specentries = ((b'c', changelog), (b'm', manifest))
  1082     specified = [(y, x) for (y, x) in specentries if x is not None]
  1085     specified = [(y, x) for (y, x) in specentries if x is not None]
  1083     if specified:
  1086     if specified:
  1084         # an explicit revlog selection was given; limit what is recloned
  1087         # an explicit revlog selection was given; limit what is recloned
  1085         if any(x for y, x in specified):
  1088         if any(x for y, x in specified):
  1086             revlogs = set()
  1089             revlogs = set()
  1087             for r, enabled in specified:
  1090             for r, enabled in specified:
  1088                 if enabled:
  1091                 if enabled:
  1089                     if r == 'c':
  1092                     if r == b'c':
  1090                         revlogs.add(UPGRADE_CHANGELOG)
  1093                         revlogs.add(UPGRADE_CHANGELOG)
  1091                     elif r == 'm':
  1094                     elif r == b'm':
  1092                         revlogs.add(UPGRADE_MANIFEST)
  1095                         revlogs.add(UPGRADE_MANIFEST)
  1093         else:
  1096         else:
  1094             # none are enabled
  1097             # none are enabled
  1095             for r, __ in specified:
  1098             for r, __ in specified:
  1096                 if r == 'c':
  1099                 if r == b'c':
  1097                     revlogs.discard(UPGRADE_CHANGELOG)
  1100                     revlogs.discard(UPGRADE_CHANGELOG)
  1098                 elif r == 'm':
  1101                 elif r == b'm':
  1099                     revlogs.discard(UPGRADE_MANIFEST)
  1102                     revlogs.discard(UPGRADE_MANIFEST)
  1100 
  1103 
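           # Editorial worked example (hypothetical argument combinations; the
           # full signature is elided from this hunk): with the selection
           # logic above,
           #
           #     changelog=True,  manifest=None   ->  revlogs == {UPGRADE_CHANGELOG}
           #     changelog=False, manifest=False  ->  revlogs == UPGRADE_ALL_REVLOGS
           #                                          minus changelog and manifest
           #     changelog=None,  manifest=None   ->  revlogs == UPGRADE_ALL_REVLOGS
           #
           # i.e. any positive selection restricts the reclone to exactly the
           # named revlogs, while purely negative selections subtract from the
           # full set.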
  1101     # Ensure the repository can be upgraded.
  1104     # Ensure the repository can be upgraded.
  1102     missingreqs = requiredsourcerequirements(repo) - repo.requirements
  1105     missingreqs = requiredsourcerequirements(repo) - repo.requirements
  1103     if missingreqs:
  1106     if missingreqs:
  1104         raise error.Abort(
  1107         raise error.Abort(
  1105             _('cannot upgrade repository; requirement ' 'missing: %s')
  1108             _(b'cannot upgrade repository; requirement ' b'missing: %s')
  1106             % _(', ').join(sorted(missingreqs))
  1109             % _(b', ').join(sorted(missingreqs))
  1107         )
  1110         )
  1108 
  1111 
  1109     blockedreqs = blocksourcerequirements(repo) & repo.requirements
  1112     blockedreqs = blocksourcerequirements(repo) & repo.requirements
  1110     if blockedreqs:
  1113     if blockedreqs:
  1111         raise error.Abort(
  1114         raise error.Abort(
  1112             _(
  1115             _(
  1113                 'cannot upgrade repository; unsupported source '
  1116                 b'cannot upgrade repository; unsupported source '
  1114                 'requirement: %s'
  1117                 b'requirement: %s'
  1115             )
  1118             )
  1116             % _(', ').join(sorted(blockedreqs))
  1119             % _(b', ').join(sorted(blockedreqs))
  1117         )
  1120         )
  1118 
  1121 
  1119     # FUTURE there is potentially a need to control the wanted requirements via
  1122     # FUTURE there is potentially a need to control the wanted requirements via
  1120     # command arguments or via an extension hook point.
  1123     # command arguments or via an extension hook point.
  1121     newreqs = localrepo.newreporequirements(
  1124     newreqs = localrepo.newreporequirements(
  1126     noremovereqs = (
  1129     noremovereqs = (
  1127         repo.requirements - newreqs - supportremovedrequirements(repo)
  1130         repo.requirements - newreqs - supportremovedrequirements(repo)
  1128     )
  1131     )
  1129     if noremovereqs:
  1132     if noremovereqs:
  1130         raise error.Abort(
  1133         raise error.Abort(
  1131             _('cannot upgrade repository; requirement would be ' 'removed: %s')
  1134             _(
  1132             % _(', ').join(sorted(noremovereqs))
  1135                 b'cannot upgrade repository; requirement would be '
       
  1136                 b'removed: %s'
       
  1137             )
       
  1138             % _(b', ').join(sorted(noremovereqs))
  1133         )
  1139         )
  1134 
  1140 
  1135     noaddreqs = newreqs - repo.requirements - allowednewrequirements(repo)
  1141     noaddreqs = newreqs - repo.requirements - allowednewrequirements(repo)
  1136     if noaddreqs:
  1142     if noaddreqs:
  1137         raise error.Abort(
  1143         raise error.Abort(
  1138             _(
  1144             _(
  1139                 'cannot upgrade repository; do not support adding '
  1145                 b'cannot upgrade repository; do not support adding '
  1140                 'requirement: %s'
  1146                 b'requirement: %s'
  1141             )
  1147             )
  1142             % _(', ').join(sorted(noaddreqs))
  1148             % _(b', ').join(sorted(noaddreqs))
  1143         )
  1149         )
  1144 
  1150 
  1145     unsupportedreqs = newreqs - supporteddestrequirements(repo)
  1151     unsupportedreqs = newreqs - supporteddestrequirements(repo)
  1146     if unsupportedreqs:
  1152     if unsupportedreqs:
  1147         raise error.Abort(
  1153         raise error.Abort(
  1148             _(
  1154             _(
  1149                 'cannot upgrade repository; do not support '
  1155                 b'cannot upgrade repository; do not support '
  1150                 'destination requirement: %s'
  1156                 b'destination requirement: %s'
  1151             )
  1157             )
  1152             % _(', ').join(sorted(unsupportedreqs))
  1158             % _(b', ').join(sorted(unsupportedreqs))
  1153         )
  1159         )
  1154 
  1160 
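           # Editorial summary (plain set notation, added for clarity): the
           # guards above enforce the following invariants over requirement
           # names, aborting before any data is migrated if one fails:
           #
           #     requiredsourcerequirements(repo) <= repo.requirements
           #     blocksourcerequirements(repo) & repo.requirements == set()
           #     repo.requirements - newreqs <= supportremovedrequirements(repo)
           #     newreqs - repo.requirements <= allowednewrequirements(repo)
           #     newreqs <= supporteddestrequirements(repo)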
  1155     # Find and validate all improvements that can be made.
  1161     # Find and validate all improvements that can be made.
  1156     alloptimizations = findoptimizations(repo)
  1162     alloptimizations = findoptimizations(repo)
  1157 
  1163 
  1162             optimizations.append(o)
  1168             optimizations.append(o)
  1163             optimize.discard(o.name)
  1169             optimize.discard(o.name)
  1164 
  1170 
  1165     if optimize:  # anything left is unknown
  1171     if optimize:  # anything left is unknown
  1166         raise error.Abort(
  1172         raise error.Abort(
  1167             _('unknown optimization action requested: %s')
  1173             _(b'unknown optimization action requested: %s')
  1168             % ', '.join(sorted(optimize)),
  1174             % b', '.join(sorted(optimize)),
  1169             hint=_('run without arguments to see valid ' 'optimizations'),
  1175             hint=_(b'run without arguments to see valid ' b'optimizations'),
  1170         )
  1176         )
  1171 
  1177 
  1172     deficiencies = finddeficiencies(repo)
  1178     deficiencies = finddeficiencies(repo)
  1173     actions = determineactions(repo, deficiencies, repo.requirements, newreqs)
  1179     actions = determineactions(repo, deficiencies, repo.requirements, newreqs)
  1174     actions.extend(
  1180     actions.extend(
  1183 
  1189 
  1184     if revlogs != UPGRADE_ALL_REVLOGS:
  1190     if revlogs != UPGRADE_ALL_REVLOGS:
  1185         incompatible = RECLONES_REQUIREMENTS & (removedreqs | addedreqs)
  1191         incompatible = RECLONES_REQUIREMENTS & (removedreqs | addedreqs)
  1186         if incompatible:
  1192         if incompatible:
  1187             msg = _(
  1193             msg = _(
  1188                 'ignoring revlogs selection flags, format requirements '
  1194                 b'ignoring revlogs selection flags, format requirements '
  1189                 'change: %s\n'
  1195                 b'change: %s\n'
  1190             )
  1196             )
  1191             ui.warn(msg % ', '.join(sorted(incompatible)))
  1197             ui.warn(msg % b', '.join(sorted(incompatible)))
  1192             revlogs = UPGRADE_ALL_REVLOGS
  1198             revlogs = UPGRADE_ALL_REVLOGS
  1193 
  1199 
  1194     def printrequirements():
  1200     def printrequirements():
  1195         ui.write(_('requirements\n'))
  1201         ui.write(_(b'requirements\n'))
  1196         ui.write(
  1202         ui.write(
  1197             _('   preserved: %s\n')
  1203             _(b'   preserved: %s\n')
  1198             % _(', ').join(sorted(newreqs & repo.requirements))
  1204             % _(b', ').join(sorted(newreqs & repo.requirements))
  1199         )
  1205         )
  1200 
  1206 
  1201         if repo.requirements - newreqs:
  1207         if repo.requirements - newreqs:
  1202             ui.write(
  1208             ui.write(
  1203                 _('   removed: %s\n')
  1209                 _(b'   removed: %s\n')
  1204                 % _(', ').join(sorted(repo.requirements - newreqs))
  1210                 % _(b', ').join(sorted(repo.requirements - newreqs))
  1205             )
  1211             )
  1206 
  1212 
  1207         if newreqs - repo.requirements:
  1213         if newreqs - repo.requirements:
  1208             ui.write(
  1214             ui.write(
  1209                 _('   added: %s\n')
  1215                 _(b'   added: %s\n')
  1210                 % _(', ').join(sorted(newreqs - repo.requirements))
  1216                 % _(b', ').join(sorted(newreqs - repo.requirements))
  1211             )
  1217             )
  1212 
  1218 
  1213         ui.write('\n')
  1219         ui.write(b'\n')
  1214 
  1220 
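           # Editorial example (hypothetical requirement mix, for illustration
           # only): printrequirements() produces output shaped like
           #
           #     requirements
           #        preserved: dotencode, fncache, revlogv1, store
           #        removed: generaldelta
           #        added: sparserevlog
           #
           # with the removed/added lines omitted when the corresponding set
           # is empty.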
  1215     def printupgradeactions():
  1221     def printupgradeactions():
  1216         for a in actions:
  1222         for a in actions:
  1217             ui.write('%s\n   %s\n\n' % (a.name, a.upgrademessage))
  1223             ui.write(b'%s\n   %s\n\n' % (a.name, a.upgrademessage))
  1218 
  1224 
  1219     if not run:
  1225     if not run:
  1220         fromconfig = []
  1226         fromconfig = []
  1221         onlydefault = []
  1227         onlydefault = []
  1222 
  1228 
  1229         if fromconfig or onlydefault:
  1235         if fromconfig or onlydefault:
  1230 
  1236 
  1231             if fromconfig:
  1237             if fromconfig:
  1232                 ui.write(
  1238                 ui.write(
  1233                     _(
  1239                     _(
  1234                         'repository lacks features recommended by '
  1240                         b'repository lacks features recommended by '
  1235                         'current config options:\n\n'
  1241                         b'current config options:\n\n'
  1236                     )
  1242                     )
  1237                 )
  1243                 )
  1238                 for i in fromconfig:
  1244                 for i in fromconfig:
  1239                     ui.write('%s\n   %s\n\n' % (i.name, i.description))
  1245                     ui.write(b'%s\n   %s\n\n' % (i.name, i.description))
  1240 
  1246 
  1241             if onlydefault:
  1247             if onlydefault:
  1242                 ui.write(
  1248                 ui.write(
  1243                     _(
  1249                     _(
  1244                         'repository lacks features used by the default '
  1250                         b'repository lacks features used by the default '
  1245                         'config options:\n\n'
  1251                         b'config options:\n\n'
  1246                     )
  1252                     )
  1247                 )
  1253                 )
  1248                 for i in onlydefault:
  1254                 for i in onlydefault:
  1249                     ui.write('%s\n   %s\n\n' % (i.name, i.description))
  1255                     ui.write(b'%s\n   %s\n\n' % (i.name, i.description))
  1250 
  1256 
  1251             ui.write('\n')
  1257             ui.write(b'\n')
  1252         else:
  1258         else:
  1253             ui.write(
  1259             ui.write(
  1254                 _('(no feature deficiencies found in existing ' 'repository)\n')
  1260                 _(
       
  1261                     b'(no feature deficiencies found in existing '
       
  1262                     b'repository)\n'
       
  1263                 )
  1255             )
  1264             )
  1256 
  1265 
  1257         ui.write(
  1266         ui.write(
  1258             _(
  1267             _(
  1259                 'performing an upgrade with "--run" will make the following '
  1268                 b'performing an upgrade with "--run" will make the following '
  1260                 'changes:\n\n'
  1269                 b'changes:\n\n'
  1261             )
  1270             )
  1262         )
  1271         )
  1263 
  1272 
  1264         printrequirements()
  1273         printrequirements()
  1265         printupgradeactions()
  1274         printupgradeactions()
  1267         unusedoptimize = [i for i in alloptimizations if i not in actions]
  1276         unusedoptimize = [i for i in alloptimizations if i not in actions]
  1268 
  1277 
  1269         if unusedoptimize:
  1278         if unusedoptimize:
  1270             ui.write(
  1279             ui.write(
  1271                 _(
  1280                 _(
  1272                     'additional optimizations are available by specifying '
  1281                     b'additional optimizations are available by specifying '
  1273                     '"--optimize <name>":\n\n'
  1282                     b'"--optimize <name>":\n\n'
  1274                 )
  1283                 )
  1275             )
  1284             )
  1276             for i in unusedoptimize:
  1285             for i in unusedoptimize:
  1277                 ui.write(_('%s\n   %s\n\n') % (i.name, i.description))
  1286                 ui.write(_(b'%s\n   %s\n\n') % (i.name, i.description))
  1278         return
  1287         return
  1279 
  1288 
  1280     # Else we're in the run=true case.
  1289     # Else we're in the run=true case.
  1281     ui.write(_('upgrade will perform the following actions:\n\n'))
  1290     ui.write(_(b'upgrade will perform the following actions:\n\n'))
  1282     printrequirements()
  1291     printrequirements()
  1283     printupgradeactions()
  1292     printupgradeactions()
  1284 
  1293 
  1285     upgradeactions = [a.name for a in actions]
  1294     upgradeactions = [a.name for a in actions]
  1286 
  1295 
  1287     ui.write(_('beginning upgrade...\n'))
  1296     ui.write(_(b'beginning upgrade...\n'))
  1288     with repo.wlock(), repo.lock():
  1297     with repo.wlock(), repo.lock():
  1289         ui.write(_('repository locked and read-only\n'))
  1298         ui.write(_(b'repository locked and read-only\n'))
  1290         # Our strategy for upgrading the repository is to create a new,
  1299         # Our strategy for upgrading the repository is to create a new,
  1291         # temporary repository, write data to it, then do a swap of the
  1300         # temporary repository, write data to it, then do a swap of the
  1292         # data. There are less heavyweight ways to do this, but it is easier
  1301         # data. There are less heavyweight ways to do this, but it is easier
  1293         # to create a new repo object than to instantiate all the components
  1302         # to create a new repo object than to instantiate all the components
  1294         # (like the store) separately.
  1303         # (like the store) separately.
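               # Editorial outline (restating the code below, no new
               # behaviour):
               #   1. mkdtemp() a staging directory inside repo.path and build
               #      the temporary dstrepo there
               #   2. _upgraderepo() migrates the data, swaps the store and
               #      returns the path of the backup it made of the old store
               #   3. if the caller passed backup=False, that backup is
               #      deleted immediately after a successful swap
               #   4. the staging directory is always removed, and any
               #      surviving backup path is reported to the user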
  1295         tmppath = pycompat.mkdtemp(prefix='upgrade.', dir=repo.path)
  1304         tmppath = pycompat.mkdtemp(prefix=b'upgrade.', dir=repo.path)
  1296         backuppath = None
  1305         backuppath = None
  1297         try:
  1306         try:
  1298             ui.write(
  1307             ui.write(
  1299                 _(
  1308                 _(
  1300                     'creating temporary repository to stage migrated '
  1309                     b'creating temporary repository to stage migrated '
  1301                     'data: %s\n'
  1310                     b'data: %s\n'
  1302                 )
  1311                 )
  1303                 % tmppath
  1312                 % tmppath
  1304             )
  1313             )
  1305 
  1314 
  1306             # clone ui without using ui.copy because repo.ui is protected
  1315             # clone ui without using ui.copy because repo.ui is protected
  1310             with dstrepo.wlock(), dstrepo.lock():
  1319             with dstrepo.wlock(), dstrepo.lock():
  1311                 backuppath = _upgraderepo(
  1320                 backuppath = _upgraderepo(
  1312                     ui, repo, dstrepo, newreqs, upgradeactions, revlogs=revlogs
  1321                     ui, repo, dstrepo, newreqs, upgradeactions, revlogs=revlogs
  1313                 )
  1322                 )
  1314             if not (backup or backuppath is None):
  1323             if not (backup or backuppath is None):
  1315                 ui.write(_('removing old repository content %s\n') % backuppath)
  1324                 ui.write(_(b'removing old repository content %s\n') % backuppath)
  1316                 repo.vfs.rmtree(backuppath, forcibly=True)
  1325                 repo.vfs.rmtree(backuppath, forcibly=True)
  1317                 backuppath = None
  1326                 backuppath = None
  1318 
  1327 
  1319         finally:
  1328         finally:
  1320             ui.write(_('removing temporary repository %s\n') % tmppath)
  1329             ui.write(_(b'removing temporary repository %s\n') % tmppath)
  1321             repo.vfs.rmtree(tmppath, forcibly=True)
  1330             repo.vfs.rmtree(tmppath, forcibly=True)
  1322 
  1331 
  1323             if backuppath:
  1332             if backuppath:
  1324                 ui.warn(
  1333                 ui.warn(
  1325                     _('copy of old repository backed up at %s\n') % backuppath
  1334                     _(b'copy of old repository backed up at %s\n') % backuppath
  1326                 )
  1335                 )
  1327                 ui.warn(
  1336                 ui.warn(
  1328                     _(
  1337                     _(
  1329                         'the old repository will not be deleted; remove '
  1338                         b'the old repository will not be deleted; remove '
  1330                         'it to free up disk space once the upgraded '
  1339                         b'it to free up disk space once the upgraded '
  1331                         'repository is verified\n'
  1340                         b'repository is verified\n'
  1332                     )
  1341                     )
  1333                 )
  1342                 )