mercurial/streamclone.py
branch stable
changeset 47759 d7515d29761d
parent 47502 65c519661991
parent 47448 d370256636fe
child 47788 48f07adbda98
equal deleted inserted replaced
47506:29ea3b4c4f62 47759:d7515d29761d
     6 # GNU General Public License version 2 or any later version.
     6 # GNU General Public License version 2 or any later version.
     7 
     7 
     8 from __future__ import absolute_import
     8 from __future__ import absolute_import
     9 
     9 
    10 import contextlib
    10 import contextlib
       
    11 import errno
    11 import os
    12 import os
    12 import struct
    13 import struct
    13 
    14 
    14 from .i18n import _
    15 from .i18n import _
    15 from .pycompat import open
    16 from .pycompat import open
    16 from .interfaces import repository
    17 from .interfaces import repository
    17 from . import (
    18 from . import (
       
    19     bookmarks,
    18     cacheutil,
    20     cacheutil,
    19     error,
    21     error,
    20     narrowspec,
    22     narrowspec,
    21     phases,
    23     phases,
    22     pycompat,
    24     pycompat,
    23     requirements as requirementsmod,
    25     requirements as requirementsmod,
    24     scmutil,
    26     scmutil,
    25     store,
    27     store,
    26     util,
    28     util,
       
    29 )
       
    30 from .utils import (
       
    31     stringutil,
    27 )
    32 )
    28 
    33 
    29 
    34 
    30 def canperformstreamclone(pullop, bundle2=False):
    35 def canperformstreamclone(pullop, bundle2=False):
    31     """Whether it is possible to perform a streaming clone as part of pull.
    36     """Whether it is possible to perform a streaming clone as part of pull.
   611 
   616 
   612 def _test_sync_point_walk_2(repo):
   617 def _test_sync_point_walk_2(repo):
   613     """a function for synchronisation during tests"""
   618     """a function for synchronisation during tests"""
   614 
   619 
   615 
   620 
       
   621 def _v2_walk(repo, includes, excludes, includeobsmarkers):
       
    622     """emit a series of file information useful to clone a repo
       
   623 
       
    624     returns (entries, totalfilesize)
       
   625 
       
    626     entries is a list of tuples (vfs-key, file-path, file-type, size)
       
   627 
       
    628     - `vfs-key`: a key identifying the vfs to use to write the file (see _makemap)
       
    629     - `file-path`: path of the file to copy (to be fed to the vfs)
       
    630     - `file-type`: does this file need to be copied with the source lock held?
       
   631     - `size`: the size of the file (or None)
       
   632     """
       
   633     assert repo._currentlock(repo._lockref) is not None
       
   634     entries = []
       
   635     totalfilesize = 0
       
   636 
       
   637     matcher = None
       
   638     if includes or excludes:
       
   639         matcher = narrowspec.match(repo.root, includes, excludes)
       
   640 
       
   641     for rl_type, name, ename, size in _walkstreamfiles(repo, matcher):
       
   642         if size:
       
   643             ft = _fileappend
       
   644             if rl_type & store.FILEFLAGS_VOLATILE:
       
   645                 ft = _filefull
       
   646             entries.append((_srcstore, name, ft, size))
       
   647             totalfilesize += size
       
   648     for name in _walkstreamfullstorefiles(repo):
       
   649         if repo.svfs.exists(name):
       
   650             totalfilesize += repo.svfs.lstat(name).st_size
       
   651             entries.append((_srcstore, name, _filefull, None))
       
   652     if includeobsmarkers and repo.svfs.exists(b'obsstore'):
       
   653         totalfilesize += repo.svfs.lstat(b'obsstore').st_size
       
   654         entries.append((_srcstore, b'obsstore', _filefull, None))
       
   655     for name in cacheutil.cachetocopy(repo):
       
   656         if repo.cachevfs.exists(name):
       
   657             totalfilesize += repo.cachevfs.lstat(name).st_size
       
   658             entries.append((_srccache, name, _filefull, None))
       
   659     return entries, totalfilesize
       
   660 
       
   661 
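
As an aside for readers of this changeset: the sketch below is not part of the patch. It only assumes the documented return shape of `_v2_walk` (a list of `(vfs-key, file-path, file-type, size)` tuples plus a total size, with `size` possibly None for files copied in full) and shows how a hypothetical caller could summarize the walk per vfs key.

# Illustrative sketch only (not part of the changeset); the helper name and
# its caller are hypothetical.
def summarize_v2_entries(entries, totalfilesize):
    """Group walked entries by vfs key and count the bytes with a known size."""
    per_vfs = {}
    for vfs_key, file_path, file_type, size in entries:
        count, known = per_vfs.get(vfs_key, (0, 0))
        # `size` is None for files that will be copied in full (caches,
        # obsstore, ...); their bytes only show up in `totalfilesize`.
        per_vfs[vfs_key] = (count + 1, known + (size or 0))
    return per_vfs, totalfilesize
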
   616 def generatev2(repo, includes, excludes, includeobsmarkers):
   662 def generatev2(repo, includes, excludes, includeobsmarkers):
   617     """Emit content for version 2 of a streaming clone.
   663     """Emit content for version 2 of a streaming clone.
   618 
   664 
    619     the data stream consists of the following entries:
    665     the data stream consists of the following entries:
    620     1) A char representing the file destination (e.g. store or cache)
    666     1) A char representing the file destination (e.g. store or cache)
   626     Returns a 3-tuple of (file count, file size, data iterator).
   672     Returns a 3-tuple of (file count, file size, data iterator).
   627     """
   673     """
   628 
   674 
   629     with repo.lock():
   675     with repo.lock():
   630 
   676 
   631         entries = []
       
   632         totalfilesize = 0
       
   633 
       
   634         matcher = None
       
   635         if includes or excludes:
       
   636             matcher = narrowspec.match(repo.root, includes, excludes)
       
   637 
       
   638         repo.ui.debug(b'scanning\n')
   677         repo.ui.debug(b'scanning\n')
   639         for rl_type, name, ename, size in _walkstreamfiles(repo, matcher):
   678 
   640             if size:
   679         entries, totalfilesize = _v2_walk(
   641                 ft = _fileappend
   680             repo,
   642                 if rl_type & store.FILEFLAGS_VOLATILE:
   681             includes=includes,
   643                     ft = _filefull
   682             excludes=excludes,
   644                 entries.append((_srcstore, name, ft, size))
   683             includeobsmarkers=includeobsmarkers,
   645                 totalfilesize += size
   684         )
   646         for name in _walkstreamfullstorefiles(repo):
       
   647             if repo.svfs.exists(name):
       
   648                 totalfilesize += repo.svfs.lstat(name).st_size
       
   649                 entries.append((_srcstore, name, _filefull, None))
       
   650         if includeobsmarkers and repo.svfs.exists(b'obsstore'):
       
   651             totalfilesize += repo.svfs.lstat(b'obsstore').st_size
       
   652             entries.append((_srcstore, b'obsstore', _filefull, None))
       
   653         for name in cacheutil.cachetocopy(repo):
       
   654             if repo.cachevfs.exists(name):
       
   655                 totalfilesize += repo.cachevfs.lstat(name).st_size
       
   656                 entries.append((_srccache, name, _filefull, None))
       
   657 
   685 
   658         chunks = _emit2(repo, entries, totalfilesize)
   686         chunks = _emit2(repo, entries, totalfilesize)
   659         first = next(chunks)
   687         first = next(chunks)
   660         assert first is None
   688         assert first is None
   661         _test_sync_point_walk_1(repo)
   689         _test_sync_point_walk_1(repo)
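
For context, a minimal consumer of `generatev2` could look like the sketch below; it relies only on the documented 3-tuple return value (file count, total file size, data iterator). The `write_v2_stream` helper and its `out` file object are hypothetical, not part of the changeset.

# Hypothetical consumer sketch; only the documented return value of
# generatev2() is assumed.
def write_v2_stream(repo, out):
    filecount, filesize, it = generatev2(
        repo, includes=None, excludes=None, includeobsmarkers=True
    )
    repo.ui.status(b'streaming %d files (%d bytes)\n' % (filecount, filesize))
    for chunk in it:
        out.write(chunk)
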
   765     )
   793     )
   766     repo.svfs.options = localrepo.resolvestorevfsoptions(
   794     repo.svfs.options = localrepo.resolvestorevfsoptions(
   767         repo.ui, repo.requirements, repo.features
   795         repo.ui, repo.requirements, repo.features
   768     )
   796     )
   769     scmutil.writereporequirements(repo)
   797     scmutil.writereporequirements(repo)
       
   798 
       
   799 
       
   800 def _copy_files(src_vfs_map, dst_vfs_map, entries, progress):
       
   801     hardlink = [True]
       
   802 
       
   803     def copy_used():
       
   804         hardlink[0] = False
       
   805         progress.topic = _(b'copying')
       
   806 
       
   807     for k, path, size in entries:
       
   808         src_vfs = src_vfs_map[k]
       
   809         dst_vfs = dst_vfs_map[k]
       
   810         src_path = src_vfs.join(path)
       
   811         dst_path = dst_vfs.join(path)
       
   812         dirname = dst_vfs.dirname(path)
       
   813         if not dst_vfs.exists(dirname):
       
   814             dst_vfs.makedirs(dirname)
       
   815         dst_vfs.register_file(path)
       
    816         # XXX we could use the `nb_bytes` argument.
       
   817         util.copyfile(
       
   818             src_path,
       
   819             dst_path,
       
   820             hardlink=hardlink[0],
       
   821             no_hardlink_cb=copy_used,
       
   822             check_fs_hardlink=False,
       
   823         )
       
   824         progress.increment()
       
   825     return hardlink[0]
       
   826 
       
   827 
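
The `hardlink[0]` bookkeeping in `_copy_files` above relies on `util.copyfile` downgrading from hardlinks to plain copies and signalling that through `no_hardlink_cb`. The standalone sketch below reproduces the same fallback pattern with only the standard library; the function name, paths and `state` dict are hypothetical, and this is not how the patch itself copies files.

# Standard-library sketch of the hardlink-with-copy-fallback pattern; the
# names used here are hypothetical.
import os
import shutil

def link_or_copy(src_path, dst_path, state):
    """Try to hardlink src to dst, falling back to a copy on failure."""
    try:
        os.link(src_path, dst_path)
    except OSError:
        # once any file cannot be hardlinked, stop reporting "linking"
        state['hardlink'] = False
        shutil.copy2(src_path, dst_path)
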
       
   828 def local_copy(src_repo, dest_repo):
       
   829     """copy all content from one local repository to another
       
   830 
       
    831     This is useful for local clones."""
       
   832     src_store_requirements = {
       
   833         r
       
   834         for r in src_repo.requirements
       
   835         if r not in requirementsmod.WORKING_DIR_REQUIREMENTS
       
   836     }
       
   837     dest_store_requirements = {
       
   838         r
       
   839         for r in dest_repo.requirements
       
   840         if r not in requirementsmod.WORKING_DIR_REQUIREMENTS
       
   841     }
       
   842     assert src_store_requirements == dest_store_requirements
       
   843 
       
   844     with dest_repo.lock():
       
   845         with src_repo.lock():
       
   846 
       
    847             # bookmarks are not integrated into the streaming as they might
        
    848             # use `repo.vfs`, and there is too much sensitive data accessible
        
    849             # through `repo.vfs` to expose it to streaming clone.
       
   850             src_book_vfs = bookmarks.bookmarksvfs(src_repo)
       
   851             srcbookmarks = src_book_vfs.join(b'bookmarks')
       
   852             bm_count = 0
       
   853             if os.path.exists(srcbookmarks):
       
   854                 bm_count = 1
       
   855 
       
   856             entries, totalfilesize = _v2_walk(
       
   857                 src_repo,
       
   858                 includes=None,
       
   859                 excludes=None,
       
   860                 includeobsmarkers=True,
       
   861             )
       
   862             src_vfs_map = _makemap(src_repo)
       
   863             dest_vfs_map = _makemap(dest_repo)
       
   864             progress = src_repo.ui.makeprogress(
       
   865                 topic=_(b'linking'),
       
   866                 total=len(entries) + bm_count,
       
   867                 unit=_(b'files'),
       
   868             )
       
    869             # copy files
        
    870             #
        
    871             # We could copy the full files while the source repository is
        
    872             # locked and the other ones without the lock. However, in the
        
    873             # linking case, this would also require checks that nobody is
        
    874             # appending any data to the files while we do the clone, so this
        
    875             # is not done yet. We could do this blindly when copying files.
       
   876             files = ((k, path, size) for k, path, ftype, size in entries)
       
   877             hardlink = _copy_files(src_vfs_map, dest_vfs_map, files, progress)
       
   878 
       
   879             # copy bookmarks over
       
   880             if bm_count:
       
   881                 dst_book_vfs = bookmarks.bookmarksvfs(dest_repo)
       
   882                 dstbookmarks = dst_book_vfs.join(b'bookmarks')
       
   883                 util.copyfile(srcbookmarks, dstbookmarks)
       
   884         progress.complete()
       
   885         if hardlink:
       
   886             msg = b'linked %d files\n'
       
   887         else:
       
   888             msg = b'copied %d files\n'
       
   889         src_repo.ui.debug(msg % (len(entries) + bm_count))
       
   890 
       
   891         with dest_repo.transaction(b"localclone") as tr:
       
   892             dest_repo.store.write(tr)
       
   893 
       
    894         # clean up transaction files as they do not make sense here
       
   895         undo_files = [(dest_repo.svfs, b'undo.backupfiles')]
       
   896         undo_files.extend(dest_repo.undofiles())
       
   897         for undovfs, undofile in undo_files:
       
   898             try:
       
   899                 undovfs.unlink(undofile)
       
   900             except OSError as e:
       
   901                 if e.errno != errno.ENOENT:
       
   902                     msg = _(b'error removing %s: %s\n')
       
   903                     path = undovfs.join(undofile)
       
   904                     e_msg = stringutil.forcebytestr(e)
       
   905                     msg %= (path, e_msg)
       
   906                     dest_repo.ui.warn(msg)
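
Finally, a hypothetical caller of `local_copy` might look like the sketch below; it assumes two already-open local repository objects and simply mirrors, as a user-facing check, the store-requirement assertion that `local_copy` performs.

# Hypothetical caller sketch; clone_locally() is not part of the changeset.
def clone_locally(src_repo, dest_repo):
    src_store = {
        r
        for r in src_repo.requirements
        if r not in requirementsmod.WORKING_DIR_REQUIREMENTS
    }
    dest_store = {
        r
        for r in dest_repo.requirements
        if r not in requirementsmod.WORKING_DIR_REQUIREMENTS
    }
    if src_store != dest_store:
        raise error.Abort(b'store requirements differ, cannot stream-copy')
    local_copy(src_repo, dest_repo)
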