store: make `walk` return an entry for obsolescence if requested so
author: Pierre-Yves David <pierre-yves.david@octobus.net>
Sun, 21 May 2023 02:29:33 +0200
changeset 50514 0925eaf09c8b
parent 50513 5a62d56e3955
child 50515 06d580b8f432
store: make `walk` return an entry for obsolescence if requested so. Instead of having dedicated code in the streamclone code, we should have the store deal with advertising the data it contains.
hgext/remotefilelog/remotefilelogserver.py
mercurial/store.py
mercurial/streamclone.py
--- a/hgext/remotefilelog/remotefilelogserver.py	Sun May 21 02:16:24 2023 +0200
+++ b/hgext/remotefilelog/remotefilelogserver.py	Sun May 21 02:29:33 2023 +0200
@@ -145,7 +145,9 @@
     )
 
     # don't clone filelogs to shallow clients
-    def _walkstreamfiles(orig, repo, matcher=None, phase=False):
+    def _walkstreamfiles(
+        orig, repo, matcher=None, phase=False, obsolescence=False
+    ):
         if state.shallowremote:
             # if we are shallow ourselves, stream our local commits
             if shallowutil.isenabled(repo):
@@ -200,7 +202,9 @@
                 _(b"Cannot clone from a shallow repo to a full repo.")
             )
         else:
-            for x in orig(repo, matcher, phase=phase):
+            for x in orig(
+                repo, matcher, phase=phase, obsolescence=obsolescence
+            ):
                 yield x
 
     extensions.wrapfunction(streamclone, b'_walkstreamfiles', _walkstreamfiles)
--- a/mercurial/store.py	Sun May 21 02:16:24 2023 +0200
+++ b/mercurial/store.py	Sun May 21 02:29:33 2023 +0200
@@ -685,13 +685,22 @@
                     details=file_details,
                 )
 
-    def top_entries(self, phase=False) -> Generator[BaseStoreEntry, None, None]:
+    def top_entries(
+        self, phase=False, obsolescence=False
+    ) -> Generator[BaseStoreEntry, None, None]:
         if phase and self.vfs.exists(b'phaseroots'):
             yield SimpleStoreEntry(
                 entry_path=b'phaseroots',
                 is_volatile=True,
             )
 
+        if obsolescence and self.vfs.exists(b'obsstore'):
+            # XXX if we had the file size it could be non-volatile
+            yield SimpleStoreEntry(
+                entry_path=b'obsstore',
+                is_volatile=True,
+            )
+
         files = reversed(self._walk(b'', False))
 
         changelogs = collections.defaultdict(dict)
@@ -733,7 +742,7 @@
                 )
 
     def walk(
-        self, matcher=None, phase=False
+        self, matcher=None, phase=False, obsolescence=False
     ) -> Generator[BaseStoreEntry, None, None]:
         """return files related to data storage (ie: revlogs)
 
@@ -745,7 +754,7 @@
         # yield data files first
         for x in self.data_entries(matcher):
             yield x
-        for x in self.top_entries(phase=phase):
+        for x in self.top_entries(phase=phase, obsolescence=obsolescence):
             yield x
 
     def copylist(self):
--- a/mercurial/streamclone.py	Sun May 21 02:16:24 2023 +0200
+++ b/mercurial/streamclone.py	Sun May 21 02:29:33 2023 +0200
@@ -241,8 +241,8 @@
 
 
 # This is it's own function so extensions can override it.
-def _walkstreamfiles(repo, matcher=None, phase=False):
-    return repo.store.walk(matcher, phase=phase)
+def _walkstreamfiles(repo, matcher=None, phase=False, obsolescence=False):
+    return repo.store.walk(matcher, phase=phase, obsolescence=obsolescence)
 
 
 def generatev1(repo):
@@ -672,7 +672,7 @@
     - `size`: the size of the file (or None)
     """
     assert repo._currentlock(repo._lockref) is not None
-    entries = []
+    files = []
     totalfilesize = 0
 
     matcher = None
@@ -680,23 +680,23 @@
         matcher = narrowspec.match(repo.root, includes, excludes)
 
     phase = not repo.publishing()
-    for entry in _walkstreamfiles(repo, matcher, phase=phase):
+    entries = _walkstreamfiles(
+        repo, matcher, phase=phase, obsolescence=includeobsmarkers
+    )
+    for entry in entries:
         for f in entry.files():
             file_size = f.file_size(repo.store.vfs)
             if file_size:
                 ft = _fileappend
                 if f.is_volatile:
                     ft = _filefull
-                entries.append((_srcstore, f.unencoded_path, ft, file_size))
+                files.append((_srcstore, f.unencoded_path, ft, file_size))
                 totalfilesize += file_size
-    if includeobsmarkers and repo.svfs.exists(b'obsstore'):
-        totalfilesize += repo.svfs.lstat(b'obsstore').st_size
-        entries.append((_srcstore, b'obsstore', _filefull, None))
     for name in cacheutil.cachetocopy(repo):
         if repo.cachevfs.exists(name):
             totalfilesize += repo.cachevfs.lstat(name).st_size
-            entries.append((_srccache, name, _filefull, None))
-    return entries, totalfilesize
+            files.append((_srccache, name, _filefull, None))
+    return files, totalfilesize
 
 
 def generatev2(repo, includes, excludes, includeobsmarkers):