mercurial/transaction.py
branch stable
changeset 50340 4c1061b3e55a
parent 50339 86dc9e097bed
child 50447 392e2f31444a
child 50452 a445194f0a4d
--- a/mercurial/transaction.py	Wed Mar 15 13:20:12 2023 +0100
+++ b/mercurial/transaction.py	Wed Mar 15 14:29:37 2023 +0100
@@ -105,6 +105,11 @@
     unlink=True,
     checkambigfiles=None,
 ):
+    """rollback a transaction :
+    - truncate files that have been appended to
+    - restore file backups
+    - delete temporary files
+    """
     backupfiles = []
 
     def restore_one_backup(vfs, f, b, checkambig):
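For orientation, a hedged sketch (not part of the patch) of the two structures this rollback walks over; the field layout follows the tuple unpacking in the hunks below, while the concrete values are made up:

    # `entries` maps each file to its pre-transaction offset; rollback truncates
    # the file back to that offset (0 means the file had no prior content).
    entries = [(b'data/foo.i', 1024)]  # hypothetical path and offset
    # `backupentries` holds (location, file, backup_path, cache_flag) tuples;
    # rollback restores `file` from `backup_path` when both paths are set.
    backupentries = [(b'', b'data/foo.i', b'journal.backup.foo.i', False)]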
@@ -118,7 +123,30 @@
             report(_(b"failed to recover %s (%s)\n") % (f, e_msg))
             raise
 
+    # gather all backup files that impact the store
+    # (we need this to detect files that are both backed up and truncated)
+    store_backup = {}
+    for entry in backupentries:
+        location, file_path, backup_path, cache = entry
+        vfs = vfsmap[location]
+        is_store = vfs.join(b'') == opener.join(b'')
+        if is_store and file_path and backup_path:
+            store_backup[file_path] = entry
+    copy_done = set()
+
+    # truncate each file `f` back to offset `o`
     for f, o in sorted(dict(entries).items()):
+        # if we have a backup for `f`, we should restore it first and truncate
+        # the restored file
+        bck_entry = store_backup.get(f)
+        if bck_entry is not None:
+            location, file_path, backup_path, cache = bck_entry
+            checkambig = False
+            if checkambigfiles:
+                checkambig = (file_path, location) in checkambigfiles
+            restore_one_backup(opener, file_path, backup_path, checkambig)
+            copy_done.add(bck_entry)
+        # truncate the file to its pre-transaction size
         if o or not unlink:
             checkambig = checkambigfiles and (f, b'') in checkambigfiles
             try:
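A minimal standalone sketch of the restore-then-truncate order established above, using plain `os`/`shutil` in place of the real vfs objects; the helper name and paths are illustrative only:

    import os
    import shutil

    def restore_then_truncate(path, backup_path, offset):
        # restore the backed up content first, so the truncation below operates
        # on pre-transaction data rather than on the appended file
        if backup_path and os.path.exists(backup_path):
            shutil.copyfile(backup_path, path)
        if offset:
            # cut the file back to its pre-transaction size
            with open(path, 'ab') as fp:
                fp.truncate(offset)
        else:
            # the file had no pre-transaction content: remove it
            try:
                os.unlink(path)
            except FileNotFoundError:
                pass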
@@ -137,12 +165,16 @@
                 report(_(b"failed to truncate %s\n") % f)
                 raise
         else:
+            # the file had no pre-transaction content: delete it
             try:
                 opener.unlink(f)
             except FileNotFoundError:
                 pass
-
-    for l, f, b, c in backupentries:
+    # restore backed up files and clean up temporary files
+    for entry in backupentries:
+        if entry in copy_done:
+            continue
+        l, f, b, c = entry
         if l not in vfsmap and c:
             report(b"couldn't handle %s: unknown cache location %s\n" % (b, l))
         vfs = vfsmap[l]
@@ -170,6 +202,7 @@
             if not c:
                 raise
 
+    # clean up the transaction state file and the backups file
     backuppath = b"%s.backupfiles" % journal
     if opener.exists(backuppath):
         opener.unlink(backuppath)
@@ -346,7 +379,7 @@
         self._file.flush()
 
     @active
-    def addbackup(self, file, hardlink=True, location=b''):
+    def addbackup(self, file, hardlink=True, location=b'', for_offset=False):
         """Adds a backup of the file to the transaction
 
         Calling addbackup() creates a hardlink backup of the specified file
@@ -355,17 +388,25 @@
 
         * `file`: the file path, relative to .hg/store
         * `hardlink`: use a hardlink to quickly create the backup
+
+        If `for_offset` is set, we expect an offset for this file to have been previously recorded
         """
         if self._queue:
             msg = b'cannot use transaction.addbackup inside "group"'
             raise error.ProgrammingError(msg)
 
-        if (
-            file in self._newfiles
-            or file in self._offsetmap
-            or file in self._backupmap
-        ):
+        if file in self._newfiles or file in self._backupmap:
+            return
+        elif file in self._offsetmap and not for_offset:
             return
+        elif for_offset and file not in self._offsetmap:
+            msg = (
+                'calling `addbackup` with `for_offset=True`, '
+                'but no offset recorded: [%r] %r'
+            )
+            msg %= (location, file)
+            raise error.ProgrammingError(msg)
+
         vfs = self._vfsmap[location]
         dirname, filename = vfs.split(file)
         backupfilename = b"%s.backup.%s" % (self._journal, filename)