hgext/remotefilelog/basestore.py
changeset 43077 687b865b95ad
parent 43076 2372284d9457
child 43085 eef9a2d67051
--- a/hgext/remotefilelog/basestore.py	Sun Oct 06 09:45:02 2019 -0400
+++ b/hgext/remotefilelog/basestore.py	Sun Oct 06 09:48:39 2019 -0400
@@ -39,14 +39,14 @@
         self._uid = os.getuid() if not pycompat.iswindows else None
 
         self._validatecachelog = self.ui.config(
-            "remotefilelog", "validatecachelog"
+            b"remotefilelog", b"validatecachelog"
         )
         self._validatecache = self.ui.config(
-            "remotefilelog", "validatecache", 'on'
+            b"remotefilelog", b"validatecache", b'on'
         )
-        if self._validatecache not in ('on', 'strict', 'off'):
-            self._validatecache = 'on'
-        if self._validatecache == 'off':
+        if self._validatecache not in (b'on', b'strict', b'off'):
+            self._validatecache = b'on'
+        if self._validatecache == b'off':
             self._validatecache = False
 
         if shared:
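
Why the prefixes matter in this hunk (and in the b'strict' check below): Python 3 never treats str and bytes as equal, and Mercurial's ui.config() hands back bytes, so without the b'' literals the membership test would quietly always fail. A minimal sketch, not part of the changeset:

    value = b'strict'                    # ui.config() returns bytes in Mercurial
    value in ('on', 'strict', 'off')     # Python 3: False, bytes never equal str
    value in (b'on', b'strict', b'off')  # True: the check this hunk preserves
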
@@ -59,8 +59,8 @@
             exists = os.path.exists(filepath)
             if (
                 exists
-                and self._validatecache == 'strict'
-                and not self._validatekey(filepath, 'contains')
+                and self._validatecache == b'strict'
+                and not self._validatekey(filepath, b'contains')
             ):
                 exists = False
             if not exists:
@@ -84,7 +84,7 @@
         entries = ledger.sources.get(self, [])
         count = 0
         progress = ui.makeprogress(
-            _("cleaning up"), unit="files", total=len(entries)
+            _(b"cleaning up"), unit=b"files", total=len(entries)
         )
         for entry in entries:
             if entry.gced or (entry.datarepacked and entry.historyrepacked):
@@ -121,7 +121,7 @@
                     pass
 
             elif stat.S_ISREG(mode):
-                if name.endswith('_old'):
+                if name.endswith(b'_old'):
                     oldfiles.add(name[:-4])
                 else:
                     otherfiles.add(name)
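
This rewrite is behavior-preserving on both Python versions because bytes objects support the same endswith and slicing operations as str. Editorial sketch, with a hypothetical entry name:

    name = b'deadbeef_old'    # hypothetical cache entry name
    name.endswith(b'_old')    # True: bytes methods mirror their str twins
    name[:-4]                 # b'deadbeef', slicing bytes yields bytes
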
@@ -130,7 +130,7 @@
         # corresponding file without the suffix '_old'. See addremotefilelognode
         # method for the generation/purpose of files with '_old' suffix.
         for filename in oldfiles - otherfiles:
-            filepath = os.path.join(rootdir, filename + '_old')
+            filepath = os.path.join(rootdir, filename + b'_old')
             util.tryunlink(filepath)
 
     def _getfiles(self):
@@ -164,7 +164,7 @@
         missingfilename = set(hashes)
 
         # Start with a full manifest, since it'll cover the majority of files
-        for filename in self.repo['tip'].manifest():
+        for filename in self.repo[b'tip'].manifest():
             sha = hashlib.sha1(filename).digest()
             if sha in missingfilename:
                 filenames[filename] = sha
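
Revision symbols such as tip are bytes inside Mercurial, hence repo[b'tip']; the sha1 call needed no change because manifest filenames are already bytes, and hashlib insists on that under Python 3. Sketch:

    import hashlib
    hashlib.sha1(b'dir/file.txt').digest()  # ok: input is bytes
    # hashlib.sha1(u'dir/file.txt')         # Python 3: TypeError, must encode first
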
@@ -224,13 +224,13 @@
             data = shallowutil.readfile(filepath)
             if self._validatecache and not self._validatedata(data, filepath):
                 if self._validatecachelog:
-                    with open(self._validatecachelog, 'a+') as f:
-                        f.write("corrupt %s during read\n" % filepath)
-                os.rename(filepath, filepath + ".corrupt")
-                raise KeyError("corrupt local cache file %s" % filepath)
+                    with open(self._validatecachelog, b'a+') as f:
+                        f.write(b"corrupt %s during read\n" % filepath)
+                os.rename(filepath, filepath + b".corrupt")
+                raise KeyError(b"corrupt local cache file %s" % filepath)
         except IOError:
             raise KeyError(
-                "no file found at %s for %s:%s" % (filepath, name, hex(node))
+                b"no file found at %s for %s:%s" % (filepath, name, hex(node))
             )
 
         return data
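
One wrinkle the mechanical rewrite leaves behind: the builtin open() on Python 3 rejects a bytes mode, and a file opened in text mode rejects bytes writes, so the b'a+' call above only works through a compatibility shim plus a binary mode (compare the b'ab+' used in _validatekey further down). A sketch, assuming Mercurial's pycompat module provides an open() shim that stringifies the mode:

    # open('log', b'a+')                  # Python 3: TypeError, mode must be str
    from mercurial.pycompat import open   # assumed shim that converts the mode
    with open(b'/tmp/validate.log', b'ab+') as f:   # hypothetical log path
        f.write(b"corrupt entry\n")                 # binary mode accepts bytes
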
@@ -243,7 +243,7 @@
             # if this node already exists, save the old version for
             # recovery/debugging purposes.
             if os.path.exists(filepath):
-                newfilename = filepath + '_old'
+                newfilename = filepath + b'_old'
                 # newfilename can be read-only and shutil.copy will fail.
                 # Delete newfilename first to avoid that failure.
                 if os.path.exists(newfilename):
@@ -254,9 +254,9 @@
             shallowutil.writefile(filepath, data, readonly=True)
 
             if self._validatecache:
-                if not self._validatekey(filepath, 'write'):
+                if not self._validatekey(filepath, b'write'):
                     raise error.Abort(
-                        _("local cache write was corrupted %s") % filepath
+                        _(b"local cache write was corrupted %s") % filepath
                     )
         finally:
             os.umask(oldumask)
@@ -267,26 +267,26 @@
         collection, since it allows us to inspect the repos to see what nodes
         they want to be kept alive in the store.
         """
-        repospath = os.path.join(self._path, "repos")
-        with open(repospath, 'ab') as reposfile:
-            reposfile.write(os.path.dirname(path) + "\n")
+        repospath = os.path.join(self._path, b"repos")
+        with open(repospath, b'ab') as reposfile:
+            reposfile.write(os.path.dirname(path) + b"\n")
 
         repospathstat = os.stat(repospath)
         if repospathstat.st_uid == self._uid:
             os.chmod(repospath, 0o0664)
 
     def _validatekey(self, path, action):
-        with open(path, 'rb') as f:
+        with open(path, b'rb') as f:
             data = f.read()
 
         if self._validatedata(data, path):
             return True
 
         if self._validatecachelog:
-            with open(self._validatecachelog, 'ab+') as f:
-                f.write("corrupt %s during %s\n" % (path, action))
+            with open(self._validatecachelog, b'ab+') as f:
+                f.write(b"corrupt %s during %s\n" % (path, action))
 
-        os.rename(path, path + ".corrupt")
+        os.rename(path, path + b".corrupt")
         return False
 
     def _validatedata(self, data, path):
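
The os-level calls in this hunk need no shim: on POSIX, os.path.join, os.stat, os.rename, os.chmod and os.walk all accept bytes paths and return bytes. Editorial sketch using a throwaway directory:

    import os, tempfile
    d = tempfile.mkdtemp().encode()   # scratch directory, as a bytes path
    p = os.path.join(d, b'repos')     # bytes in, bytes out
    open(p, 'wb').close()
    os.rename(p, p + b'_old')         # os.* accept bytes paths on POSIX
    os.stat(p + b'_old').st_uid       # stat and chmod work on bytes paths too
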
@@ -327,16 +327,16 @@
         limit = time.time() - (60 * 60 * 24)
 
         progress = ui.makeprogress(
-            _("removing unnecessary files"), unit="files"
+            _(b"removing unnecessary files"), unit=b"files"
         )
         progress.update(0)
         for root, dirs, files in os.walk(cachepath):
             for file in files:
-                if file == 'repos':
+                if file == b'repos':
                     continue
 
                 # Don't delete pack files
-                if '/packs/' in root:
+                if b'/packs/' in root:
                     continue
 
                 progress.update(count)
@@ -349,7 +349,9 @@
                     # errno.ENOENT = no such file or directory
                     if e.errno != errno.ENOENT:
                         raise
-                    msg = _("warning: file %s was removed by another process\n")
+                    msg = _(
+                        b"warning: file %s was removed by another process\n"
+                    )
                     ui.warn(msg % path)
                     continue
 
@@ -366,8 +368,8 @@
                         if e.errno != errno.ENOENT:
                             raise
                         msg = _(
-                            "warning: file %s was removed by another "
-                            "process\n"
+                            b"warning: file %s was removed by another "
+                            b"process\n"
                         )
                         ui.warn(msg % path)
                         continue
@@ -375,11 +377,11 @@
         progress.complete()
 
         # remove oldest files until under limit
-        limit = ui.configbytes("remotefilelog", "cachelimit")
+        limit = ui.configbytes(b"remotefilelog", b"cachelimit")
         if size > limit:
             excess = size - limit
             progress = ui.makeprogress(
-                _("enforcing cache limit"), unit="bytes", total=excess
+                _(b"enforcing cache limit"), unit=b"bytes", total=excess
             )
             removedexcess = 0
             while queue and size > limit and size > 0:
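
Unlike plain config(), configbytes() parses a human-readable size (say b'10GB') into an integer byte count, so everything below is plain int arithmetic. Worked numbers, hypothetical:

    size = 12 * 1024**3    # 12 GiB currently in the cache
    limit = 10 * 1024**3   # what b'remotefilelog.cachelimit' might parse to
    excess = size - limit  # 2 GiB: the progress bar's total
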
@@ -391,7 +393,9 @@
                     # errno.ENOENT = no such file or directory
                     if e.errno != errno.ENOENT:
                         raise
-                    msg = _("warning: file %s was removed by another process\n")
+                    msg = _(
+                        b"warning: file %s was removed by another process\n"
+                    )
                     ui.warn(msg % oldpath)
                 size -= oldpathstat.st_size
                 removed += 1
@@ -399,7 +403,7 @@
             progress.complete()
 
         ui.status(
-            _("finished: removed %d of %d files (%0.2f GB to %0.2f GB)\n")
+            _(b"finished: removed %d of %d files (%0.2f GB to %0.2f GB)\n")
             % (
                 removed,
                 count,
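
Interpolating integers and floats into a bytes literal is valid here because PEP 461 (Python 3.5+) restored %-formatting for bytes, numeric codes included:

    b"finished: removed %d of %d files (%0.2f GB to %0.2f GB)\n" % (3, 10, 11.99, 9.99)
    # -> b'finished: removed 3 of 10 files (11.99 GB to 9.99 GB)\n'
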
@@ -422,7 +426,7 @@
 
     def markforrefresh(self):
         for store in self.stores:
-            if util.safehasattr(store, 'markforrefresh'):
+            if util.safehasattr(store, b'markforrefresh'):
                 store.markforrefresh()
 
     @staticmethod
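
Here the mechanical rewrite overshoots: attribute names stay native str on Python 3, so a bytes literal makes the getattr() underneath safehasattr raise instead of returning False. A sketch of the failure mode:

    class Store(object):
        def markforrefresh(self):
            pass

    hasattr(Store(), 'markforrefresh')     # True: str attribute name
    # hasattr(Store(), b'markforrefresh')  # Python 3: TypeError,
    #                                      #   attribute name must be string
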
@@ -436,7 +440,7 @@
             i = 0
             while i < self.numattempts:
                 if i > 0:
-                    retrylog('re-attempting (n=%d) %s\n' % (i, funcname))
+                    retrylog(b're-attempting (n=%d) %s\n' % (i, funcname))
                     self.markforrefresh()
                 i += 1
                 try:
@@ -445,7 +449,7 @@
                     if i == self.numattempts:
                         # retries exhausted
                         retrylog(
-                            'retries exhausted in %s, raising KeyError\n'
+                            b'retries exhausted in %s, raising KeyError\n'
                             % pycompat.sysbytes(funcname)
                         )
                         raise
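
funcname is a function's __name__, a native str on Python 3, so it must be bridged to bytes before %s-interpolation into a bytes literal; pycompat.sysbytes does that and is a no-op on Python 2. Note that the re-attempt message in the previous hunk interpolates funcname without the bridge, which would raise on Python 3 if a retry ever fires. Simplified shape of the helper, editorial:

    def sysbytes(s):              # simplified sketch of pycompat.sysbytes
        return s.encode('utf-8')  # native str -> bytes on Python 3

    b'retries exhausted in %s, raising KeyError\n' % sysbytes('getfile')
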