changeset 43077:687b865b95ad
parent    43076:2372284d9457
child     43089:c59eb1560c44

comparison: 43076:2372284d9457 (old line) | 43077:687b865b95ad (new line) — each changed line is shown as the old version followed by the new version
109 """filecache for files in .hg but outside of .hg/store""" |
109 """filecache for files in .hg but outside of .hg/store""" |
110 |
110 |
111 def __init__(self, *paths): |
111 def __init__(self, *paths): |
112 super(repofilecache, self).__init__(*paths) |
112 super(repofilecache, self).__init__(*paths) |
113 for path in paths: |
113 for path in paths: |
114 _cachedfiles.add((path, 'plain')) |
114 _cachedfiles.add((path, b'plain')) |
115 |
115 |
116 def join(self, obj, fname): |
116 def join(self, obj, fname): |
117 return obj.vfs.join(fname) |
117 return obj.vfs.join(fname) |
118 |
118 |
119 |
119 |
121 """filecache for files in the store""" |
121 """filecache for files in the store""" |
122 |
122 |
123 def __init__(self, *paths): |
123 def __init__(self, *paths): |
124 super(storecache, self).__init__(*paths) |
124 super(storecache, self).__init__(*paths) |
125 for path in paths: |
125 for path in paths: |
126 _cachedfiles.add((path, '')) |
126 _cachedfiles.add((path, b'')) |
127 |
127 |
128 def join(self, obj, fname): |
128 def join(self, obj, fname): |
129 return obj.sjoin(fname) |
129 return obj.sjoin(fname) |
130 |
130 |
131 |
131 |
138 super(mixedrepostorecache, self).__init__(*pathsandlocations) |
138 super(mixedrepostorecache, self).__init__(*pathsandlocations) |
139 _cachedfiles.update(pathsandlocations) |
139 _cachedfiles.update(pathsandlocations) |
140 |
140 |
141 def join(self, obj, fnameandlocation): |
141 def join(self, obj, fnameandlocation): |
142 fname, location = fnameandlocation |
142 fname, location = fnameandlocation |
143 if location == 'plain': |
143 if location == b'plain': |
144 return obj.vfs.join(fname) |
144 return obj.vfs.join(fname) |
145 else: |
145 else: |
146 if location != '': |
146 if location != b'': |
147 raise error.ProgrammingError( |
147 raise error.ProgrammingError( |
148 'unexpected location: %s' % location |
148 b'unexpected location: %s' % location |
149 ) |
149 ) |
150 return obj.sjoin(fname) |
150 return obj.sjoin(fname) |
151 |
151 |
152 |
152 |
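The three decorator classes above differ only in how a cached name is turned into an on-disk path: repofilecache resolves against .hg/ (obj.vfs.join), storecache against .hg/store (obj.sjoin), and mixedrepostorecache picks one or the other from a (name, location) tuple where location is b'plain' or b''. As a minimal sketch, mirroring usages that appear later in this diff (the property body is illustrative only), such a decorator is applied to a repository property like this:

    @storecache(b'00changelog.i')
    def changelog(self):
        # recomputed only when .hg/store/00changelog.i changes on disk
        return self.store.changelog(txnutil.mayhavepending(self.root))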
153 def isfilecached(repo, name): |
153 def isfilecached(repo, name): |
191 |
191 |
192 return wrapper |
192 return wrapper |
193 |
193 |
194 |
194 |
195 moderncaps = { |
195 moderncaps = { |
196 'lookup', |
196 b'lookup', |
197 'branchmap', |
197 b'branchmap', |
198 'pushkey', |
198 b'pushkey', |
199 'known', |
199 b'known', |
200 'getbundle', |
200 b'getbundle', |
201 'unbundle', |
201 b'unbundle', |
202 } |
202 } |
203 legacycaps = moderncaps.union({'changegroupsubset'}) |
203 legacycaps = moderncaps.union({b'changegroupsubset'}) |
204 |
204 |
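These are the wire-protocol capability names a modern local peer advertises; legacycaps simply adds b'changegroupsubset' for pre-getbundle clients. A rough sketch of how callers gate on them (peer stands for any object implementing the peer interface; the branches are illustrative):

    if b'getbundle' in peer.capabilities():
        ...  # modern exchange path
    elif b'changegroupsubset' in peer.capabilities():
        ...  # legacy fallback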
205 |
205 |
206 @interfaceutil.implementer(repository.ipeercommandexecutor) |
206 @interfaceutil.implementer(repository.ipeercommandexecutor) |
207 class localcommandexecutor(object): |
207 class localcommandexecutor(object): |
208 def __init__(self, peer): |
208 def __init__(self, peer): |
217 self.close() |
217 self.close() |
218 |
218 |
219 def callcommand(self, command, args): |
219 def callcommand(self, command, args): |
220 if self._sent: |
220 if self._sent: |
221 raise error.ProgrammingError( |
221 raise error.ProgrammingError( |
222 'callcommand() cannot be used after ' 'sendcommands()' |
222 b'callcommand() cannot be used after ' b'sendcommands()' |
223 ) |
223 ) |
224 |
224 |
225 if self._closed: |
225 if self._closed: |
226 raise error.ProgrammingError( |
226 raise error.ProgrammingError( |
227 'callcommand() cannot be used after ' 'close()' |
227 b'callcommand() cannot be used after ' b'close()' |
228 ) |
228 ) |
229 |
229 |
230 # We don't need to support anything fancy. Just call the named |
230 # We don't need to support anything fancy. Just call the named |
231 # method on the peer and return a resolved future. |
231 # method on the peer and return a resolved future. |
232 fn = getattr(self._peer, pycompat.sysstr(command)) |
232 fn = getattr(self._peer, pycompat.sysstr(command)) |
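callcommand() mirrors the wire-protocol executor API for an in-process peer: the command name is resolved to a method on the peer and the result comes back as an already-resolved future. A hedged usage sketch, assuming a peer obtained from repo.peer() or hg.peer() (b'heads' is one of the commands covered by moderncaps above):

    with peer.commandexecutor() as executor:
        fut = executor.callcommand(b'heads', {})
    heads = fut.result()  # already resolved; no extra round trip for a local peer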
256 def __init__(self, repo, caps=None): |
256 def __init__(self, repo, caps=None): |
257 super(localpeer, self).__init__() |
257 super(localpeer, self).__init__() |
258 |
258 |
259 if caps is None: |
259 if caps is None: |
260 caps = moderncaps.copy() |
260 caps = moderncaps.copy() |
261 self._repo = repo.filtered('served') |
261 self._repo = repo.filtered(b'served') |
262 self.ui = repo.ui |
262 self.ui = repo.ui |
263 self._caps = repo._restrictcapabilities(caps) |
263 self._caps = repo._restrictcapabilities(caps) |
264 |
264 |
265 # Begin of _basepeer interface. |
265 # Begin of _basepeer interface. |
266 |
266 |
288 |
288 |
289 def capabilities(self): |
289 def capabilities(self): |
290 return self._caps |
290 return self._caps |
291 |
291 |
292 def clonebundles(self): |
292 def clonebundles(self): |
293 return self._repo.tryread('clonebundles.manifest') |
293 return self._repo.tryread(b'clonebundles.manifest') |
294 |
294 |
295 def debugwireargs(self, one, two, three=None, four=None, five=None): |
295 def debugwireargs(self, one, two, three=None, four=None, five=None): |
296 """Used to test argument passing over the wire""" |
296 """Used to test argument passing over the wire""" |
297 return "%s %s %s %s %s" % ( |
297 return b"%s %s %s %s %s" % ( |
298 one, |
298 one, |
299 two, |
299 two, |
300 pycompat.bytestr(three), |
300 pycompat.bytestr(three), |
301 pycompat.bytestr(four), |
301 pycompat.bytestr(four), |
302 pycompat.bytestr(five), |
302 pycompat.bytestr(five), |
319 # When requesting a bundle2, getbundle returns a stream to make the |
319 # When requesting a bundle2, getbundle returns a stream to make the |
320 # wire level function happier. We need to build a proper object |
320 # wire level function happier. We need to build a proper object |
321 # from it in local peer. |
321 # from it in local peer. |
322 return bundle2.getunbundler(self.ui, cb) |
322 return bundle2.getunbundler(self.ui, cb) |
323 else: |
323 else: |
324 return changegroup.getunbundler('01', cb, None) |
324 return changegroup.getunbundler(b'01', cb, None) |
325 |
325 |
326 def heads(self): |
326 def heads(self): |
327 return self._repo.heads() |
327 return self._repo.heads() |
328 |
328 |
329 def known(self, nodes): |
329 def known(self, nodes): |
338 def pushkey(self, namespace, key, old, new): |
338 def pushkey(self, namespace, key, old, new): |
339 return self._repo.pushkey(namespace, key, old, new) |
339 return self._repo.pushkey(namespace, key, old, new) |
340 |
340 |
341 def stream_out(self): |
341 def stream_out(self): |
342 raise error.Abort( |
342 raise error.Abort( |
343 _('cannot perform stream clone against local ' 'peer') |
343 _(b'cannot perform stream clone against local ' b'peer') |
344 ) |
344 ) |
345 |
345 |
346 def unbundle(self, bundle, heads, url): |
346 def unbundle(self, bundle, heads, url): |
347 """apply a bundle on a repo |
347 """apply a bundle on a repo |
348 |
348 |
349 This function handles the repo locking itself.""" |
349 This function handles the repo locking itself.""" |
350 try: |
350 try: |
351 try: |
351 try: |
352 bundle = exchange.readbundle(self.ui, bundle, None) |
352 bundle = exchange.readbundle(self.ui, bundle, None) |
353 ret = exchange.unbundle(self._repo, bundle, heads, 'push', url) |
353 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url) |
354 if util.safehasattr(ret, 'getchunks'): |
354 if util.safehasattr(ret, b'getchunks'): |
355 # This is a bundle20 object, turn it into an unbundler. |
355 # This is a bundle20 object, turn it into an unbundler. |
356 # This little dance should be dropped eventually when the |
356 # This little dance should be dropped eventually when the |
357 # API is finally improved. |
357 # API is finally improved. |
358 stream = util.chunkbuffer(ret.getchunks()) |
358 stream = util.chunkbuffer(ret.getchunks()) |
359 ret = bundle2.getunbundler(self.ui, stream) |
359 ret = bundle2.getunbundler(self.ui, stream) |
375 b = bundle2.getunbundler(self.ui, stream) |
375 b = bundle2.getunbundler(self.ui, stream) |
376 bundle2.processbundle(self._repo, b) |
376 bundle2.processbundle(self._repo, b) |
377 raise |
377 raise |
378 except error.PushRaced as exc: |
378 except error.PushRaced as exc: |
379 raise error.ResponseError( |
379 raise error.ResponseError( |
380 _('push failed:'), stringutil.forcebytestr(exc) |
380 _(b'push failed:'), stringutil.forcebytestr(exc) |
381 ) |
381 ) |
382 |
382 |
383 # End of _basewirecommands interface. |
383 # End of _basewirecommands interface. |
384 |
384 |
385 # Begin of peer interface. |
385 # Begin of peer interface. |
408 |
408 |
409 def changegroup(self, nodes, source): |
409 def changegroup(self, nodes, source): |
410 outgoing = discovery.outgoing( |
410 outgoing = discovery.outgoing( |
411 self._repo, missingroots=nodes, missingheads=self._repo.heads() |
411 self._repo, missingroots=nodes, missingheads=self._repo.heads() |
412 ) |
412 ) |
413 return changegroup.makechangegroup(self._repo, outgoing, '01', source) |
413 return changegroup.makechangegroup(self._repo, outgoing, b'01', source) |
414 |
414 |
415 def changegroupsubset(self, bases, heads, source): |
415 def changegroupsubset(self, bases, heads, source): |
416 outgoing = discovery.outgoing( |
416 outgoing = discovery.outgoing( |
417 self._repo, missingroots=bases, missingheads=heads |
417 self._repo, missingroots=bases, missingheads=heads |
418 ) |
418 ) |
419 return changegroup.makechangegroup(self._repo, outgoing, '01', source) |
419 return changegroup.makechangegroup(self._repo, outgoing, b'01', source) |
420 |
420 |
421 # End of baselegacywirecommands interface. |
421 # End of baselegacywirecommands interface. |
422 |
422 |
423 |
423 |
424 # Increment the sub-version when the revlog v2 format changes to lock out old |
424 # Increment the sub-version when the revlog v2 format changes to lock out old |
425 # clients. |
425 # clients. |
426 REVLOGV2_REQUIREMENT = 'exp-revlogv2.1' |
426 REVLOGV2_REQUIREMENT = b'exp-revlogv2.1' |
427 |
427 |
428 # A repository with the sparserevlog feature will have delta chains that |
428 # A repository with the sparserevlog feature will have delta chains that |
429 # can spread over a larger span. Sparse reading cuts these large spans into |
429 # can spread over a larger span. Sparse reading cuts these large spans into |
430 # pieces, so that each piece isn't too big. |
430 # pieces, so that each piece isn't too big. |
431 # Without the sparserevlog capability, reading from the repository could use |
431 # Without the sparserevlog capability, reading from the repository could use |
432 # huge amounts of memory, because the whole span would be read at once, |
432 # huge amounts of memory, because the whole span would be read at once, |
433 # including all the intermediate revisions that aren't pertinent for the chain. |
433 # including all the intermediate revisions that aren't pertinent for the chain. |
434 # This is why once a repository has enabled sparse-read, it becomes required. |
434 # This is why once a repository has enabled sparse-read, it becomes required. |
435 SPARSEREVLOG_REQUIREMENT = 'sparserevlog' |
435 SPARSEREVLOG_REQUIREMENT = b'sparserevlog' |
436 |
436 |
437 # A repository with the sidedataflag requirement will allow storing extra |
437 # A repository with the sidedataflag requirement will allow storing extra |
438 # information for revisions without altering their original hashes. |
438 # information for revisions without altering their original hashes. |
439 SIDEDATA_REQUIREMENT = 'exp-sidedata-flag' |
439 SIDEDATA_REQUIREMENT = b'exp-sidedata-flag' |
440 |
440 |
441 # Functions receiving (ui, features) that extensions can register to impact |
441 # Functions receiving (ui, features) that extensions can register to impact |
442 # the ability to load repositories with custom requirements. Only |
442 # the ability to load repositories with custom requirements. Only |
443 # functions defined in loaded extensions are called. |
443 # functions defined in loaded extensions are called. |
444 # |
444 # |
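The comment above describes the hook point that lets loaded extensions declare support for extra requirements. A hedged sketch of the usual registration pattern, assuming the featuresetupfuncs set that localrepo keeps for this purpose (the requirement name is invented for illustration):

    from mercurial import localrepo

    def featuresetup(ui, features):
        # advertise support for a hypothetical extension-specific requirement
        features.add(b'exp-myextension-feature')

    def uisetup(ui):
        localrepo.featuresetupfuncs.add(featuresetup)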
625 baseclasses=bases, |
625 baseclasses=bases, |
626 ) |
626 ) |
627 |
627 |
628 if not isinstance(typ, type): |
628 if not isinstance(typ, type): |
629 raise error.ProgrammingError( |
629 raise error.ProgrammingError( |
630 'unable to construct type for %s' % iface |
630 b'unable to construct type for %s' % iface |
631 ) |
631 ) |
632 |
632 |
633 bases.append(typ) |
633 bases.append(typ) |
634 |
634 |
635 # type() allows you to use characters in type names that wouldn't be |
635 # type() allows you to use characters in type names that wouldn't be |
698 if requirement not in requirements: |
698 if requirement not in requirements: |
699 continue |
699 continue |
700 |
700 |
701 for name in names: |
701 for name in names: |
702 if not ui.hasconfig(b'extensions', name): |
702 if not ui.hasconfig(b'extensions', name): |
703 ui.setconfig(b'extensions', name, b'', source='autoload') |
703 ui.setconfig(b'extensions', name, b'', source=b'autoload') |
704 |
704 |
705 |
705 |
706 def gathersupportedrequirements(ui): |
706 def gathersupportedrequirements(ui): |
707 """Determine the complete set of recognized requirements.""" |
707 """Determine the complete set of recognized requirements.""" |
708 # Start with all requirements supported by this file. |
708 # Start with all requirements supported by this file. |
719 # Add derived requirements from registered compression engines. |
719 # Add derived requirements from registered compression engines. |
720 for name in util.compengines: |
720 for name in util.compengines: |
721 engine = util.compengines[name] |
721 engine = util.compengines[name] |
722 if engine.available() and engine.revlogheader(): |
722 if engine.available() and engine.revlogheader(): |
723 supported.add(b'exp-compression-%s' % name) |
723 supported.add(b'exp-compression-%s' % name) |
724 if engine.name() == 'zstd': |
724 if engine.name() == b'zstd': |
725 supported.add(b'revlog-compression-zstd') |
725 supported.add(b'revlog-compression-zstd') |
726 |
726 |
727 return supported |
727 return supported |
728 |
728 |
729 |
729 |
815 # opener options for it because those options wouldn't do anything |
815 # opener options for it because those options wouldn't do anything |
816 # meaningful on such old repos. |
816 # meaningful on such old repos. |
817 if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements: |
817 if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements: |
818 options.update(resolverevlogstorevfsoptions(ui, requirements, features)) |
818 options.update(resolverevlogstorevfsoptions(ui, requirements, features)) |
819 else: # explicitly mark repo as using revlogv0 |
819 else: # explicitly mark repo as using revlogv0 |
820 options['revlogv0'] = True |
820 options[b'revlogv0'] = True |
821 |
821 |
822 writecopiesto = ui.config('experimental', 'copies.write-to') |
822 writecopiesto = ui.config(b'experimental', b'copies.write-to') |
823 copiesextramode = ('changeset-only', 'compatibility') |
823 copiesextramode = (b'changeset-only', b'compatibility') |
824 if writecopiesto in copiesextramode: |
824 if writecopiesto in copiesextramode: |
825 options['copies-storage'] = 'extra' |
825 options[b'copies-storage'] = b'extra' |
826 |
826 |
827 return options |
827 return options |
828 |
828 |
829 |
829 |
830 def resolverevlogstorevfsoptions(ui, requirements, features): |
830 def resolverevlogstorevfsoptions(ui, requirements, features): |
899 # we allow multiple compression engine requirements to co-exist because |
899 # we allow multiple compression engine requirements to co-exist because |
900 # strictly speaking, revlog seems to support mixed compression styles. |
900 # strictly speaking, revlog seems to support mixed compression styles. |
901 # |
901 # |
902 # The compression used for new entries will be "the last one" |
902 # The compression used for new entries will be "the last one" |
903 prefix = r.startswith |
903 prefix = r.startswith |
904 if prefix('revlog-compression-') or prefix('exp-compression-'): |
904 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'): |
905 options[b'compengine'] = r.split('-', 2)[2] |
905 options[b'compengine'] = r.split(b'-', 2)[2] |
906 |
906 |
907 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level') |
907 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level') |
908 if options[b'zlib.level'] is not None: |
908 if options[b'zlib.level'] is not None: |
909 if not (0 <= options[b'zlib.level'] <= 9): |
909 if not (0 <= options[b'zlib.level'] <= 9): |
910 msg = _('invalid value for `storage.revlog.zlib.level` config: %d') |
910 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d') |
911 raise error.Abort(msg % options[b'zlib.level']) |
911 raise error.Abort(msg % options[b'zlib.level']) |
912 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level') |
912 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level') |
913 if options[b'zstd.level'] is not None: |
913 if options[b'zstd.level'] is not None: |
914 if not (0 <= options[b'zstd.level'] <= 22): |
914 if not (0 <= options[b'zstd.level'] <= 22): |
915 msg = _('invalid value for `storage.revlog.zstd.level` config: %d') |
915 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d') |
916 raise error.Abort(msg % options[b'zstd.level']) |
916 raise error.Abort(msg % options[b'zstd.level']) |
917 |
917 |
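The two blocks above validate the per-engine compression levels read from configuration: zlib levels must fall in 0-9 and zstd levels in 0-22, otherwise loading the repository aborts. For reference, the corresponding hgrc settings look like this (the values are arbitrary examples inside the validated ranges):

    [storage]
    revlog.zlib.level = 6
    revlog.zstd.level = 3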
918 if repository.NARROW_REQUIREMENT in requirements: |
918 if repository.NARROW_REQUIREMENT in requirements: |
919 options[b'enableellipsis'] = True |
919 options[b'enableellipsis'] = True |
920 |
920 |
990 # - manifestv2: An experimental new manifest format that allowed |
990 # - manifestv2: An experimental new manifest format that allowed |
991 # for stem compression of long paths. Experiment ended up not |
991 # for stem compression of long paths. Experiment ended up not |
992 # being successful (repository sizes went up due to worse delta |
992 # being successful (repository sizes went up due to worse delta |
993 # chains), and the code was deleted in 4.6. |
993 # chains), and the code was deleted in 4.6. |
994 supportedformats = { |
994 supportedformats = { |
995 'revlogv1', |
995 b'revlogv1', |
996 'generaldelta', |
996 b'generaldelta', |
997 'treemanifest', |
997 b'treemanifest', |
998 REVLOGV2_REQUIREMENT, |
998 REVLOGV2_REQUIREMENT, |
999 SIDEDATA_REQUIREMENT, |
999 SIDEDATA_REQUIREMENT, |
1000 SPARSEREVLOG_REQUIREMENT, |
1000 SPARSEREVLOG_REQUIREMENT, |
1001 bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT, |
1001 bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT, |
1002 } |
1002 } |
1003 _basesupported = supportedformats | { |
1003 _basesupported = supportedformats | { |
1004 'store', |
1004 b'store', |
1005 'fncache', |
1005 b'fncache', |
1006 'shared', |
1006 b'shared', |
1007 'relshared', |
1007 b'relshared', |
1008 'dotencode', |
1008 b'dotencode', |
1009 'exp-sparse', |
1009 b'exp-sparse', |
1010 'internal-phase', |
1010 b'internal-phase', |
1011 } |
1011 } |
1012 |
1012 |
1013 # list of prefixes for files which can be written without 'wlock' |
1013 # list of prefixes for files which can be written without 'wlock' |
1014 # Extensions should extend this list when needed |
1014 # Extensions should extend this list when needed |
1015 _wlockfreeprefix = { |
1015 _wlockfreeprefix = { |
1016 # We might consider requiring 'wlock' for the next |
1016 # We might consider requiring 'wlock' for the next |
1017 # two, but pretty much all the existing code assumes |
1017 # two, but pretty much all the existing code assumes |
1018 # wlock is not needed so we keep them excluded for |
1018 # wlock is not needed so we keep them excluded for |
1019 # now. |
1019 # now. |
1020 'hgrc', |
1020 b'hgrc', |
1021 'requires', |
1021 b'requires', |
1022 # XXX cache is a complicated business; someone |
1022 # XXX cache is a complicated business; someone |
1023 # should investigate this in depth at some point |
1023 # should investigate this in depth at some point |
1024 'cache/', |
1024 b'cache/', |
1025 # XXX shouldn't be dirstate covered by the wlock? |
1025 # XXX shouldn't be dirstate covered by the wlock? |
1026 'dirstate', |
1026 b'dirstate', |
1027 # XXX bisect was still a bit too messy at the time |
1027 # XXX bisect was still a bit too messy at the time |
1028 # this changeset was introduced. Someone should fix |
1028 # this changeset was introduced. Someone should fix |
1029 # the remaining bit and drop this line |
1029 # the remaining bit and drop this line |
1030 'bisect.state', |
1030 b'bisect.state', |
1031 } |
1031 } |
1032 |
1032 |
1033 def __init__( |
1033 def __init__( |
1034 self, |
1034 self, |
1035 baseui, |
1035 baseui, |
1115 self.wcachevfs = wcachevfs |
1115 self.wcachevfs = wcachevfs |
1116 self.features = features |
1116 self.features = features |
1117 |
1117 |
1118 self.filtername = None |
1118 self.filtername = None |
1119 |
1119 |
1120 if self.ui.configbool('devel', 'all-warnings') or self.ui.configbool( |
1120 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool( |
1121 'devel', 'check-locks' |
1121 b'devel', b'check-locks' |
1122 ): |
1122 ): |
1123 self.vfs.audit = self._getvfsward(self.vfs.audit) |
1123 self.vfs.audit = self._getvfsward(self.vfs.audit) |
1124 # A list of callbacks to shape the phase if no data were found. |
1124 # A list of callbacks to shape the phase if no data were found. |
1125 # Callbacks are in the form: func(repo, roots) --> processed root. |
1125 # Callbacks are in the form: func(repo, roots) --> processed root. |
1126 # This list is to be filled by extensions during repo setup |
1126 # This list is to be filled by extensions during repo setup |
1129 color.setup(self.ui) |
1129 color.setup(self.ui) |
1130 |
1130 |
1131 self.spath = self.store.path |
1131 self.spath = self.store.path |
1132 self.svfs = self.store.vfs |
1132 self.svfs = self.store.vfs |
1133 self.sjoin = self.store.join |
1133 self.sjoin = self.store.join |
1134 if self.ui.configbool('devel', 'all-warnings') or self.ui.configbool( |
1134 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool( |
1135 'devel', 'check-locks' |
1135 b'devel', b'check-locks' |
1136 ): |
1136 ): |
1137 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs |
1137 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs |
1138 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit) |
1138 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit) |
1139 else: # standard vfs |
1139 else: # standard vfs |
1140 self.svfs.audit = self._getsvfsward(self.svfs.audit) |
1140 self.svfs.audit = self._getsvfsward(self.svfs.audit) |
1141 |
1141 |
1142 self._dirstatevalidatewarned = False |
1142 self._dirstatevalidatewarned = False |
1182 def checkvfs(path, mode=None): |
1182 def checkvfs(path, mode=None): |
1183 ret = origfunc(path, mode=mode) |
1183 ret = origfunc(path, mode=mode) |
1184 repo = rref() |
1184 repo = rref() |
1185 if ( |
1185 if ( |
1186 repo is None |
1186 repo is None |
1187 or not util.safehasattr(repo, '_wlockref') |
1187 or not util.safehasattr(repo, b'_wlockref') |
1188 or not util.safehasattr(repo, '_lockref') |
1188 or not util.safehasattr(repo, b'_lockref') |
1189 ): |
1189 ): |
1190 return |
1190 return |
1191 if mode in (None, 'r', 'rb'): |
1191 if mode in (None, b'r', b'rb'): |
1192 return |
1192 return |
1193 if path.startswith(repo.path): |
1193 if path.startswith(repo.path): |
1194 # truncate name relative to the repository (.hg) |
1194 # truncate name relative to the repository (.hg) |
1195 path = path[len(repo.path) + 1 :] |
1195 path = path[len(repo.path) + 1 :] |
1196 if path.startswith('cache/'): |
1196 if path.startswith(b'cache/'): |
1197 msg = 'accessing cache with vfs instead of cachevfs: "%s"' |
1197 msg = b'accessing cache with vfs instead of cachevfs: "%s"' |
1198 repo.ui.develwarn(msg % path, stacklevel=3, config="cache-vfs") |
1198 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs") |
1199 if path.startswith('journal.') or path.startswith('undo.'): |
1199 if path.startswith(b'journal.') or path.startswith(b'undo.'): |
1200 # journal is covered by 'lock' |
1200 # journal is covered by 'lock' |
1201 if repo._currentlock(repo._lockref) is None: |
1201 if repo._currentlock(repo._lockref) is None: |
1202 repo.ui.develwarn( |
1202 repo.ui.develwarn( |
1203 'write with no lock: "%s"' % path, |
1203 b'write with no lock: "%s"' % path, |
1204 stacklevel=3, |
1204 stacklevel=3, |
1205 config='check-locks', |
1205 config=b'check-locks', |
1206 ) |
1206 ) |
1207 elif repo._currentlock(repo._wlockref) is None: |
1207 elif repo._currentlock(repo._wlockref) is None: |
1208 # rest of vfs files are covered by 'wlock' |
1208 # rest of vfs files are covered by 'wlock' |
1209 # |
1209 # |
1210 # exclude special files |
1210 # exclude special files |
1211 for prefix in self._wlockfreeprefix: |
1211 for prefix in self._wlockfreeprefix: |
1212 if path.startswith(prefix): |
1212 if path.startswith(prefix): |
1213 return |
1213 return |
1214 repo.ui.develwarn( |
1214 repo.ui.develwarn( |
1215 'write with no wlock: "%s"' % path, |
1215 b'write with no wlock: "%s"' % path, |
1216 stacklevel=3, |
1216 stacklevel=3, |
1217 config='check-locks', |
1217 config=b'check-locks', |
1218 ) |
1218 ) |
1219 return ret |
1219 return ret |
1220 |
1220 |
1221 return checkvfs |
1221 return checkvfs |
1222 |
1222 |
1225 rref = weakref.ref(self) |
1225 rref = weakref.ref(self) |
1226 |
1226 |
1227 def checksvfs(path, mode=None): |
1227 def checksvfs(path, mode=None): |
1228 ret = origfunc(path, mode=mode) |
1228 ret = origfunc(path, mode=mode) |
1229 repo = rref() |
1229 repo = rref() |
1230 if repo is None or not util.safehasattr(repo, '_lockref'): |
1230 if repo is None or not util.safehasattr(repo, b'_lockref'): |
1231 return |
1231 return |
1232 if mode in (None, 'r', 'rb'): |
1232 if mode in (None, b'r', b'rb'): |
1233 return |
1233 return |
1234 if path.startswith(repo.sharedpath): |
1234 if path.startswith(repo.sharedpath): |
1235 # truncate name relative to the repository (.hg) |
1235 # truncate name relative to the repository (.hg) |
1236 path = path[len(repo.sharedpath) + 1 :] |
1236 path = path[len(repo.sharedpath) + 1 :] |
1237 if repo._currentlock(repo._lockref) is None: |
1237 if repo._currentlock(repo._lockref) is None: |
1238 repo.ui.develwarn( |
1238 repo.ui.develwarn( |
1239 'write with no lock: "%s"' % path, stacklevel=4 |
1239 b'write with no lock: "%s"' % path, stacklevel=4 |
1240 ) |
1240 ) |
1241 return ret |
1241 return ret |
1242 |
1242 |
1243 return checksvfs |
1243 return checksvfs |
1244 |
1244 |
1248 def _writecaches(self): |
1248 def _writecaches(self): |
1249 if self._revbranchcache: |
1249 if self._revbranchcache: |
1250 self._revbranchcache.write() |
1250 self._revbranchcache.write() |
1251 |
1251 |
1252 def _restrictcapabilities(self, caps): |
1252 def _restrictcapabilities(self, caps): |
1253 if self.ui.configbool('experimental', 'bundle2-advertise'): |
1253 if self.ui.configbool(b'experimental', b'bundle2-advertise'): |
1254 caps = set(caps) |
1254 caps = set(caps) |
1255 capsblob = bundle2.encodecaps( |
1255 capsblob = bundle2.encodecaps( |
1256 bundle2.getrepocaps(self, role='client') |
1256 bundle2.getrepocaps(self, role=b'client') |
1257 ) |
1257 ) |
1258 caps.add('bundle2=' + urlreq.quote(capsblob)) |
1258 caps.add(b'bundle2=' + urlreq.quote(capsblob)) |
1259 return caps |
1259 return caps |
1260 |
1260 |
1261 def _writerequirements(self): |
1261 def _writerequirements(self): |
1262 scmutil.writerequires(self.vfs, self.requirements) |
1262 scmutil.writerequires(self.vfs, self.requirements) |
1263 |
1263 |
1303 # since we want to prevent access to nested repositories on |
1303 # since we want to prevent access to nested repositories on |
1304 # the filesystem *now*. |
1304 # the filesystem *now*. |
1305 ctx = self[None] |
1305 ctx = self[None] |
1306 parts = util.splitpath(subpath) |
1306 parts = util.splitpath(subpath) |
1307 while parts: |
1307 while parts: |
1308 prefix = '/'.join(parts) |
1308 prefix = b'/'.join(parts) |
1309 if prefix in ctx.substate: |
1309 if prefix in ctx.substate: |
1310 if prefix == normsubpath: |
1310 if prefix == normsubpath: |
1311 return True |
1311 return True |
1312 else: |
1312 else: |
1313 sub = ctx.sub(prefix) |
1313 sub = ctx.sub(prefix) |
1335 example calling `repo.filtered("served")` will return a repoview using |
1335 example calling `repo.filtered("served")` will return a repoview using |
1336 the "served" view, regardless of the initial view used by `repo`. |
1336 the "served" view, regardless of the initial view used by `repo`. |
1337 |
1337 |
1338 In other words, there is always only one level of `repoview` "filtering". |
1338 In other words, there is always only one level of `repoview` "filtering". |
1339 """ |
1339 """ |
1340 if self._extrafilterid is not None and '%' not in name: |
1340 if self._extrafilterid is not None and b'%' not in name: |
1341 name = name + '%' + self._extrafilterid |
1341 name = name + b'%' + self._extrafilterid |
1342 |
1342 |
1343 cls = repoview.newtype(self.unfiltered().__class__) |
1343 cls = repoview.newtype(self.unfiltered().__class__) |
1344 return cls(self, name, visibilityexceptions) |
1344 return cls(self, name, visibilityexceptions) |
1345 |
1345 |
1346 @mixedrepostorecache( |
1346 @mixedrepostorecache( |
1347 ('bookmarks', 'plain'), |
1347 (b'bookmarks', b'plain'), |
1348 ('bookmarks.current', 'plain'), |
1348 (b'bookmarks.current', b'plain'), |
1349 ('bookmarks', ''), |
1349 (b'bookmarks', b''), |
1350 ('00changelog.i', ''), |
1350 (b'00changelog.i', b''), |
1351 ) |
1351 ) |
1352 def _bookmarks(self): |
1352 def _bookmarks(self): |
1353 # Since the multiple files involved in the transaction cannot be |
1353 # Since the multiple files involved in the transaction cannot be |
1354 # written atomically (with current repository format), there is a race |
1354 # written atomically (with current repository format), there is a race |
1355 # condition here. |
1355 # condition here. |
1401 self._refreshchangelog() |
1401 self._refreshchangelog() |
1402 return bookmarks.bmstore(self) |
1402 return bookmarks.bmstore(self) |
1403 |
1403 |
1404 def _refreshchangelog(self): |
1404 def _refreshchangelog(self): |
1405 """make sure the in memory changelog match the on-disk one""" |
1405 """make sure the in memory changelog match the on-disk one""" |
1406 if 'changelog' in vars(self) and self.currenttransaction() is None: |
1406 if b'changelog' in vars(self) and self.currenttransaction() is None: |
1407 del self.changelog |
1407 del self.changelog |
1408 |
1408 |
1409 @property |
1409 @property |
1410 def _activebookmark(self): |
1410 def _activebookmark(self): |
1411 return self._bookmarks.active |
1411 return self._bookmarks.active |
1412 |
1412 |
1413 # _phasesets depend on changelog. what we need is to call |
1413 # _phasesets depend on changelog. what we need is to call |
1414 # _phasecache.invalidate() if '00changelog.i' was changed, but it |
1414 # _phasecache.invalidate() if '00changelog.i' was changed, but it |
1415 # can't be easily expressed in filecache mechanism. |
1415 # can't be easily expressed in filecache mechanism. |
1416 @storecache('phaseroots', '00changelog.i') |
1416 @storecache(b'phaseroots', b'00changelog.i') |
1417 def _phasecache(self): |
1417 def _phasecache(self): |
1418 return phases.phasecache(self, self._phasedefaults) |
1418 return phases.phasecache(self, self._phasedefaults) |
1419 |
1419 |
1420 @storecache('obsstore') |
1420 @storecache(b'obsstore') |
1421 def obsstore(self): |
1421 def obsstore(self): |
1422 return obsolete.makestore(self.ui, self) |
1422 return obsolete.makestore(self.ui, self) |
1423 |
1423 |
1424 @storecache('00changelog.i') |
1424 @storecache(b'00changelog.i') |
1425 def changelog(self): |
1425 def changelog(self): |
1426 return self.store.changelog(txnutil.mayhavepending(self.root)) |
1426 return self.store.changelog(txnutil.mayhavepending(self.root)) |
1427 |
1427 |
1428 @storecache('00manifest.i') |
1428 @storecache(b'00manifest.i') |
1429 def manifestlog(self): |
1429 def manifestlog(self): |
1430 return self.store.manifestlog(self, self._storenarrowmatch) |
1430 return self.store.manifestlog(self, self._storenarrowmatch) |
1431 |
1431 |
1432 @repofilecache('dirstate') |
1432 @repofilecache(b'dirstate') |
1433 def dirstate(self): |
1433 def dirstate(self): |
1434 return self._makedirstate() |
1434 return self._makedirstate() |
1435 |
1435 |
1436 def _makedirstate(self): |
1436 def _makedirstate(self): |
1437 """Extension point for wrapping the dirstate per-repo.""" |
1437 """Extension point for wrapping the dirstate per-repo.""" |
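Since _makedirstate() is documented as an extension point, the usual way to hook it is to swap in a repository subclass during reposetup. A hedged sketch of that pattern (the myrepo class and the decoration step are illustrative, not part of this change):

    def reposetup(ui, repo):
        class myrepo(repo.__class__):
            def _makedirstate(self):
                ds = super(myrepo, self)._makedirstate()
                # decorate or wrap the dirstate here
                return ds
        repo.__class__ = myrepo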
1447 return node |
1447 return node |
1448 except error.LookupError: |
1448 except error.LookupError: |
1449 if not self._dirstatevalidatewarned: |
1449 if not self._dirstatevalidatewarned: |
1450 self._dirstatevalidatewarned = True |
1450 self._dirstatevalidatewarned = True |
1451 self.ui.warn( |
1451 self.ui.warn( |
1452 _("warning: ignoring unknown" " working parent %s!\n") |
1452 _(b"warning: ignoring unknown" b" working parent %s!\n") |
1453 % short(node) |
1453 % short(node) |
1454 ) |
1454 ) |
1455 return nullid |
1455 return nullid |
1456 |
1456 |
1457 @storecache(narrowspec.FILENAME) |
1457 @storecache(narrowspec.FILENAME) |
1514 ] |
1514 ] |
1515 try: |
1515 try: |
1516 if isinstance(changeid, int): |
1516 if isinstance(changeid, int): |
1517 node = self.changelog.node(changeid) |
1517 node = self.changelog.node(changeid) |
1518 rev = changeid |
1518 rev = changeid |
1519 elif changeid == 'null': |
1519 elif changeid == b'null': |
1520 node = nullid |
1520 node = nullid |
1521 rev = nullrev |
1521 rev = nullrev |
1522 elif changeid == 'tip': |
1522 elif changeid == b'tip': |
1523 node = self.changelog.tip() |
1523 node = self.changelog.tip() |
1524 rev = self.changelog.rev(node) |
1524 rev = self.changelog.rev(node) |
1525 elif changeid == '.': |
1525 elif changeid == b'.': |
1526 # this is a hack to delay/avoid loading obsmarkers |
1526 # this is a hack to delay/avoid loading obsmarkers |
1527 # when we know that '.' won't be hidden |
1527 # when we know that '.' won't be hidden |
1528 node = self.dirstate.p1() |
1528 node = self.dirstate.p1() |
1529 rev = self.unfiltered().changelog.rev(node) |
1529 rev = self.unfiltered().changelog.rev(node) |
1530 elif len(changeid) == 20: |
1530 elif len(changeid) == 20: |
1541 # exception for filtered changeset access |
1541 # exception for filtered changeset access |
1542 if ( |
1542 if ( |
1543 self.local() |
1543 self.local() |
1544 and changeid in self.unfiltered().dirstate.parents() |
1544 and changeid in self.unfiltered().dirstate.parents() |
1545 ): |
1545 ): |
1546 msg = _("working directory has unknown parent '%s'!") |
1546 msg = _(b"working directory has unknown parent '%s'!") |
1547 raise error.Abort(msg % short(changeid)) |
1547 raise error.Abort(msg % short(changeid)) |
1548 changeid = hex(changeid) # for the error message |
1548 changeid = hex(changeid) # for the error message |
1549 raise |
1549 raise |
1550 |
1550 |
1551 elif len(changeid) == 40: |
1551 elif len(changeid) == 40: |
1552 node = bin(changeid) |
1552 node = bin(changeid) |
1553 rev = self.changelog.rev(node) |
1553 rev = self.changelog.rev(node) |
1554 else: |
1554 else: |
1555 raise error.ProgrammingError( |
1555 raise error.ProgrammingError( |
1556 "unsupported changeid '%s' of type %s" |
1556 b"unsupported changeid '%s' of type %s" |
1557 % (changeid, type(changeid)) |
1557 % (changeid, type(changeid)) |
1558 ) |
1558 ) |
1559 |
1559 |
1560 return context.changectx(self, rev, node) |
1560 return context.changectx(self, rev, node) |
1561 |
1561 |
1562 except (error.FilteredIndexError, error.FilteredLookupError): |
1562 except (error.FilteredIndexError, error.FilteredLookupError): |
1563 raise error.FilteredRepoLookupError( |
1563 raise error.FilteredRepoLookupError( |
1564 _("filtered revision '%s'") % pycompat.bytestr(changeid) |
1564 _(b"filtered revision '%s'") % pycompat.bytestr(changeid) |
1565 ) |
1565 ) |
1566 except (IndexError, LookupError): |
1566 except (IndexError, LookupError): |
1567 raise error.RepoLookupError( |
1567 raise error.RepoLookupError( |
1568 _("unknown revision '%s'") % pycompat.bytestr(changeid) |
1568 _(b"unknown revision '%s'") % pycompat.bytestr(changeid) |
1569 ) |
1569 ) |
1570 except error.WdirUnsupported: |
1570 except error.WdirUnsupported: |
1571 return context.workingctx(self) |
1571 return context.workingctx(self) |
1572 |
1572 |
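The branch above accepts a handful of changeid shapes: an integer revision, the symbolic names b'null', b'tip' and b'.', a 20-byte binary node, or a 40-character hex node; anything else raises ProgrammingError because user-supplied symbols are resolved at a higher layer (scmutil.revsymbol). A short usage sketch (hexnode stands for any existing 40-character node id):

    ctx = repo[b'tip']   # symbolic name handled directly
    ctx = repo[0]        # integer revision number
    ctx = repo[b'.']     # working-directory parent, avoids loading obsmarkers
    ctx = repo[hexnode]  # 40-character hex node, else RepoLookupError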
1573 def __contains__(self, changeid): |
1573 def __contains__(self, changeid): |
1641 else: |
1641 else: |
1642 m = revset.matchany(None, specs, localalias=localalias) |
1642 m = revset.matchany(None, specs, localalias=localalias) |
1643 return m(self) |
1643 return m(self) |
1644 |
1644 |
1645 def url(self): |
1645 def url(self): |
1646 return 'file:' + self.root |
1646 return b'file:' + self.root |
1647 |
1647 |
1648 def hook(self, name, throw=False, **args): |
1648 def hook(self, name, throw=False, **args): |
1649 """Call a hook, passing this repo instance. |
1649 """Call a hook, passing this repo instance. |
1650 |
1650 |
1651 This a convenience method to aid invoking hooks. Extensions likely |
1651 This a convenience method to aid invoking hooks. Extensions likely |
1709 # quo fine? |
1709 # quo fine? |
1710 |
1710 |
1711 # map tag name to (node, hist) |
1711 # map tag name to (node, hist) |
1712 alltags = tagsmod.findglobaltags(self.ui, self) |
1712 alltags = tagsmod.findglobaltags(self.ui, self) |
1713 # map tag name to tag type |
1713 # map tag name to tag type |
1714 tagtypes = dict((tag, 'global') for tag in alltags) |
1714 tagtypes = dict((tag, b'global') for tag in alltags) |
1715 |
1715 |
1716 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes) |
1716 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes) |
1717 |
1717 |
1718 # Build the return dicts. Have to re-encode tag names because |
1718 # Build the return dicts. Have to re-encode tag names because |
1719 # the tags module always uses UTF-8 (in order not to lose info |
1719 # the tags module always uses UTF-8 (in order not to lose info |
1721 # local encoding. |
1721 # local encoding. |
1722 tags = {} |
1722 tags = {} |
1723 for (name, (node, hist)) in alltags.iteritems(): |
1723 for (name, (node, hist)) in alltags.iteritems(): |
1724 if node != nullid: |
1724 if node != nullid: |
1725 tags[encoding.tolocal(name)] = node |
1725 tags[encoding.tolocal(name)] = node |
1726 tags['tip'] = self.changelog.tip() |
1726 tags[b'tip'] = self.changelog.tip() |
1727 tagtypes = dict( |
1727 tagtypes = dict( |
1728 [ |
1728 [ |
1729 (encoding.tolocal(name), value) |
1729 (encoding.tolocal(name), value) |
1730 for (name, value) in tagtypes.iteritems() |
1730 for (name, value) in tagtypes.iteritems() |
1731 ] |
1731 ] |
1789 ''' |
1789 ''' |
1790 try: |
1790 try: |
1791 return self.branchmap().branchtip(branch) |
1791 return self.branchmap().branchtip(branch) |
1792 except KeyError: |
1792 except KeyError: |
1793 if not ignoremissing: |
1793 if not ignoremissing: |
1794 raise error.RepoLookupError(_("unknown branch '%s'") % branch) |
1794 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch) |
1795 else: |
1795 else: |
1796 pass |
1796 pass |
1797 |
1797 |
1798 def lookup(self, key): |
1798 def lookup(self, key): |
1799 node = scmutil.revsymbol(self, key).node() |
1799 node = scmutil.revsymbol(self, key).node() |
1800 if node is None: |
1800 if node is None: |
1801 raise error.RepoLookupError(_("unknown revision '%s'") % key) |
1801 raise error.RepoLookupError(_(b"unknown revision '%s'") % key) |
1802 return node |
1802 return node |
1803 |
1803 |
1804 def lookupbranch(self, key): |
1804 def lookupbranch(self, key): |
1805 if self.branchmap().hasbranch(key): |
1805 if self.branchmap().hasbranch(key): |
1806 return key |
1806 return key |
1822 return self |
1822 return self |
1823 |
1823 |
1824 def publishing(self): |
1824 def publishing(self): |
1825 # it's safe (and desirable) to trust the publish flag unconditionally |
1825 # it's safe (and desirable) to trust the publish flag unconditionally |
1826 # so that we don't finalize changes shared between users via ssh or nfs |
1826 # so that we don't finalize changes shared between users via ssh or nfs |
1827 return self.ui.configbool('phases', 'publish', untrusted=True) |
1827 return self.ui.configbool(b'phases', b'publish', untrusted=True) |
1828 |
1828 |
1829 def cancopy(self): |
1829 def cancopy(self): |
1830 # so statichttprepo's override of local() works |
1830 # so statichttprepo's override of local() works |
1831 if not self.local(): |
1831 if not self.local(): |
1832 return False |
1832 return False |
1833 if not self.publishing(): |
1833 if not self.publishing(): |
1834 return True |
1834 return True |
1835 # if publishing we can't copy if there is filtered content |
1835 # if publishing we can't copy if there is filtered content |
1836 return not self.filtered('visible').changelog.filteredrevs |
1836 return not self.filtered(b'visible').changelog.filteredrevs |
1837 |
1837 |
1838 def shared(self): |
1838 def shared(self): |
1839 '''the type of shared repository (None if not shared)''' |
1839 '''the type of shared repository (None if not shared)''' |
1840 if self.sharedpath != self.path: |
1840 if self.sharedpath != self.path: |
1841 return 'store' |
1841 return b'store' |
1842 return None |
1842 return None |
1843 |
1843 |
1844 def wjoin(self, f, *insidef): |
1844 def wjoin(self, f, *insidef): |
1845 return self.vfs.reljoin(self.root, f, *insidef) |
1845 return self.vfs.reljoin(self.root, f, *insidef) |
1846 |
1846 |
1875 |
1875 |
1876 def _loadfilter(self, filter): |
1876 def _loadfilter(self, filter): |
1877 if filter not in self._filterpats: |
1877 if filter not in self._filterpats: |
1878 l = [] |
1878 l = [] |
1879 for pat, cmd in self.ui.configitems(filter): |
1879 for pat, cmd in self.ui.configitems(filter): |
1880 if cmd == '!': |
1880 if cmd == b'!': |
1881 continue |
1881 continue |
1882 mf = matchmod.match(self.root, '', [pat]) |
1882 mf = matchmod.match(self.root, b'', [pat]) |
1883 fn = None |
1883 fn = None |
1884 params = cmd |
1884 params = cmd |
1885 for name, filterfn in self._datafilters.iteritems(): |
1885 for name, filterfn in self._datafilters.iteritems(): |
1886 if cmd.startswith(name): |
1886 if cmd.startswith(name): |
1887 fn = filterfn |
1887 fn = filterfn |
1898 return self._filterpats[filter] |
1898 return self._filterpats[filter] |
1899 |
1899 |
1900 def _filter(self, filterpats, filename, data): |
1900 def _filter(self, filterpats, filename, data): |
1901 for mf, fn, cmd in filterpats: |
1901 for mf, fn, cmd in filterpats: |
1902 if mf(filename): |
1902 if mf(filename): |
1903 self.ui.debug("filtering %s through %s\n" % (filename, cmd)) |
1903 self.ui.debug(b"filtering %s through %s\n" % (filename, cmd)) |
1904 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename) |
1904 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename) |
1905 break |
1905 break |
1906 |
1906 |
1907 return data |
1907 return data |
1908 |
1908 |
1909 @unfilteredpropertycache |
1909 @unfilteredpropertycache |
1910 def _encodefilterpats(self): |
1910 def _encodefilterpats(self): |
1911 return self._loadfilter('encode') |
1911 return self._loadfilter(b'encode') |
1912 |
1912 |
1913 @unfilteredpropertycache |
1913 @unfilteredpropertycache |
1914 def _decodefilterpats(self): |
1914 def _decodefilterpats(self): |
1915 return self._loadfilter('decode') |
1915 return self._loadfilter(b'decode') |
1916 |
1916 |
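_encodefilterpats and _decodefilterpats load their patterns from the [encode] and [decode] config sections, and _filter() then pipes matching files through the configured command or a registered data filter. A hedged hgrc example of the kind of entries _loadfilter() consumes (modelled on the gzip example in the hgrc documentation; treat the exact commands as illustrative):

    [encode]
    *.gz = pipe: gunzip
    [decode]
    *.gz = pipe: gzip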
1917 def adddatafilter(self, name, filter): |
1917 def adddatafilter(self, name, filter): |
1918 self._datafilters[name] = filter |
1918 self._datafilters[name] = filter |
1919 |
1919 |
1920 def wread(self, filename): |
1920 def wread(self, filename): |
1928 """write ``data`` into ``filename`` in the working directory |
1928 """write ``data`` into ``filename`` in the working directory |
1929 |
1929 |
1930 This returns length of written (maybe decoded) data. |
1930 This returns length of written (maybe decoded) data. |
1931 """ |
1931 """ |
1932 data = self._filter(self._decodefilterpats, filename, data) |
1932 data = self._filter(self._decodefilterpats, filename, data) |
1933 if 'l' in flags: |
1933 if b'l' in flags: |
1934 self.wvfs.symlink(data, filename) |
1934 self.wvfs.symlink(data, filename) |
1935 else: |
1935 else: |
1936 self.wvfs.write( |
1936 self.wvfs.write( |
1937 filename, data, backgroundclose=backgroundclose, **kwargs |
1937 filename, data, backgroundclose=backgroundclose, **kwargs |
1938 ) |
1938 ) |
1939 if 'x' in flags: |
1939 if b'x' in flags: |
1940 self.wvfs.setflags(filename, False, True) |
1940 self.wvfs.setflags(filename, False, True) |
1941 else: |
1941 else: |
1942 self.wvfs.setflags(filename, False, False) |
1942 self.wvfs.setflags(filename, False, False) |
1943 return len(data) |
1943 return len(data) |
1944 |
1944 |
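The flags argument uses Mercurial's manifest flag characters: b'l' writes the data as a symlink target, b'x' marks the file executable, and an empty string clears both bits. Illustrative calls (paths and data are made up):

    repo.wwrite(b'docs/readme.txt', data, b'')        # regular file
    repo.wwrite(b'scripts/run.sh', data, b'x')        # executable file
    repo.wwrite(b'latest', b'docs/readme.txt', b'l')  # symlink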
1955 if tr and tr.running(): |
1955 if tr and tr.running(): |
1956 return tr |
1956 return tr |
1957 return None |
1957 return None |
1958 |
1958 |
1959 def transaction(self, desc, report=None): |
1959 def transaction(self, desc, report=None): |
1960 if self.ui.configbool('devel', 'all-warnings') or self.ui.configbool( |
1960 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool( |
1961 'devel', 'check-locks' |
1961 b'devel', b'check-locks' |
1962 ): |
1962 ): |
1963 if self._currentlock(self._lockref) is None: |
1963 if self._currentlock(self._lockref) is None: |
1964 raise error.ProgrammingError('transaction requires locking') |
1964 raise error.ProgrammingError(b'transaction requires locking') |
1965 tr = self.currenttransaction() |
1965 tr = self.currenttransaction() |
1966 if tr is not None: |
1966 if tr is not None: |
1967 return tr.nest(name=desc) |
1967 return tr.nest(name=desc) |
1968 |
1968 |
1969 # abort here if the journal already exists |
1969 # abort here if the journal already exists |
1970 if self.svfs.exists("journal"): |
1970 if self.svfs.exists(b"journal"): |
1971 raise error.RepoError( |
1971 raise error.RepoError( |
1972 _("abandoned transaction found"), |
1972 _(b"abandoned transaction found"), |
1973 hint=_("run 'hg recover' to clean up transaction"), |
1973 hint=_(b"run 'hg recover' to clean up transaction"), |
1974 ) |
1974 ) |
1975 |
1975 |
1976 idbase = "%.40f#%f" % (random.random(), time.time()) |
1976 idbase = b"%.40f#%f" % (random.random(), time.time()) |
1977 ha = hex(hashlib.sha1(idbase).digest()) |
1977 ha = hex(hashlib.sha1(idbase).digest()) |
1978 txnid = 'TXN:' + ha |
1978 txnid = b'TXN:' + ha |
1979 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid) |
1979 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid) |
1980 |
1980 |
1981 self._writejournal(desc) |
1981 self._writejournal(desc) |
1982 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()] |
1982 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()] |
1983 if report: |
1983 if report: |
1984 rp = report |
1984 rp = report |
1985 else: |
1985 else: |
1986 rp = self.ui.warn |
1986 rp = self.ui.warn |
1987 vfsmap = {'plain': self.vfs, 'store': self.svfs} # root of .hg/ |
1987 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/ |
1988 # we must avoid cyclic reference between repo and transaction. |
1988 # we must avoid cyclic reference between repo and transaction. |
1989 reporef = weakref.ref(self) |
1989 reporef = weakref.ref(self) |
1990 # Code to track tag movement |
1990 # Code to track tag movement |
1991 # |
1991 # |
1992 # Since tags are all handled as file content, it is actually quite hard |
1992 # Since tags are all handled as file content, it is actually quite hard |
2020 # "+A": tag is added, |
2020 # "+A": tag is added, |
2021 # "-M": tag is moved (old value), |
2021 # "-M": tag is moved (old value), |
2022 # "+M": tag is moved (new value), |
2022 # "+M": tag is moved (new value), |
2023 tracktags = lambda x: None |
2023 tracktags = lambda x: None |
2024 # experimental config: experimental.hook-track-tags |
2024 # experimental config: experimental.hook-track-tags |
2025 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags') |
2025 shouldtracktags = self.ui.configbool( |
2026 b'experimental', b'hook-track-tags' |
2027 ) |
2026 if desc != 'strip' and shouldtracktags: |
2028 if desc != b'strip' and shouldtracktags: |
2027 oldheads = self.changelog.headrevs() |
2029 oldheads = self.changelog.headrevs() |
2028 |
2030 |
2029 def tracktags(tr2): |
2031 def tracktags(tr2): |
2030 repo = reporef() |
2032 repo = reporef() |
2031 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads) |
2033 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads) |
2033 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads) |
2035 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads) |
2034 # notes: we compare lists here. |
2036 # notes: we compare lists here. |
2035 # As we do it only once, building a set would not be cheaper |
2037 # As we do it only once, building a set would not be cheaper |
2036 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes) |
2038 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes) |
2037 if changes: |
2039 if changes: |
2038 tr2.hookargs['tag_moved'] = '1' |
2040 tr2.hookargs[b'tag_moved'] = b'1' |
2039 with repo.vfs( |
2041 with repo.vfs( |
2040 'changes/tags.changes', 'w', atomictemp=True |
2042 b'changes/tags.changes', b'w', atomictemp=True |
2041 ) as changesfile: |
2043 ) as changesfile: |
2042 # note: we do not register the file to the transaction |
2044 # note: we do not register the file to the transaction |
2043 # because we need it to still exist when the transaction |
2045 # because we need it to still exist when the transaction |
2044 # is closed (for txnclose hooks) |
2046 # is closed (for txnclose hooks) |
2045 tagsmod.writediff(changesfile, changes) |
2047 tagsmod.writediff(changesfile, changes) |
2064 # gating. |
2066 # gating. |
2065 tracktags(tr2) |
2067 tracktags(tr2) |
2066 repo = reporef() |
2068 repo = reporef() |
2067 |
2069 |
2068 r = repo.ui.configsuboptions( |
2070 r = repo.ui.configsuboptions( |
2069 'experimental', 'single-head-per-branch' |
2071 b'experimental', b'single-head-per-branch' |
2070 ) |
2072 ) |
2071 singlehead, singleheadsub = r |
2073 singlehead, singleheadsub = r |
2072 if singlehead: |
2074 if singlehead: |
2073 accountclosed = singleheadsub.get("account-closed-heads", False) |
2075 accountclosed = singleheadsub.get( |
2076 b"account-closed-heads", False |
2077 ) |
2074 scmutil.enforcesinglehead(repo, tr2, desc, accountclosed) |
2078 scmutil.enforcesinglehead(repo, tr2, desc, accountclosed) |
2075 if hook.hashook(repo.ui, 'pretxnclose-bookmark'): |
2079 if hook.hashook(repo.ui, b'pretxnclose-bookmark'): |
2076 for name, (old, new) in sorted(tr.changes['bookmarks'].items()): |
2080 for name, (old, new) in sorted( |
2081 tr.changes[b'bookmarks'].items() |
2082 ): |
2077 args = tr.hookargs.copy() |
2083 args = tr.hookargs.copy() |
2078 args.update(bookmarks.preparehookargs(name, old, new)) |
2084 args.update(bookmarks.preparehookargs(name, old, new)) |
2079 repo.hook( |
2085 repo.hook( |
2080 'pretxnclose-bookmark', |
2086 b'pretxnclose-bookmark', |
2081 throw=True, |
2087 throw=True, |
2082 **pycompat.strkwargs(args) |
2088 **pycompat.strkwargs(args) |
2083 ) |
2089 ) |
2084 if hook.hashook(repo.ui, 'pretxnclose-phase'): |
2090 if hook.hashook(repo.ui, b'pretxnclose-phase'): |
2085 cl = repo.unfiltered().changelog |
2091 cl = repo.unfiltered().changelog |
2086 for rev, (old, new) in tr.changes['phases'].items(): |
2092 for rev, (old, new) in tr.changes[b'phases'].items(): |
2087 args = tr.hookargs.copy() |
2093 args = tr.hookargs.copy() |
2088 node = hex(cl.node(rev)) |
2094 node = hex(cl.node(rev)) |
2089 args.update(phases.preparehookargs(node, old, new)) |
2095 args.update(phases.preparehookargs(node, old, new)) |
2090 repo.hook( |
2096 repo.hook( |
2091 'pretxnclose-phase', |
2097 b'pretxnclose-phase', |
2092 throw=True, |
2098 throw=True, |
2093 **pycompat.strkwargs(args) |
2099 **pycompat.strkwargs(args) |
2094 ) |
2100 ) |
2095 |
2101 |
2096 repo.hook( |
2102 repo.hook( |
2097 'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs) |
2103 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs) |
2098 ) |
2104 ) |
2099 |
2105 |
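The pretxnclose-bookmark and pretxnclose-phase hooks fire once per changed bookmark or phase with the prepared hook arguments, while the plain pretxnclose hook fires once for the whole transaction. A hedged hgrc example wiring an external script to one of them (the script path is invented, and the HG_* names assume the usual hook-argument-to-environment mapping for the values preparehookargs fills in):

    [hooks]
    pretxnclose-phase.audit = /usr/local/bin/audit-phase-move "$HG_NODE" "$HG_OLDPHASE" "$HG_PHASE"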
2100 def releasefn(tr, success): |
2106 def releasefn(tr, success): |
2101 repo = reporef() |
2107 repo = reporef() |
2102 if repo is None: |
2108 if repo is None: |
2113 # transaction running |
2119 # transaction running |
2114 repo.dirstate.write(None) |
2120 repo.dirstate.write(None) |
2115 else: |
2121 else: |
2116 # discard all changes (including ones already written |
2122 # discard all changes (including ones already written |
2117 # out) in this transaction |
2123 # out) in this transaction |
2118 narrowspec.restorebackup(self, 'journal.narrowspec') |
2124 narrowspec.restorebackup(self, b'journal.narrowspec') |
2119 narrowspec.restorewcbackup(self, 'journal.narrowspec.dirstate') |
2125 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate') |
2120 repo.dirstate.restorebackup(None, 'journal.dirstate') |
2126 repo.dirstate.restorebackup(None, b'journal.dirstate') |
2121 |
2127 |
2122 repo.invalidate(clearfilecache=True) |
2128 repo.invalidate(clearfilecache=True) |
2123 |
2129 |
2124 tr = transaction.transaction( |
2130 tr = transaction.transaction( |
2125 rp, |
2131 rp, |
2126 self.svfs, |
2132 self.svfs, |
2127 vfsmap, |
2133 vfsmap, |
2128 "journal", |
2134 b"journal", |
2129 "undo", |
2135 b"undo", |
2130 aftertrans(renames), |
2136 aftertrans(renames), |
2131 self.store.createmode, |
2137 self.store.createmode, |
2132 validator=validate, |
2138 validator=validate, |
2133 releasefn=releasefn, |
2139 releasefn=releasefn, |
2134 checkambigfiles=_cachedfiles, |
2140 checkambigfiles=_cachedfiles, |
2135 name=desc, |
2141 name=desc, |
2136 ) |
2142 ) |
2137 tr.changes['origrepolen'] = len(self) |
2143 tr.changes[b'origrepolen'] = len(self) |
2138 tr.changes['obsmarkers'] = set() |
2144 tr.changes[b'obsmarkers'] = set() |
2139 tr.changes['phases'] = {} |
2145 tr.changes[b'phases'] = {} |
2140 tr.changes['bookmarks'] = {} |
2146 tr.changes[b'bookmarks'] = {} |
2141 |
2147 |
2142 tr.hookargs['txnid'] = txnid |
2148 tr.hookargs[b'txnid'] = txnid |
2143 tr.hookargs['txnname'] = desc |
2149 tr.hookargs[b'txnname'] = desc |
2144 # note: writing the fncache only during finalize means that the file is |
2150 # note: writing the fncache only during finalize means that the file is |
2145 # outdated when running hooks. As fncache is used for streaming clone, |
2151 # outdated when running hooks. As fncache is used for streaming clone, |
2146 # this is not expected to break anything that happens during the hooks. |
2152 # this is not expected to break anything that happens during the hooks. |
2147 tr.addfinalize('flush-fncache', self.store.write) |
2153 tr.addfinalize(b'flush-fncache', self.store.write) |
2148 |
2154 |
2149 def txnclosehook(tr2): |
2155 def txnclosehook(tr2): |
2150 """To be run if transaction is successful, will schedule a hook run |
2156 """To be run if transaction is successful, will schedule a hook run |
2151 """ |
2157 """ |
2152 # Don't reference tr2 in hook() so we don't hold a reference. |
2158 # Don't reference tr2 in hook() so we don't hold a reference. |
2155 # fixes the function accumulation. |
2161 # fixes the function accumulation. |
2156 hookargs = tr2.hookargs |
2162 hookargs = tr2.hookargs |
2157 |
2163 |
2158 def hookfunc(): |
2164 def hookfunc(): |
2159 repo = reporef() |
2165 repo = reporef() |
2160 if hook.hashook(repo.ui, 'txnclose-bookmark'): |
2166 if hook.hashook(repo.ui, b'txnclose-bookmark'): |
2161 bmchanges = sorted(tr.changes['bookmarks'].items()) |
2167 bmchanges = sorted(tr.changes[b'bookmarks'].items()) |
2162 for name, (old, new) in bmchanges: |
2168 for name, (old, new) in bmchanges: |
2163 args = tr.hookargs.copy() |
2169 args = tr.hookargs.copy() |
2164 args.update(bookmarks.preparehookargs(name, old, new)) |
2170 args.update(bookmarks.preparehookargs(name, old, new)) |
2165 repo.hook( |
2171 repo.hook( |
2166 'txnclose-bookmark', |
2172 b'txnclose-bookmark', |
2167 throw=False, |
2173 throw=False, |
2168 **pycompat.strkwargs(args) |
2174 **pycompat.strkwargs(args) |
2169 ) |
2175 ) |
2170 |
2176 |
2171 if hook.hashook(repo.ui, 'txnclose-phase'): |
2177 if hook.hashook(repo.ui, b'txnclose-phase'): |
2172 cl = repo.unfiltered().changelog |
2178 cl = repo.unfiltered().changelog |
2173 phasemv = sorted(tr.changes['phases'].items()) |
2179 phasemv = sorted(tr.changes[b'phases'].items()) |
2174 for rev, (old, new) in phasemv: |
2180 for rev, (old, new) in phasemv: |
2175 args = tr.hookargs.copy() |
2181 args = tr.hookargs.copy() |
2176 node = hex(cl.node(rev)) |
2182 node = hex(cl.node(rev)) |
2177 args.update(phases.preparehookargs(node, old, new)) |
2183 args.update(phases.preparehookargs(node, old, new)) |
2178 repo.hook( |
2184 repo.hook( |
2179 'txnclose-phase', |
2185 b'txnclose-phase', |
2180 throw=False, |
2186 throw=False, |
2181 **pycompat.strkwargs(args) |
2187 **pycompat.strkwargs(args) |
2182 ) |
2188 ) |
2183 |
2189 |
2184 repo.hook( |
2190 repo.hook( |
2185 'txnclose', throw=False, **pycompat.strkwargs(hookargs) |
2191 b'txnclose', throw=False, **pycompat.strkwargs(hookargs) |
2186 ) |
2192 ) |
2187 |
2193 |
2188 reporef()._afterlock(hookfunc) |
2194 reporef()._afterlock(hookfunc) |
2189 |
2195 |
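An aside on the hook plumbing above: tr.hookargs and the bookmark/phase argument dicts are keyed by byte strings, while **kwargs needs native str keys on Python 3, which is what pycompat.strkwargs() smooths over. A minimal sketch of that conversion (a rough stand-in for illustration, not Mercurial's actual implementation):

    # Rough stand-in for pycompat.strkwargs(): decode byte-string keys so the
    # mapping can be splatted as **kwargs on Python 3. Values stay untouched.
    def strkwargs(dic):
        return {k.decode('latin-1'): v for k, v in dic.items()}

    hookargs = {b'txnid': b'TXN:1234', b'txnname': b'commit'}
    assert strkwargs(hookargs) == {'txnid': b'TXN:1234', 'txnname': b'commit'}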
2190 tr.addfinalize('txnclose-hook', txnclosehook) |
2196 tr.addfinalize(b'txnclose-hook', txnclosehook) |
2191 # Include a leading "-" to make it happen before the transaction summary |
2197 # Include a leading "-" to make it happen before the transaction summary |
2192 # reports registered via scmutil.registersummarycallback() whose names |
2198 # reports registered via scmutil.registersummarycallback() whose names |
2193 # are 00-txnreport etc. That way, the caches will be warm when the |
2199 # are 00-txnreport etc. That way, the caches will be warm when the |
2194 # callbacks run. |
2200 # callbacks run. |
2195 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr)) |
2201 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr)) |
2196 |
2202 |
2197 def txnaborthook(tr2): |
2203 def txnaborthook(tr2): |
2198 """To be run if transaction is aborted |
2204 """To be run if transaction is aborted |
2199 """ |
2205 """ |
2200 reporef().hook( |
2206 reporef().hook( |
2201 'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs) |
2207 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs) |
2202 ) |
2208 ) |
2203 |
2209 |
2204 tr.addabort('txnabort-hook', txnaborthook) |
2210 tr.addabort(b'txnabort-hook', txnaborthook) |
2205 # avoid eager cache invalidation. in-memory data should be identical |
2211 # avoid eager cache invalidation. in-memory data should be identical |
2206 # to stored data if transaction has no error. |
2212 # to stored data if transaction has no error. |
2207 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats) |
2213 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats) |
2208 self._transref = weakref.ref(tr) |
2214 self._transref = weakref.ref(tr) |
2209 scmutil.registersummarycallback(self, tr, desc) |
2215 scmutil.registersummarycallback(self, tr, desc) |
2210 return tr |
2216 return tr |
2211 |
2217 |
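The leading "-" trick described in the comment above assumes post-close callbacks run in sorted order of their category names; b'-' sorts before b'0', so the cache warmer fires ahead of summary callbacks such as 00-txnreport. A tiny sketch of that ordering assumption:

    # b'-' (0x2d) sorts before b'0' (0x30) and before most letters, so the
    # b'-warm-cache' callback is assumed to run ahead of the others.
    categories = [b'00-txnreport', b'refresh-filecachestats', b'-warm-cache']
    assert sorted(categories)[0] == b'-warm-cache'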
2212 def _journalfiles(self): |
2218 def _journalfiles(self): |
2213 return ( |
2219 return ( |
2214 (self.svfs, 'journal'), |
2220 (self.svfs, b'journal'), |
2215 (self.svfs, 'journal.narrowspec'), |
2221 (self.svfs, b'journal.narrowspec'), |
2216 (self.vfs, 'journal.narrowspec.dirstate'), |
2222 (self.vfs, b'journal.narrowspec.dirstate'), |
2217 (self.vfs, 'journal.dirstate'), |
2223 (self.vfs, b'journal.dirstate'), |
2218 (self.vfs, 'journal.branch'), |
2224 (self.vfs, b'journal.branch'), |
2219 (self.vfs, 'journal.desc'), |
2225 (self.vfs, b'journal.desc'), |
2220 (bookmarks.bookmarksvfs(self), 'journal.bookmarks'), |
2226 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'), |
2221 (self.svfs, 'journal.phaseroots'), |
2227 (self.svfs, b'journal.phaseroots'), |
2222 ) |
2228 ) |
2223 |
2229 |
2224 def undofiles(self): |
2230 def undofiles(self): |
2225 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()] |
2231 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()] |
2226 |
2232 |
2227 @unfilteredmethod |
2233 @unfilteredmethod |
2228 def _writejournal(self, desc): |
2234 def _writejournal(self, desc): |
2229 self.dirstate.savebackup(None, 'journal.dirstate') |
2235 self.dirstate.savebackup(None, b'journal.dirstate') |
2230 narrowspec.savewcbackup(self, 'journal.narrowspec.dirstate') |
2236 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate') |
2231 narrowspec.savebackup(self, 'journal.narrowspec') |
2237 narrowspec.savebackup(self, b'journal.narrowspec') |
2232 self.vfs.write( |
2238 self.vfs.write( |
2233 "journal.branch", encoding.fromlocal(self.dirstate.branch()) |
2239 b"journal.branch", encoding.fromlocal(self.dirstate.branch()) |
2234 ) |
2240 ) |
2235 self.vfs.write("journal.desc", "%d\n%s\n" % (len(self), desc)) |
2241 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc)) |
2236 bookmarksvfs = bookmarks.bookmarksvfs(self) |
2242 bookmarksvfs = bookmarks.bookmarksvfs(self) |
2237 bookmarksvfs.write( |
2243 bookmarksvfs.write( |
2238 "journal.bookmarks", bookmarksvfs.tryread("bookmarks") |
2244 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks") |
2239 ) |
2245 ) |
2240 self.svfs.write("journal.phaseroots", self.svfs.tryread("phaseroots")) |
2246 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots")) |
2241 |
2247 |
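As an illustration of the journal format used above: _writejournal() stores the pre-transaction repository length and the transaction description in journal.desc, one per line, and _rollback() later parses the renamed undo.desc the same way. A minimal sketch of that round trip with made-up values:

    # written as journal.desc, renamed to undo.desc once the transaction succeeds
    desc_blob = b"%d\n%s\n" % (42, b'commit')
    # parsed back the way _rollback() does it
    args = desc_blob.splitlines()
    oldlen, desc = int(args[0]), args[1]
    assert (oldlen, desc) == (42, b'commit')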
2242 def recover(self): |
2248 def recover(self): |
2243 with self.lock(): |
2249 with self.lock(): |
2244 if self.svfs.exists("journal"): |
2250 if self.svfs.exists(b"journal"): |
2245 self.ui.status(_("rolling back interrupted transaction\n")) |
2251 self.ui.status(_(b"rolling back interrupted transaction\n")) |
2246 vfsmap = { |
2252 vfsmap = { |
2247 '': self.svfs, |
2253 b'': self.svfs, |
2248 'plain': self.vfs, |
2254 b'plain': self.vfs, |
2249 } |
2255 } |
2250 transaction.rollback( |
2256 transaction.rollback( |
2251 self.svfs, |
2257 self.svfs, |
2252 vfsmap, |
2258 vfsmap, |
2253 "journal", |
2259 b"journal", |
2254 self.ui.warn, |
2260 self.ui.warn, |
2255 checkambigfiles=_cachedfiles, |
2261 checkambigfiles=_cachedfiles, |
2256 ) |
2262 ) |
2257 self.invalidate() |
2263 self.invalidate() |
2258 return True |
2264 return True |
2259 else: |
2265 else: |
2260 self.ui.warn(_("no interrupted transaction available\n")) |
2266 self.ui.warn(_(b"no interrupted transaction available\n")) |
2261 return False |
2267 return False |
2262 |
2268 |
2263 def rollback(self, dryrun=False, force=False): |
2269 def rollback(self, dryrun=False, force=False): |
2264 wlock = lock = dsguard = None |
2270 wlock = lock = dsguard = None |
2265 try: |
2271 try: |
2266 wlock = self.wlock() |
2272 wlock = self.wlock() |
2267 lock = self.lock() |
2273 lock = self.lock() |
2268 if self.svfs.exists("undo"): |
2274 if self.svfs.exists(b"undo"): |
2269 dsguard = dirstateguard.dirstateguard(self, 'rollback') |
2275 dsguard = dirstateguard.dirstateguard(self, b'rollback') |
2270 |
2276 |
2271 return self._rollback(dryrun, force, dsguard) |
2277 return self._rollback(dryrun, force, dsguard) |
2272 else: |
2278 else: |
2273 self.ui.warn(_("no rollback information available\n")) |
2279 self.ui.warn(_(b"no rollback information available\n")) |
2274 return 1 |
2280 return 1 |
2275 finally: |
2281 finally: |
2276 release(dsguard, lock, wlock) |
2282 release(dsguard, lock, wlock) |
2277 |
2283 |
2278 @unfilteredmethod # Until we get smarter cache management |
2284 @unfilteredmethod # Until we get smarter cache management |
2279 def _rollback(self, dryrun, force, dsguard): |
2285 def _rollback(self, dryrun, force, dsguard): |
2280 ui = self.ui |
2286 ui = self.ui |
2281 try: |
2287 try: |
2282 args = self.vfs.read('undo.desc').splitlines() |
2288 args = self.vfs.read(b'undo.desc').splitlines() |
2283 (oldlen, desc, detail) = (int(args[0]), args[1], None) |
2289 (oldlen, desc, detail) = (int(args[0]), args[1], None) |
2284 if len(args) >= 3: |
2290 if len(args) >= 3: |
2285 detail = args[2] |
2291 detail = args[2] |
2286 oldtip = oldlen - 1 |
2292 oldtip = oldlen - 1 |
2287 |
2293 |
2288 if detail and ui.verbose: |
2294 if detail and ui.verbose: |
2289 msg = _( |
2295 msg = _( |
2290 'repository tip rolled back to revision %d' |
2296 b'repository tip rolled back to revision %d' |
2291 ' (undo %s: %s)\n' |
2297 b' (undo %s: %s)\n' |
2292 ) % (oldtip, desc, detail) |
2298 ) % (oldtip, desc, detail) |
2293 else: |
2299 else: |
2294 msg = _( |
2300 msg = _( |
2295 'repository tip rolled back to revision %d' ' (undo %s)\n' |
2301 b'repository tip rolled back to revision %d' b' (undo %s)\n' |
2296 ) % (oldtip, desc) |
2302 ) % (oldtip, desc) |
2297 except IOError: |
2303 except IOError: |
2298 msg = _('rolling back unknown transaction\n') |
2304 msg = _(b'rolling back unknown transaction\n') |
2299 desc = None |
2305 desc = None |
2300 |
2306 |
2301 if not force and self['.'] != self['tip'] and desc == 'commit': |
2307 if not force and self[b'.'] != self[b'tip'] and desc == b'commit': |
2302 raise error.Abort( |
2308 raise error.Abort( |
2303 _( |
2309 _( |
2304 'rollback of last commit while not checked out ' |
2310 b'rollback of last commit while not checked out ' |
2305 'may lose data' |
2311 b'may lose data' |
2306 ), |
2312 ), |
2307 hint=_('use -f to force'), |
2313 hint=_(b'use -f to force'), |
2308 ) |
2314 ) |
2309 |
2315 |
2310 ui.status(msg) |
2316 ui.status(msg) |
2311 if dryrun: |
2317 if dryrun: |
2312 return 0 |
2318 return 0 |
2313 |
2319 |
2314 parents = self.dirstate.parents() |
2320 parents = self.dirstate.parents() |
2315 self.destroying() |
2321 self.destroying() |
2316 vfsmap = {'plain': self.vfs, '': self.svfs} |
2322 vfsmap = {b'plain': self.vfs, b'': self.svfs} |
2317 transaction.rollback( |
2323 transaction.rollback( |
2318 self.svfs, vfsmap, 'undo', ui.warn, checkambigfiles=_cachedfiles |
2324 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles |
2319 ) |
2325 ) |
2320 bookmarksvfs = bookmarks.bookmarksvfs(self) |
2326 bookmarksvfs = bookmarks.bookmarksvfs(self) |
2321 if bookmarksvfs.exists('undo.bookmarks'): |
2327 if bookmarksvfs.exists(b'undo.bookmarks'): |
2322 bookmarksvfs.rename('undo.bookmarks', 'bookmarks', checkambig=True) |
2328 bookmarksvfs.rename( |
2329 b'undo.bookmarks', b'bookmarks', checkambig=True |
2330 ) |
2323 if self.svfs.exists('undo.phaseroots'): |
2331 if self.svfs.exists(b'undo.phaseroots'): |
2324 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True) |
2332 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True) |
2325 self.invalidate() |
2333 self.invalidate() |
2326 |
2334 |
2327 parentgone = any(p not in self.changelog.nodemap for p in parents) |
2335 parentgone = any(p not in self.changelog.nodemap for p in parents) |
2328 if parentgone: |
2336 if parentgone: |
2329 # prevent dirstateguard from overwriting already restored one |
2337 # prevent dirstateguard from overwriting already restored one |
2330 dsguard.close() |
2338 dsguard.close() |
2331 |
2339 |
2332 narrowspec.restorebackup(self, 'undo.narrowspec') |
2340 narrowspec.restorebackup(self, b'undo.narrowspec') |
2333 narrowspec.restorewcbackup(self, 'undo.narrowspec.dirstate') |
2341 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate') |
2334 self.dirstate.restorebackup(None, 'undo.dirstate') |
2342 self.dirstate.restorebackup(None, b'undo.dirstate') |
2335 try: |
2343 try: |
2336 branch = self.vfs.read('undo.branch') |
2344 branch = self.vfs.read(b'undo.branch') |
2337 self.dirstate.setbranch(encoding.tolocal(branch)) |
2345 self.dirstate.setbranch(encoding.tolocal(branch)) |
2338 except IOError: |
2346 except IOError: |
2339 ui.warn( |
2347 ui.warn( |
2340 _( |
2348 _( |
2341 'named branch could not be reset: ' |
2349 b'named branch could not be reset: ' |
2342 'current branch is still \'%s\'\n' |
2350 b'current branch is still \'%s\'\n' |
2343 ) |
2351 ) |
2344 % self.dirstate.branch() |
2352 % self.dirstate.branch() |
2345 ) |
2353 ) |
2346 |
2354 |
2347 parents = tuple([p.rev() for p in self[None].parents()]) |
2355 parents = tuple([p.rev() for p in self[None].parents()]) |
2348 if len(parents) > 1: |
2356 if len(parents) > 1: |
2349 ui.status( |
2357 ui.status( |
2350 _('working directory now based on ' 'revisions %d and %d\n') |
2358 _( |
2359 b'working directory now based on ' |
2360 b'revisions %d and %d\n' |
2361 ) |
2351 % parents |
2362 % parents |
2352 ) |
2363 ) |
2353 else: |
2364 else: |
2354 ui.status( |
2365 ui.status( |
2355 _('working directory now based on ' 'revision %d\n') |
2366 _(b'working directory now based on ' b'revision %d\n') |
2356 % parents |
2367 % parents |
2357 ) |
2368 ) |
2358 mergemod.mergestate.clean(self, self['.'].node()) |
2369 mergemod.mergestate.clean(self, self[b'.'].node()) |
2359 |
2370 |
2360 # TODO: if we know which new heads may result from this rollback, pass |
2371 # TODO: if we know which new heads may result from this rollback, pass |
2361 # them to destroy(), which will prevent the branchhead cache from being |
2372 # them to destroy(), which will prevent the branchhead cache from being |
2362 # invalidated. |
2373 # invalidated. |
2363 self.destroyed() |
2374 self.destroyed() |
2388 update caches relevant to the changes in that transaction. |
2399 update caches relevant to the changes in that transaction. |
2389 |
2400 |
2390 If 'full' is set, make sure all caches the function knows about have |
2401 If 'full' is set, make sure all caches the function knows about have |
2391 up-to-date data. Even the ones usually loaded more lazily. |
2402 up-to-date data. Even the ones usually loaded more lazily. |
2392 """ |
2403 """ |
2393 if tr is not None and tr.hookargs.get('source') == 'strip': |
2404 if tr is not None and tr.hookargs.get(b'source') == b'strip': |
2394 # During strip, many caches are invalid but |
2405 # During strip, many caches are invalid but |
2395 # later call to `destroyed` will refresh them. |
2406 # later call to `destroyed` will refresh them. |
2396 return |
2407 return |
2397 |
2408 |
2398 if tr is None or tr.changes['origrepolen'] < len(self): |
2409 if tr is None or tr.changes[b'origrepolen'] < len(self): |
2399 # accessing the 'served' branchmap should refresh all the others, |
2410 # accessing the 'served' branchmap should refresh all the others, |
2400 self.ui.debug('updating the branch cache\n') |
2411 self.ui.debug(b'updating the branch cache\n') |
2401 self.filtered('served').branchmap() |
2412 self.filtered(b'served').branchmap() |
2402 self.filtered('served.hidden').branchmap() |
2413 self.filtered(b'served.hidden').branchmap() |
2403 |
2414 |
2404 if full: |
2415 if full: |
2405 unfi = self.unfiltered() |
2416 unfi = self.unfiltered() |
2406 rbc = unfi.revbranchcache() |
2417 rbc = unfi.revbranchcache() |
2407 for r in unfi.changelog: |
2418 for r in unfi.changelog: |
2408 rbc.branchinfo(r) |
2419 rbc.branchinfo(r) |
2409 rbc.write() |
2420 rbc.write() |
2410 |
2421 |
2411 # ensure the working copy parents are in the manifestfulltextcache |
2422 # ensure the working copy parents are in the manifestfulltextcache |
2412 for ctx in self['.'].parents(): |
2423 for ctx in self[b'.'].parents(): |
2413 ctx.manifest() # accessing the manifest is enough |
2424 ctx.manifest() # accessing the manifest is enough |
2414 |
2425 |
2415 # accessing fnode cache warms the cache |
2426 # accessing fnode cache warms the cache |
2416 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs()) |
2427 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs()) |
2417 # accessing tags warms the cache |
2428 # accessing tags warms the cache |
2418 self.tags() |
2429 self.tags() |
2419 self.filtered('served').tags() |
2430 self.filtered(b'served').tags() |
2420 |
2431 |
2421 # The `full` arg is documented as updating even the lazily-loaded |
2432 # The `full` arg is documented as updating even the lazily-loaded |
2422 # caches immediately, so we're forcing a write to cause these caches |
2433 # caches immediately, so we're forcing a write to cause these caches |
2423 # to be warmed up even if they haven't explicitly been requested |
2434 # to be warmed up even if they haven't explicitly been requested |
2424 # yet (if they've never been used by hg, they won't ever have been |
2435 # yet (if they've never been used by hg, they won't ever have been |
2468 redundant one doesn't). |
2479 redundant one doesn't). |
2469 ''' |
2480 ''' |
2470 unfiltered = self.unfiltered() # all file caches are stored unfiltered |
2481 unfiltered = self.unfiltered() # all file caches are stored unfiltered |
2471 for k in list(self._filecache.keys()): |
2482 for k in list(self._filecache.keys()): |
2472 # dirstate is invalidated separately in invalidatedirstate() |
2483 # dirstate is invalidated separately in invalidatedirstate() |
2473 if k == 'dirstate': |
2484 if k == b'dirstate': |
2474 continue |
2485 continue |
2475 if ( |
2486 if ( |
2476 k == 'changelog' |
2487 k == b'changelog' |
2477 and self.currenttransaction() |
2488 and self.currenttransaction() |
2478 and self.changelog._delayed |
2489 and self.changelog._delayed |
2479 ): |
2490 ): |
2480 # The changelog object may store unwritten revisions. We don't |
2491 # The changelog object may store unwritten revisions. We don't |
2481 # want to lose them. |
2492 # want to lose them. |
2529 parentlock = encoding.environ.get(parentenvvar) |
2540 parentlock = encoding.environ.get(parentenvvar) |
2530 |
2541 |
2531 timeout = 0 |
2542 timeout = 0 |
2532 warntimeout = 0 |
2543 warntimeout = 0 |
2533 if wait: |
2544 if wait: |
2534 timeout = self.ui.configint("ui", "timeout") |
2545 timeout = self.ui.configint(b"ui", b"timeout") |
2535 warntimeout = self.ui.configint("ui", "timeout.warn") |
2546 warntimeout = self.ui.configint(b"ui", b"timeout.warn") |
2536 # internal config: ui.signal-safe-lock |
2547 # internal config: ui.signal-safe-lock |
2537 signalsafe = self.ui.configbool('ui', 'signal-safe-lock') |
2548 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock') |
2538 |
2549 |
2539 l = lockmod.trylock( |
2550 l = lockmod.trylock( |
2540 self.ui, |
2551 self.ui, |
2541 vfs, |
2552 vfs, |
2542 lockname, |
2553 lockname, |
2576 l.lock() |
2587 l.lock() |
2577 return l |
2588 return l |
2578 |
2589 |
2579 l = self._lock( |
2590 l = self._lock( |
2580 vfs=self.svfs, |
2591 vfs=self.svfs, |
2581 lockname="lock", |
2592 lockname=b"lock", |
2582 wait=wait, |
2593 wait=wait, |
2583 releasefn=None, |
2594 releasefn=None, |
2584 acquirefn=self.invalidate, |
2595 acquirefn=self.invalidate, |
2585 desc=_('repository %s') % self.origroot, |
2596 desc=_(b'repository %s') % self.origroot, |
2586 ) |
2597 ) |
2587 self._lockref = weakref.ref(l) |
2598 self._lockref = weakref.ref(l) |
2588 return l |
2599 return l |
2589 |
2600 |
2590 def _wlockchecktransaction(self): |
2601 def _wlockchecktransaction(self): |
2591 if self.currenttransaction() is not None: |
2602 if self.currenttransaction() is not None: |
2592 raise error.LockInheritanceContractViolation( |
2603 raise error.LockInheritanceContractViolation( |
2593 'wlock cannot be inherited in the middle of a transaction' |
2604 b'wlock cannot be inherited in the middle of a transaction' |
2594 ) |
2605 ) |
2595 |
2606 |
2596 def wlock(self, wait=True): |
2607 def wlock(self, wait=True): |
2597 '''Lock the non-store parts of the repository (everything under |
2608 '''Lock the non-store parts of the repository (everything under |
2598 .hg except .hg/store) and return a weak reference to the lock. |
2609 .hg except .hg/store) and return a weak reference to the lock. |
2607 return l |
2618 return l |
2608 |
2619 |
2609 # We do not need to check for non-waiting lock acquisition. Such |
2620 # We do not need to check for non-waiting lock acquisition. Such |
2610 # acquisition would not cause dead-lock as they would just fail. |
2621 # acquisition would not cause dead-lock as they would just fail. |
2611 if wait and ( |
2622 if wait and ( |
2612 self.ui.configbool('devel', 'all-warnings') |
2623 self.ui.configbool(b'devel', b'all-warnings') |
2613 or self.ui.configbool('devel', 'check-locks') |
2624 or self.ui.configbool(b'devel', b'check-locks') |
2614 ): |
2625 ): |
2615 if self._currentlock(self._lockref) is not None: |
2626 if self._currentlock(self._lockref) is not None: |
2616 self.ui.develwarn('"wlock" acquired after "lock"') |
2627 self.ui.develwarn(b'"wlock" acquired after "lock"') |
2617 |
2628 |
2618 def unlock(): |
2629 def unlock(): |
2619 if self.dirstate.pendingparentchange(): |
2630 if self.dirstate.pendingparentchange(): |
2620 self.dirstate.invalidate() |
2631 self.dirstate.invalidate() |
2621 else: |
2632 else: |
2622 self.dirstate.write(None) |
2633 self.dirstate.write(None) |
2623 |
2634 |
2624 self._filecache['dirstate'].refresh() |
2635 self._filecache[b'dirstate'].refresh() |
2625 |
2636 |
2626 l = self._lock( |
2637 l = self._lock( |
2627 self.vfs, |
2638 self.vfs, |
2628 "wlock", |
2639 b"wlock", |
2629 wait, |
2640 wait, |
2630 unlock, |
2641 unlock, |
2631 self.invalidatedirstate, |
2642 self.invalidatedirstate, |
2632 _('working directory of %s') % self.origroot, |
2643 _(b'working directory of %s') % self.origroot, |
2633 inheritchecker=self._wlockchecktransaction, |
2644 inheritchecker=self._wlockchecktransaction, |
2634 parentenvvar='HG_WLOCK_LOCKER', |
2645 parentenvvar=b'HG_WLOCK_LOCKER', |
2635 ) |
2646 ) |
2636 self._wlockref = weakref.ref(l) |
2647 self._wlockref = weakref.ref(l) |
2637 return l |
2648 return l |
2638 |
2649 |
2639 def _currentlock(self, lockref): |
2650 def _currentlock(self, lockref): |
2667 fparent1 = manifest1.get(fname, nullid) |
2678 fparent1 = manifest1.get(fname, nullid) |
2668 fparent2 = manifest2.get(fname, nullid) |
2679 fparent2 = manifest2.get(fname, nullid) |
2669 if isinstance(fctx, context.filectx): |
2680 if isinstance(fctx, context.filectx): |
2670 node = fctx.filenode() |
2681 node = fctx.filenode() |
2671 if node in [fparent1, fparent2]: |
2682 if node in [fparent1, fparent2]: |
2672 self.ui.debug('reusing %s filelog entry\n' % fname) |
2683 self.ui.debug(b'reusing %s filelog entry\n' % fname) |
2673 if ( |
2684 if ( |
2674 fparent1 != nullid |
2685 fparent1 != nullid |
2675 and manifest1.flags(fname) != fctx.flags() |
2686 and manifest1.flags(fname) != fctx.flags() |
2676 ) or ( |
2687 ) or ( |
2677 fparent2 != nullid |
2688 fparent2 != nullid |
2720 # the user that copy information was dropped, so if they didn't |
2731 # the user that copy information was dropped, so if they didn't |
2721 # expect this outcome it can be fixed, but this is the correct |
2732 # expect this outcome it can be fixed, but this is the correct |
2722 # behavior in this circumstance. |
2733 # behavior in this circumstance. |
2723 |
2734 |
2724 if cnode: |
2735 if cnode: |
2725 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(cnode))) |
2736 self.ui.debug( |
2737 b" %s: copy %s:%s\n" % (fname, cfname, hex(cnode)) |
2738 ) |
2726 if includecopymeta: |
2739 if includecopymeta: |
2727 meta["copy"] = cfname |
2740 meta[b"copy"] = cfname |
2728 meta["copyrev"] = hex(cnode) |
2741 meta[b"copyrev"] = hex(cnode) |
2729 fparent1, fparent2 = nullid, newfparent |
2742 fparent1, fparent2 = nullid, newfparent |
2730 else: |
2743 else: |
2731 self.ui.warn( |
2744 self.ui.warn( |
2732 _( |
2745 _( |
2733 "warning: can't find ancestor for '%s' " |
2746 b"warning: can't find ancestor for '%s' " |
2734 "copied from '%s'!\n" |
2747 b"copied from '%s'!\n" |
2735 ) |
2748 ) |
2736 % (fname, cfname) |
2749 % (fname, cfname) |
2737 ) |
2750 ) |
2738 |
2751 |
2739 elif fparent1 == nullid: |
2752 elif fparent1 == nullid: |
2762 if match.isexact() or match.prefix(): |
2775 if match.isexact() or match.prefix(): |
2763 matched = set(status.modified + status.added + status.removed) |
2776 matched = set(status.modified + status.added + status.removed) |
2764 |
2777 |
2765 for f in match.files(): |
2778 for f in match.files(): |
2766 f = self.dirstate.normalize(f) |
2779 f = self.dirstate.normalize(f) |
2767 if f == '.' or f in matched or f in wctx.substate: |
2780 if f == b'.' or f in matched or f in wctx.substate: |
2768 continue |
2781 continue |
2769 if f in status.deleted: |
2782 if f in status.deleted: |
2770 fail(f, _('file not found!')) |
2783 fail(f, _(b'file not found!')) |
2771 if f in vdirs: # visited directory |
2784 if f in vdirs: # visited directory |
2772 d = f + '/' |
2785 d = f + b'/' |
2773 for mf in matched: |
2786 for mf in matched: |
2774 if mf.startswith(d): |
2787 if mf.startswith(d): |
2775 break |
2788 break |
2776 else: |
2789 else: |
2777 fail(f, _("no match under directory!")) |
2790 fail(f, _(b"no match under directory!")) |
2778 elif f not in self.dirstate: |
2791 elif f not in self.dirstate: |
2779 fail(f, _("file not tracked!")) |
2792 fail(f, _(b"file not tracked!")) |
2780 |
2793 |
2781 @unfilteredmethod |
2794 @unfilteredmethod |
2782 def commit( |
2795 def commit( |
2783 self, |
2796 self, |
2784 text="", |
2797 text=b"", |
2785 user=None, |
2798 user=None, |
2786 date=None, |
2799 date=None, |
2787 match=None, |
2800 match=None, |
2788 force=False, |
2801 force=False, |
2789 editor=False, |
2802 editor=False, |
2797 """ |
2810 """ |
2798 if extra is None: |
2811 if extra is None: |
2799 extra = {} |
2812 extra = {} |
2800 |
2813 |
2801 def fail(f, msg): |
2814 def fail(f, msg): |
2802 raise error.Abort('%s: %s' % (f, msg)) |
2815 raise error.Abort(b'%s: %s' % (f, msg)) |
2803 |
2816 |
2804 if not match: |
2817 if not match: |
2805 match = matchmod.always() |
2818 match = matchmod.always() |
2806 |
2819 |
2807 if not force: |
2820 if not force: |
2815 merge = len(wctx.parents()) > 1 |
2828 merge = len(wctx.parents()) > 1 |
2816 |
2829 |
2817 if not force and merge and not match.always(): |
2830 if not force and merge and not match.always(): |
2818 raise error.Abort( |
2831 raise error.Abort( |
2819 _( |
2832 _( |
2820 'cannot partially commit a merge ' |
2833 b'cannot partially commit a merge ' |
2821 '(do not specify files or patterns)' |
2834 b'(do not specify files or patterns)' |
2822 ) |
2835 ) |
2823 ) |
2836 ) |
2824 |
2837 |
2825 status = self.status(match=match, clean=force) |
2838 status = self.status(match=match, clean=force) |
2826 if force: |
2839 if force: |
2842 ) |
2855 ) |
2843 |
2856 |
2844 # internal config: ui.allowemptycommit |
2857 # internal config: ui.allowemptycommit |
2845 allowemptycommit = ( |
2858 allowemptycommit = ( |
2846 wctx.branch() != wctx.p1().branch() |
2859 wctx.branch() != wctx.p1().branch() |
2847 or extra.get('close') |
2860 or extra.get(b'close') |
2848 or merge |
2861 or merge |
2849 or cctx.files() |
2862 or cctx.files() |
2850 or self.ui.configbool('ui', 'allowemptycommit') |
2863 or self.ui.configbool(b'ui', b'allowemptycommit') |
2851 ) |
2864 ) |
2852 if not allowemptycommit: |
2865 if not allowemptycommit: |
2853 return None |
2866 return None |
2854 |
2867 |
2855 if merge and cctx.deleted(): |
2868 if merge and cctx.deleted(): |
2856 raise error.Abort(_("cannot commit merge with missing files")) |
2869 raise error.Abort(_(b"cannot commit merge with missing files")) |
2857 |
2870 |
2858 ms = mergemod.mergestate.read(self) |
2871 ms = mergemod.mergestate.read(self) |
2859 mergeutil.checkunresolved(ms) |
2872 mergeutil.checkunresolved(ms) |
2860 |
2873 |
2861 if editor: |
2874 if editor: |
2871 if subs: |
2884 if subs: |
2872 uipathfn = scmutil.getuipathfn(self) |
2885 uipathfn = scmutil.getuipathfn(self) |
2873 for s in sorted(commitsubs): |
2886 for s in sorted(commitsubs): |
2874 sub = wctx.sub(s) |
2887 sub = wctx.sub(s) |
2875 self.ui.status( |
2888 self.ui.status( |
2876 _('committing subrepository %s\n') |
2889 _(b'committing subrepository %s\n') |
2877 % uipathfn(subrepoutil.subrelpath(sub)) |
2890 % uipathfn(subrepoutil.subrelpath(sub)) |
2878 ) |
2891 ) |
2879 sr = sub.commit(cctx._text, user, date) |
2892 sr = sub.commit(cctx._text, user, date) |
2880 newstate[s] = (newstate[s][0], sr) |
2893 newstate[s] = (newstate[s][0], sr) |
2881 subrepoutil.writestate(self, newstate) |
2894 subrepoutil.writestate(self, newstate) |
2882 |
2895 |
2883 p1, p2 = self.dirstate.parents() |
2896 p1, p2 = self.dirstate.parents() |
2884 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '') |
2897 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'') |
2885 try: |
2898 try: |
2886 self.hook( |
2899 self.hook( |
2887 "precommit", throw=True, parent1=hookp1, parent2=hookp2 |
2900 b"precommit", throw=True, parent1=hookp1, parent2=hookp2 |
2888 ) |
2901 ) |
2889 with self.transaction('commit'): |
2902 with self.transaction(b'commit'): |
2890 ret = self.commitctx(cctx, True) |
2903 ret = self.commitctx(cctx, True) |
2891 # update bookmarks, dirstate and mergestate |
2904 # update bookmarks, dirstate and mergestate |
2892 bookmarks.update(self, [p1, p2], ret) |
2905 bookmarks.update(self, [p1, p2], ret) |
2893 cctx.markcommitted(ret) |
2906 cctx.markcommitted(ret) |
2894 ms.reset() |
2907 ms.reset() |
2895 except: # re-raises |
2908 except: # re-raises |
2896 if edited: |
2909 if edited: |
2897 self.ui.write( |
2910 self.ui.write( |
2898 _('note: commit message saved in %s\n') % msgfn |
2911 _(b'note: commit message saved in %s\n') % msgfn |
2899 ) |
2912 ) |
2900 raise |
2913 raise |
2901 |
2914 |
2902 def commithook(): |
2915 def commithook(): |
2903 # hack for commands that use a temporary commit (eg: histedit) |
2916 # hack for commands that use a temporary commit (eg: histedit) |
2904 # temporary commit got stripped before hook release |
2917 # temporary commit got stripped before hook release |
2905 if self.changelog.hasnode(ret): |
2918 if self.changelog.hasnode(ret): |
2906 self.hook( |
2919 self.hook( |
2907 "commit", node=hex(ret), parent1=hookp1, parent2=hookp2 |
2920 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2 |
2908 ) |
2921 ) |
2909 |
2922 |
2910 self._afterlock(commithook) |
2923 self._afterlock(commithook) |
2911 return ret |
2924 return ret |
2912 |
2925 |
2928 """ |
2941 """ |
2929 |
2942 |
2930 p1, p2 = ctx.p1(), ctx.p2() |
2943 p1, p2 = ctx.p1(), ctx.p2() |
2931 user = ctx.user() |
2944 user = ctx.user() |
2932 |
2945 |
2933 writecopiesto = self.ui.config('experimental', 'copies.write-to') |
2946 writecopiesto = self.ui.config(b'experimental', b'copies.write-to') |
2934 writefilecopymeta = writecopiesto != 'changeset-only' |
2947 writefilecopymeta = writecopiesto != b'changeset-only' |
2935 writechangesetcopy = writecopiesto in ( |
2948 writechangesetcopy = writecopiesto in ( |
2936 'changeset-only', |
2949 b'changeset-only', |
2937 'compatibility', |
2950 b'compatibility', |
2938 ) |
2951 ) |
2939 p1copies, p2copies = None, None |
2952 p1copies, p2copies = None, None |
2940 if writechangesetcopy: |
2953 if writechangesetcopy: |
2941 p1copies = ctx.p1copies() |
2954 p1copies = ctx.p1copies() |
2942 p2copies = ctx.p2copies() |
2955 p2copies = ctx.p2copies() |
2943 filesadded, filesremoved = None, None |
2956 filesadded, filesremoved = None, None |
2944 with self.lock(), self.transaction("commit") as tr: |
2957 with self.lock(), self.transaction(b"commit") as tr: |
2945 trp = weakref.proxy(tr) |
2958 trp = weakref.proxy(tr) |
2946 |
2959 |
2947 if ctx.manifestnode(): |
2960 if ctx.manifestnode(): |
2948 # reuse an existing manifest revision |
2961 # reuse an existing manifest revision |
2949 self.ui.debug('reusing known manifest\n') |
2962 self.ui.debug(b'reusing known manifest\n') |
2950 mn = ctx.manifestnode() |
2963 mn = ctx.manifestnode() |
2951 files = ctx.files() |
2964 files = ctx.files() |
2952 if writechangesetcopy: |
2965 if writechangesetcopy: |
2953 filesadded = ctx.filesadded() |
2966 filesadded = ctx.filesadded() |
2954 filesremoved = ctx.filesremoved() |
2967 filesremoved = ctx.filesremoved() |
2964 # check in files |
2977 # check in files |
2965 added = [] |
2978 added = [] |
2966 changed = [] |
2979 changed = [] |
2967 removed = list(ctx.removed()) |
2980 removed = list(ctx.removed()) |
2968 linkrev = len(self) |
2981 linkrev = len(self) |
2969 self.ui.note(_("committing files:\n")) |
2982 self.ui.note(_(b"committing files:\n")) |
2970 uipathfn = scmutil.getuipathfn(self) |
2983 uipathfn = scmutil.getuipathfn(self) |
2971 for f in sorted(ctx.modified() + ctx.added()): |
2984 for f in sorted(ctx.modified() + ctx.added()): |
2972 self.ui.note(uipathfn(f) + "\n") |
2985 self.ui.note(uipathfn(f) + b"\n") |
2973 try: |
2986 try: |
2974 fctx = ctx[f] |
2987 fctx = ctx[f] |
2975 if fctx is None: |
2988 if fctx is None: |
2976 removed.append(f) |
2989 removed.append(f) |
2977 else: |
2990 else: |
2986 writefilecopymeta, |
2999 writefilecopymeta, |
2987 ) |
3000 ) |
2988 m.setflag(f, fctx.flags()) |
3001 m.setflag(f, fctx.flags()) |
2989 except OSError: |
3002 except OSError: |
2990 self.ui.warn( |
3003 self.ui.warn( |
2991 _("trouble committing %s!\n") % uipathfn(f) |
3004 _(b"trouble committing %s!\n") % uipathfn(f) |
2992 ) |
3005 ) |
2993 raise |
3006 raise |
2994 except IOError as inst: |
3007 except IOError as inst: |
2995 errcode = getattr(inst, 'errno', errno.ENOENT) |
3008 errcode = getattr(inst, 'errno', errno.ENOENT) |
2996 if error or errcode and errcode != errno.ENOENT: |
3009 if error or errcode and errcode != errno.ENOENT: |
2997 self.ui.warn( |
3010 self.ui.warn( |
2998 _("trouble committing %s!\n") % uipathfn(f) |
3011 _(b"trouble committing %s!\n") % uipathfn(f) |
2999 ) |
3012 ) |
3000 raise |
3013 raise |
3001 |
3014 |
3002 # update manifest |
3015 # update manifest |
3003 removed = [f for f in removed if f in m1 or f in m2] |
3016 removed = [f for f in removed if f in m1 or f in m2] |
3058 # try hard to detect unmodified manifest entry so that the |
3071 # try hard to detect unmodified manifest entry so that the |
3059 # exact same commit can be reproduced later on convert. |
3072 # exact same commit can be reproduced later on convert. |
3060 md = m1.diff(m, scmutil.matchfiles(self, ctx.files())) |
3073 md = m1.diff(m, scmutil.matchfiles(self, ctx.files())) |
3061 if not files and md: |
3074 if not files and md: |
3062 self.ui.debug( |
3075 self.ui.debug( |
3063 'not reusing manifest (no file change in ' |
3076 b'not reusing manifest (no file change in ' |
3064 'changelog, but manifest differs)\n' |
3077 b'changelog, but manifest differs)\n' |
3065 ) |
3078 ) |
3066 if files or md: |
3079 if files or md: |
3067 self.ui.note(_("committing manifest\n")) |
3080 self.ui.note(_(b"committing manifest\n")) |
3068 # we're using narrowmatch here since it's already applied at |
3081 # we're using narrowmatch here since it's already applied at |
3069 # other stages (such as dirstate.walk), so we're already |
3082 # other stages (such as dirstate.walk), so we're already |
3070 # ignoring things outside of narrowspec in most cases. The |
3083 # ignoring things outside of narrowspec in most cases. The |
3071 # one case where we might have files outside the narrowspec |
3084 # one case where we might have files outside the narrowspec |
3072 # at this point is merges, and we already error out in the |
3085 # at this point is merges, and we already error out in the |
3087 f for f in changed if not (f in m1 or f in m2) |
3100 f for f in changed if not (f in m1 or f in m2) |
3088 ] |
3101 ] |
3089 filesremoved = removed |
3102 filesremoved = removed |
3090 else: |
3103 else: |
3091 self.ui.debug( |
3104 self.ui.debug( |
3092 'reusing manifest from p1 (listed files ' |
3105 b'reusing manifest from p1 (listed files ' |
3093 'actually unchanged)\n' |
3106 b'actually unchanged)\n' |
3094 ) |
3107 ) |
3095 mn = p1.manifestnode() |
3108 mn = p1.manifestnode() |
3096 else: |
3109 else: |
3097 self.ui.debug('reusing manifest from p1 (no file change)\n') |
3110 self.ui.debug(b'reusing manifest from p1 (no file change)\n') |
3098 mn = p1.manifestnode() |
3111 mn = p1.manifestnode() |
3099 files = [] |
3112 files = [] |
3100 |
3113 |
3101 if writecopiesto == 'changeset-only': |
3114 if writecopiesto == b'changeset-only': |
3102 # If writing only to changeset extras, use None to indicate that |
3115 # If writing only to changeset extras, use None to indicate that |
3103 # no entry should be written. If writing to both, write an empty |
3116 # no entry should be written. If writing to both, write an empty |
3104 # entry to prevent the reader from falling back to reading |
3117 # entry to prevent the reader from falling back to reading |
3105 # filelogs. |
3118 # filelogs. |
3106 p1copies = p1copies or None |
3119 p1copies = p1copies or None |
3110 |
3123 |
3111 if origctx and origctx.manifestnode() == mn: |
3124 if origctx and origctx.manifestnode() == mn: |
3112 files = origctx.files() |
3125 files = origctx.files() |
3113 |
3126 |
3114 # update changelog |
3127 # update changelog |
3115 self.ui.note(_("committing changelog\n")) |
3128 self.ui.note(_(b"committing changelog\n")) |
3116 self.changelog.delayupdate(tr) |
3129 self.changelog.delayupdate(tr) |
3117 n = self.changelog.add( |
3130 n = self.changelog.add( |
3118 mn, |
3131 mn, |
3119 files, |
3132 files, |
3120 ctx.description(), |
3133 ctx.description(), |
3127 p1copies, |
3140 p1copies, |
3128 p2copies, |
3141 p2copies, |
3129 filesadded, |
3142 filesadded, |
3130 filesremoved, |
3143 filesremoved, |
3131 ) |
3144 ) |
3132 xp1, xp2 = p1.hex(), p2 and p2.hex() or '' |
3145 xp1, xp2 = p1.hex(), p2 and p2.hex() or b'' |
3133 self.hook( |
3146 self.hook( |
3134 'pretxncommit', |
3147 b'pretxncommit', |
3135 throw=True, |
3148 throw=True, |
3136 node=hex(n), |
3149 node=hex(n), |
3137 parent1=xp1, |
3150 parent1=xp1, |
3138 parent2=xp2, |
3151 parent2=xp2, |
3139 ) |
3152 ) |
3161 completely. |
3174 completely. |
3162 ''' |
3175 ''' |
3163 # When using the same lock to commit and strip, the phasecache is left |
3176 # When using the same lock to commit and strip, the phasecache is left |
3164 # dirty after committing. Then when we strip, the repo is invalidated, |
3177 # dirty after committing. Then when we strip, the repo is invalidated, |
3165 # causing those changes to disappear. |
3178 # causing those changes to disappear. |
3166 if '_phasecache' in vars(self): |
3179 if b'_phasecache' in vars(self): |
3167 self._phasecache.write() |
3180 self._phasecache.write() |
3168 |
3181 |
3169 @unfilteredmethod |
3182 @unfilteredmethod |
3170 def destroyed(self): |
3183 def destroyed(self): |
3171 '''Inform the repository that nodes have been destroyed. |
3184 '''Inform the repository that nodes have been destroyed. |
3198 # tag cache retrieval" case to work. |
3211 # tag cache retrieval" case to work. |
3199 self.invalidate() |
3212 self.invalidate() |
3200 |
3213 |
3201 def status( |
3214 def status( |
3202 self, |
3215 self, |
3203 node1='.', |
3216 node1=b'.', |
3204 node2=None, |
3217 node2=None, |
3205 match=None, |
3218 match=None, |
3206 ignored=False, |
3219 ignored=False, |
3207 clean=False, |
3220 clean=False, |
3208 unknown=False, |
3221 unknown=False, |
3329 hookargs = pycompat.strkwargs(hookargs) |
3342 hookargs = pycompat.strkwargs(hookargs) |
3330 hookargs[r'namespace'] = namespace |
3343 hookargs[r'namespace'] = namespace |
3331 hookargs[r'key'] = key |
3344 hookargs[r'key'] = key |
3332 hookargs[r'old'] = old |
3345 hookargs[r'old'] = old |
3333 hookargs[r'new'] = new |
3346 hookargs[r'new'] = new |
3334 self.hook('prepushkey', throw=True, **hookargs) |
3347 self.hook(b'prepushkey', throw=True, **hookargs) |
3335 except error.HookAbort as exc: |
3348 except error.HookAbort as exc: |
3336 self.ui.write_err(_("pushkey-abort: %s\n") % exc) |
3349 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc) |
3337 if exc.hint: |
3350 if exc.hint: |
3338 self.ui.write_err(_("(%s)\n") % exc.hint) |
3351 self.ui.write_err(_(b"(%s)\n") % exc.hint) |
3339 return False |
3352 return False |
3340 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key)) |
3353 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key)) |
3341 ret = pushkey.push(self, namespace, key, old, new) |
3354 ret = pushkey.push(self, namespace, key, old, new) |
3342 |
3355 |
3343 def runhook(): |
3356 def runhook(): |
3344 self.hook( |
3357 self.hook( |
3345 'pushkey', |
3358 b'pushkey', |
3346 namespace=namespace, |
3359 namespace=namespace, |
3347 key=key, |
3360 key=key, |
3348 old=old, |
3361 old=old, |
3349 new=new, |
3362 new=new, |
3350 ret=ret, |
3363 ret=ret, |
3352 |
3365 |
3353 self._afterlock(runhook) |
3366 self._afterlock(runhook) |
3354 return ret |
3367 return ret |
3355 |
3368 |
3356 def listkeys(self, namespace): |
3369 def listkeys(self, namespace): |
3357 self.hook('prelistkeys', throw=True, namespace=namespace) |
3370 self.hook(b'prelistkeys', throw=True, namespace=namespace) |
3358 self.ui.debug('listing keys for "%s"\n' % namespace) |
3371 self.ui.debug(b'listing keys for "%s"\n' % namespace) |
3359 values = pushkey.list(self, namespace) |
3372 values = pushkey.list(self, namespace) |
3360 self.hook('listkeys', namespace=namespace, values=values) |
3373 self.hook(b'listkeys', namespace=namespace, values=values) |
3361 return values |
3374 return values |
3362 |
3375 |
3363 def debugwireargs(self, one, two, three=None, four=None, five=None): |
3376 def debugwireargs(self, one, two, three=None, four=None, five=None): |
3364 '''used to test argument passing over the wire''' |
3377 '''used to test argument passing over the wire''' |
3365 return "%s %s %s %s %s" % ( |
3378 return b"%s %s %s %s %s" % ( |
3366 one, |
3379 one, |
3367 two, |
3380 two, |
3368 pycompat.bytestr(three), |
3381 pycompat.bytestr(three), |
3369 pycompat.bytestr(four), |
3382 pycompat.bytestr(four), |
3370 pycompat.bytestr(five), |
3383 pycompat.bytestr(five), |
3371 ) |
3384 ) |
3372 |
3385 |
3373 def savecommitmessage(self, text): |
3386 def savecommitmessage(self, text): |
3374 fp = self.vfs('last-message.txt', 'wb') |
3387 fp = self.vfs(b'last-message.txt', b'wb') |
3375 try: |
3388 try: |
3376 fp.write(text) |
3389 fp.write(text) |
3377 finally: |
3390 finally: |
3378 fp.close() |
3391 fp.close() |
3379 return self.pathto(fp.name[len(self.root) + 1 :]) |
3392 return self.pathto(fp.name[len(self.root) + 1 :]) |
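The return statement above strips the repository root plus the path separator from the absolute file name before handing it to pathto(). A small sketch of that slicing with made-up paths:

    root = b'/repo'
    name = b'/repo/.hg/last-message.txt'   # fp.name of the file just written
    assert name[len(root) + 1:] == b'.hg/last-message.txt'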
3397 return a |
3410 return a |
3398 |
3411 |
3399 |
3412 |
3400 def undoname(fn): |
3413 def undoname(fn): |
3401 base, name = os.path.split(fn) |
3414 base, name = os.path.split(fn) |
3402 assert name.startswith('journal') |
3415 assert name.startswith(b'journal') |
3403 return os.path.join(base, name.replace('journal', 'undo', 1)) |
3416 return os.path.join(base, name.replace(b'journal', b'undo', 1)) |
3404 |
3417 |
3405 |
3418 |
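undoname() above is small enough to restate as a standalone sketch, with a usage example drawn from the backup names listed in _journalfiles():

    import os.path

    def undoname(fn):
        # map a journal backup name to its undo counterpart
        base, name = os.path.split(fn)
        assert name.startswith(b'journal')
        return os.path.join(base, name.replace(b'journal', b'undo', 1))

    assert undoname(b'journal.dirstate') == b'undo.dirstate'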
3406 def instance(ui, path, create, intents=None, createopts=None): |
3419 def instance(ui, path, create, intents=None, createopts=None): |
3407 localpath = util.urllocalpath(path) |
3420 localpath = util.urllocalpath(path) |
3408 if create: |
3421 if create: |
3421 A dictionary of explicitly requested creation options can be passed |
3434 A dictionary of explicitly requested creation options can be passed |
3422 in. Missing keys will be populated. |
3435 in. Missing keys will be populated. |
3423 """ |
3436 """ |
3424 createopts = dict(createopts or {}) |
3437 createopts = dict(createopts or {}) |
3425 |
3438 |
3426 if 'backend' not in createopts: |
3439 if b'backend' not in createopts: |
3427 # experimental config: storage.new-repo-backend |
3440 # experimental config: storage.new-repo-backend |
3428 createopts['backend'] = ui.config('storage', 'new-repo-backend') |
3441 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend') |
3429 |
3442 |
3430 return createopts |
3443 return createopts |
3431 |
3444 |
3432 |
3445 |
3433 def newreporequirements(ui, createopts): |
3446 def newreporequirements(ui, createopts): |
3436 Extensions can wrap this function to specify custom requirements for |
3449 Extensions can wrap this function to specify custom requirements for |
3437 new repositories. |
3450 new repositories. |
3438 """ |
3451 """ |
3439 # If the repo is being created from a shared repository, we copy |
3452 # If the repo is being created from a shared repository, we copy |
3440 # its requirements. |
3453 # its requirements. |
3441 if 'sharedrepo' in createopts: |
3454 if b'sharedrepo' in createopts: |
3442 requirements = set(createopts['sharedrepo'].requirements) |
3455 requirements = set(createopts[b'sharedrepo'].requirements) |
3443 if createopts.get('sharedrelative'): |
3456 if createopts.get(b'sharedrelative'): |
3444 requirements.add('relshared') |
3457 requirements.add(b'relshared') |
3445 else: |
3458 else: |
3446 requirements.add('shared') |
3459 requirements.add(b'shared') |
3447 |
3460 |
3448 return requirements |
3461 return requirements |
3449 |
3462 |
3450 if 'backend' not in createopts: |
3463 if b'backend' not in createopts: |
3451 raise error.ProgrammingError( |
3464 raise error.ProgrammingError( |
3452 'backend key not present in createopts; ' |
3465 b'backend key not present in createopts; ' |
3453 'was defaultcreateopts() called?' |
3466 b'was defaultcreateopts() called?' |
3454 ) |
3467 ) |
3455 |
3468 |
3456 if createopts['backend'] != 'revlogv1': |
3469 if createopts[b'backend'] != b'revlogv1': |
3457 raise error.Abort( |
3470 raise error.Abort( |
3458 _( |
3471 _( |
3459 'unable to determine repository requirements for ' |
3472 b'unable to determine repository requirements for ' |
3460 'storage backend: %s' |
3473 b'storage backend: %s' |
3461 ) |
3474 ) |
3462 % createopts['backend'] |
3475 % createopts[b'backend'] |
3463 ) |
3476 ) |
3464 |
3477 |
3465 requirements = {'revlogv1'} |
3478 requirements = {b'revlogv1'} |
3466 if ui.configbool('format', 'usestore'): |
3479 if ui.configbool(b'format', b'usestore'): |
3467 requirements.add('store') |
3480 requirements.add(b'store') |
3468 if ui.configbool('format', 'usefncache'): |
3481 if ui.configbool(b'format', b'usefncache'): |
3469 requirements.add('fncache') |
3482 requirements.add(b'fncache') |
3470 if ui.configbool('format', 'dotencode'): |
3483 if ui.configbool(b'format', b'dotencode'): |
3471 requirements.add('dotencode') |
3484 requirements.add(b'dotencode') |
3472 |
3485 |
3473 compengine = ui.config('format', 'revlog-compression') |
3486 compengine = ui.config(b'format', b'revlog-compression') |
3474 if compengine not in util.compengines: |
3487 if compengine not in util.compengines: |
3475 raise error.Abort( |
3488 raise error.Abort( |
3476 _( |
3489 _( |
3477 'compression engine %s defined by ' |
3490 b'compression engine %s defined by ' |
3478 'format.revlog-compression not available' |
3491 b'format.revlog-compression not available' |
3479 ) |
3492 ) |
3480 % compengine, |
3493 % compengine, |
3481 hint=_( |
3494 hint=_( |
3482 'run "hg debuginstall" to list available ' 'compression engines' |
3495 b'run "hg debuginstall" to list available ' |
3496 b'compression engines' |
3483 ), |
3497 ), |
3484 ) |
3498 ) |
3485 |
3499 |
3486 # zlib is the historical default and doesn't need an explicit requirement. |
3500 # zlib is the historical default and doesn't need an explicit requirement. |
3487 elif compengine == 'zstd': |
3501 elif compengine == b'zstd': |
3488 requirements.add('revlog-compression-zstd') |
3502 requirements.add(b'revlog-compression-zstd') |
3489 elif compengine != 'zlib': |
3503 elif compengine != b'zlib': |
3490 requirements.add('exp-compression-%s' % compengine) |
3504 requirements.add(b'exp-compression-%s' % compengine) |
3491 |
3505 |
3492 if scmutil.gdinitconfig(ui): |
3506 if scmutil.gdinitconfig(ui): |
3493 requirements.add('generaldelta') |
3507 requirements.add(b'generaldelta') |
3494 if ui.configbool('format', 'sparse-revlog'): |
3508 if ui.configbool(b'format', b'sparse-revlog'): |
3495 requirements.add(SPARSEREVLOG_REQUIREMENT) |
3509 requirements.add(SPARSEREVLOG_REQUIREMENT) |
3496 |
3510 |
3497 # experimental config: format.use-side-data |
3511 # experimental config: format.use-side-data |
3498 if ui.configbool('format', 'use-side-data'): |
3512 if ui.configbool(b'format', b'use-side-data'): |
3499 requirements.add(SIDEDATA_REQUIREMENT) |
3513 requirements.add(SIDEDATA_REQUIREMENT) |
3500 if ui.configbool('experimental', 'treemanifest'): |
3514 if ui.configbool(b'experimental', b'treemanifest'): |
3501 requirements.add('treemanifest') |
3515 requirements.add(b'treemanifest') |
3502 |
3516 |
3503 revlogv2 = ui.config('experimental', 'revlogv2') |
3517 revlogv2 = ui.config(b'experimental', b'revlogv2') |
3504 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data': |
3518 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data': |
3505 requirements.remove('revlogv1') |
3519 requirements.remove(b'revlogv1') |
3506 # generaldelta is implied by revlogv2. |
3520 # generaldelta is implied by revlogv2. |
3507 requirements.discard('generaldelta') |
3521 requirements.discard(b'generaldelta') |
3508 requirements.add(REVLOGV2_REQUIREMENT) |
3522 requirements.add(REVLOGV2_REQUIREMENT) |
3509 # experimental config: format.internal-phase |
3523 # experimental config: format.internal-phase |
3510 if ui.configbool('format', 'internal-phase'): |
3524 if ui.configbool(b'format', b'internal-phase'): |
3511 requirements.add('internal-phase') |
3525 requirements.add(b'internal-phase') |
3512 |
3526 |
3513 if createopts.get('narrowfiles'): |
3527 if createopts.get(b'narrowfiles'): |
3514 requirements.add(repository.NARROW_REQUIREMENT) |
3528 requirements.add(repository.NARROW_REQUIREMENT) |
3515 |
3529 |
3516 if createopts.get('lfs'): |
3530 if createopts.get(b'lfs'): |
3517 requirements.add('lfs') |
3531 requirements.add(b'lfs') |
3518 |
3532 |
3519 if ui.configbool('format', 'bookmarks-in-store'): |
3533 if ui.configbool(b'format', b'bookmarks-in-store'): |
3520 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT) |
3534 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT) |
3521 |
3535 |
3522 return requirements |
3536 return requirements |
3523 |
3537 |
3524 |
3538 |
3535 |
3549 |
3536 Extensions can wrap this function to filter out creation options |
3550 Extensions can wrap this function to filter out creation options |
3537 they know how to handle. |
3551 they know how to handle. |
3538 """ |
3552 """ |
3539 known = { |
3553 known = { |
3540 'backend', |
3554 b'backend', |
3541 'lfs', |
3555 b'lfs', |
3542 'narrowfiles', |
3556 b'narrowfiles', |
3543 'sharedrepo', |
3557 b'sharedrepo', |
3544 'sharedrelative', |
3558 b'sharedrelative', |
3545 'shareditems', |
3559 b'shareditems', |
3546 'shallowfilestore', |
3560 b'shallowfilestore', |
3547 } |
3561 } |
3548 |
3562 |
3549 return {k: v for k, v in createopts.items() if k not in known} |
3563 return {k: v for k, v in createopts.items() if k not in known} |
3550 |
3564 |
3551 |
3565 |
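filterknowncreateopts() above returns whatever creation options fall outside the known set; a non-empty result later aborts repository creation with a hint that a required extension may not be loaded. A minimal sketch (the b'my-extension-opt' key is invented for the example):

    known = {
        b'backend', b'lfs', b'narrowfiles', b'sharedrepo',
        b'sharedrelative', b'shareditems', b'shallowfilestore',
    }
    createopts = {b'backend': b'revlogv1', b'my-extension-opt': True}
    unknown = {k: v for k, v in createopts.items() if k not in known}
    assert unknown == {b'my-extension-opt': True}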
3580 |
3594 |
3581 unknownopts = filterknowncreateopts(ui, createopts) |
3595 unknownopts = filterknowncreateopts(ui, createopts) |
3582 |
3596 |
3583 if not isinstance(unknownopts, dict): |
3597 if not isinstance(unknownopts, dict): |
3584 raise error.ProgrammingError( |
3598 raise error.ProgrammingError( |
3585 'filterknowncreateopts() did not return ' 'a dict' |
3599 b'filterknowncreateopts() did not return ' b'a dict' |
3586 ) |
3600 ) |
3587 |
3601 |
3588 if unknownopts: |
3602 if unknownopts: |
3589 raise error.Abort( |
3603 raise error.Abort( |
3590 _( |
3604 _( |
3591 'unable to create repository because of unknown ' |
3605 b'unable to create repository because of unknown ' |
3592 'creation option: %s' |
3606 b'creation option: %s' |
3593 ) |
3607 ) |
3594 % ', '.join(sorted(unknownopts)), |
3608 % b', '.join(sorted(unknownopts)), |
3595 hint=_('is a required extension not loaded?'), |
3609 hint=_(b'is a required extension not loaded?'), |
3596 ) |
3610 ) |
3597 |
3611 |
3598 requirements = newreporequirements(ui, createopts=createopts) |
3612 requirements = newreporequirements(ui, createopts=createopts) |
3599 |
3613 |
3600 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True) |
3614 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True) |
3601 |
3615 |
3602 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg')) |
3616 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg')) |
3603 if hgvfs.exists(): |
3617 if hgvfs.exists(): |
3604 raise error.RepoError(_('repository %s already exists') % path) |
3618 raise error.RepoError(_(b'repository %s already exists') % path) |
3605 |
3619 |
3606 if 'sharedrepo' in createopts: |
3620 if b'sharedrepo' in createopts: |
3607 sharedpath = createopts['sharedrepo'].sharedpath |
3621 sharedpath = createopts[b'sharedrepo'].sharedpath |
3608 |
3622 |
3609 if createopts.get('sharedrelative'): |
3623 if createopts.get(b'sharedrelative'): |
3610 try: |
3624 try: |
3611 sharedpath = os.path.relpath(sharedpath, hgvfs.base) |
3625 sharedpath = os.path.relpath(sharedpath, hgvfs.base) |
3612 except (IOError, ValueError) as e: |
3626 except (IOError, ValueError) as e: |
3613 # ValueError is raised on Windows if the drive letters differ |
3627 # ValueError is raised on Windows if the drive letters differ |
3614 # on each path. |
3628 # on each path. |
3615 raise error.Abort( |
3629 raise error.Abort( |
3616 _('cannot calculate relative path'), |
3630 _(b'cannot calculate relative path'), |
3617 hint=stringutil.forcebytestr(e), |
3631 hint=stringutil.forcebytestr(e), |
3618 ) |
3632 ) |
3619 |
3633 |
3620 if not wdirvfs.exists(): |
3634 if not wdirvfs.exists(): |
3621 wdirvfs.makedirs() |
3635 wdirvfs.makedirs() |
3622 |
3636 |
3623 hgvfs.makedir(notindexed=True) |
3637 hgvfs.makedir(notindexed=True) |
3624 if 'sharedrepo' not in createopts: |
3638 if b'sharedrepo' not in createopts: |
3625 hgvfs.mkdir(b'cache') |
3639 hgvfs.mkdir(b'cache') |
3626 hgvfs.mkdir(b'wcache') |
3640 hgvfs.mkdir(b'wcache') |
3627 |
3641 |
3628 if b'store' in requirements and 'sharedrepo' not in createopts: |
3642 if b'store' in requirements and b'sharedrepo' not in createopts: |
3629 hgvfs.mkdir(b'store') |
3643 hgvfs.mkdir(b'store') |
3630 |
3644 |
3631 # We create an invalid changelog outside the store so very old |
3645 # We create an invalid changelog outside the store so very old |
3632 # Mercurial versions (which didn't know about the requirements |
3646 # Mercurial versions (which didn't know about the requirements |
3633 # file) encounter an error on reading the changelog. This |
3647 # file) encounter an error on reading the changelog. This |
3643 ) |
3657 ) |
3644 |
3658 |
3645 scmutil.writerequires(hgvfs, requirements) |
3659 scmutil.writerequires(hgvfs, requirements) |
3646 |
3660 |
3647 # Write out file telling readers where to find the shared store. |
3661 # Write out file telling readers where to find the shared store. |
3648 if 'sharedrepo' in createopts: |
3662 if b'sharedrepo' in createopts: |
3649 hgvfs.write(b'sharedpath', sharedpath) |
3663 hgvfs.write(b'sharedpath', sharedpath) |
3650 |
3664 |
3651 if createopts.get('shareditems'): |
3665 if createopts.get(b'shareditems'): |
3652 shared = b'\n'.join(sorted(createopts['shareditems'])) + b'\n' |
3666 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n' |
3653 hgvfs.write(b'shared', shared) |
3667 hgvfs.write(b'shared', shared) |
3654 |
3668 |
3655 |
3669 |
3656 def poisonrepository(repo): |
3670 def poisonrepository(repo): |
3657 """Poison a repository instance so it can no longer be used.""" |
3671 """Poison a repository instance so it can no longer be used.""" |
3667 def __getattribute__(self, item): |
3681 def __getattribute__(self, item): |
3668 if item == r'close': |
3682 if item == r'close': |
3669 return object.__getattribute__(self, item) |
3683 return object.__getattribute__(self, item) |
3670 |
3684 |
3671 raise error.ProgrammingError( |
3685 raise error.ProgrammingError( |
3672 'repo instances should not be used ' 'after unshare' |
3686 b'repo instances should not be used ' b'after unshare' |
3673 ) |
3687 ) |
3674 |
3688 |
3675 def close(self): |
3689 def close(self): |
3676 pass |
3690 pass |
3677 |
3691 |