26 obsutil, |
26 obsutil, |
27 phases, |
27 phases, |
28 pycompat, |
28 pycompat, |
29 util, |
29 util, |
30 ) |
30 ) |
31 from .utils import ( |
31 from .utils import stringutil |
32 stringutil, |
32 |
33 ) |
33 |
34 |
34 def backupbundle( |
35 def backupbundle(repo, bases, heads, node, suffix, compress=True, |
35 repo, bases, heads, node, suffix, compress=True, obsolescence=True |
36 obsolescence=True): |
36 ): |
37 """create a bundle with the specified revisions as a backup""" |
37 """create a bundle with the specified revisions as a backup""" |
38 |
38 |
39 backupdir = "strip-backup" |
39 backupdir = "strip-backup" |
40 vfs = repo.vfs |
40 vfs = repo.vfs |
41 if not vfs.isdir(backupdir): |
41 if not vfs.isdir(backupdir): |
43 |
43 |
44 # Include a hash of all the nodes in the filename for uniqueness |
44 # Include a hash of all the nodes in the filename for uniqueness |
45 allcommits = repo.set('%ln::%ln', bases, heads) |
45 allcommits = repo.set('%ln::%ln', bases, heads) |
46 allhashes = sorted(c.hex() for c in allcommits) |
46 allhashes = sorted(c.hex() for c in allcommits) |
47 totalhash = hashlib.sha1(''.join(allhashes)).digest() |
47 totalhash = hashlib.sha1(''.join(allhashes)).digest() |
48 name = "%s/%s-%s-%s.hg" % (backupdir, short(node), |
48 name = "%s/%s-%s-%s.hg" % ( |
49 hex(totalhash[:4]), suffix) |
49 backupdir, |
|
50 short(node), |
|
51 hex(totalhash[:4]), |
|
52 suffix, |
|
53 ) |
50 |
54 |
51 cgversion = changegroup.localversion(repo) |
55 cgversion = changegroup.localversion(repo) |
52 comp = None |
56 comp = None |
53 if cgversion != '01': |
57 if cgversion != '01': |
54 bundletype = "HG20" |
58 bundletype = "HG20" |
63 contentopts = { |
67 contentopts = { |
64 'cg.version': cgversion, |
68 'cg.version': cgversion, |
65 'obsolescence': obsolescence, |
69 'obsolescence': obsolescence, |
66 'phases': True, |
70 'phases': True, |
67 } |
71 } |
68 return bundle2.writenewbundle(repo.ui, repo, 'strip', name, bundletype, |
72 return bundle2.writenewbundle( |
69 outgoing, contentopts, vfs, compression=comp) |
73 repo.ui, |
|
74 repo, |
|
75 'strip', |
|
76 name, |
|
77 bundletype, |
|
78 outgoing, |
|
79 contentopts, |
|
80 vfs, |
|
81 compression=comp, |
|
82 ) |
|
83 |
70 |
84 |
71 def _collectfiles(repo, striprev): |
85 def _collectfiles(repo, striprev): |
72 """find out the filelogs affected by the strip""" |
86 """find out the filelogs affected by the strip""" |
73 files = set() |
87 files = set() |
74 |
88 |
75 for x in pycompat.xrange(striprev, len(repo)): |
89 for x in pycompat.xrange(striprev, len(repo)): |
76 files.update(repo[x].files()) |
90 files.update(repo[x].files()) |
77 |
91 |
78 return sorted(files) |
92 return sorted(files) |
79 |
93 |
|
94 |
80 def _collectrevlog(revlog, striprev): |
95 def _collectrevlog(revlog, striprev): |
81 _, brokenset = revlog.getstrippoint(striprev) |
96 _, brokenset = revlog.getstrippoint(striprev) |
82 return [revlog.linkrev(r) for r in brokenset] |
97 return [revlog.linkrev(r) for r in brokenset] |
|
98 |
83 |
99 |
84 def _collectbrokencsets(repo, files, striprev): |
100 def _collectbrokencsets(repo, files, striprev): |
85 """return the changesets which will be broken by the truncation""" |
101 """return the changesets which will be broken by the truncation""" |
86 s = set() |
102 s = set() |
87 |
103 |
89 s.update(_collectrevlog(revlog, striprev)) |
105 s.update(_collectrevlog(revlog, striprev)) |
90 for fname in files: |
106 for fname in files: |
91 s.update(_collectrevlog(repo.file(fname), striprev)) |
107 s.update(_collectrevlog(repo.file(fname), striprev)) |
92 |
108 |
93 return s |
109 return s |
|
110 |
94 |
111 |
95 def strip(ui, repo, nodelist, backup=True, topic='backup'): |
112 def strip(ui, repo, nodelist, backup=True, topic='backup'): |
96 # This function requires the caller to lock the repo, but it operates |
113 # This function requires the caller to lock the repo, but it operates |
97 # within a transaction of its own, and thus requires there to be no current |
114 # within a transaction of its own, and thus requires there to be no current |
98 # transaction when it is called. |
115 # transaction when it is called. |
149 |
166 |
150 stripobsidx = obsmarkers = () |
167 stripobsidx = obsmarkers = () |
151 if repo.ui.configbool('devel', 'strip-obsmarkers'): |
168 if repo.ui.configbool('devel', 'strip-obsmarkers'): |
152 obsmarkers = obsutil.exclusivemarkers(repo, stripbases) |
169 obsmarkers = obsutil.exclusivemarkers(repo, stripbases) |
153 if obsmarkers: |
170 if obsmarkers: |
154 stripobsidx = [i for i, m in enumerate(repo.obsstore) |
171 stripobsidx = [ |
155 if m in obsmarkers] |
172 i for i, m in enumerate(repo.obsstore) if m in obsmarkers |
|
173 ] |
156 |
174 |
157 newbmtarget, updatebm = _bookmarkmovements(repo, tostrip) |
175 newbmtarget, updatebm = _bookmarkmovements(repo, tostrip) |
158 |
176 |
159 backupfile = None |
177 backupfile = None |
160 node = nodelist[-1] |
178 node = nodelist[-1] |
167 # |
185 # |
168 # We do not include obsolescence, it might re-introduce prune markers |
186 # We do not include obsolescence, it might re-introduce prune markers |
169 # we are trying to strip. This is harmless since the stripped markers |
187 # we are trying to strip. This is harmless since the stripped markers |
170 # are already backed up and we did not touched the markers for the |
188 # are already backed up and we did not touched the markers for the |
171 # saved changesets. |
189 # saved changesets. |
172 tmpbundlefile = backupbundle(repo, savebases, saveheads, node, 'temp', |
190 tmpbundlefile = backupbundle( |
173 compress=False, obsolescence=False) |
191 repo, |
|
192 savebases, |
|
193 saveheads, |
|
194 node, |
|
195 'temp', |
|
196 compress=False, |
|
197 obsolescence=False, |
|
198 ) |
174 |
199 |
175 with ui.uninterruptible(): |
200 with ui.uninterruptible(): |
176 try: |
201 try: |
177 with repo.transaction("strip") as tr: |
202 with repo.transaction("strip") as tr: |
178 # TODO this code violates the interface abstraction of the |
203 # TODO this code violates the interface abstraction of the |
211 tmpbundleurl = 'bundle:' + vfs.join(tmpbundlefile) |
236 tmpbundleurl = 'bundle:' + vfs.join(tmpbundlefile) |
212 txnname = 'strip' |
237 txnname = 'strip' |
213 if not isinstance(gen, bundle2.unbundle20): |
238 if not isinstance(gen, bundle2.unbundle20): |
214 txnname = "strip\n%s" % util.hidepassword(tmpbundleurl) |
239 txnname = "strip\n%s" % util.hidepassword(tmpbundleurl) |
215 with repo.transaction(txnname) as tr: |
240 with repo.transaction(txnname) as tr: |
216 bundle2.applybundle(repo, gen, tr, source='strip', |
241 bundle2.applybundle( |
217 url=tmpbundleurl) |
242 repo, gen, tr, source='strip', url=tmpbundleurl |
|
243 ) |
218 if not repo.ui.verbose: |
244 if not repo.ui.verbose: |
219 repo.ui.popbuffer() |
245 repo.ui.popbuffer() |
220 f.close() |
246 f.close() |
221 |
247 |
222 with repo.transaction('repair') as tr: |
248 with repo.transaction('repair') as tr: |
227 for undovfs, undofile in repo.undofiles(): |
253 for undovfs, undofile in repo.undofiles(): |
228 try: |
254 try: |
229 undovfs.unlink(undofile) |
255 undovfs.unlink(undofile) |
230 except OSError as e: |
256 except OSError as e: |
231 if e.errno != errno.ENOENT: |
257 if e.errno != errno.ENOENT: |
232 ui.warn(_('error removing %s: %s\n') % |
258 ui.warn( |
233 (undovfs.join(undofile), |
259 _('error removing %s: %s\n') |
234 stringutil.forcebytestr(e))) |
260 % ( |
235 |
261 undovfs.join(undofile), |
236 except: # re-raises |
262 stringutil.forcebytestr(e), |
|
263 ) |
|
264 ) |
|
265 |
|
266 except: # re-raises |
237 if backupfile: |
267 if backupfile: |
238 ui.warn(_("strip failed, backup bundle stored in '%s'\n") |
268 ui.warn( |
239 % vfs.join(backupfile)) |
269 _("strip failed, backup bundle stored in '%s'\n") |
|
270 % vfs.join(backupfile) |
|
271 ) |
240 if tmpbundlefile: |
272 if tmpbundlefile: |
241 ui.warn(_("strip failed, unrecovered changes stored in '%s'\n") |
273 ui.warn( |
242 % vfs.join(tmpbundlefile)) |
274 _("strip failed, unrecovered changes stored in '%s'\n") |
243 ui.warn(_("(fix the problem, then recover the changesets with " |
275 % vfs.join(tmpbundlefile) |
244 "\"hg unbundle '%s'\")\n") % vfs.join(tmpbundlefile)) |
276 ) |
|
277 ui.warn( |
|
278 _( |
|
279 "(fix the problem, then recover the changesets with " |
|
280 "\"hg unbundle '%s'\")\n" |
|
281 ) |
|
282 % vfs.join(tmpbundlefile) |
|
283 ) |
245 raise |
284 raise |
246 else: |
285 else: |
247 if tmpbundlefile: |
286 if tmpbundlefile: |
248 # Remove temporary bundle only if there were no exceptions |
287 # Remove temporary bundle only if there were no exceptions |
249 vfs.unlink(tmpbundlefile) |
288 vfs.unlink(tmpbundlefile) |
250 |
289 |
251 repo.destroyed() |
290 repo.destroyed() |
252 # return the backup file path (or None if 'backup' was False) so |
291 # return the backup file path (or None if 'backup' was False) so |
253 # extensions can use it |
292 # extensions can use it |
254 return backupfile |
293 return backupfile |
|
294 |
255 |
295 |
256 def softstrip(ui, repo, nodelist, backup=True, topic='backup'): |
296 def softstrip(ui, repo, nodelist, backup=True, topic='backup'): |
257 """perform a "soft" strip using the archived phase""" |
297 """perform a "soft" strip using the archived phase""" |
258 tostrip = [c.node() for c in repo.set('sort(%ln::)', nodelist)] |
298 tostrip = [c.node() for c in repo.set('sort(%ln::)', nodelist)] |
259 if not tostrip: |
299 if not tostrip: |
290 newbmtarget = repo[newbmtarget.first()].node() |
330 newbmtarget = repo[newbmtarget.first()].node() |
291 else: |
331 else: |
292 newbmtarget = '.' |
332 newbmtarget = '.' |
293 return newbmtarget, updatebm |
333 return newbmtarget, updatebm |
294 |
334 |
|
335 |
295 def _createstripbackup(repo, stripbases, node, topic): |
336 def _createstripbackup(repo, stripbases, node, topic): |
296 # backup the changeset we are about to strip |
337 # backup the changeset we are about to strip |
297 vfs = repo.vfs |
338 vfs = repo.vfs |
298 cl = repo.changelog |
339 cl = repo.changelog |
299 backupfile = backupbundle(repo, stripbases, cl.heads(), node, topic) |
340 backupfile = backupbundle(repo, stripbases, cl.heads(), node, topic) |
300 repo.ui.status(_("saved backup bundle to %s\n") % |
341 repo.ui.status(_("saved backup bundle to %s\n") % vfs.join(backupfile)) |
301 vfs.join(backupfile)) |
342 repo.ui.log( |
302 repo.ui.log("backupbundle", "saved backup bundle to %s\n", |
343 "backupbundle", "saved backup bundle to %s\n", vfs.join(backupfile) |
303 vfs.join(backupfile)) |
344 ) |
304 return backupfile |
345 return backupfile |
|
346 |
305 |
347 |
306 def safestriproots(ui, repo, nodes): |
348 def safestriproots(ui, repo, nodes): |
307 """return list of roots of nodes where descendants are covered by nodes""" |
349 """return list of roots of nodes where descendants are covered by nodes""" |
308 torev = repo.unfiltered().changelog.rev |
350 torev = repo.unfiltered().changelog.rev |
309 revs = set(torev(n) for n in nodes) |
351 revs = set(torev(n) for n in nodes) |
314 revset = '%ld - ( ::( (roots(%ld):: and not _phase(%s)) -%ld) )' |
356 revset = '%ld - ( ::( (roots(%ld):: and not _phase(%s)) -%ld) )' |
315 tostrip = set(repo.revs(revset, revs, revs, phases.internal, revs)) |
357 tostrip = set(repo.revs(revset, revs, revs, phases.internal, revs)) |
316 notstrip = revs - tostrip |
358 notstrip = revs - tostrip |
317 if notstrip: |
359 if notstrip: |
318 nodestr = ', '.join(sorted(short(repo[n].node()) for n in notstrip)) |
360 nodestr = ', '.join(sorted(short(repo[n].node()) for n in notstrip)) |
319 ui.warn(_('warning: orphaned descendants detected, ' |
361 ui.warn( |
320 'not stripping %s\n') % nodestr) |
362 _('warning: orphaned descendants detected, ' 'not stripping %s\n') |
|
363 % nodestr |
|
364 ) |
321 return [c.node() for c in repo.set('roots(%ld)', tostrip)] |
365 return [c.node() for c in repo.set('roots(%ld)', tostrip)] |
|
366 |
322 |
367 |
323 class stripcallback(object): |
368 class stripcallback(object): |
324 """used as a transaction postclose callback""" |
369 """used as a transaction postclose callback""" |
325 |
370 |
326 def __init__(self, ui, repo, backup, topic): |
371 def __init__(self, ui, repo, backup, topic): |
335 |
380 |
336 def __call__(self, tr): |
381 def __call__(self, tr): |
337 roots = safestriproots(self.ui, self.repo, self.nodelist) |
382 roots = safestriproots(self.ui, self.repo, self.nodelist) |
338 if roots: |
383 if roots: |
339 strip(self.ui, self.repo, roots, self.backup, self.topic) |
384 strip(self.ui, self.repo, roots, self.backup, self.topic) |
|
385 |
340 |
386 |
341 def delayedstrip(ui, repo, nodelist, topic=None, backup=True): |
387 def delayedstrip(ui, repo, nodelist, topic=None, backup=True): |
342 """like strip, but works inside transaction and won't strip irreverent revs |
388 """like strip, but works inside transaction and won't strip irreverent revs |
343 |
389 |
344 nodelist must explicitly contain all descendants. Otherwise a warning will |
390 nodelist must explicitly contain all descendants. Otherwise a warning will |
359 tr.addpostclose('\xffstrip', callback) |
405 tr.addpostclose('\xffstrip', callback) |
360 if topic: |
406 if topic: |
361 callback.topic = topic |
407 callback.topic = topic |
362 callback.addnodes(nodelist) |
408 callback.addnodes(nodelist) |
363 |
409 |
|
410 |
def stripmanifest(repo, striprev, tr, files):
    """truncate every manifest revlog at 'striprev' within transaction 'tr'

    'files' is currently unused but kept for interface compatibility.
    """
    for revlog in manifestrevlogs(repo):
        revlog.strip(striprev, tr)
|
414 |
367 |
415 |
def manifestrevlogs(repo):
    """yield every manifest storage: the root manifest, plus one per
    directory when tree manifests are in use"""
    yield repo.manifestlog.getstorage(b'')
    if 'treemanifest' in repo.requirements:
        # This logic is safe if treemanifest isn't enabled, but also
        # pointless, so we skip it if treemanifest isn't enabled.
        for unencoded, encoded, size in repo.store.datafiles():
            if unencoded.startswith('meta/') and unencoded.endswith(
                '00manifest.i'
            ):
                # 'meta/<dir>/00manifest.i' -> '<dir>/'
                dir = unencoded[5:-12]
                yield repo.manifestlog.getstorage(dir)
|
427 |
378 |
428 |
379 def rebuildfncache(ui, repo): |
429 def rebuildfncache(ui, repo): |
380 """Rebuilds the fncache file from repo history. |
430 """Rebuilds the fncache file from repo history. |
381 |
431 |
382 Missing entries will be added. Extra entries will be removed. |
432 Missing entries will be added. Extra entries will be removed. |
383 """ |
433 """ |
384 repo = repo.unfiltered() |
434 repo = repo.unfiltered() |
385 |
435 |
386 if 'fncache' not in repo.requirements: |
436 if 'fncache' not in repo.requirements: |
387 ui.warn(_('(not rebuilding fncache because repository does not ' |
437 ui.warn( |
388 'support fncache)\n')) |
438 _( |
|
439 '(not rebuilding fncache because repository does not ' |
|
440 'support fncache)\n' |
|
441 ) |
|
442 ) |
389 return |
443 return |
390 |
444 |
391 with repo.lock(): |
445 with repo.lock(): |
392 fnc = repo.store.fncache |
446 fnc = repo.store.fncache |
393 fnc.ensureloaded(warn=ui.warn) |
447 fnc.ensureloaded(warn=ui.warn) |
394 |
448 |
395 oldentries = set(fnc.entries) |
449 oldentries = set(fnc.entries) |
396 newentries = set() |
450 newentries = set() |
397 seenfiles = set() |
451 seenfiles = set() |
398 |
452 |
399 progress = ui.makeprogress(_('rebuilding'), unit=_('changesets'), |
453 progress = ui.makeprogress( |
400 total=len(repo)) |
454 _('rebuilding'), unit=_('changesets'), total=len(repo) |
|
455 ) |
401 for rev in repo: |
456 for rev in repo: |
402 progress.update(rev) |
457 progress.update(rev) |
403 |
458 |
404 ctx = repo[rev] |
459 ctx = repo[rev] |
405 for f in ctx.files(): |
460 for f in ctx.files(): |
436 ui.write(_('removing %s\n') % p) |
491 ui.write(_('removing %s\n') % p) |
437 for p in sorted(newentries - oldentries): |
492 for p in sorted(newentries - oldentries): |
438 ui.write(_('adding %s\n') % p) |
493 ui.write(_('adding %s\n') % p) |
439 |
494 |
440 if addcount or removecount: |
495 if addcount or removecount: |
441 ui.write(_('%d items added, %d removed from fncache\n') % |
496 ui.write( |
442 (addcount, removecount)) |
497 _('%d items added, %d removed from fncache\n') |
|
498 % (addcount, removecount) |
|
499 ) |
443 fnc.entries = newentries |
500 fnc.entries = newentries |
444 fnc._dirty = True |
501 fnc._dirty = True |
445 |
502 |
446 with repo.transaction('fncache') as tr: |
503 with repo.transaction('fncache') as tr: |
447 fnc.write(tr) |
504 fnc.write(tr) |
448 else: |
505 else: |
449 ui.write(_('fncache already up to date\n')) |
506 ui.write(_('fncache already up to date\n')) |
|
507 |
450 |
508 |
451 def deleteobsmarkers(obsstore, indices): |
509 def deleteobsmarkers(obsstore, indices): |
452 """Delete some obsmarkers from obsstore and return how many were deleted |
510 """Delete some obsmarkers from obsstore and return how many were deleted |
453 |
511 |
454 'indices' is a list of ints which are the indices |
512 'indices' is a list of ints which are the indices |