44 ) |
44 ) |
45 |
45 |
# Single extension helper instance; all @eh.wrapfunction registrations in
# this module are collected here and applied when the extension loads.
eh = exthelper.exthelper()
47 |
47 |
48 |
48 |
@eh.wrapfunction(localrepo, b'makefilestorage')
def localrepomakefilestorage(orig, requirements, features, **kwargs):
    """Advertise the LFS repo feature when the b'lfs' requirement is present.

    Adds ``repository.REPO_FEATURE_LFS`` to ``features`` before delegating
    to the wrapped ``makefilestorage``.
    """
    if b'lfs' in requirements:
        features.add(repository.REPO_FEATURE_LFS)

    return orig(requirements=requirements, features=features, **kwargs)
55 |
55 |
56 |
56 |
@eh.wrapfunction(changegroup, b'allsupportedversions')
def allsupportedversions(orig, ui):
    """Add changegroup version b'03' to the set returned by the wrapped
    ``allsupportedversions``."""
    versions = orig(ui)
    versions.add(b'03')
    return versions
62 |
62 |
63 |
63 |
@eh.wrapfunction(wireprotov1server, b'_capabilities')
def _capabilities(orig, repo, proto):
    """Wrap server command to announce lfs server capability.

    Only announces anything when the local blob store has been set up on
    ``repo.svfs`` (i.e. reposetup() ran for this repo).
    """
    caps = orig(repo, proto)
    if util.safehasattr(repo.svfs, b'lfslocalblobstore'):
        # Advertise a slightly different capability when lfs is *required*, so
        # that the client knows it MUST load the extension. If lfs is not
        # required on the server, there's no reason to autoload the extension
        # on the client.
        if b'lfs' in repo.requirements:
            caps.append(b'lfs-serve')

        caps.append(b'lfs')
    return caps
78 |
78 |
79 |
79 |
def bypasscheckhash(self, text):
    """Flag-processor hash hook: always return False.

    Presumably installed so that raw LFS pointer payloads are never
    validated against the filelog node hash — TODO confirm against the
    flag-processor registration site (not visible in this chunk).
    """
    return False
99 text = store.read(oid, verify=False) |
99 text = store.read(oid, verify=False) |
100 |
100 |
101 # pack hg filelog metadata |
101 # pack hg filelog metadata |
102 hgmeta = {} |
102 hgmeta = {} |
103 for k in p.keys(): |
103 for k in p.keys(): |
104 if k.startswith('x-hg-'): |
104 if k.startswith(b'x-hg-'): |
105 name = k[len('x-hg-') :] |
105 name = k[len(b'x-hg-') :] |
106 hgmeta[name] = p[k] |
106 hgmeta[name] = p[k] |
107 if hgmeta or text.startswith('\1\n'): |
107 if hgmeta or text.startswith(b'\1\n'): |
108 text = storageutil.packmeta(hgmeta, text) |
108 text = storageutil.packmeta(hgmeta, text) |
109 |
109 |
110 return (text, True, {}) |
110 return (text, True, {}) |
111 |
111 |
112 |
112 |
120 # git-lfs only supports sha256 |
120 # git-lfs only supports sha256 |
121 oid = hex(hashlib.sha256(text).digest()) |
121 oid = hex(hashlib.sha256(text).digest()) |
122 self.opener.lfslocalblobstore.write(oid, text) |
122 self.opener.lfslocalblobstore.write(oid, text) |
123 |
123 |
124 # replace contents with metadata |
124 # replace contents with metadata |
125 longoid = 'sha256:%s' % oid |
125 longoid = b'sha256:%s' % oid |
126 metadata = pointer.gitlfspointer(oid=longoid, size='%d' % len(text)) |
126 metadata = pointer.gitlfspointer(oid=longoid, size=b'%d' % len(text)) |
127 |
127 |
128 # by default, we expect the content to be binary. however, LFS could also |
128 # by default, we expect the content to be binary. however, LFS could also |
129 # be used for non-binary content. add a special entry for non-binary data. |
129 # be used for non-binary content. add a special entry for non-binary data. |
130 # this will be used by filectx.isbinary(). |
130 # this will be used by filectx.isbinary(). |
131 if not stringutil.binary(text): |
131 if not stringutil.binary(text): |
132 # not hg filelog metadata (affecting commit hash), no "x-hg-" prefix |
132 # not hg filelog metadata (affecting commit hash), no "x-hg-" prefix |
133 metadata['x-is-binary'] = '0' |
133 metadata[b'x-is-binary'] = b'0' |
134 |
134 |
135 # translate hg filelog metadata to lfs metadata with "x-hg-" prefix |
135 # translate hg filelog metadata to lfs metadata with "x-hg-" prefix |
136 if hgmeta is not None: |
136 if hgmeta is not None: |
137 for k, v in hgmeta.iteritems(): |
137 for k, v in hgmeta.iteritems(): |
138 metadata['x-hg-%s' % k] = v |
138 metadata[b'x-hg-%s' % k] = v |
139 |
139 |
140 rawtext = metadata.serialize() |
140 rawtext = metadata.serialize() |
141 return (rawtext, False) |
141 return (rawtext, False) |
142 |
142 |
143 |
143 |
168 node=None, |
168 node=None, |
169 flags=revlog.REVIDX_DEFAULT_FLAGS, |
169 flags=revlog.REVIDX_DEFAULT_FLAGS, |
170 **kwds |
170 **kwds |
171 ): |
171 ): |
172 # The matcher isn't available if reposetup() wasn't called. |
172 # The matcher isn't available if reposetup() wasn't called. |
173 lfstrack = self._revlog.opener.options.get('lfstrack') |
173 lfstrack = self._revlog.opener.options.get(b'lfstrack') |
174 |
174 |
175 if lfstrack: |
175 if lfstrack: |
176 textlen = len(text) |
176 textlen = len(text) |
177 # exclude hg rename meta from file size |
177 # exclude hg rename meta from file size |
178 meta, offset = storageutil.parsemeta(text) |
178 meta, offset = storageutil.parsemeta(text) |
201 if _islfs(self, node): |
201 if _islfs(self, node): |
202 rawtext = self._revlog.rawdata(node) |
202 rawtext = self._revlog.rawdata(node) |
203 if not rawtext: |
203 if not rawtext: |
204 return False |
204 return False |
205 metadata = pointer.deserialize(rawtext) |
205 metadata = pointer.deserialize(rawtext) |
206 if 'x-hg-copy' in metadata and 'x-hg-copyrev' in metadata: |
206 if b'x-hg-copy' in metadata and b'x-hg-copyrev' in metadata: |
207 return metadata['x-hg-copy'], bin(metadata['x-hg-copyrev']) |
207 return metadata[b'x-hg-copy'], bin(metadata[b'x-hg-copyrev']) |
208 else: |
208 else: |
209 return False |
209 return False |
210 return orig(self, node) |
210 return orig(self, node) |
211 |
211 |
212 |
212 |
def filelogsize(orig, self, rev):
    """Answer a filelog size query from LFS pointer metadata when possible.

    Falls back to the wrapped implementation for non-LFS revisions.
    """
    if _islfs(self, rev=rev):
        # fast path: use lfs metadata to answer size
        rawtext = self._revlog.rawdata(rev)
        metadata = pointer.deserialize(rawtext)
        return int(metadata[b'size'])
    return orig(self, rev)
221 |
221 |
222 |
222 |
@eh.wrapfunction(context.basefilectx, b'cmp')
def filectxcmp(orig, self, fctx):
    """returns True if text is different than fctx"""
    # some fctx (ex. hg-git) is not based on basefilectx and do not have islfs
    if self.islfs() and getattr(fctx, 'islfs', lambda: False)():
        # fast path: compare LFS oids instead of materializing the contents
        p1 = pointer.deserialize(self.rawdata())
        p2 = pointer.deserialize(fctx.rawdata())
        return p1.oid() != p2.oid()
    return orig(self, fctx)
233 |
233 |
234 |
234 |
@eh.wrapfunction(context.basefilectx, b'isbinary')
def filectxisbinary(orig, self):
    """Answer isbinary() from LFS pointer metadata when possible."""
    if self.islfs():
        # fast path: use lfs metadata to answer isbinary
        metadata = pointer.deserialize(self.rawdata())
        # if lfs metadata says nothing, assume it's binary by default
        return bool(int(metadata.get(b'x-is-binary', 1)))
    return orig(self)
243 |
243 |
244 |
244 |
def filectxislfs(self):
    """Return True if this file context's revision is stored as an LFS
    pointer (delegates to the module-level _islfs helper)."""
    return _islfs(self.filelog(), self.filenode())
247 |
247 |
248 |
248 |
@eh.wrapfunction(cmdutil, b'_updatecatformatter')
def _updatecatformatter(orig, fm, ctx, matcher, path, decode):
    """Run the wrapped cat formatter, then also record the file's raw data
    (the undecoded pointer text for LFS files) on the formatter."""
    orig(fm, ctx, matcher, path, decode)
    fm.data(rawdata=ctx[path].rawdata())
253 |
253 |
254 |
254 |
255 @eh.wrapfunction(scmutil, 'wrapconvertsink') |
255 @eh.wrapfunction(scmutil, b'wrapconvertsink') |
256 def convertsink(orig, sink): |
256 def convertsink(orig, sink): |
257 sink = orig(sink) |
257 sink = orig(sink) |
258 if sink.repotype == 'hg': |
258 if sink.repotype == b'hg': |
259 |
259 |
260 class lfssink(sink.__class__): |
260 class lfssink(sink.__class__): |
261 def putcommit( |
261 def putcommit( |
262 self, |
262 self, |
263 files, |
263 files, |
279 revmap, |
279 revmap, |
280 full, |
280 full, |
281 cleanp2, |
281 cleanp2, |
282 ) |
282 ) |
283 |
283 |
284 if 'lfs' not in self.repo.requirements: |
284 if b'lfs' not in self.repo.requirements: |
285 ctx = self.repo[node] |
285 ctx = self.repo[node] |
286 |
286 |
287 # The file list may contain removed files, so check for |
287 # The file list may contain removed files, so check for |
288 # membership before assuming it is in the context. |
288 # membership before assuming it is in the context. |
289 if any(f in ctx and ctx[f].islfs() for f, n in files): |
289 if any(f in ctx and ctx[f].islfs() for f, n in files): |
290 self.repo.requirements.add('lfs') |
290 self.repo.requirements.add(b'lfs') |
291 self.repo._writerequirements() |
291 self.repo._writerequirements() |
292 |
292 |
293 return node |
293 return node |
294 |
294 |
295 sink.__class__ = lfssink |
295 sink.__class__ = lfssink |
297 return sink |
297 return sink |
298 |
298 |
299 |
299 |
# bundlerepo uses "vfsmod.readonlyvfs(othervfs)", we need to make sure lfs
# options and blob stores are passed from othervfs to the new readonlyvfs.
@eh.wrapfunction(vfsmod.readonlyvfs, b'__init__')
def vfsinit(orig, self, othervfs):
    orig(self, othervfs)
    # copy lfs related options
    for k, v in othervfs.options.items():
        if k.startswith(b'lfs'):
            self.options[k] = v
    # also copy lfs blobstores. note: this can run before reposetup, so lfs
    # blobstore attributes are not always ready at this time.
    # NOTE(review): the attribute names below are bytes; util.safehasattr may
    # accept them, but builtin getattr/setattr require str on py3 — confirm.
    for name in [b'lfslocalblobstore', b'lfsremoteblobstore']:
        if util.safehasattr(othervfs, name):
            setattr(self, name, getattr(othervfs, name))
314 |
314 |
315 |
315 |
316 def _prefetchfiles(repo, revs, match): |
316 def _prefetchfiles(repo, revs, match): |
317 """Ensure that required LFS blobs are present, fetching them as a group if |
317 """Ensure that required LFS blobs are present, fetching them as a group if |
318 needed.""" |
318 needed.""" |
319 if not util.safehasattr(repo.svfs, 'lfslocalblobstore'): |
319 if not util.safehasattr(repo.svfs, b'lfslocalblobstore'): |
320 return |
320 return |
321 |
321 |
322 pointers = [] |
322 pointers = [] |
323 oids = set() |
323 oids = set() |
324 localstore = repo.svfs.lfslocalblobstore |
324 localstore = repo.svfs.lfslocalblobstore |
338 blobstore.remote(repo).readbatch(pointers, localstore) |
338 blobstore.remote(repo).readbatch(pointers, localstore) |
339 |
339 |
340 |
340 |
def _canskipupload(repo):
    """Return True when uploading LFS blobs would be a no-op for ``repo``."""
    # Skip if this hasn't been passed to reposetup()
    if not util.safehasattr(repo.svfs, b'lfsremoteblobstore'):
        return True

    # if remotestore is a null store, upload is a no-op and can be skipped
    return isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote)
348 |
348 |
349 |
349 |
def candownload(repo):
    """Return True when downloading LFS blobs can succeed for ``repo``."""
    # Skip if this hasn't been passed to reposetup()
    if not util.safehasattr(repo.svfs, b'lfsremoteblobstore'):
        return False

    # if remotestore is a null store, downloads will lead to nothing
    return not isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote)
357 |
357 |
375 the remote blobstore. |
375 the remote blobstore. |
376 """ |
376 """ |
377 return uploadblobsfromrevs(pushop.repo, pushop.outgoing.missing) |
377 return uploadblobsfromrevs(pushop.repo, pushop.outgoing.missing) |
378 |
378 |
379 |
379 |
380 @eh.wrapfunction(exchange, 'push') |
380 @eh.wrapfunction(exchange, b'push') |
381 def push(orig, repo, remote, *args, **kwargs): |
381 def push(orig, repo, remote, *args, **kwargs): |
382 """bail on push if the extension isn't enabled on remote when needed, and |
382 """bail on push if the extension isn't enabled on remote when needed, and |
383 update the remote store based on the destination path.""" |
383 update the remote store based on the destination path.""" |
384 if 'lfs' in repo.requirements: |
384 if b'lfs' in repo.requirements: |
385 # If the remote peer is for a local repo, the requirement tests in the |
385 # If the remote peer is for a local repo, the requirement tests in the |
386 # base class method enforce lfs support. Otherwise, some revisions in |
386 # base class method enforce lfs support. Otherwise, some revisions in |
387 # this repo use lfs, and the remote repo needs the extension loaded. |
387 # this repo use lfs, and the remote repo needs the extension loaded. |
388 if not remote.local() and not remote.capable('lfs'): |
388 if not remote.local() and not remote.capable(b'lfs'): |
389 # This is a copy of the message in exchange.push() when requirements |
389 # This is a copy of the message in exchange.push() when requirements |
390 # are missing between local repos. |
390 # are missing between local repos. |
391 m = _("required features are not supported in the destination: %s") |
391 m = _(b"required features are not supported in the destination: %s") |
392 raise error.Abort( |
392 raise error.Abort( |
393 m % 'lfs', hint=_('enable the lfs extension on the server') |
393 m % b'lfs', hint=_(b'enable the lfs extension on the server') |
394 ) |
394 ) |
395 |
395 |
396 # Repositories where this extension is disabled won't have the field. |
396 # Repositories where this extension is disabled won't have the field. |
397 # But if there's a requirement, then the extension must be loaded AND |
397 # But if there's a requirement, then the extension must be loaded AND |
398 # there may be blobs to push. |
398 # there may be blobs to push. |
405 else: |
405 else: |
406 return orig(repo, remote, *args, **kwargs) |
406 return orig(repo, remote, *args, **kwargs) |
407 |
407 |
408 |
408 |
409 # when writing a bundle via "hg bundle" command, upload related LFS blobs |
409 # when writing a bundle via "hg bundle" command, upload related LFS blobs |
410 @eh.wrapfunction(bundle2, 'writenewbundle') |
410 @eh.wrapfunction(bundle2, b'writenewbundle') |
411 def writenewbundle( |
411 def writenewbundle( |
412 orig, ui, repo, source, filename, bundletype, outgoing, *args, **kwargs |
412 orig, ui, repo, source, filename, bundletype, outgoing, *args, **kwargs |
413 ): |
413 ): |
414 """upload LFS blobs added by outgoing revisions on 'hg bundle'""" |
414 """upload LFS blobs added by outgoing revisions on 'hg bundle'""" |
415 uploadblobsfromrevs(repo, outgoing.missing) |
415 uploadblobsfromrevs(repo, outgoing.missing) |
418 ) |
418 ) |
419 |
419 |
420 |
420 |
421 def extractpointers(repo, revs): |
421 def extractpointers(repo, revs): |
422 """return a list of lfs pointers added by given revs""" |
422 """return a list of lfs pointers added by given revs""" |
423 repo.ui.debug('lfs: computing set of blobs to upload\n') |
423 repo.ui.debug(b'lfs: computing set of blobs to upload\n') |
424 pointers = {} |
424 pointers = {} |
425 |
425 |
426 makeprogress = repo.ui.makeprogress |
426 makeprogress = repo.ui.makeprogress |
427 with makeprogress(_('lfs search'), _('changesets'), len(revs)) as progress: |
427 with makeprogress( |
|
428 _(b'lfs search'), _(b'changesets'), len(revs) |
|
429 ) as progress: |
428 for r in revs: |
430 for r in revs: |
429 ctx = repo[r] |
431 ctx = repo[r] |
430 for p in pointersfromctx(ctx).values(): |
432 for p in pointersfromctx(ctx).values(): |
431 pointers[p.oid()] = p |
433 pointers[p.oid()] = p |
432 progress.increment() |
434 progress.increment() |
492 |
494 |
493 remoteblob = repo.svfs.lfsremoteblobstore |
495 remoteblob = repo.svfs.lfsremoteblobstore |
494 remoteblob.writebatch(pointers, repo.svfs.lfslocalblobstore) |
496 remoteblob.writebatch(pointers, repo.svfs.lfslocalblobstore) |
495 |
497 |
496 |
498 |
@eh.wrapfunction(upgrade, b'_finishdatamigration')
def upgradefinishdatamigration(orig, ui, srcrepo, dstrepo, requirements):
    """After an upgrade's data migration, hardlink/copy every local LFS blob
    from the source repo's blob store into the destination repo's."""
    orig(ui, srcrepo, dstrepo, requirements)

    # Skip if this hasn't been passed to reposetup()
    if util.safehasattr(
        srcrepo.svfs, b'lfslocalblobstore'
    ) and util.safehasattr(dstrepo.svfs, b'lfslocalblobstore'):
        srclfsvfs = srcrepo.svfs.lfslocalblobstore.vfs
        dstlfsvfs = dstrepo.svfs.lfslocalblobstore.vfs

        for dirpath, dirs, files in srclfsvfs.walk():
            for oid in files:
                ui.write(_(b'copying lfs blob %s\n') % oid)
                lfutil.link(srclfsvfs.join(oid), dstlfsvfs.join(oid))
512 |
514 |
513 |
515 |
@eh.wrapfunction(upgrade, b'preservedrequirements')
@eh.wrapfunction(upgrade, b'supporteddestrequirements')
def upgraderequirements(orig, repo):
    """Keep the b'lfs' requirement across 'hg debugupgraderepo' by adding it
    to both the preserved and the supported-destination requirement sets."""
    reqs = orig(repo)
    if b'lfs' in repo.requirements:
        reqs.add(b'lfs')
    return reqs