106 raise StopIteration |
110 raise StopIteration |
107 if pos == -1: |
111 if pos == -1: |
108 self.pos += 1 |
112 self.pos += 1 |
109 return data |
113 return data |
110 zeropos = data.find('\x00', pos) |
114 zeropos = data.find('\x00', pos) |
111 hashval = unhexlify(data, self.lm.extrainfo[self.pos], |
115 hashval = unhexlify(data, self.lm.extrainfo[self.pos], zeropos + 1, 40) |
112 zeropos + 1, 40) |
|
113 flags = self.lm._getflags(data, self.pos, zeropos) |
116 flags = self.lm._getflags(data, self.pos, zeropos) |
114 self.pos += 1 |
117 self.pos += 1 |
115 return (data[pos:zeropos], hashval, flags) |
118 return (data[pos:zeropos], hashval, flags) |
116 |
119 |
117 __next__ = next |
120 __next__ = next |
118 |
121 |
|
122 |
119 def unhexlify(data, extra, pos, length): |
123 def unhexlify(data, extra, pos, length): |
120 s = bin(data[pos:pos + length]) |
124 s = bin(data[pos : pos + length]) |
121 if extra: |
125 if extra: |
122 s += chr(extra & 0xff) |
126 s += chr(extra & 0xFF) |
123 return s |
127 return s |
|
128 |
124 |
129 |
125 def _cmp(a, b): |
130 def _cmp(a, b): |
126 return (a > b) - (a < b) |
131 return (a > b) - (a < b) |
|
132 |
127 |
133 |
128 class _lazymanifest(object): |
134 class _lazymanifest(object): |
129 """A pure python manifest backed by a byte string. It is supplimented with |
135 """A pure python manifest backed by a byte string. It is supplimented with |
130 internal lists as it is modified, until it is compacted back to a pure byte |
136 internal lists as it is modified, until it is compacted back to a pure byte |
131 string. |
137 string. |
162 return [] |
175 return [] |
163 pos = data.find("\n") |
176 pos = data.find("\n") |
164 if pos == -1 or data[-1:] != '\n': |
177 if pos == -1 or data[-1:] != '\n': |
165 raise ValueError("Manifest did not end in a newline.") |
178 raise ValueError("Manifest did not end in a newline.") |
166 positions = [0] |
179 positions = [0] |
167 prev = data[:data.find('\x00')] |
180 prev = data[: data.find('\x00')] |
168 while pos < len(data) - 1 and pos != -1: |
181 while pos < len(data) - 1 and pos != -1: |
169 positions.append(pos + 1) |
182 positions.append(pos + 1) |
170 nexts = data[pos + 1:data.find('\x00', pos + 1)] |
183 nexts = data[pos + 1 : data.find('\x00', pos + 1)] |
171 if nexts < prev: |
184 if nexts < prev: |
172 raise ValueError("Manifest lines not in sorted order.") |
185 raise ValueError("Manifest lines not in sorted order.") |
173 prev = nexts |
186 prev = nexts |
174 pos = data.find("\n", pos + 1) |
187 pos = data.find("\n", pos + 1) |
175 return positions |
188 return positions |
183 return self.data, pos |
196 return self.data, pos |
184 return self.extradata[-pos - 1], -1 |
197 return self.extradata[-pos - 1], -1 |
185 |
198 |
186 def _getkey(self, pos): |
199 def _getkey(self, pos): |
187 if pos >= 0: |
200 if pos >= 0: |
188 return self.data[pos:self.data.find('\x00', pos + 1)] |
201 return self.data[pos : self.data.find('\x00', pos + 1)] |
189 return self.extradata[-pos - 1][0] |
202 return self.extradata[-pos - 1][0] |
190 |
203 |
191 def bsearch(self, key): |
204 def bsearch(self, key): |
192 first = 0 |
205 first = 0 |
193 last = len(self.positions) - 1 |
206 last = len(self.positions) - 1 |
194 |
207 |
195 while first <= last: |
208 while first <= last: |
196 midpoint = (first + last)//2 |
209 midpoint = (first + last) // 2 |
197 nextpos = self.positions[midpoint] |
210 nextpos = self.positions[midpoint] |
198 candidate = self._getkey(nextpos) |
211 candidate = self._getkey(nextpos) |
199 r = _cmp(key, candidate) |
212 r = _cmp(key, candidate) |
200 if r == 0: |
213 if r == 0: |
201 return midpoint |
214 return midpoint |
257 def __delitem__(self, key): |
270 def __delitem__(self, key): |
258 needle, found = self.bsearch2(key) |
271 needle, found = self.bsearch2(key) |
259 if not found: |
272 if not found: |
260 raise KeyError |
273 raise KeyError |
261 cur = self.positions[needle] |
274 cur = self.positions[needle] |
262 self.positions = self.positions[:needle] + self.positions[needle + 1:] |
275 self.positions = self.positions[:needle] + self.positions[needle + 1 :] |
263 self.extrainfo = self.extrainfo[:needle] + self.extrainfo[needle + 1:] |
276 self.extrainfo = self.extrainfo[:needle] + self.extrainfo[needle + 1 :] |
264 if cur >= 0: |
277 if cur >= 0: |
265 # This does NOT unsort the list as far as the search functions are |
278 # This does NOT unsort the list as far as the search functions are |
266 # concerned, as they only examine lines mapped by self.positions. |
279 # concerned, as they only examine lines mapped by self.positions. |
267 self.data = self.data[:cur] + '\x00' + self.data[cur + 1:] |
280 self.data = self.data[:cur] + '\x00' + self.data[cur + 1 :] |
268 self.hasremovals = True |
281 self.hasremovals = True |
269 |
282 |
270 def __setitem__(self, key, value): |
283 def __setitem__(self, key, value): |
271 if not isinstance(key, bytes): |
284 if not isinstance(key, bytes): |
272 raise TypeError("setitem: manifest keys must be a byte string.") |
285 raise TypeError("setitem: manifest keys must be a byte string.") |
291 self.extradata.append((key, hashval, value[1])) |
304 self.extradata.append((key, hashval, value[1])) |
292 self.positions[needle] = -len(self.extradata) |
305 self.positions[needle] = -len(self.extradata) |
293 else: |
306 else: |
294 # not found, put it in with extra positions |
307 # not found, put it in with extra positions |
295 self.extradata.append((key, hashval, value[1])) |
308 self.extradata.append((key, hashval, value[1])) |
296 self.positions = (self.positions[:needle] + [-len(self.extradata)] |
309 self.positions = ( |
297 + self.positions[needle:]) |
310 self.positions[:needle] |
298 self.extrainfo = (self.extrainfo[:needle] + [0] + |
311 + [-len(self.extradata)] |
299 self.extrainfo[needle:]) |
312 + self.positions[needle:] |
|
313 ) |
|
314 self.extrainfo = ( |
|
315 self.extrainfo[:needle] + [0] + self.extrainfo[needle:] |
|
316 ) |
300 |
317 |
301 def copy(self): |
318 def copy(self): |
302 # XXX call _compact like in C? |
319 # XXX call _compact like in C? |
303 return _lazymanifest(self.data, self.positions, self.extrainfo, |
320 return _lazymanifest( |
304 self.extradata, self.hasremovals) |
321 self.data, |
|
322 self.positions, |
|
323 self.extrainfo, |
|
324 self.extradata, |
|
325 self.hasremovals, |
|
326 ) |
305 |
327 |
306 def _compact(self): |
328 def _compact(self): |
307 # hopefully not called TOO often |
329 # hopefully not called TOO often |
308 if len(self.extradata) == 0 and not self.hasremovals: |
330 if len(self.extradata) == 0 and not self.hasremovals: |
309 return |
331 return |
327 |
349 |
328 # A removed file has no positions[] entry, but does have an |
350 # A removed file has no positions[] entry, but does have an |
329 # overwritten first byte. Break out and find the end of the |
351 # overwritten first byte. Break out and find the end of the |
330 # current good entry/entries if there is a removed file |
352 # current good entry/entries if there is a removed file |
331 # before the next position. |
353 # before the next position. |
332 if (self.hasremovals |
354 if ( |
333 and self.data.find('\n\x00', cur, |
355 self.hasremovals |
334 self.positions[i]) != -1): |
356 and self.data.find('\n\x00', cur, self.positions[i]) |
|
357 != -1 |
|
358 ): |
335 break |
359 break |
336 |
360 |
337 offset += self.positions[i] - cur |
361 offset += self.positions[i] - cur |
338 cur = self.positions[i] |
362 cur = self.positions[i] |
339 end_cut = self.data.find('\n', cur) |
363 end_cut = self.data.find('\n', cur) |
473 |
501 |
474 def _filesfastpath(self, match): |
502 def _filesfastpath(self, match): |
475 '''Checks whether we can correctly and quickly iterate over matcher |
503 '''Checks whether we can correctly and quickly iterate over matcher |
476 files instead of over manifest files.''' |
504 files instead of over manifest files.''' |
477 files = match.files() |
505 files = match.files() |
478 return (len(files) < 100 and (match.isexact() or |
506 return len(files) < 100 and ( |
479 (match.prefix() and all(fn in self for fn in files)))) |
507 match.isexact() |
|
508 or (match.prefix() and all(fn in self for fn in files)) |
|
509 ) |
480 |
510 |
481 def walk(self, match): |
511 def walk(self, match): |
482 '''Generates matching file names. |
512 '''Generates matching file names. |
483 |
513 |
484 Equivalent to manifest.matches(match).iterkeys(), but without creating |
514 Equivalent to manifest.matches(match).iterkeys(), but without creating |
632 else: |
663 else: |
633 # For large changes, it's much cheaper to just build the text and |
664 # For large changes, it's much cheaper to just build the text and |
634 # diff it. |
665 # diff it. |
635 arraytext = bytearray(self.text()) |
666 arraytext = bytearray(self.text()) |
636 deltatext = mdiff.textdiff( |
667 deltatext = mdiff.textdiff( |
637 util.buffer(base), util.buffer(arraytext)) |
668 util.buffer(base), util.buffer(arraytext) |
|
669 ) |
638 |
670 |
639 return arraytext, deltatext |
671 return arraytext, deltatext |
|
672 |
640 |
673 |
641 def _msearch(m, s, lo=0, hi=None): |
674 def _msearch(m, s, lo=0, hi=None): |
642 '''return a tuple (start, end) that says where to find s within m. |
675 '''return a tuple (start, end) that says where to find s within m. |
643 |
676 |
644 If the string is found m[start:end] are the line containing |
677 If the string is found m[start:end] are the line containing |
645 that string. If start == end the string was not found and |
678 that string. If start == end the string was not found and |
646 they indicate the proper sorted insertion point. |
679 they indicate the proper sorted insertion point. |
647 |
680 |
648 m should be a buffer, a memoryview or a byte string. |
681 m should be a buffer, a memoryview or a byte string. |
649 s is a byte string''' |
682 s is a byte string''' |
|
683 |
650 def advance(i, c): |
684 def advance(i, c): |
651 while i < lenm and m[i:i + 1] != c: |
685 while i < lenm and m[i : i + 1] != c: |
652 i += 1 |
686 i += 1 |
653 return i |
687 return i |
|
688 |
654 if not s: |
689 if not s: |
655 return (lo, lo) |
690 return (lo, lo) |
656 lenm = len(m) |
691 lenm = len(m) |
657 if not hi: |
692 if not hi: |
658 hi = lenm |
693 hi = lenm |
659 while lo < hi: |
694 while lo < hi: |
660 mid = (lo + hi) // 2 |
695 mid = (lo + hi) // 2 |
661 start = mid |
696 start = mid |
662 while start > 0 and m[start - 1:start] != '\n': |
697 while start > 0 and m[start - 1 : start] != '\n': |
663 start -= 1 |
698 start -= 1 |
664 end = advance(start, '\0') |
699 end = advance(start, '\0') |
665 if bytes(m[start:end]) < s: |
700 if bytes(m[start:end]) < s: |
666 # we know that after the null there are 40 bytes of sha1 |
701 # we know that after the null there are 40 bytes of sha1 |
667 # this translates to the bisect lo = mid + 1 |
702 # this translates to the bisect lo = mid + 1 |
676 end = advance(end + 40, '\n') |
711 end = advance(end + 40, '\n') |
677 return (lo, end + 1) |
712 return (lo, end + 1) |
678 else: |
713 else: |
679 return (lo, lo) |
714 return (lo, lo) |
680 |
715 |
|
716 |
681 def _checkforbidden(l): |
717 def _checkforbidden(l): |
682 """Check filenames for illegal characters.""" |
718 """Check filenames for illegal characters.""" |
683 for f in l: |
719 for f in l: |
684 if '\n' in f or '\r' in f: |
720 if '\n' in f or '\r' in f: |
685 raise error.StorageError( |
721 raise error.StorageError( |
686 _("'\\n' and '\\r' disallowed in filenames: %r") |
722 _("'\\n' and '\\r' disallowed in filenames: %r") |
687 % pycompat.bytestr(f)) |
723 % pycompat.bytestr(f) |
|
724 ) |
688 |
725 |
689 |
726 |
690 # apply the changes collected during the bisect loop to our addlist |
727 # apply the changes collected during the bisect loop to our addlist |
691 # return a delta suitable for addrevision |
728 # return a delta suitable for addrevision |
692 def _addlistdelta(addlist, x): |
729 def _addlistdelta(addlist, x): |
702 |
739 |
703 currentposition = end |
740 currentposition = end |
704 |
741 |
705 newaddlist += addlist[currentposition:] |
742 newaddlist += addlist[currentposition:] |
706 |
743 |
707 deltatext = "".join(struct.pack(">lll", start, end, len(content)) |
744 deltatext = "".join( |
708 + content for start, end, content in x) |
745 struct.pack(">lll", start, end, len(content)) + content |
|
746 for start, end, content in x |
|
747 ) |
709 return deltatext, newaddlist |
748 return deltatext, newaddlist |
|
749 |
710 |
750 |
711 def _splittopdir(f): |
751 def _splittopdir(f): |
712 if '/' in f: |
752 if '/' in f: |
713 dir, subpath = f.split('/', 1) |
753 dir, subpath = f.split('/', 1) |
714 return dir + '/', subpath |
754 return dir + '/', subpath |
715 else: |
755 else: |
716 return '', f |
756 return '', f |
717 |
757 |
|
758 |
718 _noop = lambda s: None |
759 _noop = lambda s: None |
|
760 |
719 |
761 |
720 class treemanifest(object): |
762 class treemanifest(object): |
721 def __init__(self, dir='', text=''): |
763 def __init__(self, dir='', text=''): |
722 self._dir = dir |
764 self._dir = dir |
723 self._node = nullid |
765 self._node = nullid |
728 self._lazydirs = {} |
770 self._lazydirs = {} |
729 # Using _lazymanifest here is a little slower than plain old dicts |
771 # Using _lazymanifest here is a little slower than plain old dicts |
730 self._files = {} |
772 self._files = {} |
731 self._flags = {} |
773 self._flags = {} |
732 if text: |
774 if text: |
|
775 |
733 def readsubtree(subdir, subm): |
776 def readsubtree(subdir, subm): |
734 raise AssertionError('treemanifest constructor only accepts ' |
777 raise AssertionError( |
735 'flat manifests') |
778 'treemanifest constructor only accepts ' 'flat manifests' |
|
779 ) |
|
780 |
736 self.parse(text, readsubtree) |
781 self.parse(text, readsubtree) |
737 self._dirty = True # Mark flat manifest dirty after parsing |
782 self._dirty = True # Mark flat manifest dirty after parsing |
738 |
783 |
739 def _subpath(self, path): |
784 def _subpath(self, path): |
740 return self._dir + path |
785 return self._dir + path |
741 |
786 |
742 def _loadalllazy(self): |
787 def _loadalllazy(self): |
805 return not self._isempty() |
850 return not self._isempty() |
806 |
851 |
807 __bool__ = __nonzero__ |
852 __bool__ = __nonzero__ |
808 |
853 |
809 def _isempty(self): |
854 def _isempty(self): |
810 self._load() # for consistency; already loaded by all callers |
855 self._load() # for consistency; already loaded by all callers |
811 # See if we can skip loading everything. |
856 # See if we can skip loading everything. |
812 if self._files or (self._dirs and |
857 if self._files or ( |
813 any(not m._isempty() for m in self._dirs.values())): |
858 self._dirs and any(not m._isempty() for m in self._dirs.values()) |
|
859 ): |
814 return False |
860 return False |
815 self._loadalllazy() |
861 self._loadalllazy() |
816 return (not self._dirs or |
862 return not self._dirs or all(m._isempty() for m in self._dirs.values()) |
817 all(m._isempty() for m in self._dirs.values())) |
|
818 |
863 |
819 def __repr__(self): |
864 def __repr__(self): |
820 return ('<treemanifest dir=%s, node=%s, loaded=%s, dirty=%s at 0x%x>' % |
865 return '<treemanifest dir=%s, node=%s, loaded=%s, dirty=%s at 0x%x>' % ( |
821 (self._dir, hex(self._node), |
866 self._dir, |
822 bool(self._loadfunc is _noop), |
867 hex(self._node), |
823 self._dirty, id(self))) |
868 bool(self._loadfunc is _noop), |
|
869 self._dirty, |
|
870 id(self), |
|
871 ) |
824 |
872 |
825 def dir(self): |
873 def dir(self): |
826 '''The directory that this tree manifest represents, including a |
874 '''The directory that this tree manifest represents, including a |
827 trailing '/'. Empty string for the repo root directory.''' |
875 trailing '/'. Empty string for the repo root directory.''' |
828 return self._dir |
876 return self._dir |
839 self._dirty = False |
887 self._dirty = False |
840 |
888 |
841 def iterentries(self): |
889 def iterentries(self): |
842 self._load() |
890 self._load() |
843 self._loadalllazy() |
891 self._loadalllazy() |
844 for p, n in sorted(itertools.chain(self._dirs.items(), |
892 for p, n in sorted( |
845 self._files.items())): |
893 itertools.chain(self._dirs.items(), self._files.items()) |
|
894 ): |
846 if p in self._files: |
895 if p in self._files: |
847 yield self._subpath(p), n, self._flags.get(p, '') |
896 yield self._subpath(p), n, self._flags.get(p, '') |
848 else: |
897 else: |
849 for x in n.iterentries(): |
898 for x in n.iterentries(): |
850 yield x |
899 yield x |
851 |
900 |
852 def items(self): |
901 def items(self): |
853 self._load() |
902 self._load() |
854 self._loadalllazy() |
903 self._loadalllazy() |
855 for p, n in sorted(itertools.chain(self._dirs.items(), |
904 for p, n in sorted( |
856 self._files.items())): |
905 itertools.chain(self._dirs.items(), self._files.items()) |
|
906 ): |
857 if p in self._files: |
907 if p in self._files: |
858 yield self._subpath(p), n |
908 yield self._subpath(p), n |
859 else: |
909 else: |
860 for f, sn in n.iteritems(): |
910 for f, sn in n.iteritems(): |
861 yield f, sn |
911 yield f, sn |
992 def copy(self): |
1042 def copy(self): |
993 copy = treemanifest(self._dir) |
1043 copy = treemanifest(self._dir) |
994 copy._node = self._node |
1044 copy._node = self._node |
995 copy._dirty = self._dirty |
1045 copy._dirty = self._dirty |
996 if self._copyfunc is _noop: |
1046 if self._copyfunc is _noop: |
|
1047 |
997 def _copyfunc(s): |
1048 def _copyfunc(s): |
998 self._load() |
1049 self._load() |
999 s._lazydirs = {d: (p, n, r, True) for |
1050 s._lazydirs = { |
1000 d, (p, n, r, c) in self._lazydirs.iteritems()} |
1051 d: (p, n, r, True) |
|
1052 for d, (p, n, r, c) in self._lazydirs.iteritems() |
|
1053 } |
1001 sdirs = s._dirs |
1054 sdirs = s._dirs |
1002 for d, v in self._dirs.iteritems(): |
1055 for d, v in self._dirs.iteritems(): |
1003 sdirs[d] = v.copy() |
1056 sdirs[d] = v.copy() |
1004 s._files = dict.copy(self._files) |
1057 s._files = dict.copy(self._files) |
1005 s._flags = dict.copy(self._flags) |
1058 s._flags = dict.copy(self._flags) |
|
1059 |
1006 if self._loadfunc is _noop: |
1060 if self._loadfunc is _noop: |
1007 _copyfunc(copy) |
1061 _copyfunc(copy) |
1008 else: |
1062 else: |
1009 copy._copyfunc = _copyfunc |
1063 copy._copyfunc = _copyfunc |
1010 else: |
1064 else: |
1259 |
1314 |
1260 def read(self, gettext, readsubtree): |
1315 def read(self, gettext, readsubtree): |
1261 def _load_for_read(s): |
1316 def _load_for_read(s): |
1262 s.parse(gettext(), readsubtree) |
1317 s.parse(gettext(), readsubtree) |
1263 s._dirty = False |
1318 s._dirty = False |
|
1319 |
1264 self._loadfunc = _load_for_read |
1320 self._loadfunc = _load_for_read |
1265 |
1321 |
1266 def writesubtrees(self, m1, m2, writesubtree, match): |
1322 def writesubtrees(self, m1, m2, writesubtree, match): |
1267 self._load() # for consistency; should never have any effect here |
1323 self._load() # for consistency; should never have any effect here |
1268 m1._load() |
1324 m1._load() |
1269 m2._load() |
1325 m2._load() |
1270 emptytree = treemanifest() |
1326 emptytree = treemanifest() |
|
1327 |
1271 def getnode(m, d): |
1328 def getnode(m, d): |
1272 ld = m._lazydirs.get(d) |
1329 ld = m._lazydirs.get(d) |
1273 if ld: |
1330 if ld: |
1274 return ld[1] |
1331 return ld[1] |
1275 return m._dirs.get(d, emptytree)._node |
1332 return m._dirs.get(d, emptytree)._node |
1415 if clear_persisted_data: |
1474 if clear_persisted_data: |
1416 self._dirty = True |
1475 self._dirty = True |
1417 self.write() |
1476 self.write() |
1418 self._read = False |
1477 self._read = False |
1419 |
1478 |
|
1479 |
1420 # and upper bound of what we expect from compression |
1480 # and upper bound of what we expect from compression |
1421 # (real live value seems to be "3") |
1481 # (real live value seems to be "3") |
1422 MAXCOMPRESSION = 3 |
1482 MAXCOMPRESSION = 3 |
|
1483 |
1423 |
1484 |
1424 @interfaceutil.implementer(repository.imanifeststorage) |
1485 @interfaceutil.implementer(repository.imanifeststorage) |
1425 class manifestrevlog(object): |
1486 class manifestrevlog(object): |
1426 '''A revlog that stores manifest texts. This is responsible for caching the |
1487 '''A revlog that stores manifest texts. This is responsible for caching the |
1427 full-text manifest contents. |
1488 full-text manifest contents. |
1428 ''' |
1489 ''' |
1429 def __init__(self, opener, tree='', dirlogcache=None, indexfile=None, |
1490 |
1430 treemanifest=False): |
1491 def __init__( |
|
1492 self, |
|
1493 opener, |
|
1494 tree='', |
|
1495 dirlogcache=None, |
|
1496 indexfile=None, |
|
1497 treemanifest=False, |
|
1498 ): |
1431 """Constructs a new manifest revlog |
1499 """Constructs a new manifest revlog |
1432 |
1500 |
1433 `indexfile` - used by extensions to have two manifests at once, like |
1501 `indexfile` - used by extensions to have two manifests at once, like |
1434 when transitioning between flatmanifeset and treemanifests. |
1502 when transitioning between flatmanifeset and treemanifests. |
1435 |
1503 |
1466 if tree: |
1534 if tree: |
1467 self._dirlogcache = dirlogcache |
1535 self._dirlogcache = dirlogcache |
1468 else: |
1536 else: |
1469 self._dirlogcache = {'': self} |
1537 self._dirlogcache = {'': self} |
1470 |
1538 |
1471 self._revlog = revlog.revlog(opener, indexfile, |
1539 self._revlog = revlog.revlog( |
1472 # only root indexfile is cached |
1540 opener, |
1473 checkambig=not bool(tree), |
1541 indexfile, |
1474 mmaplargeindex=True, |
1542 # only root indexfile is cached |
1475 upperboundcomp=MAXCOMPRESSION) |
1543 checkambig=not bool(tree), |
|
1544 mmaplargeindex=True, |
|
1545 upperboundcomp=MAXCOMPRESSION, |
|
1546 ) |
1476 |
1547 |
1477 self.index = self._revlog.index |
1548 self.index = self._revlog.index |
1478 self.version = self._revlog.version |
1549 self.version = self._revlog.version |
1479 self._generaldelta = self._revlog._generaldelta |
1550 self._generaldelta = self._revlog._generaldelta |
1480 |
1551 |
1513 |
1584 |
1514 def dirlog(self, d): |
1585 def dirlog(self, d): |
1515 if d: |
1586 if d: |
1516 assert self._treeondisk |
1587 assert self._treeondisk |
1517 if d not in self._dirlogcache: |
1588 if d not in self._dirlogcache: |
1518 mfrevlog = manifestrevlog(self.opener, d, |
1589 mfrevlog = manifestrevlog( |
1519 self._dirlogcache, |
1590 self.opener, d, self._dirlogcache, treemanifest=self._treeondisk |
1520 treemanifest=self._treeondisk) |
1591 ) |
1521 self._dirlogcache[d] = mfrevlog |
1592 self._dirlogcache[d] = mfrevlog |
1522 return self._dirlogcache[d] |
1593 return self._dirlogcache[d] |
1523 |
1594 |
1524 def add(self, m, transaction, link, p1, p2, added, removed, readtree=None, |
1595 def add( |
1525 match=None): |
1596 self, |
|
1597 m, |
|
1598 transaction, |
|
1599 link, |
|
1600 p1, |
|
1601 p2, |
|
1602 added, |
|
1603 removed, |
|
1604 readtree=None, |
|
1605 match=None, |
|
1606 ): |
1526 if p1 in self.fulltextcache and util.safehasattr(m, 'fastdelta'): |
1607 if p1 in self.fulltextcache and util.safehasattr(m, 'fastdelta'): |
1527 # If our first parent is in the manifest cache, we can |
1608 # If our first parent is in the manifest cache, we can |
1528 # compute a delta here using properties we know about the |
1609 # compute a delta here using properties we know about the |
1529 # manifest up-front, which may save time later for the |
1610 # manifest up-front, which may save time later for the |
1530 # revlog layer. |
1611 # revlog layer. |
1531 |
1612 |
1532 _checkforbidden(added) |
1613 _checkforbidden(added) |
1533 # combine the changed lists into one sorted iterator |
1614 # combine the changed lists into one sorted iterator |
1534 work = heapq.merge([(x, False) for x in sorted(added)], |
1615 work = heapq.merge( |
1535 [(x, True) for x in sorted(removed)]) |
1616 [(x, False) for x in sorted(added)], |
|
1617 [(x, True) for x in sorted(removed)], |
|
1618 ) |
1536 |
1619 |
1537 arraytext, deltatext = m.fastdelta(self.fulltextcache[p1], work) |
1620 arraytext, deltatext = m.fastdelta(self.fulltextcache[p1], work) |
1538 cachedelta = self._revlog.rev(p1), deltatext |
1621 cachedelta = self._revlog.rev(p1), deltatext |
1539 text = util.buffer(arraytext) |
1622 text = util.buffer(arraytext) |
1540 n = self._revlog.addrevision(text, transaction, link, p1, p2, |
1623 n = self._revlog.addrevision( |
1541 cachedelta) |
1624 text, transaction, link, p1, p2, cachedelta |
|
1625 ) |
1542 else: |
1626 else: |
1543 # The first parent manifest isn't already loaded, so we'll |
1627 # The first parent manifest isn't already loaded, so we'll |
1544 # just encode a fulltext of the manifest and pass that |
1628 # just encode a fulltext of the manifest and pass that |
1545 # through to the revlog layer, and let it handle the delta |
1629 # through to the revlog layer, and let it handle the delta |
1546 # process. |
1630 # process. |
1547 if self._treeondisk: |
1631 if self._treeondisk: |
1548 assert readtree, "readtree must be set for treemanifest writes" |
1632 assert readtree, "readtree must be set for treemanifest writes" |
1549 assert match, "match must be specified for treemanifest writes" |
1633 assert match, "match must be specified for treemanifest writes" |
1550 m1 = readtree(self.tree, p1) |
1634 m1 = readtree(self.tree, p1) |
1551 m2 = readtree(self.tree, p2) |
1635 m2 = readtree(self.tree, p2) |
1552 n = self._addtree(m, transaction, link, m1, m2, readtree, |
1636 n = self._addtree( |
1553 match=match) |
1637 m, transaction, link, m1, m2, readtree, match=match |
|
1638 ) |
1554 arraytext = None |
1639 arraytext = None |
1555 else: |
1640 else: |
1556 text = m.text() |
1641 text = m.text() |
1557 n = self._revlog.addrevision(text, transaction, link, p1, p2) |
1642 n = self._revlog.addrevision(text, transaction, link, p1, p2) |
1558 arraytext = bytearray(text) |
1643 arraytext = bytearray(text) |
1563 return n |
1648 return n |
1564 |
1649 |
1565 def _addtree(self, m, transaction, link, m1, m2, readtree, match): |
1650 def _addtree(self, m, transaction, link, m1, m2, readtree, match): |
1566 # If the manifest is unchanged compared to one parent, |
1651 # If the manifest is unchanged compared to one parent, |
1567 # don't write a new revision |
1652 # don't write a new revision |
1568 if self.tree != '' and (m.unmodifiedsince(m1) or m.unmodifiedsince( |
1653 if self.tree != '' and (m.unmodifiedsince(m1) or m.unmodifiedsince(m2)): |
1569 m2)): |
|
1570 return m.node() |
1654 return m.node() |
|
1655 |
1571 def writesubtree(subm, subp1, subp2, match): |
1656 def writesubtree(subm, subp1, subp2, match): |
1572 sublog = self.dirlog(subm.dir()) |
1657 sublog = self.dirlog(subm.dir()) |
1573 sublog.add(subm, transaction, link, subp1, subp2, None, None, |
1658 sublog.add( |
1574 readtree=readtree, match=match) |
1659 subm, |
|
1660 transaction, |
|
1661 link, |
|
1662 subp1, |
|
1663 subp2, |
|
1664 None, |
|
1665 None, |
|
1666 readtree=readtree, |
|
1667 match=match, |
|
1668 ) |
|
1669 |
1575 m.writesubtrees(m1, m2, writesubtree, match) |
1670 m.writesubtrees(m1, m2, writesubtree, match) |
1576 text = m.dirtext() |
1671 text = m.dirtext() |
1577 n = None |
1672 n = None |
1578 if self.tree != '': |
1673 if self.tree != '': |
1579 # Double-check whether contents are unchanged to one parent |
1674 # Double-check whether contents are unchanged to one parent |
1630 return self._revlog.cmp(node, text) |
1726 return self._revlog.cmp(node, text) |
1631 |
1727 |
1632 def deltaparent(self, rev): |
1728 def deltaparent(self, rev): |
1633 return self._revlog.deltaparent(rev) |
1729 return self._revlog.deltaparent(rev) |
1634 |
1730 |
1635 def emitrevisions(self, nodes, nodesorder=None, |
1731 def emitrevisions( |
1636 revisiondata=False, assumehaveparentrevisions=False, |
1732 self, |
1637 deltamode=repository.CG_DELTAMODE_STD): |
1733 nodes, |
|
1734 nodesorder=None, |
|
1735 revisiondata=False, |
|
1736 assumehaveparentrevisions=False, |
|
1737 deltamode=repository.CG_DELTAMODE_STD, |
|
1738 ): |
1638 return self._revlog.emitrevisions( |
1739 return self._revlog.emitrevisions( |
1639 nodes, nodesorder=nodesorder, revisiondata=revisiondata, |
1740 nodes, |
|
1741 nodesorder=nodesorder, |
|
1742 revisiondata=revisiondata, |
1640 assumehaveparentrevisions=assumehaveparentrevisions, |
1743 assumehaveparentrevisions=assumehaveparentrevisions, |
1641 deltamode=deltamode) |
1744 deltamode=deltamode, |
|
1745 ) |
1642 |
1746 |
1643 def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None): |
1747 def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None): |
1644 return self._revlog.addgroup(deltas, linkmapper, transaction, |
1748 return self._revlog.addgroup( |
1645 addrevisioncb=addrevisioncb) |
1749 deltas, linkmapper, transaction, addrevisioncb=addrevisioncb |
|
1750 ) |
1646 |
1751 |
1647 def rawsize(self, rev): |
1752 def rawsize(self, rev): |
1648 return self._revlog.rawsize(rev) |
1753 return self._revlog.rawsize(rev) |
1649 |
1754 |
1650 def getstrippoint(self, minlink): |
1755 def getstrippoint(self, minlink): |
1660 if not isinstance(destrevlog, manifestrevlog): |
1765 if not isinstance(destrevlog, manifestrevlog): |
1661 raise error.ProgrammingError('expected manifestrevlog to clone()') |
1766 raise error.ProgrammingError('expected manifestrevlog to clone()') |
1662 |
1767 |
1663 return self._revlog.clone(tr, destrevlog._revlog, **kwargs) |
1768 return self._revlog.clone(tr, destrevlog._revlog, **kwargs) |
1664 |
1769 |
1665 def storageinfo(self, exclusivefiles=False, sharedfiles=False, |
1770 def storageinfo( |
1666 revisionscount=False, trackedsize=False, |
1771 self, |
1667 storedsize=False): |
1772 exclusivefiles=False, |
|
1773 sharedfiles=False, |
|
1774 revisionscount=False, |
|
1775 trackedsize=False, |
|
1776 storedsize=False, |
|
1777 ): |
1668 return self._revlog.storageinfo( |
1778 return self._revlog.storageinfo( |
1669 exclusivefiles=exclusivefiles, sharedfiles=sharedfiles, |
1779 exclusivefiles=exclusivefiles, |
1670 revisionscount=revisionscount, trackedsize=trackedsize, |
1780 sharedfiles=sharedfiles, |
1671 storedsize=storedsize) |
1781 revisionscount=revisionscount, |
|
1782 trackedsize=trackedsize, |
|
1783 storedsize=storedsize, |
|
1784 ) |
1672 |
1785 |
1673 @property |
1786 @property |
1674 def indexfile(self): |
1787 def indexfile(self): |
1675 return self._revlog.indexfile |
1788 return self._revlog.indexfile |
1676 |
1789 |
1683 return self._revlog.opener |
1796 return self._revlog.opener |
1684 |
1797 |
1685 @opener.setter |
1798 @opener.setter |
1686 def opener(self, value): |
1799 def opener(self, value): |
1687 self._revlog.opener = value |
1800 self._revlog.opener = value |
|
1801 |
1688 |
1802 |
1689 @interfaceutil.implementer(repository.imanifestlog) |
1803 @interfaceutil.implementer(repository.imanifestlog) |
1690 class manifestlog(object): |
1804 class manifestlog(object): |
1691 """A collection class representing the collection of manifest snapshots |
1805 """A collection class representing the collection of manifest snapshots |
1692 referenced by commits in the repository. |
1806 referenced by commits in the repository. |
1693 |
1807 |
1694 In this situation, 'manifest' refers to the abstract concept of a snapshot |
1808 In this situation, 'manifest' refers to the abstract concept of a snapshot |
1695 of the list of files in the given commit. Consumers of the output of this |
1809 of the list of files in the given commit. Consumers of the output of this |
1696 class do not care about the implementation details of the actual manifests |
1810 class do not care about the implementation details of the actual manifests |
1697 they receive (i.e. tree or flat or lazily loaded, etc).""" |
1811 they receive (i.e. tree or flat or lazily loaded, etc).""" |
|
1812 |
1698 def __init__(self, opener, repo, rootstore, narrowmatch): |
1813 def __init__(self, opener, repo, rootstore, narrowmatch): |
1699 usetreemanifest = False |
1814 usetreemanifest = False |
1700 cachesize = 4 |
1815 cachesize = 4 |
1701 |
1816 |
1702 opts = getattr(opener, 'options', None) |
1817 opts = getattr(opener, 'options', None) |
1794 |
1914 |
def read(self):
    """Return the in-memory manifestdict backing this context."""
    return self._manifestdict
1797 |
1917 |
def write(self, transaction, link, p1, p2, added, removed, match=None):
    """Persist the in-memory manifest through the root storage.

    Delegates to the storage object's ``add`` and returns its result
    (presumably the new manifest node -- confirm against the storage
    implementation).
    """
    storage = self._storage()
    return storage.add(
        self._manifestdict,
        transaction,
        link,
        p1,
        p2,
        added,
        removed,
        match=match,
    )
|
1929 |
1801 |
1930 |
1802 @interfaceutil.implementer(repository.imanifestrevisionstored) |
1931 @interfaceutil.implementer(repository.imanifestrevisionstored) |
1803 class manifestctx(object): |
1932 class manifestctx(object): |
1804 """A class representing a single revision of a manifest, including its |
1933 """A class representing a single revision of a manifest, including its |
1805 contents, its parent revs, and its linkrev. |
1934 contents, its parent revs, and its linkrev. |
1806 """ |
1935 """ |
|
1936 |
def __init__(self, manifestlog, node):
    """Bind this context to *manifestlog* and the manifest *node* id."""
    self._manifestlog = manifestlog
    # Contents are loaded lazily; stays None until first read.
    self._data = None

    self._node = node

    # TODO: We eventually want p1, p2, and linkrev exposed on this class,
    # but let's add it later when something needs it and we can load it
    # lazily.
    # self.p1, self.p2 = store.parents(node)
    # rev = store.rev(node)
    # self.linkrev = store.linkrev(rev)
1819 |
1949 |
def _storage(self):
    """Return the root (empty-path) manifest storage object."""
    return self._manifestlog.getstorage(b'')
1822 |
1952 |
1823 def node(self): |
1953 def node(self): |
1901 return self._treemanifest |
2032 return self._treemanifest |
1902 |
2033 |
def write(self, transaction, link, p1, p2, added, removed, match=None):
    """Persist the in-memory tree manifest through the root storage."""

    def readtree(dir, node):
        # Resolve subtree manifests through the manifestlog so any
        # caching it does is reused.
        return self._manifestlog.get(dir, node).read()

    return self._storage().add(
        self._treemanifest,
        transaction,
        link,
        p1,
        p2,
        added,
        removed,
        readtree=readtree,
        match=match,
    )
|
2049 |
1909 |
2050 |
1910 @interfaceutil.implementer(repository.imanifestrevisionstored) |
2051 @interfaceutil.implementer(repository.imanifestrevisionstored) |
1911 class treemanifestctx(object): |
2052 class treemanifestctx(object): |
1912 def __init__(self, manifestlog, dir, node): |
2053 def __init__(self, manifestlog, dir, node): |
1913 self._manifestlog = manifestlog |
2054 self._manifestlog = manifestlog |
1936 if self._node == nullid: |
2077 if self._node == nullid: |
1937 self._data = treemanifest() |
2078 self._data = treemanifest() |
1938 # TODO accessing non-public API |
2079 # TODO accessing non-public API |
1939 elif store._treeondisk: |
2080 elif store._treeondisk: |
1940 m = treemanifest(dir=self._dir) |
2081 m = treemanifest(dir=self._dir) |
|
2082 |
1941 def gettext(): |
2083 def gettext(): |
1942 return store.revision(self._node) |
2084 return store.revision(self._node) |
|
2085 |
1943 def readsubtree(dir, subm): |
2086 def readsubtree(dir, subm): |
1944 # Set verify to False since we need to be able to create |
2087 # Set verify to False since we need to be able to create |
1945 # subtrees for trees that don't exist on disk. |
2088 # subtrees for trees that don't exist on disk. |
1946 return self._manifestlog.get(dir, subm, verify=False).read() |
2089 return self._manifestlog.get(dir, subm, verify=False).read() |
|
2090 |
1947 m.read(gettext, readsubtree) |
2091 m.read(gettext, readsubtree) |
1948 m.setnode(self._node) |
2092 m.setnode(self._node) |
1949 self._data = m |
2093 self._data = m |
1950 else: |
2094 else: |
1951 if self._node in store.fulltextcache: |
2095 if self._node in store.fulltextcache: |
2011 and not any submanifests. |
2155 and not any submanifests. |
2012 ''' |
2156 ''' |
2013 store = self._storage() |
2157 store = self._storage() |
2014 r = store.rev(self._node) |
2158 r = store.rev(self._node) |
2015 deltaparent = store.deltaparent(r) |
2159 deltaparent = store.deltaparent(r) |
2016 if (deltaparent != nullrev and |
2160 if deltaparent != nullrev and deltaparent in store.parentrevs(r): |
2017 deltaparent in store.parentrevs(r)): |
|
2018 return self.readdelta(shallow=shallow) |
2161 return self.readdelta(shallow=shallow) |
2019 |
2162 |
2020 if shallow: |
2163 if shallow: |
2021 return manifestdict(store.revision(self._node)) |
2164 return manifestdict(store.revision(self._node)) |
2022 else: |
2165 else: |
2023 return self.read() |
2166 return self.read() |
2024 |
2167 |
def find(self, key):
    """Read the full manifest and look *key* up in it."""
    manifest = self.read()
    return manifest.find(key)
|
2170 |
2027 |
2171 |
2028 class excludeddir(treemanifest): |
2172 class excludeddir(treemanifest): |
2029 """Stand-in for a directory that is excluded from the repository. |
2173 """Stand-in for a directory that is excluded from the repository. |
2030 |
2174 |
2031 With narrowing active on a repository that uses treemanifests, |
2175 With narrowing active on a repository that uses treemanifests, |
2034 some sort of pseudo-manifest to surface to internals so we can |
2178 some sort of pseudo-manifest to surface to internals so we can |
2035 detect a merge conflict outside the narrowspec. That's what this |
2179 detect a merge conflict outside the narrowspec. That's what this |
2036 class is: it stands in for a directory whose node is known, but |
2180 class is: it stands in for a directory whose node is known, but |
2037 whose contents are unknown. |
2181 whose contents are unknown. |
2038 """ |
2182 """ |
|
2183 |
2039 def __init__(self, dir, node): |
2184 def __init__(self, dir, node): |
2040 super(excludeddir, self).__init__(dir) |
2185 super(excludeddir, self).__init__(dir) |
2041 self._node = node |
2186 self._node = node |
2042 # Add an empty file, which will be included by iterators and such, |
2187 # Add an empty file, which will be included by iterators and such, |
2043 # appearing as the directory itself (i.e. something like "dir/") |
2188 # appearing as the directory itself (i.e. something like "dir/") |
# be of the same type as the original, which would not happen with the
# super type's copy().
def copy(self):
    # No per-copy state to duplicate; returning self keeps the type.
    return self
2054 |
2199 |
|
2200 |
class excludeddirmanifestctx(treemanifestctx):
    """context wrapper for excludeddir - see that docstring for rationale"""

    def __init__(self, dir, node):
        self._dir = dir
        self._node = node

    def read(self):
        # Surface the stand-in directory object instead of real contents.
        return excludeddir(self._dir, self._node)

    def write(self, *args):
        # Writing through an excluded directory is always a caller bug.
        raise error.ProgrammingError(
            'attempt to write manifest from excluded dir %s' % self._dir
        )
|
2215 |
2067 |
2216 |
2068 class excludedmanifestrevlog(manifestrevlog): |
2217 class excludedmanifestrevlog(manifestrevlog): |
2069 """Stand-in for excluded treemanifest revlogs. |
2218 """Stand-in for excluded treemanifest revlogs. |
2070 |
2219 |
2071 When narrowing is active on a treemanifest repository, we'll have |
2220 When narrowing is active on a treemanifest repository, we'll have |
def __init__(self, dir):
    # Path of the excluded directory this stand-in represents.
    self._dir = dir
2080 |
2229 |
def __len__(self):
    # Length is meaningless for an excluded dirlog; always a caller bug.
    msg = 'attempt to get length of excluded dir %s' % self._dir
    raise error.ProgrammingError(msg)
2084 |
2234 |
def rev(self, node):
    # Revision lookup is meaningless for an excluded dirlog.
    msg = 'attempt to get rev from excluded dir %s' % self._dir
    raise error.ProgrammingError(msg)
2088 |
2239 |
def linkrev(self, node):
    # Linkrev lookup is meaningless for an excluded dirlog.
    msg = 'attempt to get linkrev from excluded dir %s' % self._dir
    raise error.ProgrammingError(msg)
2092 |
2244 |
def node(self, rev):
    # Node lookup is meaningless for an excluded dirlog.
    msg = 'attempt to get node from excluded dir %s' % self._dir
    raise error.ProgrammingError(msg)
2096 |
2249 |
2097 def add(self, *args, **kwargs): |
2250 def add(self, *args, **kwargs): |
2098 # We should never write entries in dirlogs outside the narrow clone. |
2251 # We should never write entries in dirlogs outside the narrow clone. |
2099 # However, the method still gets called from writesubtree() in |
2252 # However, the method still gets called from writesubtree() in |
2100 # _addtree(), so we need to handle it. We should possibly make that |
2253 # _addtree(), so we need to handle it. We should possibly make that |