# Copyright (C) 2008, 2009 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

"""Core compression logic for compressing streams of related files."""

from itertools import izip
from cStringIO import StringIO
import time
import zlib
try:
    import pylzma
except ImportError:
    pylzma = None

from bzrlib import (
    annotate,
    debug,
    diff,
    errors,
    graph as _mod_graph,
    knit,
    osutils,
    pack,
    patiencediff,
    trace,
    )
from bzrlib.graph import Graph
from bzrlib.btree_index import BTreeBuilder
from bzrlib.lru_cache import LRUSizeCache
from bzrlib.tsort import topo_sort
from bzrlib.versionedfile import (
    adapter_registry,
    AbsentContentFactory,
    ChunkedContentFactory,
    FulltextContentFactory,
    VersionedFiles,
    )

_USE_LZMA = False and (pylzma is not None)

# osutils.sha_string('')
_null_sha1 = 'da39a3ee5e6b4b0d3255bfef95601890afd80709'


def sort_gc_optimal(parent_map):
    """Sort and group the keys in parent_map into groupcompress order.

    groupcompress is defined (currently) as reverse-topological order, grouped
    by the key prefix.

    :return: A sorted-list of keys
    """
    # groupcompress ordering is approximately reverse topological,
    # properly grouped by file-id.
    per_prefix_map = {}
    for item in parent_map.iteritems():
        key = item[0]
        if isinstance(key, str) or len(key) == 1:
            prefix = ''
        else:
            prefix = key[0]
        try:
            per_prefix_map[prefix].append(item)
        except KeyError:
            per_prefix_map[prefix] = [item]

    present_keys = []
    for prefix in sorted(per_prefix_map):
        present_keys.extend(reversed(topo_sort(per_prefix_map[prefix])))
    return present_keys
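
# Illustrative example (an assumption, not from the original source): with a
# two-file parent_map, keys are grouped by prefix and reverse-topologically
# sorted within each group:
#     >>> pm = {('f1', 'rev2'): [('f1', 'rev1')],
#     ...       ('f1', 'rev1'): [],
#     ...       ('f2', 'reva'): []}
#     >>> sort_gc_optimal(pm)
#     [('f1', 'rev2'), ('f1', 'rev1'), ('f2', 'reva')]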


# The max zlib window size is 32kB, so if we set 'max_size' output of the
# decompressor to the requested bytes + 32kB, then we should guarantee
# num_bytes coming out.
_ZLIB_DECOMP_WINDOW = 32*1024


class GroupCompressBlock(object):
    """An object which maintains the internal structure of the compressed data.

    This tracks the meta info (start of text, length, type, etc.)
    """

    # Group Compress Block v1 Zlib
    GCB_HEADER = 'gcb1z\n'
    # Group Compress Block v1 Lzma
    GCB_LZ_HEADER = 'gcb1l\n'
    GCB_KNOWN_HEADERS = (GCB_HEADER, GCB_LZ_HEADER)

    def __init__(self):
        # map by key? or just order in file?
        self._compressor_name = None
        self._z_content = None
        self._z_content_decompressor = None
        self._z_content_length = None
        self._content_length = None
        self._content = None

    def __len__(self):
        # This is the maximum number of bytes this object will reference if
        # everything is decompressed. However, if we decompress less than
        # everything... (this would cause some problems for LRUSizeCache)
        return self._content_length + self._z_content_length

    def _ensure_content(self, num_bytes=None):
        """Make sure that content has been expanded enough.

        :param num_bytes: Ensure that we have extracted at least num_bytes of
            content. If None, consume everything.
        """
        # TODO: If we re-use the same content block at different times during
        #       get_record_stream(), it is possible that the first pass will
        #       get inserted, triggering an extract/_ensure_content() which
        #       will get rid of _z_content. And then the next use of the block
        #       will try to access _z_content (to send it over the wire), and
        #       fail because it is already extracted. Consider never releasing
        #       _z_content because of this.
        if num_bytes is None:
            num_bytes = self._content_length
        elif (self._content_length is not None
              and num_bytes > self._content_length):
            raise AssertionError(
                'requested num_bytes (%d) > content length (%d)'
                % (num_bytes, self._content_length))
        # Expand the content if required
        if self._content is None:
            if self._z_content is None:
                raise AssertionError('No content to decompress')
            if self._z_content == '':
                self._content = ''
            elif self._compressor_name == 'lzma':
                # We don't do partial lzma decomp yet
                self._content = pylzma.decompress(self._z_content)
            elif self._compressor_name == 'zlib':
                # Start a zlib decompressor
                if num_bytes is None:
                    self._content = zlib.decompress(self._z_content)
                else:
                    self._z_content_decompressor = zlib.decompressobj()
                    # Seed the decompressor with the uncompressed bytes, so
                    # that the rest of the code is simplified
                    self._content = self._z_content_decompressor.decompress(
                        self._z_content, num_bytes + _ZLIB_DECOMP_WINDOW)
            else:
                raise AssertionError('Unknown compressor: %r'
                                     % self._compressor_name)
        # Any bytes remaining to be decompressed will be in the decompressor's
        # 'unconsumed_tail'

        # Do we have enough bytes already?
        if num_bytes is not None and len(self._content) >= num_bytes:
            return
        if num_bytes is None and self._z_content_decompressor is None:
            # We must have already decompressed everything
            return
        # If we got this far and don't have a decompressor, something is wrong
        if self._z_content_decompressor is None:
            raise AssertionError(
                'No decompressor to decompress %d bytes' % num_bytes)
        remaining_decomp = self._z_content_decompressor.unconsumed_tail
        if num_bytes is None:
            if remaining_decomp:
                # We don't know how much is left, but we'll decompress it all
                self._content += self._z_content_decompressor.decompress(
                    remaining_decomp)
                # Note: There's what I consider a bug in zlib.decompressobj
                #       If you pass back in the entire unconsumed_tail, only
                #       this time you don't pass a max-size, it doesn't
                #       change the unconsumed_tail back to None/''.
                #       However, we know we are done with the whole stream
                self._z_content_decompressor = None
            # XXX: Why is this the only place in this routine we set this?
            self._content_length = len(self._content)
        else:
            if not remaining_decomp:
                raise AssertionError('Nothing left to decompress')
            needed_bytes = num_bytes - len(self._content)
            # We always set max_size to 32kB over the minimum needed, so that
            # zlib will give us as much as we really want.
            # TODO: If this isn't good enough, we could make a loop here,
            #       that keeps expanding the request until we get enough
            self._content += self._z_content_decompressor.decompress(
                remaining_decomp, needed_bytes + _ZLIB_DECOMP_WINDOW)
            if len(self._content) < num_bytes:
                raise AssertionError('%d bytes wanted, only %d available'
                                     % (num_bytes, len(self._content)))
            if not self._z_content_decompressor.unconsumed_tail:
                # The stream is finished
                self._z_content_decompressor = None
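
    # Illustrative note (an assumption, not from the original source): a
    # caller that only needs the front of a large block can call, e.g.,
    #     block._ensure_content(1000)
    # which guarantees len(block._content) >= 1000 while leaving the rest of
    # the compressed data in the zlib decompressor's unconsumed_tail for a
    # later, larger request.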

    def _parse_bytes(self, bytes, pos):
        """Read the various lengths from the header.

        This also populates the various 'compressed' buffers.

        :return: The position in bytes just after the last newline
        """
        # At present, we have 2 integers for the compressed and uncompressed
        # content. In base10 (ascii) 14 bytes can represent > 1TB, so to avoid
        # checking too far, cap the search to 14 bytes.
        pos2 = bytes.index('\n', pos, pos + 14)
        self._z_content_length = int(bytes[pos:pos2])
        pos = pos2 + 1
        pos2 = bytes.index('\n', pos, pos + 14)
        self._content_length = int(bytes[pos:pos2])
        pos = pos2 + 1
        if len(bytes) != (pos + self._z_content_length):
            # XXX: Define some GCCorrupt error ?
            raise AssertionError('Invalid bytes: (%d) != %d + %d' %
                                 (len(bytes), pos, self._z_content_length))
        self._z_content = bytes[pos:]
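
    # Illustrative layout (assumed presentation, derived from the code above):
    #     'gcb1z\n'       6-byte header: format marker plus compressor flag
    #     '<z_len>\n'     _z_content_length as base-10 ascii digits
    #     '<len>\n'       _content_length as base-10 ascii digits
    #     <z_len bytes of zlib-compressed content>
    # from_bytes() (below) checks the header and then calls
    # _parse_bytes(bytes, 6) to read everything after it.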

    @classmethod
    def from_bytes(cls, bytes):
        out = cls()
        if bytes[:6] not in cls.GCB_KNOWN_HEADERS:
            raise ValueError('bytes did not start with any of %r'
                             % (cls.GCB_KNOWN_HEADERS,))
        # XXX: why not testing the whole header ?
        if bytes[4] == 'z':
            out._compressor_name = 'zlib'
        elif bytes[4] == 'l':
            out._compressor_name = 'lzma'
        else:
            raise ValueError('unknown compressor: %r' % (bytes,))
        out._parse_bytes(bytes, 6)
        return out

    def extract(self, key, start, end, sha1=None):
        """Extract the text for a specific key.

        :param key: The label used for this content
        :param sha1: TODO (should we validate only when sha1 is supplied?)
        :return: The bytes for the content
        """
        if start == end == 0:
            return ''
        self._ensure_content(end)
        # The bytes are 'f' or 'd' for the type, then a variable-length
        # base128 integer for the content size, then the actual content.
        # We know that the variable-length integer won't be longer than 5
        # bytes (it takes 5 bytes to encode 2^32)
        c = self._content[start]
        if c == 'f':
            type = 'fulltext'
        else:
            if c != 'd':
                raise ValueError('Unknown content control code: %s'
                                 % (c,))
            type = 'delta'
        content_len, len_len = decode_base128_int(
            self._content[start + 1:start + 6])
        content_start = start + 1 + len_len
        if end != content_start + content_len:
            raise ValueError('end != len according to field header'
                ' %s != %s' % (end, content_start + content_len))
        if c == 'f':
            bytes = self._content[content_start:end]
        elif c == 'd':
            bytes = apply_delta_to_source(self._content, content_start, end)
        return bytes
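
    # Sketch of the record encoding (assumed example, matching the comments
    # above): a 5-byte fulltext 'hello' is stored inside the block as
    #     'f' + '\x05' + 'hello'
    # one kind byte, a base128 length (a single byte here), then the text,
    # so extract(key, start, start + 7) returns 'hello'.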

    def set_content(self, content):
        """Set the content of this block."""
        self._content_length = len(content)
        self._content = content
        self._z_content = None

    def to_bytes(self):
        """Encode the information into a byte stream."""
        compress = zlib.compress
        if _USE_LZMA:
            compress = pylzma.compress
        if self._z_content is None:
            if self._content is None:
                raise AssertionError('Nothing to compress')
            self._z_content = compress(self._content)
            self._z_content_length = len(self._z_content)
        if _USE_LZMA:
            header = self.GCB_LZ_HEADER
        else:
            header = self.GCB_HEADER
        chunks = [header,
                  '%d\n%d\n' % (self._z_content_length, self._content_length),
                  self._z_content,
                 ]
        return ''.join(chunks)
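
    # A minimal round-trip sketch (assumed usage, not from the original
    # source):
    #     >>> block = GroupCompressBlock()
    #     >>> block.set_content('hello world')
    #     >>> copy = GroupCompressBlock.from_bytes(block.to_bytes())
    #     >>> copy._ensure_content()
    #     >>> copy._content
    #     'hello world'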

    def _dump(self, include_text=False):
        """Take this block, and spit out a human-readable structure.

        :param include_text: Inserts also include text bits; choose whether
            you want this displayed in the dump or not.
        :return: A dump of the given block. The layout is something like:
            [('f', length), ('d', delta_length, text_length, [delta_info])]
            delta_info := [('i', num_bytes, text), ('c', offset, num_bytes),
            ...]
        """
        self._ensure_content()
        result = []
        pos = 0
        while pos < self._content_length:
            kind = self._content[pos]
            pos += 1
            if kind not in ('f', 'd'):
                raise ValueError('invalid kind character: %r' % (kind,))
            content_len, len_len = decode_base128_int(
                                self._content[pos:pos + 5])
            pos += len_len
            if content_len + pos > self._content_length:
                raise ValueError('invalid content_len %d for record @ pos %d'
                                 % (content_len, pos - len_len - 1))
            if kind == 'f': # Fulltext
                if include_text:
                    text = self._content[pos:pos+content_len]
                    result.append(('f', content_len, text))
                else:
                    result.append(('f', content_len))
            elif kind == 'd': # Delta
                delta_content = self._content[pos:pos+content_len]
                delta_info = []
                # The first entry in a delta is the decompressed length
                decomp_len, delta_pos = decode_base128_int(delta_content)
                result.append(('d', content_len, decomp_len, delta_info))
                measured_len = 0
                while delta_pos < content_len:
                    c = ord(delta_content[delta_pos])
                    delta_pos += 1
                    if c & 0x80: # Copy
                        (offset, length,
                         delta_pos) = decode_copy_instruction(delta_content, c,
                                                              delta_pos)
                        if include_text:
                            text = self._content[offset:offset+length]
                            delta_info.append(('c', offset, length, text))
                        else:
                            delta_info.append(('c', offset, length))
                        measured_len += length
                    else: # Insert
                        if include_text:
                            txt = delta_content[delta_pos:delta_pos+c]
                        else:
                            txt = ''
                        delta_info.append(('i', c, txt))
                        measured_len += c
                        delta_pos += c
                if delta_pos != content_len:
                    raise ValueError('Delta consumed a bad number of bytes:'
                                     ' %d != %d' % (delta_pos, content_len))
                if measured_len != decomp_len:
                    raise ValueError('Delta claimed fulltext was %d bytes, but'
                                     ' extraction resulted in %d bytes'
                                     % (decomp_len, measured_len))
            pos += content_len
        return result
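
    # For illustration (assumed values): a block whose content is the single
    # fulltext record 'f\x05hello' dumps as
    #     >>> block._dump(include_text=True)
    #     [('f', 5, 'hello')]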


class _LazyGroupCompressFactory(object):
    """Yield content from a GroupCompressBlock on demand."""

    def __init__(self, key, parents, manager, start, end, first):
        """Create a _LazyGroupCompressFactory

        :param key: The key of just this record
        :param parents: The parents of this key (possibly None)
        :param manager: The _LazyGroupContentManager that holds the
            GroupCompressBlock for this record
        :param start: Offset of the first byte for this record in the
            uncompressed content
        :param end: Offset of the byte just after the end of this record
            (ie, bytes = content[start:end])
        :param first: Is this the first Factory for the given block?
        """
        self.key = key
        self.parents = parents
        self.sha1 = None
        # Note: This attribute coupled with Manager._factories creates a
        #       reference cycle. Perhaps we would rather use a weakref(), or
        #       find an appropriate time to release the ref. After the first
        #       get_bytes_as call? After Manager.get_record_stream() returns
        #       the object?
        self._manager = manager
        self._bytes = None
        self.storage_kind = 'groupcompress-block'
        if not first:
            self.storage_kind = 'groupcompress-block-ref'
        self._first = first
        self._start = start
        self._end = end

    def __repr__(self):
        return '%s(%s, first=%s)' % (self.__class__.__name__,
            self.key, self._first)

    def get_bytes_as(self, storage_kind):
        if storage_kind == self.storage_kind:
            if self._first:
                # wire bytes, something...
                return self._manager._wire_bytes()
            else:
                return ''
        if storage_kind in ('fulltext', 'chunked'):
            if self._bytes is None:
                # Grab and cache the raw bytes for this entry
                # and break the ref-cycle with _manager since we don't need it
                # anymore
                self._manager._prepare_for_extract()
                block = self._manager._block
                self._bytes = block.extract(self.key, self._start, self._end)
                # There are code paths that first extract as fulltext, and then
                # extract as storage_kind (smart fetch). So we don't break the
                # refcycle here, but instead in manager.get_record_stream()
                # self._manager = None
            if storage_kind == 'fulltext':
                return self._bytes
            else:
                return [self._bytes]
        raise errors.UnavailableRepresentation(self.key, storage_kind,
            self.storage_kind)


class _LazyGroupContentManager(object):
    """This manages a group of _LazyGroupCompressFactory objects."""

    def __init__(self, block):
        self._block = block
        # We need to preserve the ordering
        self._factories = []
        self._last_byte = 0

    def add_factory(self, key, parents, start, end):
        if not self._factories:
            first = True
        else:
            first = False
        # Note that this creates a reference cycle....
        factory = _LazyGroupCompressFactory(key, parents, self,
            start, end, first=first)
        # max() works here, but as a function call, doing a compare seems to be
        # significantly faster, timeit says 250ms for max() and 100ms for the
        # comparison
        if end > self._last_byte:
            self._last_byte = end
        self._factories.append(factory)

    def get_record_stream(self):
        """Get a record for all keys added so far."""
        for factory in self._factories:
            yield factory
            # Break the ref-cycle
            factory._bytes = None
            factory._manager = None
        # TODO: Consider setting self._factories = None after the above loop,
        #       as it will break the reference cycle

    def _trim_block(self, last_byte):
        """Create a new GroupCompressBlock, with just some of the content."""
        # None of the factories need to be adjusted, because the content is
        # located in an identical place. Just that some of the unreferenced
        # trailing bytes are stripped
        trace.mutter('stripping trailing bytes from groupcompress block'
                     ' %d => %d', self._block._content_length, last_byte)
        new_block = GroupCompressBlock()
        self._block._ensure_content(last_byte)
        new_block.set_content(self._block._content[:last_byte])
        self._block = new_block

    def _rebuild_block(self):
        """Create a new GroupCompressBlock with only the referenced texts."""
        compressor = GroupCompressor()
        tstart = time.time()
        old_length = self._block._content_length
        end_point = 0
        for factory in self._factories:
            bytes = factory.get_bytes_as('fulltext')
            (found_sha1, start_point, end_point,
             type) = compressor.compress(factory.key, bytes, factory.sha1)
            # Now update this factory with the new offsets, etc
            factory.sha1 = found_sha1
            factory._start = start_point
            factory._end = end_point
        self._last_byte = end_point
        new_block = compressor.flush()
        # TODO: Should we check that new_block really *is* smaller than the old
        #       block? It seems hard to come up with a method that it would
        #       expand, since we do full compression again. Perhaps based on a
        #       request that ends up poorly ordered?
        delta = time.time() - tstart
        self._block = new_block
        trace.mutter('creating new compressed block on-the-fly in %.3fs'
                     ' %d bytes => %d bytes', delta, old_length,
                     self._block._content_length)

    def _prepare_for_extract(self):
        """A _LazyGroupCompressFactory is about to extract to fulltext."""
        # We expect that if one child is going to fulltext, all will be. This
        # helps prevent all of them from extracting a small amount at a time.
        # Which in itself isn't terribly expensive, but resizing 2MB 32kB at a
        # time (self._block._content) is a little expensive.
        self._block._ensure_content(self._last_byte)

    def _check_rebuild_block(self):
        """Check to see if our block should be repacked."""
        total_bytes_used = 0
        last_byte_used = 0
        for factory in self._factories:
            total_bytes_used += factory._end - factory._start
            last_byte_used = max(last_byte_used, factory._end)
        # If we are using most of the bytes from the block, we have nothing
        # else to check (currently more than 1/2)
        if total_bytes_used * 2 >= self._block._content_length:
            return
        # Can we just strip off the trailing bytes? If we are going to be
        # transmitting more than 50% of the front of the content, go ahead
        if total_bytes_used * 2 > last_byte_used:
            self._trim_block(last_byte_used)
            return
        # We are using a small amount of the data, and it isn't just packed
        # nicely at the front, so rebuild the content.
        # Note: This would be *nicer* as a strip-data-from-group, rather than
        #       building it up again from scratch
        #       It might be reasonable to consider the fulltext sizes for
        #       different bits when deciding this, too. As you may have a small
        #       fulltext, and a trivial delta, and you are just trading around
        #       for another fulltext. If we do a simple 'prune' you may end up
        #       expanding many deltas into fulltexts, as well.
        #       If we build a cheap enough 'strip', then we could try a strip,
        #       if that expands the content, we then rebuild.
        self._rebuild_block()
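
    # Worked example (assumed numbers, for illustration only): for a 100kB
    # block, factories referencing 60kB in total keep the block unchanged
    # (60 * 2 >= 100). Factories referencing 20kB that all fall within the
    # first 30kB trigger _trim_block (20 * 2 > 30), while the same 20kB
    # scattered across the full 100kB triggers _rebuild_block().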

    def _wire_bytes(self):
        """Return a byte stream suitable for transmitting over the wire."""
        self._check_rebuild_block()
        # The outer block starts with:
        #   'groupcompress-block\n'
        #   <length of compressed key info>\n
        #   <length of uncompressed info>\n
        #   <length of gc block>\n
        #   <header bytes>
        #   <gc-block>
        lines = ['groupcompress-block\n']
        # The minimal info we need is the key, the start offset, and the
        # parents. The length and type are encoded in the record itself.
        # However, passing in the other bits makes it easier. The list of
        # keys, and the start offset, the length
        # 1 line key
        # 1 line with parents, '' for ()
        # 1 line for start offset
        # 1 line for end byte
        header_lines = []
        for factory in self._factories:
            key_bytes = '\x00'.join(factory.key)
            parents = factory.parents
            if parents is None:
                parent_bytes = 'None:'
            else:
                parent_bytes = '\t'.join('\x00'.join(key) for key in parents)
            record_header = '%s\n%s\n%d\n%d\n' % (
                key_bytes, parent_bytes, factory._start, factory._end)
            header_lines.append(record_header)
            # TODO: Can we break the refcycle at this point and set
            #       factory._manager = None?
        header_bytes = ''.join(header_lines)
        del header_lines
        header_bytes_len = len(header_bytes)
        z_header_bytes = zlib.compress(header_bytes)
        del header_bytes
        z_header_bytes_len = len(z_header_bytes)
        block_bytes = self._block.to_bytes()
        lines.append('%d\n%d\n%d\n' % (z_header_bytes_len, header_bytes_len,
                                       len(block_bytes)))
        lines.append(z_header_bytes)
        lines.append(block_bytes)
        del z_header_bytes, block_bytes
        return ''.join(lines)
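
    # Illustrative layout (assumed values, derived from the code above): one
    # record with key ('file-id', 'rev-id'), parents None, occupying bytes
    # [0, 100) of the block contributes the header record
    #     'file-id\x00rev-id\nNone:\n0\n100\n'
    # and the overall stream is
    #     'groupcompress-block\n<z_header_len>\n<header_len>\n<block_len>\n'
    #     + <z_header> + <block>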

    @classmethod
    def from_bytes(cls, bytes):
        # TODO: This does extra string copying, probably better to do it a
        #       different way
        (storage_kind, z_header_len, header_len,
         block_len, rest) = bytes.split('\n', 4)
        del bytes
        if storage_kind != 'groupcompress-block':
            raise ValueError('Unknown storage kind: %s' % (storage_kind,))
        z_header_len = int(z_header_len)
        if len(rest) < z_header_len:
            raise ValueError('Compressed header len shorter than all bytes')
        z_header = rest[:z_header_len]
        header_len = int(header_len)
        header = zlib.decompress(z_header)
        if len(header) != header_len:
            raise ValueError('invalid length for decompressed bytes')
        del z_header
        block_len = int(block_len)
        if len(rest) != z_header_len + block_len:
            raise ValueError('Invalid length for block')
        block_bytes = rest[z_header_len:]
        del rest
        # So now we have a valid GCB, we just need to parse the factories that
        # were sent to us
        header_lines = header.split('\n')
        del header
        last = header_lines.pop()
        if last != '':
            raise ValueError('header lines did not end with a trailing'
                             ' newline')
        if len(header_lines) % 4 != 0:
            raise ValueError('The header was not an even multiple of 4 lines')
        block = GroupCompressBlock.from_bytes(block_bytes)
        del block_bytes
        result = cls(block)
        for start in xrange(0, len(header_lines), 4):
            # intern()?
            key = tuple(header_lines[start].split('\x00'))
            parents_line = header_lines[start+1]
            if parents_line == 'None:':
                parents = None
            else:
                parents = tuple([tuple(segment.split('\x00'))
                                 for segment in parents_line.split('\t')
                                 if segment])
            start_offset = int(header_lines[start+2])
            end_offset = int(header_lines[start+3])
            result.add_factory(key, parents, start_offset, end_offset)
        return result


def network_block_to_records(storage_kind, bytes, line_end):
    if storage_kind != 'groupcompress-block':
        raise ValueError('Unknown storage kind: %s' % (storage_kind,))
    manager = _LazyGroupContentManager.from_bytes(bytes)
    return manager.get_record_stream()


class _CommonGroupCompressor(object):

    def __init__(self):
        """Create a GroupCompressor."""
        self.chunks = []
        self._last = None
        self.endpoint = 0
        self.input_bytes = 0
        self.labels_deltas = {}
        self._delta_index = None # Set by the children
        self._block = GroupCompressBlock()

    def compress(self, key, bytes, expected_sha, nostore_sha=None, soft=False):
        """Compress lines with label key.

        :param key: A key tuple. It is stored in the output
            for identification of the text during decompression. If the last
            element is 'None' it is replaced with the sha1 of the text -
            e.g. sha1:xxxxxxx.
        :param bytes: The bytes to be compressed
        :param expected_sha: If non-None, the sha the lines are believed to
            have. During compression the sha is calculated; a mismatch will
            cause an error.
        :param nostore_sha: If the computed sha1 sum matches, we will raise
            ExistingContent rather than adding the text.
        :param soft: Do a 'soft' compression. This means that we require larger
            ranges to match to be considered for a copy command.

        :return: The sha1 of lines, the start and end offsets in the delta, and
            the type ('fulltext' or 'delta').

        :seealso VersionedFiles.add_lines:
        """
        if not bytes: # empty, like a dir entry, etc
            if nostore_sha == _null_sha1:
                raise errors.ExistingContent()
            return _null_sha1, 0, 0, 'fulltext'
        # we assume someone knew what they were doing when they passed it in
        if expected_sha is not None:
            sha1 = expected_sha
        else:
            sha1 = osutils.sha_string(bytes)
        if nostore_sha is not None:
            if sha1 == nostore_sha:
                raise errors.ExistingContent()
        if key[-1] is None:
            key = key[:-1] + ('sha1:' + sha1,)

        start, end, type = self._compress(key, bytes, len(bytes) / 2, soft)
        return sha1, start, end, type
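
    # A minimal usage sketch (assumed flow, mirroring _rebuild_block above):
    #     >>> compressor = GroupCompressor()
    #     >>> sha1, start, end, kind = compressor.compress(
    #     ...     ('key',), 'some bytes\n', None)
    #     >>> block = compressor.flush()
    #     >>> block.extract(('key',), start, end)
    #     'some bytes\n'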

    def _compress(self, key, bytes, max_delta_size, soft=False):
        """Compress lines with label key.

        :param key: A key tuple. It is stored in the output for identification
            of the text during decompression.

        :param bytes: The bytes to be compressed

        :param max_delta_size: The size above which we issue a fulltext instead
            of a delta.

        :param soft: Do a 'soft' compression. This means that we require larger
            ranges to match to be considered for a copy command.

        :return: The start and end offsets in the delta, and the type
            ('fulltext' or 'delta').
        """
        raise NotImplementedError(self._compress)

    def extract(self, key):
        """Extract a key previously added to the compressor.

        :param key: The key to extract.
        :return: An iterable over bytes and the sha1.
        """
        (start_byte, start_chunk, end_byte, end_chunk) = self.labels_deltas[key]
        delta_chunks = self.chunks[start_chunk:end_chunk]
        stored_bytes = ''.join(delta_chunks)
        if stored_bytes[0] == 'f':
            fulltext_len, offset = decode_base128_int(stored_bytes[1:10])
            data_len = fulltext_len + 1 + offset
            if data_len != len(stored_bytes):
                raise ValueError('Index claimed fulltext len, but stored bytes'
                                 ' claim %s != %s'
                                 % (len(stored_bytes), data_len))
            bytes = stored_bytes[offset + 1:]
        else:
            # XXX: This is inefficient at best
            source = ''.join(self.chunks[:start_chunk])
            if stored_bytes[0] != 'd':
                raise ValueError('Unknown content kind, bytes claim %s'
                                 % (stored_bytes[0],))
            delta_len, offset = decode_base128_int(stored_bytes[1:10])
            data_len = delta_len + 1 + offset
            if data_len != len(stored_bytes):
                raise ValueError('Index claimed delta len, but stored bytes'
                                 ' claim %s != %s'
                                 % (len(stored_bytes), data_len))
            bytes = apply_delta(source, stored_bytes[offset + 1:])
        bytes_sha1 = osutils.sha_string(bytes)
        return bytes, bytes_sha1

    def flush(self):
        """Finish this group, creating a formatted stream.

        After calling this, the compressor should no longer be used.
        """
        # TODO: this causes us to 'bloat' to 2x the size of content in the
        #       group. This has an impact for 'commit' of large objects.
        #       One possibility is to use self._content_chunks, and be lazy and
        #       only fill out self._content as a full string when we actually
        #       need it. That would at least drop the peak memory consumption
        #       for 'commit' down to ~1x the size of the largest file, at a
        #       cost of increased complexity within this code. 2x is still <<
        #       3x the size of the largest file, so we are doing ok.
        content = ''.join(self.chunks)
        self.chunks = None
        self._delta_index = None
        self._block.set_content(content)
        return self._block

    def pop_last(self):
        """Call this if you want to 'revoke' the last compression.

        After this, the data structures will be rolled back, but you cannot do
        more compression.
        """
        self._delta_index = None
        del self.chunks[self._last[0]:]
        self.endpoint = self._last[1]
        self._last = None

    def ratio(self):
        """Return the overall compression ratio."""
        return float(self.input_bytes) / float(self.endpoint)
787 |
class PythonGroupCompressor(_CommonGroupCompressor): |
|
788 |
||
3735.40.2
by John Arbash Meinel
Add a groupcompress.encode_copy_instruction function. |
789 |
def __init__(self): |
4241.6.6
by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core. |
790 |
"""Create a GroupCompressor.
|
791 |
||
792 |
Used only if the pyrex version is not available.
|
|
793 |
"""
|
|
794 |
super(PythonGroupCompressor, self).__init__() |
|
3735.40.17
by John Arbash Meinel
Change the attribute from 'lines' to 'chunks' to make it more |
795 |
self._delta_index = LinesDeltaIndex([]) |
796 |
# The actual content is managed by LinesDeltaIndex
|
|
797 |
self.chunks = self._delta_index.lines |

    def _compress(self, key, bytes, max_delta_size, soft=False):
        """see _CommonGroupCompressor._compress"""
        input_len = len(bytes)
        new_lines = osutils.split_lines(bytes)
        out_lines, index_lines = self._delta_index.make_delta(
            new_lines, bytes_length=input_len, soft=soft)
        delta_length = sum(map(len, out_lines))
        if delta_length > max_delta_size:
            # The delta is longer than the fulltext, insert a fulltext
            type = 'fulltext'
            out_lines = ['f', encode_base128_int(input_len)]
            out_lines.extend(new_lines)
            index_lines = [False, False]
            index_lines.extend([True] * len(new_lines))
        else:
            # this is a worthy delta, output it
            type = 'delta'
            out_lines[0] = 'd'
            # Update the delta_length to include those two encoded integers
            out_lines[1] = encode_base128_int(delta_length)
        # Before insertion
        start = self.endpoint
        chunk_start = len(self.chunks)
        self._last = (chunk_start, self.endpoint)
        self._delta_index.extend_lines(out_lines, index_lines)
        self.endpoint = self._delta_index.endpoint
        self.input_bytes += input_len
        chunk_end = len(self.chunks)
        self.labels_deltas[key] = (start, chunk_start,
                                   self.endpoint, chunk_end)
        return start, self.endpoint, type
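

# A minimal standalone sketch (not part of bzrlib) of the base128 varint used
# in the record headers built above: 7 bits per byte, low bits first, high bit
# set on every byte except the last. The module's own encode_base128_int does
# this job; the name below is local to this sketch.
def _sketch_encode_base128_int(val):
    """Encode a non-negative integer as a base128 varint."""
    out = []
    while val >= 0x80:
        out.append(chr((val | 0x80) & 0xFF))
        val >>= 7
    out.append(chr(val))
    # A fulltext record is then 'f' + varint(len(text)) + text, and a
    # delta record is 'd' + varint(len(delta)) + delta.
    return ''.join(out)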


class PyrexGroupCompressor(_CommonGroupCompressor):
    """Produce a serialised group of compressed texts.

    It contains code very similar to SequenceMatcher because it has a similar
    task. However, some key differences apply:
    - there is no junk, we want a minimal edit not a human readable diff.
    - we don't filter very common lines (because we don't know where a good
      range will start, and after the first text we want to be emitting
      minimal edits only).
    - we chain the left side, not the right side.
    - we incrementally update the adjacency matrix as new lines are provided.
    - we look for matches in all of the left side, so the routine which does
      the analogous task of find_longest_match does not need to filter on the
      left side.
    """

    def __init__(self):
        super(PyrexGroupCompressor, self).__init__()
        self._delta_index = DeltaIndex()

    def _compress(self, key, bytes, max_delta_size, soft=False):
        """see _CommonGroupCompressor._compress"""
        input_len = len(bytes)
        # By having action/label/sha1/len, we can parse the group if the index
        # was ever destroyed, we have the key in 'label', we know the final
        # bytes are valid from sha1, and we know where to find the end of this
        # record because of 'len'. (the delta record itself will store the
        # total length for the expanded record)
        # 'len: %d\n' costs approximately 1% increase in total data
        # Having the labels at all costs us 9-10% increase, 38% increase for
        # inventory pages, and 5.8% increase for text pages
        # new_chunks = ['label:%s\nsha1:%s\n' % (label, sha1)]
        if self._delta_index._source_offset != self.endpoint:
            raise AssertionError('_source_offset != endpoint:'
                ' somehow the DeltaIndex got out of sync with'
                ' the output lines')
        delta = self._delta_index.make_delta(bytes, max_delta_size)
        if delta is None:
            type = 'fulltext'
            enc_length = encode_base128_int(len(bytes))
            len_mini_header = 1 + len(enc_length)
            self._delta_index.add_source(bytes, len_mini_header)
            new_chunks = ['f', enc_length, bytes]
        else:
            type = 'delta'
            enc_length = encode_base128_int(len(delta))
            len_mini_header = 1 + len(enc_length)
            new_chunks = ['d', enc_length, delta]
            self._delta_index.add_delta_source(delta, len_mini_header)
        # Before insertion
        start = self.endpoint
        chunk_start = len(self.chunks)
        # Now output these bytes
        self._output_chunks(new_chunks)
        self.input_bytes += input_len
        chunk_end = len(self.chunks)
        self.labels_deltas[key] = (start, chunk_start,
                                   self.endpoint, chunk_end)
        if self._delta_index._source_offset != self.endpoint:
            raise AssertionError('the delta index is out of sync'
                ' with the output lines %s != %s'
                % (self._delta_index._source_offset, self.endpoint))
        return start, self.endpoint, type

    def _output_chunks(self, new_chunks):
        """Output some chunks.

        :param new_chunks: The chunks to output.
        """
        self._last = (len(self.chunks), self.endpoint)
        endpoint = self.endpoint
        self.chunks.extend(new_chunks)
        endpoint += sum(map(len, new_chunks))
        self.endpoint = endpoint
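

# For reference, a sketch (helper name invented) of how the bookkeeping kept
# above can recover one record: labels_deltas stores
# (start_byte, start_chunk, end_byte, end_chunk) per key, so the raw record
# is just a slice of the accumulated chunks.
def _sketch_slice_record(chunks, start_chunk, end_chunk):
    """Rejoin the chunks that make up a single compressed record."""
    return ''.join(chunks[start_chunk:end_chunk])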


def make_pack_factory(graph, delta, keylength):
    """Create a factory for creating a pack based groupcompress.

    This is only functional enough to run interface tests, it doesn't try to
    provide a full pack environment.

    :param graph: Store a graph.
    :param delta: Delta compress contents.
    :param keylength: How long should keys be.
    """
    def factory(transport):
        parents = graph
        ref_length = 0
        if graph:
            ref_length = 1
        graph_index = BTreeBuilder(reference_lists=ref_length,
                                   key_elements=keylength)
        stream = transport.open_write_stream('newpack')
        writer = pack.ContainerWriter(stream.write)
        writer.begin()
        index = _GCGraphIndex(graph_index, lambda: True, parents=parents,
                              add_callback=graph_index.add_nodes)
        access = knit._DirectPackAccess({})
        access.set_writer(writer, graph_index, (transport, 'newpack'))
        result = GroupCompressVersionedFiles(index, access, delta)
        result.stream = stream
        result.writer = writer
        return result
    return factory


def cleanup_pack_group(versioned_files):
    versioned_files.writer.end()
    versioned_files.stream.close()
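

# A hedged usage sketch for the two helpers above (test support only, per the
# make_pack_factory docstring). bzrlib's transport.get_transport and the
# in-memory transport are real; the exact flow below is only an example.
def _example_pack_group_roundtrip():
    from bzrlib import transport
    t = transport.get_transport('memory:///')
    factory = make_pack_factory(graph=True, delta=True, keylength=1)
    vf = factory(t)
    # ... add texts / read records through the VersionedFiles API ...
    vf.add_lines(('key-1',), (), ['hello\n', 'world\n'])
    cleanup_pack_group(vf)  # end the pack writer and close the stream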


class GroupCompressVersionedFiles(VersionedFiles):
    """A group-compress based VersionedFiles implementation."""

    def __init__(self, index, access, delta=True):
        """Create a GroupCompressVersionedFiles object.

        :param index: The index object storing access and graph data.
        :param access: The access object storing raw data.
        :param delta: Whether to delta compress or just entropy compress.
        """
        self._index = index
        self._access = access
        self._delta = delta
        self._unadded_refs = {}
        self._group_cache = LRUSizeCache(max_size=50*1024*1024)
        self._fallback_vfs = []

    def add_lines(self, key, parents, lines, parent_texts=None,
        left_matching_blocks=None, nostore_sha=None, random_id=False,
        check_content=True):
        """Add a text to the store.

        :param key: The key tuple of the text to add.
        :param parents: The parents key tuples of the text to add.
        :param lines: A list of lines. Each line must be a bytestring. And all
            of them except the last must be terminated with \n and contain no
            other \n's. The last line may either contain no \n's or a single
            terminating \n. If the lines list does not meet this constraint the
            add routine may error or may succeed - but you will be unable to
            read the data back accurately. (Checking the lines have been split
            correctly is expensive and extremely unlikely to catch bugs so it
            is not done at runtime unless check_content is True.)
        :param parent_texts: An optional dictionary containing the opaque
            representations of some or all of the parents of version_id to
            allow delta optimisations. VERY IMPORTANT: the texts must be those
            returned by add_lines or data corruption can be caused.
        :param left_matching_blocks: a hint about which areas are common
            between the text and its left-hand-parent. The format is
            the SequenceMatcher.get_matching_blocks format.
        :param nostore_sha: Raise ExistingContent and do not add the lines to
            the versioned file if the digest of the lines matches this.
        :param random_id: If True a random id has been selected rather than
            an id determined by some deterministic process such as a converter
            from a foreign VCS. When True the backend may choose not to check
            for uniqueness of the resulting key within the versioned file, so
            this should only be done when the result is expected to be unique
            anyway.
        :param check_content: If True, the lines supplied are verified to be
            bytestrings that are correctly formed lines.
        :return: The text sha1, the number of bytes in the text, and an opaque
            representation of the inserted version which can be provided
            back to future add_lines calls in the parent_texts dictionary.
        """
        self._index._check_write_ok()
        self._check_add(key, lines, random_id, check_content)
        if parents is None:
            # The caller might pass None if there is no graph data, but kndx
            # indexes can't directly store that, so we give them
            # an empty tuple instead.
            parents = ()
        # double handling for now. Make it work until then.
        length = sum(map(len, lines))
        record = ChunkedContentFactory(key, parents, None, lines)
        sha1 = list(self._insert_record_stream([record], random_id=random_id,
                                               nostore_sha=nostore_sha))[0]
        return sha1, length, None
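
    # Usage sketch for add_lines (key and parent tuples invented):
    #
    #   sha1, num_bytes, _ = vf.add_lines(
    #       ('file-id', 'rev-2'),           # key tuple
    #       [('file-id', 'rev-1')],         # parent key tuples
    #       ['line one\n', 'line two\n'])   # pre-split bytestring lines
    #
    # The third return value is the opaque representation, which this
    # implementation always returns as None.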

    def add_fallback_versioned_files(self, a_versioned_files):
        """Add a source of texts for texts not present in this knit.

        :param a_versioned_files: A VersionedFiles object.
        """
        self._fallback_vfs.append(a_versioned_files)

    def annotate(self, key):
        """See VersionedFiles.annotate."""
        graph = Graph(self)
        parent_map = self.get_parent_map([key])
        if not parent_map:
            raise errors.RevisionNotPresent(key, self)
        if parent_map[key] is not None:
            parent_map = dict((k, v) for k, v in graph.iter_ancestry([key])
                              if v is not None)
            keys = parent_map.keys()
        else:
            keys = [key]
            parent_map = {key: ()}
        # We used Graph(self) to load the parent_map, but now that we have it,
        # we can just query the parent map directly, so create a KnownGraph
        heads_provider = _mod_graph.KnownGraph(parent_map)
        parent_cache = {}
        reannotate = annotate.reannotate
        for record in self.get_record_stream(keys, 'topological', True):
            key = record.key
            lines = osutils.chunks_to_lines(record.get_bytes_as('chunked'))
            parent_lines = [parent_cache[parent] for parent in parent_map[key]]
            parent_cache[key] = list(
                reannotate(parent_lines, lines, key, None, heads_provider))
        return parent_cache[key]

    def check(self, progress_bar=None):
        """See VersionedFiles.check()."""
        keys = self.keys()
        for record in self.get_record_stream(keys, 'unordered', True):
            record.get_bytes_as('fulltext')

    def _check_add(self, key, lines, random_id, check_content):
        """check that version_id and lines are safe to add."""
        version_id = key[-1]
        if version_id is not None:
            if osutils.contains_whitespace(version_id):
                raise errors.InvalidRevisionId(version_id, self)
            self.check_not_reserved_id(version_id)
        # TODO: If random_id==False and the key is already present, we should
        # probably check that the existing content is identical to what is
        # being inserted, and otherwise raise an exception. This would make
        # the bundle code simpler.
        if check_content:
            self._check_lines_not_unicode(lines)
            self._check_lines_are_lines(lines)

    def get_parent_map(self, keys):
        """Get a map of the graph parents of keys.

        :param keys: The keys to look up parents for.
        :return: A mapping from keys to parents. Absent keys are absent from
            the mapping.
        """
        return self._get_parent_map_with_sources(keys)[0]

    def _get_parent_map_with_sources(self, keys):
        """Get a map of the parents of keys.

        :param keys: The keys to look up parents for.
        :return: A tuple. The first element is a mapping from keys to parents.
            Absent keys are absent from the mapping. The second element is a
            list with the locations each key was found in. The first element
            is the in-this-knit parents, the second the first fallback source,
            and so on.
        """
        result = {}
        sources = [self._index] + self._fallback_vfs
        source_results = []
        missing = set(keys)
        for source in sources:
            if not missing:
                break
            new_result = source.get_parent_map(missing)
            source_results.append(new_result)
            result.update(new_result)
            missing.difference_update(set(new_result))
        return result, source_results
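
    # Behaviour sketch for the stacked lookup above, with hypothetical
    # sources A (knows k1) and B (knows k1 and k2): earlier sources win, and
    # later sources are only asked about still-missing keys:
    #
    #   result == {k1: parents_from_A, k2: parents_from_B}
    #   source_results == [{k1: parents_from_A}, {k2: parents_from_B}]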

    def _get_block(self, index_memo):
        read_memo = index_memo[0:3]
        # get the group:
        try:
            block = self._group_cache[read_memo]
        except KeyError:
            # read the group
            zdata = self._access.get_raw_records([read_memo]).next()
            # decompress - whole thing - this is not a bug, as it
            # permits caching. We might want to store the partially
            # decompressed group and decompress object, so that recent
            # texts are not penalised by big groups.
            block = GroupCompressBlock.from_bytes(zdata)
            self._group_cache[read_memo] = block
        # cheapo debugging:
        # print len(zdata), len(plain)
        # parse - requires split_lines, better to have byte offsets
        # here (but not by much - we only split the region for the
        # recipe, and we often want to end up with lines anyway).
        return block
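
    # For orientation, the memo layout as unpacked in this module:
    #
    #   index_memo = (index, group_start, group_length, start, end)
    #   read_memo  = index_memo[0:3]  # locates the compressed group on disk
    #   start, end = index_memo[3:5]  # slice within the expanded group
    #
    # Every key stored in one group shares a read_memo, which is what makes
    # the _group_cache lookup above effective.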

    def get_missing_compression_parent_keys(self):
        """Return the keys of missing compression parents.

        Missing compression parents occur when a record stream was missing
        basis texts, or an index was scanned that had missing basis texts.
        """
        # GroupCompress cannot currently reference texts that are not in the
        # group, so this is valid for now
        return frozenset()

    def get_record_stream(self, keys, ordering, include_delta_closure):
        """Get a stream of records for keys.

        :param keys: The keys to include.
        :param ordering: Either 'unordered' or 'topological'. A topologically
            sorted stream has compression parents strictly before their
            children.
        :param include_delta_closure: If True then the closure across any
            compression parents will be included (in the opaque data).
        :return: An iterator of ContentFactory objects, each of which is only
            valid until the iterator is advanced.
        """
        # keys might be a generator
        orig_keys = list(keys)
        keys = set(keys)
        if not keys:
            return
        if (not self._index.has_graph
            and ordering in ('topological', 'groupcompress')):
            # Cannot topologically order when no graph has been stored,
            # but we allow 'as-requested' or 'unordered'.
            ordering = 'unordered'

        remaining_keys = keys
        while True:
            try:
                keys = set(remaining_keys)
                for content_factory in self._get_remaining_record_stream(keys,
                        orig_keys, ordering, include_delta_closure):
                    remaining_keys.discard(content_factory.key)
                    yield content_factory
                return
            except errors.RetryWithNewPacks, e:
                self._access.reload_or_raise(e)
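
    # Typical consumption sketch (keys invented); each factory is only valid
    # until the iterator advances, so pull the bytes out immediately:
    #
    #   for record in vf.get_record_stream(keys, 'unordered', True):
    #       if record.storage_kind == 'absent':
    #           continue   # or raise, depending on the caller's contract
    #       text = record.get_bytes_as('fulltext')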

    def _find_from_fallback(self, missing):
        """Find whatever keys you can from the fallbacks.

        :param missing: A set of missing keys. This set will be mutated as keys
            are found from a fallback_vfs.
        :return: (parent_map, key_to_source_map, source_results)
            parent_map  the overall key => parent_keys
            key_to_source_map  a dict from {key: source}
            source_results  a list of (source: keys)
        """
        parent_map = {}
        key_to_source_map = {}
        source_results = []
        for source in self._fallback_vfs:
            if not missing:
                break
            source_parents = source.get_parent_map(missing)
            parent_map.update(source_parents)
            source_parents = list(source_parents)
            source_results.append((source, source_parents))
            key_to_source_map.update((key, source) for key in source_parents)
            missing.difference_update(source_parents)
        return parent_map, key_to_source_map, source_results

    def _get_ordered_source_keys(self, ordering, parent_map, key_to_source_map):
        """Get the (source, [keys]) list.

        The returned objects should be in the order defined by 'ordering',
        which can weave between different sources.

        :param ordering: Must be one of 'topological' or 'groupcompress'
        :return: List of [(source, [keys])] tuples, such that all keys are in
            the defined order, regardless of source.
        """
        if ordering == 'topological':
            present_keys = topo_sort(parent_map)
        else:
            # ordering == 'groupcompress'
            # XXX: This only optimizes for the target ordering. We may need
            #      to balance that with the time it takes to extract
            #      ordering, by somehow grouping based on
            #      locations[key][0:3]
            present_keys = sort_gc_optimal(parent_map)
        # Now group by source:
        source_keys = []
        current_source = None
        for key in present_keys:
            source = key_to_source_map.get(key, self)
            if source is not current_source:
                source_keys.append((source, []))
                current_source = source
            source_keys[-1][1].append(key)
        return source_keys
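
    # Grouping sketch for the loop above, with a hypothetical fallback
    # source B (keys not in key_to_source_map default to self):
    #
    #   present_keys      = [k1, k2, k3, k4]
    #   key_to_source_map = {k3: B}
    #   => [(self, [k1, k2]), (B, [k3]), (self, [k4])]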

    def _get_as_requested_source_keys(self, orig_keys, locations, unadded_keys,
                                      key_to_source_map):
        source_keys = []
        current_source = None
        for key in orig_keys:
            if key in locations or key in unadded_keys:
                source = self
            elif key in key_to_source_map:
                source = key_to_source_map[key]
            else: # absent
                continue
            if source is not current_source:
                source_keys.append((source, []))
                current_source = source
            source_keys[-1][1].append(key)
        return source_keys

    def _get_io_ordered_source_keys(self, locations, unadded_keys,
                                    source_result):
        def get_group(key):
            # This is the group the bytes are stored in, followed by the
            # location in the group
            return locations[key][0]
        present_keys = sorted(locations.iterkeys(), key=get_group)
        # We don't have an ordering for keys in the in-memory object, but
        # let's process the in-memory ones first.
        present_keys = list(unadded_keys) + present_keys
        # Now grab all of the ones from other sources
        source_keys = [(self, present_keys)]
        source_keys.extend(source_result)
        return source_keys

    def _get_remaining_record_stream(self, keys, orig_keys, ordering,
                                     include_delta_closure):
        """Get a stream of records for keys.

        :param keys: The keys to include.
        :param ordering: one of 'unordered', 'topological', 'groupcompress' or
            'as-requested'
        :param include_delta_closure: If True then the closure across any
            compression parents will be included (in the opaque data).
        :return: An iterator of ContentFactory objects, each of which is only
            valid until the iterator is advanced.
        """
        # Cheap: iterate
        locations = self._index.get_build_details(keys)
        unadded_keys = set(self._unadded_refs).intersection(keys)
        missing = keys.difference(locations)
        missing.difference_update(unadded_keys)
        (fallback_parent_map, key_to_source_map,
         source_result) = self._find_from_fallback(missing)
        if ordering in ('topological', 'groupcompress'):
            # would be better to not globally sort initially but instead
            # start with one key, recurse to its oldest parent, then grab
            # everything in the same group, etc.
            parent_map = dict((key, details[2]) for key, details in
                locations.iteritems())
            for key in unadded_keys:
                parent_map[key] = self._unadded_refs[key]
            parent_map.update(fallback_parent_map)
            source_keys = self._get_ordered_source_keys(ordering, parent_map,
                                                        key_to_source_map)
        elif ordering == 'as-requested':
            source_keys = self._get_as_requested_source_keys(orig_keys,
                locations, unadded_keys, key_to_source_map)
        else:
            # We want to yield the keys in a semi-optimal (read-wise) ordering.
            # Otherwise we thrash the _group_cache and destroy performance
            source_keys = self._get_io_ordered_source_keys(locations,
                unadded_keys, source_result)
        for key in missing:
            yield AbsentContentFactory(key)
        manager = None
        last_read_memo = None
        # TODO: This works fairly well at batching up existing groups into a
        #       streamable format, and possibly allowing for taking one big
        #       group and splitting it when it isn't fully utilized.
        #       However, it doesn't allow us to find under-utilized groups and
        #       combine them into a bigger group on the fly.
        #       (Consider the issue with how chk_map inserts texts
        #       one-at-a-time.) This could be done at insert_record_stream()
        #       time, but it probably would decrease the number of
        #       bytes-on-the-wire for fetch.
        for source, keys in source_keys:
            if source is self:
                for key in keys:
                    if key in self._unadded_refs:
                        if manager is not None:
                            for factory in manager.get_record_stream():
                                yield factory
                            last_read_memo = manager = None
                        bytes, sha1 = self._compressor.extract(key)
                        parents = self._unadded_refs[key]
                        yield FulltextContentFactory(key, parents, sha1, bytes)
                    else:
                        index_memo, _, parents, (method, _) = locations[key]
                        read_memo = index_memo[0:3]
                        if last_read_memo != read_memo:
                            # We are starting a new block. If we have a
                            # manager, we have found everything that fits for
                            # now, so yield records
                            if manager is not None:
                                for factory in manager.get_record_stream():
                                    yield factory
                            # Now start a new manager
                            block = self._get_block(index_memo)
                            manager = _LazyGroupContentManager(block)
                            last_read_memo = read_memo
                        start, end = index_memo[3:5]
                        manager.add_factory(key, parents, start, end)
            else:
                if manager is not None:
                    for factory in manager.get_record_stream():
                        yield factory
                    last_read_memo = manager = None
                for record in source.get_record_stream(keys, ordering,
                                                       include_delta_closure):
                    yield record
        if manager is not None:
            for factory in manager.get_record_stream():
                yield factory
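
    # Batching sketch for the loop above: keys that live in the same block
    # share a read_memo, so a run like [k1 (block X), k2 (block X),
    # k3 (block Y)] builds one _LazyGroupContentManager for block X serving
    # k1 and k2, and flushes it when k3 moves last_read_memo on to block Y.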

    def get_sha1s(self, keys):
        """See VersionedFiles.get_sha1s()."""
        result = {}
        for record in self.get_record_stream(keys, 'unordered', True):
            if record.sha1 is not None:
                result[record.key] = record.sha1
            else:
                if record.storage_kind != 'absent':
                    result[record.key] = osutils.sha_string(
                        record.get_bytes_as('fulltext'))
        return result

    def insert_record_stream(self, stream):
        """Insert a record stream into this container.

        :param stream: A stream of records to insert.
        :return: None
        :seealso VersionedFiles.get_record_stream:
        """
        # XXX: Setting random_id=True makes
        # test_insert_record_stream_existing_keys fail for groupcompress and
        # groupcompress-nograph, this needs to be revisited while addressing
        # 'bzr branch' performance issues.
        for _ in self._insert_record_stream(stream, random_id=False):
            pass

    def _insert_record_stream(self, stream, random_id=False, nostore_sha=None,
                              reuse_blocks=True):
        """Internal core to insert a record stream into this container.

        This helper function has a different interface than insert_record_stream
        to allow add_lines to be minimal, but still return the needed data.

        :param stream: A stream of records to insert.
        :param nostore_sha: If the sha1 of a given text matches nostore_sha,
            raise ExistingContent, rather than committing the new text.
        :param reuse_blocks: If the source is streaming from
            groupcompress-blocks, just insert the blocks as-is, rather than
            expanding the texts and inserting again.
        :return: An iterator over the sha1 of the inserted records.
        :seealso insert_record_stream:
        :seealso add_lines:
        """
        adapters = {}
        def get_adapter(adapter_key):
            try:
                return adapters[adapter_key]
            except KeyError:
                adapter_factory = adapter_registry.get(adapter_key)
                adapter = adapter_factory(self)
                adapters[adapter_key] = adapter
                return adapter
        # This will go up to fulltexts for gc to gc fetching, which isn't
        # ideal.
        self._compressor = GroupCompressor()
        self._unadded_refs = {}
        keys_to_add = []
        def flush():
            bytes = self._compressor.flush().to_bytes()
            index, start, length = self._access.add_raw_records(
                [(None, len(bytes))], bytes)[0]
            nodes = []
            for key, reads, refs in keys_to_add:
                nodes.append((key, "%d %d %s" % (start, length, reads), refs))
            self._index.add_records(nodes, random_id=random_id)
            self._unadded_refs = {}
            del keys_to_add[:]
            self._compressor = GroupCompressor()

        last_prefix = None
        max_fulltext_len = 0
        max_fulltext_prefix = None
        insert_manager = None
        block_start = None
        block_length = None
        # XXX: TODO: remove this, it is just for safety checking for now
        inserted_keys = set()
        for record in stream:
            # Raise an error when a record is missing.
            if record.storage_kind == 'absent':
                raise errors.RevisionNotPresent(record.key, self)
            if random_id:
                if record.key in inserted_keys:
                    trace.note('Insert claimed random_id=True,'
                               ' but then inserted %r two times', record.key)
                    continue
                inserted_keys.add(record.key)
            if reuse_blocks:
                # If the reuse_blocks flag is set, check to see if we can just
                # copy a groupcompress block as-is.
                if record.storage_kind == 'groupcompress-block':
                    # Insert the raw block into the target repo
                    insert_manager = record._manager
                    insert_manager._check_rebuild_block()
                    bytes = record._manager._block.to_bytes()
                    _, start, length = self._access.add_raw_records(
                        [(None, len(bytes))], bytes)[0]
                    del bytes
                    block_start = start
                    block_length = length
                if record.storage_kind in ('groupcompress-block',
                                           'groupcompress-block-ref'):
                    if insert_manager is None:
                        raise AssertionError('No insert_manager set')
                    value = "%d %d %d %d" % (block_start, block_length,
                                             record._start, record._end)
                    nodes = [(record.key, value, (record.parents,))]
                    # TODO: Consider buffering up many nodes to be added, not
                    #       sure how much overhead this has, but we're seeing
                    #       ~23s / 120s in add_records calls
                    self._index.add_records(nodes, random_id=random_id)
                    continue
            try:
                bytes = record.get_bytes_as('fulltext')
            except errors.UnavailableRepresentation:
                adapter_key = record.storage_kind, 'fulltext'
                adapter = get_adapter(adapter_key)
                bytes = adapter.get_bytes(record)
            if len(record.key) > 1:
                prefix = record.key[0]
                soft = (prefix == last_prefix)
            else:
                prefix = None
                soft = False
            if max_fulltext_len < len(bytes):
                max_fulltext_len = len(bytes)
                max_fulltext_prefix = prefix
            (found_sha1, start_point, end_point,
             type) = self._compressor.compress(record.key,
                                               bytes, record.sha1, soft=soft,
                                               nostore_sha=nostore_sha)
            # delta_ratio = float(len(bytes)) / (end_point - start_point)
            # Check if we want to continue to include that text
            if (prefix == max_fulltext_prefix
                and end_point < 2 * max_fulltext_len):
                # As long as we are on the same file_id, we will fill at least
                # 2 * max_fulltext_len
                start_new_block = False
            elif end_point > 4*1024*1024:
                start_new_block = True
            elif (prefix is not None and prefix != last_prefix
                  and end_point > 2*1024*1024):
                start_new_block = True
            else:
                start_new_block = False
            last_prefix = prefix
            if start_new_block:
                self._compressor.pop_last()
                flush()
                max_fulltext_len = len(bytes)
                (found_sha1, start_point, end_point,
                 type) = self._compressor.compress(record.key, bytes,
                                                   record.sha1)
            if record.key[-1] is None:
                key = record.key[:-1] + ('sha1:' + found_sha1,)
            else:
                key = record.key
            self._unadded_refs[key] = record.parents
            yield found_sha1
            keys_to_add.append((key, '%d %d' % (start_point, end_point),
                (record.parents,)))
        if len(keys_to_add):
            flush()
        self._compressor = None
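
    # The index value written by flush() above is the byte string
    # "<group_start> <group_length> <start> <end>". A decoding sketch
    # (helper name invented, not part of this module):
    #
    #   def parse_index_value(value):
    #       group_start, group_length, start, end = map(int, value.split())
    #       return group_start, group_length, start, end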

    def iter_lines_added_or_present_in_keys(self, keys, pb=None):
        """Iterate over the lines in the versioned files from keys.

        This may return lines from other keys. Each item the returned
        iterator yields is a tuple of a line and a text version that that line
        is present in (not introduced in).

        Ordering of results is in whatever order is most suitable for the
        underlying storage format.

        If a progress bar is supplied, it may be used to indicate progress.
        The caller is responsible for cleaning up progress bars (because this
        is an iterator).

        NOTES:
         * Lines are normalised by the underlying store: they will all have \n
           terminators.
         * Lines are returned in arbitrary order.

        :return: An iterator over (line, key).
        """
        if pb is None:
            pb = progress.DummyProgress()
        keys = set(keys)
        total = len(keys)
        # we don't care about inclusions, the caller cares.
        # but we need to set up a list of records to visit.
        # we need key, position, length
        for key_idx, record in enumerate(self.get_record_stream(keys,
                'unordered', True)):
            # XXX: todo - optimise to use less than full texts.
            key = record.key
            pb.update('Walking content', key_idx, total)
            if record.storage_kind == 'absent':
                raise errors.RevisionNotPresent(key, self)
            lines = osutils.split_lines(record.get_bytes_as('fulltext'))
            for line in lines:
                yield line, key
        pb.update('Walking content', total, total)

    def keys(self):
        """See VersionedFiles.keys."""
        if 'evil' in debug.debug_flags:
            trace.mutter_callsite(2, "keys scales with size of history")
        sources = [self._index] + self._fallback_vfs
        result = set()
        for source in sources:
            result.update(source.keys())
        return result


class _GCGraphIndex(object):
    """Mapper from GroupCompressVersionedFiles needs into GraphIndex storage."""

    def __init__(self, graph_index, is_locked, parents=True,
                 add_callback=None, track_external_parent_refs=False):
        """Construct a _GCGraphIndex on a graph_index.

        :param graph_index: An implementation of bzrlib.index.GraphIndex.
        :param is_locked: A callback, returns True if the index is locked and
            thus usable.
        :param parents: If True, record knit parents; if not, do not record
            parents.
        :param add_callback: If not None, allow additions to the index and call
            this callback with a list of added GraphIndex nodes:
            [(node, value, node_refs), ...]
        :param track_external_parent_refs: As keys are added, keep track of the
            keys they reference, so that we can query get_missing_parents(),
            etc.
        """
        self._add_callback = add_callback
        self._graph_index = graph_index
        self._parents = parents
        self.has_graph = parents
        self._is_locked = is_locked
        if track_external_parent_refs:
            self._key_dependencies = knit._KeyRefs()
        else:
            self._key_dependencies = None
0.17.5
by Robert Collins
nograph tests completely passing. |
1583 |
def add_records(self, records, random_id=False): |
1584 |
"""Add multiple records to the index.
|
|
3735.31.2
by John Arbash Meinel
Cleanup trailing whitespace, get test_source to pass by removing asserts. |
1585 |
|
0.17.5
by Robert Collins
nograph tests completely passing. |
1586 |
This function does not insert data into the immutable GraphIndex
|
1587 |
backing this _GCGraphIndex; instead it prepares data for insertion by
|
|
1588 |
the caller, checks that it is safe to insert, and then calls
|
|
1589 |
self._add_callback with the prepared GraphIndex nodes.
|
|
1590 |
||
1591 |
:param records: a list of tuples:
|
|
1592 |
(key, value, node_refs).
|
|
1593 |
:param random_id: If True the ids being added were randomly generated
|
|
1594 |
and no check for existence will be performed.
|
|
1595 |
"""
|
|
1596 |
if not self._add_callback: |
|
1597 |
raise errors.ReadOnlyError(self) |
|
1598 |
# we hope there are no repositories with inconsistent parentage
|
|
1599 |
# anymore.
|
|
1600 |
||
1601 |
changed = False |
|
1602 |
keys = {} |
|
1603 |
for (key, value, refs) in records: |
|
1604 |
if not self._parents: |
|
1605 |
if refs: |
|
1606 |
for ref in refs: |
|
1607 |
if ref: |
|
1608 |
raise errors.KnitCorrupt(self,
|
1609 |
"attempt to add node with parents "
|
|
1610 |
"in parentless index.") |
|
1611 |
refs = () |
|
1612 |
changed = True |
|
1613 |
keys[key] = (value, refs) |
|
1614 |
# check for dups
|
|
1615 |
if not random_id: |
|
1616 |
present_nodes = self._get_entries(keys) |
|
1617 |
for (index, key, value, node_refs) in present_nodes: |
|
1618 |
if node_refs != keys[key][1]: |
|
1619 |
raise errors.KnitCorrupt(self, "inconsistent details in add_records" |
|
1620 |
": %s %s" % ((value, node_refs), keys[key])) |
|
1621 |
del keys[key] |
|
1622 |
changed = True |
|
1623 |
if changed: |
|
1624 |
result = [] |
|
1625 |
if self._parents: |
|
1626 |
for key, (value, node_refs) in keys.iteritems(): |
|
1627 |
result.append((key, value, node_refs)) |
|
1628 |
else: |
|
1629 |
for key, (value, node_refs) in keys.iteritems(): |
|
1630 |
result.append((key, value)) |
|
1631 |
records = result |
|
4343.3.21
by John Arbash Meinel
Implement get_missing_parents in terms of _KeyRefs. |
1632 |
key_dependencies = self._key_dependencies |
1633 |
if key_dependencies is not None and self._parents: |
|
1634 |
for key, value, refs in records: |
|
1635 |
parents = refs[0] |
|
1636 |
key_dependencies.add_references(key, parents) |
|
0.17.5
by Robert Collins
nograph tests completely passing. |
1637 |
self._add_callback(records) |
3735.31.2
by John Arbash Meinel
Cleanup trailing whitespace, get test_source to pass by removing asserts. |
1638 |
|
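For example, under the parents=True configuration sketched above, each record's value is the four-integer position string decoded by _node_to_position below, and refs carries a single reference list of parent keys (all keys and offsets here are made up):

gc_index.add_records([
    # (key, value, refs): value encodes "start stop basis_end delta_end";
    # refs[0] is the tuple of parent keys.
    (('file-id', 'rev-2'), '0 400 17 250', ((('file-id', 'rev-1'),),)),
])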
0.17.5
by Robert Collins
nograph tests completely passing. |
1639 |
def _check_read(self): |
0.20.29
by Ian Clatworthy
groupcompress.py code cleanups |
1640 |
"""Raise an exception if reads are not permitted."""
|
0.17.5
by Robert Collins
nograph tests completely passing. |
1641 |
if not self._is_locked(): |
1642 |
raise errors.ObjectNotLocked(self) |
|
1643 |
||
0.17.2
by Robert Collins
Core proof of concept working. |
1644 |
def _check_write_ok(self): |
0.20.29
by Ian Clatworthy
groupcompress.py code cleanups |
1645 |
"""Raise an exception if writes are not permitted."""
|
0.17.2
by Robert Collins
Core proof of concept working. |
1646 |
if not self._is_locked(): |
1647 |
raise errors.ObjectNotLocked(self) |
|
1648 |
||
0.17.5
by Robert Collins
nograph tests completely passing. |
1649 |
def _get_entries(self, keys, check_present=False): |
1650 |
"""Get the entries for keys.
|
|
0.20.29
by Ian Clatworthy
groupcompress.py code cleanups |
1651 |
|
1652 |
Note: Callers are responsible for checking that the index is locked
|
|
1653 |
before calling this method.
|
|
1654 |
||
0.17.5
by Robert Collins
nograph tests completely passing. |
1655 |
:param keys: An iterable of index key tuples.
|
1656 |
"""
|
|
1657 |
keys = set(keys) |
|
1658 |
found_keys = set() |
|
1659 |
if self._parents: |
|
1660 |
for node in self._graph_index.iter_entries(keys): |
|
1661 |
yield node |
|
1662 |
found_keys.add(node[1]) |
|
1663 |
else: |
|
1664 |
# adapt parentless index to the rest of the code.
|
|
1665 |
for node in self._graph_index.iter_entries(keys): |
|
1666 |
yield node[0], node[1], node[2], () |
|
1667 |
found_keys.add(node[1]) |
|
1668 |
if check_present: |
|
1669 |
missing_keys = keys.difference(found_keys) |
|
1670 |
if missing_keys: |
|
1671 |
raise errors.RevisionNotPresent(missing_keys.pop(), self)
|
1672 |
||
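Either way, callers can rely on a uniform four-tuple shape; a sketch using the hypothetical gc_index from above:

for index_obj, key, value, refs in gc_index._get_entries(
        [('file-id', 'rev-2')]):
    # With parents=True, refs is ((parent_key, ...),); a parentless
    # index is padded to refs == () by the else branch above.
    pass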
1673 |
def get_parent_map(self, keys): |
|
1674 |
"""Get a map of the parents of keys.
|
|
1675 |
||
1676 |
:param keys: The keys to look up parents for.
|
|
1677 |
:return: A mapping from keys to parents. Absent keys are absent from
|
|
1678 |
the mapping.
|
|
1679 |
"""
|
|
1680 |
self._check_read() |
|
1681 |
nodes = self._get_entries(keys) |
|
1682 |
result = {} |
|
1683 |
if self._parents: |
|
1684 |
for node in nodes: |
|
1685 |
result[node[1]] = node[3][0] |
|
1686 |
else: |
|
1687 |
for node in nodes: |
|
1688 |
result[node[1]] = None |
|
1689 |
return result |
|
1690 |
||
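Typical usage, with hypothetical keys; note that absent keys are silently dropped rather than raising:

parent_map = gc_index.get_parent_map(
    [('file-id', 'rev-2'), ('file-id', 'ghost')])
# e.g. {('file-id', 'rev-2'): (('file-id', 'rev-1'),)} -- the ghost key
# is simply absent; a parentless index maps present keys to None.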
4343.3.1
by John Arbash Meinel
Set 'supports_external_lookups=True' for dev6 repositories. |
1691 |
def get_missing_parents(self): |
4343.3.21
by John Arbash Meinel
Implement get_missing_parents in terms of _KeyRefs. |
1692 |
"""Return the keys of missing parents."""
|
1693 |
# Copied from _KnitGraphIndex.get_missing_parents
|
|
1694 |
# We may have false positives, so filter those out.
|
|
1695 |
self._key_dependencies.add_keys( |
|
1696 |
self.get_parent_map(self._key_dependencies.get_unsatisfied_refs())) |
|
1697 |
return frozenset(self._key_dependencies.get_unsatisfied_refs()) |
|
4343.3.1
by John Arbash Meinel
Set 'supports_external_lookups=True' for dev6 repositories. |
1698 |
|
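Spelled out, the filtering works like this sketch; it is only meaningful when track_external_parent_refs was passed at construction time:

refs = gc_index._key_dependencies.get_unsatisfied_refs()
# Any referenced key the index can actually resolve was a false
# positive; marking it as added drops it from the unsatisfied set.
gc_index._key_dependencies.add_keys(gc_index.get_parent_map(refs))
missing = frozenset(gc_index._key_dependencies.get_unsatisfied_refs())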
0.17.5
by Robert Collins
nograph tests completely passing. |
1699 |
def get_build_details(self, keys): |
1700 |
"""Get the various build details for keys.
|
|
1701 |
||
1702 |
Ghosts are omitted from the result.
|
|
1703 |
||
1704 |
:param keys: An iterable of keys.
|
|
1705 |
:return: A dict of key:
|
|
1706 |
(index_memo, compression_parent, parents, record_details).
|
|
1707 |
index_memo
|
|
1708 |
opaque structure to pass to read_records to extract the raw
|
|
1709 |
data
|
|
1710 |
compression_parent
|
|
1711 |
Content that this record is built upon, may be None
|
|
1712 |
parents
|
|
1713 |
Logical parents of this node
|
|
1714 |
record_details
|
|
1715 |
extra information about the content which needs to be passed to
|
|
1716 |
Factory.parse_record
|
|
1717 |
"""
|
|
1718 |
self._check_read() |
|
1719 |
result = {} |
|
0.20.29
by Ian Clatworthy
groupcompress.py code cleanups |
1720 |
entries = self._get_entries(keys) |
0.17.5
by Robert Collins
nograph tests completely passing. |
1721 |
for entry in entries: |
1722 |
key = entry[1] |
|
1723 |
if not self._parents: |
|
1724 |
parents = None |
|
1725 |
else: |
|
1726 |
parents = entry[3][0] |
|
1727 |
method = 'group' |
|
1728 |
result[key] = (self._node_to_position(entry), |
|
1729 |
None, parents, (method, None)) |
|
1730 |
return result |
|
3735.31.2
by John Arbash Meinel
Cleanup trailing whitespace, get test_source to pass by removing asserts. |
1731 |
|
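The returned mapping for a single key, with illustrative values:

details = gc_index.get_build_details([('file-id', 'rev-2')])
# {('file-id', 'rev-2'):
#     ((index_obj, 0, 400, 17, 250),  # index_memo (_node_to_position)
#      None,                          # compression_parent: always None
#      (('file-id', 'rev-1'),),       # parents (None when parentless)
#      ('group', None))}              # record_details: method is 'group'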
0.17.5
by Robert Collins
nograph tests completely passing. |
1732 |
def keys(self): |
1733 |
"""Get all the keys in the collection.
|
|
3735.31.2
by John Arbash Meinel
Cleanup trailing whitespace, get test_source to pass by removing asserts. |
1734 |
|
0.17.5
by Robert Collins
nograph tests completely passing. |
1735 |
The keys are not ordered.
|
1736 |
"""
|
|
1737 |
self._check_read() |
|
1738 |
return [node[1] for node in self._graph_index.iter_all_entries()] |
|
3735.31.2
by John Arbash Meinel
Cleanup trailing whitespace, get test_source to pass by removing asserts. |
1739 |
|
0.17.5
by Robert Collins
nograph tests completely passing. |
1740 |
def _node_to_position(self, node): |
1741 |
"""Convert an index value to position details."""
|
|
1742 |
bits = node[2].split(' ') |
|
1743 |
# It would be nice not to read the entire group block.
|
|
1744 |
start = int(bits[0]) |
|
1745 |
stop = int(bits[1]) |
|
1746 |
basis_end = int(bits[2]) |
|
1747 |
delta_end = int(bits[3]) |
|
1748 |
return node[0], start, stop, basis_end, delta_end |
|
0.18.14
by John Arbash Meinel
A bit more work, not really usable yet. |
1749 |
|
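A worked example of the decoding (the value string is made up):

bits = '0 400 17 250'.split(' ')
start, stop, basis_end, delta_end = [int(b) for b in bits]
# _node_to_position would return (node[0], 0, 400, 17, 250): the node's
# index object followed by the four decoded integers.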
4343.3.2
by John Arbash Meinel
All stacking tests seem to be passing for dev6 repos |
1750 |
def scan_unvalidated_index(self, graph_index): |
1751 |
"""Inform this _GCGraphIndex that there is an unvalidated index.
|
|
1752 |
||
1753 |
This lets the _GCGraphIndex keep track of any missing
|
|
1754 |
compression parents that we may want to have filled in to make those
|
|
1755 |
indices valid.
|
|
1756 |
||
1757 |
:param graph_index: A GraphIndex
|
|
1758 |
"""
|
|
4343.3.21
by John Arbash Meinel
Implement get_missing_parents in terms of _KeyRefs. |
1759 |
if self._key_dependencies is not None: |
1760 |
# Add parent refs from graph_index (and discard parent refs that
|
|
1761 |
# the graph_index has).
|
|
1762 |
add_refs = self._key_dependencies.add_references |
|
1763 |
for node in graph_index.iter_all_entries(): |
|
1764 |
add_refs(node[1], node[3][0]) |
|
4343.3.2
by John Arbash Meinel
All stacking tests seem to be passing for dev6 repos |
1765 |
|
1766 |
||
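Typical usage when a new pack's index has been written but not yet validated; new_index is a hypothetical GraphIndex over that pack:

gc_index.scan_unvalidated_index(new_index)
# Every parent referenced by the new nodes is now tracked, so any that
# cannot be resolved will show up until it is filled in:
missing = gc_index.get_missing_parents()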
0.18.14
by John Arbash Meinel
A bit more work, not really usable yet. |
1767 |
|
3735.40.4
by John Arbash Meinel
Factor out tests that rely on the exact bytecode. |
1768 |
from bzrlib._groupcompress_py import ( |
1769 |
apply_delta, |
|
3735.40.19
by John Arbash Meinel
Implement apply_delta_to_source which doesn't have to malloc another string. |
1770 |
apply_delta_to_source, |
3735.40.11
by John Arbash Meinel
Implement make_delta and apply_delta. |
1771 |
encode_base128_int, |
1772 |
decode_base128_int, |
|
4300.1.1
by John Arbash Meinel
Add the ability to convert a gc block into 'human readable' form. |
1773 |
decode_copy_instruction, |
3735.40.13
by John Arbash Meinel
Rename EquivalenceTable to LinesDeltaIndex. |
1774 |
LinesDeltaIndex, |
3735.40.4
by John Arbash Meinel
Factor out tests that rely on the exact bytecode. |
1775 |
)
|
0.18.14
by John Arbash Meinel
A bit more work, not really usable yet. |
1776 |
try: |
4241.6.6
by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core. |
1777 |
from bzrlib._groupcompress_pyx import ( |
1778 |
apply_delta, |
|
3735.40.19
by John Arbash Meinel
Implement apply_delta_to_source which doesn't have to malloc another string. |
1779 |
apply_delta_to_source, |
4241.6.6
by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core. |
1780 |
DeltaIndex, |
3735.40.16
by John Arbash Meinel
Implement (de|en)code_base128_int in pyrex. |
1781 |
encode_base128_int, |
1782 |
decode_base128_int, |
|
4241.6.6
by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core. |
1783 |
)
|
3735.40.2
by John Arbash Meinel
Add a groupcompress.encode_copy_instruction function. |
1784 |
GroupCompressor = PyrexGroupCompressor |
0.18.14
by John Arbash Meinel
A bit more work, not really usable yet. |
1785 |
except ImportError: |
4241.6.6
by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core. |
1786 |
GroupCompressor = PythonGroupCompressor |
1787 |
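The module thus closes with bzrlib's usual compiled-extension fallback: prefer the pyrex implementations, and silently fall back to the pure-Python ones when the import fails. Callers can check which side won:

from bzrlib import groupcompress
print groupcompress.GroupCompressor.__name__
# 'PyrexGroupCompressor' when _groupcompress_pyx is available,
# otherwise 'PythonGroupCompressor'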