# Copyright (C) 2008, 2009 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

"""Core compression logic for compressing streams of related files."""

from itertools import izip
from cStringIO import StringIO
import time
import zlib
try:
    import pylzma
except ImportError:
    pylzma = None

from bzrlib import (
    annotate,
    debug,
    diff,
    errors,
    graph as _mod_graph,
    osutils,
    pack,
    patiencediff,
    trace,
    )
from bzrlib.graph import Graph
from bzrlib.knit import _DirectPackAccess
from bzrlib.btree_index import BTreeBuilder
from bzrlib.lru_cache import LRUSizeCache
from bzrlib.tsort import topo_sort
from bzrlib.versionedfile import (
    adapter_registry,
    AbsentContentFactory,
    ChunkedContentFactory,
    FulltextContentFactory,
    VersionedFiles,
    )

_USE_LZMA = False and (pylzma is not None)

# osutils.sha_string('')
_null_sha1 = 'da39a3ee5e6b4b0d3255bfef95601890afd80709'


def sort_gc_optimal(parent_map):
    """Sort and group the keys in parent_map into groupcompress order.

    groupcompress is defined (currently) as reverse-topological order, grouped
    by the key prefix.

    :return: A sorted list of keys
    """
    # groupcompress ordering is approximately reverse topological,
    # properly grouped by file-id.
    per_prefix_map = {}
    for item in parent_map.iteritems():
        key = item[0]
        if isinstance(key, str) or len(key) == 1:
            prefix = ''
        else:
            prefix = key[0]
        try:
            per_prefix_map[prefix].append(item)
        except KeyError:
            per_prefix_map[prefix] = [item]

    present_keys = []
    for prefix in sorted(per_prefix_map):
        present_keys.extend(reversed(topo_sort(per_prefix_map[prefix])))
    return present_keys
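

# Illustrative sketch (not part of the original module): how sort_gc_optimal
# behaves. The key tuples below are hypothetical stand-ins for the usual
# (file_id, revision_id) keys. Within each prefix, topo_sort puts parents
# first, so the reversed() call yields newest-first ordering.
def _demo_sort_gc_optimal():
    parent_map = {
        ('f1', 'rev1'): (),
        ('f1', 'rev2'): (('f1', 'rev1'),),
        ('f2', 'rev1'): (),
    }
    # Expected: [('f1', 'rev2'), ('f1', 'rev1'), ('f2', 'rev1')]
    return sort_gc_optimal(parent_map)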


# The max zlib window size is 32kB, so if we set 'max_size' output of the
# decompressor to the requested bytes + 32kB, then we should guarantee
# num_bytes coming out.
_ZLIB_DECOMP_WINDOW = 32*1024
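

# A minimal sketch (not part of the original module) of the partial
# decompression trick used below: because the deflate window is at most
# 32kB, asking a decompressobj for num_bytes + _ZLIB_DECOMP_WINDOW of
# output is enough to ensure num_bytes of real content when the stream
# actually holds that much.
def _demo_partial_decompress(z_bytes, num_bytes):
    decompressor = zlib.decompressobj()
    content = decompressor.decompress(z_bytes,
                                      num_bytes + _ZLIB_DECOMP_WINDOW)
    # Whatever was not expanded yet remains in 'unconsumed_tail'
    return content, decompressor.unconsumed_tail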


class GroupCompressBlock(object):
    """An object which maintains the internal structure of the compressed data.

    This tracks the meta info (start of text, length, type, etc.)
    """

    # Group Compress Block v1 Zlib
    GCB_HEADER = 'gcb1z\n'
    # Group Compress Block v1 Lzma
    GCB_LZ_HEADER = 'gcb1l\n'
    GCB_KNOWN_HEADERS = (GCB_HEADER, GCB_LZ_HEADER)

    def __init__(self):
        # map by key? or just order in file?
        self._compressor_name = None
        self._z_content = None
        self._z_content_decompressor = None
        self._z_content_length = None
        self._content_length = None
        self._content = None

    def __len__(self):
        # This is the maximum number of bytes this object will reference if
        # everything is decompressed. However, if we decompress less than
        # everything... (this would cause some problems for LRUSizeCache)
        return self._content_length + self._z_content_length

    def _ensure_content(self, num_bytes=None):
        """Make sure that content has been expanded enough.

        :param num_bytes: Ensure that we have extracted at least num_bytes of
            content. If None, consume everything.
        """
        # TODO: If we re-use the same content block at different times during
        #       get_record_stream(), it is possible that the first pass will
        #       get inserted, triggering an extract/_ensure_content() which
        #       will get rid of _z_content. And then the next use of the block
        #       will try to access _z_content (to send it over the wire), and
        #       fail because it is already extracted. Consider never releasing
        #       _z_content because of this.
        if num_bytes is None:
            num_bytes = self._content_length
        elif (self._content_length is not None
              and num_bytes > self._content_length):
            raise AssertionError(
                'requested num_bytes (%d) > content length (%d)'
                % (num_bytes, self._content_length))
        # Expand the content if required
        if self._content is None:
            if self._z_content is None:
                raise AssertionError('No content to decompress')
            if self._z_content == '':
                self._content = ''
            elif self._compressor_name == 'lzma':
                # We don't do partial lzma decomp yet
                self._content = pylzma.decompress(self._z_content)
            elif self._compressor_name == 'zlib':
                # Start a zlib decompressor
                if num_bytes is None:
                    self._content = zlib.decompress(self._z_content)
                else:
                    self._z_content_decompressor = zlib.decompressobj()
                    # Seed the decompressor with the uncompressed bytes, so
                    # that the rest of the code is simplified
                    self._content = self._z_content_decompressor.decompress(
                        self._z_content, num_bytes + _ZLIB_DECOMP_WINDOW)
            else:
                raise AssertionError('Unknown compressor: %r'
                                     % self._compressor_name)
        # Any bytes remaining to be decompressed will be in the decompressor's
        # 'unconsumed_tail'

        # Do we have enough bytes already?
        if num_bytes is not None and len(self._content) >= num_bytes:
            return
        if num_bytes is None and self._z_content_decompressor is None:
            # We must have already decompressed everything
            return
        # If we got this far, and don't have a decompressor, something is wrong
        if self._z_content_decompressor is None:
            raise AssertionError(
                'No decompressor to decompress %d bytes' % num_bytes)
        remaining_decomp = self._z_content_decompressor.unconsumed_tail
        if num_bytes is None:
            if remaining_decomp:
                # We don't know how much is left, but we'll decompress it all
                self._content += self._z_content_decompressor.decompress(
                    remaining_decomp)
                # Note: There's what I consider a bug in zlib.decompressobj
                #       If you pass back in the entire unconsumed_tail, only
                #       this time you don't pass a max-size, it doesn't
                #       change the unconsumed_tail back to None/''.
                #       However, we know we are done with the whole stream
                self._z_content_decompressor = None
            # XXX: Why is this the only place in this routine we set this?
            self._content_length = len(self._content)
        else:
            if not remaining_decomp:
                raise AssertionError('Nothing left to decompress')
            needed_bytes = num_bytes - len(self._content)
            # We always set max_size to 32kB over the minimum needed, so that
            # zlib will give us as much as we really want.
            # TODO: If this isn't good enough, we could make a loop here,
            #       that keeps expanding the request until we get enough
            self._content += self._z_content_decompressor.decompress(
                remaining_decomp, needed_bytes + _ZLIB_DECOMP_WINDOW)
            if len(self._content) < num_bytes:
                raise AssertionError('%d bytes wanted, only %d available'
                                     % (num_bytes, len(self._content)))
            if not self._z_content_decompressor.unconsumed_tail:
                # The stream is finished
                self._z_content_decompressor = None

    def _parse_bytes(self, bytes, pos):
        """Read the various lengths from the header.

        This also populates the various 'compressed' buffers.

        :return: The position in bytes just after the last newline
        """
        # At present, we have 2 integers for the compressed and uncompressed
        # content. In base10 (ascii) 14 bytes can represent > 1TB, so to avoid
        # checking too far, cap the search to 14 bytes.
        pos2 = bytes.index('\n', pos, pos + 14)
        self._z_content_length = int(bytes[pos:pos2])
        pos = pos2 + 1
        pos2 = bytes.index('\n', pos, pos + 14)
        self._content_length = int(bytes[pos:pos2])
        pos = pos2 + 1
        if len(bytes) != (pos + self._z_content_length):
            # XXX: Define some GCCorrupt error ?
            raise AssertionError('Invalid bytes: (%d) != %d + %d' %
                                 (len(bytes), pos, self._z_content_length))
        self._z_content = bytes[pos:]

    @classmethod
    def from_bytes(cls, bytes):
        out = cls()
        if bytes[:6] not in cls.GCB_KNOWN_HEADERS:
            raise ValueError('bytes did not start with any of %r'
                             % (cls.GCB_KNOWN_HEADERS,))
        # XXX: why not testing the whole header ?
        if bytes[4] == 'z':
            out._compressor_name = 'zlib'
        elif bytes[4] == 'l':
            out._compressor_name = 'lzma'
        else:
            raise ValueError('unknown compressor: %r' % (bytes,))
        out._parse_bytes(bytes, 6)
        return out

    def extract(self, key, start, end, sha1=None):
        """Extract the text for a specific key.

        :param key: The label used for this content
        :param sha1: TODO (should we validate only when sha1 is supplied?)
        :return: The bytes for the content
        """
        if start == end == 0:
            return ''
        self._ensure_content(end)
        # The bytes are 'f' or 'd' for the type, then a variable-length
        # base128 integer for the content size, then the actual content
        # We know that the variable-length integer won't be longer than 5
        # bytes (it takes 5 bytes to encode 2^32)
        c = self._content[start]
        if c == 'f':
            type = 'fulltext'
        else:
            if c != 'd':
                raise ValueError('Unknown content control code: %s'
                                 % (c,))
            type = 'delta'
        content_len, len_len = decode_base128_int(
            self._content[start + 1:start + 6])
        content_start = start + 1 + len_len
        if end != content_start + content_len:
            raise ValueError('end != len according to field header'
                ' %s != %s' % (end, content_start + content_len))
        if c == 'f':
            bytes = self._content[content_start:end]
        elif c == 'd':
            bytes = apply_delta_to_source(self._content, content_start, end)
        return bytes
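
    # Illustration (added commentary, not original): each record in the
    # expanded content is laid out as a one-byte kind ('f' fulltext or
    # 'd' delta), a base128 varint payload length (7 bits per byte, high
    # bit set on continuation bytes), then the payload itself. A 300-byte
    # fulltext is therefore 'f' + '\xac\x02' + <300 payload bytes>, since
    # 300 = (0x2c, with the 0x80 continuation bit) followed by 300 >> 7 == 2.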
|
    def set_content(self, content):
        """Set the content of this block."""
        self._content_length = len(content)
        self._content = content
        self._z_content = None

    def to_bytes(self):
        """Encode the information into a byte stream."""
        compress = zlib.compress
        if _USE_LZMA:
            compress = pylzma.compress
        if self._z_content is None:
            if self._content is None:
                raise AssertionError('Nothing to compress')
            self._z_content = compress(self._content)
            self._z_content_length = len(self._z_content)
        if _USE_LZMA:
            header = self.GCB_LZ_HEADER
        else:
            header = self.GCB_HEADER
        chunks = [header,
                  '%d\n%d\n' % (self._z_content_length, self._content_length),
                  self._z_content,
                 ]
        return ''.join(chunks)

    def _dump(self, include_text=False):
        """Take this block, and spit out a human-readable structure.

        :param include_text: Inserts also include text bits, choose whether
            you want this displayed in the dump or not.
        :return: A dump of the given block. The layout is something like:
            [('f', length), ('d', delta_length, text_length, [delta_info])]
            delta_info := [('i', num_bytes, text), ('c', offset, num_bytes),
            ...]
        """
        self._ensure_content()
        result = []
        pos = 0
        while pos < self._content_length:
            kind = self._content[pos]
            pos += 1
            if kind not in ('f', 'd'):
                raise ValueError('invalid kind character: %r' % (kind,))
            content_len, len_len = decode_base128_int(
                self._content[pos:pos + 5])
            pos += len_len
            if content_len + pos > self._content_length:
                raise ValueError('invalid content_len %d for record @ pos %d'
                                 % (content_len, pos - len_len - 1))
            if kind == 'f': # Fulltext
                result.append(('f', content_len))
            elif kind == 'd': # Delta
                delta_content = self._content[pos:pos+content_len]
                delta_info = []
                # The first entry in a delta is the decompressed length
                decomp_len, delta_pos = decode_base128_int(delta_content)
                result.append(('d', content_len, decomp_len, delta_info))
                measured_len = 0
                while delta_pos < content_len:
                    c = ord(delta_content[delta_pos])
                    delta_pos += 1
                    if c & 0x80: # Copy
                        (offset, length,
                         delta_pos) = decode_copy_instruction(delta_content, c,
                                                              delta_pos)
                        delta_info.append(('c', offset, length))
                        measured_len += length
                    else: # Insert
                        if include_text:
                            txt = delta_content[delta_pos:delta_pos+c]
                        else:
                            txt = ''
                        delta_info.append(('i', c, txt))
                        measured_len += c
                        delta_pos += c
                if delta_pos != content_len:
                    raise ValueError('Delta consumed a bad number of bytes:'
                                     ' %d != %d' % (delta_pos, content_len))
                if measured_len != decomp_len:
                    raise ValueError('Delta claimed fulltext was %d bytes, but'
                                     ' extraction resulted in %d bytes'
                                     % (decomp_len, measured_len))
            pos += content_len
        return result
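

# Illustrative sketches (not part of the original API). The first shows a
# GroupCompressBlock serialisation round-trip; the second is a pure-Python
# equivalent of the base128 decoder used above (the real decode_base128_int
# is provided by bzrlib's compiled or Python groupcompress helper module).
def _demo_block_round_trip():
    block = GroupCompressBlock()
    block.set_content('hello world' * 100)
    raw = block.to_bytes()      # 'gcb1z\n' + '<z_len>\n<len>\n' + zlib data
    copy = GroupCompressBlock.from_bytes(raw)
    copy._ensure_content()      # decompression is deferred until needed
    return copy._content == 'hello world' * 100


def _decode_base128_int_sketch(bytes):
    """Decode a base128 integer, returning (value, num_bytes_consumed)."""
    result = 0
    shift = 0
    pos = 0
    while True:
        c = ord(bytes[pos])
        pos += 1
        result |= (c & 0x7f) << shift
        if not c & 0x80:
            break
        shift += 7
    return result, pos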
|

class _LazyGroupCompressFactory(object):
    """Yield content from a GroupCompressBlock on demand."""

    def __init__(self, key, parents, manager, start, end, first):
        """Create a _LazyGroupCompressFactory

        :param key: The key of just this record
        :param parents: The parents of this key (possibly None)
        :param manager: The _LazyGroupContentManager that owns the block
        :param start: Offset of the first byte for this record in the
            uncompressed content
        :param end: Offset of the byte just after the end of this record
            (ie, bytes = content[start:end])
        :param first: Is this the first Factory for the given block?
        """
        self.key = key
        self.parents = parents
        self.sha1 = None
        # Note: This attribute coupled with Manager._factories creates a
        #       reference cycle. Perhaps we would rather use a weakref(), or
        #       find an appropriate time to release the ref. After the first
        #       get_bytes_as call? After Manager.get_record_stream() returns
        #       the object?
        self._manager = manager
        self._bytes = None
        self.storage_kind = 'groupcompress-block'
        if not first:
            self.storage_kind = 'groupcompress-block-ref'
        self._first = first
        self._start = start
        self._end = end

    def __repr__(self):
        return '%s(%s, first=%s)' % (self.__class__.__name__,
            self.key, self._first)

    def get_bytes_as(self, storage_kind):
        if storage_kind == self.storage_kind:
            if self._first:
                # wire bytes, something...
                return self._manager._wire_bytes()
            else:
                return ''
        if storage_kind in ('fulltext', 'chunked'):
            if self._bytes is None:
                # Grab and cache the raw bytes for this entry
                # and break the ref-cycle with _manager since we don't need it
                # anymore
                self._manager._prepare_for_extract()
                block = self._manager._block
                self._bytes = block.extract(self.key, self._start, self._end)
                # There are code paths that first extract as fulltext, and then
                # extract as storage_kind (smart fetch). So we don't break the
                # refcycle here, but instead in manager.get_record_stream()
                # self._manager = None
            if storage_kind == 'fulltext':
                return self._bytes
            else:
                return [self._bytes]
        raise errors.UnavailableRepresentation(self.key, storage_kind,
            self.storage_kind)


class _LazyGroupContentManager(object):
    """This manages a group of _LazyGroupCompressFactory objects."""

    def __init__(self, block):
        self._block = block
        # We need to preserve the ordering
        self._factories = []
        self._last_byte = 0

    def add_factory(self, key, parents, start, end):
        if not self._factories:
            first = True
        else:
            first = False
        # Note that this creates a reference cycle....
        factory = _LazyGroupCompressFactory(key, parents, self,
            start, end, first=first)
        # max() works here, but as a function call, doing a compare seems to be
        # significantly faster, timeit says 250ms for max() and 100ms for the
        # comparison
        if end > self._last_byte:
            self._last_byte = end
        self._factories.append(factory)

    def get_record_stream(self):
        """Get a record for all keys added so far."""
        for factory in self._factories:
            yield factory
            # Break the ref-cycle
            factory._bytes = None
            factory._manager = None
        # TODO: Consider setting self._factories = None after the above loop,
        #       as it will break the reference cycle

    def _trim_block(self, last_byte):
        """Create a new GroupCompressBlock, with just some of the content."""
        # None of the factories need to be adjusted, because the content is
        # located in an identical place. Just that some of the unreferenced
        # trailing bytes are stripped
        trace.mutter('stripping trailing bytes from groupcompress block'
                     ' %d => %d', self._block._content_length, last_byte)
        new_block = GroupCompressBlock()
        self._block._ensure_content(last_byte)
        new_block.set_content(self._block._content[:last_byte])
        self._block = new_block

    def _rebuild_block(self):
        """Create a new GroupCompressBlock with only the referenced texts."""
        compressor = GroupCompressor()
        tstart = time.time()
        old_length = self._block._content_length
        end_point = 0
        for factory in self._factories:
            bytes = factory.get_bytes_as('fulltext')
            (found_sha1, start_point, end_point,
             type) = compressor.compress(factory.key, bytes, factory.sha1)
            # Now update this factory with the new offsets, etc
            factory.sha1 = found_sha1
            factory._start = start_point
            factory._end = end_point
        self._last_byte = end_point
        new_block = compressor.flush()
        # TODO: Should we check that new_block really *is* smaller than the old
        #       block? It seems hard to come up with a method that it would
        #       expand, since we do full compression again. Perhaps based on a
        #       request that ends up poorly ordered?
        delta = time.time() - tstart
        self._block = new_block
        trace.mutter('creating new compressed block on-the-fly in %.3fs'
                     ' %d bytes => %d bytes', delta, old_length,
                     self._block._content_length)

    def _prepare_for_extract(self):
        """A _LazyGroupCompressFactory is about to extract to fulltext."""
        # We expect that if one child is going to fulltext, all will be. This
        # helps prevent all of them from extracting a small amount at a time.
        # Which in itself isn't terribly expensive, but resizing 2MB 32kB at a
        # time (self._block._content) is a little expensive.
        self._block._ensure_content(self._last_byte)

    def _check_rebuild_block(self):
        """Check to see if our block should be repacked."""
        total_bytes_used = 0
        last_byte_used = 0
        for factory in self._factories:
            total_bytes_used += factory._end - factory._start
            last_byte_used = max(last_byte_used, factory._end)
        # If we are using most of the bytes from the block, we have nothing
        # else to check (currently more than 1/2)
        if total_bytes_used * 2 >= self._block._content_length:
            return
        # Can we just strip off the trailing bytes? If we are going to be
        # transmitting more than 50% of the front of the content, go ahead
        if total_bytes_used * 2 > last_byte_used:
            self._trim_block(last_byte_used)
            return

        # We are using a small amount of the data, and it isn't just packed
        # nicely at the front, so rebuild the content.
        # Note: This would be *nicer* as a strip-data-from-group, rather than
        #       building it up again from scratch
        #       It might be reasonable to consider the fulltext sizes for
        #       different bits when deciding this, too. As you may have a small
        #       fulltext, and a trivial delta, and you are just trading around
        #       for another fulltext. If we do a simple 'prune' you may end up
        #       expanding many deltas into fulltexts, as well.
        #       If we build a cheap enough 'strip', then we could try a strip,
        #       if that expands the content, we then rebuild.
        self._rebuild_block()
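
    # Worked example (added commentary, not original): if the block expands
    # to 4MB, the factories reference 1MB in total, and the last referenced
    # byte is at 1.5MB, then 2 * 1MB < 4MB makes the block a repack
    # candidate, and 2 * 1MB > 1.5MB means trimming to 1.5MB suffices,
    # avoiding the cost of a full recompression.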

    def _wire_bytes(self):
        """Return a byte stream suitable for transmitting over the wire."""
        self._check_rebuild_block()
        # The outer block starts with:
        #   'groupcompress-block\n'
        #   <length of compressed key info>\n
        #   <length of uncompressed info>\n
        #   <length of gc block>\n
        #   <header bytes>
        #   <gc-block>
        lines = ['groupcompress-block\n']
        # The minimal info we need is the key, the start offset, and the
        # parents. The length and type are encoded in the record itself.
        # However, passing in the other bits makes it easier.  The list of
        # keys, and the start offset, the length
        # 1 line key
        # 1 line with parents, '' for ()
        # 1 line for start offset
        # 1 line for end byte
        header_lines = []
        for factory in self._factories:
            key_bytes = '\x00'.join(factory.key)
            parents = factory.parents
            if parents is None:
                parent_bytes = 'None:'
            else:
                parent_bytes = '\t'.join('\x00'.join(key) for key in parents)
            record_header = '%s\n%s\n%d\n%d\n' % (
                key_bytes, parent_bytes, factory._start, factory._end)
            header_lines.append(record_header)
            # TODO: Can we break the refcycle at this point and set
            #       factory._manager = None?
        header_bytes = ''.join(header_lines)
        del header_lines
        header_bytes_len = len(header_bytes)
        z_header_bytes = zlib.compress(header_bytes)
        del header_bytes
        z_header_bytes_len = len(z_header_bytes)
        block_bytes = self._block.to_bytes()
        lines.append('%d\n%d\n%d\n' % (z_header_bytes_len, header_bytes_len,
                                       len(block_bytes)))
        lines.append(z_header_bytes)
        lines.append(block_bytes)
        del z_header_bytes, block_bytes
        return ''.join(lines)

    @classmethod
    def from_bytes(cls, bytes):
        # TODO: This does extra string copying, probably better to do it a
        #       different way
        (storage_kind, z_header_len, header_len,
         block_len, rest) = bytes.split('\n', 4)
        del bytes
        if storage_kind != 'groupcompress-block':
            raise ValueError('Unknown storage kind: %s' % (storage_kind,))
        z_header_len = int(z_header_len)
        if len(rest) < z_header_len:
            raise ValueError('Compressed header len shorter than all bytes')
        z_header = rest[:z_header_len]
        header_len = int(header_len)
        header = zlib.decompress(z_header)
        if len(header) != header_len:
            raise ValueError('invalid length for decompressed bytes')
        del z_header
        block_len = int(block_len)
        if len(rest) != z_header_len + block_len:
            raise ValueError('Invalid length for block')
        block_bytes = rest[z_header_len:]
        del rest
        # So now we have a valid GCB, we just need to parse the factories that
        # were sent to us
        header_lines = header.split('\n')
        del header
        last = header_lines.pop()
        if last != '':
            raise ValueError('header lines did not end with a trailing'
                             ' newline')
        if len(header_lines) % 4 != 0:
            raise ValueError('The header was not an even multiple of 4 lines')
        block = GroupCompressBlock.from_bytes(block_bytes)
        del block_bytes
        result = cls(block)
        for start in xrange(0, len(header_lines), 4):
            # intern()?
            key = tuple(header_lines[start].split('\x00'))
            parents_line = header_lines[start+1]
            if parents_line == 'None:':
                parents = None
            else:
                parents = tuple([tuple(segment.split('\x00'))
                                 for segment in parents_line.split('\t')
                                 if segment])
            start_offset = int(header_lines[start+2])
            end_offset = int(header_lines[start+3])
            result.add_factory(key, parents, start_offset, end_offset)
        return result


def network_block_to_records(storage_kind, bytes, line_end):
    if storage_kind != 'groupcompress-block':
        raise ValueError('Unknown storage kind: %s' % (storage_kind,))
    manager = _LazyGroupContentManager.from_bytes(bytes)
    return manager.get_record_stream()
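

# Illustrative sketch (not part of the original API): the wire round-trip.
# _wire_bytes() produces what a 'groupcompress-block' record carries, and
# network_block_to_records() rebuilds the lazy factories on the receiving
# side.
def _demo_wire_round_trip(manager):
    wire = manager._wire_bytes()
    records = network_block_to_records('groupcompress-block', wire, None)
    return [record.key for record in records]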


class _CommonGroupCompressor(object):

    def __init__(self):
        """Create a GroupCompressor."""
        self.chunks = []
        self._last = None
        self.endpoint = 0
        self.input_bytes = 0
        self.labels_deltas = {}
        self._delta_index = None # Set by the children
        self._block = GroupCompressBlock()

    def compress(self, key, bytes, expected_sha, nostore_sha=None, soft=False):
        """Compress lines with label key.

        :param key: A key tuple. It is stored in the output
            for identification of the text during decompression. If the last
            element is 'None' it is replaced with the sha1 of the text -
            e.g. sha1:xxxxxxx.
        :param bytes: The bytes to be compressed
        :param expected_sha: If non-None, the sha the lines are believed to
            have. During compression the sha is calculated; a mismatch will
            cause an error.
        :param nostore_sha: If the computed sha1 sum matches, we will raise
            ExistingContent rather than adding the text.
        :param soft: Do a 'soft' compression. This means that we require larger
            ranges to match to be considered for a copy command.

        :return: The sha1 of lines, the start and end offsets in the delta, and
            the type ('fulltext' or 'delta').

        :seealso VersionedFiles.add_lines:
        """
        if not bytes: # empty, like a dir entry, etc
            if nostore_sha == _null_sha1:
                raise errors.ExistingContent()
            return _null_sha1, 0, 0, 'fulltext'
        # we assume someone knew what they were doing when they passed it in
        if expected_sha is not None:
            sha1 = expected_sha
        else:
            sha1 = osutils.sha_string(bytes)
        if nostore_sha is not None:
            if sha1 == nostore_sha:
                raise errors.ExistingContent()
        if key[-1] is None:
            key = key[:-1] + ('sha1:' + sha1,)

        start, end, type = self._compress(key, bytes, len(bytes) / 2, soft)
        return sha1, start, end, type

    def _compress(self, key, bytes, max_delta_size, soft=False):
        """Compress lines with label key.

        :param key: A key tuple. It is stored in the output for identification
            of the text during decompression.

        :param bytes: The bytes to be compressed

        :param max_delta_size: The size above which we issue a fulltext instead
            of a delta.

        :param soft: Do a 'soft' compression. This means that we require larger
            ranges to match to be considered for a copy command.

        :return: The sha1 of lines, the start and end offsets in the delta, and
            the type ('fulltext' or 'delta').
        """
        raise NotImplementedError(self._compress)

    def extract(self, key):
        """Extract a key previously added to the compressor.

        :param key: The key to extract.
        :return: An iterable over bytes and the sha1.
        """
        (start_byte, start_chunk, end_byte, end_chunk) = self.labels_deltas[key]
        delta_chunks = self.chunks[start_chunk:end_chunk]
        stored_bytes = ''.join(delta_chunks)
        if stored_bytes[0] == 'f':
            fulltext_len, offset = decode_base128_int(stored_bytes[1:10])
            data_len = fulltext_len + 1 + offset
            if data_len != len(stored_bytes):
                raise ValueError('Index claimed fulltext len, but stored bytes'
                                 ' claim %s != %s'
                                 % (len(stored_bytes), data_len))
            bytes = stored_bytes[offset + 1:]
        else:
            # XXX: This is inefficient at best
            source = ''.join(self.chunks[:start_chunk])
            if stored_bytes[0] != 'd':
                raise ValueError('Unknown content kind, bytes claim %s'
                                 % (stored_bytes[0],))
            delta_len, offset = decode_base128_int(stored_bytes[1:10])
            data_len = delta_len + 1 + offset
            if data_len != len(stored_bytes):
                raise ValueError('Index claimed delta len, but stored bytes'
                                 ' claim %s != %s'
                                 % (len(stored_bytes), data_len))
            bytes = apply_delta(source, stored_bytes[offset + 1:])
        bytes_sha1 = osutils.sha_string(bytes)
        return bytes, bytes_sha1

    def flush(self):
        """Finish this group, creating a formatted stream.

        After calling this, the compressor should no longer be used.
        """
        content = ''.join(self.chunks)
        self.chunks = None
        self._delta_index = None
        self._block.set_content(content)
        return self._block

    def pop_last(self):
        """Call this if you want to 'revoke' the last compression.

        After this, the data structures will be rolled back, but you cannot do
        more compression.
        """
        self._delta_index = None
        del self.chunks[self._last[0]:]
        self.endpoint = self._last[1]
        self._last = None

    def ratio(self):
        """Return the overall compression ratio."""
        return float(self.input_bytes) / float(self.endpoint)
771 |
class PythonGroupCompressor(_CommonGroupCompressor): |
|
772 |
||
3735.40.2
by John Arbash Meinel
Add a groupcompress.encode_copy_instruction function. |
773 |
def __init__(self): |
4241.6.6
by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core. |
774 |
"""Create a GroupCompressor.
|
775 |
||
776 |
Used only if the pyrex version is not available.
|
|
777 |
"""
|
|
778 |
super(PythonGroupCompressor, self).__init__() |
|
3735.40.17
by John Arbash Meinel
Change the attribute from 'lines' to 'chunks' to make it more |
779 |
self._delta_index = LinesDeltaIndex([]) |
780 |
# The actual content is managed by LinesDeltaIndex
|
|
781 |
self.chunks = self._delta_index.lines |
|
4241.6.6
by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core. |
782 |
|
783 |
def _compress(self, key, bytes, max_delta_size, soft=False): |
|
784 |
"""see _CommonGroupCompressor._compress"""
|
|
785 |
input_len = len(bytes) |
|
3735.40.2
by John Arbash Meinel
Add a groupcompress.encode_copy_instruction function. |
786 |
new_lines = osutils.split_lines(bytes) |
4241.6.6
by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core. |
787 |
out_lines, index_lines = self._delta_index.make_delta( |
788 |
new_lines, bytes_length=input_len, soft=soft) |
|
3735.40.4
by John Arbash Meinel
Factor out tests that rely on the exact bytecode. |
789 |
delta_length = sum(map(len, out_lines)) |
4241.6.6
by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core. |
790 |
if delta_length > max_delta_size: |
3735.40.4
by John Arbash Meinel
Factor out tests that rely on the exact bytecode. |
791 |
# The delta is longer than the fulltext, insert a fulltext
|
792 |
type = 'fulltext' |
|
4241.6.6
by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core. |
793 |
out_lines = ['f', encode_base128_int(input_len)] |
3735.40.4
by John Arbash Meinel
Factor out tests that rely on the exact bytecode. |
794 |
out_lines.extend(new_lines) |
795 |
index_lines = [False, False] |
|
796 |
index_lines.extend([True] * len(new_lines)) |
|
797 |
else: |
|
798 |
# this is a worthy delta, output it
|
|
799 |
type = 'delta' |
|
800 |
out_lines[0] = 'd' |
|
801 |
# Update the delta_length to include those two encoded integers
|
|
802 |
out_lines[1] = encode_base128_int(delta_length) |
|
4241.6.6
by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core. |
803 |
# Before insertion
|
804 |
start = self.endpoint |
|
805 |
chunk_start = len(self.chunks) |
|
4241.17.2
by John Arbash Meinel
PythonGroupCompressor needs to support pop_last() properly. |
806 |
self._last = (chunk_start, self.endpoint) |
3735.40.17
by John Arbash Meinel
Change the attribute from 'lines' to 'chunks' to make it more |
807 |
self._delta_index.extend_lines(out_lines, index_lines) |
808 |
self.endpoint = self._delta_index.endpoint |
|
4241.6.6
by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core. |
809 |
self.input_bytes += input_len |
810 |
chunk_end = len(self.chunks) |
|
3735.40.18
by John Arbash Meinel
Get rid of the entries dict in GroupCompressBlock. |
811 |
self.labels_deltas[key] = (start, chunk_start, |
812 |
self.endpoint, chunk_end) |
|
4241.6.6
by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core. |
813 |
return start, self.endpoint, type |
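    # Hedged sketch (not part of the original source): the returned tuple
    # gives the byte range the text occupies in the group plus how it was
    # stored:
    #   start, end, kind = compressor._compress(('file-id', 'rev'),
    #                                           'line one\nline two\n', 100)
    #   # kind is 'fulltext' or 'delta'; end - start is the encoded size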


class PyrexGroupCompressor(_CommonGroupCompressor):
    """Produce a serialised group of compressed texts.

    It contains code very similar to SequenceMatcher because of having a similar
    task. However some key differences apply:
     - there is no junk, we want a minimal edit not a human readable diff.
     - we don't filter very common lines (because we don't know where a good
       range will start, and after the first text we want to be emitting
       minimal edits only).
     - we chain the left side, not the right side
     - we incrementally update the adjacency matrix as new lines are provided.
     - we look for matches in all of the left side, so the routine which does
       the analogous task of find_longest_match does not need to filter on the
       left side.
    """

    def __init__(self):
        super(PyrexGroupCompressor, self).__init__()
        self._delta_index = DeltaIndex()

    def _compress(self, key, bytes, max_delta_size, soft=False):
        """see _CommonGroupCompressor._compress"""
        input_len = len(bytes)
        # By having action/label/sha1/len, we can parse the group if the index
        # was ever destroyed, we have the key in 'label', we know the final
        # bytes are valid from sha1, and we know where to find the end of this
        # record because of 'len'. (The delta record itself will store the
        # total length for the expanded record.)
        # 'len: %d\n' costs approximately 1% increase in total data
        # Having the labels at all costs us 9-10% increase, 38% increase for
        # inventory pages, and 5.8% increase for text pages
        # new_chunks = ['label:%s\nsha1:%s\n' % (label, sha1)]
        if self._delta_index._source_offset != self.endpoint:
            raise AssertionError('_source_offset != endpoint'
                ' somehow the DeltaIndex got out of sync with'
                ' the output lines')
        delta = self._delta_index.make_delta(bytes, max_delta_size)
        if delta is None:
            type = 'fulltext'
            enc_length = encode_base128_int(len(bytes))
            len_mini_header = 1 + len(enc_length)
            self._delta_index.add_source(bytes, len_mini_header)
            new_chunks = ['f', enc_length, bytes]
        else:
            type = 'delta'
            enc_length = encode_base128_int(len(delta))
            len_mini_header = 1 + len(enc_length)
            new_chunks = ['d', enc_length, delta]
            self._delta_index.add_delta_source(delta, len_mini_header)
        # Before insertion
        start = self.endpoint
        chunk_start = len(self.chunks)
        # Now output these bytes
        self._output_chunks(new_chunks)
        self.input_bytes += input_len
        chunk_end = len(self.chunks)
        self.labels_deltas[key] = (start, chunk_start,
                                   self.endpoint, chunk_end)
        if self._delta_index._source_offset != self.endpoint:
            raise AssertionError('the delta index is out of sync'
                ' with the output lines %s != %s'
                % (self._delta_index._source_offset, self.endpoint))
        return start, self.endpoint, type

    def _output_chunks(self, new_chunks):
        """Output some chunks.

        :param new_chunks: The chunks to output.
        """
        self._last = (len(self.chunks), self.endpoint)
        endpoint = self.endpoint
        self.chunks.extend(new_chunks)
        endpoint += sum(map(len, new_chunks))
        self.endpoint = endpoint
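    # Hedged sketch (not part of the original source): each record written
    # into the group is a one-byte kind marker, a base128-encoded length, and
    # the payload, so a 5-byte fulltext 'abc\nd' would be output roughly as:
    #   ['f', encode_base128_int(5), 'abc\nd']
    # and a delta likewise as ['d', <encoded delta length>, <delta bytes>].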


def make_pack_factory(graph, delta, keylength):
    """Create a factory for creating a pack based groupcompress.

    This is only functional enough to run interface tests, it doesn't try to
    provide a full pack environment.

    :param graph: Store a graph.
    :param delta: Delta compress contents.
    :param keylength: How long should keys be.
    """
    def factory(transport):
        parents = graph
        ref_length = 0
        if graph:
            ref_length = 1
        graph_index = BTreeBuilder(reference_lists=ref_length,
            key_elements=keylength)
        stream = transport.open_write_stream('newpack')
        writer = pack.ContainerWriter(stream.write)
        writer.begin()
        index = _GCGraphIndex(graph_index, lambda: True, parents=parents,
            add_callback=graph_index.add_nodes)
        access = _DirectPackAccess({})
        access.set_writer(writer, graph_index, (transport, 'newpack'))
        result = GroupCompressVersionedFiles(index, access, delta)
        result.stream = stream
        result.writer = writer
        return result
    return factory
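# A minimal usage sketch (not part of the original source; assumes a writable
# bzrlib transport, e.g. from bzrlib.transport.get_transport):
#   factory = make_pack_factory(graph=True, delta=True, keylength=1)
#   vf = factory(transport)
#   ... use vf.add_lines() / vf.get_record_stream() ...
#   cleanup_pack_group(vf)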


def cleanup_pack_group(versioned_files):
    versioned_files.writer.end()
    versioned_files.stream.close()


class GroupCompressVersionedFiles(VersionedFiles):
    """A group-compress based VersionedFiles implementation."""

    def __init__(self, index, access, delta=True):
        """Create a GroupCompressVersionedFiles object.

        :param index: The index object storing access and graph data.
        :param access: The access object storing raw data.
        :param delta: Whether to delta compress or just entropy compress.
        """
        self._index = index
        self._access = access
        self._delta = delta
        self._unadded_refs = {}
        self._group_cache = LRUSizeCache(max_size=50*1024*1024)
        self._fallback_vfs = []

    def add_lines(self, key, parents, lines, parent_texts=None,
        left_matching_blocks=None, nostore_sha=None, random_id=False,
        check_content=True):
        """Add a text to the store.

        :param key: The key tuple of the text to add.
        :param parents: The parents key tuples of the text to add.
        :param lines: A list of lines. Each line must be a bytestring. And all
            of them except the last must be terminated with \n and contain no
            other \n's. The last line may either contain no \n's or a single
            terminating \n. If the lines list does not meet this constraint
            the add routine may error or may succeed - but you will be unable
            to read the data back accurately. (Checking the lines have been
            split correctly is expensive and extremely unlikely to catch bugs
            so it is not done at runtime unless check_content is True.)
        :param parent_texts: An optional dictionary containing the opaque
            representations of some or all of the parents of version_id to
            allow delta optimisations. VERY IMPORTANT: the texts must be those
            returned by add_lines or data corruption can be caused.
        :param left_matching_blocks: a hint about which areas are common
            between the text and its left-hand-parent. The format is
            the SequenceMatcher.get_matching_blocks format.
        :param nostore_sha: Raise ExistingContent and do not add the lines to
            the versioned file if the digest of the lines matches this.
        :param random_id: If True a random id has been selected rather than
            an id determined by some deterministic process such as a converter
            from a foreign VCS. When True the backend may choose not to check
            for uniqueness of the resulting key within the versioned file, so
            this should only be done when the result is expected to be unique
            anyway.
        :param check_content: If True, the lines supplied are verified to be
            bytestrings that are correctly formed lines.
        :return: The text sha1, the number of bytes in the text, and an opaque
            representation of the inserted version which can be provided
            back to future add_lines calls in the parent_texts dictionary.
        """
        self._index._check_write_ok()
        self._check_add(key, lines, random_id, check_content)
        if parents is None:
            # The caller might pass None if there is no graph data, but kndx
            # indexes can't directly store that, so we give them
            # an empty tuple instead.
            parents = ()
        # double handling for now. Make it work until then.
        length = sum(map(len, lines))
        record = ChunkedContentFactory(key, parents, None, lines)
        sha1 = list(self._insert_record_stream([record], random_id=random_id,
                                               nostore_sha=nostore_sha))[0]
        return sha1, length, None
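    # A minimal usage sketch (not part of the original source; the key shape
    # depends on how many key elements the index was built with):
    #   sha1, num_bytes, _ = vf.add_lines(('file-id', 'rev-1'), (),
    #                                     ['first line\n', 'second line\n'])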

    def add_fallback_versioned_files(self, a_versioned_files):
        """Add a source of texts for texts not present in this knit.

        :param a_versioned_files: A VersionedFiles object.
        """
        self._fallback_vfs.append(a_versioned_files)

    def annotate(self, key):
        """See VersionedFiles.annotate."""
        graph = Graph(self)
        parent_map = self.get_parent_map([key])
        if not parent_map:
            raise errors.RevisionNotPresent(key, self)
        if parent_map[key] is not None:
            search = graph._make_breadth_first_searcher([key])
            keys = set()
            while True:
                try:
                    present, ghosts = search.next_with_ghosts()
                except StopIteration:
                    break
                keys.update(present)
                parent_map = self.get_parent_map(keys)
        else:
            keys = [key]
            parent_map = {key:()}
        head_cache = _mod_graph.FrozenHeadsCache(graph)
        parent_cache = {}
        reannotate = annotate.reannotate
        for record in self.get_record_stream(keys, 'topological', True):
            key = record.key
            chunks = osutils.chunks_to_lines(record.get_bytes_as('chunked'))
            parent_lines = [parent_cache[parent] for parent in parent_map[key]]
            parent_cache[key] = list(
                reannotate(parent_lines, chunks, key, None, head_cache))
        return parent_cache[key]
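    # Hedged sketch (not part of the original source): annotate() resolves
    # the whole ancestry of the requested key and attributes each line to the
    # revision that introduced it, e.g.:
    #   for origin_key, line in vf.annotate(('file-id', 'rev-2')):
    #       print origin_key, line,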

    def check(self, progress_bar=None):
        """See VersionedFiles.check()."""
        keys = self.keys()
        for record in self.get_record_stream(keys, 'unordered', True):
            record.get_bytes_as('fulltext')

    def _check_add(self, key, lines, random_id, check_content):
        """check that version_id and lines are safe to add."""
        version_id = key[-1]
        if version_id is not None:
            if osutils.contains_whitespace(version_id):
                raise errors.InvalidRevisionId(version_id, self)
        self.check_not_reserved_id(version_id)
        # TODO: If random_id==False and the key is already present, we should
        # probably check that the existing content is identical to what is
        # being inserted, and otherwise raise an exception. This would make
        # the bundle code simpler.
        if check_content:
            self._check_lines_not_unicode(lines)
            self._check_lines_are_lines(lines)

    def get_parent_map(self, keys):
        """Get a map of the graph parents of keys.

        :param keys: The keys to look up parents for.
        :return: A mapping from keys to parents. Absent keys are absent from
            the mapping.
        """
        return self._get_parent_map_with_sources(keys)[0]

    def _get_parent_map_with_sources(self, keys):
        """Get a map of the parents of keys.

        :param keys: The keys to look up parents for.
        :return: A tuple. The first element is a mapping from keys to parents.
            Absent keys are absent from the mapping. The second element is a
            list with the locations each key was found in. The first element
            is the in-this-knit parents, the second the first fallback source,
            and so on.
        """
        result = {}
        sources = [self._index] + self._fallback_vfs
        source_results = []
        missing = set(keys)
        for source in sources:
            if not missing:
                break
            new_result = source.get_parent_map(missing)
            source_results.append(new_result)
            result.update(new_result)
            missing.difference_update(set(new_result))
        return result, source_results
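    # Hedged sketch (not part of the original source): for a key k1 present
    # locally and k2 found only in a fallback, the return value is roughly:
    #   ({k1: (p1,), k2: (p2,)}, [{k1: (p1,)}, {k2: (p2,)}])
    # i.e. one merged map plus one per-source map, in search order.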

    def _get_block(self, index_memo):
        read_memo = index_memo[0:3]
        # get the group:
        try:
            block = self._group_cache[read_memo]
        except KeyError:
            # read the group
            zdata = self._access.get_raw_records([read_memo]).next()
            # decompress - whole thing - this is not a bug, as it
            # permits caching. We might want to store the partially
            # decompressed group and decompress object, so that recent
            # texts are not penalised by big groups.
            block = GroupCompressBlock.from_bytes(zdata)
            self._group_cache[read_memo] = block
        # cheapo debugging:
        # print len(zdata), len(plain)
        # parse - requires split_lines, better to have byte offsets
        # here (but not by much - we only split the region for the
        # recipe, and we often want to end up with lines anyway).
        return block
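    # Hedged note (not part of the original source): blocks are keyed by
    # read_memo in the 50MB LRUSizeCache set up in __init__, so extracting
    # several texts from one group decompresses the block only once:
    #   block_a = vf._get_block(memo)   # reads and caches
    #   block_b = vf._get_block(memo)   # cache hit; block_a is block_b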

    def get_missing_compression_parent_keys(self):
        """Return the keys of missing compression parents.

        Missing compression parents occur when a record stream was missing
        basis texts, or an index was scanned that had missing basis texts.
        """
        # GroupCompress cannot currently reference texts that are not in the
        # group, so this is valid for now
        return frozenset()

    def get_record_stream(self, keys, ordering, include_delta_closure):
        """Get a stream of records for keys.

        :param keys: The keys to include.
        :param ordering: Either 'unordered' or 'topological'. A topologically
            sorted stream has compression parents strictly before their
            children.
        :param include_delta_closure: If True then the closure across any
            compression parents will be included (in the opaque data).
        :return: An iterator of ContentFactory objects, each of which is only
            valid until the iterator is advanced.
        """
        # keys might be a generator
        orig_keys = list(keys)
        keys = set(keys)
        if not keys:
            return
        if (not self._index.has_graph
            and ordering in ('topological', 'groupcompress')):
            # Cannot topological order when no graph has been stored.
            # but we allow 'as-requested' or 'unordered'
            ordering = 'unordered'

        remaining_keys = keys
        while True:
            try:
                keys = set(remaining_keys)
                for content_factory in self._get_remaining_record_stream(keys,
                        orig_keys, ordering, include_delta_closure):
                    remaining_keys.discard(content_factory.key)
                    yield content_factory
                return
            except errors.RetryWithNewPacks, e:
                self._access.reload_or_raise(e)
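    # A minimal usage sketch (not part of the original source), mirroring
    # what check() does above:
    #   for record in vf.get_record_stream(keys, 'unordered', True):
    #       if record.storage_kind != 'absent':
    #           text = record.get_bytes_as('fulltext')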

    def _find_from_fallback(self, missing):
        """Find whatever keys you can from the fallbacks.

        :param missing: A set of missing keys. This set will be mutated as
            keys are found from a fallback_vfs.
        :return: (parent_map, key_to_source_map, source_results)
            parent_map  the overall key => parent_keys
            key_to_source_map  a dict from {key: source}
            source_results  a list of (source: keys)
        """
        parent_map = {}
        key_to_source_map = {}
        source_results = []
        for source in self._fallback_vfs:
            if not missing:
                break
            source_parents = source.get_parent_map(missing)
            parent_map.update(source_parents)
            source_parents = list(source_parents)
            source_results.append((source, source_parents))
            key_to_source_map.update((key, source) for key in source_parents)
            missing.difference_update(source_parents)
        return parent_map, key_to_source_map, source_results

    def _get_ordered_source_keys(self, ordering, parent_map, key_to_source_map):
        """Get the (source, [keys]) list.

        The returned objects should be in the order defined by 'ordering',
        which can weave between different sources.

        :param ordering: Must be one of 'topological' or 'groupcompress'
        :return: List of [(source, [keys])] tuples, such that all keys are in
            the defined order, regardless of source.
        """
        if ordering == 'topological':
            present_keys = topo_sort(parent_map)
        else:
            # ordering == 'groupcompress'
            # XXX: This only optimizes for the target ordering. We may need
            #      to balance that with the time it takes to extract
            #      ordering, by somehow grouping based on
            #      locations[key][0:3]
            present_keys = sort_gc_optimal(parent_map)
        # Now group by source:
        source_keys = []
        current_source = None
        for key in present_keys:
            source = key_to_source_map.get(key, self)
            if source is not current_source:
                source_keys.append((source, []))
                current_source = source
            source_keys[-1][1].append(key)
        return source_keys
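    # Hedged sketch (not part of the original source): consecutive keys from
    # the same source collapse into one group, so an ordering of
    # [kA1, kA2, kB1, kA3] over sources A and B comes back as:
    #   [(A, [kA1, kA2]), (B, [kB1]), (A, [kA3])]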

    def _get_as_requested_source_keys(self, orig_keys, locations, unadded_keys,
                                      key_to_source_map):
        source_keys = []
        current_source = None
        for key in orig_keys:
            if key in locations or key in unadded_keys:
                source = self
            elif key in key_to_source_map:
                source = key_to_source_map[key]
            else: # absent
                continue
            if source is not current_source:
                source_keys.append((source, []))
                current_source = source
            source_keys[-1][1].append(key)
        return source_keys

    def _get_io_ordered_source_keys(self, locations, unadded_keys,
                                    source_result):
        def get_group(key):
            # This is the group the bytes are stored in, followed by the
            # location in the group
            return locations[key][0]
        present_keys = sorted(locations.iterkeys(), key=get_group)
        # We don't have an ordering for keys in the in-memory object, but
        # lets process the in-memory ones first.
        present_keys = list(unadded_keys) + present_keys
        # Now grab all of the ones from other sources
        source_keys = [(self, present_keys)]
        source_keys.extend(source_result)
        return source_keys

    def _get_remaining_record_stream(self, keys, orig_keys, ordering,
                                     include_delta_closure):
        """Get a stream of records for keys.

        :param keys: The keys to include.
        :param ordering: one of 'unordered', 'topological', 'groupcompress' or
            'as-requested'
        :param include_delta_closure: If True then the closure across any
            compression parents will be included (in the opaque data).
        :return: An iterator of ContentFactory objects, each of which is only
            valid until the iterator is advanced.
        """
        # Cheap: iterate
        locations = self._index.get_build_details(keys)
        unadded_keys = set(self._unadded_refs).intersection(keys)
        missing = keys.difference(locations)
        missing.difference_update(unadded_keys)
        (fallback_parent_map, key_to_source_map,
         source_result) = self._find_from_fallback(missing)
        if ordering in ('topological', 'groupcompress'):
            # would be better to not globally sort initially but instead
            # start with one key, recurse to its oldest parent, then grab
            # everything in the same group, etc.
            parent_map = dict((key, details[2]) for key, details in
                locations.iteritems())
            for key in unadded_keys:
                parent_map[key] = self._unadded_refs[key]
            parent_map.update(fallback_parent_map)
            source_keys = self._get_ordered_source_keys(ordering, parent_map,
                                                        key_to_source_map)
        elif ordering == 'as-requested':
            source_keys = self._get_as_requested_source_keys(orig_keys,
                locations, unadded_keys, key_to_source_map)
        else:
            # We want to yield the keys in a semi-optimal (read-wise) ordering.
            # Otherwise we thrash the _group_cache and destroy performance
            source_keys = self._get_io_ordered_source_keys(locations,
                unadded_keys, source_result)
        for key in missing:
            yield AbsentContentFactory(key)
        manager = None
        last_read_memo = None
        # TODO: This works fairly well at batching up existing groups into a
        #       streamable format, and possibly allowing for taking one big
        #       group and splitting it when it isn't fully utilized.
        #       However, it doesn't allow us to find under-utilized groups and
        #       combine them into a bigger group on the fly.
        #       (Consider the issue with how chk_map inserts texts
        #       one-at-a-time.) This could be done at insert_record_stream()
        #       time, but it probably would decrease the number of
        #       bytes-on-the-wire for fetch.
        for source, keys in source_keys:
            if source is self:
                for key in keys:
                    if key in self._unadded_refs:
                        if manager is not None:
                            for factory in manager.get_record_stream():
                                yield factory
                            last_read_memo = manager = None
                        bytes, sha1 = self._compressor.extract(key)
                        parents = self._unadded_refs[key]
                        yield FulltextContentFactory(key, parents, sha1, bytes)
                    else:
                        index_memo, _, parents, (method, _) = locations[key]
                        read_memo = index_memo[0:3]
                        if last_read_memo != read_memo:
                            # We are starting a new block. If we have a
                            # manager, we have found everything that fits for
                            # now, so yield records
                            if manager is not None:
                                for factory in manager.get_record_stream():
                                    yield factory
                            # Now start a new manager
                            block = self._get_block(index_memo)
                            manager = _LazyGroupContentManager(block)
                            last_read_memo = read_memo
                        start, end = index_memo[3:5]
                        manager.add_factory(key, parents, start, end)
            else:
                if manager is not None:
                    for factory in manager.get_record_stream():
                        yield factory
                    last_read_memo = manager = None
                for record in source.get_record_stream(keys, ordering,
                                                       include_delta_closure):
                    yield record
        if manager is not None:
            for factory in manager.get_record_stream():
                yield factory
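    # Hedged note (not part of the original source): records that live in the
    # same on-disk block share one _LazyGroupContentManager, so a stream over
    # keys from two blocks yields roughly:
    #   <factories for block 1>, <factories for block 2>
    # with each block read and decompressed only once.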

    def get_sha1s(self, keys):
        """See VersionedFiles.get_sha1s()."""
        result = {}
        for record in self.get_record_stream(keys, 'unordered', True):
            if record.sha1 is not None:
                result[record.key] = record.sha1
            else:
                if record.storage_kind != 'absent':
                    result[record.key] = osutils.sha_string(
                        record.get_bytes_as('fulltext'))
        return result

    def insert_record_stream(self, stream):
        """Insert a record stream into this container.

        :param stream: A stream of records to insert.
        :return: None
        :seealso VersionedFiles.get_record_stream:
        """
        # XXX: Setting random_id=True makes
        # test_insert_record_stream_existing_keys fail for groupcompress and
        # groupcompress-nograph, this needs to be revisited while addressing
        # 'bzr branch' performance issues.
        for _ in self._insert_record_stream(stream, random_id=False):
            pass

    def _insert_record_stream(self, stream, random_id=False, nostore_sha=None,
                              reuse_blocks=True):
        """Internal core to insert a record stream into this container.

        This helper function has a different interface than insert_record_stream
        to allow add_lines to be minimal, but still return the needed data.

        :param stream: A stream of records to insert.
        :param nostore_sha: If the sha1 of a given text matches nostore_sha,
            raise ExistingContent, rather than committing the new text.
        :param reuse_blocks: If the source is streaming from
            groupcompress-blocks, just insert the blocks as-is, rather than
            expanding the texts and inserting again.
        :return: An iterator over the sha1 of the inserted records.
        :seealso insert_record_stream:
        :seealso add_lines:
        """
        adapters = {}
        def get_adapter(adapter_key):
            try:
                return adapters[adapter_key]
            except KeyError:
                adapter_factory = adapter_registry.get(adapter_key)
                adapter = adapter_factory(self)
                adapters[adapter_key] = adapter
                return adapter
        # This will go up to fulltexts for gc to gc fetching, which isn't
        # ideal.
        self._compressor = GroupCompressor()
        self._unadded_refs = {}
        keys_to_add = []
        def flush():
            bytes = self._compressor.flush().to_bytes()
            index, start, length = self._access.add_raw_records(
                [(None, len(bytes))], bytes)[0]
            nodes = []
            for key, reads, refs in keys_to_add:
                nodes.append((key, "%d %d %s" % (start, length, reads), refs))
            self._index.add_records(nodes, random_id=random_id)
            self._unadded_refs = {}
            del keys_to_add[:]
            self._compressor = GroupCompressor()

        last_prefix = None
        max_fulltext_len = 0
        max_fulltext_prefix = None
        insert_manager = None
        block_start = None
        block_length = None
        # XXX: TODO: remove this, it is just for safety checking for now
        inserted_keys = set()
        for record in stream:
            # Raise an error when a record is missing.
            if record.storage_kind == 'absent':
                raise errors.RevisionNotPresent(record.key, self)
            if random_id:
                if record.key in inserted_keys:
                    trace.note('Insert claimed random_id=True,'
                               ' but then inserted %r two times', record.key)
                    continue
                inserted_keys.add(record.key)
            if reuse_blocks:
                # If the reuse_blocks flag is set, check to see if we can just
                # copy a groupcompress block as-is.
                if record.storage_kind == 'groupcompress-block':
                    # Insert the raw block into the target repo
                    insert_manager = record._manager
                    insert_manager._check_rebuild_block()
                    bytes = record._manager._block.to_bytes()
                    _, start, length = self._access.add_raw_records(
                        [(None, len(bytes))], bytes)[0]
                    del bytes
                    block_start = start
                    block_length = length
                if record.storage_kind in ('groupcompress-block',
                                           'groupcompress-block-ref'):
                    if insert_manager is None:
                        raise AssertionError('No insert_manager set')
                    value = "%d %d %d %d" % (block_start, block_length,
                                             record._start, record._end)
                    nodes = [(record.key, value, (record.parents,))]
                    # TODO: Consider buffering up many nodes to be added, not
                    #       sure how much overhead this has, but we're seeing
                    #       ~23s / 120s in add_records calls
                    self._index.add_records(nodes, random_id=random_id)
                    continue
            try:
                bytes = record.get_bytes_as('fulltext')
            except errors.UnavailableRepresentation:
                adapter_key = record.storage_kind, 'fulltext'
                adapter = get_adapter(adapter_key)
                bytes = adapter.get_bytes(record)
            if len(record.key) > 1:
                prefix = record.key[0]
                soft = (prefix == last_prefix)
            else:
                prefix = None
                soft = False
            if max_fulltext_len < len(bytes):
                max_fulltext_len = len(bytes)
                max_fulltext_prefix = prefix
            (found_sha1, start_point, end_point,
             type) = self._compressor.compress(record.key,
                                               bytes, record.sha1, soft=soft,
                                               nostore_sha=nostore_sha)
            # delta_ratio = float(len(bytes)) / (end_point - start_point)
            # Check if we want to continue to include that text
            if (prefix == max_fulltext_prefix
                and end_point < 2 * max_fulltext_len):
                # As long as we are on the same file_id, we will fill at least
                # 2 * max_fulltext_len
                start_new_block = False
            elif end_point > 4*1024*1024:
                start_new_block = True
            elif (prefix is not None and prefix != last_prefix
                  and end_point > 2*1024*1024):
                start_new_block = True
            else:
                start_new_block = False
            last_prefix = prefix
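            # Hedged worked example (not part of the original source): with a
            # 1MB max fulltext on the current prefix, the group keeps growing
            # until 2MB; past 4MB a new block always starts; between 2MB and
            # 4MB a new block starts only when the key prefix changes.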
            if start_new_block:
                self._compressor.pop_last()
                flush()
                max_fulltext_len = len(bytes)
                (found_sha1, start_point, end_point,
                 type) = self._compressor.compress(record.key, bytes,
                                                   record.sha1)
            if record.key[-1] is None:
                key = record.key[:-1] + ('sha1:' + found_sha1,)
            else:
                key = record.key
            self._unadded_refs[key] = record.parents
            yield found_sha1
            keys_to_add.append((key, '%d %d' % (start_point, end_point),
                (record.parents,)))
        if len(keys_to_add):
            flush()
        self._compressor = None

    def iter_lines_added_or_present_in_keys(self, keys, pb=None):
        """Iterate over the lines in the versioned files from keys.

        This may return lines from other keys. Each item the returned
        iterator yields is a tuple of a line and a text version that that line
        is present in (not introduced in).

        Ordering of results is in whatever order is most suitable for the
        underlying storage format.

        If a progress bar is supplied, it may be used to indicate progress.
        The caller is responsible for cleaning up progress bars (because this
        is an iterator).

        NOTES:
         * Lines are normalised by the underlying store: they will all have \n
           terminators.
         * Lines are returned in arbitrary order.

        :return: An iterator over (line, key).
        """
        if pb is None:
            pb = progress.DummyProgress()
        keys = set(keys)
        total = len(keys)
        # we don't care about inclusions, the caller cares.
        # but we need to setup a list of records to visit.
        # we need key, position, length
        for key_idx, record in enumerate(self.get_record_stream(keys,
            'unordered', True)):
            # XXX: todo - optimise to use less than full texts.
            key = record.key
            pb.update('Walking content', key_idx, total)
            if record.storage_kind == 'absent':
                raise errors.RevisionNotPresent(key, self)
            lines = osutils.split_lines(record.get_bytes_as('fulltext'))
            for line in lines:
                yield line, key
        pb.update('Walking content', total, total)

    def keys(self):
        """See VersionedFiles.keys."""
        if 'evil' in debug.debug_flags:
            trace.mutter_callsite(2, "keys scales with size of history")
        sources = [self._index] + self._fallback_vfs
        result = set()
        for source in sources:
            result.update(source.keys())
        return result


class _GCGraphIndex(object):
    """Mapper from GroupCompressVersionedFiles needs into GraphIndex storage."""

    def __init__(self, graph_index, is_locked, parents=True,
        add_callback=None):
        """Construct a _GCGraphIndex on a graph_index.

        :param graph_index: An implementation of bzrlib.index.GraphIndex.
        :param is_locked: A callback, returns True if the index is locked and
            thus usable.
        :param parents: If True, record knit parents; if not, do not record
            parents.
        :param add_callback: If not None, allow additions to the index and call
            this callback with a list of added GraphIndex nodes:
            [(node, value, node_refs), ...]
        """
        self._add_callback = add_callback
        self._graph_index = graph_index
        self._parents = parents
        self.has_graph = parents
        self._is_locked = is_locked

    def add_records(self, records, random_id=False):
        """Add multiple records to the index.

        This function does not insert data into the Immutable GraphIndex
        backing the KnitGraphIndex, instead it prepares data for insertion by
        the caller and checks that it is safe to insert then calls
        self._add_callback with the prepared GraphIndex nodes.

        :param records: a list of tuples:
            (key, options, access_memo, parents).
        :param random_id: If True the ids being added were randomly generated
            and no check for existence will be performed.
        """
        if not self._add_callback:
            raise errors.ReadOnlyError(self)
        # we hope there are no repositories with inconsistent parentage
        # anymore.

        changed = False
        keys = {}
        for (key, value, refs) in records:
            if not self._parents:
                if refs:
                    for ref in refs:
                        if ref:
                            raise errors.KnitCorrupt(self,
                                "attempt to add node with parents "
                                "in parentless index.")
                refs = ()
                changed = True
            keys[key] = (value, refs)
        # check for dups
        if not random_id:
            present_nodes = self._get_entries(keys)
            for (index, key, value, node_refs) in present_nodes:
                if node_refs != keys[key][1]:
                    raise errors.KnitCorrupt(self, "inconsistent details in add_records"
                        ": %s %s" % ((value, node_refs), keys[key]))
                del keys[key]
                changed = True
        if changed:
            result = []
            if self._parents:
                for key, (value, node_refs) in keys.iteritems():
                    result.append((key, value, node_refs))
            else:
                for key, (value, node_refs) in keys.iteritems():
                    result.append((key, value))
            records = result
        self._add_callback(records)
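    # Hedged sketch (not part of the original source): the value string
    # encodes the block's offset and length plus the record's byte range
    # within it, so a node passed to add_records looks roughly like:
    #   (('file-id', 'rev-1'), '0 4096 17 210', ((('file-id', 'rev-0'),),))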
|
3735.31.2
by John Arbash Meinel
Cleanup trailing whitespace, get test_source to pass by removing asserts. |
1614 |
|
0.17.5
by Robert Collins
nograph tests completely passing. |
1615 |
def _check_read(self): |
0.20.29
by Ian Clatworthy
groupcompress.py code cleanups |
1616 |
"""Raise an exception if reads are not permitted."""
|
0.17.5
by Robert Collins
nograph tests completely passing. |
1617 |
if not self._is_locked(): |
1618 |
raise errors.ObjectNotLocked(self) |
|
1619 |
||
0.17.2
by Robert Collins
Core proof of concept working. |
1620 |
def _check_write_ok(self): |
0.20.29
by Ian Clatworthy
groupcompress.py code cleanups |
1621 |
"""Raise an exception if writes are not permitted."""
|
0.17.2
by Robert Collins
Core proof of concept working. |
1622 |
if not self._is_locked(): |
1623 |
raise errors.ObjectNotLocked(self) |
|
1624 |
||
    def _get_entries(self, keys, check_present=False):
        """Get the entries for keys.

        Note: Callers are responsible for checking that the index is locked
        before calling this method.

        :param keys: An iterable of index key tuples.
        """
        keys = set(keys)
        found_keys = set()
        if self._parents:
            for node in self._graph_index.iter_entries(keys):
                yield node
                found_keys.add(node[1])
        else:
            # Adapt the parentless index to the rest of the code.
            for node in self._graph_index.iter_entries(keys):
                yield node[0], node[1], node[2], ()
                found_keys.add(node[1])
        if check_present:
            missing_keys = keys.difference(found_keys)
            if missing_keys:
                raise errors.RevisionNotPresent(missing_keys.pop(), self)
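    # Shape sketch (hypothetical values): with parents enabled, GraphIndex
    # entries already arrive as 4-tuples,
    #
    #   (index, ('rev-2',), '0 512 10 20', ((('rev-1',),),))
    #
    # while a parentless index yields 3-tuples, so an empty reference list
    # is appended to give callers one uniform shape.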

    def get_parent_map(self, keys):
        """Get a map of the parents of keys.

        :param keys: The keys to look up parents for.
        :return: A mapping from keys to parents. Absent keys are absent from
            the mapping.
        """
        self._check_read()
        nodes = self._get_entries(keys)
        result = {}
        if self._parents:
            for node in nodes:
                result[node[1]] = node[3][0]
        else:
            for node in nodes:
                result[node[1]] = None
        return result
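    # Usage sketch (hypothetical data):
    #
    #   index.get_parent_map([('rev-2',), ('absent',)])
    #   => {('rev-2',): (('rev-1',),)}
    #
    # Absent keys simply do not appear in the result, and a parentless
    # index maps every present key to None.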

    def get_build_details(self, keys):
        """Get the various build details for keys.

        Ghosts are omitted from the result.

        :param keys: An iterable of keys.
        :return: A dict of key:
            (index_memo, compression_parent, parents, record_details).

            index_memo
                opaque structure to pass to read_records to extract the raw
                data
            compression_parent
                Content that this record is built upon; may be None
            parents
                Logical parents of this node
            record_details
                extra information about the content which needs to be passed
                to Factory.parse_record
        """
        self._check_read()
        result = {}
        entries = self._get_entries(keys)
        for entry in entries:
            key = entry[1]
            if not self._parents:
                parents = None
            else:
                parents = entry[3][0]
            method = 'group'
            result[key] = (self._node_to_position(entry),
                           None, parents, (method, None))
        return result
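    # Sketch of one result entry (hypothetical data), unpacking as
    # (index_memo, compression_parent, parents, record_details):
    #
    #   ((index, 0, 512, 10, 20), None, (('rev-1',),), ('group', None))
    #
    # compression_parent is always None and the method always 'group',
    # since groupcompress extracts records from whole groups rather than
    # per-record delta chains.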

    def keys(self):
        """Get all the keys in the collection.

        The keys are not ordered.
        """
        self._check_read()
        return [node[1] for node in self._graph_index.iter_all_entries()]

    def _node_to_position(self, node):
        """Convert an index value to position details."""
        bits = node[2].split(' ')
        # It would be nice not to read the entire gzip.
        start = int(bits[0])
        stop = int(bits[1])
        basis_end = int(bits[2])
        delta_end = int(bits[3])
        return node[0], start, stop, basis_end, delta_end
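    # Worked example (hypothetical node): an index value of '0 512 10 20'
    # decodes as
    #
    #   _node_to_position((idx, ('rev-2',), '0 512 10 20', ()))
    #   => (idx, 0, 512, 10, 20)
    #
    # i.e. the byte range of the stored group plus the two end offsets
    # used when extracting this record's content from it.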

from bzrlib._groupcompress_py import (
    apply_delta,
    apply_delta_to_source,
    encode_base128_int,
    decode_base128_int,
    decode_copy_instruction,
    LinesDeltaIndex,
    )
try:
    from bzrlib._groupcompress_pyx import (
        apply_delta,
        apply_delta_to_source,
        DeltaIndex,
        encode_base128_int,
        decode_base128_int,
        )
    GroupCompressor = PyrexGroupCompressor
except ImportError:
    GroupCompressor = PythonGroupCompressor
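
# A minimal pure-Python sketch of the base-128 varint scheme that
# encode_base128_int/decode_base128_int expose above (assumed from the
# wire format: 7 data bits per byte, least-significant group first, high
# bit set on continuation bytes).  The helper names are hypothetical and
# this is not the module's own implementation.
def _sketch_encode_base128_int(val):
    # Emit 7 bits at a time, lsb first; 0x80 flags "more bytes follow".
    out = []
    while val >= 0x80:
        out.append(chr((val | 0x80) & 0xFF))
        val >>= 7
    out.append(chr(val))
    return ''.join(out)


def _sketch_decode_base128_int(data):
    # Accumulate 7-bit groups until a byte without the high bit is seen.
    # Returns (value, number_of_bytes_consumed).
    val = 0
    shift = 0
    offset = 0
    byte = ord(data[offset])
    while byte & 0x80:
        val |= (byte & 0x7F) << shift
        shift += 7
        offset += 1
        byte = ord(data[offset])
    val |= byte << shift
    return val, offset + 1

# For example, _sketch_decode_base128_int(_sketch_encode_base128_int(300))
# => (300, 2).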