# Copyright (C) 2008, 2009 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

"""Core compression logic for compressing streams of related files."""

from itertools import izip
from cStringIO import StringIO
import time
import zlib
try:
    import pylzma
except ImportError:
    pylzma = None

from bzrlib import (
    annotate,
    debug,
    diff,
    errors,
    graph as _mod_graph,
    osutils,
    pack,
    patiencediff,
    trace,
    )
from bzrlib.graph import Graph
from bzrlib.knit import _DirectPackAccess
from bzrlib.btree_index import BTreeBuilder
from bzrlib.lru_cache import LRUSizeCache
from bzrlib.tsort import topo_sort
from bzrlib.versionedfile import (
    adapter_registry,
    AbsentContentFactory,
    ChunkedContentFactory,
    FulltextContentFactory,
    VersionedFiles,
    )


_USE_LZMA = False and (pylzma is not None)

# osutils.sha_string('')
_null_sha1 = 'da39a3ee5e6b4b0d3255bfef95601890afd80709'


def sort_gc_optimal(parent_map):
    """Sort and group the keys in parent_map into groupcompress order.

    groupcompress is defined (currently) as reverse-topological order, grouped
    by the key prefix.

    :return: A sorted-list of keys
    """
    # groupcompress ordering is approximately reverse topological,
    # properly grouped by file-id.
    per_prefix_map = {}
    for item in parent_map.iteritems():
        key = item[0]
        if isinstance(key, str) or len(key) == 1:
            prefix = ''
        else:
            prefix = key[0]
        try:
            per_prefix_map[prefix].append(item)
        except KeyError:
            per_prefix_map[prefix] = [item]

    present_keys = []
    for prefix in sorted(per_prefix_map):
        present_keys.extend(reversed(topo_sort(per_prefix_map[prefix])))
    return present_keys
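
# For example (hypothetical keys): given a parent_map of
#   {('f1', 'r2'): (('f1', 'r1'),), ('f1', 'r1'): (), ('f2', 'r1'): ()}
# the keys are grouped by prefix and emitted newest-first within each group:
#   [('f1', 'r2'), ('f1', 'r1'), ('f2', 'r1')]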


# The max zlib window size is 32kB, so if we set 'max_size' output of the
# decompressor to the requested bytes + 32kB, then we should guarantee
# num_bytes coming out.
_ZLIB_DECOMP_WINDOW = 32*1024

class GroupCompressBlock(object):
    """An object which maintains the internal structure of the compressed data.

    This tracks the meta info (start of text, length, type, etc.)
    """

    # Group Compress Block v1 Zlib
    GCB_HEADER = 'gcb1z\n'
    # Group Compress Block v1 Lzma
    GCB_LZ_HEADER = 'gcb1l\n'
    GCB_KNOWN_HEADERS = (GCB_HEADER, GCB_LZ_HEADER)
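
    # The serialised form (see to_bytes()/_parse_bytes() below) is:
    #   6-byte header: 'gcb1z\n' (zlib) or 'gcb1l\n' (lzma)
    #   '<z_content_length>\n<content_length>\n' in base-10 ascii
    #   the compressed content itself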

    def __init__(self):
        # map by key? or just order in file?
        self._compressor_name = None
        self._z_content = None
        self._z_content_decompressor = None
        self._z_content_length = None
        self._content_length = None
        self._content = None

    def __len__(self):
        # This is the maximum number of bytes this object will reference if
        # everything is decompressed. However, if we decompress less than
        # everything... (this would cause some problems for LRUSizeCache)
        return self._content_length + self._z_content_length

    def _ensure_content(self, num_bytes=None):
        """Make sure that content has been expanded enough.

        :param num_bytes: Ensure that we have extracted at least num_bytes of
            content. If None, consume everything
        """
        # TODO: If we re-use the same content block at different times during
        #       get_record_stream(), it is possible that the first pass will
        #       get inserted, triggering an extract/_ensure_content() which
        #       will get rid of _z_content. And then the next use of the block
        #       will try to access _z_content (to send it over the wire), and
        #       fail because it is already extracted. Consider never releasing
        #       _z_content because of this.
        if num_bytes is None:
            num_bytes = self._content_length
        elif (self._content_length is not None
              and num_bytes > self._content_length):
            raise AssertionError(
                'requested num_bytes (%d) > content length (%d)'
                % (num_bytes, self._content_length))
        # Expand the content if required
        if self._content is None:
            if self._z_content is None:
                raise AssertionError('No content to decompress')
            if self._z_content == '':
                self._content = ''
            elif self._compressor_name == 'lzma':
                # We don't do partial lzma decomp yet
                self._content = pylzma.decompress(self._z_content)
            elif self._compressor_name == 'zlib':
                # Start a zlib decompressor
                if num_bytes is None:
                    self._content = zlib.decompress(self._z_content)
                else:
                    self._z_content_decompressor = zlib.decompressobj()
                    # Seed the decompressor with the uncompressed bytes, so
                    # that the rest of the code is simplified
                    self._content = self._z_content_decompressor.decompress(
                        self._z_content, num_bytes + _ZLIB_DECOMP_WINDOW)
            else:
                raise AssertionError('Unknown compressor: %r'
                                     % self._compressor_name)
        # Any bytes remaining to be decompressed will be in the decompressor's
        # 'unconsumed_tail'

        # Do we have enough bytes already?
        if num_bytes is not None and len(self._content) >= num_bytes:
            return
        if num_bytes is None and self._z_content_decompressor is None:
            # We must have already decompressed everything
            return
        # If we got this far, and don't have a decompressor, something is wrong
        if self._z_content_decompressor is None:
            raise AssertionError(
                'No decompressor to decompress %d bytes' % num_bytes)
        remaining_decomp = self._z_content_decompressor.unconsumed_tail
        if num_bytes is None:
            if remaining_decomp:
                # We don't know how much is left, but we'll decompress it all
                self._content += self._z_content_decompressor.decompress(
                    remaining_decomp)
                # Note: There's what I consider a bug in zlib.decompressobj
                #       If you pass back in the entire unconsumed_tail, only
                #       this time you don't pass a max-size, it doesn't
                #       change the unconsumed_tail back to None/''.
                #       However, we know we are done with the whole stream
                self._z_content_decompressor = None
            # XXX: Why is this the only place in this routine we set this?
            self._content_length = len(self._content)
        else:
            if not remaining_decomp:
                raise AssertionError('Nothing left to decompress')
            needed_bytes = num_bytes - len(self._content)
            # We always set max_size to 32kB over the minimum needed, so that
            # zlib will give us as much as we really want.
            # TODO: If this isn't good enough, we could make a loop here,
            #       that keeps expanding the request until we get enough
            self._content += self._z_content_decompressor.decompress(
                remaining_decomp, needed_bytes + _ZLIB_DECOMP_WINDOW)
            if len(self._content) < num_bytes:
                raise AssertionError('%d bytes wanted, only %d available'
                                     % (num_bytes, len(self._content)))
            if not self._z_content_decompressor.unconsumed_tail:
                # The stream is finished
                self._z_content_decompressor = None

    def _parse_bytes(self, bytes, pos):
        """Read the various lengths from the header.

        This also populates the various 'compressed' buffers.

        :return: The position in bytes just after the last newline
        """
        # At present, we have 2 integers for the compressed and uncompressed
        # content. In base10 (ascii) 14 bytes can represent > 1TB, so to avoid
        # checking too far, cap the search to 14 bytes.
        pos2 = bytes.index('\n', pos, pos + 14)
        self._z_content_length = int(bytes[pos:pos2])
        pos = pos2 + 1
        pos2 = bytes.index('\n', pos, pos + 14)
        self._content_length = int(bytes[pos:pos2])
        pos = pos2 + 1
        if len(bytes) != (pos + self._z_content_length):
            # XXX: Define some GCCorrupt error ?
            raise AssertionError('Invalid bytes: (%d) != %d + %d' %
                                 (len(bytes), pos, self._z_content_length))
        self._z_content = bytes[pos:]

    @classmethod
    def from_bytes(cls, bytes):
        out = cls()
        if bytes[:6] not in cls.GCB_KNOWN_HEADERS:
            raise ValueError('bytes did not start with any of %r'
                             % (cls.GCB_KNOWN_HEADERS,))
        # XXX: why not testing the whole header ?
        if bytes[4] == 'z':
            out._compressor_name = 'zlib'
        elif bytes[4] == 'l':
            out._compressor_name = 'lzma'
        else:
            raise ValueError('unknown compressor: %r' % (bytes,))
        out._parse_bytes(bytes, 6)
        return out

    def extract(self, key, start, end, sha1=None):
        """Extract the text for a specific key.

        :param key: The label used for this content
        :param sha1: TODO (should we validate only when sha1 is supplied?)
        :return: The bytes for the content
        """
        if start == end == 0:
            return ''
        self._ensure_content(end)
        # The bytes are 'f' or 'd' for the type, then a variable-length
        # base128 integer for the content size, then the actual content
        # We know that the variable-length integer won't be longer than 5
        # bytes (it takes 5 bytes to encode 2^32)
        c = self._content[start]
        if c == 'f':
            type = 'fulltext'
        else:
            if c != 'd':
                raise ValueError('Unknown content control code: %s'
                                 % (c,))
            type = 'delta'
        content_len, len_len = decode_base128_int(
            self._content[start + 1:start + 6])
        content_start = start + 1 + len_len
        if end != content_start + content_len:
            raise ValueError('end != len according to field header'
                ' %s != %s' % (end, content_start + content_len))
        if c == 'f':
            bytes = self._content[content_start:end]
        elif c == 'd':
            bytes = apply_delta_to_source(self._content, content_start, end)
        return bytes
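
    # For instance (illustrative only), a 5-byte fulltext 'hello' is stored
    # as 'f' + '\x05' + 'hello'; base128 integers use 7 bits per byte, with
    # the high bit set on continuation bytes.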

    def set_content(self, content):
        """Set the content of this block."""
        self._content_length = len(content)
        self._content = content
        self._z_content = None

    def to_bytes(self):
        """Encode the information into a byte stream."""
        compress = zlib.compress
        if _USE_LZMA:
            compress = pylzma.compress
        if self._z_content is None:
            if self._content is None:
                raise AssertionError('Nothing to compress')
            self._z_content = compress(self._content)
            self._z_content_length = len(self._z_content)
        if _USE_LZMA:
            header = self.GCB_LZ_HEADER
        else:
            header = self.GCB_HEADER
        chunks = [header,
                  '%d\n%d\n' % (self._z_content_length, self._content_length),
                  self._z_content,
                 ]
        return ''.join(chunks)
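
    # A minimal round-trip sketch:
    #   block = GroupCompressBlock()
    #   block.set_content(some_bytes)
    #   copy = GroupCompressBlock.from_bytes(block.to_bytes())
    #   copy._ensure_content()    # copy._content == some_bytes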

    def _dump(self, include_text=False):
        """Take this block, and spit out a human-readable structure.

        :param include_text: Inserts also include text bits, choose whether
            you want this displayed in the dump or not.
        :return: A dump of the given block. The layout is something like:
            [('f', length), ('d', delta_length, text_length, [delta_info])]
            delta_info := [('i', num_bytes, text), ('c', offset, num_bytes),
            ...]
        """
        self._ensure_content()
        result = []
        pos = 0
        while pos < self._content_length:
            kind = self._content[pos]
            pos += 1
            if kind not in ('f', 'd'):
                raise ValueError('invalid kind character: %r' % (kind,))
            content_len, len_len = decode_base128_int(
                self._content[pos:pos + 5])
            pos += len_len
            if content_len + pos > self._content_length:
                raise ValueError('invalid content_len %d for record @ pos %d'
                                 % (content_len, pos - len_len - 1))
            if kind == 'f': # Fulltext
                result.append(('f', content_len))
            elif kind == 'd': # Delta
                delta_content = self._content[pos:pos+content_len]
                delta_info = []
                # The first entry in a delta is the decompressed length
                decomp_len, delta_pos = decode_base128_int(delta_content)
                result.append(('d', content_len, decomp_len, delta_info))
                measured_len = 0
                while delta_pos < content_len:
                    c = ord(delta_content[delta_pos])
                    delta_pos += 1
                    if c & 0x80: # Copy
                        (offset, length,
                         delta_pos) = decode_copy_instruction(delta_content, c,
                                                              delta_pos)
                        delta_info.append(('c', offset, length))
                        measured_len += length
                    else: # Insert
                        if include_text:
                            txt = delta_content[delta_pos:delta_pos+c]
                        else:
                            txt = ''
                        delta_info.append(('i', c, txt))
                        measured_len += c
                        delta_pos += c
                if delta_pos != content_len:
                    raise ValueError('Delta consumed a bad number of bytes:'
                                     ' %d != %d' % (delta_pos, content_len))
                if measured_len != decomp_len:
                    raise ValueError('Delta claimed fulltext was %d bytes, but'
                                     ' extraction resulted in %d bytes'
                                     % (decomp_len, measured_len))
            pos += content_len
        return result


class _LazyGroupCompressFactory(object):
    """Yield content from a GroupCompressBlock on demand."""

    def __init__(self, key, parents, manager, start, end, first):
        """Create a _LazyGroupCompressFactory

        :param key: The key of just this record
        :param parents: The parents of this key (possibly None)
        :param manager: The _LazyGroupContentManager which holds the
            GroupCompressBlock for this record
        :param start: Offset of the first byte for this record in the
            uncompressed content
        :param end: Offset of the byte just after the end of this record
            (ie, bytes = content[start:end])
        :param first: Is this the first Factory for the given block?
        """
        self.key = key
        self.parents = parents
        self.sha1 = None
        # Note: This attribute coupled with Manager._factories creates a
        #       reference cycle. Perhaps we would rather use a weakref(), or
        #       find an appropriate time to release the ref. After the first
        #       get_bytes_as call? After Manager.get_record_stream() returns
        #       the object?
        self._manager = manager
        self._bytes = None
        self.storage_kind = 'groupcompress-block'
        if not first:
            self.storage_kind = 'groupcompress-block-ref'
        self._first = first
        self._start = start
        self._end = end

    def __repr__(self):
        return '%s(%s, first=%s)' % (self.__class__.__name__,
            self.key, self._first)

    def get_bytes_as(self, storage_kind):
        if storage_kind == self.storage_kind:
            if self._first:
                # wire bytes, something...
                return self._manager._wire_bytes()
            else:
                return ''
        if storage_kind in ('fulltext', 'chunked'):
            if self._bytes is None:
                # Grab and cache the raw bytes for this entry
                # and break the ref-cycle with _manager since we don't need it
                # anymore
                self._manager._prepare_for_extract()
                block = self._manager._block
                self._bytes = block.extract(self.key, self._start, self._end)
                # There are code paths that first extract as fulltext, and then
                # extract as storage_kind (smart fetch). So we don't break the
                # refcycle here, but instead in manager.get_record_stream()
                # self._manager = None
            if storage_kind == 'fulltext':
                return self._bytes
            else:
                return [self._bytes]
        raise errors.UnavailableRepresentation(self.key, storage_kind,
            self.storage_kind)


class _LazyGroupContentManager(object):
    """This manages a group of _LazyGroupCompressFactory objects."""

    def __init__(self, block):
        self._block = block
        # We need to preserve the ordering
        self._factories = []
        self._last_byte = 0

    def add_factory(self, key, parents, start, end):
        if not self._factories:
            first = True
        else:
            first = False
        # Note that this creates a reference cycle....
        factory = _LazyGroupCompressFactory(key, parents, self,
            start, end, first=first)
        # max() works here, but as a function call, doing a compare seems to be
        # significantly faster, timeit says 250ms for max() and 100ms for the
        # comparison
        if end > self._last_byte:
            self._last_byte = end
        self._factories.append(factory)

    def get_record_stream(self):
        """Get a record for all keys added so far."""
        for factory in self._factories:
            yield factory
            # Break the ref-cycle
            factory._bytes = None
            factory._manager = None
        # TODO: Consider setting self._factories = None after the above loop,
        #       as it will break the reference cycle

    def _trim_block(self, last_byte):
        """Create a new GroupCompressBlock, with just some of the content."""
        # None of the factories need to be adjusted, because the content is
        # located in an identical place. Just that some of the unreferenced
        # trailing bytes are stripped
        trace.mutter('stripping trailing bytes from groupcompress block'
                     ' %d => %d', self._block._content_length, last_byte)
        new_block = GroupCompressBlock()
        self._block._ensure_content(last_byte)
        new_block.set_content(self._block._content[:last_byte])
        self._block = new_block

    def _rebuild_block(self):
        """Create a new GroupCompressBlock with only the referenced texts."""
        compressor = GroupCompressor()
        tstart = time.time()
        old_length = self._block._content_length
        end_point = 0
        for factory in self._factories:
            bytes = factory.get_bytes_as('fulltext')
            (found_sha1, start_point, end_point,
             type) = compressor.compress(factory.key, bytes, factory.sha1)
            # Now update this factory with the new offsets, etc
            factory.sha1 = found_sha1
            factory._start = start_point
            factory._end = end_point
        self._last_byte = end_point
        new_block = compressor.flush()
        # TODO: Should we check that new_block really *is* smaller than the old
        #       block? It seems hard to come up with a method that it would
        #       expand, since we do full compression again. Perhaps based on a
        #       request that ends up poorly ordered?
        delta = time.time() - tstart
        self._block = new_block
        trace.mutter('creating new compressed block on-the-fly in %.3fs'
                     ' %d bytes => %d bytes', delta, old_length,
                     self._block._content_length)

    def _prepare_for_extract(self):
        """A _LazyGroupCompressFactory is about to extract to fulltext."""
        # We expect that if one child is going to fulltext, all will be. This
        # helps prevent all of them from extracting a small amount at a time.
        # Which in itself isn't terribly expensive, but resizing 2MB 32kB at a
        # time (self._block._content) is a little expensive.
        self._block._ensure_content(self._last_byte)

    def _check_rebuild_block(self):
        """Check to see if our block should be repacked."""
        total_bytes_used = 0
        last_byte_used = 0
        for factory in self._factories:
            total_bytes_used += factory._end - factory._start
            last_byte_used = max(last_byte_used, factory._end)
        # If we are using most of the bytes from the block, we have nothing
        # else to check (currently more than 1/2)
        if total_bytes_used * 2 >= self._block._content_length:
            return
        # Can we just strip off the trailing bytes? If we are going to be
        # transmitting more than 50% of the front of the content, go ahead
        if total_bytes_used * 2 > last_byte_used:
            self._trim_block(last_byte_used)
            return

        # We are using a small amount of the data, and it isn't just packed
        # nicely at the front, so rebuild the content.
        # Note: This would be *nicer* as a strip-data-from-group, rather than
        #       building it up again from scratch
        #       It might be reasonable to consider the fulltext sizes for
        #       different bits when deciding this, too. As you may have a small
        #       fulltext, and a trivial delta, and you are just trading around
        #       for another fulltext. If we do a simple 'prune' you may end up
        #       expanding many deltas into fulltexts, as well.
        #       If we build a cheap enough 'strip', then we could try a strip,
        #       if that expands the content, we then rebuild.
        self._rebuild_block()

    def _wire_bytes(self):
        """Return a byte stream suitable for transmitting over the wire."""
        self._check_rebuild_block()
        # The outer block starts with:
        #   'groupcompress-block\n'
        #   <length of compressed key info>\n
        #   <length of uncompressed info>\n
        #   <length of gc block>\n
        #   <header bytes>
        #   <gc-block>
        lines = ['groupcompress-block\n']
        # The minimal info we need is the key, the start offset, and the
        # parents. The length and type are encoded in the record itself.
        # However, passing in the other bits makes it easier. The list of
        # keys, and the start offset, the length
        # 1 line key
        # 1 line with parents, '' for ()
        # 1 line for start offset
        # 1 line for end byte
        header_lines = []
        for factory in self._factories:
            key_bytes = '\x00'.join(factory.key)
            parents = factory.parents
            if parents is None:
                parent_bytes = 'None:'
            else:
                parent_bytes = '\t'.join('\x00'.join(key) for key in parents)
            record_header = '%s\n%s\n%d\n%d\n' % (
                key_bytes, parent_bytes, factory._start, factory._end)
            header_lines.append(record_header)
            # TODO: Can we break the refcycle at this point and set
            #       factory._manager = None?
        header_bytes = ''.join(header_lines)
        del header_lines
        header_bytes_len = len(header_bytes)
        z_header_bytes = zlib.compress(header_bytes)
        del header_bytes
        z_header_bytes_len = len(z_header_bytes)
        block_bytes = self._block.to_bytes()
        lines.append('%d\n%d\n%d\n' % (z_header_bytes_len, header_bytes_len,
                                       len(block_bytes)))
        lines.append(z_header_bytes)
        lines.append(block_bytes)
        del z_header_bytes, block_bytes
        return ''.join(lines)

    @classmethod
    def from_bytes(cls, bytes):
        # TODO: This does extra string copying, probably better to do it a
        #       different way
        (storage_kind, z_header_len, header_len,
         block_len, rest) = bytes.split('\n', 4)
        del bytes
        if storage_kind != 'groupcompress-block':
            raise ValueError('Unknown storage kind: %s' % (storage_kind,))
        z_header_len = int(z_header_len)
        if len(rest) < z_header_len:
            raise ValueError('Compressed header len shorter than all bytes')
        z_header = rest[:z_header_len]
        header_len = int(header_len)
        header = zlib.decompress(z_header)
        if len(header) != header_len:
            raise ValueError('invalid length for decompressed bytes')
        del z_header
        block_len = int(block_len)
        if len(rest) != z_header_len + block_len:
            raise ValueError('Invalid length for block')
        block_bytes = rest[z_header_len:]
        del rest
        # So now we have a valid GCB, we just need to parse the factories that
        # were sent to us
        header_lines = header.split('\n')
        del header
        last = header_lines.pop()
        if last != '':
            raise ValueError('header lines did not end with a trailing'
                             ' newline')
        if len(header_lines) % 4 != 0:
            raise ValueError('The header was not an even multiple of 4 lines')
        block = GroupCompressBlock.from_bytes(block_bytes)
        del block_bytes
        result = cls(block)
        for start in xrange(0, len(header_lines), 4):
            # intern()?
            key = tuple(header_lines[start].split('\x00'))
            parents_line = header_lines[start+1]
            if parents_line == 'None:':
                parents = None
            else:
                parents = tuple([tuple(segment.split('\x00'))
                                 for segment in parents_line.split('\t')
                                 if segment])
            start_offset = int(header_lines[start+2])
            end_offset = int(header_lines[start+3])
            result.add_factory(key, parents, start_offset, end_offset)
        return result


def network_block_to_records(storage_kind, bytes, line_end):
    if storage_kind != 'groupcompress-block':
        raise ValueError('Unknown storage kind: %s' % (storage_kind,))
    manager = _LazyGroupContentManager.from_bytes(bytes)
    return manager.get_record_stream()
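
# Sketch of the expected use (assumed, based on the record-stream API): a
# consumer of network bytes re-inflates the records with, e.g.
#   for record in network_block_to_records('groupcompress-block', data, None):
#       record.get_bytes_as('fulltext')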
|

class _CommonGroupCompressor(object):

    def __init__(self):
        """Create a GroupCompressor."""
        self.chunks = []
        self._last = None
        self.endpoint = 0
        self.input_bytes = 0
        self.labels_deltas = {}
        self._delta_index = None # Set by the children
        self._block = GroupCompressBlock()

    def compress(self, key, bytes, expected_sha, nostore_sha=None, soft=False):
        """Compress lines with label key.

        :param key: A key tuple. It is stored in the output
            for identification of the text during decompression. If the last
            element is 'None' it is replaced with the sha1 of the text -
            e.g. sha1:xxxxxxx.
        :param bytes: The bytes to be compressed
        :param expected_sha: If non-None, the sha the lines are believed to
            have. During compression the sha is calculated; a mismatch will
            cause an error.
        :param nostore_sha: If the computed sha1 sum matches, we will raise
            ExistingContent rather than adding the text.
        :param soft: Do a 'soft' compression. This means that we require larger
            ranges to match to be considered for a copy command.

        :return: The sha1 of lines, the start and end offsets in the delta, and
            the type ('fulltext' or 'delta').

        :seealso VersionedFiles.add_lines:
        """
        if not bytes: # empty, like a dir entry, etc
            if nostore_sha == _null_sha1:
                raise errors.ExistingContent()
            return _null_sha1, 0, 0, 'fulltext'
        # we assume someone knew what they were doing when they passed it in
        if expected_sha is not None:
            sha1 = expected_sha
        else:
            sha1 = osutils.sha_string(bytes)
        if nostore_sha is not None:
            if sha1 == nostore_sha:
                raise errors.ExistingContent()
        if key[-1] is None:
            key = key[:-1] + ('sha1:' + sha1,)

        start, end, type = self._compress(key, bytes, len(bytes) / 2, soft)
        return sha1, start, end, type
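
    # Usage sketch (hypothetical key/bytes): a concrete compressor does
    #   sha1, start, end, kind = c.compress(('file-id', 'rev-id'), bytes, None)
    # and the text can later be re-read with c.extract(('file-id', 'rev-id')).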
|
    def _compress(self, key, bytes, max_delta_size, soft=False):
        """Compress lines with label key.

        :param key: A key tuple. It is stored in the output for identification
            of the text during decompression.

        :param bytes: The bytes to be compressed

        :param max_delta_size: The size above which we issue a fulltext instead
            of a delta.

        :param soft: Do a 'soft' compression. This means that we require larger
            ranges to match to be considered for a copy command.

        :return: The start and end offsets in the delta, and the type
            ('fulltext' or 'delta').
        """
        raise NotImplementedError(self._compress)

    def extract(self, key):
        """Extract a key previously added to the compressor.

        :param key: The key to extract.
        :return: An iterable over bytes and the sha1.
        """
        (start_byte, start_chunk, end_byte, end_chunk) = self.labels_deltas[key]
        delta_chunks = self.chunks[start_chunk:end_chunk]
        stored_bytes = ''.join(delta_chunks)
        if stored_bytes[0] == 'f':
            fulltext_len, offset = decode_base128_int(stored_bytes[1:10])
            data_len = fulltext_len + 1 + offset
            if data_len != len(stored_bytes):
                raise ValueError('Index claimed fulltext len, but stored bytes'
                                 ' claim %s != %s'
                                 % (len(stored_bytes), data_len))
            bytes = stored_bytes[offset + 1:]
        else:
            # XXX: This is inefficient at best
            source = ''.join(self.chunks[:start_chunk])
            if stored_bytes[0] != 'd':
                raise ValueError('Unknown content kind, bytes claim %s'
                                 % (stored_bytes[0],))
            delta_len, offset = decode_base128_int(stored_bytes[1:10])
            data_len = delta_len + 1 + offset
            if data_len != len(stored_bytes):
                raise ValueError('Index claimed delta len, but stored bytes'
                                 ' claim %s != %s'
                                 % (len(stored_bytes), data_len))
            bytes = apply_delta(source, stored_bytes[offset + 1:])
        bytes_sha1 = osutils.sha_string(bytes)
        return bytes, bytes_sha1

    def flush(self):
        """Finish this group, creating a formatted stream.

        After calling this, the compressor should no longer be used
        """
        content = ''.join(self.chunks)
        self.chunks = None
        self._delta_index = None
        self._block.set_content(content)
        return self._block

    def pop_last(self):
        """Call this if you want to 'revoke' the last compression.

        After this, the data structures will be rolled back, but you cannot do
        more compression.
        """
        self._delta_index = None
        del self.chunks[self._last[0]:]
        self.endpoint = self._last[1]
        self._last = None
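        # (Intent sketch: a caller packing texts into size-capped groups can
        # compress() a text, notice the group grew too large, pop_last(),
        # flush() the finished group, and re-add the text to a fresh
        # compressor.)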
|
    def ratio(self):
        """Return the overall compression ratio."""
        return float(self.input_bytes) / float(self.endpoint)
770 |
||
771 |
class PythonGroupCompressor(_CommonGroupCompressor): |
|
772 |
||
3735.40.2
by John Arbash Meinel
Add a groupcompress.encode_copy_instruction function. |
773 |
def __init__(self): |
4241.6.6
by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core. |
774 |
"""Create a GroupCompressor.
|
775 |
||
776 |
Used only if the pyrex version is not available.
|
|
777 |
"""
|
|
778 |
super(PythonGroupCompressor, self).__init__() |
|
3735.40.17
by John Arbash Meinel
Change the attribute from 'lines' to 'chunks' to make it more |
779 |
self._delta_index = LinesDeltaIndex([]) |
780 |
# The actual content is managed by LinesDeltaIndex
|
|
781 |
self.chunks = self._delta_index.lines |
|
4241.6.6
by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core. |
782 |
|
783 |
def _compress(self, key, bytes, max_delta_size, soft=False): |
|
784 |
"""see _CommonGroupCompressor._compress"""
|
|
785 |
input_len = len(bytes) |
|
3735.40.2
by John Arbash Meinel
Add a groupcompress.encode_copy_instruction function. |
786 |
new_lines = osutils.split_lines(bytes) |
4241.6.6
by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core. |
787 |
out_lines, index_lines = self._delta_index.make_delta( |
788 |
new_lines, bytes_length=input_len, soft=soft) |
|
3735.40.4
by John Arbash Meinel
Factor out tests that rely on the exact bytecode. |
789 |
delta_length = sum(map(len, out_lines)) |
4241.6.6
by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core. |
790 |
if delta_length > max_delta_size: |
3735.40.4
by John Arbash Meinel
Factor out tests that rely on the exact bytecode. |
791 |
# The delta is longer than the fulltext, insert a fulltext
|
792 |
type = 'fulltext' |
|
4241.6.6
by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core. |
793 |
out_lines = ['f', encode_base128_int(input_len)] |
3735.40.4
by John Arbash Meinel
Factor out tests that rely on the exact bytecode. |
794 |
out_lines.extend(new_lines) |
795 |
index_lines = [False, False] |
|
796 |
index_lines.extend([True] * len(new_lines)) |
|
797 |
else: |
|
798 |
# this is a worthy delta, output it
|
|
799 |
type = 'delta' |
|
800 |
out_lines[0] = 'd' |
|
801 |
# Update the delta_length to include those two encoded integers
|
|
802 |
out_lines[1] = encode_base128_int(delta_length) |
|
4241.6.6
by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core. |
803 |
# Before insertion
|
804 |
start = self.endpoint |
|
805 |
chunk_start = len(self.chunks) |
|
4241.17.2
by John Arbash Meinel
PythonGroupCompressor needs to support pop_last() properly. |
806 |
self._last = (chunk_start, self.endpoint) |
3735.40.17
by John Arbash Meinel
Change the attribute from 'lines' to 'chunks' to make it more |
807 |
self._delta_index.extend_lines(out_lines, index_lines) |
808 |
self.endpoint = self._delta_index.endpoint |
|
4241.6.6
by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core. |
809 |
self.input_bytes += input_len |
810 |
chunk_end = len(self.chunks) |
|
3735.40.18
by John Arbash Meinel
Get rid of the entries dict in GroupCompressBlock. |
811 |
self.labels_deltas[key] = (start, chunk_start, |
812 |
self.endpoint, chunk_end) |
|
4241.6.6
by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core. |
813 |
return start, self.endpoint, type |
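# Record layout produced above (a sketch; the exact delta lines depend
# on LinesDeltaIndex):
#   fulltext: ['f', encode_base128_int(input_len)] + new_lines
#   delta:    ['d', encode_base128_int(delta_length)] + delta lines
# The base128 length lets a reader find the end of the record even if
# the index is lost.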
814 |
||
815 |
||
816 |
class PyrexGroupCompressor(_CommonGroupCompressor): |
|
0.17.3
by Robert Collins
new encoder, allows non monotonically increasing sequence matches for moar compression. |
817 |
"""Produce a serialised group of compressed texts.
|
0.23.6
by John Arbash Meinel
Start stripping out the actual GroupCompressor |
818 |
|
0.17.3
by Robert Collins
new encoder, allows non monotonically increasing sequence matches for moar compression. |
819 |
It contains code very similar to SequenceMatcher because it performs a similar
|
820 |
task. However, some key differences apply:
|
|
821 |
- there is no junk, we want a minimal edit not a human readable diff.
|
|
822 |
- we don't filter very common lines (because we don't know where a good
|
|
823 |
range will start, and after the first text we want to be emitting minimal
|
|
824 |
edits only).
|
|
825 |
- we chain the left side, not the right side
|
|
826 |
- we incrementally update the adjacency matrix as new lines are provided.
|
|
827 |
- we look for matches in all of the left side, so the routine which does
|
|
828 |
the analogous task of find_longest_match does not need to filter on the
|
|
829 |
left side.
|
|
830 |
"""
|
|
0.17.2
by Robert Collins
Core proof of concept working. |
831 |
|
3735.32.19
by John Arbash Meinel
Get rid of the 'delta' flag to GroupCompressor. It didn't do anything anyway. |
832 |
def __init__(self): |
3735.40.4
by John Arbash Meinel
Factor out tests that rely on the exact bytecode. |
833 |
super(PyrexGroupCompressor, self).__init__() |
4241.6.6
by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core. |
834 |
self._delta_index = DeltaIndex() |
0.23.6
by John Arbash Meinel
Start stripping out the actual GroupCompressor |
835 |
|
4241.6.6
by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core. |
836 |
def _compress(self, key, bytes, max_delta_size, soft=False): |
837 |
"""see _CommonGroupCompressor._compress"""
|
|
0.23.52
by John Arbash Meinel
Use the max_delta flag. |
838 |
input_len = len(bytes) |
0.23.12
by John Arbash Meinel
Add a 'len:' field to the data. |
839 |
# By having action/label/sha1/len, we can parse the group if the index
|
840 |
# was ever destroyed, we have the key in 'label', we know the final
|
|
841 |
# bytes are valid from sha1, and we know where to find the end of this
|
|
842 |
# record because of 'len'. (the delta record itself will store the
|
|
843 |
# total length for the expanded record)
|
|
0.23.13
by John Arbash Meinel
Factor out the ability to have/not have labels. |
844 |
# 'len: %d\n' costs approximately 1% increase in total data
|
845 |
# Having the labels at all costs us 9-10% increase, 38% increase for
|
|
846 |
# inventory pages, and 5.8% increase for text pages
|
|
0.25.6
by John Arbash Meinel
(tests broken) implement the basic ability to have a separate header |
847 |
# new_chunks = ['label:%s\nsha1:%s\n' % (label, sha1)]
|
0.23.33
by John Arbash Meinel
Fix a bug when handling multiple large-range copies. |
848 |
if self._delta_index._source_offset != self.endpoint: |
849 |
raise AssertionError('_source_offset != endpoint' |
|
850 |
' somehow the DeltaIndex got out of sync with'
|
|
851 |
' the output lines') |
|
0.23.52
by John Arbash Meinel
Use the max_delta flag. |
852 |
delta = self._delta_index.make_delta(bytes, max_delta_size) |
853 |
if delta is None: |
|
0.25.10
by John Arbash Meinel
Play around with detecting compression breaks. |
854 |
type = 'fulltext' |
0.17.36
by John Arbash Meinel
Adding a mini-len to the delta/fulltext bytes |
855 |
enc_length = encode_base128_int(len(bytes)) |
856 |
len_mini_header = 1 + len(enc_length) |
|
857 |
self._delta_index.add_source(bytes, len_mini_header) |
|
858 |
new_chunks = ['f', enc_length, bytes] |
|
0.23.9
by John Arbash Meinel
We now basically have full support for using diff-delta as the compressor. |
859 |
else: |
0.25.10
by John Arbash Meinel
Play around with detecting compression breaks. |
860 |
type = 'delta' |
0.17.36
by John Arbash Meinel
Adding a mini-len to the delta/fulltext bytes |
861 |
enc_length = encode_base128_int(len(delta)) |
862 |
len_mini_header = 1 + len(enc_length) |
|
863 |
new_chunks = ['d', enc_length, delta] |
|
3735.38.5
by John Arbash Meinel
A bit of testing showed that _FAST=True was actually *slower*. |
864 |
self._delta_index.add_delta_source(delta, len_mini_header) |
3735.40.18
by John Arbash Meinel
Get rid of the entries dict in GroupCompressBlock. |
865 |
# Before insertion
|
866 |
start = self.endpoint |
|
867 |
chunk_start = len(self.chunks) |
|
868 |
# Now output these bytes
|
|
3735.40.17
by John Arbash Meinel
Change the attribute from 'lines' to 'chunks' to make it more |
869 |
self._output_chunks(new_chunks) |
0.23.6
by John Arbash Meinel
Start stripping out the actual GroupCompressor |
870 |
self.input_bytes += input_len |
3735.40.18
by John Arbash Meinel
Get rid of the entries dict in GroupCompressBlock. |
871 |
chunk_end = len(self.chunks) |
872 |
self.labels_deltas[key] = (start, chunk_start, |
|
873 |
self.endpoint, chunk_end) |
|
0.23.29
by John Arbash Meinel
Forgot to add the delta bytes to the index objects. |
874 |
if self._delta_index._source_offset != self.endpoint: |
875 |
raise AssertionError('the delta index is out of sync' |
|
876 |
' with the output lines %s != %s' |
|
877 |
% (self._delta_index._source_offset, self.endpoint)) |
|
4241.6.6
by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core. |
878 |
return start, self.endpoint, type |
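# Mini-header sketch: every record starts with one type byte ('f' or
# 'd') plus a base128-encoded length, so len_mini_header =
# 1 + len(enc_length) tells the DeltaIndex how many header bytes
# precede the source data it indexes.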
0.17.2
by Robert Collins
Core proof of concept working. |
879 |
|
3735.40.17
by John Arbash Meinel
Change the attribute from 'lines' to 'chunks' to make it more |
880 |
def _output_chunks(self, new_chunks): |
0.23.9
by John Arbash Meinel
We now basically have full support for using diff-delta as the compressor. |
881 |
"""Output some chunks.
|
882 |
||
883 |
:param new_chunks: The chunks to output.
|
|
884 |
"""
|
|
3735.40.17
by John Arbash Meinel
Change the attribute from 'lines' to 'chunks' to make it more |
885 |
self._last = (len(self.chunks), self.endpoint) |
0.17.12
by Robert Collins
Encode copy ranges as bytes not lines, halves decode overhead. |
886 |
endpoint = self.endpoint |
3735.40.17
by John Arbash Meinel
Change the attribute from 'lines' to 'chunks' to make it more |
887 |
self.chunks.extend(new_chunks) |
0.23.9
by John Arbash Meinel
We now basically have full support for using diff-delta as the compressor. |
888 |
endpoint += sum(map(len, new_chunks)) |
0.17.12
by Robert Collins
Encode copy ranges as bytes not lines, halves decode overhead. |
889 |
self.endpoint = endpoint |
0.17.3
by Robert Collins
new encoder, allows non monotonically increasing sequence matches for moar compression. |
890 |
|
0.17.11
by Robert Collins
Add extraction of just-compressed texts to support converting from knits. |
891 |
|
0.17.1
by Robert Collins
Starting point. Interface tests hooked up and failing. |
892 |
def make_pack_factory(graph, delta, keylength): |
893 |
"""Create a factory for creating a pack based groupcompress.
|
|
894 |
||
895 |
This is only functional enough to run interface tests; it doesn't try to
|
|
896 |
provide a full pack environment.
|
|
3735.31.2
by John Arbash Meinel
Cleanup trailing whitespace, get test_source to pass by removing asserts. |
897 |
|
0.17.1
by Robert Collins
Starting point. Interface tests hooked up and failing. |
898 |
:param graph: Store a graph.
|
899 |
:param delta: Delta compress contents.
|
|
900 |
:param keylength: How long should keys be.
|
|
901 |
"""
|
|
902 |
def factory(transport): |
|
3735.32.2
by John Arbash Meinel
The 'delta' flag has no effect on the content (all GC is delta'd), |
903 |
parents = graph |
0.17.1
by Robert Collins
Starting point. Interface tests hooked up and failing. |
904 |
ref_length = 0 |
905 |
if graph: |
|
0.20.29
by Ian Clatworthy
groupcompress.py code cleanups |
906 |
ref_length = 1 |
0.17.7
by Robert Collins
Update for current index2 changes. |
907 |
graph_index = BTreeBuilder(reference_lists=ref_length, |
0.17.1
by Robert Collins
Starting point. Interface tests hooked up and failing. |
908 |
key_elements=keylength) |
909 |
stream = transport.open_write_stream('newpack') |
|
910 |
writer = pack.ContainerWriter(stream.write) |
|
911 |
writer.begin() |
|
912 |
index = _GCGraphIndex(graph_index, lambda:True, parents=parents, |
|
0.17.9
by Robert Collins
Initial stab at repository format support. |
913 |
add_callback=graph_index.add_nodes) |
0.17.1
by Robert Collins
Starting point. Interface tests hooked up and failing. |
914 |
access = _DirectPackAccess({}) |
915 |
access.set_writer(writer, graph_index, (transport, 'newpack')) |
|
0.17.2
by Robert Collins
Core proof of concept working. |
916 |
result = GroupCompressVersionedFiles(index, access, delta) |
0.17.1
by Robert Collins
Starting point. Interface tests hooked up and failing. |
917 |
result.stream = stream |
918 |
result.writer = writer |
|
919 |
return result |
|
920 |
return factory |
|
921 |
||
922 |
||
923 |
def cleanup_pack_group(versioned_files): |
|
0.17.23
by Robert Collins
Only decompress as much of the zlib data as is needed to read the text recipe. |
924 |
versioned_files.writer.end() |
0.17.1
by Robert Collins
Starting point. Interface tests hooked up and failing. |
925 |
versioned_files.stream.close() |
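# Minimal usage sketch (assumes a writable bzrlib transport object;
# interface-test quality only, per the factory's docstring):
#   factory = make_pack_factory(graph=True, delta=True, keylength=1)
#   vf = factory(transport)
#   # ... add texts / streams to vf ...
#   cleanup_pack_group(vf)  # ends the pack writer, closes the stream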
926 |
||
927 |
||
928 |
class GroupCompressVersionedFiles(VersionedFiles): |
|
929 |
"""A group-compress based VersionedFiles implementation."""
|
|
930 |
||
0.17.2
by Robert Collins
Core proof of concept working. |
931 |
def __init__(self, index, access, delta=True): |
0.17.1
by Robert Collins
Starting point. Interface tests hooked up and failing. |
932 |
"""Create a GroupCompressVersionedFiles object.
|
933 |
||
934 |
:param index: The index object storing access and graph data.
|
|
935 |
:param access: The access object storing raw data.
|
|
0.17.2
by Robert Collins
Core proof of concept working. |
936 |
:param delta: Whether to delta compress or just entropy compress.
|
937 |
"""
|
|
938 |
self._index = index |
|
939 |
self._access = access |
|
940 |
self._delta = delta |
|
0.17.11
by Robert Collins
Add extraction of just-compressed texts to support converting from knits. |
941 |
self._unadded_refs = {} |
0.17.24
by Robert Collins
Add a group cache to decompression, 5 times faster than knit at decompression when accessing everything in a group. |
942 |
self._group_cache = LRUSizeCache(max_size=50*1024*1024) |
3735.31.7
by John Arbash Meinel
Start bringing in stacking support for Groupcompress repos. |
943 |
self._fallback_vfs = [] |
0.17.2
by Robert Collins
Core proof of concept working. |
944 |
|
945 |
def add_lines(self, key, parents, lines, parent_texts=None, |
|
946 |
left_matching_blocks=None, nostore_sha=None, random_id=False, |
|
947 |
check_content=True): |
|
948 |
"""Add a text to the store.
|
|
949 |
||
950 |
:param key: The key tuple of the text to add.
|
|
951 |
:param parents: The parents key tuples of the text to add.
|
|
952 |
:param lines: A list of lines. Each line must be a bytestring, and all
|
|
953 |
of them except the last must be terminated with \n and contain no
|
|
954 |
other \n's. The last line may either contain no \n's or a single
|
|
955 |
terminating \n. If the lines list does not meet this constraint, the add
|
|
956 |
routine may error or may succeed - but you will be unable to read
|
|
957 |
the data back accurately. (Checking the lines have been split
|
|
958 |
correctly is expensive and extremely unlikely to catch bugs so it
|
|
959 |
is not done at runtime unless check_content is True.)
|
|
3735.31.2
by John Arbash Meinel
Cleanup trailing whitespace, get test_source to pass by removing asserts. |
960 |
:param parent_texts: An optional dictionary containing the opaque
|
0.17.2
by Robert Collins
Core proof of concept working. |
961 |
representations of some or all of the parents of version_id to
|
962 |
allow delta optimisations. VERY IMPORTANT: the texts must be those
|
|
963 |
returned by add_lines or data corruption can be caused.
|
|
964 |
:param left_matching_blocks: a hint about which areas are common
|
|
965 |
between the text and its left-hand-parent. The format is
|
|
966 |
the SequenceMatcher.get_matching_blocks format.
|
|
967 |
:param nostore_sha: Raise ExistingContent and do not add the lines to
|
|
968 |
the versioned file if the digest of the lines matches this.
|
|
969 |
:param random_id: If True a random id has been selected rather than
|
|
970 |
an id determined by some deterministic process such as a converter
|
|
971 |
from a foreign VCS. When True the backend may choose not to check
|
|
972 |
for uniqueness of the resulting key within the versioned file, so
|
|
973 |
this should only be done when the result is expected to be unique
|
|
974 |
anyway.
|
|
975 |
:param check_content: If True, the lines supplied are verified to be
|
|
976 |
bytestrings that are correctly formed lines.
|
|
977 |
:return: The text sha1, the number of bytes in the text, and an opaque
|
|
978 |
representation of the inserted version which can be provided
|
|
979 |
back to future add_lines calls in the parent_texts dictionary.
|
|
980 |
"""
|
|
981 |
self._index._check_write_ok() |
|
982 |
self._check_add(key, lines, random_id, check_content) |
|
983 |
if parents is None: |
|
984 |
# The caller might pass None if there is no graph data, but kndx
|
|
985 |
# indexes can't directly store that, so we give them
|
|
986 |
# an empty tuple instead.
|
|
987 |
parents = () |
|
988 |
# double handling for now. Make it work until then.
|
|
0.20.5
by John Arbash Meinel
Finish the Fulltext => Chunked conversions so that we work in the more-efficient Chunks. |
989 |
length = sum(map(len, lines)) |
990 |
record = ChunkedContentFactory(key, parents, None, lines) |
|
3735.31.12
by John Arbash Meinel
Push nostore_sha down through the stack. |
991 |
sha1 = list(self._insert_record_stream([record], random_id=random_id, |
992 |
nostore_sha=nostore_sha))[0] |
|
0.20.5
by John Arbash Meinel
Finish the Fulltext => Chunked conversions so that we work in the more-efficient Chunks. |
993 |
return sha1, length, None |
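# Example call (illustrative key and text; single-element keys):
#   sha1, length, _ = vf.add_lines(('rev-1',), [], ['hello\n'])
# The third return value is always None here: this implementation does
# not hand back an opaque parent_texts representation.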
0.17.2
by Robert Collins
Core proof of concept working. |
994 |
|
3735.31.7
by John Arbash Meinel
Start bringing in stacking support for Groupcompress repos. |
995 |
def add_fallback_versioned_files(self, a_versioned_files): |
996 |
"""Add a source of texts for texts not present in this knit.
|
|
997 |
||
998 |
:param a_versioned_files: A VersionedFiles object.
|
|
999 |
"""
|
|
1000 |
self._fallback_vfs.append(a_versioned_files) |
|
1001 |
||
0.17.4
by Robert Collins
Annotate. |
1002 |
def annotate(self, key): |
1003 |
"""See VersionedFiles.annotate."""
|
|
1004 |
graph = Graph(self) |
|
0.17.5
by Robert Collins
nograph tests completely passing. |
1005 |
parent_map = self.get_parent_map([key]) |
1006 |
if not parent_map: |
|
1007 |
raise errors.RevisionNotPresent(key, self) |
|
1008 |
if parent_map[key] is not None: |
|
1009 |
search = graph._make_breadth_first_searcher([key]) |
|
1010 |
keys = set() |
|
1011 |
while True: |
|
1012 |
try: |
|
1013 |
present, ghosts = search.next_with_ghosts() |
|
1014 |
except StopIteration: |
|
1015 |
break
|
|
1016 |
keys.update(present) |
|
1017 |
parent_map = self.get_parent_map(keys) |
|
1018 |
else: |
|
1019 |
keys = [key] |
|
1020 |
parent_map = {key:()} |
|
4371.2.1
by Vincent Ladeuil
Start fixing annotate for gc. |
1021 |
# So we used Graph(self) to load the parent_map, but now that we have
|
1022 |
# it, we can just query the parent map directly, so create a new Graph
|
|
1023 |
# object
|
|
1024 |
graph = _mod_graph.Graph(_mod_graph.DictParentsProvider(parent_map)) |
|
0.17.4
by Robert Collins
Annotate. |
1025 |
head_cache = _mod_graph.FrozenHeadsCache(graph) |
1026 |
parent_cache = {} |
|
1027 |
reannotate = annotate.reannotate |
|
1028 |
for record in self.get_record_stream(keys, 'topological', True): |
|
1029 |
key = record.key |
|
4371.2.1
by Vincent Ladeuil
Start fixing annotate for gc. |
1030 |
lines = osutils.chunks_to_lines(record.get_bytes_as('chunked')) |
0.17.4
by Robert Collins
Annotate. |
1031 |
parent_lines = [parent_cache[parent] for parent in parent_map[key]] |
1032 |
parent_cache[key] = list( |
|
4371.2.1
by Vincent Ladeuil
Start fixing annotate for gc. |
1033 |
reannotate(parent_lines, lines, key, None, head_cache)) |
0.17.4
by Robert Collins
Annotate. |
1034 |
return parent_cache[key] |
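# Result shape (illustrative): a list of (origin_key, line) pairs,
# e.g. [(('rev-1',), 'hello\n'), ...], as produced by
# annotate.reannotate over a topologically ordered record stream.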
1035 |
||
0.17.5
by Robert Collins
nograph tests completely passing. |
1036 |
def check(self, progress_bar=None): |
1037 |
"""See VersionedFiles.check()."""
|
|
1038 |
keys = self.keys() |
|
1039 |
for record in self.get_record_stream(keys, 'unordered', True): |
|
1040 |
record.get_bytes_as('fulltext') |
|
1041 |
||
0.17.2
by Robert Collins
Core proof of concept working. |
1042 |
def _check_add(self, key, lines, random_id, check_content): |
1043 |
"""check that version_id and lines are safe to add."""
|
|
1044 |
version_id = key[-1] |
|
0.17.26
by Robert Collins
Working better --gc-plain-chk. |
1045 |
if version_id is not None: |
4241.6.6
by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core. |
1046 |
if osutils.contains_whitespace(version_id): |
3735.31.1
by John Arbash Meinel
Bring the groupcompress plugin into the brisbane-core branch. |
1047 |
raise errors.InvalidRevisionId(version_id, self) |
0.17.2
by Robert Collins
Core proof of concept working. |
1048 |
self.check_not_reserved_id(version_id) |
1049 |
# TODO: If random_id==False and the key is already present, we should
|
|
1050 |
# probably check that the existing content is identical to what is
|
|
1051 |
# being inserted, and otherwise raise an exception. This would make
|
|
1052 |
# the bundle code simpler.
|
|
1053 |
if check_content: |
|
1054 |
self._check_lines_not_unicode(lines) |
|
1055 |
self._check_lines_are_lines(lines) |
|
1056 |
||
0.17.5
by Robert Collins
nograph tests completely passing. |
1057 |
def get_parent_map(self, keys): |
3735.31.7
by John Arbash Meinel
Start bringing in stacking support for Groupcompress repos. |
1058 |
"""Get a map of the graph parents of keys.
|
0.17.5
by Robert Collins
nograph tests completely passing. |
1059 |
|
1060 |
:param keys: The keys to look up parents for.
|
|
1061 |
:return: A mapping from keys to parents. Absent keys are absent from
|
|
1062 |
the mapping.
|
|
1063 |
"""
|
|
3735.31.7
by John Arbash Meinel
Start bringing in stacking support for Groupcompress repos. |
1064 |
return self._get_parent_map_with_sources(keys)[0] |
1065 |
||
1066 |
def _get_parent_map_with_sources(self, keys): |
|
1067 |
"""Get a map of the parents of keys.
|
|
1068 |
||
1069 |
:param keys: The keys to look up parents for.
|
|
1070 |
:return: A tuple. The first element is a mapping from keys to parents.
|
|
1071 |
Absent keys are absent from the mapping. The second element is a
|
|
1072 |
list with the locations each key was found in. The first element
|
|
1073 |
is the in-this-knit parents, the second the first fallback source,
|
|
1074 |
and so on.
|
|
1075 |
"""
|
|
0.17.5
by Robert Collins
nograph tests completely passing. |
1076 |
result = {} |
3735.31.7
by John Arbash Meinel
Start bringing in stacking support for Groupcompress repos. |
1077 |
sources = [self._index] + self._fallback_vfs |
0.17.5
by Robert Collins
nograph tests completely passing. |
1078 |
source_results = [] |
1079 |
missing = set(keys) |
|
1080 |
for source in sources: |
|
1081 |
if not missing: |
|
1082 |
break
|
|
1083 |
new_result = source.get_parent_map(missing) |
|
1084 |
source_results.append(new_result) |
|
1085 |
result.update(new_result) |
|
1086 |
missing.difference_update(set(new_result)) |
|
3735.31.7
by John Arbash Meinel
Start bringing in stacking support for Groupcompress repos. |
1087 |
return result, source_results |
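# Shape sketch (illustrative keys): with ('k2',) only in a fallback,
#   result         == {('k1',): (('k0',),), ('k2',): ()}
#   source_results == [{('k1',): (('k0',),)}, {('k2',): ()}]
# i.e. one parent-map dict per source, in search order.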
0.17.5
by Robert Collins
nograph tests completely passing. |
1088 |
|
0.25.6
by John Arbash Meinel
(tests broken) implement the basic ability to have a separate header |
1089 |
def _get_block(self, index_memo): |
0.20.14
by John Arbash Meinel
Factor out _get_group_and_delta_lines. |
1090 |
read_memo = index_memo[0:3] |
1091 |
# get the group:
|
|
1092 |
try: |
|
0.25.6
by John Arbash Meinel
(tests broken) implement the basic ability to have a separate header |
1093 |
block = self._group_cache[read_memo] |
0.20.14
by John Arbash Meinel
Factor out _get_group_and_delta_lines. |
1094 |
except KeyError: |
1095 |
# read the group
|
|
1096 |
zdata = self._access.get_raw_records([read_memo]).next() |
|
1097 |
# decompress - whole thing - this is not a bug, as it
|
|
1098 |
# permits caching. We might want to store the partially
|
|
1099 |
# decompressed group and decompress object, so that recent
|
|
1100 |
# texts are not penalised by big groups.
|
|
0.25.6
by John Arbash Meinel
(tests broken) implement the basic ability to have a separate header |
1101 |
block = GroupCompressBlock.from_bytes(zdata) |
1102 |
self._group_cache[read_memo] = block |
|
0.20.14
by John Arbash Meinel
Factor out _get_group_and_delta_lines. |
1103 |
# cheapo debugging:
|
1104 |
# print len(zdata), len(plain)
|
|
1105 |
# parse - requires split_lines, better to have byte offsets
|
|
1106 |
# here (but not by much - we only split the region for the
|
|
1107 |
# recipe), and we often want to end up with lines anyway.
|
|
0.25.6
by John Arbash Meinel
(tests broken) implement the basic ability to have a separate header |
1108 |
return block |
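# Caching note (sketch): read_memo is the (index, start, length) triple
# naming a group on disk, so repeated extractions from the same group
# hit the 50MB LRUSizeCache instead of re-reading and re-decompressing
# the whole block.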
0.20.14
by John Arbash Meinel
Factor out _get_group_and_delta_lines. |
1109 |
|
0.20.18
by John Arbash Meinel
Implement new handling of get_bytes_as(), and get_missing_compression_parent_keys() |
1110 |
def get_missing_compression_parent_keys(self): |
1111 |
"""Return the keys of missing compression parents.
|
|
1112 |
||
1113 |
Missing compression parents occur when a record stream was missing
|
|
1114 |
basis texts, or an index was scanned that had missing basis texts.
|
|
1115 |
"""
|
|
1116 |
# GroupCompress cannot currently reference texts that are not in the
|
|
1117 |
# group, so this is valid for now
|
|
1118 |
return frozenset() |
|
1119 |
||
0.17.5
by Robert Collins
nograph tests completely passing. |
1120 |
def get_record_stream(self, keys, ordering, include_delta_closure): |
1121 |
"""Get a stream of records for keys.
|
|
1122 |
||
1123 |
:param keys: The keys to include.
|
|
1124 |
:param ordering: Either 'unordered' or 'topological'. A topologically
|
|
1125 |
sorted stream has compression parents strictly before their
|
|
1126 |
children.
|
|
1127 |
:param include_delta_closure: If True then the closure across any
|
|
1128 |
compression parents will be included (in the opaque data).
|
|
1129 |
:return: An iterator of ContentFactory objects, each of which is only
|
|
1130 |
valid until the iterator is advanced.
|
|
1131 |
"""
|
|
1132 |
# keys might be a generator
|
|
0.22.6
by John Arbash Meinel
Clustering chk pages properly makes a big difference. |
1133 |
orig_keys = list(keys) |
3735.31.18
by John Arbash Meinel
Implement stacking support across all ordering implementations. |
1134 |
keys = set(keys) |
0.17.5
by Robert Collins
nograph tests completely passing. |
1135 |
if not keys: |
1136 |
return
|
|
0.20.23
by John Arbash Meinel
Add a progress indicator for chk pages. |
1137 |
if (not self._index.has_graph |
3735.31.14
by John Arbash Meinel
Change the gc-optimal to 'groupcompress' |
1138 |
and ordering in ('topological', 'groupcompress')): |
0.17.5
by Robert Collins
nograph tests completely passing. |
1139 |
# Cannot order topologically when no graph has been stored,
|
3735.31.18
by John Arbash Meinel
Implement stacking support across all ordering implementations. |
1140 |
# but we allow 'as-requested' or 'unordered'
|
0.17.5
by Robert Collins
nograph tests completely passing. |
1141 |
ordering = 'unordered' |
3735.31.18
by John Arbash Meinel
Implement stacking support across all ordering implementations. |
1142 |
|
1143 |
remaining_keys = keys |
|
1144 |
while True: |
|
1145 |
try: |
|
1146 |
keys = set(remaining_keys) |
|
1147 |
for content_factory in self._get_remaining_record_stream(keys, |
|
1148 |
orig_keys, ordering, include_delta_closure): |
|
1149 |
remaining_keys.discard(content_factory.key) |
|
1150 |
yield content_factory |
|
1151 |
return
|
|
1152 |
except errors.RetryWithNewPacks, e: |
|
1153 |
self._access.reload_or_raise(e) |
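# Typical consumption sketch ('fulltext' is one of the storage kinds a
# record can serve):
#   for record in vf.get_record_stream(keys, 'unordered', True):
#       if record.storage_kind == 'absent':
#           continue
#       text = record.get_bytes_as('fulltext')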
|
1154 |
||
1155 |
def _find_from_fallback(self, missing): |
|
1156 |
"""Find whatever keys you can from the fallbacks.
|
|
1157 |
||
1158 |
:param missing: A set of missing keys. This set will be mutated as keys
|
|
1159 |
are found from a fallback_vfs
|
|
1160 |
:return: (parent_map, key_to_source_map, source_results)
|
|
1161 |
parent_map the overall key => parent_keys
|
|
1162 |
key_to_source_map a dict from {key: source}
|
|
1163 |
source_results a list of (source, keys) tuples
|
|
1164 |
"""
|
|
1165 |
parent_map = {} |
|
1166 |
key_to_source_map = {} |
|
1167 |
source_results = [] |
|
1168 |
for source in self._fallback_vfs: |
|
1169 |
if not missing: |
|
1170 |
break
|
|
1171 |
source_parents = source.get_parent_map(missing) |
|
1172 |
parent_map.update(source_parents) |
|
1173 |
source_parents = list(source_parents) |
|
1174 |
source_results.append((source, source_parents)) |
|
1175 |
key_to_source_map.update((key, source) for key in source_parents) |
|
1176 |
missing.difference_update(source_parents) |
|
1177 |
return parent_map, key_to_source_map, source_results |
|
1178 |
||
1179 |
def _get_ordered_source_keys(self, ordering, parent_map, key_to_source_map): |
|
1180 |
"""Get the (source, [keys]) list.
|
|
1181 |
||
1182 |
The returned objects should be in the order defined by 'ordering',
|
|
1183 |
which can weave between different sources.
|
|
1184 |
:param ordering: Must be one of 'topological' or 'groupcompress'
|
|
1185 |
:return: List of [(source, [keys])] tuples, such that all keys are in
|
|
1186 |
the defined order, regardless of source.
|
|
1187 |
"""
|
|
1188 |
if ordering == 'topological': |
|
1189 |
present_keys = topo_sort(parent_map) |
|
1190 |
else: |
|
1191 |
# ordering == 'groupcompress'
|
|
1192 |
# XXX: This only optimizes for the target ordering. We may need
|
|
1193 |
# to balance that with the time it takes to extract
|
|
1194 |
# ordering, by somehow grouping based on
|
|
1195 |
# locations[key][0:3]
|
|
1196 |
present_keys = sort_gc_optimal(parent_map) |
|
1197 |
# Now group by source:
|
|
1198 |
source_keys = [] |
|
1199 |
current_source = None |
|
1200 |
for key in present_keys: |
|
1201 |
source = key_to_source_map.get(key, self) |
|
1202 |
if source is not current_source: |
|
1203 |
source_keys.append((source, [])) |
|
3735.32.12
by John Arbash Meinel
Add groupcompress-block[-ref] as valid stream types. |
1204 |
current_source = source |
3735.31.18
by John Arbash Meinel
Implement stacking support across all ordering implementations. |
1205 |
source_keys[-1][1].append(key) |
1206 |
return source_keys |
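# Result shape (illustrative): [(self, [k1, k2]), (fallback_vf, [k3])];
# consecutive keys from one source are grouped so each source is
# streamed once per contiguous run while preserving the global order.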
|
1207 |
||
1208 |
def _get_as_requested_source_keys(self, orig_keys, locations, unadded_keys, |
|
1209 |
key_to_source_map): |
|
1210 |
source_keys = [] |
|
1211 |
current_source = None |
|
1212 |
for key in orig_keys: |
|
1213 |
if key in locations or key in unadded_keys: |
|
1214 |
source = self |
|
1215 |
elif key in key_to_source_map: |
|
1216 |
source = key_to_source_map[key] |
|
1217 |
else: # absent |
|
1218 |
continue
|
|
1219 |
if source is not current_source: |
|
1220 |
source_keys.append((source, [])) |
|
3735.32.12
by John Arbash Meinel
Add groupcompress-block[-ref] as valid stream types. |
1221 |
current_source = source |
3735.31.18
by John Arbash Meinel
Implement stacking support across all ordering implementations. |
1222 |
source_keys[-1][1].append(key) |
1223 |
return source_keys |
|
1224 |
||
1225 |
def _get_io_ordered_source_keys(self, locations, unadded_keys, |
|
1226 |
source_result): |
|
1227 |
def get_group(key): |
|
1228 |
# This is the group the bytes are stored in, followed by the
|
|
1229 |
# location in the group
|
|
1230 |
return locations[key][0] |
|
1231 |
present_keys = sorted(locations.iterkeys(), key=get_group) |
|
1232 |
# We don't have an ordering for keys in the in-memory object, but
|
|
1233 |
# let's process the in-memory ones first.
|
|
1234 |
present_keys = list(unadded_keys) + present_keys |
|
1235 |
# Now grab all of the ones from other sources
|
|
1236 |
source_keys = [(self, present_keys)] |
|
1237 |
source_keys.extend(source_result) |
|
1238 |
return source_keys |
|
1239 |
||
1240 |
def _get_remaining_record_stream(self, keys, orig_keys, ordering, |
|
1241 |
include_delta_closure): |
|
1242 |
"""Get a stream of records for keys.
|
|
1243 |
||
1244 |
:param keys: The keys to include.
|
|
1245 |
:param ordering: one of 'unordered', 'topological', 'groupcompress' or
|
|
1246 |
'as-requested'
|
|
1247 |
:param include_delta_closure: If True then the closure across any
|
|
1248 |
compression parents will be included (in the opaque data).
|
|
1249 |
:return: An iterator of ContentFactory objects, each of which is only
|
|
1250 |
valid until the iterator is advanced.
|
|
1251 |
"""
|
|
0.17.5
by Robert Collins
nograph tests completely passing. |
1252 |
# Cheap: iterate
|
1253 |
locations = self._index.get_build_details(keys) |
|
3735.31.18
by John Arbash Meinel
Implement stacking support across all ordering implementations. |
1254 |
unadded_keys = set(self._unadded_refs).intersection(keys) |
1255 |
missing = keys.difference(locations) |
|
1256 |
missing.difference_update(unadded_keys) |
|
1257 |
(fallback_parent_map, key_to_source_map, |
|
1258 |
source_result) = self._find_from_fallback(missing) |
|
1259 |
if ordering in ('topological', 'groupcompress'): |
|
0.17.5
by Robert Collins
nograph tests completely passing. |
1260 |
# would be better to not globally sort initially but instead
|
1261 |
# start with one key, recurse to its oldest parent, then grab
|
|
1262 |
# everything in the same group, etc.
|
|
1263 |
parent_map = dict((key, details[2]) for key, details in |
|
1264 |
locations.iteritems()) |
|
3735.31.18
by John Arbash Meinel
Implement stacking support across all ordering implementations. |
1265 |
for key in unadded_keys: |
1266 |
parent_map[key] = self._unadded_refs[key] |
|
1267 |
parent_map.update(fallback_parent_map) |
|
1268 |
source_keys = self._get_ordered_source_keys(ordering, parent_map, |
|
1269 |
key_to_source_map) |
|
0.22.6
by John Arbash Meinel
Clustering chk pages properly makes a big difference. |
1270 |
elif ordering == 'as-requested': |
3735.31.18
by John Arbash Meinel
Implement stacking support across all ordering implementations. |
1271 |
source_keys = self._get_as_requested_source_keys(orig_keys, |
1272 |
locations, unadded_keys, key_to_source_map) |
|
0.17.5
by Robert Collins
nograph tests completely passing. |
1273 |
else: |
0.20.10
by John Arbash Meinel
Change the extraction ordering for 'unordered'. |
1274 |
# We want to yield the keys in a semi-optimal (read-wise) ordering.
|
1275 |
# Otherwise we thrash the _group_cache and destroy performance
|
|
3735.31.18
by John Arbash Meinel
Implement stacking support across all ordering implementations. |
1276 |
source_keys = self._get_io_ordered_source_keys(locations, |
1277 |
unadded_keys, source_result) |
|
1278 |
for key in missing: |
|
0.17.5
by Robert Collins
nograph tests completely passing. |
1279 |
yield AbsentContentFactory(key) |
3735.32.14
by John Arbash Meinel
Move the tests over to testing the LazyGroupContentManager object. |
1280 |
manager = None |
3735.34.3
by John Arbash Meinel
Cleanup, in preparation for merging to brisbane-core. |
1281 |
last_read_memo = None |
3735.32.15
by John Arbash Meinel
Change the GroupCompressBlock code to allow not recording 'end'. |
1282 |
# TODO: This works fairly well at batching up existing groups into a
|
1283 |
# streamable format, and possibly allowing for taking one big
|
|
1284 |
# group and splitting it when it isn't fully utilized.
|
|
1285 |
# However, it doesn't allow us to find under-utilized groups and
|
|
1286 |
# combine them into a bigger group on the fly.
|
|
1287 |
# (Consider the issue with how chk_map inserts texts
|
|
1288 |
# one-at-a-time.) This could be done at insert_record_stream()
|
|
1289 |
# time, but it probably would decrease the number of
|
|
1290 |
# bytes-on-the-wire for fetch.
|
|
3735.31.18
by John Arbash Meinel
Implement stacking support across all ordering implementations. |
1291 |
for source, keys in source_keys: |
1292 |
if source is self: |
|
1293 |
for key in keys: |
|
1294 |
if key in self._unadded_refs: |
|
3735.32.14
by John Arbash Meinel
Move the tests over to testing the LazyGroupContentManager object. |
1295 |
if manager is not None: |
1296 |
for factory in manager.get_record_stream(): |
|
3735.32.12
by John Arbash Meinel
Add groupcompress-block[-ref] as valid stream types. |
1297 |
yield factory |
3735.34.3
by John Arbash Meinel
Cleanup, in preparation for merging to brisbane-core. |
1298 |
last_read_memo = manager = None |
3735.31.18
by John Arbash Meinel
Implement stacking support across all ordering implementations. |
1299 |
bytes, sha1 = self._compressor.extract(key) |
1300 |
parents = self._unadded_refs[key] |
|
3735.32.12
by John Arbash Meinel
Add groupcompress-block[-ref] as valid stream types. |
1301 |
yield FulltextContentFactory(key, parents, sha1, bytes) |
3735.31.18
by John Arbash Meinel
Implement stacking support across all ordering implementations. |
1302 |
else: |
1303 |
index_memo, _, parents, (method, _) = locations[key] |
|
3735.34.1
by John Arbash Meinel
Some testing to see if we can decrease the peak memory consumption a bit. |
1304 |
read_memo = index_memo[0:3] |
3735.34.3
by John Arbash Meinel
Cleanup, in preparation for merging to brisbane-core. |
1305 |
if last_read_memo != read_memo: |
1306 |
# We are starting a new block. If we have a
|
|
1307 |
# manager, we have found everything that fits for
|
|
1308 |
# now, so yield records
|
|
1309 |
if manager is not None: |
|
1310 |
for factory in manager.get_record_stream(): |
|
1311 |
yield factory |
|
1312 |
# Now start a new manager
|
|
3735.34.1
by John Arbash Meinel
Some testing to see if we can decrease the peak memory consumption a bit. |
1313 |
block = self._get_block(index_memo) |
3735.34.3
by John Arbash Meinel
Cleanup, in preparation for merging to brisbane-core. |
1314 |
manager = _LazyGroupContentManager(block) |
1315 |
last_read_memo = read_memo |
|
3735.32.8
by John Arbash Meinel
Some tests for the LazyGroupCompressFactory |
1316 |
start, end = index_memo[3:5] |
3735.32.14
by John Arbash Meinel
Move the tests over to testing the LazyGroupContentManager object. |
1317 |
manager.add_factory(key, parents, start, end) |
0.17.11
by Robert Collins
Add extraction of just-compressed texts to support converting from knits. |
1318 |
else: |
3735.32.14
by John Arbash Meinel
Move the tests over to testing the LazyGroupContentManager object. |
1319 |
if manager is not None: |
1320 |
for factory in manager.get_record_stream(): |
|
3735.32.12
by John Arbash Meinel
Add groupcompress-block[-ref] as valid stream types. |
1321 |
yield factory |
3735.34.3
by John Arbash Meinel
Cleanup, in preparation for merging to brisbane-core. |
1322 |
last_read_memo = manager = None |
3735.31.18
by John Arbash Meinel
Implement stacking support across all ordering implementations. |
1323 |
for record in source.get_record_stream(keys, ordering, |
1324 |
include_delta_closure): |
|
1325 |
yield record |
|
3735.32.14
by John Arbash Meinel
Move the tests over to testing the LazyGroupContentManager object. |
1326 |
if manager is not None: |
1327 |
for factory in manager.get_record_stream(): |
|
3735.32.12
by John Arbash Meinel
Add groupcompress-block[-ref] as valid stream types. |
1328 |
yield factory |
0.20.5
by John Arbash Meinel
Finish the Fulltext => Chunked conversions so that we work in the more-efficient Chunks. |
1329 |
|
0.17.5
by Robert Collins
nograph tests completely passing. |
1330 |
def get_sha1s(self, keys): |
1331 |
"""See VersionedFiles.get_sha1s()."""
|
|
1332 |
result = {} |
|
1333 |
for record in self.get_record_stream(keys, 'unordered', True): |
|
1334 |
if record.sha1 is not None: |
|
1335 |
result[record.key] = record.sha1 |
|
1336 |
else: |
|
1337 |
if record.storage_kind != 'absent': |
|
3735.40.2
by John Arbash Meinel
Add a groupcompress.encode_copy_instruction function. |
1338 |
result[record.key] = osutils.sha_string( |
1339 |
record.get_bytes_as('fulltext')) |
|
0.17.5
by Robert Collins
nograph tests completely passing. |
1340 |
return result |
1341 |
||
0.17.2
by Robert Collins
Core proof of concept working. |
1342 |
def insert_record_stream(self, stream): |
1343 |
"""Insert a record stream into this container.
|
|
1344 |
||
3735.31.2
by John Arbash Meinel
Cleanup trailing whitespace, get test_source to pass by removing asserts. |
1345 |
:param stream: A stream of records to insert.
|
0.17.2
by Robert Collins
Core proof of concept working. |
1346 |
:return: None
|
1347 |
:seealso VersionedFiles.get_record_stream:
|
|
1348 |
"""
|
|
4241.6.6
by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core. |
1349 |
# XXX: Setting random_id=True makes
|
1350 |
# test_insert_record_stream_existing_keys fail for groupcompress and
|
|
1351 |
# groupcompress-nograph, this needs to be revisited while addressing
|
|
1352 |
# 'bzr branch' performance issues.
|
|
1353 |
for _ in self._insert_record_stream(stream, random_id=False): |
|
0.17.5
by Robert Collins
nograph tests completely passing. |
1354 |
pass
|
0.17.2
by Robert Collins
Core proof of concept working. |
1355 |
|
3735.32.21
by John Arbash Meinel
We now have a 'reuse_blocks=False' flag for autopack et al. |
1356 |
def _insert_record_stream(self, stream, random_id=False, nostore_sha=None, |
1357 |
reuse_blocks=True): |
|
0.17.2
by Robert Collins
Core proof of concept working. |
1358 |
"""Internal core to insert a record stream into this container.
|
1359 |
||
1360 |
This helper function has a different interface than insert_record_stream
|
|
1361 |
to allow add_lines to be minimal, but still return the needed data.
|
|
1362 |
||
3735.31.2
by John Arbash Meinel
Cleanup trailing whitespace, get test_source to pass by removing asserts. |
1363 |
:param stream: A stream of records to insert.
|
3735.31.12
by John Arbash Meinel
Push nostore_sha down through the stack. |
1364 |
:param nostore_sha: If the sha1 of a given text matches nostore_sha,
|
1365 |
raise ExistingContent, rather than committing the new text.
|
|
3735.32.21
by John Arbash Meinel
We now have a 'reuse_blocks=False' flag for autopack et al. |
1366 |
:param reuse_blocks: If the source is streaming from
|
1367 |
groupcompress-blocks, just insert the blocks as-is, rather than
|
|
1368 |
expanding the texts and inserting again.
|
|
0.17.2
by Robert Collins
Core proof of concept working. |
1369 |
:return: An iterator over the sha1 of the inserted records.
|
1370 |
:seealso insert_record_stream:
|
|
1371 |
:seealso add_lines:
|
|
1372 |
"""
|
|
0.20.29
by Ian Clatworthy
groupcompress.py code cleanups |
1373 |
adapters = {} |
0.17.5
by Robert Collins
nograph tests completely passing. |
1374 |
def get_adapter(adapter_key): |
1375 |
try: |
|
1376 |
return adapters[adapter_key] |
|
1377 |
except KeyError: |
|
1378 |
adapter_factory = adapter_registry.get(adapter_key) |
|
1379 |
adapter = adapter_factory(self) |
|
1380 |
adapters[adapter_key] = adapter |
|
1381 |
return adapter |
|
0.17.2
by Robert Collins
Core proof of concept working. |
1382 |
# This will go up to fulltexts for gc to gc fetching, which isn't
|
1383 |
# ideal.
|
|
3735.32.19
by John Arbash Meinel
Get rid of the 'delta' flag to GroupCompressor. It didn't do anything anyway. |
1384 |
self._compressor = GroupCompressor() |
0.17.11
by Robert Collins
Add extraction of just-compressed texts to support converting from knits. |
1385 |
self._unadded_refs = {} |
0.17.5
by Robert Collins
nograph tests completely passing. |
1386 |
keys_to_add = [] |
0.17.6
by Robert Collins
Cap group size at 20MB internal buffer. (Probably way too big). |
1387 |
def flush(): |
3735.32.23
by John Arbash Meinel
Add a _LazyGroupContentManager._check_rebuild_block |
1388 |
bytes = self._compressor.flush().to_bytes() |
0.17.6
by Robert Collins
Cap group size at 20MB internal buffer. (Probably way too big). |
1389 |
index, start, length = self._access.add_raw_records( |
0.25.7
by John Arbash Meinel
Have the GroupCompressBlock decide how to compress the header and content. |
1390 |
[(None, len(bytes))], bytes)[0] |
0.17.6
by Robert Collins
Cap group size at 20MB internal buffer. (Probably way too big). |
1391 |
nodes = [] |
1392 |
for key, reads, refs in keys_to_add: |
|
1393 |
nodes.append((key, "%d %d %s" % (start, length, reads), refs)) |
|
1394 |
self._index.add_records(nodes, random_id=random_id) |
|
0.25.10
by John Arbash Meinel
Play around with detecting compression breaks. |
1395 |
self._unadded_refs = {} |
1396 |
del keys_to_add[:] |
|
3735.32.19
by John Arbash Meinel
Get rid of the 'delta' flag to GroupCompressor. It didn't do anything anyway. |
1397 |
self._compressor = GroupCompressor() |
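# Index value layout written by flush() (illustrative numbers): each
# node's value is '<group start> <group length> <record start>
# <record end>', e.g. '12 4096 8 500' = bytes [8:500) of the expanded
# content in the 4096-byte group stored at offset 12.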
0.25.10
by John Arbash Meinel
Play around with detecting compression breaks. |
1398 |
|
0.20.15
by John Arbash Meinel
Change so that regions that have lots of copies get converted back |
1399 |
last_prefix = None |
0.25.10
by John Arbash Meinel
Play around with detecting compression breaks. |
1400 |
max_fulltext_len = 0 |
0.25.11
by John Arbash Meinel
Slightly different handling of large texts. |
1401 |
max_fulltext_prefix = None |
3735.32.20
by John Arbash Meinel
groupcompress now copies the blocks exactly as they were given. |
1402 |
insert_manager = None |
1403 |
block_start = None |
|
1404 |
block_length = None |
|
3735.36.15
by John Arbash Meinel
Set 'combine_backing_indices=False' as the default for text and chk indices. |
1405 |
# XXX: TODO: remove this, it is just for safety checking for now
|
1406 |
inserted_keys = set() |
|
0.17.2
by Robert Collins
Core proof of concept working. |
1407 |
for record in stream: |
0.17.5
by Robert Collins
nograph tests completely passing. |
1408 |
# Raise an error when a record is missing.
|
1409 |
if record.storage_kind == 'absent': |
|
0.20.29
by Ian Clatworthy
groupcompress.py code cleanups |
1410 |
raise errors.RevisionNotPresent(record.key, self) |
3735.36.15
by John Arbash Meinel
Set 'combine_backing_indices=False' as the default for text and chk indices. |
1411 |
if random_id: |
1412 |
if record.key in inserted_keys: |
|
4241.6.6
by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core. |
1413 |
trace.note('Insert claimed random_id=True,' |
1414 |
' but then inserted %r two times', record.key) |
|
3735.36.15
by John Arbash Meinel
Set 'combine_backing_indices=False' as the default for text and chk indices. |
1415 |
continue
|
1416 |
inserted_keys.add(record.key) |
|
3735.32.21
by John Arbash Meinel
We now have a 'reuse_blocks=False' flag for autopack et al. |
1417 |
if reuse_blocks: |
1418 |
# If the reuse_blocks flag is set, check to see if we can just
|
|
1419 |
# copy a groupcompress block as-is.
|
|
1420 |
if record.storage_kind == 'groupcompress-block': |
|
1421 |
# Insert the raw block into the target repo
|
|
1422 |
insert_manager = record._manager |
|
3735.2.163
by John Arbash Meinel
Merge bzr.dev 4187, and revert the change to fix refcycle issues. |
1423 |
insert_manager._check_rebuild_block() |
3735.32.21
by John Arbash Meinel
We now have a 'reuse_blocks=False' flag for autopack et al. |
1424 |
bytes = record._manager._block.to_bytes() |
1425 |
_, start, length = self._access.add_raw_records( |
|
1426 |
[(None, len(bytes))], bytes)[0] |
|
1427 |
del bytes |
|
1428 |
block_start = start |
|
1429 |
block_length = length |
|
1430 |
if record.storage_kind in ('groupcompress-block', |
|
1431 |
'groupcompress-block-ref'): |
|
4241.6.6
by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core. |
1432 |
if insert_manager is None: |
1433 |
raise AssertionError('No insert_manager set') |
|
3735.32.21
by John Arbash Meinel
We now have a 'reuse_blocks=False' flag for autopack et al. |
1434 |
value = "%d %d %d %d" % (block_start, block_length, |
1435 |
record._start, record._end) |
|
1436 |
nodes = [(record.key, value, (record.parents,))] |
|
3735.38.1
by John Arbash Meinel
Change the delta byte stream to remove the 'source length' entry. |
1437 |
# TODO: Consider buffering up many nodes to be added, not
|
1438 |
# sure how much overhead this has, but we're seeing
|
|
1439 |
# ~23s / 120s in add_records calls
|
|
3735.32.21
by John Arbash Meinel
We now have a 'reuse_blocks=False' flag for autopack et al. |
1440 |
self._index.add_records(nodes, random_id=random_id) |
1441 |
continue
|
|
0.20.18
by John Arbash Meinel
Implement new handling of get_bytes_as(), and get_missing_compression_parent_keys() |
1442 |
try: |
0.23.52
by John Arbash Meinel
Use the max_delta flag. |
1443 |
bytes = record.get_bytes_as('fulltext') |
0.20.18
by John Arbash Meinel
Implement new handling of get_bytes_as(), and get_missing_compression_parent_keys() |
1444 |
except errors.UnavailableRepresentation: |
0.17.5
by Robert Collins
nograph tests completely passing. |
1445 |
adapter_key = record.storage_kind, 'fulltext' |
1446 |
adapter = get_adapter(adapter_key) |
|
0.20.21
by John Arbash Meinel
Merge the chk sorting code. |
1447 |
bytes = adapter.get_bytes(record) |
0.20.13
by John Arbash Meinel
Play around a bit. |
1448 |
if len(record.key) > 1: |
1449 |
prefix = record.key[0] |
|
0.25.11
by John Arbash Meinel
Slightly different handling of large texts. |
1450 |
soft = (prefix == last_prefix) |
0.25.10
by John Arbash Meinel
Play around with detecting compression breaks. |
1451 |
else: |
1452 |
prefix = None |
|
0.25.11
by John Arbash Meinel
Slightly different handling of large texts. |
1453 |
soft = False |
1454 |
if max_fulltext_len < len(bytes): |
|
1455 |
max_fulltext_len = len(bytes) |
|
1456 |
max_fulltext_prefix = prefix |
|
4241.6.6
by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core. |
1457 |
(found_sha1, start_point, end_point, |
1458 |
type) = self._compressor.compress(record.key, |
|
1459 |
bytes, record.sha1, soft=soft, |
|
1460 |
nostore_sha=nostore_sha) |
|
1461 |
# delta_ratio = float(len(bytes)) / (end_point - start_point)
|
|
0.25.10
by John Arbash Meinel
Play around with detecting compression breaks. |
1462 |
# Check if we want to continue to include that text
|
0.25.11
by John Arbash Meinel
Slightly different handling of large texts. |
1463 |
if (prefix == max_fulltext_prefix |
1464 |
and end_point < 2 * max_fulltext_len): |
|
1465 |
# As long as we are on the same file_id, we will fill at least
|
|
1466 |
# 2 * max_fulltext_len
|
|
1467 |
start_new_block = False |
|
1468 |
elif end_point > 4*1024*1024: |
|
1469 |
start_new_block = True |
|
1470 |
elif (prefix is not None and prefix != last_prefix |
|
1471 |
and end_point > 2*1024*1024): |
|
1472 |
start_new_block = True |
|
1473 |
else: |
|
1474 |
start_new_block = False |
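# Worked example of the heuristic above (illustrative sizes): with a
# 1MB max fulltext on the current prefix, the group keeps filling while
# under 2MB (2 * max_fulltext_len); past that, it is cut when it
# exceeds 4MB, or as soon as the prefix (file id) changes beyond 2MB.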
|
0.25.10
by John Arbash Meinel
Play around with detecting compression breaks. |
1475 |
last_prefix = prefix |
1476 |
if start_new_block: |
|
1477 |
self._compressor.pop_last() |
|
1478 |
flush() |
|
1479 |
max_fulltext_len = len(bytes) |
|
4241.6.6
by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core. |
1480 |
(found_sha1, start_point, end_point, |
1481 |
type) = self._compressor.compress(record.key, bytes, |
|
1482 |
record.sha1) |
|
0.17.26
by Robert Collins
Working better --gc-plain-chk. |
1483 |
if record.key[-1] is None: |
1484 |
key = record.key[:-1] + ('sha1:' + found_sha1,) |
|
1485 |
else: |
|
1486 |
key = record.key |
|
1487 |
self._unadded_refs[key] = record.parents |
|
0.17.3
by Robert Collins
new encoder, allows non monotonically increasing sequence matches for moar compression. |
1488 |
yield found_sha1 |
3735.2.164
by John Arbash Meinel
Fix a critical bug that caused problems with the index entries. |
1489 |
keys_to_add.append((key, '%d %d' % (start_point, end_point), |
0.17.5
by Robert Collins
nograph tests completely passing. |
1490 |
(record.parents,))) |
0.17.8
by Robert Collins
Flush pending updates at the end of _insert_record_stream |
1491 |
if len(keys_to_add): |
1492 |
flush() |
|
0.17.11
by Robert Collins
Add extraction of just-compressed texts to support converting from knits. |
1493 |
self._compressor = None |
0.17.5
by Robert Collins
nograph tests completely passing. |
1494 |
|
1495 |
def iter_lines_added_or_present_in_keys(self, keys, pb=None): |
|
1496 |
"""Iterate over the lines in the versioned files from keys.
|
|
1497 |
||
1498 |
This may return lines from other keys. Each item the returned
|
|
1499 |
iterator yields is a tuple of a line and a text version that that line
|
|
1500 |
is present in (not introduced in).
|
|
1501 |
||
1502 |
Ordering of results is in whatever order is most suitable for the
|
|
1503 |
underlying storage format.
|
|
1504 |
||
1505 |
If a progress bar is supplied, it may be used to indicate progress.
|
|
1506 |
The caller is responsible for cleaning up progress bars (because this
|
|
1507 |
is an iterator).
|
|
1508 |
||
1509 |
NOTES:
|
|
1510 |
* Lines are normalised by the underlying store: they will all have \n
|
|
1511 |
terminators.
|
|
1512 |
* Lines are returned in arbitrary order.
|
|
1513 |
||
1514 |
:return: An iterator over (line, key).
|
|
1515 |
"""
|
|
1516 |
if pb is None: |
|
1517 |
pb = progress.DummyProgress() |
|
1518 |
keys = set(keys) |
|
1519 |
total = len(keys) |
|
1520 |
# we don't care about inclusions, the caller cares.
|
|
1521 |
# but we need to setup a list of records to visit.
|
|
1522 |
# we need key, position, length
|
|
1523 |
for key_idx, record in enumerate(self.get_record_stream(keys, |
|
1524 |
'unordered', True)): |
|
1525 |
# XXX: todo - optimise to use less than full texts.
|
|
1526 |
key = record.key |
|
3735.32.1
by John Arbash Meinel
Fix the VF WalkingContent checks. |
1527 |
pb.update('Walking content', key_idx, total) |
0.17.5
by Robert Collins
nograph tests completely passing. |
1528 |
if record.storage_kind == 'absent': |
0.20.29
by Ian Clatworthy
groupcompress.py code cleanups |
1529 |
raise errors.RevisionNotPresent(key, self) |
4241.6.6
by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core. |
1530 |
lines = osutils.split_lines(record.get_bytes_as('fulltext')) |
0.17.5
by Robert Collins
nograph tests completely passing. |
1531 |
for line in lines: |
1532 |
yield line, key |
|
3735.32.1
by John Arbash Meinel
Fix the VF WalkingContent checks. |
1533 |
pb.update('Walking content', total, total) |
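# Usage sketch (the caller owns progress-bar cleanup, per the
# docstring; assumes the usual bzrlib ui factory):
#   pb = ui.ui_factory.nested_progress_bar()
#   try:
#       for line, key in vf.iter_lines_added_or_present_in_keys(keys, pb):
#           pass  # process (line, key)
#   finally:
#       pb.finished()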
0.17.5
by Robert Collins
nograph tests completely passing. |
1534 |
|
1535 |
def keys(self): |
|
1536 |
"""See VersionedFiles.keys."""
|
|
1537 |
if 'evil' in debug.debug_flags: |
|
1538 |
trace.mutter_callsite(2, "keys scales with size of history") |
|
3735.31.7
by John Arbash Meinel
Start bringing in stacking support for Groupcompress repos. |
1539 |
sources = [self._index] + self._fallback_vfs |
0.17.5
by Robert Collins
nograph tests completely passing. |
1540 |
result = set() |
1541 |
for source in sources: |
|
1542 |
result.update(source.keys()) |
|
1543 |
return result |
|
1544 |
||
0.17.1
by Robert Collins
Starting point. Interface tests hooked up and failing. |
1545 |
|
1546 |
class _GCGraphIndex(object): |
|
1547 |
"""Mapper from GroupCompressVersionedFiles needs into GraphIndex storage."""
|
|
1548 |
||
0.17.9
by Robert Collins
Initial stab at repository format support. |
1549 |
def __init__(self, graph_index, is_locked, parents=True, |
0.17.1
by Robert Collins
Starting point. Interface tests hooked up and failing. |
1550 |
add_callback=None): |
1551 |
"""Construct a _GCGraphIndex on a graph_index.
|
|
1552 |
||
1553 |
:param graph_index: An implementation of bzrlib.index.GraphIndex.
|
|
0.20.29
by Ian Clatworthy
groupcompress.py code cleanups |
1554 |
:param is_locked: A callback, returns True if the index is locked and
|
1555 |
thus usable.
|
|
3735.31.2
by John Arbash Meinel
Cleanup trailing whitespace, get test_source to pass by removing asserts. |
1556 |
:param parents: If True, record knit parents; if not, do not record
|
0.17.1
by Robert Collins
Starting point. Interface tests hooked up and failing. |
1557 |
parents.
|
1558 |
:param add_callback: If not None, allow additions to the index and call
|
|
1559 |
this callback with a list of added GraphIndex nodes:
|
|
1560 |
[(node, value, node_refs), ...]
|
|
1561 |
"""
|
|
1562 |
self._add_callback = add_callback |
|
1563 |
self._graph_index = graph_index |
|
1564 |
self._parents = parents |
|
1565 |
self.has_graph = parents |
|
1566 |
self._is_locked = is_locked |
|
1567 |
||
0.17.5
by Robert Collins
nograph tests completely passing. |
1568 |
def add_records(self, records, random_id=False): |
1569 |
"""Add multiple records to the index.
|
|
3735.31.2
by John Arbash Meinel
Cleanup trailing whitespace, get test_source to pass by removing asserts. |
1570 |
|
0.17.5
by Robert Collins
nograph tests completely passing. |
1571 |
This function does not insert data into the Immutable GraphIndex
|
1572 |
backing the KnitGraphIndex, instead it prepares data for insertion by
|
|
1573 |
the caller and checks that it is safe to insert then calls
|
|
1574 |
self._add_callback with the prepared GraphIndex nodes.
|
|
1575 |
||
1576 |
:param records: a list of tuples:
|
|
1577 |
(key, options, access_memo, parents).
|
|
1578 |
:param random_id: If True the ids being added were randomly generated
|
|
1579 |
and no check for existence will be performed.
|
|
1580 |
"""
|
|
1581 |
if not self._add_callback: |
|
1582 |
raise errors.ReadOnlyError(self) |
|
1583 |
# we hope there are no repositories with inconsistent parentage
|
|
1584 |
# anymore.
|
|
1585 |
||
1586 |
changed = False |
|
1587 |
keys = {} |
|
1588 |
for (key, value, refs) in records: |
|
1589 |
if not self._parents: |
|
1590 |
if refs: |
|
1591 |
for ref in refs: |
|
1592 |
if ref: |
|
1593 |
raise errors.KnitCorrupt(self, |
|
1594 |
"attempt to add node with parents "
|
|
1595 |
"in parentless index.") |
|
1596 |
refs = () |
|
1597 |
changed = True |
|
1598 |
keys[key] = (value, refs) |
|
1599 |
# check for dups
|
|
1600 |
if not random_id: |
|
1601 |
present_nodes = self._get_entries(keys) |
|
1602 |
for (index, key, value, node_refs) in present_nodes: |
|
1603 |
if node_refs != keys[key][1]: |
|
1604 |
raise errors.KnitCorrupt(self, "inconsistent details in add_records" |
|
1605 |
": %s %s" % ((value, node_refs), keys[key])) |
|
1606 |
del keys[key] |
|
1607 |
changed = True |
|
1608 |
if changed: |
|
1609 |
result = [] |
|
1610 |
if self._parents: |
|
1611 |
for key, (value, node_refs) in keys.iteritems(): |
|
1612 |
result.append((key, value, node_refs)) |
|
1613 |
else: |
|
1614 |
for key, (value, node_refs) in keys.iteritems(): |
|
1615 |
result.append((key, value)) |
|
1616 |
records = result |
|
1617 |
self._add_callback(records) |
|
3735.31.2
by John Arbash Meinel
Cleanup trailing whitespace, get test_source to pass by removing asserts. |
1618 |
|
0.17.5
by Robert Collins
nograph tests completely passing. |
1619 |
def _check_read(self): |
0.20.29
by Ian Clatworthy
groupcompress.py code cleanups |
1620 |
"""Raise an exception if reads are not permitted."""
|
0.17.5
by Robert Collins
nograph tests completely passing. |
1621 |
if not self._is_locked(): |
1622 |
raise errors.ObjectNotLocked(self) |
|
1623 |
||
0.17.2
by Robert Collins
Core proof of concept working. |
1624 |
def _check_write_ok(self): |
0.20.29
by Ian Clatworthy
groupcompress.py code cleanups |
1625 |
"""Raise an exception if writes are not permitted."""
|
0.17.2
by Robert Collins
Core proof of concept working. |
1626 |
if not self._is_locked(): |
1627 |
raise errors.ObjectNotLocked(self) |
|
1628 |
||
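# Editor's sketch: the two _check_* helpers above delegate entirely to
# the is_locked callback handed to this index at construction time.  A
# conforming callback is just a zero-argument callable returning a
# bool; the `repo_lock` object below is invented for illustration:
def _example_is_locked_callback(repo_lock):
    return lambda: repo_lock.is_held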
0.17.5
by Robert Collins
nograph tests completely passing. |
1629 |
def _get_entries(self, keys, check_present=False): |
1630 |
"""Get the entries for keys.
|
|
0.20.29
by Ian Clatworthy
groupcompress.py code cleanups |
1631 |
|
1632 |
Note: Callers are responsible for checking that the index is locked
|
|
1633 |
before calling this method.
|
|
1634 |
||
0.17.5
by Robert Collins
nograph tests completely passing. |
1635 |
:param keys: An iterable of index key tuples.
|
1636 |
"""
|
|
1637 |
keys = set(keys) |
|
1638 |
found_keys = set() |
|
1639 |
if self._parents: |
|
1640 |
for node in self._graph_index.iter_entries(keys): |
|
1641 |
yield node |
|
1642 |
found_keys.add(node[1]) |
|
1643 |
else: |
|
1644 |
# adapt parentless index to the rest of the code.
|
|
1645 |
for node in self._graph_index.iter_entries(keys): |
|
1646 |
yield node[0], node[1], node[2], () |
|
1647 |
found_keys.add(node[1]) |
|
1648 |
if check_present: |
|
1649 |
missing_keys = keys.difference(found_keys) |
|
1650 |
if missing_keys: |
|
1651 |
raise errors.RevisionNotPresent(missing_keys.pop(), self) |
|
1652 |
||
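# Editor's sketch: _get_entries() always yields 4-tuples; when the
# backing index is parentless it pads each 3-tuple GraphIndex entry
# with an empty reference list so callers can unpack uniformly.  The
# `index` argument and the key are invented:
def _example_get_entries(index):
    for idx, key, value, refs in index._get_entries([('rev-1',)],
                                                    check_present=True):
        pass  # refs == () whenever the index records no parents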
1653 |
def get_parent_map(self, keys): |
|
1654 |
"""Get a map of the parents of keys.
|
|
1655 |
||
1656 |
:param keys: The keys to look up parents for.
|
|
1657 |
:return: A mapping from keys to parents. Absent keys are absent from
|
|
1658 |
the mapping.
|
|
1659 |
"""
|
|
1660 |
self._check_read() |
|
1661 |
nodes = self._get_entries(keys) |
|
1662 |
result = {} |
|
1663 |
if self._parents: |
|
1664 |
for node in nodes: |
|
1665 |
result[node[1]] = node[3][0] |
|
1666 |
else: |
|
1667 |
for node in nodes: |
|
1668 |
result[node[1]] = None |
|
1669 |
return result |
|
1670 |
||
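# Editor's sketch of the get_parent_map() contract; the keys and the
# result shown in the comment are invented for illustration:
def _example_get_parent_map(index):
    parent_map = index.get_parent_map([('rev-2',), ('ghost',)])
    # e.g. {('rev-2',): (('rev-1',),)} -- absent keys are left out of
    # the mapping entirely, and every value is None for a parentless
    # index.
    return parent_map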
1671 |
def get_build_details(self, keys): |
|
1672 |
"""Get the various build details for keys.
|
|
1673 |
||
1674 |
Ghosts are omitted from the result.
|
|
1675 |
||
1676 |
:param keys: An iterable of keys.
|
|
1677 |
:return: A dict of key:
|
|
1678 |
(index_memo, compression_parent, parents, record_details).
|
|
1679 |
index_memo
|
|
1680 |
opaque structure to pass to read_records to extract the raw
|
|
1681 |
data
|
|
1682 |
compression_parent
|
|
1683 |
Content that this record is built upon, may be None
|
|
1684 |
parents
|
|
1685 |
Logical parents of this node
|
|
1686 |
record_details
|
|
1687 |
extra information about the content which needs to be passed to
|
|
1688 |
Factory.parse_record
|
|
1689 |
"""
|
|
1690 |
self._check_read() |
|
1691 |
result = {} |
|
0.20.29
by Ian Clatworthy
groupcompress.py code cleanups |
1692 |
entries = self._get_entries(keys) |
0.17.5
by Robert Collins
nograph tests completely passing. |
1693 |
for entry in entries: |
1694 |
key = entry[1] |
|
1695 |
if not self._parents: |
|
1696 |
parents = None |
|
1697 |
else: |
|
1698 |
parents = entry[3][0] |
|
1699 |
method = 'group' |
|
1700 |
result[key] = (self._node_to_position(entry), |
|
1701 |
None, parents, (method, None)) |
|
1702 |
return result |
|
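# Editor's sketch: unpacking the get_build_details() result.  As the
# loop above shows, this index never records a compression parent and
# the method is always 'group'; only `index` and `keys` are invented:
def _example_build_details(index, keys):
    for key, details in index.get_build_details(keys).iteritems():
        index_memo, compression_parent, parents, record_details = details
        # compression_parent is always None and record_details is
        # always ('group', None) for this index implementation.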
3735.31.2
by John Arbash Meinel
Cleanup trailing whitespace, get test_source to pass by removing asserts. |
1703 |
|
0.17.5
by Robert Collins
nograph tests completely passing. |
1704 |
def keys(self): |
1705 |
"""Get all the keys in the collection.
|
|
3735.31.2
by John Arbash Meinel
Cleanup trailing whitespace, get test_source to pass by removing asserts. |
1706 |
|
0.17.5
by Robert Collins
nograph tests completely passing. |
1707 |
The keys are not ordered.
|
1708 |
"""
|
|
1709 |
self._check_read() |
|
1710 |
return [node[1] for node in self._graph_index.iter_all_entries()] |
|
3735.31.2
by John Arbash Meinel
Cleanup trailing whitespace, get test_source to pass by removing asserts. |
1711 |
|
0.17.5
by Robert Collins
nograph tests completely passing. |
1712 |
def _node_to_position(self, node): |
1713 |
"""Convert an index value to position details."""
|
|
1714 |
bits = node[2].split(' ') |
|
1715 |
# It would be nice not to read the entire gzip.
|
|
1716 |
start = int(bits[0]) |
|
1717 |
stop = int(bits[1]) |
|
1718 |
basis_end = int(bits[2]) |
|
1719 |
delta_end = int(bits[3]) |
|
1720 |
return node[0], start, stop, basis_end, delta_end |
|
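# Editor's sketch: the index value parsed above is four space-separated
# integers.  The node tuple below is invented, but matches the
# (index, key, value) shape handled by _node_to_position():
def _example_node_to_position(index):
    node = (index._graph_index, ('rev-1',), '0 900 10 470')
    # -> (index._graph_index, 0, 900, 10, 470), i.e. the stored
    # start, stop, basis_end and delta_end offsets.
    return index._node_to_position(node)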
0.18.14
by John Arbash Meinel
A bit more work, not really usable yet. |
1721 |
|
1722 |
||
3735.40.4
by John Arbash Meinel
Factor out tests that rely on the exact bytecode. |
1723 |
from bzrlib._groupcompress_py import ( |
1724 |
apply_delta, |
|
3735.40.19
by John Arbash Meinel
Implement apply_delta_to_source which doesn't have to malloc another string. |
1725 |
apply_delta_to_source, |
3735.40.11
by John Arbash Meinel
Implement make_delta and apply_delta. |
1726 |
encode_base128_int, |
1727 |
decode_base128_int, |
|
4300.1.1
by John Arbash Meinel
Add the ability to convert a gc block into 'human readable' form. |
1728 |
decode_copy_instruction, |
3735.40.13
by John Arbash Meinel
Rename EquivalenceTable to LinesDeltaIndex. |
1729 |
LinesDeltaIndex, |
3735.40.4
by John Arbash Meinel
Factor out tests that rely on the exact bytecode. |
1730 |
)
|
0.18.14
by John Arbash Meinel
A bit more work, not really usable yet. |
1731 |
try: |
4241.6.6
by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core. |
1732 |
from bzrlib._groupcompress_pyx import ( |
1733 |
apply_delta, |
|
3735.40.19
by John Arbash Meinel
Implement apply_delta_to_source which doesn't have to malloc another string. |
1734 |
apply_delta_to_source, |
4241.6.6
by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core. |
1735 |
DeltaIndex, |
3735.40.16
by John Arbash Meinel
Implement (de|en)code_base128_int in pyrex. |
1736 |
encode_base128_int, |
1737 |
decode_base128_int, |
|
4241.6.6
by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core. |
1738 |
)
|
3735.40.2
by John Arbash Meinel
Add a groupcompress.encode_copy_instruction function. |
1739 |
GroupCompressor = PyrexGroupCompressor |
0.18.14
by John Arbash Meinel
A bit more work, not really usable yet. |
1740 |
except ImportError: |
4241.6.6
by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core. |
1741 |
GroupCompressor = PythonGroupCompressor |
1742 |
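# Editor's note: the module tail above follows bzrlib's usual pattern
# of importing names from the compiled extension when it is available
# and keeping the pure-Python fallbacks otherwise.  The idiom in
# isolation (a sketch, duplicating the imports above):
def _example_extension_fallback():
    try:
        from bzrlib._groupcompress_pyx import apply_delta
    except ImportError:
        from bzrlib._groupcompress_py import apply_delta
    return apply_delta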