# Copyright (C) 2008-2011 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

"""Core compression logic for compressing streams of related files."""

import time
import zlib
try:
    import pylzma
except ImportError:
    pylzma = None

from bzrlib.lazy_import import lazy_import
lazy_import(globals(), """
from bzrlib import (
    annotate,
    config,
    debug,
    errors,
    graph as _mod_graph,
    osutils,
    pack,
    static_tuple,
    trace,
    tsort,
    )

from bzrlib.repofmt import pack_repo
""")

from bzrlib.btree_index import BTreeBuilder
from bzrlib.lru_cache import LRUSizeCache
from bzrlib.versionedfile import (
    _KeyRefs,
    adapter_registry,
    AbsentContentFactory,
    ChunkedContentFactory,
    FulltextContentFactory,
    VersionedFilesWithFallbacks,
    )

# Minimum number of uncompressed bytes to try fetch at once when retrieving
# groupcompress blocks.
BATCH_SIZE = 2**16

_USE_LZMA = False and (pylzma is not None)

# osutils.sha_string('')
_null_sha1 = 'da39a3ee5e6b4b0d3255bfef95601890afd80709'

def sort_gc_optimal(parent_map):
    """Sort and group the keys in parent_map into groupcompress order.

    groupcompress is defined (currently) as reverse-topological order, grouped
    by the key prefix.

    :return: A sorted-list of keys
    """
    # groupcompress ordering is approximately reverse topological,
    # properly grouped by file-id.
    per_prefix_map = {}
    for key, value in parent_map.iteritems():
        if isinstance(key, str) or len(key) == 1:
            prefix = ''
        else:
            prefix = key[0]
        try:
            per_prefix_map[prefix][key] = value
        except KeyError:
            per_prefix_map[prefix] = {key: value}

    present_keys = []
    for prefix in sorted(per_prefix_map):
        present_keys.extend(reversed(tsort.topo_sort(per_prefix_map[prefix])))
    return present_keys


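# An illustrative sketch of sort_gc_optimal (the file ids and revision ids
# below are hypothetical, not taken from bzrlib): given a parent_map such as
#
#   parent_map = {
#       ('file-a', 'rev-2'): (('file-a', 'rev-1'),),
#       ('file-a', 'rev-1'): (),
#       ('file-b', 'rev-1'): (),
#       }
#
# the keys are first grouped by prefix ('file-a', 'file-b'), then each group
# is topo-sorted and reversed, so newer texts come before the texts they were
# derived from:
#
#   sort_gc_optimal(parent_map)
#   => [('file-a', 'rev-2'), ('file-a', 'rev-1'), ('file-b', 'rev-1')]
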
# The max zlib window size is 32kB, so if we set 'max_size' output of the
# decompressor to the requested bytes + 32kB, then we should guarantee
# num_bytes coming out.
_ZLIB_DECOMP_WINDOW = 32*1024

class GroupCompressBlock(object):
    """An object which maintains the internal structure of the compressed data.

    This tracks the meta info (start of text, length, type, etc.)
    """

    # Group Compress Block v1 Zlib
    GCB_HEADER = 'gcb1z\n'
    # Group Compress Block v1 Lzma
    GCB_LZ_HEADER = 'gcb1l\n'
    GCB_KNOWN_HEADERS = (GCB_HEADER, GCB_LZ_HEADER)

    def __init__(self):
        # map by key? or just order in file?
        self._compressor_name = None
        self._z_content_chunks = None
        self._z_content_decompressor = None
        self._z_content_length = None
        self._content_length = None
        self._content = None
        self._content_chunks = None

    def __len__(self):
        # This is the maximum number of bytes this object will reference if
        # everything is decompressed. However, if we decompress less than
        # everything... (this would cause some problems for LRUSizeCache)
        return self._content_length + self._z_content_length

    def _ensure_content(self, num_bytes=None):
        """Make sure that content has been expanded enough.

        :param num_bytes: Ensure that we have extracted at least num_bytes of
            content. If None, consume everything
        """
        if self._content_length is None:
            raise AssertionError('self._content_length should never be None')
        if num_bytes is None:
            num_bytes = self._content_length
        elif (self._content_length is not None
              and num_bytes > self._content_length):
            raise AssertionError(
                'requested num_bytes (%d) > content length (%d)'
                % (num_bytes, self._content_length))
        # Expand the content if required
        if self._content is None:
            if self._content_chunks is not None:
                self._content = ''.join(self._content_chunks)
                self._content_chunks = None
        if self._content is None:
            # We join self._z_content_chunks here, because if we are
            # decompressing, then it is *very* likely that we have a single
            # chunk
            if self._z_content_chunks is None:
                raise AssertionError('No content to decompress')
            z_content = ''.join(self._z_content_chunks)
            if z_content == '':
                self._content = ''
            elif self._compressor_name == 'lzma':
                # We don't do partial lzma decomp yet
                self._content = pylzma.decompress(z_content)
            elif self._compressor_name == 'zlib':
                # Start a zlib decompressor
                if num_bytes * 4 > self._content_length * 3:
                    # If we are requesting more than 3/4ths of the content,
                    # just extract the whole thing in a single pass
                    num_bytes = self._content_length
                    self._content = zlib.decompress(z_content)
                else:
                    self._z_content_decompressor = zlib.decompressobj()
                    # Seed the decompressor with the uncompressed bytes, so
                    # that the rest of the code is simplified
                    self._content = self._z_content_decompressor.decompress(
                        z_content, num_bytes + _ZLIB_DECOMP_WINDOW)
                    if not self._z_content_decompressor.unconsumed_tail:
                        self._z_content_decompressor = None
            else:
                raise AssertionError('Unknown compressor: %r'
                                     % self._compressor_name)
        # Any bytes remaining to be decompressed will be in the decompressor's
        # 'unconsumed_tail'

        # Do we have enough bytes already?
        if len(self._content) >= num_bytes:
            return
        # If we got this far, and don't have a decompressor, something is wrong
        if self._z_content_decompressor is None:
            raise AssertionError(
                'No decompressor to decompress %d bytes' % num_bytes)
        remaining_decomp = self._z_content_decompressor.unconsumed_tail
        if not remaining_decomp:
            raise AssertionError('Nothing left to decompress')
        needed_bytes = num_bytes - len(self._content)
        # We always set max_size to 32kB over the minimum needed, so that
        # zlib will give us as much as we really want.
        # TODO: If this isn't good enough, we could make a loop here,
        #       that keeps expanding the request until we get enough
        self._content += self._z_content_decompressor.decompress(
            remaining_decomp, needed_bytes + _ZLIB_DECOMP_WINDOW)
        if len(self._content) < num_bytes:
            raise AssertionError('%d bytes wanted, only %d available'
                                 % (num_bytes, len(self._content)))
        if not self._z_content_decompressor.unconsumed_tail:
            # The stream is finished
            self._z_content_decompressor = None

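    # A background sketch of the standard-library zlib calls that
    # _ensure_content relies on for partial extraction (shown for reference
    # only, not part of the original module):
    #
    #   d = zlib.decompressobj()
    #   out = d.decompress(z_bytes, max_length)  # returns at most max_length
    #                                            # bytes of decompressed data
    #   d.unconsumed_tail                        # compressed input not yet
    #                                            # fed to the decompressor
    #
    # Passing num_bytes + _ZLIB_DECOMP_WINDOW as max_length should guarantee
    # at least num_bytes come out whenever that much content remains, because
    # zlib's window is only 32kB.
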
    def _parse_bytes(self, bytes, pos):
        """Read the various lengths from the header.

        This also populates the various 'compressed' buffers.

        :return: The position in bytes just after the last newline
        """
        # At present, we have 2 integers for the compressed and uncompressed
        # content. In base10 (ascii) 14 bytes can represent > 1TB, so to avoid
        # checking too far, cap the search to 14 bytes.
        pos2 = bytes.index('\n', pos, pos + 14)
        self._z_content_length = int(bytes[pos:pos2])
        pos = pos2 + 1
        pos2 = bytes.index('\n', pos, pos + 14)
        self._content_length = int(bytes[pos:pos2])
        pos = pos2 + 1
        if len(bytes) != (pos + self._z_content_length):
            # XXX: Define some GCCorrupt error ?
            raise AssertionError('Invalid bytes: (%d) != %d + %d' %
                                 (len(bytes), pos, self._z_content_length))
        self._z_content_chunks = (bytes[pos:],)

    @property
    def _z_content(self):
        """Return z_content_chunks as a simple string.

        Meant only to be used by the test suite.
        """
        if self._z_content_chunks is not None:
            return ''.join(self._z_content_chunks)
        return None

    @classmethod
    def from_bytes(cls, bytes):
        out = cls()
        if bytes[:6] not in cls.GCB_KNOWN_HEADERS:
            raise ValueError('bytes did not start with any of %r'
                             % (cls.GCB_KNOWN_HEADERS,))
        # XXX: why not testing the whole header ?
        if bytes[4] == 'z':
            out._compressor_name = 'zlib'
        elif bytes[4] == 'l':
            out._compressor_name = 'lzma'
        else:
            raise ValueError('unknown compressor: %r' % (bytes,))
        out._parse_bytes(bytes, 6)
        return out

    def extract(self, key, start, end, sha1=None):
        """Extract the text for a specific key.

        :param key: The label used for this content
        :param sha1: TODO (should we validate only when sha1 is supplied?)
        :return: The bytes for the content
        """
        if start == end == 0:
            return ''
        self._ensure_content(end)
        # The bytes are 'f' or 'd' for the type, then a variable-length
        # base128 integer for the content size, then the actual content
        # We know that the variable-length integer won't be longer than 5
        # bytes (it takes 5 bytes to encode 2^32)
        c = self._content[start]
        if c == 'f':
            type = 'fulltext'
        else:
            if c != 'd':
                raise ValueError('Unknown content control code: %s'
                                 % (c,))
            type = 'delta'
        content_len, len_len = decode_base128_int(
            self._content[start + 1:start + 6])
        content_start = start + 1 + len_len
        if end != content_start + content_len:
            raise ValueError('end != len according to field header'
                ' %s != %s' % (end, content_start + content_len))
        if c == 'f':
            bytes = self._content[content_start:end]
        elif c == 'd':
            bytes = apply_delta_to_source(self._content, content_start, end)
        return bytes

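    # An illustrative example of the record layout extract() walks (the text
    # 'abc' is hypothetical): a fulltext record is stored as
    #
    #   'f' + '\x03' + 'abc'    # type byte, base128 length, then the content
    #
    # so for a block whose content starts with that record,
    # block.extract(key, 0, 5) returns 'abc'. Delta records have the same
    # shape, with a 'd' type byte and delta instructions instead of raw text.
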
    def set_chunked_content(self, content_chunks, length):
        """Set the content of this block to the given chunks."""
        # If we have lots of short lines, it may be more efficient to join
        # the content ahead of time. If the content is <10MiB, we don't really
        # care about the extra memory consumption, so we can just pack it and
        # be done. However, timing showed 18s => 17.9s for repacking 1k revs of
        # mysql, which is below the noise margin
        self._content_length = length
        self._content_chunks = content_chunks
        self._content = None
        self._z_content_chunks = None

    def set_content(self, content):
        """Set the content of this block."""
        self._content_length = len(content)
        self._content = content
        self._z_content_chunks = None

    def _create_z_content_using_lzma(self):
        if self._content_chunks is not None:
            self._content = ''.join(self._content_chunks)
            self._content_chunks = None
        if self._content is None:
            raise AssertionError('Nothing to compress')
        z_content = pylzma.compress(self._content)
        self._z_content_chunks = (z_content,)
        self._z_content_length = len(z_content)

    def _create_z_content_from_chunks(self, chunks):
        compressor = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION)
        # Peak in this point is 1 fulltext, 1 compressed text, + zlib overhead
        # (measured peak is maybe 30MB over the above...)
        compressed_chunks = map(compressor.compress, chunks)
        compressed_chunks.append(compressor.flush())
        # Ignore empty chunks
        self._z_content_chunks = [c for c in compressed_chunks if c]
        self._z_content_length = sum(map(len, self._z_content_chunks))

    def _create_z_content(self):
        if self._z_content_chunks is not None:
            return
        if _USE_LZMA:
            self._create_z_content_using_lzma()
            return
        if self._content_chunks is not None:
            chunks = self._content_chunks
        else:
            chunks = (self._content,)
        self._create_z_content_from_chunks(chunks)

    def to_chunks(self):
        """Create the byte stream as a series of 'chunks'"""
        self._create_z_content()
        if _USE_LZMA:
            header = self.GCB_LZ_HEADER
        else:
            header = self.GCB_HEADER
        chunks = ['%s%d\n%d\n'
                  % (header, self._z_content_length, self._content_length),
                  ]
        chunks.extend(self._z_content_chunks)
        total_len = sum(map(len, chunks))
        return total_len, chunks

    def to_bytes(self):
        """Encode the information into a byte stream."""
        total_len, chunks = self.to_chunks()
        return ''.join(chunks)

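    # A round-trip sketch (illustrative only; the content and variable names
    # are hypothetical):
    #
    #   block = GroupCompressBlock()
    #   block.set_content('hello world')
    #   data = block.to_bytes()
    #   # data is 'gcb1z\n<z_len>\n11\n' followed by the zlib-compressed
    #   # content, where <z_len> is the length of the compressed payload
    #   copy = GroupCompressBlock.from_bytes(data)
    #   copy._ensure_content()
    #   assert copy._content == 'hello world'
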
    def _dump(self, include_text=False):
        """Take this block, and spit out a human-readable structure.

        :param include_text: Inserts also include text bits, choose whether
            you want this displayed in the dump or not.
        :return: A dump of the given block. The layout is something like:
            [('f', length), ('d', delta_length, text_length, [delta_info])]
            delta_info := [('i', num_bytes, text), ('c', offset, num_bytes),
            ...]
        """
        self._ensure_content()
        result = []
        pos = 0
        while pos < self._content_length:
            kind = self._content[pos]
            pos += 1
            if kind not in ('f', 'd'):
                raise ValueError('invalid kind character: %r' % (kind,))
            content_len, len_len = decode_base128_int(
                self._content[pos:pos + 5])
            pos += len_len
            if content_len + pos > self._content_length:
                raise ValueError('invalid content_len %d for record @ pos %d'
                                 % (content_len, pos - len_len - 1))
            if kind == 'f': # Fulltext
                if include_text:
                    text = self._content[pos:pos+content_len]
                    result.append(('f', content_len, text))
                else:
                    result.append(('f', content_len))
            elif kind == 'd': # Delta
                delta_content = self._content[pos:pos+content_len]
                delta_info = []
                # The first entry in a delta is the decompressed length
                decomp_len, delta_pos = decode_base128_int(delta_content)
                result.append(('d', content_len, decomp_len, delta_info))
                measured_len = 0
                while delta_pos < content_len:
                    c = ord(delta_content[delta_pos])
                    delta_pos += 1
                    if c & 0x80: # Copy
                        (offset, length,
                         delta_pos) = decode_copy_instruction(delta_content, c,
                                                              delta_pos)
                        if include_text:
                            text = self._content[offset:offset+length]
                            delta_info.append(('c', offset, length, text))
                        else:
                            delta_info.append(('c', offset, length))
                        measured_len += length
                    else: # Insert
                        if include_text:
                            txt = delta_content[delta_pos:delta_pos+c]
                        else:
                            txt = ''
                        delta_info.append(('i', c, txt))
                        measured_len += c
                        delta_pos += c
                if delta_pos != content_len:
                    raise ValueError('Delta consumed a bad number of bytes:'
                                     ' %d != %d' % (delta_pos, content_len))
                if measured_len != decomp_len:
                    raise ValueError('Delta claimed fulltext was %d bytes, but'
                                     ' extraction resulted in %d bytes'
                                     % (decomp_len, measured_len))
            pos += content_len
        return result

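    # An illustrative _dump() result (all lengths are hypothetical): a block
    # holding one fulltext followed by one delta record might produce
    #
    #   [('f', 1000),
    #    ('d', 55, 1020, [('c', 0, 940), ('i', 80, '')])]
    #
    # i.e. a 1000 byte fulltext, then a 55 byte delta that expands to 1020
    # bytes by copying 940 bytes and inserting 80 new ones.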


class _LazyGroupCompressFactory(object):
    """Yield content from a GroupCompressBlock on demand."""

    def __init__(self, key, parents, manager, start, end, first):
        """Create a _LazyGroupCompressFactory

        :param key: The key of just this record
        :param parents: The parents of this key (possibly None)
        :param manager: The _LazyGroupContentManager holding the
            GroupCompressBlock for this record
        :param start: Offset of the first byte for this record in the
            uncompressed content
        :param end: Offset of the byte just after the end of this record
            (ie, bytes = content[start:end])
        :param first: Is this the first Factory for the given block?
        """
        self.key = key
        self.parents = parents
        self.sha1 = None
        # Note: This attribute coupled with Manager._factories creates a
        #       reference cycle. Perhaps we would rather use a weakref(), or
        #       find an appropriate time to release the ref. After the first
        #       get_bytes_as call? After Manager.get_record_stream() returns
        #       the object?
        self._manager = manager
        self._bytes = None
        self.storage_kind = 'groupcompress-block'
        if not first:
            self.storage_kind = 'groupcompress-block-ref'
        self._first = first
        self._start = start
        self._end = end

    def __repr__(self):
        return '%s(%s, first=%s)' % (self.__class__.__name__,
            self.key, self._first)

    def get_bytes_as(self, storage_kind):
        if storage_kind == self.storage_kind:
            if self._first:
                # wire bytes, something...
                return self._manager._wire_bytes()
            else:
                return ''
        if storage_kind in ('fulltext', 'chunked'):
            if self._bytes is None:
                # Grab and cache the raw bytes for this entry
                # and break the ref-cycle with _manager since we don't need it
                # anymore
                try:
                    self._manager._prepare_for_extract()
                except zlib.error as value:
                    raise errors.DecompressCorruption("zlib: " + str(value))
                block = self._manager._block
                self._bytes = block.extract(self.key, self._start, self._end)
                # There are code paths that first extract as fulltext, and then
                # extract as storage_kind (smart fetch). So we don't break the
                # refcycle here, but instead in manager.get_record_stream()
            if storage_kind == 'fulltext':
                return self._bytes
            else:
                return [self._bytes]
        raise errors.UnavailableRepresentation(self.key, storage_kind,
            self.storage_kind)


class _LazyGroupContentManager(object):
    """This manages a group of _LazyGroupCompressFactory objects."""

    _max_cut_fraction = 0.75 # We allow a block to be trimmed to 75% of
                             # current size, and still be considered
                             # reusable
    _full_block_size = 4*1024*1024
    _full_mixed_block_size = 2*1024*1024
    _full_enough_block_size = 3*1024*1024 # size at which we won't repack
    _full_enough_mixed_block_size = 2*768*1024 # 1.5MB

    def __init__(self, block, get_compressor_settings=None):
        self._block = block
        # We need to preserve the ordering
        self._factories = []
        self._last_byte = 0
        self._get_settings = get_compressor_settings
        self._compressor_settings = None

    def _get_compressor_settings(self):
        if self._compressor_settings is not None:
            return self._compressor_settings
        settings = None
        if self._get_settings is not None:
            settings = self._get_settings()
        if settings is None:
            vf = GroupCompressVersionedFiles
            settings = vf._DEFAULT_COMPRESSOR_SETTINGS
        self._compressor_settings = settings
        return self._compressor_settings

    def add_factory(self, key, parents, start, end):
        if not self._factories:
            first = True
        else:
            first = False
        # Note that this creates a reference cycle....
        factory = _LazyGroupCompressFactory(key, parents, self,
            start, end, first=first)
        # max() works here, but as a function call, doing a compare seems to be
        # significantly faster, timeit says 250ms for max() and 100ms for the
        # comparison
        if end > self._last_byte:
            self._last_byte = end
        self._factories.append(factory)

    def get_record_stream(self):
        """Get a record for all keys added so far."""
        for factory in self._factories:
            yield factory
            # Break the ref-cycle
            factory._bytes = None
            factory._manager = None
        # TODO: Consider setting self._factories = None after the above loop,
        #       as it will break the reference cycle

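    # A usage sketch (the key and offsets are hypothetical): the manager
    # wraps one block and hands out lazy factories for the records it holds.
    #
    #   manager = _LazyGroupContentManager(block)
    #   manager.add_factory(('file-id', 'rev-id'), None, start, end)
    #   for record in manager.get_record_stream():
    #       text = record.get_bytes_as('fulltext')
    #
    # get_bytes_as() must be called while iterating, because the loop above
    # drops the factory's reference to the manager once it has been yielded.
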
    def _trim_block(self, last_byte):
        """Create a new GroupCompressBlock, with just some of the content."""
        # None of the factories need to be adjusted, because the content is
        # located in an identical place. Just that some of the unreferenced
        # trailing bytes are stripped
        trace.mutter('stripping trailing bytes from groupcompress block'
                     ' %d => %d', self._block._content_length, last_byte)
        new_block = GroupCompressBlock()
        self._block._ensure_content(last_byte)
        new_block.set_content(self._block._content[:last_byte])
        self._block = new_block

    def _make_group_compressor(self):
        return GroupCompressor(self._get_compressor_settings())

    def _rebuild_block(self):
        """Create a new GroupCompressBlock with only the referenced texts."""
        compressor = self._make_group_compressor()
        tstart = time.time()
        old_length = self._block._content_length
        end_point = 0
        for factory in self._factories:
            bytes = factory.get_bytes_as('fulltext')
            (found_sha1, start_point, end_point,
             type) = compressor.compress(factory.key, bytes, factory.sha1)
            # Now update this factory with the new offsets, etc
            factory.sha1 = found_sha1
            factory._start = start_point
            factory._end = end_point
        self._last_byte = end_point
        new_block = compressor.flush()
        # TODO: Should we check that new_block really *is* smaller than the old
        #       block? It seems hard to come up with a method that it would
        #       expand, since we do full compression again. Perhaps based on a
        #       request that ends up poorly ordered?
        # TODO: If the content would have expanded, then we would want to
        #       handle a case where we need to split the block.
        #       Now that we have a user-tweakable option
        #       (max_bytes_to_index), it is possible that one person set it
        #       to a very low value, causing poor compression.
        delta = time.time() - tstart
        self._block = new_block
        trace.mutter('creating new compressed block on-the-fly in %.3fs'
                     ' %d bytes => %d bytes', delta, old_length,
                     self._block._content_length)

    def _prepare_for_extract(self):
        """A _LazyGroupCompressFactory is about to extract to fulltext."""
        # We expect that if one child is going to fulltext, all will be. This
        # helps prevent all of them from extracting a small amount at a time.
        # Which in itself isn't terribly expensive, but resizing 2MB 32kB at a
        # time (self._block._content) is a little expensive.
        self._block._ensure_content(self._last_byte)

    def _check_rebuild_action(self):
        """Check to see if our block should be repacked."""
        total_bytes_used = 0
        last_byte_used = 0
        for factory in self._factories:
            total_bytes_used += factory._end - factory._start
            if last_byte_used < factory._end:
                last_byte_used = factory._end
        # If we are using more than half of the bytes from the block, we have
        # nothing else to check
        if total_bytes_used * 2 >= self._block._content_length:
            return None, last_byte_used, total_bytes_used
        # We are using less than 50% of the content. Is the content we are
        # using at the beginning of the block? If so, we can just trim the
        # tail, rather than rebuilding from scratch.
        if total_bytes_used * 2 > last_byte_used:
            return 'trim', last_byte_used, total_bytes_used

        # We are using a small amount of the data, and it isn't just packed
        # nicely at the front, so rebuild the content.
        # Note: This would be *nicer* as a strip-data-from-group, rather than
        #       building it up again from scratch
        #       It might be reasonable to consider the fulltext sizes for
        #       different bits when deciding this, too. As you may have a small
        #       fulltext, and a trivial delta, and you are just trading around
        #       for another fulltext. If we do a simple 'prune' you may end up
        #       expanding many deltas into fulltexts, as well.
        #       If we build a cheap enough 'strip', then we could try a strip,
        #       if that expands the content, we then rebuild.
        return 'rebuild', last_byte_used, total_bytes_used

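    # A worked example of the heuristic above (the numbers are hypothetical):
    # for a 100 byte block whose factories reference 30 bytes in total, all
    # of it before offset 40:
    #
    #   total_bytes_used * 2 = 60 < 100  -> less than half used, keep checking
    #   total_bytes_used * 2 = 60 > 40   -> the live data is packed at the
    #                                       front, so 'trim' to 40 bytes
    #
    # If the same 30 bytes were scattered up to offset 90, the second test
    # would fail (60 < 90) and the action would be 'rebuild'.
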
    def check_is_well_utilized(self):
        """Is the current block considered 'well utilized'?

        This heuristic asks if the current block considers itself to be a fully
        developed group, rather than just a loose collection of data.
        """
        if len(self._factories) == 1:
            # A block of length 1 could be improved by combining with other
            # groups - don't look deeper. Even larger than max size groups
            # could compress well with adjacent versions of the same thing.
            return False
        action, last_byte_used, total_bytes_used = self._check_rebuild_action()
        block_size = self._block._content_length
        if total_bytes_used < block_size * self._max_cut_fraction:
            # This block wants to trim itself small enough that we want to
            # consider it under-utilized.
            return False
        # TODO: This code is meant to be the twin of _insert_record_stream's
        #       'start_new_block' logic. It would probably be better to factor
        #       out that logic into a shared location, so that it stays
        #       together better
        # We currently assume a block is properly utilized whenever it is >75%
        # of the size of a 'full' block. In normal operation, a block is
        # considered full when it hits 4MB of same-file content. So any block
        # >3MB is 'full enough'.
        # The only time this isn't true is when a given block has large-object
        # content. (a single file >4MB, etc.)
        # Under these circumstances, we allow a block to grow to
        # 2 x largest_content. Which means that if a given block had a large
        # object, it may actually be under-utilized. However, given that this
        # is 'pack-on-the-fly' it is probably reasonable to not repack large
        # content blobs on-the-fly. Note that because we return False for all
        # 1-item blobs, we will repack them; we may wish to reevaluate our
        # treatment of large object blobs in the future.
        if block_size >= self._full_enough_block_size:
            return True
        # If a block is <3MB, it still may be considered 'full' if it contains
        # mixed content. The current rule is 2MB of mixed content is considered
        # full. So check to see if this block contains mixed content, and
        # set the threshold appropriately.
        common_prefix = None
        for factory in self._factories:
            prefix = factory.key[:-1]
            if common_prefix is None:
                common_prefix = prefix
            elif prefix != common_prefix:
                # Mixed content, check the size appropriately
                if block_size >= self._full_enough_mixed_block_size:
                    return True
                break
        # The content failed both the mixed check and the single-content check
        # so obviously it is not fully utilized
        # TODO: there is one other constraint that isn't being checked
        #       namely, that the entries in the block are in the appropriate
        #       order. For example, you could insert the entries in exactly
        #       reverse groupcompress order, and we would think that is ok.
        #       (all the right objects are in one group, and it is fully
        #       utilized, etc.) For now, we assume that case is rare,
        #       especially since we should always fetch in 'groupcompress'
        #       order.
        return False

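    # Illustrative outcomes of check_is_well_utilized (sizes are hypothetical
    # and assume the factories pass the _max_cut_fraction test above):
    #   - a 3.5MB block of one file's texts  -> True  (>= 3MB single-prefix)
    #   - a 2MB block mixing two file-ids    -> True  (>= 1.5MB mixed)
    #   - a 2MB block of one file's texts    -> False (under the 3MB bar)
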
    def _check_rebuild_block(self):
        action, last_byte_used, total_bytes_used = self._check_rebuild_action()
        if action is None:
            return
        if action == 'trim':
            self._trim_block(last_byte_used)
        elif action == 'rebuild':
            self._rebuild_block()
        else:
            raise ValueError('unknown rebuild action: %r' % (action,))

    def _wire_bytes(self):
        """Return a byte stream suitable for transmitting over the wire."""
        self._check_rebuild_block()
        # The outer block starts with:
        #   'groupcompress-block\n'
        #   <length of compressed key info>\n
        #   <length of uncompressed info>\n
        #   <length of gc block>\n
        #   <header bytes>
        #   <gc-block>
        lines = ['groupcompress-block\n']
        # The minimal info we need is the key, the start offset, and the
        # parents. The length and type are encoded in the record itself.
        # However, passing in the other bits makes it easier. The list of
        # keys, and the start offset, the length
        # 1 line key
        # 1 line with parents, '' for ()
        # 1 line for start offset
        # 1 line for end byte
        header_lines = []
        for factory in self._factories:
            key_bytes = '\x00'.join(factory.key)
            parents = factory.parents
            if parents is None:
                parent_bytes = 'None:'
            else:
                parent_bytes = '\t'.join('\x00'.join(key) for key in parents)
            record_header = '%s\n%s\n%d\n%d\n' % (
                key_bytes, parent_bytes, factory._start, factory._end)
            header_lines.append(record_header)
            # TODO: Can we break the refcycle at this point and set
            #       factory._manager = None?
        header_bytes = ''.join(header_lines)
        del header_lines
        header_bytes_len = len(header_bytes)
        z_header_bytes = zlib.compress(header_bytes)
        del header_bytes
        z_header_bytes_len = len(z_header_bytes)
        block_bytes_len, block_chunks = self._block.to_chunks()
        lines.append('%d\n%d\n%d\n' % (z_header_bytes_len, header_bytes_len,
                                       block_bytes_len))
        lines.append(z_header_bytes)
        lines.extend(block_chunks)
        del z_header_bytes, block_chunks
        # TODO: This is a point where we will double the memory consumption. To
        #       avoid this, we probably have to switch to a 'chunked' api
        return ''.join(lines)

3735.32.17
by John Arbash Meinel
We now round-trip the wire_bytes. |
748 |
@classmethod
|
3735.32.18
by John Arbash Meinel
We now support generating a network stream. |
749 |
def from_bytes(cls, bytes): |
3735.32.17
by John Arbash Meinel
We now round-trip the wire_bytes. |
750 |
# TODO: This does extra string copying, probably better to do it a
|
5439.2.1
by John Arbash Meinel
Change GroupCompressBlock to work in self._z_compress_chunks |
751 |
# different way. At a minimum this creates 2 copies of the
|
752 |
# compressed content
|
|
3735.32.17
by John Arbash Meinel
We now round-trip the wire_bytes. |
753 |
(storage_kind, z_header_len, header_len, |
754 |
block_len, rest) = bytes.split('\n', 4) |
|
755 |
del bytes |
|
756 |
if storage_kind != 'groupcompress-block': |
|
757 |
raise ValueError('Unknown storage kind: %s' % (storage_kind,)) |
|
758 |
z_header_len = int(z_header_len) |
|
759 |
if len(rest) < z_header_len: |
|
760 |
raise ValueError('Compressed header len shorter than all bytes') |
|
761 |
z_header = rest[:z_header_len] |
|
762 |
header_len = int(header_len) |
|
763 |
header = zlib.decompress(z_header) |
|
764 |
if len(header) != header_len: |
|
765 |
raise ValueError('invalid length for decompressed bytes') |
|
766 |
del z_header |
|
767 |
block_len = int(block_len) |
|
768 |
if len(rest) != z_header_len + block_len: |
|
769 |
raise ValueError('Invalid length for block') |
|
770 |
block_bytes = rest[z_header_len:] |
|
771 |
del rest |
|
772 |
# So now we have a valid GCB, we just need to parse the factories that
|
|
773 |
# were sent to us
|
|
774 |
header_lines = header.split('\n') |
|
775 |
del header |
|
776 |
last = header_lines.pop() |
|
777 |
if last != '': |
|
778 |
raise ValueError('header lines did not end with a trailing' |
|
779 |
' newline') |
|
780 |
if len(header_lines) % 4 != 0: |
|
781 |
raise ValueError('The header was not an even multiple of 4 lines') |
|
782 |
block = GroupCompressBlock.from_bytes(block_bytes) |
|
783 |
del block_bytes |
|
784 |
result = cls(block) |
|
785 |
for start in xrange(0, len(header_lines), 4): |
|
786 |
# intern()?
|
|
787 |
key = tuple(header_lines[start].split('\x00')) |
|
788 |
parents_line = header_lines[start+1] |
|
789 |
if parents_line == 'None:': |
|
790 |
parents = None |
|
791 |
else: |
|
792 |
parents = tuple([tuple(segment.split('\x00')) |
|
793 |
for segment in parents_line.split('\t') |
|
794 |
if segment]) |
|
795 |
start_offset = int(header_lines[start+2]) |
|
796 |
end_offset = int(header_lines[start+3]) |
|
797 |
result.add_factory(key, parents, start_offset, end_offset) |
|
798 |
return result |
|
799 |
||
3735.32.14
by John Arbash Meinel
Move the tests over to testing the LazyGroupContentManager object. |
800 |
|
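

# The following helper is an illustrative sketch added in editing; it is not
# part of the original module. It assumes 'manager' is a populated
# _LazyGroupContentManager, and shows that the framing written by
# _wire_bytes() above round-trips through from_bytes().
def _example_wire_roundtrip(manager):
    """Sketch: send a manager over the wire and rebuild it."""
    wire_bytes = manager._wire_bytes()
    # The stream identifies itself on its first line.
    if wire_bytes.split('\n', 1)[0] != 'groupcompress-block':
        raise ValueError('unexpected framing')
    rebuilt = _LazyGroupContentManager.from_bytes(wire_bytes)
    # Each factory's key, parents and byte range survive the round trip.
    return [factory.key for factory in rebuilt.get_record_stream()]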


def network_block_to_records(storage_kind, bytes, line_end):
    if storage_kind != 'groupcompress-block':
        raise ValueError('Unknown storage kind: %s' % (storage_kind,))
    manager = _LazyGroupContentManager.from_bytes(bytes)
    return manager.get_record_stream()


class _CommonGroupCompressor(object):

    def __init__(self, settings=None):
        """Create a GroupCompressor."""
        self.chunks = []
        self._last = None
        self.endpoint = 0
        self.input_bytes = 0
        self.labels_deltas = {}
        self._delta_index = None # Set by the children
        self._block = GroupCompressBlock()
        if settings is None:
            self._settings = {}
        else:
            self._settings = settings

    def compress(self, key, bytes, expected_sha, nostore_sha=None, soft=False):
        """Compress lines with label key.

        :param key: A key tuple. It is stored in the output
            for identification of the text during decompression. If the last
            element is 'None' it is replaced with the sha1 of the text -
            e.g. sha1:xxxxxxx.
        :param bytes: The bytes to be compressed
        :param expected_sha: If non-None, the sha the lines are believed to
            have. During compression the sha is calculated; a mismatch will
            cause an error.
        :param nostore_sha: If the computed sha1 sum matches, we will raise
            ExistingContent rather than adding the text.
        :param soft: Do a 'soft' compression. This means that we require larger
            ranges to match to be considered for a copy command.

        :return: The sha1 of lines, the start and end offsets in the delta, and
            the type ('fulltext' or 'delta').

        :seealso VersionedFiles.add_lines:
        """
        if not bytes: # empty, like a dir entry, etc
            if nostore_sha == _null_sha1:
                raise errors.ExistingContent()
            return _null_sha1, 0, 0, 'fulltext'
        # we assume someone knew what they were doing when they passed it in
        if expected_sha is not None:
            sha1 = expected_sha
        else:
            sha1 = osutils.sha_string(bytes)
        if nostore_sha is not None:
            if sha1 == nostore_sha:
                raise errors.ExistingContent()
        if key[-1] is None:
            key = key[:-1] + ('sha1:' + sha1,)

        start, end, type = self._compress(key, bytes, len(bytes) / 2, soft)
        return sha1, start, end, type

    def _compress(self, key, bytes, max_delta_size, soft=False):
        """Compress lines with label key.

        :param key: A key tuple. It is stored in the output for identification
            of the text during decompression.

        :param bytes: The bytes to be compressed

        :param max_delta_size: The size above which we issue a fulltext instead
            of a delta.

        :param soft: Do a 'soft' compression. This means that we require larger
            ranges to match to be considered for a copy command.

        :return: The sha1 of lines, the start and end offsets in the delta, and
            the type ('fulltext' or 'delta').
        """
        raise NotImplementedError(self._compress)

    def extract(self, key):
        """Extract a key previously added to the compressor.

        :param key: The key to extract.
        :return: An iterable over bytes and the sha1.
        """
        (start_byte, start_chunk, end_byte, end_chunk) = self.labels_deltas[key]
        delta_chunks = self.chunks[start_chunk:end_chunk]
        stored_bytes = ''.join(delta_chunks)
        if stored_bytes[0] == 'f':
            fulltext_len, offset = decode_base128_int(stored_bytes[1:10])
            data_len = fulltext_len + 1 + offset
            if data_len != len(stored_bytes):
                raise ValueError('Index claimed fulltext len, but stored bytes'
                                 ' claim %s != %s'
                                 % (len(stored_bytes), data_len))
            bytes = stored_bytes[offset + 1:]
        else:
            # XXX: This is inefficient at best
            source = ''.join(self.chunks[:start_chunk])
            if stored_bytes[0] != 'd':
                raise ValueError('Unknown content kind, bytes claim %s'
                                 % (stored_bytes[0],))
            delta_len, offset = decode_base128_int(stored_bytes[1:10])
            data_len = delta_len + 1 + offset
            if data_len != len(stored_bytes):
                raise ValueError('Index claimed delta len, but stored bytes'
                                 ' claim %s != %s'
                                 % (len(stored_bytes), data_len))
            bytes = apply_delta(source, stored_bytes[offset + 1:])
        bytes_sha1 = osutils.sha_string(bytes)
        return bytes, bytes_sha1

    def flush(self):
        """Finish this group, creating a formatted stream.

        After calling this, the compressor should no longer be used.
        """
        self._block.set_chunked_content(self.chunks, self.endpoint)
        self.chunks = None
        self._delta_index = None
        return self._block

    def pop_last(self):
        """Call this if you want to 'revoke' the last compression.

        After this, the data structures will be rolled back, but you cannot do
        more compression.
        """
        self._delta_index = None
        del self.chunks[self._last[0]:]
        self.endpoint = self._last[1]
        self._last = None

    def ratio(self):
        """Return the overall compression ratio."""
        return float(self.input_bytes) / float(self.endpoint)
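

# Illustrative sketch added in editing, not part of the original module: how
# one stored record can be decoded by hand, mirroring extract() above. A
# record is a one-byte kind ('f' for fulltext, 'd' for delta), a base128
# encoded payload length, then the payload itself. 'stored_bytes' is assumed
# to be the joined chunks for a single record.
def _example_parse_stored_record(stored_bytes):
    """Sketch: split one stored record into (kind, payload)."""
    kind = stored_bytes[0]
    if kind not in ('f', 'd'):
        raise ValueError('Unknown content kind: %r' % (kind,))
    length, offset = decode_base128_int(stored_bytes[1:10])
    payload = stored_bytes[1 + offset:]
    if len(payload) != length:
        raise ValueError('record claims %d payload bytes, found %d'
                         % (length, len(payload)))
    return kind, payload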


class PythonGroupCompressor(_CommonGroupCompressor):

    def __init__(self, settings=None):
        """Create a GroupCompressor.

        Used only if the pyrex version is not available.
        """
        super(PythonGroupCompressor, self).__init__(settings)
        self._delta_index = LinesDeltaIndex([])
        # The actual content is managed by LinesDeltaIndex
        self.chunks = self._delta_index.lines

    def _compress(self, key, bytes, max_delta_size, soft=False):
        """see _CommonGroupCompressor._compress"""
        input_len = len(bytes)
        new_lines = osutils.split_lines(bytes)
        out_lines, index_lines = self._delta_index.make_delta(
            new_lines, bytes_length=input_len, soft=soft)
        delta_length = sum(map(len, out_lines))
        if delta_length > max_delta_size:
            # The delta is longer than the fulltext, insert a fulltext
            type = 'fulltext'
            out_lines = ['f', encode_base128_int(input_len)]
            out_lines.extend(new_lines)
            index_lines = [False, False]
            index_lines.extend([True] * len(new_lines))
        else:
            # this is a worthy delta, output it
            type = 'delta'
            out_lines[0] = 'd'
            # Update the delta_length to include those two encoded integers
            out_lines[1] = encode_base128_int(delta_length)
        # Before insertion
        start = self.endpoint
        chunk_start = len(self.chunks)
        self._last = (chunk_start, self.endpoint)
        self._delta_index.extend_lines(out_lines, index_lines)
        self.endpoint = self._delta_index.endpoint
        self.input_bytes += input_len
        chunk_end = len(self.chunks)
        self.labels_deltas[key] = (start, chunk_start,
                                   self.endpoint, chunk_end)
        return start, self.endpoint, type
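

# Illustrative sketch added in editing, not part of the original module: the
# compressor API in action. The second text shares most of its content with
# the first, so it normally comes out as a 'delta'; the heuristics are free
# to fall back to 'fulltext' if the delta would be too large.
def _example_compressor_usage():
    """Sketch: compress two related texts, extract one, seal the group."""
    compressor = PythonGroupCompressor()
    common = 'this line is shared between the two texts\n' * 10
    text_one = common + 'first unique line\n'
    text_two = common + 'second unique line\n'
    compressor.compress(('one',), text_one, None)
    sha1_two, _, _, kind_two = compressor.compress(('two',), text_two, None)
    # extract() only works before flush(); it rebuilds the fulltext and its
    # sha1 from the stored record, applying the delta if there is one.
    extracted, extracted_sha1 = compressor.extract(('two',))
    if extracted != text_two or extracted_sha1 != sha1_two:
        raise AssertionError('round trip failed')
    # flush() seals the group into a GroupCompressBlock; kind_two is
    # normally 'delta' here thanks to the shared lines.
    return kind_two, compressor.flush()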


class PyrexGroupCompressor(_CommonGroupCompressor):
    """Produce a serialised group of compressed texts.

    It contains code very similar to SequenceMatcher because of having a similar
    task. However some key differences apply:

    * there is no junk, we want a minimal edit not a human readable diff.
    * we don't filter very common lines (because we don't know where a good
      range will start, and after the first text we want to be emitting minimal
      edits only).
    * we chain the left side, not the right side
    * we incrementally update the adjacency matrix as new lines are provided.
    * we look for matches in all of the left side, so the routine which does
      the analogous task of find_longest_match does not need to filter on the
      left side.
    """

    def __init__(self, settings=None):
        super(PyrexGroupCompressor, self).__init__(settings)
        max_bytes_to_index = self._settings.get('max_bytes_to_index', 0)
        self._delta_index = DeltaIndex(max_bytes_to_index=max_bytes_to_index)

    def _compress(self, key, bytes, max_delta_size, soft=False):
        """see _CommonGroupCompressor._compress"""
        input_len = len(bytes)
        # By having action/label/sha1/len, we can parse the group if the index
        # was ever destroyed, we have the key in 'label', we know the final
        # bytes are valid from sha1, and we know where to find the end of this
        # record because of 'len'. (the delta record itself will store the
        # total length for the expanded record)
        # 'len: %d\n' costs approximately 1% increase in total data
        # Having the labels at all costs us 9-10% increase, 38% increase for
        # inventory pages, and 5.8% increase for text pages
        # new_chunks = ['label:%s\nsha1:%s\n' % (label, sha1)]
        if self._delta_index._source_offset != self.endpoint:
            raise AssertionError('_source_offset != endpoint'
                ' somehow the DeltaIndex got out of sync with'
                ' the output lines')
        delta = self._delta_index.make_delta(bytes, max_delta_size)
        if (delta is None):
            type = 'fulltext'
            enc_length = encode_base128_int(len(bytes))
            len_mini_header = 1 + len(enc_length)
            self._delta_index.add_source(bytes, len_mini_header)
            new_chunks = ['f', enc_length, bytes]
        else:
            type = 'delta'
            enc_length = encode_base128_int(len(delta))
            len_mini_header = 1 + len(enc_length)
            new_chunks = ['d', enc_length, delta]
            self._delta_index.add_delta_source(delta, len_mini_header)
        # Before insertion
        start = self.endpoint
        chunk_start = len(self.chunks)
        # Now output these bytes
        self._output_chunks(new_chunks)
        self.input_bytes += input_len
        chunk_end = len(self.chunks)
        self.labels_deltas[key] = (start, chunk_start,
                                   self.endpoint, chunk_end)
        if not self._delta_index._source_offset == self.endpoint:
            raise AssertionError('the delta index is out of sync'
                ' with the output lines %s != %s'
                % (self._delta_index._source_offset, self.endpoint))
        return start, self.endpoint, type

    def _output_chunks(self, new_chunks):
        """Output some chunks.

        :param new_chunks: The chunks to output.
        """
        self._last = (len(self.chunks), self.endpoint)
        endpoint = self.endpoint
        self.chunks.extend(new_chunks)
        endpoint += sum(map(len, new_chunks))
        self.endpoint = endpoint
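

# Illustrative sketch added in editing, not part of the original module:
# pop_last() rolls the compressor back over the most recent compress() call.
# The insertion code uses this when the text it just added would push a group
# past its size target, so the text can be re-added to a fresh group.
def _example_pop_last():
    """Sketch: revoke the most recent compression."""
    compressor = PythonGroupCompressor()
    compressor.compress(('one',), 'some content\n', None)
    endpoint_before = compressor.endpoint
    compressor.compress(('two',), 'some more content\n', None)
    compressor.pop_last()
    # The output stream is back where it was before the second text, but no
    # further compression is possible (the delta index was discarded).
    if compressor.endpoint != endpoint_before:
        raise AssertionError('pop_last did not roll back the endpoint')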


def make_pack_factory(graph, delta, keylength, inconsistency_fatal=True):
    """Create a factory for creating a pack based groupcompress.

    This is only functional enough to run interface tests, it doesn't try to
    provide a full pack environment.

    :param graph: Store a graph.
    :param delta: Delta compress contents.
    :param keylength: How long should keys be.
    """
    def factory(transport):
        parents = graph
        ref_length = 0
        if graph:
            ref_length = 1
        graph_index = BTreeBuilder(reference_lists=ref_length,
            key_elements=keylength)
        stream = transport.open_write_stream('newpack')
        writer = pack.ContainerWriter(stream.write)
        writer.begin()
        index = _GCGraphIndex(graph_index, lambda:True, parents=parents,
            add_callback=graph_index.add_nodes,
            inconsistency_fatal=inconsistency_fatal)
        access = pack_repo._DirectPackAccess({})
        access.set_writer(writer, graph_index, (transport, 'newpack'))
        result = GroupCompressVersionedFiles(index, access, delta)
        result.stream = stream
        result.writer = writer
        return result
    return factory


def cleanup_pack_group(versioned_files):
    versioned_files.writer.end()
    versioned_files.stream.close()


class _BatchingBlockFetcher(object):
    """Fetch group compress blocks in batches.

    :ivar total_bytes: int of expected number of bytes needed to fetch the
        currently pending batch.
    """

    def __init__(self, gcvf, locations, get_compressor_settings=None):
        self.gcvf = gcvf
        self.locations = locations
        self.keys = []
        self.batch_memos = {}
        self.memos_to_get = []
        self.total_bytes = 0
        self.last_read_memo = None
        self.manager = None
        self._get_compressor_settings = get_compressor_settings

    def add_key(self, key):
        """Add another key to fetch.

        :return: The estimated number of bytes needed to fetch the batch so
            far.
        """
        self.keys.append(key)
        index_memo, _, _, _ = self.locations[key]
        read_memo = index_memo[0:3]
        # Three possibilities for this read_memo:
        #  - it's already part of this batch; or
        #  - it's not yet part of this batch, but is already cached; or
        #  - it's not yet part of this batch and will need to be fetched.
        if read_memo in self.batch_memos:
            # This read memo is already in this batch.
            return self.total_bytes
        try:
            cached_block = self.gcvf._group_cache[read_memo]
        except KeyError:
            # This read memo is new to this batch, and the data isn't cached
            # either.
            self.batch_memos[read_memo] = None
            self.memos_to_get.append(read_memo)
            byte_length = read_memo[2]
            self.total_bytes += byte_length
        else:
            # This read memo is new to this batch, but cached.
            # Keep a reference to the cached block in batch_memos because it's
            # certain that we'll use it when this batch is processed, but
            # there's a risk that it would fall out of _group_cache between now
            # and then.
            self.batch_memos[read_memo] = cached_block
        return self.total_bytes

    def _flush_manager(self):
        if self.manager is not None:
            for factory in self.manager.get_record_stream():
                yield factory
            self.manager = None
            self.last_read_memo = None

    def yield_factories(self, full_flush=False):
        """Yield factories for keys added since the last yield.  They will be
        returned in the order they were added via add_key.

        :param full_flush: by default, some results may not be returned in case
            they can be part of the next batch.  If full_flush is True, then
            all results are returned.
        """
        if self.manager is None and not self.keys:
            return
        # Fetch all memos in this batch.
        blocks = self.gcvf._get_blocks(self.memos_to_get)
        # Turn blocks into factories and yield them.
        memos_to_get_stack = list(self.memos_to_get)
        memos_to_get_stack.reverse()
        for key in self.keys:
            index_memo, _, parents, _ = self.locations[key]
            read_memo = index_memo[:3]
            if self.last_read_memo != read_memo:
                # We are starting a new block. If we have a
                # manager, we have found everything that fits for
                # now, so yield records
                for factory in self._flush_manager():
                    yield factory
                # Now start a new manager.
                if memos_to_get_stack and memos_to_get_stack[-1] == read_memo:
                    # The next block from _get_blocks will be the block we
                    # need.
                    block_read_memo, block = blocks.next()
                    if block_read_memo != read_memo:
                        raise AssertionError(
                            "block_read_memo out of sync with read_memo"
                            " (%r != %r)" % (block_read_memo, read_memo))
                    self.batch_memos[read_memo] = block
                    memos_to_get_stack.pop()
                else:
                    block = self.batch_memos[read_memo]
                self.manager = _LazyGroupContentManager(block,
                    get_compressor_settings=self._get_compressor_settings)
                self.last_read_memo = read_memo
            start, end = index_memo[3:5]
            self.manager.add_factory(key, parents, start, end)
        if full_flush:
            for factory in self._flush_manager():
                yield factory
        del self.keys[:]
        self.batch_memos.clear()
        del self.memos_to_get[:]
        self.total_bytes = 0


class GroupCompressVersionedFiles(VersionedFilesWithFallbacks):
    """A group-compress based VersionedFiles implementation."""

    # This controls how the GroupCompress DeltaIndex works. Basically, we
    # compute hash pointers into the source blocks (so hash(text) => text).
    # However each of these references costs some memory in trade against a
    # more accurate match result. For very large files, they either are
    # pre-compressed and change in bulk whenever they change, or change in just
    # local blocks. Either way, 'improved resolution' is not very helpful,
    # versus running out of memory trying to track everything. The default max
    # gives 100% sampling of a 1MB file.
    _DEFAULT_MAX_BYTES_TO_INDEX = 1024 * 1024
    _DEFAULT_COMPRESSOR_SETTINGS = {'max_bytes_to_index':
                                        _DEFAULT_MAX_BYTES_TO_INDEX}

    def __init__(self, index, access, delta=True, _unadded_refs=None,
                 _group_cache=None):
        """Create a GroupCompressVersionedFiles object.

        :param index: The index object storing access and graph data.
        :param access: The access object storing raw data.
        :param delta: Whether to delta compress or just entropy compress.
        :param _unadded_refs: private parameter, don't use.
        :param _group_cache: private parameter, don't use.
        """
        self._index = index
        self._access = access
        self._delta = delta
        if _unadded_refs is None:
            _unadded_refs = {}
        self._unadded_refs = _unadded_refs
        if _group_cache is None:
            _group_cache = LRUSizeCache(max_size=50*1024*1024)
        self._group_cache = _group_cache
        self._immediate_fallback_vfs = []
        self._max_bytes_to_index = None

    def without_fallbacks(self):
        """Return a clone of this object without any fallbacks configured."""
        return GroupCompressVersionedFiles(self._index, self._access,
            self._delta, _unadded_refs=dict(self._unadded_refs),
            _group_cache=self._group_cache)

    def add_lines(self, key, parents, lines, parent_texts=None,
        left_matching_blocks=None, nostore_sha=None, random_id=False,
        check_content=True):
        """Add a text to the store.

        :param key: The key tuple of the text to add.
        :param parents: The parents key tuples of the text to add.
        :param lines: A list of lines. Each line must be a bytestring. And all
            of them except the last must be terminated with \\n and contain no
            other \\n's. The last line may either contain no \\n's or a single
            terminating \\n. If the lines list does not meet this constraint the
            add routine may error or may succeed - but you will be unable to
            read the data back accurately. (Checking the lines have been split
            correctly is expensive and extremely unlikely to catch bugs so it
            is not done at runtime unless check_content is True.)
        :param parent_texts: An optional dictionary containing the opaque
            representations of some or all of the parents of version_id to
            allow delta optimisations.  VERY IMPORTANT: the texts must be those
            returned by add_lines or data corruption can be caused.
        :param left_matching_blocks: a hint about which areas are common
            between the text and its left-hand-parent.  The format is
            the SequenceMatcher.get_matching_blocks format.
        :param nostore_sha: Raise ExistingContent and do not add the lines to
            the versioned file if the digest of the lines matches this.
        :param random_id: If True a random id has been selected rather than
            an id determined by some deterministic process such as a converter
            from a foreign VCS. When True the backend may choose not to check
            for uniqueness of the resulting key within the versioned file, so
            this should only be done when the result is expected to be unique
            anyway.
        :param check_content: If True, the lines supplied are verified to be
            bytestrings that are correctly formed lines.
        :return: The text sha1, the number of bytes in the text, and an opaque
            representation of the inserted version which can be provided
            back to future add_lines calls in the parent_texts dictionary.
        """
        self._index._check_write_ok()
        self._check_add(key, lines, random_id, check_content)
        if parents is None:
            # The caller might pass None if there is no graph data, but kndx
            # indexes can't directly store that, so we give them
            # an empty tuple instead.
            parents = ()
        # double handling for now. Make it work until then.
        length = sum(map(len, lines))
        record = ChunkedContentFactory(key, parents, None, lines)
        sha1 = list(self._insert_record_stream([record], random_id=random_id,
                                               nostore_sha=nostore_sha))[0]
        return sha1, length, None

    def _add_text(self, key, parents, text, nostore_sha=None, random_id=False):
        """See VersionedFiles._add_text()."""
        self._index._check_write_ok()
        self._check_add(key, None, random_id, check_content=False)
        if text.__class__ is not str:
            raise errors.BzrBadParameterUnicode("text")
        if parents is None:
            # The caller might pass None if there is no graph data, but kndx
            # indexes can't directly store that, so we give them
            # an empty tuple instead.
            parents = ()
        # double handling for now. Make it work until then.
        length = len(text)
        record = FulltextContentFactory(key, parents, None, text)
        sha1 = list(self._insert_record_stream([record], random_id=random_id,
                                               nostore_sha=nostore_sha))[0]
        return sha1, length, None
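
    # Illustrative usage sketch added in editing (not part of the original
    # source). With a two-element key index, adding a text looks like:
    #
    #   sha1, num_bytes, _ = vf.add_lines(('file-id', 'rev-id'), (), ['a\n'])
    #
    # Passing parents=None is normalised to () above, because the indexes
    # cannot directly record the absence of graph data.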

    def add_fallback_versioned_files(self, a_versioned_files):
        """Add a source of texts for texts not present in this knit.

        :param a_versioned_files: A VersionedFiles object.
        """
        self._immediate_fallback_vfs.append(a_versioned_files)

    def annotate(self, key):
        """See VersionedFiles.annotate."""
        ann = annotate.Annotator(self)
        return ann.annotate_flat(key)

    def get_annotator(self):
        return annotate.Annotator(self)

    def check(self, progress_bar=None, keys=None):
        """See VersionedFiles.check()."""
        if keys is None:
            keys = self.keys()
            for record in self.get_record_stream(keys, 'unordered', True):
                record.get_bytes_as('fulltext')
        else:
            return self.get_record_stream(keys, 'unordered', True)

    def clear_cache(self):
        """See VersionedFiles.clear_cache()"""
        self._group_cache.clear()
        self._index._graph_index.clear_cache()
        self._index._int_cache.clear()

    def _check_add(self, key, lines, random_id, check_content):
        """check that version_id and lines are safe to add."""
        version_id = key[-1]
        if version_id is not None:
            if osutils.contains_whitespace(version_id):
                raise errors.InvalidRevisionId(version_id, self)
        self.check_not_reserved_id(version_id)
        # TODO: If random_id==False and the key is already present, we should
        #       probably check that the existing content is identical to what is
        #       being inserted, and otherwise raise an exception.  This would make
        #       the bundle code simpler.
        if check_content:
            self._check_lines_not_unicode(lines)
            self._check_lines_are_lines(lines)

    def get_parent_map(self, keys):
        """Get a map of the graph parents of keys.

        :param keys: The keys to look up parents for.
        :return: A mapping from keys to parents. Absent keys are absent from
            the mapping.
        """
        return self._get_parent_map_with_sources(keys)[0]

    def _get_parent_map_with_sources(self, keys):
        """Get a map of the parents of keys.

        :param keys: The keys to look up parents for.
        :return: A tuple. The first element is a mapping from keys to parents.
            Absent keys are absent from the mapping. The second element is a
            list with the locations each key was found in. The first element
            is the in-this-knit parents, the second the first fallback source,
            and so on.
        """
        result = {}
        sources = [self._index] + self._immediate_fallback_vfs
        source_results = []
        missing = set(keys)
        for source in sources:
            if not missing:
                break
            new_result = source.get_parent_map(missing)
            source_results.append(new_result)
            result.update(new_result)
            missing.difference_update(set(new_result))
        return result, source_results

    def _get_blocks(self, read_memos):
        """Get GroupCompressBlocks for the given read_memos.

        :returns: a series of (read_memo, block) pairs, in the order they were
            originally passed.
        """
        cached = {}
        for read_memo in read_memos:
            try:
                block = self._group_cache[read_memo]
            except KeyError:
                pass
            else:
                cached[read_memo] = block
        not_cached = []
        not_cached_seen = set()
        for read_memo in read_memos:
            if read_memo in cached:
                # Don't fetch what we already have
                continue
            if read_memo in not_cached_seen:
                # Don't try to fetch the same data twice
                continue
            not_cached.append(read_memo)
            not_cached_seen.add(read_memo)
        raw_records = self._access.get_raw_records(not_cached)
        for read_memo in read_memos:
            try:
                yield read_memo, cached[read_memo]
            except KeyError:
                # Read the block, and cache it.
                zdata = raw_records.next()
                block = GroupCompressBlock.from_bytes(zdata)
                self._group_cache[read_memo] = block
                cached[read_memo] = block
                yield read_memo, block
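
    # Illustrative sketch added in editing (not part of the original source):
    # _get_blocks yields in request order, so a repeated read_memo is yielded
    # twice but only fetched (or read from the cache) once:
    #
    #   memos = [memo_a, memo_b, memo_a]
    #   [memo for memo, block in vf._get_blocks(memos)] == memos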

    def get_missing_compression_parent_keys(self):
        """Return the keys of missing compression parents.

        Missing compression parents occur when a record stream was missing
        basis texts, or an index was scanned that had missing basis texts.
        """
        # GroupCompress cannot currently reference texts that are not in the
        # group, so this is valid for now
        return frozenset()

    def get_record_stream(self, keys, ordering, include_delta_closure):
        """Get a stream of records for keys.

        :param keys: The keys to include.
        :param ordering: Either 'unordered' or 'topological'. A topologically
            sorted stream has compression parents strictly before their
            children.
        :param include_delta_closure: If True then the closure across any
            compression parents will be included (in the opaque data).
        :return: An iterator of ContentFactory objects, each of which is only
            valid until the iterator is advanced.
        """
        # keys might be a generator
        orig_keys = list(keys)
        keys = set(keys)
        if not keys:
            return
        if (not self._index.has_graph
            and ordering in ('topological', 'groupcompress')):
            # Cannot topological order when no graph has been stored.
            # but we allow 'as-requested' or 'unordered'
            ordering = 'unordered'

        remaining_keys = keys
        while True:
            try:
                keys = set(remaining_keys)
                for content_factory in self._get_remaining_record_stream(keys,
                        orig_keys, ordering, include_delta_closure):
                    remaining_keys.discard(content_factory.key)
                    yield content_factory
                return
            except errors.RetryWithNewPacks, e:
                self._access.reload_or_raise(e)
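
    # Illustrative usage sketch added in editing (not part of the original
    # source): callers typically consume the stream like this, treating
    # 'absent' records as errors:
    #
    #   for record in vf.get_record_stream(keys, 'groupcompress', False):
    #       if record.storage_kind == 'absent':
    #           raise errors.RevisionNotPresent(record.key, vf)
    #       text = record.get_bytes_as('fulltext')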
1481 |
def _find_from_fallback(self, missing): |
|
1482 |
"""Find whatever keys you can from the fallbacks.
|
|
1483 |
||
1484 |
:param missing: A set of missing keys. This set will be mutated as keys
|
|
1485 |
are found from a fallback_vfs
|
|
1486 |
:return: (parent_map, key_to_source_map, source_results)
|
|
1487 |
parent_map the overall key => parent_keys
|
|
1488 |
key_to_source_map a dict from {key: source}
|
|
1489 |
source_results a list of (source: keys)
|
|
1490 |
"""
|
|
1491 |
parent_map = {} |
|
1492 |
key_to_source_map = {} |
|
1493 |
source_results = [] |
|
5652.2.4
by Martin Pool
Rename to _immediate_fallback_vfs |
1494 |
for source in self._immediate_fallback_vfs: |
3735.31.18
by John Arbash Meinel
Implement stacking support across all ordering implementations. |
1495 |
if not missing: |
1496 |
break
|
|
1497 |
source_parents = source.get_parent_map(missing) |
|
1498 |
parent_map.update(source_parents) |
|
1499 |
source_parents = list(source_parents) |
|
1500 |
source_results.append((source, source_parents)) |
|
1501 |
key_to_source_map.update((key, source) for key in source_parents) |
|
1502 |
missing.difference_update(source_parents) |
|
1503 |
return parent_map, key_to_source_map, source_results |
|
    def _get_ordered_source_keys(self, ordering, parent_map, key_to_source_map):
        """Get the (source, [keys]) list.

        The returned objects should be in the order defined by 'ordering',
        which can weave between different sources.

        :param ordering: Must be one of 'topological' or 'groupcompress'
        :return: List of [(source, [keys])] tuples, such that all keys are in
            the defined order, regardless of source.
        """
        if ordering == 'topological':
            present_keys = tsort.topo_sort(parent_map)
        else:
            # ordering == 'groupcompress'
            # XXX: This only optimizes for the target ordering. We may need
            #      to balance that with the time it takes to extract
            #      ordering, by somehow grouping based on
            #      locations[key][0:3]
            present_keys = sort_gc_optimal(parent_map)
        # Now group by source:
        source_keys = []
        current_source = None
        for key in present_keys:
            source = key_to_source_map.get(key, self)
            if source is not current_source:
                source_keys.append((source, []))
                current_source = source
            source_keys[-1][1].append(key)
        return source_keys
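
    # Output shape sketch (assumed data): consecutive keys served by the same
    # source stay grouped, and a source may appear more than once:
    #
    #   [(self, [k1, k2]), (fallback_vf, [k3]), (self, [k4])]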
|
    def _get_as_requested_source_keys(self, orig_keys, locations, unadded_keys,
                                      key_to_source_map):
        source_keys = []
        current_source = None
        for key in orig_keys:
            if key in locations or key in unadded_keys:
                source = self
            elif key in key_to_source_map:
                source = key_to_source_map[key]
            else: # absent
                continue
            if source is not current_source:
                source_keys.append((source, []))
                current_source = source
            source_keys[-1][1].append(key)
        return source_keys
|
    def _get_io_ordered_source_keys(self, locations, unadded_keys,
                                    source_result):
        def get_group(key):
            # This is the group the bytes are stored in, followed by the
            # location in the group
            return locations[key][0]
        present_keys = sorted(locations.iterkeys(), key=get_group)
        # We don't have an ordering for keys in the in-memory object, but
        # let's process the in-memory ones first.
        present_keys = list(unadded_keys) + present_keys
        # Now grab all of the ones from other sources
        source_keys = [(self, present_keys)]
        source_keys.extend(source_result)
        return source_keys
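
    # Effect sketch (assumed data): sorting on locations[key][0] clusters the
    # keys stored in the same compressed group, so each group is read and
    # decompressed once rather than once per key:
    #
    #   groups: k1 -> g1, k2 -> g2, k3 -> g1
    #   sorted present_keys -> [k1, k3, k2]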
|
    def _get_remaining_record_stream(self, keys, orig_keys, ordering,
                                     include_delta_closure):
        """Get a stream of records for keys.

        :param keys: The keys to include.
        :param ordering: one of 'unordered', 'topological', 'groupcompress' or
            'as-requested'
        :param include_delta_closure: If True then the closure across any
            compression parents will be included (in the opaque data).
        :return: An iterator of ContentFactory objects, each of which is only
            valid until the iterator is advanced.
        """
        # Cheap: iterate
        locations = self._index.get_build_details(keys)
        unadded_keys = set(self._unadded_refs).intersection(keys)
        missing = keys.difference(locations)
        missing.difference_update(unadded_keys)
        (fallback_parent_map, key_to_source_map,
         source_result) = self._find_from_fallback(missing)
        if ordering in ('topological', 'groupcompress'):
            # would be better to not globally sort initially but instead
            # start with one key, recurse to its oldest parent, then grab
            # everything in the same group, etc.
            parent_map = dict((key, details[2]) for key, details in
                locations.iteritems())
            for key in unadded_keys:
                parent_map[key] = self._unadded_refs[key]
            parent_map.update(fallback_parent_map)
            source_keys = self._get_ordered_source_keys(ordering, parent_map,
                                                        key_to_source_map)
        elif ordering == 'as-requested':
            source_keys = self._get_as_requested_source_keys(orig_keys,
                locations, unadded_keys, key_to_source_map)
        else:
            # We want to yield the keys in a semi-optimal (read-wise) ordering.
            # Otherwise we thrash the _group_cache and destroy performance
            source_keys = self._get_io_ordered_source_keys(locations,
                unadded_keys, source_result)
        for key in missing:
            yield AbsentContentFactory(key)
        # Batch up as many keys as we can until either:
        #  - we encounter an unadded ref, or
        #  - we run out of keys, or
        #  - the total bytes to retrieve for this batch > BATCH_SIZE
        batcher = _BatchingBlockFetcher(self, locations,
            get_compressor_settings=self._get_compressor_settings)
        for source, keys in source_keys:
            if source is self:
                for key in keys:
                    if key in self._unadded_refs:
                        # Flush batch, then yield unadded ref from
                        # self._compressor.
                        for factory in batcher.yield_factories(full_flush=True):
                            yield factory
                        bytes, sha1 = self._compressor.extract(key)
                        parents = self._unadded_refs[key]
                        yield FulltextContentFactory(key, parents, sha1, bytes)
                        continue
                    if batcher.add_key(key) > BATCH_SIZE:
                        # Ok, this batch is big enough.  Yield some results.
                        for factory in batcher.yield_factories():
                            yield factory
            else:
                for factory in batcher.yield_factories(full_flush=True):
                    yield factory
                for record in source.get_record_stream(keys, ordering,
                                                       include_delta_closure):
                    yield record
        for factory in batcher.yield_factories(full_flush=True):
            yield factory
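
    # Batching behaviour in the loop above: add_key() returns the running
    # byte total the batch would have to read; once it passes BATCH_SIZE the
    # pending factories are yielded.  Hitting an unadded ref, or switching to
    # a fallback source, forces a full_flush first so that results are still
    # yielded in stream order.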
|
    def get_sha1s(self, keys):
        """See VersionedFiles.get_sha1s()."""
        result = {}
        for record in self.get_record_stream(keys, 'unordered', True):
            if record.sha1 is not None:
                result[record.key] = record.sha1
            else:
                if record.storage_kind != 'absent':
                    result[record.key] = osutils.sha_string(
                        record.get_bytes_as('fulltext'))
        return result
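
    # Minimal usage sketch (assumed instance name 'vf'):
    #
    #   sha1s = vf.get_sha1s([('file-id', 'rev-1')])
    #   # -> {('file-id', 'rev-1'): '1df4...'} ; absent keys are omitted
    #
    # Records that do not carry a sha1 are re-hashed from their fulltext.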

    def insert_record_stream(self, stream):
        """Insert a record stream into this container.

        :param stream: A stream of records to insert.
        :return: None
        :seealso VersionedFiles.get_record_stream:
        """
        # XXX: Setting random_id=True makes
        # test_insert_record_stream_existing_keys fail for groupcompress and
        # groupcompress-nograph, this needs to be revisited while addressing
        # 'bzr branch' performance issues.
        for _ in self._insert_record_stream(stream, random_id=False):
            pass
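
    # Illustrative copy between two stores (assumed names 'source_vf' and
    # 'target_vf'; 'groupcompress' ordering requires a stored graph, use
    # 'unordered' otherwise):
    #
    #   keys = source_vf.keys()
    #   stream = source_vf.get_record_stream(keys, 'groupcompress', False)
    #   target_vf.insert_record_stream(stream)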
|
    def _get_compressor_settings(self):
        if self._max_bytes_to_index is None:
            # TODO: VersionedFiles don't know about their containing
            #       repository, so they don't have much of an idea about their
            #       location. So for now, this is only a global option.
            c = config.GlobalConfig()
            val = c.get_user_option('bzr.groupcompress.max_bytes_to_index')
            if val is not None:
                try:
                    val = int(val)
                except ValueError, e:
                    trace.warning('Value for '
                                  '"bzr.groupcompress.max_bytes_to_index"'
                                  ' %r is not an integer'
                                  % (val,))
                    val = None
            if val is None:
                val = self._DEFAULT_MAX_BYTES_TO_INDEX
            self._max_bytes_to_index = val
        return {'max_bytes_to_index': self._max_bytes_to_index}
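
    # Sketch of overriding the indexing limit via the global config (the
    # option name is taken from the code above; the value is only an
    # example):
    #
    #   from bzrlib import config
    #   config.GlobalConfig().set_user_option(
    #       'bzr.groupcompress.max_bytes_to_index', '4194304')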
5755.2.5
by John Arbash Meinel
Expose the setting up the stack. |
1684 |
|
1685 |
def _make_group_compressor(self): |
|
5755.2.8
by John Arbash Meinel
Do a lot of renaming. |
1686 |
return GroupCompressor(self._get_compressor_settings()) |

    def _insert_record_stream(self, stream, random_id=False, nostore_sha=None,
                              reuse_blocks=True):
        """Internal core to insert a record stream into this container.

        This helper function has a different interface than insert_record_stream
        to allow add_lines to be minimal, but still return the needed data.

        :param stream: A stream of records to insert.
        :param nostore_sha: If the sha1 of a given text matches nostore_sha,
            raise ExistingContent, rather than committing the new text.
        :param reuse_blocks: If the source is streaming from
            groupcompress-blocks, just insert the blocks as-is, rather than
            expanding the texts and inserting again.
        :return: An iterator over the sha1 of the inserted records.
        :seealso insert_record_stream:
        :seealso add_lines:
        """
        adapters = {}
        def get_adapter(adapter_key):
            try:
                return adapters[adapter_key]
            except KeyError:
                adapter_factory = adapter_registry.get(adapter_key)
                adapter = adapter_factory(self)
                adapters[adapter_key] = adapter
                return adapter
        # This will go up to fulltexts for gc to gc fetching, which isn't
        # ideal.
        self._compressor = self._make_group_compressor()
        self._unadded_refs = {}
        keys_to_add = []
        def flush():
            bytes_len, chunks = self._compressor.flush().to_chunks()
            self._compressor = self._make_group_compressor()
            # Note: At this point we still have 1 copy of the fulltext (in
            #       record and the var 'bytes'), and this generates 2 copies of
            #       the compressed text (one for bytes, one in chunks)
            # TODO: Push 'chunks' down into the _access api, so that we don't
            #       have to double compressed memory here
            # TODO: Figure out how to indicate that we would be happy to free
            #       the fulltext content at this point. Note that sometimes we
            #       will want it later (streaming CHK pages), but most of the
            #       time we won't (everything else)
            bytes = ''.join(chunks)
            del chunks
            index, start, length = self._access.add_raw_records(
                [(None, len(bytes))], bytes)[0]
            nodes = []
            for key, reads, refs in keys_to_add:
                nodes.append((key, "%d %d %s" % (start, length, reads), refs))
            self._index.add_records(nodes, random_id=random_id)
            self._unadded_refs = {}
            del keys_to_add[:]

        last_prefix = None
        max_fulltext_len = 0
        max_fulltext_prefix = None
        insert_manager = None
        block_start = None
        block_length = None
        # XXX: TODO: remove this, it is just for safety checking for now
        inserted_keys = set()
        reuse_this_block = reuse_blocks
        for record in stream:
            # Raise an error when a record is missing.
            if record.storage_kind == 'absent':
                raise errors.RevisionNotPresent(record.key, self)
            if random_id:
                if record.key in inserted_keys:
                    trace.note('Insert claimed random_id=True,'
                               ' but then inserted %r two times', record.key)
                    continue
                inserted_keys.add(record.key)
            if reuse_blocks:
                # If the reuse_blocks flag is set, check to see if we can just
                # copy a groupcompress block as-is.
                # We only check on the first record (groupcompress-block) not
                # on all of the (groupcompress-block-ref) entries.
                # The reuse_this_block flag is then kept for as long as we
                # keep reading records out of the same block.
                if record.storage_kind == 'groupcompress-block':
                    # Check to see if we really want to re-use this block
                    insert_manager = record._manager
                    reuse_this_block = insert_manager.check_is_well_utilized()
            else:
                reuse_this_block = False
            if reuse_this_block:
                # We still want to reuse this block
                if record.storage_kind == 'groupcompress-block':
                    # Insert the raw block into the target repo
                    insert_manager = record._manager
                    bytes = record._manager._block.to_bytes()
                    _, start, length = self._access.add_raw_records(
                        [(None, len(bytes))], bytes)[0]
                    del bytes
                    block_start = start
                    block_length = length
                if record.storage_kind in ('groupcompress-block',
                                           'groupcompress-block-ref'):
                    if insert_manager is None:
                        raise AssertionError('No insert_manager set')
                    if insert_manager is not record._manager:
                        raise AssertionError('insert_manager does not match'
                            ' the current record, we cannot be positive'
                            ' that the appropriate content was inserted.'
                            )
                    value = "%d %d %d %d" % (block_start, block_length,
                                             record._start, record._end)
                    nodes = [(record.key, value, (record.parents,))]
                    # TODO: Consider buffering up many nodes to be added, not
                    #       sure how much overhead this has, but we're seeing
                    #       ~23s / 120s in add_records calls
                    self._index.add_records(nodes, random_id=random_id)
                    continue
            try:
                bytes = record.get_bytes_as('fulltext')
            except errors.UnavailableRepresentation:
                adapter_key = record.storage_kind, 'fulltext'
                adapter = get_adapter(adapter_key)
                bytes = adapter.get_bytes(record)
            if len(record.key) > 1:
                prefix = record.key[0]
                soft = (prefix == last_prefix)
            else:
                prefix = None
                soft = False
            if max_fulltext_len < len(bytes):
                max_fulltext_len = len(bytes)
                max_fulltext_prefix = prefix
            (found_sha1, start_point, end_point,
             type) = self._compressor.compress(record.key,
                                               bytes, record.sha1, soft=soft,
                                               nostore_sha=nostore_sha)
            # delta_ratio = float(len(bytes)) / (end_point - start_point)
            # Check if we want to continue to include that text
            if (prefix == max_fulltext_prefix
                and end_point < 2 * max_fulltext_len):
                # As long as we are on the same file_id, we will fill at least
                # 2 * max_fulltext_len
                start_new_block = False
            elif end_point > 4*1024*1024:
                start_new_block = True
            elif (prefix is not None and prefix != last_prefix
                  and end_point > 2*1024*1024):
                start_new_block = True
            else:
                start_new_block = False
            last_prefix = prefix
            if start_new_block:
                self._compressor.pop_last()
                flush()
                max_fulltext_len = len(bytes)
                (found_sha1, start_point, end_point,
                 type) = self._compressor.compress(record.key, bytes,
                                                   record.sha1)
            if record.key[-1] is None:
                key = record.key[:-1] + ('sha1:' + found_sha1,)
            else:
                key = record.key
            self._unadded_refs[key] = record.parents
            yield found_sha1
            as_st = static_tuple.StaticTuple.from_sequence
            if record.parents is not None:
                parents = as_st([as_st(p) for p in record.parents])
            else:
                parents = None
            refs = static_tuple.StaticTuple(parents)
            keys_to_add.append((key, '%d %d' % (start_point, end_point), refs))
        if len(keys_to_add):
            flush()
        self._compressor = None
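
    # Worked example of the block-splitting heuristic above (numbers
    # illustrative): with max_fulltext_len = 1.5MB and a matching prefix, a
    # group may grow until end_point reaches 2 * 1.5MB = 3MB; any group is
    # split once it passes 4MB; and moving to a new prefix splits early once
    # the group exceeds 2MB.  pop_last() removes the text from the full
    # block, and it is re-compressed as the first entry of the new one.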

    def iter_lines_added_or_present_in_keys(self, keys, pb=None):
        """Iterate over the lines in the versioned files from keys.

        This may return lines from other keys. Each item the returned
        iterator yields is a tuple of a line and a text version that that line
        is present in (not introduced in).

        Ordering of results is in whatever order is most suitable for the
        underlying storage format.

        If a progress bar is supplied, it may be used to indicate progress.
        The caller is responsible for cleaning up progress bars (because this
        is an iterator).

        NOTES:
         * Lines are normalised by the underlying store: they will all have \n
           terminators.
         * Lines are returned in arbitrary order.

        :return: An iterator over (line, key).
        """
        keys = set(keys)
        total = len(keys)
        # we don't care about inclusions, the caller cares.
        # but we need to set up a list of records to visit.
        # we need key, position, length
        for key_idx, record in enumerate(self.get_record_stream(keys,
            'unordered', True)):
            # XXX: todo - optimise to use less than full texts.
            key = record.key
            if pb is not None:
                pb.update('Walking content', key_idx, total)
            if record.storage_kind == 'absent':
                raise errors.RevisionNotPresent(key, self)
            lines = osutils.split_lines(record.get_bytes_as('fulltext'))
            for line in lines:
                yield line, key
        if pb is not None:
            pb.update('Walking content', total, total)
|
    def keys(self):
        """See VersionedFiles.keys."""
        if 'evil' in debug.debug_flags:
            trace.mutter_callsite(2, "keys scales with size of history")
        sources = [self._index] + self._immediate_fallback_vfs
        result = set()
        for source in sources:
            result.update(source.keys())
        return result
|

class _GCBuildDetails(object):
    """A blob of data about the build details.

    This stores the minimal data, which then allows compatibility with the old
    api, without taking as much memory.
    """

    __slots__ = ('_index', '_group_start', '_group_end', '_basis_end',
                 '_delta_end', '_parents')

    method = 'group'
    compression_parent = None

    def __init__(self, parents, position_info):
        self._parents = parents
        (self._index, self._group_start, self._group_end, self._basis_end,
         self._delta_end) = position_info

    def __repr__(self):
        return '%s(%s, %s)' % (self.__class__.__name__,
            self.index_memo, self._parents)

    @property
    def index_memo(self):
        return (self._index, self._group_start, self._group_end,
                self._basis_end, self._delta_end)

    @property
    def record_details(self):
        return static_tuple.StaticTuple(self.method, None)

    def __getitem__(self, offset):
        """Compatibility thunk to act like a tuple."""
        if offset == 0:
            return self.index_memo
        elif offset == 1:
            return self.compression_parent # Always None
        elif offset == 2:
            return self._parents
        elif offset == 3:
            return self.record_details
        else:
            raise IndexError('offset out of range')

    def __len__(self):
        return 4
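
# Compatibility sketch (assumed data): a _GCBuildDetails unpacks like the old
# 4-tuple API while only storing its slots:
#
#   details = _GCBuildDetails(parents, (btree_index, 0, 4096, 10, 20))
#   index_memo, comp_parent, parents, record_details = details
#   # comp_parent is always None; record_details is ('group', None)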
|

class _GCGraphIndex(object):
    """Mapper from GroupCompressVersionedFiles needs into GraphIndex storage."""

    def __init__(self, graph_index, is_locked, parents=True,
        add_callback=None, track_external_parent_refs=False,
        inconsistency_fatal=True, track_new_keys=False):
        """Construct a _GCGraphIndex on a graph_index.

        :param graph_index: An implementation of bzrlib.index.GraphIndex.
        :param is_locked: A callback, returns True if the index is locked and
            thus usable.
        :param parents: If True, record parents; if not, do not record
            parents.
        :param add_callback: If not None, allow additions to the index and call
            this callback with a list of added GraphIndex nodes:
            [(node, value, node_refs), ...]
        :param track_external_parent_refs: As keys are added, keep track of the
            keys they reference, so that we can query get_missing_parents(),
            etc.
        :param inconsistency_fatal: When asked to add records that are already
            present, and the details are inconsistent with the existing
            record, raise an exception instead of warning (and skipping the
            record).
        """
        self._add_callback = add_callback
        self._graph_index = graph_index
        self._parents = parents
        self.has_graph = parents
        self._is_locked = is_locked
        self._inconsistency_fatal = inconsistency_fatal
        # GroupCompress records tend to have the same 'group' start + offset
        # repeated over and over, this creates a surplus of ints
        self._int_cache = {}
        if track_external_parent_refs:
            self._key_dependencies = _KeyRefs(
                track_new_keys=track_new_keys)
        else:
            self._key_dependencies = None
|
    def add_records(self, records, random_id=False):
        """Add multiple records to the index.

        This function does not insert data into the Immutable GraphIndex
        backing this _GCGraphIndex, instead it prepares data for insertion by
        the caller and checks that it is safe to insert, then calls
        self._add_callback with the prepared GraphIndex nodes.

        :param records: a list of tuples:
            (key, options, access_memo, parents).
        :param random_id: If True the ids being added were randomly generated
            and no check for existence will be performed.
        """
        if not self._add_callback:
            raise errors.ReadOnlyError(self)
        # we hope there are no repositories with inconsistent parentage
        # anymore.

        changed = False
        keys = {}
        for (key, value, refs) in records:
            if not self._parents:
                if refs:
                    for ref in refs:
                        if ref:
                            raise errors.KnitCorrupt(self,
                                "attempt to add node with parents "
                                "in parentless index.")
                    refs = ()
                    changed = True
            keys[key] = (value, refs)
        # check for dups
        if not random_id:
            present_nodes = self._get_entries(keys)
            for (index, key, value, node_refs) in present_nodes:
                # Sometimes these are passed as a list rather than a tuple
                node_refs = static_tuple.as_tuples(node_refs)
                passed = static_tuple.as_tuples(keys[key])
                if node_refs != passed[1]:
                    details = '%s %s %s' % (key, (value, node_refs), passed)
                    if self._inconsistency_fatal:
                        raise errors.KnitCorrupt(self, "inconsistent details"
                                                 " in add_records: %s" %
                                                 details)
                    else:
                        trace.warning("inconsistent details in skipped"
                                      " record: %s", details)
                del keys[key]
                changed = True
        if changed:
            result = []
            if self._parents:
                for key, (value, node_refs) in keys.iteritems():
                    result.append((key, value, node_refs))
            else:
                for key, (value, node_refs) in keys.iteritems():
                    result.append((key, value))
            records = result
        key_dependencies = self._key_dependencies
        if key_dependencies is not None:
            if self._parents:
                for key, value, refs in records:
                    parents = refs[0]
                    key_dependencies.add_references(key, parents)
            else:
                for key, value, refs in records:
                    key_dependencies.add_key(key)
        self._add_callback(records)

    def _check_read(self):
        """Raise an exception if reads are not permitted."""
        if not self._is_locked():
            raise errors.ObjectNotLocked(self)

    def _check_write_ok(self):
        """Raise an exception if writes are not permitted."""
        if not self._is_locked():
            raise errors.ObjectNotLocked(self)

    def _get_entries(self, keys, check_present=False):
        """Get the entries for keys.

        Note: Callers are responsible for checking that the index is locked
        before calling this method.

        :param keys: An iterable of index key tuples.
        """
        keys = set(keys)
        found_keys = set()
        if self._parents:
            for node in self._graph_index.iter_entries(keys):
                yield node
                found_keys.add(node[1])
        else:
            # adapt parentless index to the rest of the code.
            for node in self._graph_index.iter_entries(keys):
                yield node[0], node[1], node[2], ()
                found_keys.add(node[1])
        if check_present:
            missing_keys = keys.difference(found_keys)
            if missing_keys:
                raise errors.RevisionNotPresent(missing_keys.pop(), self)

    def find_ancestry(self, keys):
        """See CombinedGraphIndex.find_ancestry"""
        return self._graph_index.find_ancestry(keys, 0)

    def get_parent_map(self, keys):
        """Get a map of the parents of keys.

        :param keys: The keys to look up parents for.
        :return: A mapping from keys to parents. Absent keys are absent from
            the mapping.
        """
        self._check_read()
        nodes = self._get_entries(keys)
        result = {}
        if self._parents:
            for node in nodes:
                result[node[1]] = node[3][0]
        else:
            for node in nodes:
                result[node[1]] = None
        return result

    def get_missing_parents(self):
        """Return the keys of missing parents."""
        # Copied from _KnitGraphIndex.get_missing_parents
        # We may have false positives, so filter those out.
        self._key_dependencies.satisfy_refs_for_keys(
            self.get_parent_map(self._key_dependencies.get_unsatisfied_refs()))
        return frozenset(self._key_dependencies.get_unsatisfied_refs())

    def get_build_details(self, keys):
        """Get the various build details for keys.

        Ghosts are omitted from the result.

        :param keys: An iterable of keys.
        :return: A dict of key:
            (index_memo, compression_parent, parents, record_details).

            * index_memo: opaque structure to pass to read_records to
              extract the raw data
            * compression_parent: Content that this record is built upon, may
              be None
            * parents: Logical parents of this node
            * record_details: extra information about the content which needs
              to be passed to Factory.parse_record
        """
        self._check_read()
        result = {}
        entries = self._get_entries(keys)
        for entry in entries:
            key = entry[1]
            if not self._parents:
                parents = None
            else:
                parents = entry[3][0]
            details = _GCBuildDetails(parents, self._node_to_position(entry))
            result[key] = details
        return result

    def keys(self):
        """Get all the keys in the collection.

        The keys are not ordered.
        """
        self._check_read()
        return [node[1] for node in self._graph_index.iter_all_entries()]
|
    def _node_to_position(self, node):
        """Convert an index value to position details."""
        bits = node[2].split(' ')
        # It would be nice not to read the entire gzip.
        # start and stop are put into _int_cache because they are very common.
        # They define the 'group' that an entry is in, and many groups can have
        # thousands of objects.
        # Branching Launchpad, for example, saves ~600k integers, at 12 bytes
        # each, or about 7MB. Note that it might be even more when you consider
        # how PyInt is allocated in separate slabs. And you can't return a slab
        # to the OS if even 1 int on it is in use. Note though that Python uses
        # a LIFO when re-using PyInt slots, which might cause more
        # fragmentation.
        start = int(bits[0])
        start = self._int_cache.setdefault(start, start)
        stop = int(bits[1])
        stop = self._int_cache.setdefault(stop, stop)
        basis_end = int(bits[2])
        delta_end = int(bits[3])
        # We can't use StaticTuple here, because node[0] is a BTreeGraphIndex
        # instance...
        return (node[0], start, stop, basis_end, delta_end)
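
    # Decode sketch (numbers illustrative): a stored value like
    # '0 4096 10 20' splits into four ints; the first pair locates the
    # compressed group within the pack and the second pair this record's
    # slice of it:
    #
    #   node = (btree_index, ('file-id', 'rev-1'), '0 4096 10 20')
    #   self._node_to_position(node)  # -> (btree_index, 0, 4096, 10, 20)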
|
    def scan_unvalidated_index(self, graph_index):
        """Inform this _GCGraphIndex that there is an unvalidated index.

        This allows this _GCGraphIndex to keep track of any missing
        compression parents we may want to have filled in to make those
        indices valid.  It also allows _GCGraphIndex to track any new keys.

        :param graph_index: A GraphIndex
        """
        key_dependencies = self._key_dependencies
        if key_dependencies is None:
            return
        for node in graph_index.iter_all_entries():
            # Add parent refs from graph_index (and discard parent refs
            # that the graph_index has).
            key_dependencies.add_references(node[1], node[3][0])
|

from bzrlib._groupcompress_py import (
    apply_delta,
    apply_delta_to_source,
    encode_base128_int,
    decode_base128_int,
    decode_copy_instruction,
    LinesDeltaIndex,
    )
try:
    from bzrlib._groupcompress_pyx import (
        apply_delta,
        apply_delta_to_source,
        DeltaIndex,
        encode_base128_int,
        decode_base128_int,
        )
    GroupCompressor = PyrexGroupCompressor
except ImportError, e:
    osutils.failed_to_load_extension(e)
    GroupCompressor = PythonGroupCompressor
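
# Quick check of which implementation was selected (illustrative):
#
#   from bzrlib import groupcompress
#   print groupcompress.GroupCompressor.__name__
#   # 'PyrexGroupCompressor' when the C extension loaded,
#   # 'PythonGroupCompressor' otherwise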