# Copyright (C) 2008, 2009, 2010 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

"""Core compression logic for compressing streams of related files."""

import time
import zlib
try:
    import pylzma
except ImportError:
    pylzma = None

from bzrlib import (
    annotate,
    debug,
    errors,
    graph as _mod_graph,
    knit,
    osutils,
    pack,
    static_tuple,
    trace,
    )
from bzrlib.btree_index import BTreeBuilder
from bzrlib.lru_cache import LRUSizeCache
from bzrlib.tsort import topo_sort
from bzrlib.versionedfile import (
    adapter_registry,
    AbsentContentFactory,
    ChunkedContentFactory,
    FulltextContentFactory,
    VersionedFiles,
    )

# Minimum number of uncompressed bytes to try fetch at once when retrieving
# groupcompress blocks.
BATCH_SIZE = 2**16

_USE_LZMA = False and (pylzma is not None)

# osutils.sha_string('')
_null_sha1 = 'da39a3ee5e6b4b0d3255bfef95601890afd80709'

def sort_gc_optimal(parent_map):
    """Sort and group the keys in parent_map into groupcompress order.

    groupcompress is defined (currently) as reverse-topological order, grouped
    by the key prefix.

    :return: A sorted-list of keys
    """
    # groupcompress ordering is approximately reverse topological,
    # properly grouped by file-id.
    per_prefix_map = {}
    for key, value in parent_map.iteritems():
        if isinstance(key, str) or len(key) == 1:
            prefix = ''
        else:
            prefix = key[0]
        try:
            per_prefix_map[prefix][key] = value
        except KeyError:
            per_prefix_map[prefix] = {key: value}

    present_keys = []
    for prefix in sorted(per_prefix_map):
        present_keys.extend(reversed(topo_sort(per_prefix_map[prefix])))
    return present_keys
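
# For illustration (a sketch, not executed; the keys are hypothetical):
# given parent_map = {('f-id', 'rev-B'): (('f-id', 'rev-A'),),
#                     ('f-id', 'rev-A'): ()},
# topo_sort() yields [('f-id', 'rev-A'), ('f-id', 'rev-B')], so the
# reversed, prefix-grouped result is [('f-id', 'rev-B'), ('f-id', 'rev-A')]:
# newest-first within each file-id group.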


# The max zlib window size is 32kB, so if we set 'max_size' output of the
# decompressor to the requested bytes + 32kB, then we should guarantee
# num_bytes coming out.
_ZLIB_DECOMP_WINDOW = 32*1024
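
# A minimal sketch of the partial decompression this constant supports
# (illustrative only; 'z_bytes' and 'wanted' are hypothetical):
#   decomp = zlib.decompressobj()
#   data = decomp.decompress(z_bytes, wanted + _ZLIB_DECOMP_WINDOW)
# Asking for an extra 32kB (one full zlib window) of output means 'data'
# should contain at least 'wanted' bytes, with any unread input left in
# decomp.unconsumed_tail for a later call.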

class GroupCompressBlock(object):
    """An object which maintains the internal structure of the compressed data.

    This tracks the meta info (start of text, length, type, etc.)
    """

    # Group Compress Block v1 Zlib
    GCB_HEADER = 'gcb1z\n'
    # Group Compress Block v1 Lzma
    GCB_LZ_HEADER = 'gcb1l\n'
    GCB_KNOWN_HEADERS = (GCB_HEADER, GCB_LZ_HEADER)

    def __init__(self):
        # map by key? or just order in file?
        self._compressor_name = None
        self._z_content = None
        self._z_content_decompressor = None
        self._z_content_length = None
        self._content_length = None
        self._content = None
        self._content_chunks = None

    def __len__(self):
        # This is the maximum number of bytes this object will reference if
        # everything is decompressed. However, if we decompress less than
        # everything... (this would cause some problems for LRUSizeCache)
        return self._content_length + self._z_content_length

    def _ensure_content(self, num_bytes=None):
        """Make sure that content has been expanded enough.

        :param num_bytes: Ensure that we have extracted at least num_bytes of
            content. If None, consume everything
        """
        if self._content_length is None:
            raise AssertionError('self._content_length should never be None')
        if num_bytes is None:
            num_bytes = self._content_length
        elif (self._content_length is not None
              and num_bytes > self._content_length):
            raise AssertionError(
                'requested num_bytes (%d) > content length (%d)'
                % (num_bytes, self._content_length))
        # Expand the content if required
        if self._content is None:
            if self._content_chunks is not None:
                self._content = ''.join(self._content_chunks)
                self._content_chunks = None
        if self._content is None:
            if self._z_content is None:
                raise AssertionError('No content to decompress')
            if self._z_content == '':
                self._content = ''
            elif self._compressor_name == 'lzma':
                # We don't do partial lzma decomp yet
                self._content = pylzma.decompress(self._z_content)
            elif self._compressor_name == 'zlib':
                # Start a zlib decompressor
                if num_bytes * 4 > self._content_length * 3:
                    # If we are requesting more than 3/4ths of the content,
                    # just extract the whole thing in a single pass
                    num_bytes = self._content_length
                    self._content = zlib.decompress(self._z_content)
                else:
                    self._z_content_decompressor = zlib.decompressobj()
                    # Seed the decompressor with the uncompressed bytes, so
                    # that the rest of the code is simplified
                    self._content = self._z_content_decompressor.decompress(
                        self._z_content, num_bytes + _ZLIB_DECOMP_WINDOW)
                    if not self._z_content_decompressor.unconsumed_tail:
                        self._z_content_decompressor = None
            else:
                raise AssertionError('Unknown compressor: %r'
                                     % self._compressor_name)
        # Any bytes remaining to be decompressed will be in the decompressor's
        # 'unconsumed_tail'

        # Do we have enough bytes already?
        if len(self._content) >= num_bytes:
            return
        # If we got this far, and don't have a decompressor, something is wrong
        if self._z_content_decompressor is None:
            raise AssertionError(
                'No decompressor to decompress %d bytes' % num_bytes)
        remaining_decomp = self._z_content_decompressor.unconsumed_tail
        if not remaining_decomp:
            raise AssertionError('Nothing left to decompress')
        needed_bytes = num_bytes - len(self._content)
        # We always set max_size to 32kB over the minimum needed, so that
        # zlib will give us as much as we really want.
        # TODO: If this isn't good enough, we could make a loop here,
        #       that keeps expanding the request until we get enough
        self._content += self._z_content_decompressor.decompress(
            remaining_decomp, needed_bytes + _ZLIB_DECOMP_WINDOW)
        if len(self._content) < num_bytes:
            raise AssertionError('%d bytes wanted, only %d available'
                                 % (num_bytes, len(self._content)))
        if not self._z_content_decompressor.unconsumed_tail:
            # The stream is finished
            self._z_content_decompressor = None
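
    # Worked example of the logic above (hypothetical sizes): for a block
    # whose expanded content is 1,000,000 bytes, _ensure_content(800000)
    # decompresses everything in one zlib.decompress() call, since
    # 800000 * 4 > 1000000 * 3. _ensure_content(100000) instead pulls only
    # 100000 + _ZLIB_DECOMP_WINDOW bytes through a decompressobj, keeping the
    # decompressor (and its unconsumed_tail) around for later requests.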

    def _parse_bytes(self, bytes, pos):
        """Read the various lengths from the header.

        This also populates the various 'compressed' buffers.

        :return: The position in bytes just after the last newline
        """
        # At present, we have 2 integers for the compressed and uncompressed
        # content. In base10 (ascii) 14 bytes can represent > 1TB, so to avoid
        # checking too far, cap the search to 14 bytes.
        pos2 = bytes.index('\n', pos, pos + 14)
        self._z_content_length = int(bytes[pos:pos2])
        pos = pos2 + 1
        pos2 = bytes.index('\n', pos, pos + 14)
        self._content_length = int(bytes[pos:pos2])
        pos = pos2 + 1
        if len(bytes) != (pos + self._z_content_length):
            # XXX: Define some GCCorrupt error ?
            raise AssertionError('Invalid bytes: (%d) != %d + %d' %
                                 (len(bytes), pos, self._z_content_length))
        self._z_content = bytes[pos:]
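
    # For illustration, a serialized zlib block (hypothetical lengths) is:
    #   'gcb1z\n'              - 6 byte magic
    #   '13\n'                 - compressed length, in decimal ascii
    #   '100\n'                - uncompressed length
    #   <13 bytes of zlib data>
    # from_bytes() below sniffs the magic and then calls
    # _parse_bytes(bytes, 6) to read the two lengths and the payload.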

    @classmethod
    def from_bytes(cls, bytes):
        out = cls()
        if bytes[:6] not in cls.GCB_KNOWN_HEADERS:
            raise ValueError('bytes did not start with any of %r'
                             % (cls.GCB_KNOWN_HEADERS,))
        # XXX: why not testing the whole header ?
        if bytes[4] == 'z':
            out._compressor_name = 'zlib'
        elif bytes[4] == 'l':
            out._compressor_name = 'lzma'
        else:
            raise ValueError('unknown compressor: %r' % (bytes,))
        out._parse_bytes(bytes, 6)
        return out
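
    # A round-trip sketch (illustrative, not executed):
    #   >>> block = GroupCompressBlock()
    #   >>> block.set_content('some bytes')
    #   >>> copy = GroupCompressBlock.from_bytes(block.to_bytes())
    #   >>> copy._content_length
    #   10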

    def extract(self, key, start, end, sha1=None):
        """Extract the text for a specific key.

        :param key: The label used for this content
        :param sha1: TODO (should we validate only when sha1 is supplied?)
        :return: The bytes for the content
        """
        if start == end == 0:
            return ''
        self._ensure_content(end)
        # The bytes are 'f' or 'd' for the type, then a variable-length
        # base128 integer for the content size, then the actual content
        # We know that the variable-length integer won't be longer than 5
        # bytes (it takes 5 bytes to encode 2^32)
        c = self._content[start]
        if c == 'f':
            type = 'fulltext'
        else:
            if c != 'd':
                raise ValueError('Unknown content control code: %s'
                                 % (c,))
            type = 'delta'
        content_len, len_len = decode_base128_int(
                            self._content[start + 1:start + 6])
        content_start = start + 1 + len_len
        if end != content_start + content_len:
            raise ValueError('end != len according to field header'
                ' %s != %s' % (end, content_start + content_len))
        if c == 'f':
            bytes = self._content[content_start:end]
        elif c == 'd':
            bytes = apply_delta_to_source(self._content, content_start, end)
        return bytes
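
    # Record layout sketch (hypothetical offsets): a fulltext record for
    # 'hi\n' is stored as 'f' + '\x03' (base128 length) + 'hi\n', so with
    # start=0 a caller must pass end=5: one type byte, one length byte, and
    # three content bytes.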

    def set_chunked_content(self, content_chunks, length):
        """Set the content of this block to the given chunks."""
        # If we have lots of short lines, it may be more efficient to join
        # the content ahead of time. If the content is <10MiB, we don't really
        # care about the extra memory consumption, so we can just pack it and
        # be done. However, timing showed 18s => 17.9s for repacking 1k revs of
        # mysql, which is below the noise margin
        self._content_length = length
        self._content_chunks = content_chunks
        self._content = None
        self._z_content = None
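
    # Usage sketch (hypothetical chunks): callers that already hold the text
    # as a list of strings can avoid an up-front join:
    #   >>> block = GroupCompressBlock()
    #   >>> block.set_chunked_content(['abc', 'def'], 6)
    # The join is deferred until _ensure_content() needs the flat string, and
    # compression can consume the chunks directly.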

    def set_content(self, content):
        """Set the content of this block."""
        self._content_length = len(content)
        self._content = content
        self._z_content = None

    def _create_z_content_using_lzma(self):
        if self._content_chunks is not None:
            self._content = ''.join(self._content_chunks)
            self._content_chunks = None
        if self._content is None:
            raise AssertionError('Nothing to compress')
        self._z_content = pylzma.compress(self._content)
        self._z_content_length = len(self._z_content)

    def _create_z_content_from_chunks(self):
        compressor = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION)
        compressed_chunks = map(compressor.compress, self._content_chunks)
        compressed_chunks.append(compressor.flush())
        self._z_content = ''.join(compressed_chunks)
        self._z_content_length = len(self._z_content)

    def _create_z_content(self):
        if self._z_content is not None:
            return
        if _USE_LZMA:
            self._create_z_content_using_lzma()
            return
        if self._content_chunks is not None:
            self._create_z_content_from_chunks()
            return
        self._z_content = zlib.compress(self._content)
        self._z_content_length = len(self._z_content)

    def to_bytes(self):
        """Encode the information into a byte stream."""
        self._create_z_content()
        if _USE_LZMA:
            header = self.GCB_LZ_HEADER
        else:
            header = self.GCB_HEADER
        chunks = [header,
                  '%d\n%d\n' % (self._z_content_length, self._content_length),
                  self._z_content,
                 ]
        return ''.join(chunks)

    def _dump(self, include_text=False):
        """Take this block, and spit out a human-readable structure.

        :param include_text: Inserts also include text bits, choose whether
            you want this displayed in the dump or not.
        :return: A dump of the given block. The layout is something like:
            [('f', length), ('d', delta_length, text_length, [delta_info])]
            delta_info := [('i', num_bytes, text), ('c', offset, num_bytes),
            ...]
        """
        self._ensure_content()
        result = []
        pos = 0
        while pos < self._content_length:
            kind = self._content[pos]
            pos += 1
            if kind not in ('f', 'd'):
                raise ValueError('invalid kind character: %r' % (kind,))
            content_len, len_len = decode_base128_int(
                                self._content[pos:pos + 5])
            pos += len_len
            if content_len + pos > self._content_length:
                raise ValueError('invalid content_len %d for record @ pos %d'
                                 % (content_len, pos - len_len - 1))
            if kind == 'f': # Fulltext
                if include_text:
                    text = self._content[pos:pos+content_len]
                    result.append(('f', content_len, text))
                else:
                    result.append(('f', content_len))
            elif kind == 'd': # Delta
                delta_content = self._content[pos:pos+content_len]
                delta_info = []
                # The first entry in a delta is the decompressed length
                decomp_len, delta_pos = decode_base128_int(delta_content)
                result.append(('d', content_len, decomp_len, delta_info))
                measured_len = 0
                while delta_pos < content_len:
                    c = ord(delta_content[delta_pos])
                    delta_pos += 1
                    if c & 0x80: # Copy
                        (offset, length,
                         delta_pos) = decode_copy_instruction(delta_content, c,
                                                              delta_pos)
                        if include_text:
                            text = self._content[offset:offset+length]
                            delta_info.append(('c', offset, length, text))
                        else:
                            delta_info.append(('c', offset, length))
                        measured_len += length
                    else: # Insert
                        if include_text:
                            txt = delta_content[delta_pos:delta_pos+c]
                        else:
                            txt = ''
                        delta_info.append(('i', c, txt))
                        measured_len += c
                        delta_pos += c
                if delta_pos != content_len:
                    raise ValueError('Delta consumed a bad number of bytes:'
                                     ' %d != %d' % (delta_pos, content_len))
                if measured_len != decomp_len:
                    raise ValueError('Delta claimed fulltext was %d bytes, but'
                                     ' extraction resulted in %d bytes'
                                     % (decomp_len, measured_len))
            pos += content_len
        return result
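
    # Example result shape (hypothetical block with one fulltext and one
    # delta record, include_text=False):
    #   [('f', 12), ('d', 7, 13, [('c', 0, 10), ('i', 3, '')])]
    # i.e. a 12-byte fulltext, then a 7-byte delta that expands to 13 bytes
    # via a 10-byte copy and a 3-byte insert.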


class _LazyGroupCompressFactory(object):
    """Yield content from a GroupCompressBlock on demand."""

    def __init__(self, key, parents, manager, start, end, first):
        """Create a _LazyGroupCompressFactory

        :param key: The key of just this record
        :param parents: The parents of this key (possibly None)
        :param manager: The _LazyGroupContentManager that holds the
            GroupCompressBlock for this record
        :param start: Offset of the first byte for this record in the
            uncompressed content
        :param end: Offset of the byte just after the end of this record
            (ie, bytes = content[start:end])
        :param first: Is this the first Factory for the given block?
        """
        self.key = key
        self.parents = parents
        self.sha1 = None
        # Note: This attribute coupled with Manager._factories creates a
        #       reference cycle. Perhaps we would rather use a weakref(), or
        #       find an appropriate time to release the ref. After the first
        #       get_bytes_as call? After Manager.get_record_stream() returns
        #       the object?
        self._manager = manager
        self._bytes = None
        self.storage_kind = 'groupcompress-block'
        if not first:
            self.storage_kind = 'groupcompress-block-ref'
        self._first = first
        self._start = start
        self._end = end

    def __repr__(self):
        return '%s(%s, first=%s)' % (self.__class__.__name__,
            self.key, self._first)

    def get_bytes_as(self, storage_kind):
        if storage_kind == self.storage_kind:
            if self._first:
                # wire bytes, something...
                return self._manager._wire_bytes()
            else:
                return ''
        if storage_kind in ('fulltext', 'chunked'):
            if self._bytes is None:
                # Grab and cache the raw bytes for this entry
                # and break the ref-cycle with _manager since we don't need it
                # anymore
                self._manager._prepare_for_extract()
                block = self._manager._block
                self._bytes = block.extract(self.key, self._start, self._end)
                # There are code paths that first extract as fulltext, and then
                # extract as storage_kind (smart fetch). So we don't break the
                # refcycle here, but instead in manager.get_record_stream()
            if storage_kind == 'fulltext':
                return self._bytes
            else:
                return [self._bytes]
        raise errors.UnavailableRepresentation(self.key, storage_kind,
                                               self.storage_kind)


class _LazyGroupContentManager(object):
    """This manages a group of _LazyGroupCompressFactory objects."""

    _max_cut_fraction = 0.75 # We allow a block to be trimmed to 75% of
                             # current size, and still be considered
                             # reusable
    _full_block_size = 4*1024*1024
    _full_mixed_block_size = 2*1024*1024
    _full_enough_block_size = 3*1024*1024 # size at which we won't repack
    _full_enough_mixed_block_size = 2*768*1024 # 1.5MB

    def __init__(self, block):
        self._block = block
        # We need to preserve the ordering
        self._factories = []
        self._last_byte = 0

    def add_factory(self, key, parents, start, end):
        if not self._factories:
            first = True
        else:
            first = False
        # Note that this creates a reference cycle....
        factory = _LazyGroupCompressFactory(key, parents, self,
            start, end, first=first)
        # max() works here, but as a function call, doing a compare seems to be
        # significantly faster, timeit says 250ms for max() and 100ms for the
        # comparison
        if end > self._last_byte:
            self._last_byte = end
        self._factories.append(factory)

    def get_record_stream(self):
        """Get a record for all keys added so far."""
        for factory in self._factories:
            yield factory
            # Break the ref-cycle
            factory._bytes = None
            factory._manager = None
        # TODO: Consider setting self._factories = None after the above loop,
        #       as it will break the reference cycle

    def _trim_block(self, last_byte):
        """Create a new GroupCompressBlock, with just some of the content."""
        # None of the factories need to be adjusted, because the content is
        # located in an identical place. Just that some of the unreferenced
        # trailing bytes are stripped
        trace.mutter('stripping trailing bytes from groupcompress block'
                     ' %d => %d', self._block._content_length, last_byte)
        new_block = GroupCompressBlock()
        self._block._ensure_content(last_byte)
        new_block.set_content(self._block._content[:last_byte])
        self._block = new_block

    def _rebuild_block(self):
        """Create a new GroupCompressBlock with only the referenced texts."""
        compressor = GroupCompressor()
        tstart = time.time()
        old_length = self._block._content_length
        end_point = 0
        for factory in self._factories:
            bytes = factory.get_bytes_as('fulltext')
            (found_sha1, start_point, end_point,
             type) = compressor.compress(factory.key, bytes, factory.sha1)
            # Now update this factory with the new offsets, etc
            factory.sha1 = found_sha1
            factory._start = start_point
            factory._end = end_point
        self._last_byte = end_point
        new_block = compressor.flush()
        # TODO: Should we check that new_block really *is* smaller than the old
        #       block? It seems hard to come up with a method that it would
        #       expand, since we do full compression again. Perhaps based on a
        #       request that ends up poorly ordered?
        delta = time.time() - tstart
        self._block = new_block
        trace.mutter('creating new compressed block on-the-fly in %.3fs'
                     ' %d bytes => %d bytes', delta, old_length,
                     self._block._content_length)

    def _prepare_for_extract(self):
        """A _LazyGroupCompressFactory is about to extract to fulltext."""
        # We expect that if one child is going to fulltext, all will be. This
        # helps prevent all of them from extracting a small amount at a time.
        # Which in itself isn't terribly expensive, but resizing 2MB 32kB at a
        # time (self._block._content) is a little expensive.
        self._block._ensure_content(self._last_byte)

    def _check_rebuild_action(self):
        """Check to see if our block should be repacked."""
        total_bytes_used = 0
        last_byte_used = 0
        for factory in self._factories:
            total_bytes_used += factory._end - factory._start
            if last_byte_used < factory._end:
                last_byte_used = factory._end
        # If we are using more than half of the bytes from the block, we have
        # nothing else to check
        if total_bytes_used * 2 >= self._block._content_length:
            return None, last_byte_used, total_bytes_used
        # We are using less than 50% of the content. Is the content we are
        # using at the beginning of the block? If so, we can just trim the
        # tail, rather than rebuilding from scratch.
        if total_bytes_used * 2 > last_byte_used:
            return 'trim', last_byte_used, total_bytes_used

        # We are using a small amount of the data, and it isn't just packed
        # nicely at the front, so rebuild the content.
        # Note: This would be *nicer* as a strip-data-from-group, rather than
        #       building it up again from scratch
        #       It might be reasonable to consider the fulltext sizes for
        #       different bits when deciding this, too. As you may have a small
        #       fulltext, and a trivial delta, and you are just trading around
        #       for another fulltext. If we do a simple 'prune' you may end up
        #       expanding many deltas into fulltexts, as well.
        #       If we build a cheap enough 'strip', then we could try a strip,
        #       if that expands the content, we then rebuild.
        return 'rebuild', last_byte_used, total_bytes_used
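
    # Decision sketch with hypothetical numbers, for a 1000-byte block:
    #   - factories spanning 600 bytes in total -> (None, ...): use as-is
    #   - 300 bytes used, none past byte 500    -> ('trim', 500, 300)
    #   - 300 bytes used, last byte at 900      -> ('rebuild', 900, 300)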

    def check_is_well_utilized(self):
        """Is the current block considered 'well utilized'?

        This heuristic asks if the current block considers itself to be a fully
        developed group, rather than just a loose collection of data.
        """
        if len(self._factories) == 1:
            # A block of length 1 could be improved by combining with other
            # groups - don't look deeper. Even larger than max size groups
            # could compress well with adjacent versions of the same thing.
            return False
        action, last_byte_used, total_bytes_used = self._check_rebuild_action()
        block_size = self._block._content_length
        if total_bytes_used < block_size * self._max_cut_fraction:
            # This block wants to trim itself small enough that we want to
            # consider it under-utilized.
            return False
        # TODO: This code is meant to be the twin of _insert_record_stream's
        #       'start_new_block' logic. It would probably be better to factor
        #       out that logic into a shared location, so that it stays
        #       together better
        # We currently assume a block is properly utilized whenever it is >75%
        # of the size of a 'full' block. In normal operation, a block is
        # considered full when it hits 4MB of same-file content. So any block
        # >3MB is 'full enough'.
        # The only time this isn't true is when a given block has large-object
        # content. (a single file >4MB, etc.)
        # Under these circumstances, we allow a block to grow to
        # 2 x largest_content.  Which means that if a given block had a large
        # object, it may actually be under-utilized. However, given that this
        # is 'pack-on-the-fly' it is probably reasonable to not repack large
        # content blobs on-the-fly. Note that because we return False for all
        # 1-item blobs, we will repack them; we may wish to reevaluate our
        # treatment of large object blobs in the future.
        if block_size >= self._full_enough_block_size:
            return True
        # If a block is <3MB, it still may be considered 'full' if it contains
        # mixed content. The current rule is 2MB of mixed content is considered
        # full. So check to see if this block contains mixed content, and
        # set the threshold appropriately.
        common_prefix = None
        for factory in self._factories:
            prefix = factory.key[:-1]
            if common_prefix is None:
                common_prefix = prefix
            elif prefix != common_prefix:
                # Mixed content, check the size appropriately
                if block_size >= self._full_enough_mixed_block_size:
                    return True
                break
        # The content failed both the mixed check and the single-content check
        # so obviously it is not fully utilized
        # TODO: there is one other constraint that isn't being checked
        #       namely, that the entries in the block are in the appropriate
        #       order. For example, you could insert the entries in exactly
        #       reverse groupcompress order, and we would think that is ok.
        #       (all the right objects are in one group, and it is fully
        #       utilized, etc.) For now, we assume that case is rare,
        #       especially since we should always fetch in 'groupcompress'
        #       order.
        return False

    def _check_rebuild_block(self):
        action, last_byte_used, total_bytes_used = self._check_rebuild_action()
        if action is None:
            return
        if action == 'trim':
            self._trim_block(last_byte_used)
        elif action == 'rebuild':
            self._rebuild_block()
        else:
            raise ValueError('unknown rebuild action: %r' % (action,))

    def _wire_bytes(self):
        """Return a byte stream suitable for transmitting over the wire."""
        self._check_rebuild_block()
        # The outer block starts with:
        #   'groupcompress-block\n'
        #   <length of compressed key info>\n
        #   <length of uncompressed info>\n
        #   <length of gc block>\n
        #   <header bytes>
        #   <gc-block>
        lines = ['groupcompress-block\n']
        # The minimal info we need is the key, the start offset, and the
        # parents. The length and type are encoded in the record itself.
        # However, passing in the other bits makes it easier.  The list of
        # keys, and the start offset, the length
        # 1 line key
        # 1 line with parents, '' for ()
        # 1 line for start offset
        # 1 line for end byte
        header_lines = []
        for factory in self._factories:
            key_bytes = '\x00'.join(factory.key)
            parents = factory.parents
            if parents is None:
                parent_bytes = 'None:'
            else:
                parent_bytes = '\t'.join('\x00'.join(key) for key in parents)
            record_header = '%s\n%s\n%d\n%d\n' % (
                key_bytes, parent_bytes, factory._start, factory._end)
            header_lines.append(record_header)
            # TODO: Can we break the refcycle at this point and set
            #       factory._manager = None?
        header_bytes = ''.join(header_lines)
        del header_lines
        header_bytes_len = len(header_bytes)
        z_header_bytes = zlib.compress(header_bytes)
        del header_bytes
        z_header_bytes_len = len(z_header_bytes)
        block_bytes = self._block.to_bytes()
        lines.append('%d\n%d\n%d\n' % (z_header_bytes_len, header_bytes_len,
                                       len(block_bytes)))
        lines.append(z_header_bytes)
        lines.append(block_bytes)
        del z_header_bytes, block_bytes
        return ''.join(lines)
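
    # Wire layout sketch (hypothetical lengths):
    #   'groupcompress-block\n'
    #   '20\n'    - length of the zlib-compressed header
    #   '55\n'    - length of the uncompressed header
    #   '400\n'   - length of the gc block itself
    #   <20 bytes of compressed per-record header lines>
    #   <400 bytes of GroupCompressBlock, as produced by to_bytes()>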
3735.32.14 by John Arbash Meinel
Move the tests over to testing the LazyGroupContentManager object.
689
3735.32.17 by John Arbash Meinel
We now round-trip the wire_bytes.
690
    @classmethod
3735.32.18 by John Arbash Meinel
We now support generating a network stream.
691
    def from_bytes(cls, bytes):
3735.32.17 by John Arbash Meinel
We now round-trip the wire_bytes.
692
        # TODO: This does extra string copying, probably better to do it a
693
        #       different way
694
        (storage_kind, z_header_len, header_len,
695
         block_len, rest) = bytes.split('\n', 4)
696
        del bytes
697
        if storage_kind != 'groupcompress-block':
698
            raise ValueError('Unknown storage kind: %s' % (storage_kind,))
699
        z_header_len = int(z_header_len)
700
        if len(rest) < z_header_len:
701
            raise ValueError('Compressed header len shorter than all bytes')
702
        z_header = rest[:z_header_len]
703
        header_len = int(header_len)
704
        header = zlib.decompress(z_header)
705
        if len(header) != header_len:
706
            raise ValueError('invalid length for decompressed bytes')
707
        del z_header
708
        block_len = int(block_len)
709
        if len(rest) != z_header_len + block_len:
710
            raise ValueError('Invalid length for block')
711
        block_bytes = rest[z_header_len:]
712
        del rest
713
        # So now we have a valid GCB, we just need to parse the factories that
714
        # were sent to us
715
        header_lines = header.split('\n')
716
        del header
717
        last = header_lines.pop()
718
        if last != '':
719
            raise ValueError('header lines did not end with a trailing'
720
                             ' newline')
721
        if len(header_lines) % 4 != 0:
722
            raise ValueError('The header was not an even multiple of 4 lines')
723
        block = GroupCompressBlock.from_bytes(block_bytes)
724
        del block_bytes
725
        result = cls(block)
726
        for start in xrange(0, len(header_lines), 4):
727
            # intern()?
728
            key = tuple(header_lines[start].split('\x00'))
729
            parents_line = header_lines[start+1]
730
            if parents_line == 'None:':
731
                parents = None
732
            else:
733
                parents = tuple([tuple(segment.split('\x00'))
734
                                 for segment in parents_line.split('\t')
735
                                 if segment])
736
            start_offset = int(header_lines[start+2])
737
            end_offset = int(header_lines[start+3])
738
            result.add_factory(key, parents, start_offset, end_offset)
739
        return result
740
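For reference, a minimal sketch of the wire layout that from_bytes() above parses; the key, parents, offsets and block payload are invented for illustration:

import zlib

# Each factory contributes four header lines: key, parents, start, end.
key = ('file-id', 'rev-id')                    # hypothetical key
parents = (('file-id', 'parent-rev'),)         # hypothetical parents
header_bytes = ''.join([
    '\x00'.join(key) + '\n',
    '\t'.join('\x00'.join(p) for p in parents) + '\n',  # or 'None:\n'
    '0\n',    # start offset of the record in the uncompressed block
    '42\n',   # end offset
    ])
z_header = zlib.compress(header_bytes)
block_bytes = '<GroupCompressBlock.to_bytes() output>'  # placeholder
wire_bytes = 'groupcompress-block\n%d\n%d\n%d\n%s%s' % (
    len(z_header), len(header_bytes), len(block_bytes),
    z_header, block_bytes)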
3735.32.14 by John Arbash Meinel
Move the tests over to testing the LazyGroupContentManager object.
741
3735.32.18 by John Arbash Meinel
We now support generating a network stream.
742
def network_block_to_records(storage_kind, bytes, line_end):
743
    if storage_kind != 'groupcompress-block':
744
        raise ValueError('Unknown storage kind: %s' % (storage_kind,))
745
    manager = _LazyGroupContentManager.from_bytes(bytes)
746
    return manager.get_record_stream()
747
748
4241.6.6 by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core.
749
class _CommonGroupCompressor(object):
750
751
    def __init__(self):
752
        """Create a GroupCompressor."""
3735.40.17 by John Arbash Meinel
Change the attribute from 'lines' to 'chunks' to make it more
753
        self.chunks = []
3735.40.4 by John Arbash Meinel
Factor out tests that rely on the exact bytecode.
754
        self._last = None
4241.6.6 by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core.
755
        self.endpoint = 0
756
        self.input_bytes = 0
757
        self.labels_deltas = {}
3735.40.17 by John Arbash Meinel
Change the attribute from 'lines' to 'chunks' to make it more
758
        self._delta_index = None # Set by the children
3735.40.4 by John Arbash Meinel
Factor out tests that rely on the exact bytecode.
759
        self._block = GroupCompressBlock()
760
4241.6.6 by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core.
761
    def compress(self, key, bytes, expected_sha, nostore_sha=None, soft=False):
762
        """Compress lines with label key.
763
764
        :param key: A key tuple. It is stored in the output
765
            for identification of the text during decompression. If the last
766
            element is 'None' it is replaced with the sha1 of the text -
767
            e.g. sha1:xxxxxxx.
768
        :param bytes: The bytes to be compressed
769
        :param expected_sha: If non-None, the sha the lines are believed to
770
            have. During compression the sha is calculated; a mismatch will
771
            cause an error.
772
        :param nostore_sha: If the computed sha1 sum matches, we will raise
773
            ExistingContent rather than adding the text.
774
        :param soft: Do a 'soft' compression. This means that we require larger
775
            ranges to match to be considered for a copy command.
776
777
        :return: The sha1 of lines, the start and end offsets in the delta, and
778
            the type ('fulltext' or 'delta').
779
780
        :seealso: VersionedFiles.add_lines
781
        """
782
        if not bytes: # empty, like a dir entry, etc
783
            if nostore_sha == _null_sha1:
784
                raise errors.ExistingContent()
785
            return _null_sha1, 0, 0, 'fulltext'
786
        # we assume someone knew what they were doing when they passed it in
787
        if expected_sha is not None:
788
            sha1 = expected_sha
789
        else:
790
            sha1 = osutils.sha_string(bytes)
791
        if nostore_sha is not None:
792
            if sha1 == nostore_sha:
793
                raise errors.ExistingContent()
794
        if key[-1] is None:
795
            key = key[:-1] + ('sha1:' + sha1,)
796
797
        start, end, type = self._compress(key, bytes, len(bytes) / 2, soft)
798
        return sha1, start, end, type
799
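A short usage sketch (the key and text are invented, using the PythonGroupCompressor defined below):

compressor = PythonGroupCompressor()
text = 'one\ntwo\n'
sha1, start, end, kind = compressor.compress(('text-key',), text, None)
# The first text in a group is stored as a fulltext; later, similar
# texts may come back with kind == 'delta'.
stored, stored_sha1 = compressor.extract(('text-key',))
# stored == text and stored_sha1 == sha1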
800
    def _compress(self, key, bytes, max_delta_size, soft=False):
801
        """Compress lines with label key.
802
803
        :param key: A key tuple. It is stored in the output for identification
804
            of the text during decompression.
805
806
        :param bytes: The bytes to be compressed
807
808
        :param max_delta_size: The size above which we issue a fulltext instead
809
            of a delta.
810
811
        :param soft: Do a 'soft' compression. This means that we require larger
812
            ranges to match to be considered for a copy command.
813
814
        :return: The sha1 of lines, the start and end offsets in the delta, and
815
            the type ('fulltext' or 'delta').
816
        """
817
        raise NotImplementedError(self._compress)
818
3735.40.4 by John Arbash Meinel
Factor out tests that rely on the exact bytecode.
819
    def extract(self, key):
820
        """Extract a key previously added to the compressor.
821
822
        :param key: The key to extract.
823
        :return: A tuple of (bytes, sha1) for the requested key.
824
        """
3735.40.18 by John Arbash Meinel
Get rid of the entries dict in GroupCompressBlock.
825
        (start_byte, start_chunk, end_byte, end_chunk) = self.labels_deltas[key]
826
        delta_chunks = self.chunks[start_chunk:end_chunk]
3735.40.4 by John Arbash Meinel
Factor out tests that rely on the exact bytecode.
827
        stored_bytes = ''.join(delta_chunks)
3735.40.18 by John Arbash Meinel
Get rid of the entries dict in GroupCompressBlock.
828
        if stored_bytes[0] == 'f':
3735.40.4 by John Arbash Meinel
Factor out tests that rely on the exact bytecode.
829
            fulltext_len, offset = decode_base128_int(stored_bytes[1:10])
3735.40.18 by John Arbash Meinel
Get rid of the entries dict in GroupCompressBlock.
830
            data_len = fulltext_len + 1 + offset
831
            if data_len != len(stored_bytes):
3735.40.4 by John Arbash Meinel
Factor out tests that rely on the exact bytecode.
832
                raise ValueError('Index claimed fulltext len, but stored bytes'
833
                                 ' claim %s != %s'
3735.40.18 by John Arbash Meinel
Get rid of the entries dict in GroupCompressBlock.
834
                                 % (len(stored_bytes), data_len))
3735.40.4 by John Arbash Meinel
Factor out tests that rely on the exact bytecode.
835
            bytes = stored_bytes[offset + 1:]
836
        else:
837
            # XXX: This is inefficient at best
3735.40.18 by John Arbash Meinel
Get rid of the entries dict in GroupCompressBlock.
838
            source = ''.join(self.chunks[:start_chunk])
3735.40.4 by John Arbash Meinel
Factor out tests that rely on the exact bytecode.
839
            if stored_bytes[0] != 'd':
3735.40.18 by John Arbash Meinel
Get rid of the entries dict in GroupCompressBlock.
840
                raise ValueError('Unknown content kind, bytes claim %s'
3735.40.4 by John Arbash Meinel
Factor out tests that rely on the exact bytecode.
841
                                 % (stored_bytes[0],))
842
            delta_len, offset = decode_base128_int(stored_bytes[1:10])
3735.40.18 by John Arbash Meinel
Get rid of the entries dict in GroupCompressBlock.
843
            data_len = delta_len + 1 + offset
844
            if data_len != len(stored_bytes):
3735.40.4 by John Arbash Meinel
Factor out tests that rely on the exact bytecode.
845
                raise ValueError('Index claimed delta len, but stored bytes'
846
                                 ' claim %s != %s'
3735.40.18 by John Arbash Meinel
Get rid of the entries dict in GroupCompressBlock.
847
                                 % (len(stored_bytes), data_len))
3735.40.4 by John Arbash Meinel
Factor out tests that rely on the exact bytecode.
848
            bytes = apply_delta(source, stored_bytes[offset + 1:])
849
        bytes_sha1 = osutils.sha_string(bytes)
3735.40.18 by John Arbash Meinel
Get rid of the entries dict in GroupCompressBlock.
850
        return bytes, bytes_sha1
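The 'f'/'d' markers above prefix their payload with a base-128 varint length. The real encode_base128_int/decode_base128_int helpers are defined elsewhere in this module; a minimal compatible sketch:

def encode_base128_int(val):
    # 7 bits per byte, least significant first; the high bit is set on
    # every byte except the last.
    chunks = []
    while val >= 0x80:
        chunks.append(chr((val | 0x80) & 0xFF))
        val >>= 7
    chunks.append(chr(val))
    return ''.join(chunks)

def decode_base128_int(data):
    # Returns (value, number_of_bytes_consumed), matching the calls above.
    val = shift = offset = 0
    bval = ord(data[offset])
    while bval >= 0x80:
        val |= (bval & 0x7F) << shift
        shift += 7
        offset += 1
        bval = ord(data[offset])
    val |= bval << shift
    return val, offset + 1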
3735.40.4 by John Arbash Meinel
Factor out tests that rely on the exact bytecode.
851
3735.40.17 by John Arbash Meinel
Change the attribute from 'lines' to 'chunks' to make it more
852
    def flush(self):
853
        """Finish this group, creating a formatted stream.
854
855
        After calling this, the compressor should no longer be used.
856
        """
4398.6.2 by John Arbash Meinel
Add a TODO, marking the code that causes us to peak at 2x memory consumption
857
        # TODO: this causes us to 'bloat' to 2x the size of content in the
858
        #       group. This has an impact for 'commit' of large objects.
859
        #       One possibility is to use self._content_chunks, and be lazy and
860
        #       only fill out self._content as a full string when we actually
861
        #       need it. That would at least drop the peak memory consumption
862
        #       for 'commit' down to ~1x the size of the largest file, at a
863
        #       cost of increased complexity within this code. 2x is still <<
864
        #       3x the size of the largest file, so we are doing ok.
4469.1.2 by John Arbash Meinel
The only caller already knows the content length, so make the api such that
865
        self._block.set_chunked_content(self.chunks, self.endpoint)
3735.40.17 by John Arbash Meinel
Change the attribute from 'lines' to 'chunks' to make it more
866
        self.chunks = None
867
        self._delta_index = None
868
        return self._block
869
3735.40.4 by John Arbash Meinel
Factor out tests that rely on the exact bytecode.
870
    def pop_last(self):
871
        """Call this if you want to 'revoke' the last compression.
872
873
        After this, the data structures will be rolled back, but you cannot do
874
        more compression.
875
        """
876
        self._delta_index = None
3735.40.17 by John Arbash Meinel
Change the attribute from 'lines' to 'chunks' to make it more
877
        del self.chunks[self._last[0]:]
3735.40.4 by John Arbash Meinel
Factor out tests that rely on the exact bytecode.
878
        self.endpoint = self._last[1]
879
        self._last = None
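A hypothetical caller-side sketch ('compressor', 'key', 'text' and 'max_group_size' are assumed): pop_last() pairs with flush() when the last text pushes a group over its size cap:

sha1, start, end, kind = compressor.compress(key, text, None)
if compressor.endpoint > max_group_size:
    compressor.pop_last()       # roll back the last insertion
    block = compressor.flush()  # finish this group; re-add 'text' to a
                                # fresh compressor for the next group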
4241.6.6 by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core.
880
881
    def ratio(self):
882
        """Return the overall compression ratio."""
883
        return float(self.input_bytes) / float(self.endpoint)
884
885
886
class PythonGroupCompressor(_CommonGroupCompressor):
887
3735.40.2 by John Arbash Meinel
Add a groupcompress.encode_copy_instruction function.
888
    def __init__(self):
4241.6.6 by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core.
889
        """Create a GroupCompressor.
890
891
        Used only if the pyrex version is not available.
892
        """
893
        super(PythonGroupCompressor, self).__init__()
3735.40.17 by John Arbash Meinel
Change the attribute from 'lines' to 'chunks' to make it more
894
        self._delta_index = LinesDeltaIndex([])
895
        # The actual content is managed by LinesDeltaIndex
896
        self.chunks = self._delta_index.lines
4241.6.6 by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core.
897
898
    def _compress(self, key, bytes, max_delta_size, soft=False):
899
        """see _CommonGroupCompressor._compress"""
900
        input_len = len(bytes)
3735.40.2 by John Arbash Meinel
Add a groupcompress.encode_copy_instruction function.
901
        new_lines = osutils.split_lines(bytes)
4241.6.6 by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core.
902
        out_lines, index_lines = self._delta_index.make_delta(
903
            new_lines, bytes_length=input_len, soft=soft)
3735.40.4 by John Arbash Meinel
Factor out tests that rely on the exact bytecode.
904
        delta_length = sum(map(len, out_lines))
4241.6.6 by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core.
905
        if delta_length > max_delta_size:
3735.40.4 by John Arbash Meinel
Factor out tests that rely on the exact bytecode.
906
            # The delta is longer than the fulltext, insert a fulltext
907
            type = 'fulltext'
4241.6.6 by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core.
908
            out_lines = ['f', encode_base128_int(input_len)]
3735.40.4 by John Arbash Meinel
Factor out tests that rely on the exact bytecode.
909
            out_lines.extend(new_lines)
910
            index_lines = [False, False]
911
            index_lines.extend([True] * len(new_lines))
912
        else:
913
            # this is a worthy delta, output it
914
            type = 'delta'
915
            out_lines[0] = 'd'
916
            # Update the delta_length to include those two encoded integers
917
            out_lines[1] = encode_base128_int(delta_length)
4241.6.6 by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core.
918
        # Before insertion
919
        start = self.endpoint
920
        chunk_start = len(self.chunks)
4241.17.2 by John Arbash Meinel
PythonGroupCompressor needs to support pop_last() properly.
921
        self._last = (chunk_start, self.endpoint)
3735.40.17 by John Arbash Meinel
Change the attribute from 'lines' to 'chunks' to make it more
922
        self._delta_index.extend_lines(out_lines, index_lines)
923
        self.endpoint = self._delta_index.endpoint
4241.6.6 by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core.
924
        self.input_bytes += input_len
925
        chunk_end = len(self.chunks)
3735.40.18 by John Arbash Meinel
Get rid of the entries dict in GroupCompressBlock.
926
        self.labels_deltas[key] = (start, chunk_start,
927
                                   self.endpoint, chunk_end)
4241.6.6 by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core.
928
        return start, self.endpoint, type
929
930
931
class PyrexGroupCompressor(_CommonGroupCompressor):
0.17.3 by Robert Collins
new encoder, allows non monotonically increasing sequence matches for moar compression.
932
    """Produce a serialised group of compressed texts.
0.23.6 by John Arbash Meinel
Start stripping out the actual GroupCompressor
933
0.17.3 by Robert Collins
new encoder, allows non monotonically increasing sequence matches for moar compression.
934
    It contains code very similar to SequenceMatcher because of having a similar
935
    task. However some key differences apply:
936
     - there is no junk, we want a minimal edit not a human readable diff.
937
     - we don't filter very common lines (because we don't know where a good
938
       range will start, and after the first text we want to be emitting minmal
939
       edits only.
940
     - we chain the left side, not the right side
941
     - we incrementally update the adjacency matrix as new lines are provided.
942
     - we look for matches in all of the left side, so the routine which does
943
       the analagous task of find_longest_match does not need to filter on the
944
       left side.
945
    """
0.17.2 by Robert Collins
Core proof of concept working.
946
3735.32.19 by John Arbash Meinel
Get rid of the 'delta' flag to GroupCompressor. It didn't do anything anyway.
947
    def __init__(self):
3735.40.4 by John Arbash Meinel
Factor out tests that rely on the exact bytecode.
948
        super(PyrexGroupCompressor, self).__init__()
4241.6.6 by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core.
949
        self._delta_index = DeltaIndex()
0.23.6 by John Arbash Meinel
Start stripping out the actual GroupCompressor
950
4241.6.6 by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core.
951
    def _compress(self, key, bytes, max_delta_size, soft=False):
952
        """see _CommonGroupCompressor._compress"""
0.23.52 by John Arbash Meinel
Use the max_delta flag.
953
        input_len = len(bytes)
0.23.12 by John Arbash Meinel
Add a 'len:' field to the data.
954
        # By having action/label/sha1/len, we can parse the group if the index
955
        # was ever destroyed, we have the key in 'label', we know the final
956
        # bytes are valid from sha1, and we know where to find the end of this
957
        # record because of 'len'. (the delta record itself will store the
958
        # total length for the expanded record)
0.23.13 by John Arbash Meinel
Factor out the ability to have/not have labels.
959
        # 'len: %d\n' costs approximately 1% increase in total data
960
        # Having the labels at all costs us 9-10% increase, 38% increase for
961
        # inventory pages, and 5.8% increase for text pages
0.25.6 by John Arbash Meinel
(tests broken) implement the basic ability to have a separate header
962
        # new_chunks = ['label:%s\nsha1:%s\n' % (label, sha1)]
0.23.33 by John Arbash Meinel
Fix a bug when handling multiple large-range copies.
963
        if self._delta_index._source_offset != self.endpoint:
964
            raise AssertionError('_source_offset != endpoint'
965
                ' somehow the DeltaIndex got out of sync with'
966
                ' the output lines')
0.23.52 by John Arbash Meinel
Use the max_delta flag.
967
        delta = self._delta_index.make_delta(bytes, max_delta_size)
968
        if delta is None:
0.25.10 by John Arbash Meinel
Play around with detecting compression breaks.
969
            type = 'fulltext'
0.17.36 by John Arbash Meinel
Adding a mini-len to the delta/fulltext bytes
970
            enc_length = encode_base128_int(len(bytes))
971
            len_mini_header = 1 + len(enc_length)
972
            self._delta_index.add_source(bytes, len_mini_header)
973
            new_chunks = ['f', enc_length, bytes]
0.23.9 by John Arbash Meinel
We now basically have full support for using diff-delta as the compressor.
974
        else:
0.25.10 by John Arbash Meinel
Play around with detecting compression breaks.
975
            type = 'delta'
0.17.36 by John Arbash Meinel
Adding a mini-len to the delta/fulltext bytes
976
            enc_length = encode_base128_int(len(delta))
977
            len_mini_header = 1 + len(enc_length)
978
            new_chunks = ['d', enc_length, delta]
3735.38.5 by John Arbash Meinel
A bit of testing showed that _FAST=True was actually *slower*.
979
            self._delta_index.add_delta_source(delta, len_mini_header)
3735.40.18 by John Arbash Meinel
Get rid of the entries dict in GroupCompressBlock.
980
        # Before insertion
981
        start = self.endpoint
982
        chunk_start = len(self.chunks)
983
        # Now output these bytes
3735.40.17 by John Arbash Meinel
Change the attribute from 'lines' to 'chunks' to make it more
984
        self._output_chunks(new_chunks)
0.23.6 by John Arbash Meinel
Start stripping out the actual GroupCompressor
985
        self.input_bytes += input_len
3735.40.18 by John Arbash Meinel
Get rid of the entries dict in GroupCompressBlock.
986
        chunk_end = len(self.chunks)
987
        self.labels_deltas[key] = (start, chunk_start,
988
                                   self.endpoint, chunk_end)
0.23.29 by John Arbash Meinel
Forgot to add the delta bytes to the index objects.
989
        if self._delta_index._source_offset != self.endpoint:
990
            raise AssertionError('the delta index is out of sync '
991
                'with the output lines %s != %s'
992
                % (self._delta_index._source_offset, self.endpoint))
4241.6.6 by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core.
993
        return start, self.endpoint, type
0.17.2 by Robert Collins
Core proof of concept working.
994
3735.40.17 by John Arbash Meinel
Change the attribute from 'lines' to 'chunks' to make it more
995
    def _output_chunks(self, new_chunks):
0.23.9 by John Arbash Meinel
We now basically have full support for using diff-delta as the compressor.
996
        """Output some chunks.
997
998
        :param new_chunks: The chunks to output.
999
        """
3735.40.17 by John Arbash Meinel
Change the attribute from 'lines' to 'chunks' to make it more
1000
        self._last = (len(self.chunks), self.endpoint)
0.17.12 by Robert Collins
Encode copy ranges as bytes not lines, halves decode overhead.
1001
        endpoint = self.endpoint
3735.40.17 by John Arbash Meinel
Change the attribute from 'lines' to 'chunks' to make it more
1002
        self.chunks.extend(new_chunks)
0.23.9 by John Arbash Meinel
We now basically have full support for using diff-delta as the compressor.
1003
        endpoint += sum(map(len, new_chunks))
0.17.12 by Robert Collins
Encode copy ranges as bytes not lines, halves decode overhead.
1004
        self.endpoint = endpoint
0.17.3 by Robert Collins
new encoder, allows non monotonically increasing sequence matches for moar compression.
1005
0.17.11 by Robert Collins
Add extraction of just-compressed texts to support converting from knits.
1006
4465.2.4 by Aaron Bentley
Switch between warn and raise depending on inconsistent_fatal.
1007
def make_pack_factory(graph, delta, keylength, inconsistency_fatal=True):
0.17.1 by Robert Collins
Starting point. Interface tests hooked up and failing.
1008
    """Create a factory for creating a pack based groupcompress.
1009
1010
    This is only functional enough to run interface tests; it doesn't try to
1011
    provide a full pack environment.
3735.31.2 by John Arbash Meinel
Cleanup trailing whitespace, get test_source to pass by removing asserts.
1012
0.17.1 by Robert Collins
Starting point. Interface tests hooked up and failing.
1013
    :param graph: Store a graph.
1014
    :param delta: Delta compress contents.
1015
    :param keylength: How long keys should be.
1016
    """
1017
    def factory(transport):
3735.32.2 by John Arbash Meinel
The 'delta' flag has no effect on the content (all GC is delta'd),
1018
        parents = graph
0.17.1 by Robert Collins
Starting point. Interface tests hooked up and failing.
1019
        ref_length = 0
1020
        if graph:
0.20.29 by Ian Clatworthy
groupcompress.py code cleanups
1021
            ref_length = 1
0.17.7 by Robert Collins
Update for current index2 changes.
1022
        graph_index = BTreeBuilder(reference_lists=ref_length,
0.17.1 by Robert Collins
Starting point. Interface tests hooked up and failing.
1023
            key_elements=keylength)
1024
        stream = transport.open_write_stream('newpack')
1025
        writer = pack.ContainerWriter(stream.write)
1026
        writer.begin()
1027
        index = _GCGraphIndex(graph_index, lambda:True, parents=parents,
4465.2.4 by Aaron Bentley
Switch between warn and raise depending on inconsistent_fatal.
1028
            add_callback=graph_index.add_nodes,
1029
            inconsistency_fatal=inconsistency_fatal)
4343.3.21 by John Arbash Meinel
Implement get_missing_parents in terms of _KeyRefs.
1030
        access = knit._DirectPackAccess({})
0.17.1 by Robert Collins
Starting point. Interface tests hooked up and failing.
1031
        access.set_writer(writer, graph_index, (transport, 'newpack'))
0.17.2 by Robert Collins
Core proof of concept working.
1032
        result = GroupCompressVersionedFiles(index, access, delta)
0.17.1 by Robert Collins
Starting point. Interface tests hooked up and failing.
1033
        result.stream = stream
1034
        result.writer = writer
1035
        return result
1036
    return factory
1037
1038
1039
def cleanup_pack_group(versioned_files):
0.17.23 by Robert Collins
Only decompress as much of the zlib data as is needed to read the text recipe.
1040
    versioned_files.writer.end()
0.17.1 by Robert Collins
Starting point. Interface tests hooked up and failing.
1041
    versioned_files.stream.close()
1042
1043
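A usage sketch, assuming a writable 'transport' object (e.g. from bzrlib.transport.get_transport):

factory = make_pack_factory(graph=True, delta=False, keylength=1)
vf = factory(transport)            # opens 'newpack' on the transport
vf.add_lines(('a-key',), (), ['some text\n'])
cleanup_pack_group(vf)             # end the pack writer, close the stream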
4634.3.3 by Andrew Bennetts
Fix bug, add docstrings, improve clarity.
1044
class _BatchingBlockFetcher(object):
1045
    """Fetch group compress blocks in batches.
1046
    
1047
    :ivar total_bytes: int of expected number of bytes needed to fetch the
1048
        currently pending batch.
1049
    """
4634.3.1 by Andrew Bennetts
Add some batching to _get_remaining_record_stream.
1050
1051
    def __init__(self, gcvf, locations):
1052
        self.gcvf = gcvf
1053
        self.locations = locations
1054
        self.keys = []
4634.3.14 by Andrew Bennetts
Some changes prompted by John's review.
1055
        self.batch_memos = {}
1056
        self.memos_to_get = []
4634.3.1 by Andrew Bennetts
Add some batching to _get_remaining_record_stream.
1057
        self.total_bytes = 0
1058
        self.last_read_memo = None
1059
        self.manager = None
1060
1061
    def add_key(self, key):
4634.3.14 by Andrew Bennetts
Some changes prompted by John's review.
1062
        """Add another to key to fetch.
1063
        
1064
        :return: The estimated number of bytes needed to fetch the batch so
1065
            far.
1066
        """
4634.3.1 by Andrew Bennetts
Add some batching to _get_remaining_record_stream.
1067
        self.keys.append(key)
1068
        index_memo, _, _, _ = self.locations[key]
4634.3.3 by Andrew Bennetts
Fix bug, add docstrings, improve clarity.
1069
        read_memo = index_memo[0:3]
4634.3.14 by Andrew Bennetts
Some changes prompted by John's review.
1070
        # Three possibilities for this read_memo:
1071
        #  - it's already part of this batch; or
1072
        #  - it's not yet part of this batch, but is already cached; or
1073
        #  - it's not yet part of this batch and will need to be fetched.
1074
        if read_memo in self.batch_memos:
1075
            # This read memo is already in this batch.
4634.3.16 by Andrew Bennetts
Fix buglets.
1076
            return self.total_bytes
4634.3.14 by Andrew Bennetts
Some changes prompted by John's review.
1077
        try:
1078
            cached_block = self.gcvf._group_cache[read_memo]
1079
        except KeyError:
1080
            # This read memo is new to this batch, and the data isn't cached
1081
            # either.
1082
            self.batch_memos[read_memo] = None
1083
            self.memos_to_get.append(read_memo)
4634.3.12 by Andrew Bennetts
Bump up the batch size to 256k, and fix the batch size estimate to use the length of the raw bytes that will be fetched (not the uncompressed bytes).
1084
            byte_length = read_memo[2]
1085
            self.total_bytes += byte_length
4634.3.14 by Andrew Bennetts
Some changes prompted by John's review.
1086
        else:
1087
            # This read memo is new to this batch, but cached.
1088
            # Keep a reference to the cached block in batch_memos because it's
1089
            # certain that we'll use it when this batch is processed, but
1090
            # there's a risk that it would fall out of _group_cache between now
1091
            # and then.
1092
            self.batch_memos[read_memo] = cached_block
1093
        return self.total_bytes
4634.3.1 by Andrew Bennetts
Add some batching to _get_remaining_record_stream.
1094
        
4634.3.13 by Andrew Bennetts
Rename empty_manager to _flush_manager.
1095
    def _flush_manager(self):
4634.3.3 by Andrew Bennetts
Fix bug, add docstrings, improve clarity.
1096
        if self.manager is not None:
1097
            for factory in self.manager.get_record_stream():
1098
                yield factory
1099
            self.manager = None
4634.3.4 by Andrew Bennetts
Decruftify a little more.
1100
            self.last_read_memo = None
4634.3.3 by Andrew Bennetts
Fix bug, add docstrings, improve clarity.
1101
1102
    def yield_factories(self, full_flush=False):
4634.3.5 by Andrew Bennetts
More docstrings.
1103
        """Yield factories for keys added since the last yield.  They will be
1104
        returned in the order they were added via add_key.
1105
        
1106
        :param full_flush: by default, some results may be held back because
1107
            they may belong to the next batch.  If full_flush is True, then
1108
            all results are returned.
1109
        """
4634.3.3 by Andrew Bennetts
Fix bug, add docstrings, improve clarity.
1110
        if self.manager is None and not self.keys:
1111
            return
4634.3.14 by Andrew Bennetts
Some changes prompted by John's review.
1112
        # Fetch all memos in this batch.
1113
        blocks = self.gcvf._get_blocks(self.memos_to_get)
1114
        # Turn blocks into factories and yield them.
1115
        memos_to_get_stack = list(self.memos_to_get)
1116
        memos_to_get_stack.reverse()
4634.3.2 by Andrew Bennetts
Stop using (and remove) unnecessary key_batch var that was causing a bug.
1117
        for key in self.keys:
4634.3.3 by Andrew Bennetts
Fix bug, add docstrings, improve clarity.
1118
            index_memo, _, parents, _ = self.locations[key]
1119
            read_memo = index_memo[:3]
4634.3.4 by Andrew Bennetts
Decruftify a little more.
1120
            if self.last_read_memo != read_memo:
4634.3.1 by Andrew Bennetts
Add some batching to _get_remaining_record_stream.
1121
                # We are starting a new block. If we have a
1122
                # manager, we have found everything that fits for
1123
                # now, so yield records
4634.3.13 by Andrew Bennetts
Rename empty_manager to _flush_manager.
1124
                for factory in self._flush_manager():
4634.3.3 by Andrew Bennetts
Fix bug, add docstrings, improve clarity.
1125
                    yield factory
4634.3.14 by Andrew Bennetts
Some changes prompted by John's review.
1126
                # Now start a new manager.
1127
                if memos_to_get_stack and memos_to_get_stack[-1] == read_memo:
1128
                    # The next block from _get_blocks will be the block we
1129
                    # need.
1130
                    block_read_memo, block = blocks.next()
1131
                    if block_read_memo != read_memo:
1132
                        raise AssertionError(
4634.3.16 by Andrew Bennetts
Fix buglets.
1133
                            "block_read_memo out of sync with read_memo"
1134
                            "(%r != %r)" % (block_read_memo, read_memo))
4634.3.14 by Andrew Bennetts
Some changes prompted by John's review.
1135
                    self.batch_memos[read_memo] = block
1136
                    memos_to_get_stack.pop()
1137
                else:
1138
                    block = self.batch_memos[read_memo]
4634.3.3 by Andrew Bennetts
Fix bug, add docstrings, improve clarity.
1139
                self.manager = _LazyGroupContentManager(block)
4634.3.4 by Andrew Bennetts
Decruftify a little more.
1140
                self.last_read_memo = read_memo
4634.3.1 by Andrew Bennetts
Add some batching to _get_remaining_record_stream.
1141
            start, end = index_memo[3:5]
4634.3.3 by Andrew Bennetts
Fix bug, add docstrings, improve clarity.
1142
            self.manager.add_factory(key, parents, start, end)
4634.3.1 by Andrew Bennetts
Add some batching to _get_remaining_record_stream.
1143
        if full_flush:
4634.3.13 by Andrew Bennetts
Rename empty_manager to _flush_manager.
1144
            for factory in self._flush_manager():
4634.3.3 by Andrew Bennetts
Fix bug, add docstrings, improve clarity.
1145
                yield factory
4634.3.1 by Andrew Bennetts
Add some batching to _get_remaining_record_stream.
1146
        del self.keys[:]
4634.3.14 by Andrew Bennetts
Some changes prompted by John's review.
1147
        self.batch_memos.clear()
1148
        del self.memos_to_get[:]
4634.3.1 by Andrew Bennetts
Add some batching to _get_remaining_record_stream.
1149
        self.total_bytes = 0
1150
1151
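A sketch of the intended driving loop, mirroring how _get_remaining_record_stream below uses this class; 'gcvf', 'locations' and 'keys' are assumed, and the 256k default echoes the batch size used by the caller:

def stream_records(gcvf, locations, keys, batch_size=256 * 1024):
    # Queue keys until the estimated fetch size crosses batch_size,
    # then fetch the queued blocks and emit their record factories.
    batcher = _BatchingBlockFetcher(gcvf, locations)
    for key in keys:
        if batcher.add_key(key) > batch_size:
            for factory in batcher.yield_factories():
                yield factory
    for factory in batcher.yield_factories(full_flush=True):
        yield factory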
0.17.1 by Robert Collins
Starting point. Interface tests hooked up and failing.
1152
class GroupCompressVersionedFiles(VersionedFiles):
1153
    """A group-compress based VersionedFiles implementation."""
1154
4634.35.10 by Andrew Bennetts
Move tests to per_repository_chk.
1155
    def __init__(self, index, access, delta=True, _unadded_refs=None):
0.17.1 by Robert Collins
Starting point. Interface tests hooked up and failing.
1156
        """Create a GroupCompressVersionedFiles object.
1157
1158
        :param index: The index object storing access and graph data.
1159
        :param access: The access object storing raw data.
0.17.2 by Robert Collins
Core proof of concept working.
1160
        :param delta: Whether to delta compress or just entropy compress.
4634.35.10 by Andrew Bennetts
Move tests to per_repository_chk.
1161
        :param _unadded_refs: private parameter, don't use.
0.17.2 by Robert Collins
Core proof of concept working.
1162
        """
1163
        self._index = index
1164
        self._access = access
1165
        self._delta = delta
4634.35.10 by Andrew Bennetts
Move tests to per_repository_chk.
1166
        if _unadded_refs is None:
1167
            _unadded_refs = {}
1168
        self._unadded_refs = _unadded_refs
0.17.24 by Robert Collins
Add a group cache to decompression, 5 times faster than knit at decompression when accessing everything in a group.
1169
        self._group_cache = LRUSizeCache(max_size=50*1024*1024)
3735.31.7 by John Arbash Meinel
Start bringing in stacking support for Groupcompress repos.
1170
        self._fallback_vfs = []
0.17.2 by Robert Collins
Core proof of concept working.
1171
4634.35.1 by Andrew Bennetts
Check for all necessary chk nodes, not just roots.
1172
    def without_fallbacks(self):
4634.35.10 by Andrew Bennetts
Move tests to per_repository_chk.
1173
        """Return a clone of this object without any fallbacks configured."""
1174
        return GroupCompressVersionedFiles(self._index, self._access,
1175
            self._delta, _unadded_refs=dict(self._unadded_refs))
4634.35.1 by Andrew Bennetts
Check for all necessary chk nodes, not just roots.
1176
0.17.2 by Robert Collins
Core proof of concept working.
1177
    def add_lines(self, key, parents, lines, parent_texts=None,
1178
        left_matching_blocks=None, nostore_sha=None, random_id=False,
1179
        check_content=True):
1180
        """Add a text to the store.
1181
1182
        :param key: The key tuple of the text to add.
1183
        :param parents: The parents key tuples of the text to add.
1184
        :param lines: A list of lines. Each line must be a bytestring. And all
1185
            of them except the last must be terminated with \n and contain no
1186
            other \n's. The last line may either contain no \n's or a single
1187
            terminating \n. If the lines list does not meet this constraint the add
1188
            routine may error or may succeed - but you will be unable to read
1189
            the data back accurately. (Checking the lines have been split
1190
            correctly is expensive and extremely unlikely to catch bugs so it
1191
            is not done at runtime unless check_content is True.)
3735.31.2 by John Arbash Meinel
Cleanup trailing whitespace, get test_source to pass by removing asserts.
1192
        :param parent_texts: An optional dictionary containing the opaque
0.17.2 by Robert Collins
Core proof of concept working.
1193
            representations of some or all of the parents of version_id to
1194
            allow delta optimisations.  VERY IMPORTANT: the texts must be those
1195
            returned by add_lines or data corruption can be caused.
1196
        :param left_matching_blocks: a hint about which areas are common
1197
            between the text and its left-hand-parent.  The format is
1198
            the SequenceMatcher.get_matching_blocks format.
1199
        :param nostore_sha: Raise ExistingContent and do not add the lines to
1200
            the versioned file if the digest of the lines matches this.
1201
        :param random_id: If True a random id has been selected rather than
1202
            an id determined by some deterministic process such as a converter
1203
            from a foreign VCS. When True the backend may choose not to check
1204
            for uniqueness of the resulting key within the versioned file, so
1205
            this should only be done when the result is expected to be unique
1206
            anyway.
1207
        :param check_content: If True, the lines supplied are verified to be
1208
            bytestrings that are correctly formed lines.
1209
        :return: The text sha1, the number of bytes in the text, and an opaque
1210
                 representation of the inserted version which can be provided
1211
                 back to future add_lines calls in the parent_texts dictionary.
1212
        """
1213
        self._index._check_write_ok()
1214
        self._check_add(key, lines, random_id, check_content)
1215
        if parents is None:
1216
            # The caller might pass None if there is no graph data, but kndx
1217
            # indexes can't directly store that, so we give them
1218
            # an empty tuple instead.
1219
            parents = ()
1220
        # double handling for now. Make it work until then.
0.20.5 by John Arbash Meinel
Finish the Fulltext => Chunked conversions so that we work in the more-efficient Chunks.
1221
        length = sum(map(len, lines))
1222
        record = ChunkedContentFactory(key, parents, None, lines)
3735.31.12 by John Arbash Meinel
Push nostore_sha down through the stack.
1223
        sha1 = list(self._insert_record_stream([record], random_id=random_id,
1224
                                               nostore_sha=nostore_sha))[0]
0.20.5 by John Arbash Meinel
Finish the Fulltext => Chunked conversions so that we work in the more-efficient Chunks.
1225
        return sha1, length, None
0.17.2 by Robert Collins
Core proof of concept working.
1226
4398.8.6 by John Arbash Meinel
Switch the api from VF.add_text to VF._add_text and trim some extra 'features'.
1227
    def _add_text(self, key, parents, text, nostore_sha=None, random_id=False):
4398.9.1 by Matt Nordhoff
Update _add_text docstrings that still referred to add_text.
1228
        """See VersionedFiles._add_text()."""
4398.8.4 by John Arbash Meinel
Implement add_text for GroupCompressVersionedFiles
1229
        self._index._check_write_ok()
1230
        self._check_add(key, None, random_id, check_content=False)
1231
        if text.__class__ is not str:
1232
            raise errors.BzrBadParameterUnicode("text")
1233
        if parents is None:
1234
            # The caller might pass None if there is no graph data, but kndx
1235
            # indexes can't directly store that, so we give them
1236
            # an empty tuple instead.
1237
            parents = ()
1238
        # double handling for now. Make it work until then.
1239
        length = len(text)
1240
        record = FulltextContentFactory(key, parents, None, text)
1241
        sha1 = list(self._insert_record_stream([record], random_id=random_id,
1242
                                               nostore_sha=nostore_sha))[0]
1243
        return sha1, length, None
1244
3735.31.7 by John Arbash Meinel
Start bringing in stacking support for Groupcompress repos.
1245
    def add_fallback_versioned_files(self, a_versioned_files):
1246
        """Add a source of texts for texts not present in this knit.
1247
1248
        :param a_versioned_files: A VersionedFiles object.
1249
        """
1250
        self._fallback_vfs.append(a_versioned_files)
1251
0.17.4 by Robert Collins
Annotate.
1252
    def annotate(self, key):
1253
        """See VersionedFiles.annotate."""
4454.3.58 by John Arbash Meinel
Enable the new annotator for gc format repos.
1254
        ann = annotate.Annotator(self)
1255
        return ann.annotate_flat(key)
0.17.4 by Robert Collins
Annotate.
1256
4454.3.65 by John Arbash Meinel
Tests that VF implementations support .get_annotator()
1257
    def get_annotator(self):
1258
        return annotate.Annotator(self)
1259
4332.3.28 by Robert Collins
Start checking file texts in a single pass.
1260
    def check(self, progress_bar=None, keys=None):
0.17.5 by Robert Collins
nograph tests completely passing.
1261
        """See VersionedFiles.check()."""
4332.3.28 by Robert Collins
Start checking file texts in a single pass.
1262
        if keys is None:
1263
            keys = self.keys()
1264
            for record in self.get_record_stream(keys, 'unordered', True):
1265
                record.get_bytes_as('fulltext')
1266
        else:
1267
            return self.get_record_stream(keys, 'unordered', True)
0.17.5 by Robert Collins
nograph tests completely passing.
1268
4744.2.5 by John Arbash Meinel
Change to a generic 'VersionedFiles.clear_cache()' api.
1269
    def clear_cache(self):
1270
        """See VersionedFiles.clear_cache()"""
1271
        self._group_cache.clear()
4744.2.7 by John Arbash Meinel
Add .clear_cache() members to GraphIndexBuilder and BTreeBuilder.
1272
        self._index._graph_index.clear_cache()
4679.9.19 by John Arbash Meinel
Interning the start and stop group positions saves another 7MB peak mem. \o/
1273
        self._index._int_cache.clear()
4744.2.5 by John Arbash Meinel
Change to a generic 'VersionedFiles.clear_cache()' api.
1274
0.17.2 by Robert Collins
Core proof of concept working.
1275
    def _check_add(self, key, lines, random_id, check_content):
1276
        """check that version_id and lines are safe to add."""
1277
        version_id = key[-1]
0.17.26 by Robert Collins
Working better --gc-plain-chk.
1278
        if version_id is not None:
4241.6.6 by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core.
1279
            if osutils.contains_whitespace(version_id):
3735.31.1 by John Arbash Meinel
Bring the groupcompress plugin into the brisbane-core branch.
1280
                raise errors.InvalidRevisionId(version_id, self)
0.17.2 by Robert Collins
Core proof of concept working.
1281
        self.check_not_reserved_id(version_id)
1282
        # TODO: If random_id==False and the key is already present, we should
1283
        # probably check that the existing content is identical to what is
1284
        # being inserted, and otherwise raise an exception.  This would make
1285
        # the bundle code simpler.
1286
        if check_content:
1287
            self._check_lines_not_unicode(lines)
1288
            self._check_lines_are_lines(lines)
1289
4593.5.20 by John Arbash Meinel
Expose KnownGraph off of VersionedFiles
1290
    def get_known_graph_ancestry(self, keys):
1291
        """Get a KnownGraph instance with the ancestry of keys."""
4634.11.2 by John Arbash Meinel
Teach VF.get_known_graph_ancestry to go to fallbacks (bug #419241)
1292
        # Note that this is identical to
1293
        # KnitVersionedFiles.get_known_graph_ancestry, but they don't share
1294
        # ancestry.
4634.11.3 by John Arbash Meinel
Implement _GCGraphIndex.find_ancestry()
1295
        parent_map, missing_keys = self._index.find_ancestry(keys)
4634.11.2 by John Arbash Meinel
Teach VF.get_known_graph_ancestry to go to fallbacks (bug #419241)
1296
        for fallback in self._fallback_vfs:
1297
            if not missing_keys:
1298
                break
4634.11.3 by John Arbash Meinel
Implement _GCGraphIndex.find_ancestry()
1299
            (f_parent_map, f_missing_keys) = fallback._index.find_ancestry(
1300
                                                missing_keys)
4634.11.2 by John Arbash Meinel
Teach VF.get_known_graph_ancestry to go to fallbacks (bug #419241)
1301
            parent_map.update(f_parent_map)
1302
            missing_keys = f_missing_keys
4593.5.20 by John Arbash Meinel
Expose KnownGraph off of VersionedFiles
1303
        kg = _mod_graph.KnownGraph(parent_map)
1304
        return kg
1305
0.17.5 by Robert Collins
nograph tests completely passing.
1306
    def get_parent_map(self, keys):
3735.31.7 by John Arbash Meinel
Start bringing in stacking support for Groupcompress repos.
1307
        """Get a map of the graph parents of keys.
0.17.5 by Robert Collins
nograph tests completely passing.
1308
1309
        :param keys: The keys to look up parents for.
1310
        :return: A mapping from keys to parents. Absent keys are absent from
1311
            the mapping.
1312
        """
3735.31.7 by John Arbash Meinel
Start bringing in stacking support for Groupcompress repos.
1313
        return self._get_parent_map_with_sources(keys)[0]
1314
1315
    def _get_parent_map_with_sources(self, keys):
1316
        """Get a map of the parents of keys.
1317
1318
        :param keys: The keys to look up parents for.
1319
        :return: A tuple. The first element is a mapping from keys to parents.
1320
            Absent keys are absent from the mapping. The second element is a
1321
            list with the locations each key was found in. The first element
1322
            is the in-this-knit parents, the second the first fallback source,
1323
            and so on.
1324
        """
0.17.5 by Robert Collins
nograph tests completely passing.
1325
        result = {}
3735.31.7 by John Arbash Meinel
Start bringing in stacking support for Groupcompress repos.
1326
        sources = [self._index] + self._fallback_vfs
0.17.5 by Robert Collins
nograph tests completely passing.
1327
        source_results = []
1328
        missing = set(keys)
1329
        for source in sources:
1330
            if not missing:
1331
                break
1332
            new_result = source.get_parent_map(missing)
1333
            source_results.append(new_result)
1334
            result.update(new_result)
1335
            missing.difference_update(set(new_result))
3735.31.7 by John Arbash Meinel
Start bringing in stacking support for Groupcompress repos.
1336
        return result, source_results
0.17.5 by Robert Collins
nograph tests completely passing.
1337
4634.3.11 by Andrew Bennetts
Simplify further, comment more.
1338
    def _get_blocks(self, read_memos):
1339
        """Get GroupCompressBlocks for the given read_memos.
1340
4634.3.14 by Andrew Bennetts
Some changes prompted by John's review.
1341
        :returns: a series of (read_memo, block) pairs, in the order they were
1342
            originally passed.
4634.3.11 by Andrew Bennetts
Simplify further, comment more.
1343
        """
4634.3.1 by Andrew Bennetts
Add some batching to _get_remaining_record_stream.
1344
        cached = {}
1345
        for read_memo in read_memos:
1346
            try:
1347
                block = self._group_cache[read_memo]
1348
            except KeyError:
1349
                pass
1350
            else:
1351
                cached[read_memo] = block
4634.3.3 by Andrew Bennetts
Fix bug, add docstrings, improve clarity.
1352
        not_cached = []
1353
        not_cached_seen = set()
1354
        for read_memo in read_memos:
1355
            if read_memo in cached:
1356
                # Don't fetch what we already have
1357
                continue
1358
            if read_memo in not_cached_seen:
1359
                # Don't try to fetch the same data twice
1360
                continue
1361
            not_cached.append(read_memo)
1362
            not_cached_seen.add(read_memo)
4634.3.1 by Andrew Bennetts
Add some batching to _get_remaining_record_stream.
1363
        raw_records = self._access.get_raw_records(not_cached)
1364
        for read_memo in read_memos:
1365
            try:
4634.3.16 by Andrew Bennetts
Fix buglets.
1366
                yield read_memo, cached[read_memo]
4634.3.1 by Andrew Bennetts
Add some batching to _get_remaining_record_stream.
1367
            except KeyError:
4634.3.15 by Andrew Bennetts
Get rid of inaccurate comment.
1368
                # Read the block, and cache it.
4634.3.1 by Andrew Bennetts
Add some batching to _get_remaining_record_stream.
1369
                zdata = raw_records.next()
1370
                block = GroupCompressBlock.from_bytes(zdata)
1371
                self._group_cache[read_memo] = block
4634.3.3 by Andrew Bennetts
Fix bug, add docstrings, improve clarity.
1372
                cached[read_memo] = block
4634.3.14 by Andrew Bennetts
Some changes prompted by John's review.
1373
                yield read_memo, block
4634.3.1 by Andrew Bennetts
Add some batching to _get_remaining_record_stream.
1374
0.20.18 by John Arbash Meinel
Implement new handling of get_bytes_as(), and get_missing_compression_parent_keys()
1375
    def get_missing_compression_parent_keys(self):
1376
        """Return the keys of missing compression parents.
1377
1378
        Missing compression parents occur when a record stream was missing
1379
        basis texts, or an index was scanned that had missing basis texts.
1380
        """
1381
        # GroupCompress cannot currently reference texts that are not in the
1382
        # group, so this is valid for now
1383
        return frozenset()
1384
0.17.5 by Robert Collins
nograph tests completely passing.
1385
    def get_record_stream(self, keys, ordering, include_delta_closure):
1386
        """Get a stream of records for keys.
1387
1388
        :param keys: The keys to include.
1389
        :param ordering: Either 'unordered' or 'topological'. A topologically
1390
            sorted stream has compression parents strictly before their
1391
            children.
1392
        :param include_delta_closure: If True then the closure across any
1393
            compression parents will be included (in the opaque data).
1394
        :return: An iterator of ContentFactory objects, each of which is only
1395
            valid until the iterator is advanced.
1396
        """
1397
        # keys might be a generator
0.22.6 by John Arbash Meinel
Clustering chk pages properly makes a big difference.
1398
        orig_keys = list(keys)
3735.31.18 by John Arbash Meinel
Implement stacking support across all ordering implementations.
1399
        keys = set(keys)
0.17.5 by Robert Collins
nograph tests completely passing.
1400
        if not keys:
1401
            return
0.20.23 by John Arbash Meinel
Add a progress indicator for chk pages.
1402
        if (not self._index.has_graph
3735.31.14 by John Arbash Meinel
Change the gc-optimal to 'groupcompress'
1403
            and ordering in ('topological', 'groupcompress')):
0.17.5 by Robert Collins
nograph tests completely passing.
1404
            # Cannot order topologically when no graph has been stored.
3735.31.18 by John Arbash Meinel
Implement stacking support across all ordering implementations.
1405
            # but we allow 'as-requested' or 'unordered'
0.17.5 by Robert Collins
nograph tests completely passing.
1406
            ordering = 'unordered'
3735.31.18 by John Arbash Meinel
Implement stacking support across all ordering implementations.
1407
1408
        remaining_keys = keys
1409
        while True:
1410
            try:
1411
                keys = set(remaining_keys)
1412
                for content_factory in self._get_remaining_record_stream(keys,
1413
                        orig_keys, ordering, include_delta_closure):
1414
                    remaining_keys.discard(content_factory.key)
1415
                    yield content_factory
1416
                return
1417
            except errors.RetryWithNewPacks, e:
1418
                self._access.reload_or_raise(e)
1419
1420
    def _find_from_fallback(self, missing):
1421
        """Find whatever keys you can from the fallbacks.
1422
1423
        :param missing: A set of missing keys. This set will be mutated as keys
1424
            are found from a fallback_vfs
1425
        :return: (parent_map, key_to_source_map, source_results)
1426
            parent_map  the overall key => parent_keys
1427
            key_to_source_map   a dict from {key: source}
1428
            source_results      a list of (source: keys)
1429
        """
1430
        parent_map = {}
1431
        key_to_source_map = {}
1432
        source_results = []
1433
        for source in self._fallback_vfs:
1434
            if not missing:
1435
                break
1436
            source_parents = source.get_parent_map(missing)
1437
            parent_map.update(source_parents)
1438
            source_parents = list(source_parents)
1439
            source_results.append((source, source_parents))
1440
            key_to_source_map.update((key, source) for key in source_parents)
1441
            missing.difference_update(source_parents)
1442
        return parent_map, key_to_source_map, source_results
1443
1444
    def _get_ordered_source_keys(self, ordering, parent_map, key_to_source_map):
1445
        """Get the (source, [keys]) list.
1446
1447
        The returned objects should be in the order defined by 'ordering',
1448
        which can weave between different sources.
1449
        :param ordering: Must be one of 'topological' or 'groupcompress'
1450
        :return: List of [(source, [keys])] tuples, such that all keys are in
1451
            the defined order, regardless of source.
1452
        """
1453
        if ordering == 'topological':
1454
            present_keys = topo_sort(parent_map)
1455
        else:
1456
            # ordering == 'groupcompress'
1457
            # XXX: This only optimizes for the target ordering. We may need
1458
            #      to balance that with the time it takes to extract
1459
            #      ordering, by somehow grouping based on
1460
            #      locations[key][0:3]
1461
            present_keys = sort_gc_optimal(parent_map)
1462
        # Now group by source:
1463
        source_keys = []
1464
        current_source = None
1465
        for key in present_keys:
1466
            source = key_to_source_map.get(key, self)
1467
            if source is not current_source:
1468
                source_keys.append((source, []))
3735.32.12 by John Arbash Meinel
Add groupcompress-block[-ref] as valid stream types.
1469
                current_source = source
3735.31.18 by John Arbash Meinel
Implement stacking support across all ordering implementations.
1470
            source_keys[-1][1].append(key)
1471
        return source_keys
1472
1473
    def _get_as_requested_source_keys(self, orig_keys, locations, unadded_keys,
1474
                                      key_to_source_map):
1475
        source_keys = []
1476
        current_source = None
1477
        for key in orig_keys:
1478
            if key in locations or key in unadded_keys:
1479
                source = self
1480
            elif key in key_to_source_map:
1481
                source = key_to_source_map[key]
1482
            else: # absent
1483
                continue
1484
            if source is not current_source:
1485
                source_keys.append((source, []))
3735.32.12 by John Arbash Meinel
Add groupcompress-block[-ref] as valid stream types.
1486
                current_source = source
3735.31.18 by John Arbash Meinel
Implement stacking support across all ordering implementations.
1487
            source_keys[-1][1].append(key)
1488
        return source_keys
1489
1490
    def _get_io_ordered_source_keys(self, locations, unadded_keys,
1491
                                    source_result):
1492
        def get_group(key):
1493
            # This is the group the bytes are stored in, followed by the
1494
            # location in the group
1495
            return locations[key][0]
1496
        present_keys = sorted(locations.iterkeys(), key=get_group)
1497
        # We don't have an ordering for keys in the in-memory object, but
1498
        # lets process the in-memory ones first.
1499
        present_keys = list(unadded_keys) + present_keys
1500
        # Now grab all of the ones from other sources
1501
        source_keys = [(self, present_keys)]
1502
        source_keys.extend(source_result)
1503
        return source_keys
1504
1505
    def _get_remaining_record_stream(self, keys, orig_keys, ordering,
1506
                                     include_delta_closure):
1507
        """Get a stream of records for keys.
1508
1509
        :param keys: The keys to include.
1510
        :param ordering: one of 'unordered', 'topological', 'groupcompress' or
1511
            'as-requested'
1512
        :param include_delta_closure: If True then the closure across any
1513
            compression parents will be included (in the opaque data).
1514
        :return: An iterator of ContentFactory objects, each of which is only
1515
            valid until the iterator is advanced.
1516
        """
0.17.5 by Robert Collins
nograph tests completely passing.
1517
        # Cheap: iterate
1518
        locations = self._index.get_build_details(keys)
3735.31.18 by John Arbash Meinel
Implement stacking support across all ordering implementations.
1519
        unadded_keys = set(self._unadded_refs).intersection(keys)
1520
        missing = keys.difference(locations)
1521
        missing.difference_update(unadded_keys)
1522
        (fallback_parent_map, key_to_source_map,
1523
         source_result) = self._find_from_fallback(missing)
1524
        if ordering in ('topological', 'groupcompress'):
0.17.5 by Robert Collins
nograph tests completely passing.
1525
            # would be better to not globally sort initially but instead
1526
            # start with one key, recurse to its oldest parent, then grab
1527
            # everything in the same group, etc.
1528
            parent_map = dict((key, details[2]) for key, details in
1529
                locations.iteritems())
3735.31.18 by John Arbash Meinel
Implement stacking support across all ordering implementations.
1530
            for key in unadded_keys:
1531
                parent_map[key] = self._unadded_refs[key]
1532
            parent_map.update(fallback_parent_map)
1533
            source_keys = self._get_ordered_source_keys(ordering, parent_map,
1534
                                                        key_to_source_map)
0.22.6 by John Arbash Meinel
Clustering chk pages properly makes a big difference.
1535
        elif ordering == 'as-requested':
3735.31.18 by John Arbash Meinel
Implement stacking support across all ordering implementations.
1536
            source_keys = self._get_as_requested_source_keys(orig_keys,
1537
                locations, unadded_keys, key_to_source_map)
0.17.5 by Robert Collins
nograph tests completely passing.
1538
        else:
0.20.10 by John Arbash Meinel
Change the extraction ordering for 'unordered'.
1539
            # We want to yield the keys in a semi-optimal (read-wise) ordering.
1540
            # Otherwise we thrash the _group_cache and destroy performance
3735.31.18 by John Arbash Meinel
Implement stacking support across all ordering implementations.
1541
            source_keys = self._get_io_ordered_source_keys(locations,
1542
                unadded_keys, source_result)
1543
        for key in missing:
0.17.5 by Robert Collins
nograph tests completely passing.
1544
            yield AbsentContentFactory(key)
4634.3.3 by Andrew Bennetts
Fix bug, add docstrings, improve clarity.
1545
        # Batch up as many keys as we can until either:
1546
        #  - we encounter an unadded ref, or
1547
        #  - we run out of keys, or
4634.3.17 by Andrew Bennetts
Make BATCH_SIZE a global.
1548
        #  - the total bytes to retrieve for this batch > BATCH_SIZE
4634.3.3 by Andrew Bennetts
Fix bug, add docstrings, improve clarity.
1549
        batcher = _BatchingBlockFetcher(self, locations)
3735.31.18 by John Arbash Meinel
Implement stacking support across all ordering implementations.
1550
        for source, keys in source_keys:
1551
            if source is self:
1552
                for key in keys:
1553
                    if key in self._unadded_refs:
4634.3.8 by Andrew Bennetts
Tweak some comments.
1554
                        # Flush batch, then yield unadded ref from
1555
                        # self._compressor.
4634.3.14 by Andrew Bennetts
Some changes prompted by John's review.
1556
                        for factory in batcher.yield_factories(full_flush=True):
1557
                            yield factory
3735.31.18 by John Arbash Meinel
Implement stacking support across all ordering implementations.
1558
                        bytes, sha1 = self._compressor.extract(key)
1559
                        parents = self._unadded_refs[key]
3735.32.12 by John Arbash Meinel
Add groupcompress-block[-ref] as valid stream types.
1560
                        yield FulltextContentFactory(key, parents, sha1, bytes)
4634.3.1 by Andrew Bennetts
Add some batching to _get_remaining_record_stream.
1561
                        continue
4634.3.14 by Andrew Bennetts
Some changes prompted by John's review.
1562
                    if batcher.add_key(key) > BATCH_SIZE:
4634.3.8 by Andrew Bennetts
Tweak some comments.
1563
                        # Ok, this batch is big enough.  Yield some results.
4634.3.14 by Andrew Bennetts
Some changes prompted by John's review.
1564
                        for factory in batcher.yield_factories():
1565
                            yield factory
0.17.11 by Robert Collins
Add extraction of just-compressed texts to support converting from knits.
1566
            else:
4634.3.14 by Andrew Bennetts
Some changes prompted by John's review.
1567
                for factory in batcher.yield_factories(full_flush=True):
1568
                    yield factory
3735.31.18 by John Arbash Meinel
Implement stacking support across all ordering implementations.
1569
                for record in source.get_record_stream(keys, ordering,
1570
                                                       include_delta_closure):
1571
                    yield record
4634.3.14 by Andrew Bennetts
Some changes prompted by John's review.
1572
        for factory in batcher.yield_factories(full_flush=True):
1573
            yield factory
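From the caller's side, the lifetime rule in the docstring above matters: each factory is only valid until the stream advances, so content has to be extracted immediately. A hedged usage sketch (vf stands for any GroupCompressVersionedFiles-like object, keys for a set of key tuples):

    texts = {}
    for record in vf.get_record_stream(keys, 'unordered', True):
        if record.storage_kind == 'absent':
            continue
        # Pull the bytes out now; the factory is invalidated as soon
        # as the loop advances to the next record.
        texts[record.key] = record.get_bytes_as('fulltext')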
0.20.5 by John Arbash Meinel
Finish the Fulltext => Chunked conversions so that we work in the more-efficient Chunks.
1574
0.17.5 by Robert Collins
nograph tests completely passing.
1575
    def get_sha1s(self, keys):
1576
        """See VersionedFiles.get_sha1s()."""
1577
        result = {}
1578
        for record in self.get_record_stream(keys, 'unordered', True):
1579
            if record.sha1 is not None:
1580
                result[record.key] = record.sha1
1581
            else:
1582
                if record.storage_kind != 'absent':
3735.40.2 by John Arbash Meinel
Add a groupcompress.encode_copy_instruction function.
1583
                    result[record.key] = osutils.sha_string(
1584
                        record.get_bytes_as('fulltext'))
0.17.5 by Robert Collins
nograph tests completely passing.
1585
        return result
1586
0.17.2 by Robert Collins
Core proof of concept working.
1587
    def insert_record_stream(self, stream):
1588
        """Insert a record stream into this container.
1589
3735.31.2 by John Arbash Meinel
Cleanup trailing whitespace, get test_source to pass by removing asserts.
1590
        :param stream: A stream of records to insert.
0.17.2 by Robert Collins
Core proof of concept working.
1591
        :return: None
1592
        :seealso VersionedFiles.get_record_stream:
1593
        """
4241.6.6 by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core.
1594
        # XXX: Setting random_id=True makes
1595
        # test_insert_record_stream_existing_keys fail for groupcompress and
1596
        # groupcompress-nograph, this needs to be revisited while addressing
1597
        # 'bzr branch' performance issues.
4665.3.2 by John Arbash Meinel
An alternative implementation that passes both tests.
1598
        for _ in self._insert_record_stream(stream, random_id=False):
0.17.5 by Robert Collins
nograph tests completely passing.
1599
            pass
0.17.2 by Robert Collins
Core proof of concept working.
1600
3735.32.21 by John Arbash Meinel
We now have a 'reuse_blocks=False' flag for autopack et al.
1601
    def _insert_record_stream(self, stream, random_id=False, nostore_sha=None,
1602
                              reuse_blocks=True):
0.17.2 by Robert Collins
Core proof of concept working.
1603
        """Internal core to insert a record stream into this container.
1604
1605
        This helper function has a different interface from insert_record_stream
1606
        to allow add_lines to be minimal, but still return the needed data.
1607
3735.31.2 by John Arbash Meinel
Cleanup trailing whitespace, get test_source to pass by removing asserts.
1608
        :param stream: A stream of records to insert.
3735.31.12 by John Arbash Meinel
Push nostore_sha down through the stack.
1609
        :param nostore_sha: If the sha1 of a given text matches nostore_sha,
1610
            raise ExistingContent, rather than committing the new text.
3735.32.21 by John Arbash Meinel
We now have a 'reuse_blocks=False' flag for autopack et al.
1611
        :param reuse_blocks: If the source is streaming from
1612
            groupcompress-blocks, just insert the blocks as-is, rather than
1613
            expanding the texts and inserting again.
0.17.2 by Robert Collins
Core proof of concept working.
1614
        :return: An iterator over the sha1s of the inserted records.
1615
        :seealso insert_record_stream:
1616
        :seealso add_lines:
1617
        """
0.20.29 by Ian Clatworthy
groupcompress.py code cleanups
1618
        adapters = {}
0.17.5 by Robert Collins
nograph tests completely passing.
1619
        def get_adapter(adapter_key):
1620
            try:
1621
                return adapters[adapter_key]
1622
            except KeyError:
1623
                adapter_factory = adapter_registry.get(adapter_key)
1624
                adapter = adapter_factory(self)
1625
                adapters[adapter_key] = adapter
1626
                return adapter
0.17.2 by Robert Collins
Core proof of concept working.
1627
        # This will go up to fulltexts for gc to gc fetching, which isn't
1628
        # ideal.
3735.32.19 by John Arbash Meinel
Get rid of the 'delta' flag to GroupCompressor. It didn't do anything anyway.
1629
        self._compressor = GroupCompressor()
0.17.11 by Robert Collins
Add extraction of just-compressed texts to support converting from knits.
1630
        self._unadded_refs = {}
0.17.5 by Robert Collins
nograph tests completely passing.
1631
        keys_to_add = []
0.17.6 by Robert Collins
Cap group size at 20MB internal buffer. (Probably way too big).
1632
        def flush():
3735.32.23 by John Arbash Meinel
Add a _LazyGroupContentManager._check_rebuild_block
1633
            bytes = self._compressor.flush().to_bytes()
0.17.6 by Robert Collins
Cap group size at 20MB internal buffer. (Probably way too big).
1634
            index, start, length = self._access.add_raw_records(
0.25.7 by John Arbash Meinel
Have the GroupCompressBlock decide how to compress the header and content.
1635
                [(None, len(bytes))], bytes)[0]
0.17.6 by Robert Collins
Cap group size at 20MB internal buffer. (Probably way too big).
1636
            nodes = []
1637
            for key, reads, refs in keys_to_add:
1638
                nodes.append((key, "%d %d %s" % (start, length, reads), refs))
1639
            self._index.add_records(nodes, random_id=random_id)
0.25.10 by John Arbash Meinel
Play around with detecting compression breaks.
1640
            self._unadded_refs = {}
1641
            del keys_to_add[:]
3735.32.19 by John Arbash Meinel
Get rid of the 'delta' flag to GroupCompressor. It didn't do anything anyway.
1642
            self._compressor = GroupCompressor()
0.25.10 by John Arbash Meinel
Play around with detecting compression breaks.
1643
0.20.15 by John Arbash Meinel
Change so that regions that have lots of copies get converted back
1644
        last_prefix = None
0.25.10 by John Arbash Meinel
Play around with detecting compression breaks.
1645
        max_fulltext_len = 0
0.25.11 by John Arbash Meinel
Slightly different handling of large texts.
1646
        max_fulltext_prefix = None
3735.32.20 by John Arbash Meinel
groupcompress now copies the blocks exactly as they were given.
1647
        insert_manager = None
1648
        block_start = None
1649
        block_length = None
3735.36.15 by John Arbash Meinel
Set 'combine_backing_indices=False' as the default for text and chk indices.
1650
        # XXX: TODO: remove this, it is just for safety checking for now
1651
        inserted_keys = set()
4665.3.9 by John Arbash Meinel
Start doing some work to make sure that we call _check_rebuild_block
1652
        reuse_this_block = reuse_blocks
0.17.2 by Robert Collins
Core proof of concept working.
1653
        for record in stream:
0.17.5 by Robert Collins
nograph tests completely passing.
1654
            # Raise an error when a record is missing.
1655
            if record.storage_kind == 'absent':
0.20.29 by Ian Clatworthy
groupcompress.py code cleanups
1656
                raise errors.RevisionNotPresent(record.key, self)
3735.36.15 by John Arbash Meinel
Set 'combine_backing_indices=False' as the default for text and chk indices.
1657
            if random_id:
1658
                if record.key in inserted_keys:
4241.6.6 by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core.
1659
                    trace.note('Insert claimed random_id=True,'
1660
                               ' but then inserted %r twice', record.key)
3735.36.15 by John Arbash Meinel
Set 'combine_backing_indices=False' as the default for text and chk indices.
1661
                    continue
1662
                inserted_keys.add(record.key)
4665.3.9 by John Arbash Meinel
Start doing some work to make sure that we call _check_rebuild_block
1663
            if reuse_blocks:
3735.32.21 by John Arbash Meinel
We now have a 'reuse_blocks=False' flag for autopack et al.
1664
                # If the reuse_blocks flag is set, check to see if we can just
1665
                # copy a groupcompress block as-is.
4665.3.10 by John Arbash Meinel
Get a test written which exercises the 'trim' code path.
1666
                # We only check on the first record (groupcompress-block) not
1667
                # on all of the (groupcompress-block-ref) entries.
1668
                # The reuse_this_block flag is then kept for as long as the
                # following (groupcompress-block-ref) records refer back to
                # that same block.
4634.23.1 by Robert Collins
Cherrypick from bzr.dev: Fix bug 402652: recompress badly packed groups during fetch. (John Arbash Meinel, Robert Collins)
1669
                if record.storage_kind == 'groupcompress-block':
4665.3.2 by John Arbash Meinel
An alternative implementation that passes both tests.
1670
                    # Check to see if we really want to re-use this block
1671
                    insert_manager = record._manager
4665.3.9 by John Arbash Meinel
Start doing some work to make sure that we call _check_rebuild_block
1672
                    reuse_this_block = insert_manager.check_is_well_utilized()
4665.3.10 by John Arbash Meinel
Get a test written which exercises the 'trim' code path.
1673
            else:
1674
                reuse_this_block = False
4665.3.2 by John Arbash Meinel
An alternative implementation that passes both tests.
1675
            if reuse_this_block:
1676
                # We still want to reuse this block
1677
                if record.storage_kind == 'groupcompress-block':
3735.32.21 by John Arbash Meinel
We now have a 'reuse_blocks=False' flag for autopack et al.
1678
                    # Insert the raw block into the target repo
1679
                    insert_manager = record._manager
1680
                    bytes = record._manager._block.to_bytes()
1681
                    _, start, length = self._access.add_raw_records(
1682
                        [(None, len(bytes))], bytes)[0]
1683
                    del bytes
1684
                    block_start = start
1685
                    block_length = length
1686
                if record.storage_kind in ('groupcompress-block',
1687
                                           'groupcompress-block-ref'):
4241.6.6 by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core.
1688
                    if insert_manager is None:
1689
                        raise AssertionError('No insert_manager set')
4665.3.4 by John Arbash Meinel
Refactor the check_rebuild code a bit, so that we can potentially
1690
                    if insert_manager is not record._manager:
1691
                        raise AssertionError('insert_manager does not match'
1692
                            ' the current record, we cannot be positive'
1693
                            ' that the appropriate content was inserted.'
1694
                            )
3735.32.21 by John Arbash Meinel
We now have a 'reuse_blocks=False' flag for autopack et al.
1695
                    value = "%d %d %d %d" % (block_start, block_length,
1696
                                             record._start, record._end)
1697
                    nodes = [(record.key, value, (record.parents,))]
3735.38.1 by John Arbash Meinel
Change the delta byte stream to remove the 'source length' entry.
1698
                    # TODO: Consider buffering up many nodes to be added, not
1699
                    #       sure how much overhead this has, but we're seeing
1700
                    #       ~23s / 120s in add_records calls
3735.32.21 by John Arbash Meinel
We now have a 'reuse_blocks=False' flag for autopack et al.
1701
                    self._index.add_records(nodes, random_id=random_id)
1702
                    continue
0.20.18 by John Arbash Meinel
Implement new handling of get_bytes_as(), and get_missing_compression_parent_keys()
1703
            try:
0.23.52 by John Arbash Meinel
Use the max_delta flag.
1704
                bytes = record.get_bytes_as('fulltext')
0.20.18 by John Arbash Meinel
Implement new handling of get_bytes_as(), and get_missing_compression_parent_keys()
1705
            except errors.UnavailableRepresentation:
0.17.5 by Robert Collins
nograph tests completely passing.
1706
                adapter_key = record.storage_kind, 'fulltext'
1707
                adapter = get_adapter(adapter_key)
0.20.21 by John Arbash Meinel
Merge the chk sorting code.
1708
                bytes = adapter.get_bytes(record)
0.20.13 by John Arbash Meinel
Play around a bit.
1709
            if len(record.key) > 1:
1710
                prefix = record.key[0]
0.25.11 by John Arbash Meinel
Slightly different handling of large texts.
1711
                soft = (prefix == last_prefix)
0.25.10 by John Arbash Meinel
Play around with detecting compression breaks.
1712
            else:
1713
                prefix = None
0.25.11 by John Arbash Meinel
Slightly different handling of large texts.
1714
                soft = False
1715
            if max_fulltext_len < len(bytes):
1716
                max_fulltext_len = len(bytes)
1717
                max_fulltext_prefix = prefix
4241.6.6 by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core.
1718
            (found_sha1, start_point, end_point,
1719
             type) = self._compressor.compress(record.key,
1720
                                               bytes, record.sha1, soft=soft,
1721
                                               nostore_sha=nostore_sha)
1722
            # delta_ratio = float(len(bytes)) / (end_point - start_point)
0.25.10 by John Arbash Meinel
Play around with detecting compression breaks.
1723
            # Check if we want to continue to include that text
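            # Worked example (illustrative numbers): if the largest fulltext
            # seen so far is 3MB and this record shares its prefix, the block
            # may grow to 6MB (2 * max_fulltext_len) before being cut.
            # Otherwise blocks are cut at 4MB, or already at 2MB as soon as
            # the file-id prefix changes, since texts of unrelated files
            # rarely delta well against each other.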
0.25.11 by John Arbash Meinel
Slightly different handling of large texts.
1724
            if (prefix == max_fulltext_prefix
1725
                and end_point < 2 * max_fulltext_len):
1726
                # As long as we are on the same file_id, we will fill at least
1727
                # 2 * max_fulltext_len
1728
                start_new_block = False
1729
            elif end_point > 4*1024*1024:
1730
                start_new_block = True
1731
            elif (prefix is not None and prefix != last_prefix
1732
                  and end_point > 2*1024*1024):
1733
                start_new_block = True
1734
            else:
1735
                start_new_block = False
0.25.10 by John Arbash Meinel
Play around with detecting compression breaks.
1736
            last_prefix = prefix
1737
            if start_new_block:
1738
                self._compressor.pop_last()
1739
                flush()
1740
                max_fulltext_len = len(bytes)
4241.6.6 by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core.
1741
                (found_sha1, start_point, end_point,
1742
                 type) = self._compressor.compress(record.key, bytes,
1743
                                                   record.sha1)
0.17.26 by Robert Collins
Working better --gc-plain-chk.
1744
            if record.key[-1] is None:
1745
                key = record.key[:-1] + ('sha1:' + found_sha1,)
1746
            else:
1747
                key = record.key
1748
            self._unadded_refs[key] = record.parents
0.17.3 by Robert Collins
new encoder, allows non monotonically increasing sequence matches for moar compression.
1749
            yield found_sha1
4842.1.1 by Andrew Bennetts
Fix crash involving static_tuple when C extensions are not built.
1750
            as_st = static_tuple.StaticTuple.from_sequence
1751
            if record.parents is not None:
1752
                parents = as_st([as_st(p) for p in record.parents])
1753
            else:
1754
                parents = None
1755
            refs = static_tuple.StaticTuple(parents)
1756
            keys_to_add.append((key, '%d %d' % (start_point, end_point), refs))
0.17.8 by Robert Collins
Flush pending updates at the end of _insert_record_stream
1757
        if keys_to_add:
1758
            flush()
0.17.11 by Robert Collins
Add extraction of just-compressed texts to support converting from knits.
1759
        self._compressor = None
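The value strings written by flush() tie the whole scheme together: "block_start block_length" plus the per-text "start end" recorded in keys_to_add make four space-separated integers per key, which _GCGraphIndex._node_to_position() later splits apart again. A small round-trip sketch (numbers made up):

    # Encoding, as in flush(): block offset/length in the pack file,
    # then the text's extent inside the uncompressed block.
    start, length = 8192, 4096
    reads = '%d %d' % (0, 1300)
    value = "%d %d %s" % (start, length, reads)
    # Decoding, as in _node_to_position():
    bits = value.split(' ')
    assert [int(b) for b in bits] == [8192, 4096, 0, 1300]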
0.17.5 by Robert Collins
nograph tests completely passing.
1760
1761
    def iter_lines_added_or_present_in_keys(self, keys, pb=None):
1762
        """Iterate over the lines in the versioned files from keys.
1763
1764
        This may return lines from other keys. Each item the returned
1765
        iterator yields is a tuple of a line and a text version that line
1766
        is present in (not introduced in).
1767
1768
        Ordering of results is in whatever order is most suitable for the
1769
        underlying storage format.
1770
1771
        If a progress bar is supplied, it may be used to indicate progress.
1772
        The caller is responsible for cleaning up progress bars (because this
1773
        is an iterator).
1774
1775
        NOTES:
1776
         * Lines are normalised by the underlying store: they will all have \n
1777
           terminators.
1778
         * Lines are returned in arbitrary order.
1779
1780
        :return: An iterator over (line, key).
1781
        """
1782
        keys = set(keys)
1783
        total = len(keys)
1784
        # we don't care about inclusions, the caller cares.
1785
        # but we need to setup a list of records to visit.
1786
        # we need key, position, length
1787
        for key_idx, record in enumerate(self.get_record_stream(keys,
1788
            'unordered', True)):
1789
            # XXX: todo - optimise to use less than full texts.
1790
            key = record.key
4398.8.8 by John Arbash Meinel
Respond to Andrew's review comments.
1791
            if pb is not None:
1792
                pb.update('Walking content', key_idx, total)
0.17.5 by Robert Collins
nograph tests completely passing.
1793
            if record.storage_kind == 'absent':
0.20.29 by Ian Clatworthy
groupcompress.py code cleanups
1794
                raise errors.RevisionNotPresent(key, self)
4241.6.6 by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core.
1795
            lines = osutils.split_lines(record.get_bytes_as('fulltext'))
0.17.5 by Robert Collins
nograph tests completely passing.
1796
            for line in lines:
1797
                yield line, key
4398.8.8 by John Arbash Meinel
Respond to Andrew's review comments.
1798
        if pb is not None:
1799
            pb.update('Walking content', total, total)
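A hedged usage sketch of the contract above: the caller owns the progress bar, and lines arrive \n-terminated in arbitrary order (vf and keys are assumed to exist):

    from bzrlib import ui

    pb = ui.ui_factory.nested_progress_bar()
    try:
        for line, key in vf.iter_lines_added_or_present_in_keys(keys, pb=pb):
            pass  # each line is attributed to some key that contains it
    finally:
        pb.finished()  # the docstring makes cleanup the caller's job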
0.17.5 by Robert Collins
nograph tests completely passing.
1800
1801
    def keys(self):
1802
        """See VersionedFiles.keys."""
1803
        if 'evil' in debug.debug_flags:
1804
            trace.mutter_callsite(2, "keys scales with size of history")
3735.31.7 by John Arbash Meinel
Start bringing in stacking support for Groupcompress repos.
1805
        sources = [self._index] + self._fallback_vfs
0.17.5 by Robert Collins
nograph tests completely passing.
1806
        result = set()
1807
        for source in sources:
1808
            result.update(source.keys())
1809
        return result
1810
0.17.1 by Robert Collins
Starting point. Interface tests hooked up and failing.
1811
1812
class _GCGraphIndex(object):
1813
    """Mapper from GroupCompressVersionedFiles needs into GraphIndex storage."""
1814
0.17.9 by Robert Collins
Initial stab at repository format support.
1815
    def __init__(self, graph_index, is_locked, parents=True,
4465.2.4 by Aaron Bentley
Switch between warn and raise depending on inconsistent_fatal.
1816
        add_callback=None, track_external_parent_refs=False,
4634.29.1 by Andrew Bennetts
Rough code to reject commit_write_group if any inventory's CHK root is absent.
1817
        inconsistency_fatal=True, track_new_keys=False):
0.17.1 by Robert Collins
Starting point. Interface tests hooked up and failing.
1818
        """Construct a _GCGraphIndex on a graph_index.
1819
1820
        :param graph_index: An implementation of bzrlib.index.GraphIndex.
0.20.29 by Ian Clatworthy
groupcompress.py code cleanups
1821
        :param is_locked: A callback that returns True if the index is locked and
1822
            thus usable.
3735.31.2 by John Arbash Meinel
Cleanup trailing whitespace, get test_source to pass by removing asserts.
1823
        :param parents: If True, record parents; if not, do not record
0.17.1 by Robert Collins
Starting point. Interface tests hooked up and failing.
1824
            parents.
1825
        :param add_callback: If not None, allow additions to the index and call
1826
            this callback with a list of added GraphIndex nodes:
1827
            [(node, value, node_refs), ...]
4343.3.21 by John Arbash Meinel
Implement get_missing_parents in terms of _KeyRefs.
1828
        :param track_external_parent_refs: As keys are added, keep track of the
1829
            keys they reference, so that we can query get_missing_parents(),
1830
            etc.
4465.2.4 by Aaron Bentley
Switch between warn and raise depending on inconsistent_fatal.
1831
        :param inconsistency_fatal: When asked to add records that are already
1832
            present, and the details are inconsistent with the existing
1833
            record, raise an exception instead of warning (and skipping the
1834
            record).
0.17.1 by Robert Collins
Starting point. Interface tests hooked up and failing.
1835
        """
1836
        self._add_callback = add_callback
1837
        self._graph_index = graph_index
1838
        self._parents = parents
1839
        self.has_graph = parents
1840
        self._is_locked = is_locked
4465.2.4 by Aaron Bentley
Switch between warn and raise depending on inconsistent_fatal.
1841
        self._inconsistency_fatal = inconsistency_fatal
4679.9.19 by John Arbash Meinel
Interning the start and stop group positions saves another 7MB peak mem. \o/
1842
        # GroupCompress records tend to have the same 'group' start + offset
1843
        # repeated over and over, this creates a surplus of ints
1844
        self._int_cache = {}
4343.3.21 by John Arbash Meinel
Implement get_missing_parents in terms of _KeyRefs.
1845
        if track_external_parent_refs:
4634.29.6 by Andrew Bennetts
Put new key tracking in _KeyRefs rather than alongside it.
1846
            self._key_dependencies = knit._KeyRefs(
1847
                track_new_keys=track_new_keys)
4343.3.21 by John Arbash Meinel
Implement get_missing_parents in terms of _KeyRefs.
1848
        else:
1849
            self._key_dependencies = None
0.17.1 by Robert Collins
Starting point. Interface tests hooked up and failing.
1850
0.17.5 by Robert Collins
nograph tests completely passing.
1851
    def add_records(self, records, random_id=False):
1852
        """Add multiple records to the index.
3735.31.2 by John Arbash Meinel
Cleanup trailing whitespace, get test_source to pass by removing asserts.
1853
0.17.5 by Robert Collins
nograph tests completely passing.
1854
        This function does not insert data into the Immutable GraphIndex
1855
        backing this _GCGraphIndex; instead it prepares data for insertion by
1856
        the caller and checks that it is safe to insert, then calls
1857
        self._add_callback with the prepared GraphIndex nodes.
1858
1859
        :param records: a list of tuples:
1860
                         (key, value, node_refs).
1861
        :param random_id: If True the ids being added were randomly generated
1862
            and no check for existence will be performed.
1863
        """
1864
        if not self._add_callback:
1865
            raise errors.ReadOnlyError(self)
1866
        # we hope there are no repositories with inconsistent parentage
1867
        # anymore.
1868
1869
        changed = False
1870
        keys = {}
1871
        for (key, value, refs) in records:
1872
            if not self._parents:
1873
                if refs:
1874
                    for ref in refs:
1875
                        if ref:
4398.8.1 by John Arbash Meinel
Add a VersionedFile.add_text() api.
1876
                            raise errors.KnitCorrupt(self,
0.17.5 by Robert Collins
nograph tests completely passing.
1877
                                "attempt to add node with parents "
1878
                                "in parentless index.")
1879
                    refs = ()
1880
                    changed = True
1881
            keys[key] = (value, refs)
1882
        # check for dups
1883
        if not random_id:
1884
            present_nodes = self._get_entries(keys)
1885
            for (index, key, value, node_refs) in present_nodes:
4789.28.3 by John Arbash Meinel
Add a static_tuple.as_tuples() helper.
1886
                # Sometimes these are passed as a list rather than a tuple
1887
                node_refs = static_tuple.as_tuples(node_refs)
1888
                passed = static_tuple.as_tuples(keys[key])
1889
                if node_refs != passed[1]:
1890
                    details = '%s %s %s' % (key, (value, node_refs), passed)
4465.2.4 by Aaron Bentley
Switch between warn and raise depending on inconsistent_fatal.
1891
                    if self._inconsistency_fatal:
1892
                        raise errors.KnitCorrupt(self, "inconsistent details"
1893
                                                 " in add_records: %s" %
1894
                                                 details)
1895
                    else:
1896
                        trace.warning("inconsistent details in skipped"
1897
                                      " record: %s", details)
0.17.5 by Robert Collins
nograph tests completely passing.
1898
                del keys[key]
1899
                changed = True
1900
        if changed:
1901
            result = []
1902
            if self._parents:
1903
                for key, (value, node_refs) in keys.iteritems():
1904
                    result.append((key, value, node_refs))
1905
            else:
1906
                for key, (value, node_refs) in keys.iteritems():
1907
                    result.append((key, value))
1908
            records = result
4343.3.21 by John Arbash Meinel
Implement get_missing_parents in terms of _KeyRefs.
1909
        key_dependencies = self._key_dependencies
4634.29.6 by Andrew Bennetts
Put new key tracking in _KeyRefs rather than alongside it.
1910
        if key_dependencies is not None:
1911
            if self._parents:
1912
                for key, value, refs in records:
1913
                    parents = refs[0]
1914
                    key_dependencies.add_references(key, parents)
1915
            else:
1916
                for key, value, refs in records:
1917
                    key_dependencies.add_key(key)
0.17.5 by Robert Collins
nograph tests completely passing.
1918
        self._add_callback(records)
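Concretely, the tuples this method consumes are (key, value, node_refs): value is the four-integer position string described earlier, and node_refs wraps the parents tuple. A hedged illustrative call (index is an assumed _GCGraphIndex instance; the key shapes are made up):

    index.add_records(
        [(('rev-2',), '8192 4096 0 1300', ((('rev-1',),),))],
        random_id=False)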
3735.31.2 by John Arbash Meinel
Cleanup trailing whitespace, get test_source to pass by removing asserts.
1919
0.17.5 by Robert Collins
nograph tests completely passing.
1920
    def _check_read(self):
0.20.29 by Ian Clatworthy
groupcompress.py code cleanups
1921
        """Raise an exception if reads are not permitted."""
0.17.5 by Robert Collins
nograph tests completely passing.
1922
        if not self._is_locked():
1923
            raise errors.ObjectNotLocked(self)
1924
0.17.2 by Robert Collins
Core proof of concept working.
1925
    def _check_write_ok(self):
0.20.29 by Ian Clatworthy
groupcompress.py code cleanups
1926
        """Raise an exception if writes are not permitted."""
0.17.2 by Robert Collins
Core proof of concept working.
1927
        if not self._is_locked():
1928
            raise errors.ObjectNotLocked(self)
1929
0.17.5 by Robert Collins
nograph tests completely passing.
1930
    def _get_entries(self, keys, check_present=False):
1931
        """Get the entries for keys.
0.20.29 by Ian Clatworthy
groupcompress.py code cleanups
1932
1933
        Note: Callers are responsible for checking that the index is locked
1934
        before calling this method.
1935
0.17.5 by Robert Collins
nograph tests completely passing.
1936
        :param keys: An iterable of index key tuples.
1937
        """
1938
        keys = set(keys)
1939
        found_keys = set()
1940
        if self._parents:
1941
            for node in self._graph_index.iter_entries(keys):
1942
                yield node
1943
                found_keys.add(node[1])
1944
        else:
1945
            # adapt parentless index to the rest of the code.
1946
            for node in self._graph_index.iter_entries(keys):
1947
                yield node[0], node[1], node[2], ()
1948
                found_keys.add(node[1])
1949
        if check_present:
1950
            missing_keys = keys.difference(found_keys)
1951
            if missing_keys:
4398.8.8 by John Arbash Meinel
Respond to Andrew's review comments.
1952
                raise errors.RevisionNotPresent(missing_keys.pop(), self)
0.17.5 by Robert Collins
nograph tests completely passing.
1953
4634.11.3 by John Arbash Meinel
Implement _GCGraphIndex.find_ancestry()
1954
    def find_ancestry(self, keys):
1955
        """See CombinedGraphIndex.find_ancestry"""
1956
        return self._graph_index.find_ancestry(keys, 0)
1957
0.17.5 by Robert Collins
nograph tests completely passing.
1958
    def get_parent_map(self, keys):
1959
        """Get a map of the parents of keys.
1960
1961
        :param keys: The keys to look up parents for.
1962
        :return: A mapping from keys to parents. Absent keys are absent from
1963
            the mapping.
1964
        """
1965
        self._check_read()
1966
        nodes = self._get_entries(keys)
1967
        result = {}
1968
        if self._parents:
1969
            for node in nodes:
1970
                result[node[1]] = node[3][0]
1971
        else:
1972
            for node in nodes:
1973
                result[node[1]] = None
1974
        return result
1975
4343.3.1 by John Arbash Meinel
Set 'supports_external_lookups=True' for dev6 repositories.
1976
    def get_missing_parents(self):
4343.3.21 by John Arbash Meinel
Implement get_missing_parents in terms of _KeyRefs.
1977
        """Return the keys of missing parents."""
1978
        # Copied from _KnitGraphIndex.get_missing_parents
1979
        # We may have false positives, so filter those out.
4634.29.6 by Andrew Bennetts
Put new key tracking in _KeyRefs rather than alongside it.
1980
        self._key_dependencies.satisfy_refs_for_keys(
4343.3.21 by John Arbash Meinel
Implement get_missing_parents in terms of _KeyRefs.
1981
            self.get_parent_map(self._key_dependencies.get_unsatisfied_refs()))
1982
        return frozenset(self._key_dependencies.get_unsatisfied_refs())
4343.3.1 by John Arbash Meinel
Set 'supports_external_lookups=True' for dev6 repositories.
1983
0.17.5 by Robert Collins
nograph tests completely passing.
1984
    def get_build_details(self, keys):
1985
        """Get the various build details for keys.
1986
1987
        Ghosts are omitted from the result.
1988
1989
        :param keys: An iterable of keys.
1990
        :return: A dict of key:
1991
            (index_memo, compression_parent, parents, record_details).
1992
            index_memo
1993
                opaque structure to pass to read_records to extract the raw
1994
                data
1995
            compression_parent
1996
                Content that this record is built upon, may be None
1997
            parents
1998
                Logical parents of this node
1999
            record_details
2000
                extra information about the content which needs to be passed to
2001
                Factory.parse_record
2002
        """
2003
        self._check_read()
2004
        result = {}
0.20.29 by Ian Clatworthy
groupcompress.py code cleanups
2005
        entries = self._get_entries(keys)
0.17.5 by Robert Collins
nograph tests completely passing.
2006
        for entry in entries:
2007
            key = entry[1]
2008
            if not self._parents:
2009
                parents = None
2010
            else:
2011
                parents = entry[3][0]
2012
            method = 'group'
2013
            result[key] = (self._node_to_position(entry),
2014
                                  None, parents, (method, None))
2015
        return result
3735.31.2 by John Arbash Meinel
Cleanup trailing whitespace, get test_source to pass by removing asserts.
2016
0.17.5 by Robert Collins
nograph tests completely passing.
2017
    def keys(self):
2018
        """Get all the keys in the collection.
3735.31.2 by John Arbash Meinel
Cleanup trailing whitespace, get test_source to pass by removing asserts.
2019
0.17.5 by Robert Collins
nograph tests completely passing.
2020
        The keys are not ordered.
2021
        """
2022
        self._check_read()
2023
        return [node[1] for node in self._graph_index.iter_all_entries()]
3735.31.2 by John Arbash Meinel
Cleanup trailing whitespace, get test_source to pass by removing asserts.
2024
0.17.5 by Robert Collins
nograph tests completely passing.
2025
    def _node_to_position(self, node):
2026
        """Convert an index value to position details."""
2027
        bits = node[2].split(' ')
2028
        # It would be nice not to read the entire gzip.
4679.9.19 by John Arbash Meinel
Interning the start and stop group positions saves another 7MB peak mem. \o/
2029
        # start and stop are put into _int_cache because they are very common.
2030
        # They define the 'group' that an entry is in, and many groups can have
2031
        # thousands of objects.
2032
        # Branching Launchpad, for example, saves ~600k integers, at 12 bytes
2033
        # each, or about 7MB. Note that it might be even more when you consider
2034
        # how PyInt is allocated in separate slabs. And you can't return a slab
2035
        # to the OS if even 1 int on it is in use. Note though that Python uses
2036
        # a LIFO when re-using PyInt slots, which probably causes more
2037
        # fragmentation.
0.17.5 by Robert Collins
nograph tests completely passing.
2038
        start = int(bits[0])
4679.9.19 by John Arbash Meinel
Interning the start and stop group positions saves another 7MB peak mem. \o/
2039
        start = self._int_cache.setdefault(start, start)
0.17.5 by Robert Collins
nograph tests completely passing.
2040
        stop = int(bits[1])
4679.9.19 by John Arbash Meinel
Interning the start and stop group positions saves another 7MB peak mem. \o/
2041
        stop = self._int_cache.setdefault(stop, stop)
0.17.5 by Robert Collins
nograph tests completely passing.
2042
        basis_end = int(bits[2])
2043
        delta_end = int(bits[3])
4679.9.19 by John Arbash Meinel
Interning the start and stop group positions saves another 7MB peak mem. \o/
2044
        # We can't use StaticTuple here, because node[0] is a BTreeGraphIndex
2045
        # instance...
2046
        return (node[0], start, stop, basis_end, delta_end)
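The setdefault calls above are a tiny interning scheme: the first int object stored for a given offset becomes canonical, and later equal duplicates are dropped in its favour. A minimal sketch of the effect (in CPython only small ints are cached, so 8192 normally yields distinct objects):

    cache = {}
    a = int('8192')
    b = int('8192')           # equal value, distinct object
    a = cache.setdefault(a, a)
    b = cache.setdefault(b, b)
    assert a is b             # both names now share one int object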
0.18.14 by John Arbash Meinel
A bit more work, not really usable yet.
2047
4343.3.2 by John Arbash Meinel
All stacking tests seem to be passing for dev6 repos
2048
    def scan_unvalidated_index(self, graph_index):
2049
        """Inform this _GCGraphIndex that there is an unvalidated index.
2050
2051
        This allows this _GCGraphIndex to keep track of any missing
2052
        compression parents we may want to have filled in to make those
4634.29.3 by Andrew Bennetts
Simplify further.
2053
        indices valid.  It also allows _GCGraphIndex to track any new keys.
4343.3.2 by John Arbash Meinel
All stacking tests seem to be passing for dev6 repos
2054
2055
        :param graph_index: A GraphIndex
2056
        """
4634.29.3 by Andrew Bennetts
Simplify further.
2057
        key_dependencies = self._key_dependencies
4634.29.6 by Andrew Bennetts
Put new key tracking in _KeyRefs rather than alongside it.
2058
        if key_dependencies is None:
4634.29.1 by Andrew Bennetts
Rough code to reject commit_write_group if any inventory's CHK root is absent.
2059
            return
2060
        for node in graph_index.iter_all_entries():
4634.29.6 by Andrew Bennetts
Put new key tracking in _KeyRefs rather than alongside it.
2061
            # Add parent refs from graph_index (and discard parent refs
2062
            # that the graph_index has).
2063
            key_dependencies.add_references(node[1], node[3][0])
4343.3.2 by John Arbash Meinel
All stacking tests seem to be passing for dev6 repos
2064
0.18.14 by John Arbash Meinel
A bit more work, not really usable yet.
2065
3735.40.4 by John Arbash Meinel
Factor out tests that rely on the exact bytecode.
2066
from bzrlib._groupcompress_py import (
2067
    apply_delta,
3735.40.19 by John Arbash Meinel
Implement apply_delta_to_source which doesn't have to malloc another string.
2068
    apply_delta_to_source,
3735.40.11 by John Arbash Meinel
Implement make_delta and apply_delta.
2069
    encode_base128_int,
2070
    decode_base128_int,
4300.1.1 by John Arbash Meinel
Add the ability to convert a gc block into 'human readable' form.
2071
    decode_copy_instruction,
3735.40.13 by John Arbash Meinel
Rename EquivalenceTable to LinesDeltaIndex.
2072
    LinesDeltaIndex,
3735.40.4 by John Arbash Meinel
Factor out tests that rely on the exact bytecode.
2073
    )
0.18.14 by John Arbash Meinel
A bit more work, not really usable yet.
2074
try:
4241.6.6 by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core.
2075
    from bzrlib._groupcompress_pyx import (
2076
        apply_delta,
3735.40.19 by John Arbash Meinel
Implement apply_delta_to_source which doesn't have to malloc another string.
2077
        apply_delta_to_source,
4241.6.6 by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core.
2078
        DeltaIndex,
3735.40.16 by John Arbash Meinel
Implement (de|en)code_base128_int in pyrex.
2079
        encode_base128_int,
2080
        decode_base128_int,
4241.6.6 by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core.
2081
        )
3735.40.2 by John Arbash Meinel
Add a groupcompress.encode_copy_instruction function.
2082
    GroupCompressor = PyrexGroupCompressor
4574.3.6 by Martin Pool
More warnings when failing to load extensions
2083
except ImportError, e:
4574.3.8 by Martin Pool
Only mutter extension load errors when they occur, and record for later
2084
    osutils.failed_to_load_extension(e)
4241.6.6 by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core.
2085
    GroupCompressor = PythonGroupCompressor
2086