# Copyright (C) 2008-2011 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

"""Core compression logic for compressing streams of related files."""

import time
import zlib
try:
    import pylzma
except ImportError:
    pylzma = None

from bzrlib.lazy_import import lazy_import
lazy_import(globals(), """
from bzrlib import (
    annotate,
    config,
    debug,
    errors,
    graph as _mod_graph,
    osutils,
    pack,
    static_tuple,
    trace,
    tsort,
    )

from bzrlib.repofmt import pack_repo
from bzrlib.i18n import gettext
""")

from bzrlib.btree_index import BTreeBuilder
from bzrlib.lru_cache import LRUSizeCache
from bzrlib.versionedfile import (
    _KeyRefs,
    adapter_registry,
    AbsentContentFactory,
    ChunkedContentFactory,
    FulltextContentFactory,
    VersionedFilesWithFallbacks,
    )

# Minimum number of uncompressed bytes to try fetch at once when retrieving
# groupcompress blocks.
BATCH_SIZE = 2**16

_USE_LZMA = False and (pylzma is not None)

# osutils.sha_string('')
_null_sha1 = 'da39a3ee5e6b4b0d3255bfef95601890afd80709'

def sort_gc_optimal(parent_map):
    """Sort and group the keys in parent_map into groupcompress order.

    groupcompress is defined (currently) as reverse-topological order, grouped
    by the key prefix.

    :return: A sorted list of keys
    """
    # groupcompress ordering is approximately reverse topological,
    # properly grouped by file-id.
    per_prefix_map = {}
    for key, value in parent_map.iteritems():
        if isinstance(key, str) or len(key) == 1:
            prefix = ''
        else:
            prefix = key[0]
        try:
            per_prefix_map[prefix][key] = value
        except KeyError:
            per_prefix_map[prefix] = {key: value}

    present_keys = []
    for prefix in sorted(per_prefix_map):
        present_keys.extend(reversed(tsort.topo_sort(per_prefix_map[prefix])))
    return present_keys
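
# A worked example of the ordering (illustrative; the two-element keys and
# revision names below are invented for this sketch):
#
#   parent_map = {
#       ('file-1', 'rev-2'): (('file-1', 'rev-1'),),
#       ('file-1', 'rev-1'): (),
#       ('file-2', 'rev-1'): (),
#       }
#   sort_gc_optimal(parent_map)
#   # => [('file-1', 'rev-2'), ('file-1', 'rev-1'), ('file-2', 'rev-1')]
#
# All 'file-1' texts come out together, newest first, before any 'file-2'
# texts, keeping closely related texts adjacent for delta compression.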


# The max zlib window size is 32kB, so if we set the decompressor's
# 'max_size' output to the requested bytes + 32kB, then we should guarantee
# num_bytes coming out.
_ZLIB_DECOMP_WINDOW = 32*1024

class GroupCompressBlock(object):
    """An object which maintains the internal structure of the compressed data.

    This tracks the meta info (start of text, length, type, etc.)
    """

    # Group Compress Block v1 Zlib
    GCB_HEADER = 'gcb1z\n'
    # Group Compress Block v1 Lzma
    GCB_LZ_HEADER = 'gcb1l\n'
    GCB_KNOWN_HEADERS = (GCB_HEADER, GCB_LZ_HEADER)

    def __init__(self):
        # map by key? or just order in file?
        self._compressor_name = None
        self._z_content_chunks = None
        self._z_content_decompressor = None
        self._z_content_length = None
        self._content_length = None
        self._content = None
        self._content_chunks = None

    def __len__(self):
        # This is the maximum number of bytes this object will reference if
        # everything is decompressed. However, if we decompress less than
        # everything... (this would cause some problems for LRUSizeCache)
        return self._content_length + self._z_content_length

    def _ensure_content(self, num_bytes=None):
        """Make sure that content has been expanded enough.

        :param num_bytes: Ensure that we have extracted at least num_bytes of
            content. If None, consume everything
        """
        if self._content_length is None:
            raise AssertionError('self._content_length should never be None')
        if num_bytes is None:
            num_bytes = self._content_length
        elif (self._content_length is not None
              and num_bytes > self._content_length):
            raise AssertionError(
                'requested num_bytes (%d) > content length (%d)'
                % (num_bytes, self._content_length))
        # Expand the content if required
        if self._content is None:
            if self._content_chunks is not None:
                self._content = ''.join(self._content_chunks)
                self._content_chunks = None
        if self._content is None:
            # We join self._z_content_chunks here, because if we are
            # decompressing, then it is *very* likely that we have a single
            # chunk
            if self._z_content_chunks is None:
                raise AssertionError('No content to decompress')
            z_content = ''.join(self._z_content_chunks)
            if z_content == '':
                self._content = ''
            elif self._compressor_name == 'lzma':
                # We don't do partial lzma decomp yet
                self._content = pylzma.decompress(z_content)
            elif self._compressor_name == 'zlib':
                # Start a zlib decompressor
                if num_bytes * 4 > self._content_length * 3:
                    # If we are requesting more than 3/4ths of the content,
                    # just extract the whole thing in a single pass
                    num_bytes = self._content_length
                    self._content = zlib.decompress(z_content)
                else:
                    self._z_content_decompressor = zlib.decompressobj()
                    # Seed the decompressor with the uncompressed bytes, so
                    # that the rest of the code is simplified
                    self._content = self._z_content_decompressor.decompress(
                        z_content, num_bytes + _ZLIB_DECOMP_WINDOW)
                    if not self._z_content_decompressor.unconsumed_tail:
                        self._z_content_decompressor = None
            else:
                raise AssertionError('Unknown compressor: %r'
                                     % self._compressor_name)
        # Any bytes remaining to be decompressed will be in the decompressor's
        # 'unconsumed_tail'

        # Do we have enough bytes already?
        if len(self._content) >= num_bytes:
            return
        # If we got this far, and don't have a decompressor, something is wrong
        if self._z_content_decompressor is None:
            raise AssertionError(
                'No decompressor to decompress %d bytes' % num_bytes)
        remaining_decomp = self._z_content_decompressor.unconsumed_tail
        if not remaining_decomp:
            raise AssertionError('Nothing left to decompress')
        needed_bytes = num_bytes - len(self._content)
        # We always set max_size to 32kB over the minimum needed, so that
        # zlib will give us as much as we really want.
        # TODO: If this isn't good enough, we could make a loop here,
        #       that keeps expanding the request until we get enough
        self._content += self._z_content_decompressor.decompress(
            remaining_decomp, needed_bytes + _ZLIB_DECOMP_WINDOW)
        if len(self._content) < num_bytes:
            raise AssertionError('%d bytes wanted, only %d available'
                                 % (num_bytes, len(self._content)))
        if not self._z_content_decompressor.unconsumed_tail:
            # The stream is finished
            self._z_content_decompressor = None
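
    # A sketch of the partial-expansion behaviour above (byte counts are
    # illustrative): for a block whose uncompressed content is 2MB,
    #
    #   block._ensure_content(1000)   # inflates ~1000 + 32kB, keeps the
    #                                 # decompressor around for the rest
    #   block._ensure_content(500)    # no-op, bytes are already expanded
    #   block._ensure_content()       # inflates everything that remains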

    def _parse_bytes(self, bytes, pos):
        """Read the various lengths from the header.

        This also populates the various 'compressed' buffers.

        :return: The position in bytes just after the last newline
        """
        # At present, we have 2 integers for the compressed and uncompressed
        # content. In base10 (ascii) 14 bytes can represent > 1TB, so to avoid
        # checking too far, cap the search to 14 bytes.
        pos2 = bytes.index('\n', pos, pos + 14)
        self._z_content_length = int(bytes[pos:pos2])
        pos = pos2 + 1
        pos2 = bytes.index('\n', pos, pos + 14)
        self._content_length = int(bytes[pos:pos2])
        pos = pos2 + 1
        if len(bytes) != (pos + self._z_content_length):
            # XXX: Define some GCCorrupt error ?
            raise AssertionError('Invalid bytes: (%d) != %d + %d' %
                                 (len(bytes), pos, self._z_content_length))
        self._z_content_chunks = (bytes[pos:],)

    @property
    def _z_content(self):
        """Return z_content_chunks as a simple string.

        Meant only to be used by the test suite.
        """
        if self._z_content_chunks is not None:
            return ''.join(self._z_content_chunks)
        return None

    @classmethod
    def from_bytes(cls, bytes):
        out = cls()
        if bytes[:6] not in cls.GCB_KNOWN_HEADERS:
            raise ValueError('bytes did not start with any of %r'
                             % (cls.GCB_KNOWN_HEADERS,))
        # XXX: why not test the whole header?
        if bytes[4] == 'z':
            out._compressor_name = 'zlib'
        elif bytes[4] == 'l':
            out._compressor_name = 'lzma'
        else:
            raise ValueError('unknown compressor: %r' % (bytes,))
        out._parse_bytes(bytes, 6)
        return out
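
    # The serialised form parsed above looks like (a sketch of the layout
    # described by _parse_bytes and produced by to_chunks):
    #
    #   'gcb1z\n'          # 6-byte header naming the zlib compressor
    #   '<z_len>\n'        # compressed content length, base-10 ascii
    #   '<len>\n'          # uncompressed content length, base-10 ascii
    #   <z_len bytes of compressed content>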

    def extract(self, key, start, end, sha1=None):
        """Extract the text for a specific key.

        :param key: The label used for this content
        :param sha1: TODO (should we validate only when sha1 is supplied?)
        :return: The bytes for the content
        """
        if start == end == 0:
            return ''
        self._ensure_content(end)
        # The bytes are 'f' or 'd' for the type, then a variable-length
        # base128 integer for the content size, then the actual content.
        # We know that the variable-length integer won't be longer than 5
        # bytes (it takes 5 bytes to encode 2^32)
        c = self._content[start]
        if c == 'f':
            type = 'fulltext'
        else:
            if c != 'd':
                raise ValueError('Unknown content control code: %s'
                                 % (c,))
            type = 'delta'
        content_len, len_len = decode_base128_int(
                            self._content[start + 1:start + 6])
        content_start = start + 1 + len_len
        if end != content_start + content_len:
            raise ValueError('end != len according to field header'
                ' %s != %s' % (end, content_start + content_len))
        if c == 'f':
            bytes = self._content[content_start:end]
        elif c == 'd':
            bytes = apply_delta_to_source(self._content, content_start, end)
        return bytes
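
    # Record layout inside the expanded content (a sketch; the offsets are
    # illustrative): a 5-byte fulltext 'hello' stored at start=0 is
    #
    #   'f' + '\x05' + 'hello'      # control byte, base128 length, content
    #
    # so extract(key, 0, 7) returns 'hello': one control byte, one length
    # byte, five content bytes.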

    def set_chunked_content(self, content_chunks, length):
        """Set the content of this block to the given chunks."""
        # If we have lots of short lines, it may be more efficient to join
        # the content ahead of time. If the content is <10MiB, we don't really
        # care about the extra memory consumption, so we can just pack it and
        # be done. However, timing showed 18s => 17.9s for repacking 1k revs of
        # mysql, which is below the noise margin
        self._content_length = length
        self._content_chunks = content_chunks
        self._content = None
        self._z_content_chunks = None

    def set_content(self, content):
        """Set the content of this block."""
        self._content_length = len(content)
        self._content = content
        self._z_content_chunks = None

    def _create_z_content_using_lzma(self):
        if self._content_chunks is not None:
            self._content = ''.join(self._content_chunks)
            self._content_chunks = None
        if self._content is None:
            raise AssertionError('Nothing to compress')
        z_content = pylzma.compress(self._content)
        self._z_content_chunks = (z_content,)
        self._z_content_length = len(z_content)

    def _create_z_content_from_chunks(self, chunks):
        compressor = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION)
        # Peak memory at this point is 1 fulltext, 1 compressed text, + zlib
        # overhead (measured peak is maybe 30MB over the above...)
        compressed_chunks = map(compressor.compress, chunks)
        compressed_chunks.append(compressor.flush())
        # Ignore empty chunks
        self._z_content_chunks = [c for c in compressed_chunks if c]
        self._z_content_length = sum(map(len, self._z_content_chunks))

    def _create_z_content(self):
        if self._z_content_chunks is not None:
            return
        if _USE_LZMA:
            self._create_z_content_using_lzma()
            return
        if self._content_chunks is not None:
            chunks = self._content_chunks
        else:
            chunks = (self._content,)
        self._create_z_content_from_chunks(chunks)

    def to_chunks(self):
        """Create the byte stream as a series of 'chunks'"""
        self._create_z_content()
        if _USE_LZMA:
            header = self.GCB_LZ_HEADER
        else:
            header = self.GCB_HEADER
        chunks = ['%s%d\n%d\n'
                  % (header, self._z_content_length, self._content_length),
                 ]
        chunks.extend(self._z_content_chunks)
        total_len = sum(map(len, chunks))
        return total_len, chunks

    def to_bytes(self):
        """Encode the information into a byte stream."""
        total_len, chunks = self.to_chunks()
        return ''.join(chunks)
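
    # A minimal round-trip sketch using only this class (the hand-encoded
    # record and key are illustrative):
    #
    #   block = GroupCompressBlock()
    #   block.set_content('f\x05hello')             # one fulltext record
    #   data = block.to_bytes()                     # 'gcb1z\n<z_len>\n7\n...'
    #   copy = GroupCompressBlock.from_bytes(data)
    #   copy.extract(('key',), 0, 7)                # => 'hello'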

    def _dump(self, include_text=False):
        """Take this block, and spit out a human-readable structure.

        :param include_text: Inserts also include text bits; choose whether
            you want this displayed in the dump or not.
        :return: A dump of the given block. The layout is something like:
            [('f', length), ('d', delta_length, text_length, [delta_info])]
            delta_info := [('i', num_bytes, text), ('c', offset, num_bytes),
            ...]
        """
        self._ensure_content()
        result = []
        pos = 0
        while pos < self._content_length:
            kind = self._content[pos]
            pos += 1
            if kind not in ('f', 'd'):
                raise ValueError('invalid kind character: %r' % (kind,))
            content_len, len_len = decode_base128_int(
                                self._content[pos:pos + 5])
            pos += len_len
            if content_len + pos > self._content_length:
                raise ValueError('invalid content_len %d for record @ pos %d'
                                 % (content_len, pos - len_len - 1))
            if kind == 'f': # Fulltext
                if include_text:
                    text = self._content[pos:pos+content_len]
                    result.append(('f', content_len, text))
                else:
                    result.append(('f', content_len))
            elif kind == 'd': # Delta
                delta_content = self._content[pos:pos+content_len]
                delta_info = []
                # The first entry in a delta is the decompressed length
                decomp_len, delta_pos = decode_base128_int(delta_content)
                result.append(('d', content_len, decomp_len, delta_info))
                measured_len = 0
                while delta_pos < content_len:
                    c = ord(delta_content[delta_pos])
                    delta_pos += 1
                    if c & 0x80: # Copy
                        (offset, length,
                         delta_pos) = decode_copy_instruction(delta_content, c,
                                                              delta_pos)
                        if include_text:
                            text = self._content[offset:offset+length]
                            delta_info.append(('c', offset, length, text))
                        else:
                            delta_info.append(('c', offset, length))
                        measured_len += length
                    else: # Insert
                        if include_text:
                            txt = delta_content[delta_pos:delta_pos+c]
                        else:
                            txt = ''
                        delta_info.append(('i', c, txt))
                        measured_len += c
                        delta_pos += c
                if delta_pos != content_len:
                    raise ValueError('Delta consumed a bad number of bytes:'
                                     ' %d != %d' % (delta_pos, content_len))
                if measured_len != decomp_len:
                    raise ValueError('Delta claimed fulltext was %d bytes, but'
                                     ' extraction resulted in %d bytes'
                                     % (decomp_len, measured_len))
            pos += content_len
        return result
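
    # For example (illustrative; the exact delta byte counts depend on the
    # copy-instruction encoding), a block holding a 5-byte fulltext followed
    # by a delta that copies those 5 bytes back out might dump as:
    #
    #   [('f', 5), ('d', 3, 5, [('c', 0, 5)])]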


class _LazyGroupCompressFactory(object):
    """Yield content from a GroupCompressBlock on demand."""

    def __init__(self, key, parents, manager, start, end, first):
        """Create a _LazyGroupCompressFactory

        :param key: The key of just this record
        :param parents: The parents of this key (possibly None)
        :param manager: The _LazyGroupContentManager that holds the
            GroupCompressBlock for this record
        :param start: Offset of the first byte for this record in the
            uncompressed content
        :param end: Offset of the byte just after the end of this record
            (ie, bytes = content[start:end])
        :param first: Is this the first Factory for the given block?
        """
        self.key = key
        self.parents = parents
        self.sha1 = None
        # Note: This attribute coupled with Manager._factories creates a
        #       reference cycle. Perhaps we would rather use a weakref(), or
        #       find an appropriate time to release the ref. After the first
        #       get_bytes_as call? After Manager.get_record_stream() returns
        #       the object?
        self._manager = manager
        self._bytes = None
        self.storage_kind = 'groupcompress-block'
        if not first:
            self.storage_kind = 'groupcompress-block-ref'
        self._first = first
        self._start = start
        self._end = end

    def __repr__(self):
        return '%s(%s, first=%s)' % (self.__class__.__name__,
            self.key, self._first)

    def get_bytes_as(self, storage_kind):
        if storage_kind == self.storage_kind:
            if self._first:
                # wire bytes, something...
                return self._manager._wire_bytes()
            else:
                return ''
        if storage_kind in ('fulltext', 'chunked'):
            if self._bytes is None:
                # Grab and cache the raw bytes for this entry
                # and break the ref-cycle with _manager since we don't need it
                # anymore
                try:
                    self._manager._prepare_for_extract()
                except zlib.error as value:
                    raise errors.DecompressCorruption("zlib: " + str(value))
                block = self._manager._block
                self._bytes = block.extract(self.key, self._start, self._end)
                # There are code paths that first extract as fulltext, and then
                # extract as storage_kind (smart fetch). So we don't break the
                # refcycle here, but instead in manager.get_record_stream()
            if storage_kind == 'fulltext':
                return self._bytes
            else:
                return [self._bytes]
        raise errors.UnavailableRepresentation(self.key, storage_kind,
                                               self.storage_kind)


class _LazyGroupContentManager(object):
    """This manages a group of _LazyGroupCompressFactory objects."""

    _max_cut_fraction = 0.75 # We allow a block to be trimmed to 75% of
                             # current size, and still be considered
                             # reusable
    _full_block_size = 4*1024*1024
    _full_mixed_block_size = 2*1024*1024
    _full_enough_block_size = 3*1024*1024 # size at which we won't repack
    _full_enough_mixed_block_size = 2*768*1024 # 1.5MB

    def __init__(self, block, get_compressor_settings=None):
        self._block = block
        # We need to preserve the ordering
        self._factories = []
        self._last_byte = 0
        self._get_settings = get_compressor_settings
        self._compressor_settings = None

    def _get_compressor_settings(self):
        if self._compressor_settings is not None:
            return self._compressor_settings
        settings = None
        if self._get_settings is not None:
            settings = self._get_settings()
        if settings is None:
            vf = GroupCompressVersionedFiles
            settings = vf._DEFAULT_COMPRESSOR_SETTINGS
        self._compressor_settings = settings
        return self._compressor_settings

    def add_factory(self, key, parents, start, end):
        if not self._factories:
            first = True
        else:
            first = False
        # Note that this creates a reference cycle....
        factory = _LazyGroupCompressFactory(key, parents, self,
            start, end, first=first)
        # max() works here, but as a function call, doing a compare seems to be
        # significantly faster, timeit says 250ms for max() and 100ms for the
        # comparison
        if end > self._last_byte:
            self._last_byte = end
        self._factories.append(factory)

    def get_record_stream(self):
        """Get a record for all keys added so far."""
        for factory in self._factories:
            yield factory
            # Break the ref-cycle
            factory._bytes = None
            factory._manager = None
        # TODO: Consider setting self._factories = None after the above loop,
        #       as it will break the reference cycle
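
    # Typical use (a sketch; 'block' and the start/end offsets are assumed to
    # come from the index layer that located the records):
    #
    #   manager = _LazyGroupContentManager(block)
    #   manager.add_factory(('file-id', 'rev-id'), (), start, end)
    #   for record in manager.get_record_stream():
    #       text = record.get_bytes_as('fulltext')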

    def _trim_block(self, last_byte):
        """Create a new GroupCompressBlock, with just some of the content."""
        # None of the factories need to be adjusted, because the content is
        # located in an identical place. Just that some of the unreferenced
        # trailing bytes are stripped
        trace.mutter('stripping trailing bytes from groupcompress block'
                     ' %d => %d', self._block._content_length, last_byte)
        new_block = GroupCompressBlock()
        self._block._ensure_content(last_byte)
        new_block.set_content(self._block._content[:last_byte])
        self._block = new_block

    def _make_group_compressor(self):
        return GroupCompressor(self._get_compressor_settings())

    def _rebuild_block(self):
        """Create a new GroupCompressBlock with only the referenced texts."""
        compressor = self._make_group_compressor()
        tstart = time.time()
        old_length = self._block._content_length
        end_point = 0
        for factory in self._factories:
            bytes = factory.get_bytes_as('fulltext')
            (found_sha1, start_point, end_point,
             type) = compressor.compress(factory.key, bytes, factory.sha1)
            # Now update this factory with the new offsets, etc
            factory.sha1 = found_sha1
            factory._start = start_point
            factory._end = end_point
        self._last_byte = end_point
        new_block = compressor.flush()
        # TODO: Should we check that new_block really *is* smaller than the old
        #       block? It seems hard to come up with a method that it would
        #       expand, since we do full compression again. Perhaps based on a
        #       request that ends up poorly ordered?
        # TODO: If the content would have expanded, then we would want to
        #       handle a case where we need to split the block.
        #       Now that we have a user-tweakable option
        #       (max_bytes_to_index), it is possible that one person set it
        #       to a very low value, causing poor compression.
        delta = time.time() - tstart
        self._block = new_block
        trace.mutter('creating new compressed block on-the-fly in %.3fs'
                     ' %d bytes => %d bytes', delta, old_length,
                     self._block._content_length)

    def _prepare_for_extract(self):
        """A _LazyGroupCompressFactory is about to extract to fulltext."""
        # We expect that if one child is going to fulltext, all will be. This
        # helps prevent all of them from extracting a small amount at a time.
        # Which in itself isn't terribly expensive, but resizing 2MB 32kB at a
        # time (self._block._content) is a little expensive.
        self._block._ensure_content(self._last_byte)

    def _check_rebuild_action(self):
        """Check to see if our block should be repacked."""
        total_bytes_used = 0
        last_byte_used = 0
        for factory in self._factories:
            total_bytes_used += factory._end - factory._start
            if last_byte_used < factory._end:
                last_byte_used = factory._end
        # If we are using more than half of the bytes from the block, we have
        # nothing else to check
        if total_bytes_used * 2 >= self._block._content_length:
            return None, last_byte_used, total_bytes_used
        # We are using less than 50% of the content. Is the content we are
        # using at the beginning of the block? If so, we can just trim the
        # tail, rather than rebuilding from scratch.
        if total_bytes_used * 2 > last_byte_used:
            return 'trim', last_byte_used, total_bytes_used

        # We are using a small amount of the data, and it isn't just packed
        # nicely at the front, so rebuild the content.
        # Note: This would be *nicer* as a strip-data-from-group, rather than
        #       building it up again from scratch.
        #       It might be reasonable to consider the fulltext sizes for
        #       different bits when deciding this, too. As you may have a small
        #       fulltext, and a trivial delta, and you are just trading around
        #       for another fulltext. If we do a simple 'prune' you may end up
        #       expanding many deltas into fulltexts, as well.
        #       If we build a cheap enough 'strip', then we could try a strip;
        #       if that expands the content, we then rebuild.
        return 'rebuild', last_byte_used, total_bytes_used
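
    # Worked example (illustrative numbers): for a 1000-byte block where the
    # factories reference bytes [0, 100) and [100, 300), total_bytes_used is
    # 300 and last_byte_used is 300. 600 < 1000, so more than half the block
    # is unused, but 600 > 300 means the live bytes sit at the front, so the
    # answer is ('trim', 300, 300) rather than a full rebuild.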

    def check_is_well_utilized(self):
        """Is the current block considered 'well utilized'?

        This heuristic asks if the current block considers itself to be a fully
        developed group, rather than just a loose collection of data.
        """
        if len(self._factories) == 1:
            # A block of length 1 could be improved by combining with other
            # groups - don't look deeper. Even larger than max size groups
            # could compress well with adjacent versions of the same thing.
            return False
        action, last_byte_used, total_bytes_used = self._check_rebuild_action()
        block_size = self._block._content_length
        if total_bytes_used < block_size * self._max_cut_fraction:
            # This block wants to trim itself small enough that we want to
            # consider it under-utilized.
            return False
        # TODO: This code is meant to be the twin of _insert_record_stream's
        #       'start_new_block' logic. It would probably be better to factor
        #       out that logic into a shared location, so that it stays
        #       together better
        # We currently assume a block is properly utilized whenever it is >75%
        # of the size of a 'full' block. In normal operation, a block is
        # considered full when it hits 4MB of same-file content. So any block
        # >3MB is 'full enough'.
        # The only time this isn't true is when a given block has large-object
        # content. (a single file >4MB, etc.)
        # Under these circumstances, we allow a block to grow to
        # 2 x largest_content.  Which means that if a given block had a large
        # object, it may actually be under-utilized. However, given that this
        # is 'pack-on-the-fly' it is probably reasonable to not repack large
        # content blobs on-the-fly. Note that because we return False for all
        # 1-item blobs, we will repack them; we may wish to reevaluate our
        # treatment of large object blobs in the future.
        if block_size >= self._full_enough_block_size:
            return True
        # If a block is <3MB, it still may be considered 'full' if it contains
        # mixed content. The current rule is 2MB of mixed content is considered
        # full. So check to see if this block contains mixed content, and
        # set the threshold appropriately.
        common_prefix = None
        for factory in self._factories:
            prefix = factory.key[:-1]
            if common_prefix is None:
                common_prefix = prefix
            elif prefix != common_prefix:
                # Mixed content, check the size appropriately
                if block_size >= self._full_enough_mixed_block_size:
                    return True
                break
        # The content failed both the mixed check and the single-content check,
        # so obviously it is not fully utilized.
        # TODO: there is one other constraint that isn't being checked
        #       namely, that the entries in the block are in the appropriate
        #       order. For example, you could insert the entries in exactly
        #       reverse groupcompress order, and we would think that is ok.
        #       (all the right objects are in one group, and it is fully
        #       utilized, etc.) For now, we assume that case is rare,
        #       especially since we should always fetch in 'groupcompress'
        #       order.
        return False

    def _check_rebuild_block(self):
        action, last_byte_used, total_bytes_used = self._check_rebuild_action()
        if action is None:
            return
        if action == 'trim':
            self._trim_block(last_byte_used)
        elif action == 'rebuild':
            self._rebuild_block()
        else:
            raise ValueError('unknown rebuild action: %r' % (action,))
3735.32.23 by John Arbash Meinel
Add a _LazyGroupContentManager._check_rebuild_block
700
3735.32.14 by John Arbash Meinel
Move the tests over to testing the LazyGroupContentManager object.
701
    def _wire_bytes(self):
702
        """Return a byte stream suitable for transmitting over the wire."""
3735.32.24 by John Arbash Meinel
_wire_bytes() now strips groups as necessary, as does _insert_record_stream
703
        self._check_rebuild_block()
3735.32.16 by John Arbash Meinel
We now have a general header for the GC block.
704
        # The outer block starts with:
705
        #   'groupcompress-block\n'
706
        #   <length of compressed key info>\n
707
        #   <length of uncompressed info>\n
708
        #   <length of gc block>\n
709
        #   <header bytes>
710
        #   <gc-block>
711
        lines = ['groupcompress-block\n']
712
        # The minimal info we need is the key, the start offset, and the
713
        # parents. The length and type are encoded in the record itself.
714
        # However, passing in the other bits makes it easier.  The list of
715
        # keys, and the start offset, the length
716
        # 1 line key
717
        # 1 line with parents, '' for ()
718
        # 1 line for start offset
719
        # 1 line for end byte
720
        header_lines = []
3735.32.15 by John Arbash Meinel
Change the GroupCompressBlock code to allow not recording 'end'.
721
        for factory in self._factories:
3735.32.16 by John Arbash Meinel
We now have a general header for the GC block.
722
            key_bytes = '\x00'.join(factory.key)
723
            parents = factory.parents
724
            if parents is None:
725
                parent_bytes = 'None:'
726
            else:
727
                parent_bytes = '\t'.join('\x00'.join(key) for key in parents)
728
            record_header = '%s\n%s\n%d\n%d\n' % (
729
                key_bytes, parent_bytes, factory._start, factory._end)
730
            header_lines.append(record_header)
3735.37.5 by John Arbash Meinel
Restore the refcycle reduction code.
731
            # TODO: Can we break the refcycle at this point and set
732
            #       factory._manager = None?
3735.32.16 by John Arbash Meinel
We now have a general header for the GC block.
733
        header_bytes = ''.join(header_lines)
734
        del header_lines
735
        header_bytes_len = len(header_bytes)
736
        z_header_bytes = zlib.compress(header_bytes)
737
        del header_bytes
738
        z_header_bytes_len = len(z_header_bytes)
5439.2.1 by John Arbash Meinel
Change GroupCompressBlock to work in self._z_compress_chunks
739
        block_bytes_len, block_chunks = self._block.to_chunks()
3735.32.16 by John Arbash Meinel
We now have a general header for the GC block.
740
        lines.append('%d\n%d\n%d\n' % (z_header_bytes_len, header_bytes_len,
5439.2.1 by John Arbash Meinel
Change GroupCompressBlock to work in self._z_compress_chunks
741
                                       block_bytes_len))
3735.32.16 by John Arbash Meinel
We now have a general header for the GC block.
742
        lines.append(z_header_bytes)
5439.2.1 by John Arbash Meinel
Change GroupCompressBlock to work in self._z_compress_chunks
743
        lines.extend(block_chunks)
744
        del z_header_bytes, block_chunks
745
        # TODO: This is a point where we will double the memory consumption. To
746
        #       avoid this, we probably have to switch to a 'chunked' api
3735.32.16 by John Arbash Meinel
We now have a general header for the GC block.
747
        return ''.join(lines)
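
    # Editorial sketch (not part of the original bzrlib source): a manager
    # holding a single record might serialise, via _wire_bytes(), to:
    #
    #   'groupcompress-block\n'  # storage kind
    #   '23\n'                   # hypothetical len of zlib-compressed header
    #   '31\n'                   # hypothetical len of uncompressed header
    #   '4096\n'                 # hypothetical len of the gc block
    #   <23 bytes of compressed header: key\nparents\nstart\nend\n per record>
    #   <4096 bytes of GroupCompressBlock content>
    #
    # from_bytes() below parses exactly this layout back apart.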

    @classmethod
    def from_bytes(cls, bytes):
        # TODO: This does extra string copying, probably better to do it a
        #       different way. At a minimum this creates 2 copies of the
        #       compressed content
        (storage_kind, z_header_len, header_len,
         block_len, rest) = bytes.split('\n', 4)
        del bytes
        if storage_kind != 'groupcompress-block':
            raise ValueError('Unknown storage kind: %s' % (storage_kind,))
        z_header_len = int(z_header_len)
        if len(rest) < z_header_len:
            raise ValueError('Compressed header len shorter than all bytes')
        z_header = rest[:z_header_len]
        header_len = int(header_len)
        header = zlib.decompress(z_header)
        if len(header) != header_len:
            raise ValueError('invalid length for decompressed bytes')
        del z_header
        block_len = int(block_len)
        if len(rest) != z_header_len + block_len:
            raise ValueError('Invalid length for block')
        block_bytes = rest[z_header_len:]
        del rest
        # So now we have a valid GCB, we just need to parse the factories that
        # were sent to us
        header_lines = header.split('\n')
        del header
        last = header_lines.pop()
        if last != '':
            raise ValueError('header lines did not end with a trailing'
                             ' newline')
        if len(header_lines) % 4 != 0:
            raise ValueError('The header was not a multiple of 4 lines')
        block = GroupCompressBlock.from_bytes(block_bytes)
        del block_bytes
        result = cls(block)
        for start in xrange(0, len(header_lines), 4):
            # intern()?
            key = tuple(header_lines[start].split('\x00'))
            parents_line = header_lines[start+1]
            if parents_line == 'None:':
                parents = None
            else:
                parents = tuple([tuple(segment.split('\x00'))
                                 for segment in parents_line.split('\t')
                                 if segment])
            start_offset = int(header_lines[start+2])
            end_offset = int(header_lines[start+3])
            result.add_factory(key, parents, start_offset, end_offset)
        return result


def network_block_to_records(storage_kind, bytes, line_end):
    if storage_kind != 'groupcompress-block':
        raise ValueError('Unknown storage kind: %s' % (storage_kind,))
    manager = _LazyGroupContentManager.from_bytes(bytes)
    return manager.get_record_stream()
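
# Editorial usage sketch (not part of the original source): the bytes produced
# by _LazyGroupContentManager._wire_bytes() can be turned back into content
# factories with this helper:
#
#   wire = manager._wire_bytes()
#   for factory in network_block_to_records('groupcompress-block', wire, None):
#       text = factory.get_bytes_as('fulltext')
#
# 'line_end' is unused here; it appears to exist only to match the signature
# expected of network record parsers elsewhere in bzrlib.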


class _CommonGroupCompressor(object):

    def __init__(self, settings=None):
        """Create a GroupCompressor."""
        self.chunks = []
        self._last = None
        self.endpoint = 0
        self.input_bytes = 0
        self.labels_deltas = {}
        self._delta_index = None # Set by the children
        self._block = GroupCompressBlock()
        if settings is None:
            self._settings = {}
        else:
            self._settings = settings

    def compress(self, key, bytes, expected_sha, nostore_sha=None, soft=False):
        """Compress lines with label key.

        :param key: A key tuple. It is stored in the output
            for identification of the text during decompression. If the last
            element is 'None' it is replaced with the sha1 of the text -
            e.g. sha1:xxxxxxx.
        :param bytes: The bytes to be compressed
        :param expected_sha: If non-None, the sha the lines are believed to
            have. During compression the sha is calculated; a mismatch will
            cause an error.
        :param nostore_sha: If the computed sha1 sum matches, we will raise
            ExistingContent rather than adding the text.
        :param soft: Do a 'soft' compression. This means that we require larger
            ranges to match before they are considered for a copy command.

        :return: The sha1 of lines, the start and end offsets in the delta, and
            the type ('fulltext' or 'delta').

        :seealso VersionedFiles.add_lines:
        """
        if not bytes: # empty, like a dir entry, etc
            if nostore_sha == _null_sha1:
                raise errors.ExistingContent()
            return _null_sha1, 0, 0, 'fulltext'
        # we assume someone knew what they were doing when they passed it in
        if expected_sha is not None:
            sha1 = expected_sha
        else:
            sha1 = osutils.sha_string(bytes)
        if nostore_sha is not None:
            if sha1 == nostore_sha:
                raise errors.ExistingContent()
        if key[-1] is None:
            key = key[:-1] + ('sha1:' + sha1,)

        start, end, type = self._compress(key, bytes, len(bytes) / 2, soft)
        return sha1, start, end, type
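
    # Editorial usage sketch (not part of the original source), assuming a
    # concrete subclass such as PythonGroupCompressor:
    #
    #   compressor = PythonGroupCompressor()
    #   sha1, start, end, kind = compressor.compress(
    #       ('file-id', 'rev-id'), 'some text\n', None)
    #   # kind is 'fulltext' for the first text; later, similar texts will
    #   # usually come back as 'delta' against earlier output.
    #   block = compressor.flush()  # yields the finished GroupCompressBlock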

    def _compress(self, key, bytes, max_delta_size, soft=False):
        """Compress lines with label key.

        :param key: A key tuple. It is stored in the output for identification
            of the text during decompression.

        :param bytes: The bytes to be compressed

        :param max_delta_size: The size above which we issue a fulltext instead
            of a delta.

        :param soft: Do a 'soft' compression. This means that we require larger
            ranges to match before they are considered for a copy command.

        :return: The start and end offsets in the delta, and the type
            ('fulltext' or 'delta').
        """
        raise NotImplementedError(self._compress)

    def extract(self, key):
        """Extract a key previously added to the compressor.

        :param key: The key to extract.
        :return: An iterable over bytes and the sha1.
        """
        (start_byte, start_chunk, end_byte, end_chunk) = self.labels_deltas[key]
        delta_chunks = self.chunks[start_chunk:end_chunk]
        stored_bytes = ''.join(delta_chunks)
        if stored_bytes[0] == 'f':
            fulltext_len, offset = decode_base128_int(stored_bytes[1:10])
            data_len = fulltext_len + 1 + offset
            if data_len != len(stored_bytes):
                raise ValueError('Index claimed fulltext len, but stored bytes'
                                 ' claim %s != %s'
                                 % (len(stored_bytes), data_len))
            bytes = stored_bytes[offset + 1:]
        else:
            # XXX: This is inefficient at best
            source = ''.join(self.chunks[:start_chunk])
            if stored_bytes[0] != 'd':
                raise ValueError('Unknown content kind, bytes claim %s'
                                 % (stored_bytes[0],))
            delta_len, offset = decode_base128_int(stored_bytes[1:10])
            data_len = delta_len + 1 + offset
            if data_len != len(stored_bytes):
                raise ValueError('Index claimed delta len, but stored bytes'
                                 ' claim %s != %s'
                                 % (len(stored_bytes), data_len))
            bytes = apply_delta(source, stored_bytes[offset + 1:])
        bytes_sha1 = osutils.sha_string(bytes)
        return bytes, bytes_sha1
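
    # Editorial summary of the stored record layout read back by extract()
    # (not part of the original source):
    #
    #   fulltext: 'f' + base128(fulltext_length) + fulltext bytes
    #   delta:    'd' + base128(delta_length)    + delta bytes
    #
    # decode_base128_int() returns (value, bytes_consumed), which is why the
    # payload starts at stored_bytes[offset + 1:] (the extra 1 skips 'f'/'d').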

    def flush(self):
        """Finish this group, creating a formatted stream.

        After calling this, the compressor should no longer be used.
        """
        self._block.set_chunked_content(self.chunks, self.endpoint)
        self.chunks = None
        self._delta_index = None
        return self._block

    def pop_last(self):
        """Call this if you want to 'revoke' the last compression.

        After this, the data structures will be rolled back, but you cannot do
        more compression.
        """
        self._delta_index = None
        del self.chunks[self._last[0]:]
        self.endpoint = self._last[1]
        self._last = None

    def ratio(self):
        """Return the overall compression ratio."""
        return float(self.input_bytes) / float(self.endpoint)
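
    # Editorial example: after compressing 10 MB of input into 2.5 MB of
    # group content, input_bytes == 10485760 and endpoint == 2621440, so
    # ratio() returns 4.0, i.e. a 4:1 compression ratio.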


class PythonGroupCompressor(_CommonGroupCompressor):

    def __init__(self, settings=None):
        """Create a GroupCompressor.

        Used only if the pyrex version is not available.
        """
        super(PythonGroupCompressor, self).__init__(settings)
        self._delta_index = LinesDeltaIndex([])
        # The actual content is managed by LinesDeltaIndex
        self.chunks = self._delta_index.lines

    def _compress(self, key, bytes, max_delta_size, soft=False):
        """see _CommonGroupCompressor._compress"""
        input_len = len(bytes)
        new_lines = osutils.split_lines(bytes)
        out_lines, index_lines = self._delta_index.make_delta(
            new_lines, bytes_length=input_len, soft=soft)
        delta_length = sum(map(len, out_lines))
        if delta_length > max_delta_size:
            # The delta is longer than the fulltext, insert a fulltext
            type = 'fulltext'
            out_lines = ['f', encode_base128_int(input_len)]
            out_lines.extend(new_lines)
            index_lines = [False, False]
            index_lines.extend([True] * len(new_lines))
        else:
            # this is a worthy delta, output it
            type = 'delta'
            out_lines[0] = 'd'
            # Update the delta_length to include those two encoded integers
            out_lines[1] = encode_base128_int(delta_length)
        # Before insertion
        start = self.endpoint
        chunk_start = len(self.chunks)
        self._last = (chunk_start, self.endpoint)
        self._delta_index.extend_lines(out_lines, index_lines)
        self.endpoint = self._delta_index.endpoint
        self.input_bytes += input_len
        chunk_end = len(self.chunks)
        self.labels_deltas[key] = (start, chunk_start,
                                   self.endpoint, chunk_end)
        return start, self.endpoint, type
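
    # Editorial note: compress() passes max_delta_size = len(bytes) / 2, so a
    # delta is only kept when it is at most half the size of the fulltext;
    # anything larger is stored as an 'f' record instead of a 'd' record.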


class PyrexGroupCompressor(_CommonGroupCompressor):
    """Produce a serialised group of compressed texts.

    It contains code very similar to SequenceMatcher because of having a similar
    task. However some key differences apply:

    * there is no junk, we want a minimal edit not a human readable diff.
    * we don't filter very common lines (because we don't know where a good
      range will start, and after the first text we want to be emitting minimal
      edits only).
    * we chain the left side, not the right side
    * we incrementally update the adjacency matrix as new lines are provided.
    * we look for matches in all of the left side, so the routine which does
      the analogous task of find_longest_match does not need to filter on the
      left side.
    """

    def __init__(self, settings=None):
        super(PyrexGroupCompressor, self).__init__(settings)
        max_bytes_to_index = self._settings.get('max_bytes_to_index', 0)
        self._delta_index = DeltaIndex(max_bytes_to_index=max_bytes_to_index)

    def _compress(self, key, bytes, max_delta_size, soft=False):
        """see _CommonGroupCompressor._compress"""
        input_len = len(bytes)
        # By having action/label/sha1/len, we can parse the group if the index
        # was ever destroyed, we have the key in 'label', we know the final
        # bytes are valid from sha1, and we know where to find the end of this
        # record because of 'len'. (the delta record itself will store the
        # total length for the expanded record)
        # 'len: %d\n' costs approximately 1% increase in total data
        # Having the labels at all costs us 9-10% increase, 38% increase for
        # inventory pages, and 5.8% increase for text pages
        # new_chunks = ['label:%s\nsha1:%s\n' % (label, sha1)]
        if self._delta_index._source_offset != self.endpoint:
            raise AssertionError('_source_offset != endpoint:'
                ' somehow the DeltaIndex got out of sync with'
                ' the output lines')
        delta = self._delta_index.make_delta(bytes, max_delta_size)
        if delta is None:
            type = 'fulltext'
            enc_length = encode_base128_int(len(bytes))
            len_mini_header = 1 + len(enc_length)
            self._delta_index.add_source(bytes, len_mini_header)
            new_chunks = ['f', enc_length, bytes]
        else:
            type = 'delta'
            enc_length = encode_base128_int(len(delta))
            len_mini_header = 1 + len(enc_length)
            new_chunks = ['d', enc_length, delta]
            self._delta_index.add_delta_source(delta, len_mini_header)
        # Before insertion
        start = self.endpoint
        chunk_start = len(self.chunks)
        # Now output these bytes
        self._output_chunks(new_chunks)
        self.input_bytes += input_len
        chunk_end = len(self.chunks)
        self.labels_deltas[key] = (start, chunk_start,
                                   self.endpoint, chunk_end)
        if self._delta_index._source_offset != self.endpoint:
            raise AssertionError('the delta index is out of sync'
                ' with the output lines %s != %s'
                % (self._delta_index._source_offset, self.endpoint))
        return start, self.endpoint, type

    def _output_chunks(self, new_chunks):
        """Output some chunks.

        :param new_chunks: The chunks to output.
        """
        self._last = (len(self.chunks), self.endpoint)
        endpoint = self.endpoint
        self.chunks.extend(new_chunks)
        endpoint += sum(map(len, new_chunks))
        self.endpoint = endpoint


def make_pack_factory(graph, delta, keylength, inconsistency_fatal=True):
    """Create a factory for creating a pack based groupcompress.

    This is only functional enough to run interface tests; it doesn't try to
    provide a full pack environment.

    :param graph: Store a graph.
    :param delta: Delta compress contents.
    :param keylength: How long should keys be.
    """
    def factory(transport):
        parents = graph
        ref_length = 0
        if graph:
            ref_length = 1
        graph_index = BTreeBuilder(reference_lists=ref_length,
            key_elements=keylength)
        stream = transport.open_write_stream('newpack')
        writer = pack.ContainerWriter(stream.write)
        writer.begin()
        index = _GCGraphIndex(graph_index, lambda:True, parents=parents,
            add_callback=graph_index.add_nodes,
            inconsistency_fatal=inconsistency_fatal)
        access = pack_repo._DirectPackAccess({})
        access.set_writer(writer, graph_index, (transport, 'newpack'))
        result = GroupCompressVersionedFiles(index, access, delta)
        result.stream = stream
        result.writer = writer
        return result
    return factory
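
# Editorial usage sketch (not part of the original source); the memory
# transport import is an assumption about the test environment:
#
#   from bzrlib.transport.memory import MemoryTransport
#   factory = make_pack_factory(graph=True, delta=True, keylength=1)
#   vf = factory(MemoryTransport())
#   vf.add_lines(('key',), (), ['some\n', 'lines\n'])
#   cleanup_pack_group(vf)  # ends the pack writer and closes the stream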


def cleanup_pack_group(versioned_files):
    versioned_files.writer.end()
    versioned_files.stream.close()


class _BatchingBlockFetcher(object):
    """Fetch group compress blocks in batches.

    :ivar total_bytes: int of expected number of bytes needed to fetch the
        currently pending batch.
    """

    def __init__(self, gcvf, locations, get_compressor_settings=None):
        self.gcvf = gcvf
        self.locations = locations
        self.keys = []
        self.batch_memos = {}
        self.memos_to_get = []
        self.total_bytes = 0
        self.last_read_memo = None
        self.manager = None
        self._get_compressor_settings = get_compressor_settings

    def add_key(self, key):
        """Add another key to fetch.

        :return: The estimated number of bytes needed to fetch the batch so
            far.
        """
        self.keys.append(key)
        index_memo, _, _, _ = self.locations[key]
        read_memo = index_memo[0:3]
        # Three possibilities for this read_memo:
        #  - it's already part of this batch; or
        #  - it's not yet part of this batch, but is already cached; or
        #  - it's not yet part of this batch and will need to be fetched.
        if read_memo in self.batch_memos:
            # This read memo is already in this batch.
            return self.total_bytes
        try:
            cached_block = self.gcvf._group_cache[read_memo]
        except KeyError:
            # This read memo is new to this batch, and the data isn't cached
            # either.
            self.batch_memos[read_memo] = None
            self.memos_to_get.append(read_memo)
            byte_length = read_memo[2]
            self.total_bytes += byte_length
        else:
            # This read memo is new to this batch, but cached.
            # Keep a reference to the cached block in batch_memos because it's
            # certain that we'll use it when this batch is processed, but
            # there's a risk that it would fall out of _group_cache between now
            # and then.
            self.batch_memos[read_memo] = cached_block
        return self.total_bytes

    def _flush_manager(self):
        if self.manager is not None:
            for factory in self.manager.get_record_stream():
                yield factory
            self.manager = None
            self.last_read_memo = None

    def yield_factories(self, full_flush=False):
        """Yield factories for keys added since the last yield.  They will be
        returned in the order they were added via add_key.

        :param full_flush: by default, some results may not be returned in case
            they can be part of the next batch.  If full_flush is True, then
            all results are returned.
        """
        if self.manager is None and not self.keys:
            return
        # Fetch all memos in this batch.
        blocks = self.gcvf._get_blocks(self.memos_to_get)
        # Turn blocks into factories and yield them.
        memos_to_get_stack = list(self.memos_to_get)
        memos_to_get_stack.reverse()
        for key in self.keys:
            index_memo, _, parents, _ = self.locations[key]
            read_memo = index_memo[:3]
            if self.last_read_memo != read_memo:
                # We are starting a new block. If we have a
                # manager, we have found everything that fits for
                # now, so yield records
                for factory in self._flush_manager():
                    yield factory
                # Now start a new manager.
                if memos_to_get_stack and memos_to_get_stack[-1] == read_memo:
                    # The next block from _get_blocks will be the block we
                    # need.
                    block_read_memo, block = blocks.next()
                    if block_read_memo != read_memo:
                        raise AssertionError(
                            "block_read_memo out of sync with read_memo"
                            " (%r != %r)" % (block_read_memo, read_memo))
                    self.batch_memos[read_memo] = block
                    memos_to_get_stack.pop()
                else:
                    block = self.batch_memos[read_memo]
                self.manager = _LazyGroupContentManager(block,
                    get_compressor_settings=self._get_compressor_settings)
                self.last_read_memo = read_memo
            start, end = index_memo[3:5]
            self.manager.add_factory(key, parents, start, end)
        if full_flush:
            for factory in self._flush_manager():
                yield factory
        del self.keys[:]
        self.batch_memos.clear()
        del self.memos_to_get[:]
        self.total_bytes = 0
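
    # Editorial usage sketch (not part of the original source). Callers
    # typically accumulate keys until the estimate returned by add_key()
    # crosses a batch-size cap of their choosing, then drain the batch:
    #
    #   batcher = _BatchingBlockFetcher(gcvf, locations)
    #   for key in keys_to_fetch:
    #       if batcher.add_key(key) > BATCH_SIZE:  # BATCH_SIZE: caller's cap
    #           for factory in batcher.yield_factories():
    #               yield factory
    #   for factory in batcher.yield_factories(full_flush=True):
    #       yield factory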


class GroupCompressVersionedFiles(VersionedFilesWithFallbacks):
    """A group-compress based VersionedFiles implementation."""

    # This controls how the GroupCompress DeltaIndex works. Basically, we
    # compute hash pointers into the source blocks (so hash(text) => text).
    # However each of these references costs some memory in trade against a
    # more accurate match result. For very large files, they either are
    # pre-compressed and change in bulk whenever they change, or change in just
    # local blocks. Either way, 'improved resolution' is not very helpful,
    # versus running out of memory trying to track everything. The default max
    # gives 100% sampling of a 1MB file.
    _DEFAULT_MAX_BYTES_TO_INDEX = 1024 * 1024
    _DEFAULT_COMPRESSOR_SETTINGS = {'max_bytes_to_index':
                                     _DEFAULT_MAX_BYTES_TO_INDEX}

    def __init__(self, index, access, delta=True, _unadded_refs=None,
                 _group_cache=None):
        """Create a GroupCompressVersionedFiles object.

        :param index: The index object storing access and graph data.
        :param access: The access object storing raw data.
        :param delta: Whether to delta compress or just entropy compress.
        :param _unadded_refs: private parameter, don't use.
        :param _group_cache: private parameter, don't use.
        """
        self._index = index
        self._access = access
        self._delta = delta
        if _unadded_refs is None:
            _unadded_refs = {}
        self._unadded_refs = _unadded_refs
        if _group_cache is None:
            _group_cache = LRUSizeCache(max_size=50*1024*1024)
        self._group_cache = _group_cache
        self._immediate_fallback_vfs = []
        self._max_bytes_to_index = None

    def without_fallbacks(self):
        """Return a clone of this object without any fallbacks configured."""
        return GroupCompressVersionedFiles(self._index, self._access,
            self._delta, _unadded_refs=dict(self._unadded_refs),
            _group_cache=self._group_cache)

    def add_lines(self, key, parents, lines, parent_texts=None,
        left_matching_blocks=None, nostore_sha=None, random_id=False,
        check_content=True):
        """Add a text to the store.

        :param key: The key tuple of the text to add.
        :param parents: The parents key tuples of the text to add.
        :param lines: A list of lines. Each line must be a bytestring, and all
            of them except the last must be terminated with \\n and contain no
            other \\n's. The last line may either contain no \\n's or a single
            terminating \\n. If the lines list does not meet this constraint
            the add routine may error or may succeed - but you will be unable
            to read the data back accurately. (Checking the lines have been
            split correctly is expensive and extremely unlikely to catch bugs
            so it is not done at runtime unless check_content is True.)
        :param parent_texts: An optional dictionary containing the opaque
            representations of some or all of the parents of version_id to
            allow delta optimisations.  VERY IMPORTANT: the texts must be those
            returned by add_lines or data corruption can be caused.
        :param left_matching_blocks: a hint about which areas are common
            between the text and its left-hand-parent.  The format is
            the SequenceMatcher.get_matching_blocks format.
        :param nostore_sha: Raise ExistingContent and do not add the lines to
            the versioned file if the digest of the lines matches this.
        :param random_id: If True a random id has been selected rather than
            an id determined by some deterministic process such as a converter
            from a foreign VCS. When True the backend may choose not to check
            for uniqueness of the resulting key within the versioned file, so
            this should only be done when the result is expected to be unique
            anyway.
        :param check_content: If True, the lines supplied are verified to be
            bytestrings that are correctly formed lines.
        :return: The text sha1, the number of bytes in the text, and an opaque
                 representation of the inserted version which can be provided
                 back to future add_lines calls in the parent_texts dictionary.
        """
        self._index._check_write_ok()
        self._check_add(key, lines, random_id, check_content)
        if parents is None:
            # The caller might pass None if there is no graph data, but kndx
            # indexes can't directly store that, so we give them
            # an empty tuple instead.
            parents = ()
        # double handling for now. Make it work until then.
        length = sum(map(len, lines))
        record = ChunkedContentFactory(key, parents, None, lines)
        sha1 = list(self._insert_record_stream([record], random_id=random_id,
                                               nostore_sha=nostore_sha))[0]
        return sha1, length, None
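
    # Editorial illustration (not part of the original source), with
    # hypothetical key and parent tuples:
    #
    #   sha1, length, _ = vf.add_lines(
    #       ('file-id', 'rev-2'), [('file-id', 'rev-1')],
    #       ['first line\n', 'second line\n'])
    #
    # The lines are wrapped in a ChunkedContentFactory and pushed through
    # _insert_record_stream(), so adds share the code path used by fetch.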

    def _add_text(self, key, parents, text, nostore_sha=None, random_id=False):
        """See VersionedFiles._add_text()."""
        self._index._check_write_ok()
        self._check_add(key, None, random_id, check_content=False)
        if text.__class__ is not str:
            raise errors.BzrBadParameterUnicode("text")
        if parents is None:
            # The caller might pass None if there is no graph data, but kndx
            # indexes can't directly store that, so we give them
            # an empty tuple instead.
            parents = ()
        # double handling for now. Make it work until then.
        length = len(text)
        record = FulltextContentFactory(key, parents, None, text)
        sha1 = list(self._insert_record_stream([record], random_id=random_id,
                                               nostore_sha=nostore_sha))[0]
        return sha1, length, None

    def add_fallback_versioned_files(self, a_versioned_files):
        """Add a source of texts for texts not present in this knit.

        :param a_versioned_files: A VersionedFiles object.
        """
        self._immediate_fallback_vfs.append(a_versioned_files)

    def annotate(self, key):
        """See VersionedFiles.annotate."""
        ann = annotate.Annotator(self)
        return ann.annotate_flat(key)

    def get_annotator(self):
        return annotate.Annotator(self)

    def check(self, progress_bar=None, keys=None):
        """See VersionedFiles.check()."""
        if keys is None:
            keys = self.keys()
            for record in self.get_record_stream(keys, 'unordered', True):
                record.get_bytes_as('fulltext')
        else:
            return self.get_record_stream(keys, 'unordered', True)

    def clear_cache(self):
        """See VersionedFiles.clear_cache()"""
        self._group_cache.clear()
        self._index._graph_index.clear_cache()
        self._index._int_cache.clear()

    def _check_add(self, key, lines, random_id, check_content):
        """check that version_id and lines are safe to add."""
        version_id = key[-1]
        if version_id is not None:
            if osutils.contains_whitespace(version_id):
                raise errors.InvalidRevisionId(version_id, self)
        self.check_not_reserved_id(version_id)
        # TODO: If random_id==False and the key is already present, we should
        # probably check that the existing content is identical to what is
        # being inserted, and otherwise raise an exception.  This would make
        # the bundle code simpler.
        if check_content:
            self._check_lines_not_unicode(lines)
            self._check_lines_are_lines(lines)
0.17.5 by Robert Collins
nograph tests completely passing.
1368
    def get_parent_map(self, keys):
3735.31.7 by John Arbash Meinel
Start bringing in stacking support for Groupcompress repos.
1369
        """Get a map of the graph parents of keys.
0.17.5 by Robert Collins
nograph tests completely passing.
1370
1371
        :param keys: The keys to look up parents for.
1372
        :return: A mapping from keys to parents. Absent keys are absent from
1373
            the mapping.
1374
        """
3735.31.7 by John Arbash Meinel
Start bringing in stacking support for Groupcompress repos.
1375
        return self._get_parent_map_with_sources(keys)[0]
1376
1377
    def _get_parent_map_with_sources(self, keys):
1378
        """Get a map of the parents of keys.
1379
1380
        :param keys: The keys to look up parents for.
1381
        :return: A tuple. The first element is a mapping from keys to parents.
1382
            Absent keys are absent from the mapping. The second element is a
1383
            list with the locations each key was found in. The first element
1384
            is the in-this-knit parents, the second the first fallback source,
1385
            and so on.
1386
        """
0.17.5 by Robert Collins
nograph tests completely passing.
1387
        result = {}
5652.2.4 by Martin Pool
Rename to _immediate_fallback_vfs
1388
        sources = [self._index] + self._immediate_fallback_vfs
0.17.5 by Robert Collins
nograph tests completely passing.
1389
        source_results = []
1390
        missing = set(keys)
1391
        for source in sources:
1392
            if not missing:
1393
                break
1394
            new_result = source.get_parent_map(missing)
1395
            source_results.append(new_result)
1396
            result.update(new_result)
1397
            missing.difference_update(set(new_result))
3735.31.7 by John Arbash Meinel
Start bringing in stacking support for Groupcompress repos.
1398
        return result, source_results
0.17.5 by Robert Collins
nograph tests completely passing.
1399
4634.3.11 by Andrew Bennetts
Simplify further, comment more.
1400
    def _get_blocks(self, read_memos):
1401
        """Get GroupCompressBlocks for the given read_memos.
1402
4634.3.14 by Andrew Bennetts
Some changes prompted by John's review.
1403
        :returns: a series of (read_memo, block) pairs, in the order they were
1404
            originally passed.
4634.3.11 by Andrew Bennetts
Simplify further, comment more.
1405
        """
4634.3.1 by Andrew Bennetts
Add some batching to _get_remaining_record_stream.
1406
        cached = {}
1407
        for read_memo in read_memos:
1408
            try:
1409
                block = self._group_cache[read_memo]
1410
            except KeyError:
1411
                pass
1412
            else:
1413
                cached[read_memo] = block
4634.3.3 by Andrew Bennetts
Fix bug, add docstrings, improve clarity.
1414
        not_cached = []
1415
        not_cached_seen = set()
1416
        for read_memo in read_memos:
1417
            if read_memo in cached:
1418
                # Don't fetch what we already have
1419
                continue
1420
            if read_memo in not_cached_seen:
1421
                # Don't try to fetch the same data twice
1422
                continue
1423
            not_cached.append(read_memo)
1424
            not_cached_seen.add(read_memo)
4634.3.1 by Andrew Bennetts
Add some batching to _get_remaining_record_stream.
1425
        raw_records = self._access.get_raw_records(not_cached)
1426
        for read_memo in read_memos:
1427
            try:
4634.3.16 by Andrew Bennetts
Fix buglets.
1428
                yield read_memo, cached[read_memo]
4634.3.1 by Andrew Bennetts
Add some batching to _get_remaining_record_stream.
1429
            except KeyError:
4634.3.15 by Andrew Bennetts
Get rid of inaccurate comment.
1430
                # Read the block, and cache it.
4634.3.1 by Andrew Bennetts
Add some batching to _get_remaining_record_stream.
1431
                zdata = raw_records.next()
1432
                block = GroupCompressBlock.from_bytes(zdata)
1433
                self._group_cache[read_memo] = block
4634.3.3 by Andrew Bennetts
Fix bug, add docstrings, improve clarity.
1434
                cached[read_memo] = block
4634.3.14 by Andrew Bennetts
Some changes prompted by John's review.
1435
                yield read_memo, block
4634.3.1 by Andrew Bennetts
Add some batching to _get_remaining_record_stream.
1436
0.20.18 by John Arbash Meinel
Implement new handling of get_bytes_as(), and get_missing_compression_parent_keys()
1437
    def get_missing_compression_parent_keys(self):
1438
        """Return the keys of missing compression parents.
1439
1440
        Missing compression parents occur when a record stream was missing
1441
        basis texts, or an index was scanned that had missing basis texts.
1442
        """
1443
        # GroupCompress cannot currently reference texts that are not in the
1444
        # group, so this is valid for now
1445
        return frozenset()
1446
0.17.5 by Robert Collins
nograph tests completely passing.
1447
    def get_record_stream(self, keys, ordering, include_delta_closure):
1448
        """Get a stream of records for keys.
1449
1450
        :param keys: The keys to include.
1451
        :param ordering: Either 'unordered' or 'topological'. A topologically
1452
            sorted stream has compression parents strictly before their
1453
            children.
1454
        :param include_delta_closure: If True then the closure across any
1455
            compression parents will be included (in the opaque data).
1456
        :return: An iterator of ContentFactory objects, each of which is only
1457
            valid until the iterator is advanced.
1458
        """
1459
        # keys might be a generator
0.22.6 by John Arbash Meinel
Clustering chk pages properly makes a big difference.
1460
        orig_keys = list(keys)
3735.31.18 by John Arbash Meinel
Implement stacking support across all ordering implementations.
1461
        keys = set(keys)
0.17.5 by Robert Collins
nograph tests completely passing.
1462
        if not keys:
1463
            return
0.20.23 by John Arbash Meinel
Add a progress indicator for chk pages.
1464
        if (not self._index.has_graph
3735.31.14 by John Arbash Meinel
Change the gc-optimal to 'groupcompress'
1465
            and ordering in ('topological', 'groupcompress')):
0.17.5 by Robert Collins
nograph tests completely passing.
1466
            # Cannot topologically order when no graph has been stored,
3735.31.18 by John Arbash Meinel
Implement stacking support across all ordering implementations.
1467
            # but we allow 'as-requested' or 'unordered'.
0.17.5 by Robert Collins
nograph tests completely passing.
1468
            ordering = 'unordered'
3735.31.18 by John Arbash Meinel
Implement stacking support across all ordering implementations.
1469
1470
        remaining_keys = keys
1471
        while True:
1472
            try:
1473
                keys = set(remaining_keys)
1474
                for content_factory in self._get_remaining_record_stream(keys,
1475
                        orig_keys, ordering, include_delta_closure):
1476
                    remaining_keys.discard(content_factory.key)
1477
                    yield content_factory
1478
                return
1479
            except errors.RetryWithNewPacks, e:
1480
                self._access.reload_or_raise(e)
1481
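
# Illustrative sketch (not part of the original source): the retry loop in
# get_record_stream above. Keys are discarded from the working set as soon
# as they are yielded, so when the pack files change under us the stream is
# restarted with only the unfinished keys. RetryableError, fetch_stream and
# reload_packs are hypothetical stand-ins for errors.RetryWithNewPacks,
# _get_remaining_record_stream and self._access.reload_or_raise.
class RetryableError(Exception):
    pass


def stream_with_retry(all_keys, fetch_stream, reload_packs):
    remaining = set(all_keys)
    while True:
        try:
            for record in fetch_stream(set(remaining)):
                remaining.discard(record.key)
                yield record
            return
        except RetryableError, e:
            # Re-read the pack list; reload_packs re-raises if reloading
            # cannot possibly help.
            reload_packs(e)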
1482
    def _find_from_fallback(self, missing):
1483
        """Find whatever keys you can from the fallbacks.
1484
1485
        :param missing: A set of missing keys. This set will be mutated as keys
1486
            are found from a fallback_vfs
1487
        :return: (parent_map, key_to_source_map, source_results)
1488
            parent_map  the overall key => parent_keys
1489
            key_to_source_map   a dict from {key: source}
1490
            source_results      a list of (source: keys)
1491
        """
1492
        parent_map = {}
1493
        key_to_source_map = {}
1494
        source_results = []
5652.2.4 by Martin Pool
Rename to _immediate_fallback_vfs
1495
        for source in self._immediate_fallback_vfs:
3735.31.18 by John Arbash Meinel
Implement stacking support across all ordering implementations.
1496
            if not missing:
1497
                break
1498
            source_parents = source.get_parent_map(missing)
1499
            parent_map.update(source_parents)
1500
            source_parents = list(source_parents)
1501
            source_results.append((source, source_parents))
1502
            key_to_source_map.update((key, source) for key in source_parents)
1503
            missing.difference_update(source_parents)
1504
        return parent_map, key_to_source_map, source_results
1505
1506
    def _get_ordered_source_keys(self, ordering, parent_map, key_to_source_map):
1507
        """Get the (source, [keys]) list.
1508
1509
        The returned objects should be in the order defined by 'ordering',
1510
        which can weave between different sources.
5891.1.2 by Andrew Bennetts
Fix a bunch of docstring formatting nits, making pydoctor a bit happier.
1511
3735.31.18 by John Arbash Meinel
Implement stacking support across all ordering implementations.
1512
        :param ordering: Must be one of 'topological' or 'groupcompress'
1513
        :return: List of [(source, [keys])] tuples, such that all keys are in
1514
            the defined order, regardless of source.
1515
        """
1516
        if ordering == 'topological':
5757.8.4 by Jelmer Vernooij
Fix import.
1517
            present_keys = tsort.topo_sort(parent_map)
3735.31.18 by John Arbash Meinel
Implement stacking support across all ordering implementations.
1518
        else:
1519
            # ordering == 'groupcompress'
1520
            # XXX: This only optimizes for the target ordering. We may need
1521
            #      to balance that with the time it takes to extract
1522
            #      ordering, by somehow grouping based on
1523
            #      locations[key][0:3]
1524
            present_keys = sort_gc_optimal(parent_map)
1525
        # Now group by source:
1526
        source_keys = []
1527
        current_source = None
1528
        for key in present_keys:
1529
            source = key_to_source_map.get(key, self)
1530
            if source is not current_source:
1531
                source_keys.append((source, []))
3735.32.12 by John Arbash Meinel
Add groupcompress-block[-ref] as valid stream types.
1532
                current_source = source
3735.31.18 by John Arbash Meinel
Implement stacking support across all ordering implementations.
1533
            source_keys[-1][1].append(key)
1534
        return source_keys
1535
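
# Illustrative sketch (not part of the original source): once the keys are
# in their final order, consecutive keys from the same source are grouped,
# so each source is handed one contiguous run at a time (the tail loop of
# _get_ordered_source_keys above). The arguments here are hypothetical.
def group_by_source(present_keys, key_to_source_map, default_source):
    source_keys = []
    current_source = None
    for key in present_keys:
        source = key_to_source_map.get(key, default_source)
        if source is not current_source:
            # Source changed: start a new (source, [keys]) run.
            source_keys.append((source, []))
            current_source = source
        source_keys[-1][1].append(key)
    return source_keys

# group_by_source(['a', 'b', 'c'], {'c': 'fallback'}, 'local')
# => [('local', ['a', 'b']), ('fallback', ['c'])]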
1536
    def _get_as_requested_source_keys(self, orig_keys, locations, unadded_keys,
1537
                                      key_to_source_map):
1538
        source_keys = []
1539
        current_source = None
1540
        for key in orig_keys:
1541
            if key in locations or key in unadded_keys:
1542
                source = self
1543
            elif key in key_to_source_map:
1544
                source = key_to_source_map[key]
1545
            else: # absent
1546
                continue
1547
            if source is not current_source:
1548
                source_keys.append((source, []))
3735.32.12 by John Arbash Meinel
Add groupcompress-block[-ref] as valid stream types.
1549
                current_source = source
3735.31.18 by John Arbash Meinel
Implement stacking support across all ordering implementations.
1550
            source_keys[-1][1].append(key)
1551
        return source_keys
1552
1553
    def _get_io_ordered_source_keys(self, locations, unadded_keys,
1554
                                    source_result):
1555
        def get_group(key):
1556
            # This is the group the bytes are stored in, followed by the
1557
            # location in the group
1558
            return locations[key][0]
1559
        present_keys = sorted(locations.iterkeys(), key=get_group)
1560
        # We don't have an ordering for keys in the in-memory object, but
1561
        # let's process the in-memory ones first.
1562
        present_keys = list(unadded_keys) + present_keys
1563
        # Now grab all of the ones from other sources
1564
        source_keys = [(self, present_keys)]
1565
        source_keys.extend(source_result)
1566
        return source_keys
1567
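
# Illustrative sketch (not part of the original source): 'unordered' really
# means "cheapest to read", so keys are sorted by the group they live in
# and each group is then decompressed once instead of thrashing the block
# cache. `locations` is assumed to map key -> (group_locator, ...), and
# unadded (still in-memory) texts go to the front as the cheapest of all.
def io_ordered_keys(locations, unadded_keys):
    def get_group(key):
        # The group locator is the first element; the position within the
        # group does not matter for batching reads.
        return locations[key][0]
    present_keys = sorted(locations, key=get_group)
    return list(unadded_keys) + present_keys

# io_ordered_keys({'k1': ('g2', 7), 'k2': ('g1', 0)}, [])
# => ['k2', 'k1']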
1568
    def _get_remaining_record_stream(self, keys, orig_keys, ordering,
1569
                                     include_delta_closure):
1570
        """Get a stream of records for keys.
1571
1572
        :param keys: The keys to include.
1573
        :param ordering: one of 'unordered', 'topological', 'groupcompress' or
1574
            'as-requested'
1575
        :param include_delta_closure: If True then the closure across any
1576
            compression parents will be included (in the opaque data).
1577
        :return: An iterator of ContentFactory objects, each of which is only
1578
            valid until the iterator is advanced.
1579
        """
0.17.5 by Robert Collins
nograph tests completely passing.
1580
        # Cheap: iterate
1581
        locations = self._index.get_build_details(keys)
3735.31.18 by John Arbash Meinel
Implement stacking support across all ordering implementations.
1582
        unadded_keys = set(self._unadded_refs).intersection(keys)
1583
        missing = keys.difference(locations)
1584
        missing.difference_update(unadded_keys)
1585
        (fallback_parent_map, key_to_source_map,
1586
         source_result) = self._find_from_fallback(missing)
1587
        if ordering in ('topological', 'groupcompress'):
0.17.5 by Robert Collins
nograph tests completely passing.
1588
            # would be better to not globally sort initially but instead
1589
            # start with one key, recurse to its oldest parent, then grab
1590
            # everything in the same group, etc.
1591
            parent_map = dict((key, details[2]) for key, details in
1592
                locations.iteritems())
3735.31.18 by John Arbash Meinel
Implement stacking support across all ordering implementations.
1593
            for key in unadded_keys:
1594
                parent_map[key] = self._unadded_refs[key]
1595
            parent_map.update(fallback_parent_map)
1596
            source_keys = self._get_ordered_source_keys(ordering, parent_map,
1597
                                                        key_to_source_map)
0.22.6 by John Arbash Meinel
Clustering chk pages properly makes a big difference.
1598
        elif ordering == 'as-requested':
3735.31.18 by John Arbash Meinel
Implement stacking support across all ordering implementations.
1599
            source_keys = self._get_as_requested_source_keys(orig_keys,
1600
                locations, unadded_keys, key_to_source_map)
0.17.5 by Robert Collins
nograph tests completely passing.
1601
        else:
0.20.10 by John Arbash Meinel
Change the extraction ordering for 'unordered'.
1602
            # We want to yield the keys in a semi-optimal (read-wise) ordering.
1603
            # Otherwise we thrash the _group_cache and destroy performance
3735.31.18 by John Arbash Meinel
Implement stacking support across all ordering implementations.
1604
            source_keys = self._get_io_ordered_source_keys(locations,
1605
                unadded_keys, source_result)
1606
        for key in missing:
0.17.5 by Robert Collins
nograph tests completely passing.
1607
            yield AbsentContentFactory(key)
4634.3.3 by Andrew Bennetts
Fix bug, add docstrings, improve clarity.
1608
        # Batch up as many keys as we can until either:
1609
        #  - we encounter an unadded ref, or
1610
        #  - we run out of keys, or
4634.3.17 by Andrew Bennetts
Make BATCH_SIZE a global.
1611
        #  - the total bytes to retrieve for this batch > BATCH_SIZE
5755.2.5 by John Arbash Meinel
Expose the setting up the stack.
1612
        batcher = _BatchingBlockFetcher(self, locations,
5755.2.8 by John Arbash Meinel
Do a lot of renaming.
1613
            get_compressor_settings=self._get_compressor_settings)
3735.31.18 by John Arbash Meinel
Implement stacking support across all ordering implementations.
1614
        for source, keys in source_keys:
1615
            if source is self:
1616
                for key in keys:
1617
                    if key in self._unadded_refs:
4634.3.8 by Andrew Bennetts
Tweak some comments.
1618
                        # Flush batch, then yield unadded ref from
1619
                        # self._compressor.
4634.3.14 by Andrew Bennetts
Some changes prompted by John's review.
1620
                        for factory in batcher.yield_factories(full_flush=True):
1621
                            yield factory
3735.31.18 by John Arbash Meinel
Implement stacking support across all ordering implementations.
1622
                        bytes, sha1 = self._compressor.extract(key)
1623
                        parents = self._unadded_refs[key]
3735.32.12 by John Arbash Meinel
Add groupcompress-block[-ref] as valid stream types.
1624
                        yield FulltextContentFactory(key, parents, sha1, bytes)
4634.3.1 by Andrew Bennetts
Add some batching to _get_remaining_record_stream.
1625
                        continue
4634.3.14 by Andrew Bennetts
Some changes prompted by John's review.
1626
                    if batcher.add_key(key) > BATCH_SIZE:
4634.3.8 by Andrew Bennetts
Tweak some comments.
1627
                        # Ok, this batch is big enough.  Yield some results.
4634.3.14 by Andrew Bennetts
Some changes prompted by John's review.
1628
                        for factory in batcher.yield_factories():
1629
                            yield factory
0.17.11 by Robert Collins
Add extraction of just-compressed texts to support converting from knits.
1630
            else:
4634.3.14 by Andrew Bennetts
Some changes prompted by John's review.
1631
                for factory in batcher.yield_factories(full_flush=True):
1632
                    yield factory
3735.31.18 by John Arbash Meinel
Implement stacking support across all ordering implementations.
1633
                for record in source.get_record_stream(keys, ordering,
1634
                                                       include_delta_closure):
1635
                    yield record
4634.3.14 by Andrew Bennetts
Some changes prompted by John's review.
1636
        for factory in batcher.yield_factories(full_flush=True):
1637
            yield factory
0.20.5 by John Arbash Meinel
Finish the Fulltext => Chunked conversions so that we work in the more-efficient Chunks.
1638
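
# Illustrative sketch (not part of the original source): the batching
# discipline used with _BatchingBlockFetcher above -- keep adding keys
# until the bytes the batch would read cross a threshold, then drain it,
# and always fully drain at the end. This minimal Batcher is hypothetical;
# the real one yields content factories, not keys.
BATCH_BYTES = 2 ** 16

class Batcher(object):

    def __init__(self, sizes):
        self._sizes = sizes        # key -> bytes this key will pull in
        self._pending = []
        self._pending_bytes = 0

    def add_key(self, key):
        self._pending.append(key)
        self._pending_bytes += self._sizes[key]
        return self._pending_bytes

    def drain(self):
        for key in self._pending:
            yield key
        self._pending = []
        self._pending_bytes = 0

def batched_stream(keys, sizes):
    batcher = Batcher(sizes)
    for key in keys:
        if batcher.add_key(key) > BATCH_BYTES:
            # This batch is big enough; flush before accumulating more.
            for result in batcher.drain():
                yield result
    for result in batcher.drain():
        yield result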
0.17.5 by Robert Collins
nograph tests completely passing.
1639
    def get_sha1s(self, keys):
1640
        """See VersionedFiles.get_sha1s()."""
1641
        result = {}
1642
        for record in self.get_record_stream(keys, 'unordered', True):
1643
            if record.sha1 is not None:
1644
                result[record.key] = record.sha1
1645
            else:
1646
                if record.storage_kind != 'absent':
3735.40.2 by John Arbash Meinel
Add a groupcompress.encode_copy_instruction function.
1647
                    result[record.key] = osutils.sha_string(
1648
                        record.get_bytes_as('fulltext'))
0.17.5 by Robert Collins
nograph tests completely passing.
1649
        return result
1650
5195.3.26 by Parth Malwankar
reverted changes done to insert_record_stream API
1651
    def insert_record_stream(self, stream):
0.17.2 by Robert Collins
Core proof of concept working.
1652
        """Insert a record stream into this container.
1653
3735.31.2 by John Arbash Meinel
Cleanup trailing whitespace, get test_source to pass by removing asserts.
1654
        :param stream: A stream of records to insert.
0.17.2 by Robert Collins
Core proof of concept working.
1655
        :return: None
1656
        :seealso VersionedFiles.get_record_stream:
1657
        """
4241.6.6 by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core.
1658
        # XXX: Setting random_id=True makes
1659
        # test_insert_record_stream_existing_keys fail for groupcompress and
1660
        # groupcompress-nograph, this needs to be revisited while addressing
1661
        # 'bzr branch' performance issues.
5195.3.26 by Parth Malwankar
reverted changes done to insert_record_stream API
1662
        for _ in self._insert_record_stream(stream, random_id=False):
0.17.5 by Robert Collins
nograph tests completely passing.
1663
            pass
0.17.2 by Robert Collins
Core proof of concept working.
1664
5755.2.8 by John Arbash Meinel
Do a lot of renaming.
1665
    def _get_compressor_settings(self):
1666
        if self._max_bytes_to_index is None:
5755.2.4 by John Arbash Meinel
Expose the max_entries_per_source into GroupCompressVersionedFiles
1667
            # TODO: VersionedFiles don't know about their containing
1668
            #       repository, so they don't have much of an idea about their
1669
            #       location. So for now, this is only a global option.
1670
            c = config.GlobalConfig()
5755.2.8 by John Arbash Meinel
Do a lot of renaming.
1671
            val = c.get_user_option('bzr.groupcompress.max_bytes_to_index')
5755.2.4 by John Arbash Meinel
Expose the max_entries_per_source into GroupCompressVersionedFiles
1672
            if val is not None:
1673
                try:
1674
                    val = int(val)
1675
                except ValueError, e:
1676
                    trace.warning('Value for '
5755.2.8 by John Arbash Meinel
Do a lot of renaming.
1677
                                  '"bzr.groupcompress.max_bytes_to_index"'
5755.2.4 by John Arbash Meinel
Expose the max_entries_per_source into GroupCompressVersionedFiles
1678
                                  ' %r is not an integer'
1679
                                  % (val,))
1680
                    val = None
1681
            if val is None:
5755.2.8 by John Arbash Meinel
Do a lot of renaming.
1682
                val = self._DEFAULT_MAX_BYTES_TO_INDEX
1683
            self._max_bytes_to_index = val
5755.2.9 by John Arbash Meinel
Change settings to a dict. That way the attributes are still named.
1684
        return {'max_bytes_to_index': self._max_bytes_to_index}
5755.2.5 by John Arbash Meinel
Expose the setting up the stack.
1685
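
# Illustrative sketch (not part of the original source): the defensive
# integer-option lookup performed by _get_compressor_settings above, as a
# standalone helper. The option name and the GlobalConfig/trace calls are
# the same ones the method uses; the helper itself is hypothetical.
from bzrlib import config, trace

def get_max_bytes_to_index(default):
    c = config.GlobalConfig()
    val = c.get_user_option('bzr.groupcompress.max_bytes_to_index')
    if val is not None:
        try:
            val = int(val)
        except ValueError:
            # A malformed setting is warned about, then ignored.
            trace.warning('Value for'
                          ' "bzr.groupcompress.max_bytes_to_index"'
                          ' %r is not an integer' % (val,))
            val = None
    if val is None:
        val = default
    return val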
1686
    def _make_group_compressor(self):
5755.2.8 by John Arbash Meinel
Do a lot of renaming.
1687
        return GroupCompressor(self._get_compressor_settings())
5755.2.4 by John Arbash Meinel
Expose the max_entries_per_source into GroupCompressVersionedFiles
1688
3735.32.21 by John Arbash Meinel
We now have a 'reuse_blocks=False' flag for autopack et al.
1689
    def _insert_record_stream(self, stream, random_id=False, nostore_sha=None,
5195.3.26 by Parth Malwankar
reverted changes done to insert_record_stream API
1690
                              reuse_blocks=True):
0.17.2 by Robert Collins
Core proof of concept working.
1691
        """Internal core to insert a record stream into this container.
1692
1693
        This helper function has a different interface than insert_record_stream
1694
        to allow add_lines to be minimal, but still return the needed data.
1695
3735.31.2 by John Arbash Meinel
Cleanup trailing whitespace, get test_source to pass by removing asserts.
1696
        :param stream: A stream of records to insert.
3735.31.12 by John Arbash Meinel
Push nostore_sha down through the stack.
1697
        :param nostore_sha: If the sha1 of a given text matches nostore_sha,
1698
            raise ExistingContent, rather than committing the new text.
3735.32.21 by John Arbash Meinel
We now have a 'reuse_blocks=False' flag for autopack et al.
1699
        :param reuse_blocks: If the source is streaming from
1700
            groupcompress-blocks, just insert the blocks as-is, rather than
1701
            expanding the texts and inserting again.
0.17.2 by Robert Collins
Core proof of concept working.
1702
        :return: An iterator over the sha1 of the inserted records.
1703
        :seealso insert_record_stream:
1704
        :seealso add_lines:
1705
        """
0.20.29 by Ian Clatworthy
groupcompress.py code cleanups
1706
        adapters = {}
0.17.5 by Robert Collins
nograph tests completely passing.
1707
        def get_adapter(adapter_key):
1708
            try:
1709
                return adapters[adapter_key]
1710
            except KeyError:
1711
                adapter_factory = adapter_registry.get(adapter_key)
1712
                adapter = adapter_factory(self)
1713
                adapters[adapter_key] = adapter
1714
                return adapter
0.17.2 by Robert Collins
Core proof of concept working.
1715
        # This will go up to fulltexts for gc to gc fetching, which isn't
1716
        # ideal.
5755.2.4 by John Arbash Meinel
Expose the max_entries_per_source into GroupCompressVersionedFiles
1717
        self._compressor = self._make_group_compressor()
0.17.11 by Robert Collins
Add extraction of just-compressed texts to support converting from knits.
1718
        self._unadded_refs = {}
0.17.5 by Robert Collins
nograph tests completely passing.
1719
        keys_to_add = []
0.17.6 by Robert Collins
Cap group size at 20MB internal buffer. (Probably way too big).
1720
        def flush():
5439.2.1 by John Arbash Meinel
Change GroupCompressBlock to work in self._z_compress_chunks
1721
            bytes_len, chunks = self._compressor.flush().to_chunks()
5755.2.4 by John Arbash Meinel
Expose the max_entries_per_source into GroupCompressVersionedFiles
1722
            self._compressor = self._make_group_compressor()
5439.2.1 by John Arbash Meinel
Change GroupCompressBlock to work in self._z_compress_chunks
1723
            # Note: At this point we still have 1 copy of the fulltext (in
1724
            #       the record and in the var 'bytes'), and this generates 2
1725
            #       copies of the compressed text (one for bytes, one in chunks)
1726
            # TODO: Push 'chunks' down into the _access api, so that we don't
1727
            #       have to double compressed memory here
1728
            # TODO: Figure out how to indicate that we would be happy to free
1729
            #       the fulltext content at this point. Note that sometimes we
1730
            #       will want it later (streaming CHK pages), but most of the
1731
            #       time we won't (everything else)
1732
            bytes = ''.join(chunks)
1733
            del chunks
0.17.6 by Robert Collins
Cap group size at 20MB internal buffer. (Probably way too big).
1734
            index, start, length = self._access.add_raw_records(
0.25.7 by John Arbash Meinel
Have the GroupCompressBlock decide how to compress the header and content.
1735
                [(None, len(bytes))], bytes)[0]
0.17.6 by Robert Collins
Cap group size at 20MB internal buffer. (Probably way too big).
1736
            nodes = []
1737
            for key, reads, refs in keys_to_add:
1738
                nodes.append((key, "%d %d %s" % (start, length, reads), refs))
1739
            self._index.add_records(nodes, random_id=random_id)
0.25.10 by John Arbash Meinel
Play around with detecting compression breaks.
1740
            self._unadded_refs = {}
1741
            del keys_to_add[:]
1742
0.20.15 by John Arbash Meinel
Change so that regions that have lots of copies get converted back
1743
        last_prefix = None
0.25.10 by John Arbash Meinel
Play around with detecting compression breaks.
1744
        max_fulltext_len = 0
0.25.11 by John Arbash Meinel
Slightly different handling of large texts.
1745
        max_fulltext_prefix = None
3735.32.20 by John Arbash Meinel
groupcompress now copies the blocks exactly as they were given.
1746
        insert_manager = None
1747
        block_start = None
1748
        block_length = None
3735.36.15 by John Arbash Meinel
Set 'combine_backing_indices=False' as the default for text and chk indices.
1749
        # XXX: TODO: remove this, it is just for safety checking for now
1750
        inserted_keys = set()
4665.3.9 by John Arbash Meinel
Start doing some work to make sure that we call _check_rebuild_block
1751
        reuse_this_block = reuse_blocks
0.17.2 by Robert Collins
Core proof of concept working.
1752
        for record in stream:
0.17.5 by Robert Collins
nograph tests completely passing.
1753
            # Raise an error when a record is missing.
1754
            if record.storage_kind == 'absent':
0.20.29 by Ian Clatworthy
groupcompress.py code cleanups
1755
                raise errors.RevisionNotPresent(record.key, self)
3735.36.15 by John Arbash Meinel
Set 'combine_backing_indices=False' as the default for text and chk indices.
1756
            if random_id:
1757
                if record.key in inserted_keys:
6138.3.4 by Jonathan Riddell
add gettext() to uses of trace.note()
1758
                    trace.note(gettext('Insert claimed random_id=True,'
1759
                               ' but then inserted %r two times'), record.key)
3735.36.15 by John Arbash Meinel
Set 'combine_backing_indices=False' as the default for text and chk indices.
1760
                    continue
1761
                inserted_keys.add(record.key)
4665.3.9 by John Arbash Meinel
Start doing some work to make sure that we call _check_rebuild_block
1762
            if reuse_blocks:
3735.32.21 by John Arbash Meinel
We now have a 'reuse_blocks=False' flag for autopack et al.
1763
                # If the reuse_blocks flag is set, check to see if we can just
1764
                # copy a groupcompress block as-is.
4665.3.10 by John Arbash Meinel
Get a test written which exercises the 'trim' code path.
1765
                # We only check on the first record (groupcompress-block) not
1766
                # on all of the (groupcompress-block-ref) entries.
1767
                # The reuse_this_block flag is then kept for as long as
                # the same block is being streamed (i.e. across the
                # following groupcompress-block-ref records).
4634.23.1 by Robert Collins
Cherrypick from bzr.dev: Fix bug 402652: recompress badly packed groups during fetch. (John Arbash Meinel, Robert Collins)
1768
                if record.storage_kind == 'groupcompress-block':
4665.3.2 by John Arbash Meinel
An alternative implementation that passes both tests.
1769
                    # Check to see if we really want to re-use this block
1770
                    insert_manager = record._manager
4665.3.9 by John Arbash Meinel
Start doing some work to make sure that we call _check_rebuild_block
1771
                    reuse_this_block = insert_manager.check_is_well_utilized()
4665.3.10 by John Arbash Meinel
Get a test written which exercises the 'trim' code path.
1772
            else:
1773
                reuse_this_block = False
4665.3.2 by John Arbash Meinel
An alternative implementation that passes both tests.
1774
            if reuse_this_block:
1775
                # We still want to reuse this block
1776
                if record.storage_kind == 'groupcompress-block':
3735.32.21 by John Arbash Meinel
We now have a 'reuse_blocks=False' flag for autopack et al.
1777
                    # Insert the raw block into the target repo
1778
                    insert_manager = record._manager
1779
                    bytes = record._manager._block.to_bytes()
1780
                    _, start, length = self._access.add_raw_records(
1781
                        [(None, len(bytes))], bytes)[0]
1782
                    del bytes
1783
                    block_start = start
1784
                    block_length = length
1785
                if record.storage_kind in ('groupcompress-block',
1786
                                           'groupcompress-block-ref'):
4241.6.6 by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core.
1787
                    if insert_manager is None:
1788
                        raise AssertionError('No insert_manager set')
4665.3.4 by John Arbash Meinel
Refactor the check_rebuild code a bit, so that we can potentially
1789
                    if insert_manager is not record._manager:
1790
                        raise AssertionError('insert_manager does not match'
1791
                            ' the current record, we cannot be positive'
1792
                            ' that the appropriate content was inserted.'
1793
                            )
3735.32.21 by John Arbash Meinel
We now have a 'reuse_blocks=False' flag for autopack et al.
1794
                    value = "%d %d %d %d" % (block_start, block_length,
1795
                                             record._start, record._end)
1796
                    nodes = [(record.key, value, (record.parents,))]
3735.38.1 by John Arbash Meinel
Change the delta byte stream to remove the 'source length' entry.
1797
                    # TODO: Consider buffering up many nodes to be added, not
1798
                    #       sure how much overhead this has, but we're seeing
1799
                    #       ~23s / 120s in add_records calls
3735.32.21 by John Arbash Meinel
We now have a 'reuse_blocks=False' flag for autopack et al.
1800
                    self._index.add_records(nodes, random_id=random_id)
1801
                    continue
0.20.18 by John Arbash Meinel
Implement new handling of get_bytes_as(), and get_missing_compression_parent_keys()
1802
            try:
0.23.52 by John Arbash Meinel
Use the max_delta flag.
1803
                bytes = record.get_bytes_as('fulltext')
0.20.18 by John Arbash Meinel
Implement new handling of get_bytes_as(), and get_missing_compression_parent_keys()
1804
            except errors.UnavailableRepresentation:
0.17.5 by Robert Collins
nograph tests completely passing.
1805
                adapter_key = record.storage_kind, 'fulltext'
1806
                adapter = get_adapter(adapter_key)
0.20.21 by John Arbash Meinel
Merge the chk sorting code.
1807
                bytes = adapter.get_bytes(record)
0.20.13 by John Arbash Meinel
Play around a bit.
1808
            if len(record.key) > 1:
1809
                prefix = record.key[0]
0.25.11 by John Arbash Meinel
Slightly different handling of large texts.
1810
                soft = (prefix == last_prefix)
0.25.10 by John Arbash Meinel
Play around with detecting compression breaks.
1811
            else:
1812
                prefix = None
0.25.11 by John Arbash Meinel
Slightly different handling of large texts.
1813
                soft = False
1814
            if max_fulltext_len < len(bytes):
1815
                max_fulltext_len = len(bytes)
1816
                max_fulltext_prefix = prefix
4241.6.6 by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core.
1817
            (found_sha1, start_point, end_point,
1818
             type) = self._compressor.compress(record.key,
1819
                                               bytes, record.sha1, soft=soft,
1820
                                               nostore_sha=nostore_sha)
1821
            # delta_ratio = float(len(bytes)) / (end_point - start_point)
0.25.10 by John Arbash Meinel
Play around with detecting compression breaks.
1822
            # Check if we want to continue to include that text
0.25.11 by John Arbash Meinel
Slightly different handling of large texts.
1823
            if (prefix == max_fulltext_prefix
1824
                and end_point < 2 * max_fulltext_len):
1825
                # As long as we are on the same file_id, we will fill at least
1826
                # 2 * max_fulltext_len
1827
                start_new_block = False
1828
            elif end_point > 4*1024*1024:
1829
                start_new_block = True
1830
            elif (prefix is not None and prefix != last_prefix
1831
                  and end_point > 2*1024*1024):
1832
                start_new_block = True
1833
            else:
1834
                start_new_block = False
0.25.10 by John Arbash Meinel
Play around with detecting compression breaks.
1835
            last_prefix = prefix
1836
            if start_new_block:
1837
                self._compressor.pop_last()
1838
                flush()
1839
                max_fulltext_len = len(bytes)
4241.6.6 by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core.
1840
                (found_sha1, start_point, end_point,
1841
                 type) = self._compressor.compress(record.key, bytes,
1842
                                                   record.sha1)
0.17.26 by Robert Collins
Working better --gc-plain-chk.
1843
            if record.key[-1] is None:
1844
                key = record.key[:-1] + ('sha1:' + found_sha1,)
1845
            else:
1846
                key = record.key
1847
            self._unadded_refs[key] = record.parents
0.17.3 by Robert Collins
new encoder, allows non monotonically increasing sequence matches for moar compression.
1848
            yield found_sha1
4842.1.1 by Andrew Bennetts
Fix crash involving static_tuple when C extensions are not built.
1849
            as_st = static_tuple.StaticTuple.from_sequence
1850
            if record.parents is not None:
1851
                parents = as_st([as_st(p) for p in record.parents])
1852
            else:
1853
                parents = None
1854
            refs = static_tuple.StaticTuple(parents)
1855
            keys_to_add.append((key, '%d %d' % (start_point, end_point), refs))
0.17.8 by Robert Collins
Flush pending updates at the end of _insert_record_stream
1856
        if len(keys_to_add):
1857
            flush()
0.17.11 by Robert Collins
Add extraction of just-compressed texts to support converting from knits.
1858
        self._compressor = None
5195.3.12 by Parth Malwankar
initial approximation of progress.
1859
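
# Illustrative sketch (not part of the original source): the block-break
# heuristic from the insert loop above, pulled out as a pure function. The
# thresholds are the ones the loop uses; the function name and signature
# are hypothetical. end_point is the size the group's content would reach
# if the text just compressed were kept.
def should_start_new_block(prefix, last_prefix, end_point,
                           max_fulltext_prefix, max_fulltext_len):
    if (prefix == max_fulltext_prefix
        and end_point < 2 * max_fulltext_len):
        # Still on the file with the largest text seen so far: let the
        # group grow to at least twice that text's size.
        return False
    elif end_point > 4 * 1024 * 1024:
        # Hard cap: do not let a group's content grow past ~4MB.
        return True
    elif (prefix is not None and prefix != last_prefix
          and end_point > 2 * 1024 * 1024):
        # We switched file-ids and the group already holds >2MB, so break
        # early rather than mixing unrelated texts in one block.
        return True
    return False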
0.17.5 by Robert Collins
nograph tests completely passing.
1860
    def iter_lines_added_or_present_in_keys(self, keys, pb=None):
1861
        """Iterate over the lines in the versioned files from keys.
1862
1863
        This may return lines from other keys. Each item the returned
1864
        iterator yields is a tuple of a line and a text version in which that
1865
        line is present (not the version that introduced it).
1866
1867
        Ordering of results is in whatever order is most suitable for the
1868
        underlying storage format.
1869
1870
        If a progress bar is supplied, it may be used to indicate progress.
1871
        The caller is responsible for cleaning up progress bars (because this
1872
        is an iterator).
1873
1874
        NOTES:
1875
         * Lines are normalised by the underlying store: they will all have \n
1876
           terminators.
1877
         * Lines are returned in arbitrary order.
1878
1879
        :return: An iterator over (line, key).
1880
        """
1881
        keys = set(keys)
1882
        total = len(keys)
1883
        # we don't care about inclusions, the caller cares.
1884
        # but we need to setup a list of records to visit.
1885
        # we need key, position, length
1886
        for key_idx, record in enumerate(self.get_record_stream(keys,
1887
            'unordered', True)):
1888
            # XXX: todo - optimise to use less than full texts.
1889
            key = record.key
4398.8.8 by John Arbash Meinel
Respond to Andrew's review comments.
1890
            if pb is not None:
1891
                pb.update('Walking content', key_idx, total)
0.17.5 by Robert Collins
nograph tests completely passing.
1892
            if record.storage_kind == 'absent':
0.20.29 by Ian Clatworthy
groupcompress.py code cleanups
1893
                raise errors.RevisionNotPresent(key, self)
4241.6.6 by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core.
1894
            lines = osutils.split_lines(record.get_bytes_as('fulltext'))
0.17.5 by Robert Collins
nograph tests completely passing.
1895
            for line in lines:
1896
                yield line, key
4398.8.8 by John Arbash Meinel
Respond to Andrew's review comments.
1897
        if pb is not None:
1898
            pb.update('Walking content', total, total)
0.17.5 by Robert Collins
nograph tests completely passing.
1899
1900
    def keys(self):
1901
        """See VersionedFiles.keys."""
1902
        if 'evil' in debug.debug_flags:
1903
            trace.mutter_callsite(2, "keys scales with size of history")
5652.2.4 by Martin Pool
Rename to _immediate_fallback_vfs
1904
        sources = [self._index] + self._immediate_fallback_vfs
0.17.5 by Robert Collins
nograph tests completely passing.
1905
        result = set()
1906
        for source in sources:
1907
            result.update(source.keys())
1908
        return result
1909
0.17.1 by Robert Collins
Starting point. Interface tests hooked up and failing.
1910
5365.4.1 by John Arbash Meinel
Find a case where we are wasting a bit of memory.
1911
class _GCBuildDetails(object):
1912
    """A blob of data about the build details.
1913
1914
    This stores the minimal data, which still allows compatibility with the
1915
    old API without taking as much memory.
1916
    """
1917
1918
    __slots__ = ('_index', '_group_start', '_group_end', '_basis_end',
1919
                 '_delta_end', '_parents')
1920
1921
    method = 'group'
1922
    compression_parent = None
1923
1924
    def __init__(self, parents, position_info):
1925
        self._parents = parents
5365.4.2 by John Arbash Meinel
As suggested by Martin <gz>, switch to tuple unpacking for attribute assignment
1926
        (self._index, self._group_start, self._group_end, self._basis_end,
1927
         self._delta_end) = position_info
5365.4.1 by John Arbash Meinel
Find a case where we are wasting a bit of memory.
1928
1929
    def __repr__(self):
1930
        return '%s(%s, %s)' % (self.__class__.__name__,
1931
            self.index_memo, self._parents)
1932
1933
    @property
1934
    def index_memo(self):
1935
        return (self._index, self._group_start, self._group_end,
1936
                self._basis_end, self._delta_end)
1937
1938
    @property
1939
    def record_details(self):
1940
        return static_tuple.StaticTuple(self.method, None)
1941
1942
    def __getitem__(self, offset):
1943
        """Compatibility thunk to act like a tuple."""
1944
        if offset == 0:
1945
            return self.index_memo
1946
        elif offset == 1:
1947
            return self.compression_parent # Always None
1948
        elif offset == 2:
1949
            return self._parents
1950
        elif offset == 3:
1951
            return self.record_details
1952
        else:
1953
            raise IndexError('offset out of range')
1954

1955
    def __len__(self):
1956
        return 4
1957
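
# Illustrative sketch (not part of the original source): because
# _GCBuildDetails implements __getitem__ and __len__, code written against
# the old 4-tuple build-details API keeps working unchanged. The values
# below are dummies.
details = _GCBuildDetails(parents=(('parent-key',),),
                          position_info=('idx', 0, 100, 50, 75))
index_memo, compression_parent, parents, record_details = details
# index_memo == ('idx', 0, 100, 50, 75)
# compression_parent is None  (group entries never have one)
# parents == (('parent-key',),) and len(details) == 4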
1958
0.17.1 by Robert Collins
Starting point. Interface tests hooked up and failing.
1959
class _GCGraphIndex(object):
1960
    """Mapper from GroupCompressVersionedFiles needs into GraphIndex storage."""
1961
0.17.9 by Robert Collins
Initial stab at repository format support.
1962
    def __init__(self, graph_index, is_locked, parents=True,
4465.2.4 by Aaron Bentley
Switch between warn and raise depending on inconsistent_fatal.
1963
        add_callback=None, track_external_parent_refs=False,
4634.29.1 by Andrew Bennetts
Rough code to reject commit_write_group if any inventory's CHK root is absent.
1964
        inconsistency_fatal=True, track_new_keys=False):
0.17.1 by Robert Collins
Starting point. Interface tests hooked up and failing.
1965
        """Construct a _GCGraphIndex on a graph_index.
1966
1967
        :param graph_index: An implementation of bzrlib.index.GraphIndex.
0.20.29 by Ian Clatworthy
groupcompress.py code cleanups
1968
        :param is_locked: A callback, returns True if the index is locked and
1969
            thus usable.
3735.31.2 by John Arbash Meinel
Cleanup trailing whitespace, get test_source to pass by removing asserts.
1970
        :param parents: If True, record knits parents, if not do not record
0.17.1 by Robert Collins
Starting point. Interface tests hooked up and failing.
1971
            parents.
1972
        :param add_callback: If not None, allow additions to the index and call
1973
            this callback with a list of added GraphIndex nodes:
1974
            [(node, value, node_refs), ...]
4343.3.21 by John Arbash Meinel
Implement get_missing_parents in terms of _KeyRefs.
1975
        :param track_external_parent_refs: As keys are added, keep track of the
1976
            keys they reference, so that we can query get_missing_parents(),
1977
            etc.
4465.2.4 by Aaron Bentley
Switch between warn and raise depending on inconsistent_fatal.
1978
        :param inconsistency_fatal: When asked to add records that are already
1979
            present, and the details are inconsistent with the existing
1980
            record, raise an exception instead of warning (and skipping the
1981
            record).
0.17.1 by Robert Collins
Starting point. Interface tests hooked up and failing.
1982
        """
1983
        self._add_callback = add_callback
1984
        self._graph_index = graph_index
1985
        self._parents = parents
1986
        self.has_graph = parents
1987
        self._is_locked = is_locked
4465.2.4 by Aaron Bentley
Switch between warn and raise depending on inconsistent_fatal.
1988
        self._inconsistency_fatal = inconsistency_fatal
4679.9.19 by John Arbash Meinel
Interning the start and stop group positions saves another 7MB peak mem. \o/
1989
        # GroupCompress records tend to have the same 'group' start + offset
1990
        # repeated over and over; this creates a surplus of ints
1991
        self._int_cache = {}
4343.3.21 by John Arbash Meinel
Implement get_missing_parents in terms of _KeyRefs.
1992
        if track_external_parent_refs:
5757.8.1 by Jelmer Vernooij
Avoid bzrlib.knit imports when using groupcompress repositories.
1993
            self._key_dependencies = _KeyRefs(
4634.29.6 by Andrew Bennetts
Put new key tracking in _KeyRefs rather than alongside it.
1994
                track_new_keys=track_new_keys)
4343.3.21 by John Arbash Meinel
Implement get_missing_parents in terms of _KeyRefs.
1995
        else:
1996
            self._key_dependencies = None
0.17.1 by Robert Collins
Starting point. Interface tests hooked up and failing.
1997
0.17.5 by Robert Collins
nograph tests completely passing.
1998
    def add_records(self, records, random_id=False):
1999
        """Add multiple records to the index.
3735.31.2 by John Arbash Meinel
Cleanup trailing whitespace, get test_source to pass by removing asserts.
2000
0.17.5 by Robert Collins
nograph tests completely passing.
2001
        This function does not insert data into the Immutable GraphIndex
2002
        backing the KnitGraphIndex, instead it prepares data for insertion by
2003
        the caller and checks that it is safe to insert then calls
2004
        self._add_callback with the prepared GraphIndex nodes.
2005
2006
        :param records: a list of tuples:
2007
                         (key, options, access_memo, parents).
2008
        :param random_id: If True the ids being added were randomly generated
2009
            and no check for existence will be performed.
2010
        """
2011
        if not self._add_callback:
2012
            raise errors.ReadOnlyError(self)
2013
        # we hope there are no repositories with inconsistent parentage
2014
        # anymore.
2015
2016
        changed = False
2017
        keys = {}
2018
        for (key, value, refs) in records:
2019
            if not self._parents:
2020
                if refs:
2021
                    for ref in refs:
2022
                        if ref:
4398.8.1 by John Arbash Meinel
Add a VersionedFile.add_text() api.
2023
                            raise errors.KnitCorrupt(self,
0.17.5 by Robert Collins
nograph tests completely passing.
2024
                                "attempt to add node with parents "
2025
                                "in parentless index.")
2026
                    refs = ()
2027
                    changed = True
2028
            keys[key] = (value, refs)
2029
        # check for dups
2030
        if not random_id:
2031
            present_nodes = self._get_entries(keys)
2032
            for (index, key, value, node_refs) in present_nodes:
4789.28.3 by John Arbash Meinel
Add a static_tuple.as_tuples() helper.
2033
                # Sometimes these are passed as a list rather than a tuple
2034
                node_refs = static_tuple.as_tuples(node_refs)
2035
                passed = static_tuple.as_tuples(keys[key])
2036
                if node_refs != passed[1]:
2037
                    details = '%s %s %s' % (key, (value, node_refs), passed)
4465.2.4 by Aaron Bentley
Switch between warn and raise depending on inconsistent_fatal.
2038
                    if self._inconsistency_fatal:
2039
                        raise errors.KnitCorrupt(self, "inconsistent details"
2040
                                                 " in add_records: %s" %
2041
                                                 details)
2042
                    else:
2043
                        trace.warning("inconsistent details in skipped"
2044
                                      " record: %s", details)
0.17.5 by Robert Collins
nograph tests completely passing.
2045
                del keys[key]
2046
                changed = True
2047
        if changed:
2048
            result = []
2049
            if self._parents:
2050
                for key, (value, node_refs) in keys.iteritems():
2051
                    result.append((key, value, node_refs))
2052
            else:
2053
                for key, (value, node_refs) in keys.iteritems():
2054
                    result.append((key, value))
2055
            records = result
4343.3.21 by John Arbash Meinel
Implement get_missing_parents in terms of _KeyRefs.
2056
        key_dependencies = self._key_dependencies
4634.29.6 by Andrew Bennetts
Put new key tracking in _KeyRefs rather than alongside it.
2057
        if key_dependencies is not None:
2058
            if self._parents:
2059
                for key, value, refs in records:
2060
                    parents = refs[0]
2061
                    key_dependencies.add_references(key, parents)
2062
            else:
2063
                for key, value, refs in records:
2064
                    key_dependencies.add_key(key)
0.17.5 by Robert Collins
nograph tests completely passing.
2065
        self._add_callback(records)
3735.31.2 by John Arbash Meinel
Cleanup trailing whitespace, get test_source to pass by removing asserts.
2066
0.17.5 by Robert Collins
nograph tests completely passing.
2067
    def _check_read(self):
0.20.29 by Ian Clatworthy
groupcompress.py code cleanups
2068
        """Raise an exception if reads are not permitted."""
0.17.5 by Robert Collins
nograph tests completely passing.
2069
        if not self._is_locked():
2070
            raise errors.ObjectNotLocked(self)
2071
0.17.2 by Robert Collins
Core proof of concept working.
2072
    def _check_write_ok(self):
0.20.29 by Ian Clatworthy
groupcompress.py code cleanups
2073
        """Raise an exception if writes are not permitted."""
0.17.2 by Robert Collins
Core proof of concept working.
2074
        if not self._is_locked():
2075
            raise errors.ObjectNotLocked(self)
2076
0.17.5 by Robert Collins
nograph tests completely passing.
2077
    def _get_entries(self, keys, check_present=False):
2078
        """Get the entries for keys.
0.20.29 by Ian Clatworthy
groupcompress.py code cleanups
2079
2080
        Note: Callers are responsible for checking that the index is locked
2081
        before calling this method.
2082
0.17.5 by Robert Collins
nograph tests completely passing.
2083
        :param keys: An iterable of index key tuples.
2084
        """
2085
        keys = set(keys)
2086
        found_keys = set()
2087
        if self._parents:
2088
            for node in self._graph_index.iter_entries(keys):
2089
                yield node
2090
                found_keys.add(node[1])
2091
        else:
2092
            # adapt parentless index to the rest of the code.
2093
            for node in self._graph_index.iter_entries(keys):
2094
                yield node[0], node[1], node[2], ()
2095
                found_keys.add(node[1])
2096
        if check_present:
2097
            missing_keys = keys.difference(found_keys)
2098
            if missing_keys:
4398.8.8 by John Arbash Meinel
Respond to Andrew's review comments.
2099
                raise errors.RevisionNotPresent(missing_keys.pop(), self)
0.17.5 by Robert Collins
nograph tests completely passing.
2100
4634.11.3 by John Arbash Meinel
Implement _GCGraphIndex.find_ancestry()
2101
    def find_ancestry(self, keys):
2102
        """See CombinedGraphIndex.find_ancestry"""
2103
        return self._graph_index.find_ancestry(keys, 0)
2104
0.17.5 by Robert Collins
nograph tests completely passing.
2105
    def get_parent_map(self, keys):
2106
        """Get a map of the parents of keys.
2107
2108
        :param keys: The keys to look up parents for.
2109
        :return: A mapping from keys to parents. Absent keys are absent from
2110
            the mapping.
2111
        """
2112
        self._check_read()
2113
        nodes = self._get_entries(keys)
2114
        result = {}
2115
        if self._parents:
2116
            for node in nodes:
2117
                result[node[1]] = node[3][0]
2118
        else:
2119
            for node in nodes:
2120
                result[node[1]] = None
2121
        return result
2122
4343.3.1 by John Arbash Meinel
Set 'supports_external_lookups=True' for dev6 repositories.
2123
    def get_missing_parents(self):
4343.3.21 by John Arbash Meinel
Implement get_missing_parents in terms of _KeyRefs.
2124
        """Return the keys of missing parents."""
2125
        # Copied from _KnitGraphIndex.get_missing_parents
2126
        # We may have false positives, so filter those out.
4634.29.6 by Andrew Bennetts
Put new key tracking in _KeyRefs rather than alongside it.
2127
        self._key_dependencies.satisfy_refs_for_keys(
4343.3.21 by John Arbash Meinel
Implement get_missing_parents in terms of _KeyRefs.
2128
            self.get_parent_map(self._key_dependencies.get_unsatisfied_refs()))
2129
        return frozenset(self._key_dependencies.get_unsatisfied_refs())
4343.3.1 by John Arbash Meinel
Set 'supports_external_lookups=True' for dev6 repositories.
2130
0.17.5 by Robert Collins
nograph tests completely passing.
2131
    def get_build_details(self, keys):
2132
        """Get the various build details for keys.
2133
2134
        Ghosts are omitted from the result.
2135
2136
        :param keys: An iterable of keys.
2137
        :return: A dict of key:
2138
            (index_memo, compression_parent, parents, record_details).
5891.1.2 by Andrew Bennetts
Fix a bunch of docstring formatting nits, making pydoctor a bit happier.
2139
2140
            * index_memo: opaque structure to pass to read_records to extract
2141
              the raw data
2142
            * compression_parent: Content that this record is built upon, may
2143
              be None
2144
            * parents: Logical parents of this node
2145
            * record_details: extra information about the content which needs
2146
              to be passed to Factory.parse_record
0.17.5 by Robert Collins
nograph tests completely passing.
2147
        """
2148
        self._check_read()
2149
        result = {}
0.20.29 by Ian Clatworthy
groupcompress.py code cleanups
2150
        entries = self._get_entries(keys)
0.17.5 by Robert Collins
nograph tests completely passing.
2151
        for entry in entries:
2152
            key = entry[1]
2153
            if not self._parents:
2154
                parents = None
2155
            else:
2156
                parents = entry[3][0]
5365.4.1 by John Arbash Meinel
Find a case where we are wasting a bit of memory.
2157
            details = _GCBuildDetails(parents, self._node_to_position(entry))
2158
            result[key] = details
0.17.5 by Robert Collins
nograph tests completely passing.
2159
        return result
3735.31.2 by John Arbash Meinel
Cleanup trailing whitespace, get test_source to pass by removing asserts.
2160
0.17.5 by Robert Collins
nograph tests completely passing.
2161
    def keys(self):
2162
        """Get all the keys in the collection.
3735.31.2 by John Arbash Meinel
Cleanup trailing whitespace, get test_source to pass by removing asserts.
2163
0.17.5 by Robert Collins
nograph tests completely passing.
2164
        The keys are not ordered.
2165
        """
2166
        self._check_read()
2167
        return [node[1] for node in self._graph_index.iter_all_entries()]
3735.31.2 by John Arbash Meinel
Cleanup trailing whitespace, get test_source to pass by removing asserts.
2168
0.17.5 by Robert Collins
nograph tests completely passing.
2169
    def _node_to_position(self, node):
2170
        """Convert an index value to position details."""
2171
        bits = node[2].split(' ')
2172
        # It would be nice not to read the entire gzip.
4679.9.19 by John Arbash Meinel
Interning the start and stop group positions saves another 7MB peak mem. \o/
2173
        # start and stop are put into _int_cache because they are very common.
2174
        # They define the 'group' that an entry is in, and many groups can have
2175
        # thousands of objects.
2176
        # Branching Launchpad, for example, saves ~600k integers, at 12 bytes
2177
        # each, or about 7MB. Note that it might be even more when you consider
2178
        # how PyInt is allocated in separate slabs. And you can't return a slab
2179
        # to the OS if even 1 int on it is in use. Note though that Python uses
5365.4.1 by John Arbash Meinel
Find a case where we are wasting a bit of memory.
2180
        # a LIFO when re-using PyInt slots, which might cause more
4679.9.19 by John Arbash Meinel
Interning the start and stop group positions saves another 7MB peak mem. \o/
2181
        # fragmentation.
0.17.5 by Robert Collins
nograph tests completely passing.
2182
        start = int(bits[0])
4679.9.19 by John Arbash Meinel
Interning the start and stop group positions saves another 7MB peak mem. \o/
2183
        start = self._int_cache.setdefault(start, start)
0.17.5 by Robert Collins
nograph tests completely passing.
2184
        stop = int(bits[1])
4679.9.19 by John Arbash Meinel
Interning the start and stop group positions saves another 7MB peak mem. \o/
2185
        stop = self._int_cache.setdefault(stop, stop)
0.17.5 by Robert Collins
nograph tests completely passing.
2186
        basis_end = int(bits[2])
2187
        delta_end = int(bits[3])
4679.9.19 by John Arbash Meinel
Interning the start and stop group positions saves another 7MB peak mem. \o/
2188
        # We can't use StaticTuple here, because node[0] is a BTreeGraphIndex
2189
        # instance...
2190
        return (node[0], start, stop, basis_end, delta_end)
0.18.14 by John Arbash Meinel
A bit more work, not really usable yet.
2191
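
# Illustrative sketch (not part of the original source): the _int_cache
# interning trick above. dict.setdefault returns the object already stored
# under the key, so every index row that names the same group start/stop
# ends up sharing one int object instead of keeping its own alive.
_int_cache = {}

def intern_int(value):
    return _int_cache.setdefault(value, value)

a = intern_int(int('123456'))
b = intern_int(int('123456'))
# a == b and a is b: a single 123456 object is kept, however many rows
# mention that offset (CPython only pre-interns small ints itself).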
4343.3.2 by John Arbash Meinel
All stacking tests seem to be passing for dev6 repos
2192
    def scan_unvalidated_index(self, graph_index):
2193
        """Inform this _GCGraphIndex that there is an unvalidated index.
2194
2195
        This allows this _GCGraphIndex to keep track of any missing
2196
        compression parents we may want to have filled in to make those
4634.29.3 by Andrew Bennetts
Simplify further.
2197
        indices valid.  It also allows _GCGraphIndex to track any new keys.
4343.3.2 by John Arbash Meinel
All stacking tests seem to be passing for dev6 repos
2198
2199
        :param graph_index: A GraphIndex
2200
        """
4634.29.3 by Andrew Bennetts
Simplify further.
2201
        key_dependencies = self._key_dependencies
4634.29.6 by Andrew Bennetts
Put new key tracking in _KeyRefs rather than alongside it.
2202
        if key_dependencies is None:
4634.29.1 by Andrew Bennetts
Rough code to reject commit_write_group if any inventory's CHK root is absent.
2203
            return
2204
        for node in graph_index.iter_all_entries():
4634.29.6 by Andrew Bennetts
Put new key tracking in _KeyRefs rather than alongside it.
2205
            # Add parent refs from graph_index (and discard parent refs
2206
            # that the graph_index has).
2207
            key_dependencies.add_references(node[1], node[3][0])
4343.3.2 by John Arbash Meinel
All stacking tests seem to be passing for dev6 repos
2208
0.18.14 by John Arbash Meinel
A bit more work, not really usable yet.
2209
3735.40.4 by John Arbash Meinel
Factor out tests that rely on the exact bytecode.
2210
from bzrlib._groupcompress_py import (
2211
    apply_delta,
3735.40.19 by John Arbash Meinel
Implement apply_delta_to_source which doesn't have to malloc another string.
2212
    apply_delta_to_source,
3735.40.11 by John Arbash Meinel
Implement make_delta and apply_delta.
2213
    encode_base128_int,
2214
    decode_base128_int,
4300.1.1 by John Arbash Meinel
Add the ability to convert a gc block into 'human readable' form.
2215
    decode_copy_instruction,
3735.40.13 by John Arbash Meinel
Rename EquivalenceTable to LinesDeltaIndex.
2216
    LinesDeltaIndex,
3735.40.4 by John Arbash Meinel
Factor out tests that rely on the exact bytecode.
2217
    )
0.18.14 by John Arbash Meinel
A bit more work, not really usable yet.
2218
try:
4241.6.6 by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core.
2219
    from bzrlib._groupcompress_pyx import (
2220
        apply_delta,
3735.40.19 by John Arbash Meinel
Implement apply_delta_to_source which doesn't have to malloc another string.
2221
        apply_delta_to_source,
4241.6.6 by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core.
2222
        DeltaIndex,
3735.40.16 by John Arbash Meinel
Implement (de|en)code_base128_int in pyrex.
2223
        encode_base128_int,
2224
        decode_base128_int,
4241.6.6 by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core.
2225
        )
3735.40.2 by John Arbash Meinel
Add a groupcompress.encode_copy_instruction function.
2226
    GroupCompressor = PyrexGroupCompressor
4574.3.6 by Martin Pool
More warnings when failing to load extensions
2227
except ImportError, e:
4574.3.8 by Martin Pool
Only mutter extension load errors when they occur, and record for later
2228
    osutils.failed_to_load_extension(e)
4241.6.6 by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core.
2229
    GroupCompressor = PythonGroupCompressor
2230
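
# Illustrative sketch (not part of the original source): the general shape
# of the import fallback above -- bind the pure-Python implementation
# first, then let the optional compiled/faster module shadow it if the
# import succeeds. The modules here are stand-ins, not the real ones.
import json as serializer              # always-available fallback
try:
    import simplejson as serializer    # optional faster variant
except ImportError, e:
    # Keep the fallback binding; the failure can be logged, as
    # osutils.failed_to_load_extension does above.
    pass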