# Copyright (C) 2008, 2009 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

"""Core compression logic for compressing streams of related files."""

from itertools import izip
from cStringIO import StringIO
import time
import zlib
try:
    import pylzma
except ImportError:
    pylzma = None

from bzrlib import (
    annotate,
    debug,
    diff,
    errors,
    graph as _mod_graph,
    osutils,
    pack,
    patiencediff,
    trace,
    )
from bzrlib.graph import Graph
from bzrlib.knit import _DirectPackAccess
from bzrlib.btree_index import BTreeBuilder
from bzrlib.lru_cache import LRUSizeCache
from bzrlib.tsort import topo_sort
from bzrlib.versionedfile import (
    adapter_registry,
    AbsentContentFactory,
    ChunkedContentFactory,
    FulltextContentFactory,
    VersionedFiles,
    )

_USE_LZMA = False and (pylzma is not None)

# osutils.sha_string('')
_null_sha1 = 'da39a3ee5e6b4b0d3255bfef95601890afd80709'


def sort_gc_optimal(parent_map):
    """Sort and group the keys in parent_map into groupcompress order.

    groupcompress is defined (currently) as reverse-topological order, grouped
    by the key prefix.

    :return: A sorted-list of keys
    """
    # groupcompress ordering is approximately reverse topological,
    # properly grouped by file-id.
    per_prefix_map = {}
    for item in parent_map.iteritems():
        key = item[0]
        if isinstance(key, str) or len(key) == 1:
            prefix = ''
        else:
            prefix = key[0]
        try:
            per_prefix_map[prefix].append(item)
        except KeyError:
            per_prefix_map[prefix] = [item]

    present_keys = []
    for prefix in sorted(per_prefix_map):
        present_keys.extend(reversed(topo_sort(per_prefix_map[prefix])))
    return present_keys
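
# Illustrative example (an addition for clarity, not from the original
# source): given a parent_map like
#   {('f-id', 'rev-a'): (),
#    ('f-id', 'rev-b'): (('f-id', 'rev-a'),),
#    ('f-id', 'rev-c'): (('f-id', 'rev-b'),)}
# topo_sort() yields rev-a, rev-b, rev-c for the 'f-id' prefix, so the
# groupcompress order returned above is rev-c, rev-b, rev-a: newest texts
# first, with all keys sharing a prefix kept together.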


# The max zlib window size is 32kB, so if we set 'max_size' output of the
# decompressor to the requested bytes + 32kB, then we should guarantee
# num_bytes coming out.
_ZLIB_DECOMP_WINDOW = 32*1024

class GroupCompressBlock(object):
    """An object which maintains the internal structure of the compressed data.

    This tracks the meta info (start of text, length, type, etc.)
    """

    # Group Compress Block v1 Zlib
    GCB_HEADER = 'gcb1z\n'
    # Group Compress Block v1 Lzma
    GCB_LZ_HEADER = 'gcb1l\n'
    GCB_KNOWN_HEADERS = (GCB_HEADER, GCB_LZ_HEADER)

    def __init__(self):
        # map by key? or just order in file?
        self._compressor_name = None
        self._z_content = None
        self._z_content_decompressor = None
        self._z_content_length = None
        self._content_length = None
        self._content = None

    def __len__(self):
        # This is the maximum number of bytes this object will reference if
        # everything is decompressed. However, if we decompress less than
        # everything... (this would cause some problems for LRUSizeCache)
        return self._content_length + self._z_content_length

    def _ensure_content(self, num_bytes=None):
        """Make sure that content has been expanded enough.

        :param num_bytes: Ensure that we have extracted at least num_bytes of
            content. If None, consume everything
        """
        # TODO: If we re-use the same content block at different times during
        #       get_record_stream(), it is possible that the first pass will
        #       get inserted, triggering an extract/_ensure_content() which
        #       will get rid of _z_content. And then the next use of the block
        #       will try to access _z_content (to send it over the wire), and
        #       fail because it is already extracted. Consider never releasing
        #       _z_content because of this.
        if num_bytes is None:
            num_bytes = self._content_length
        elif (self._content_length is not None
              and num_bytes > self._content_length):
            raise AssertionError(
                'requested num_bytes (%d) > content length (%d)'
                % (num_bytes, self._content_length))
        # Expand the content if required
        if self._content is None:
            if self._z_content is None:
                raise AssertionError('No content to decompress')
            if self._z_content == '':
                self._content = ''
            elif self._compressor_name == 'lzma':
                # We don't do partial lzma decomp yet
                self._content = pylzma.decompress(self._z_content)
            elif self._compressor_name == 'zlib':
                # Start a zlib decompressor
                if num_bytes is None:
                    self._content = zlib.decompress(self._z_content)
                else:
                    self._z_content_decompressor = zlib.decompressobj()
                    # Seed the decompressor with the uncompressed bytes, so
                    # that the rest of the code is simplified
                    self._content = self._z_content_decompressor.decompress(
                        self._z_content, num_bytes + _ZLIB_DECOMP_WINDOW)
            else:
                raise AssertionError('Unknown compressor: %r'
                                     % self._compressor_name)
        # Any bytes remaining to be decompressed will be in the decompressors
        # 'unconsumed_tail'

        # Do we have enough bytes already?
        if num_bytes is not None and len(self._content) >= num_bytes:
            return
        if num_bytes is None and self._z_content_decompressor is None:
            # We must have already decompressed everything
            return
        # If we got this far, and don't have a decompressor, something is wrong
        if self._z_content_decompressor is None:
            raise AssertionError(
                'No decompressor to decompress %d bytes' % num_bytes)
        remaining_decomp = self._z_content_decompressor.unconsumed_tail
        if num_bytes is None:
            if remaining_decomp:
                # We don't know how much is left, but we'll decompress it all
                self._content += self._z_content_decompressor.decompress(
                    remaining_decomp)
                # Note: There's what I consider a bug in zlib.decompressobj
                #       If you pass back in the entire unconsumed_tail, only
                #       this time you don't pass a max-size, it doesn't
                #       change the unconsumed_tail back to None/''.
                #       However, we know we are done with the whole stream
                self._z_content_decompressor = None
            # XXX: Why is this the only place in this routine we set this?
            self._content_length = len(self._content)
        else:
            if not remaining_decomp:
                raise AssertionError('Nothing left to decompress')
            needed_bytes = num_bytes - len(self._content)
            # We always set max_size to 32kB over the minimum needed, so that
            # zlib will give us as much as we really want.
            # TODO: If this isn't good enough, we could make a loop here,
            #       that keeps expanding the request until we get enough
            self._content += self._z_content_decompressor.decompress(
                remaining_decomp, needed_bytes + _ZLIB_DECOMP_WINDOW)
            if len(self._content) < num_bytes:
                raise AssertionError('%d bytes wanted, only %d available'
                                     % (num_bytes, len(self._content)))
            if not self._z_content_decompressor.unconsumed_tail:
                # The stream is finished
                self._z_content_decompressor = None

    def _parse_bytes(self, bytes, pos):
        """Read the various lengths from the header.

        This also populates the various 'compressed' buffers.

        :return: The position in bytes just after the last newline
        """
        # At present, we have 2 integers for the compressed and uncompressed
        # content. In base10 (ascii) 14 bytes can represent > 1TB, so to avoid
        # checking too far, cap the search to 14 bytes.
        pos2 = bytes.index('\n', pos, pos + 14)
        self._z_content_length = int(bytes[pos:pos2])
        pos = pos2 + 1
        pos2 = bytes.index('\n', pos, pos + 14)
        self._content_length = int(bytes[pos:pos2])
        pos = pos2 + 1
        if len(bytes) != (pos + self._z_content_length):
            # XXX: Define some GCCorrupt error ?
            raise AssertionError('Invalid bytes: (%d) != %d + %d' %
                                 (len(bytes), pos, self._z_content_length))
        self._z_content = bytes[pos:]

    @classmethod
    def from_bytes(cls, bytes):
        out = cls()
        if bytes[:6] not in cls.GCB_KNOWN_HEADERS:
            raise ValueError('bytes did not start with any of %r'
                             % (cls.GCB_KNOWN_HEADERS,))
        # XXX: why not testing the whole header ?
        if bytes[4] == 'z':
            out._compressor_name = 'zlib'
        elif bytes[4] == 'l':
            out._compressor_name = 'lzma'
        else:
            raise ValueError('unknown compressor: %r' % (bytes,))
        out._parse_bytes(bytes, 6)
        return out

    def extract(self, key, start, end, sha1=None):
        """Extract the text for a specific key.

        :param key: The label used for this content
        :param sha1: TODO (should we validate only when sha1 is supplied?)
        :return: The bytes for the content
        """
        if start == end == 0:
            return ''
        self._ensure_content(end)
        # The bytes are 'f' or 'd' for the type, then a variable-length
        # base128 integer for the content size, then the actual content
        # We know that the variable-length integer won't be longer than 5
        # bytes (it takes 5 bytes to encode 2^32)
        c = self._content[start]
        if c == 'f':
            type = 'fulltext'
        else:
            if c != 'd':
                raise ValueError('Unknown content control code: %s'
                                 % (c,))
            type = 'delta'
        content_len, len_len = decode_base128_int(
                            self._content[start + 1:start + 6])
        content_start = start + 1 + len_len
        if end != content_start + content_len:
            raise ValueError('end != len according to field header'
                ' %s != %s' % (end, content_start + content_len))
        if c == 'f':
            bytes = self._content[content_start:end]
        elif c == 'd':
            bytes = apply_delta_to_source(self._content, content_start, end)
        return bytes
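
    # An illustrative sketch of the entry layout parsed above (added for
    # clarity, not from the original source): a 10-byte fulltext is stored
    # as 'f' + '\x0a' + <10 content bytes>, while a 300-byte delta is stored
    # as 'd' + '\xac\x02' + <300 delta bytes>; the middle field is the
    # base128-encoded length of the payload that follows it.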

    def set_content(self, content):
        """Set the content of this block."""
        self._content_length = len(content)
        self._content = content
        self._z_content = None

    def to_bytes(self):
        """Encode the information into a byte stream."""
        compress = zlib.compress
        if _USE_LZMA:
            compress = pylzma.compress
        if self._z_content is None:
            if self._content is None:
                raise AssertionError('Nothing to compress')
            self._z_content = compress(self._content)
            self._z_content_length = len(self._z_content)
        if _USE_LZMA:
            header = self.GCB_LZ_HEADER
        else:
            header = self.GCB_HEADER
        chunks = [header,
                  '%d\n%d\n' % (self._z_content_length, self._content_length),
                  self._z_content,
                 ]
        return ''.join(chunks)
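
    # Illustrative layout of a serialised block, as produced by to_bytes()
    # and read back by _parse_bytes() (a sketch added for clarity):
    #   'gcb1z\n'        six byte header selecting the zlib variant
    #   '<z-len>\n'      decimal length of the compressed content
    #   '<len>\n'        decimal length of the uncompressed content
    #   <z-len bytes>    the compressed content itself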


class _LazyGroupCompressFactory(object):
    """Yield content from a GroupCompressBlock on demand."""

    def __init__(self, key, parents, manager, start, end, first):
        """Create a _LazyGroupCompressFactory

        :param key: The key of just this record
        :param parents: The parents of this key (possibly None)
        :param manager: The _LazyGroupContentManager that owns the
            GroupCompressBlock this record lives in
        :param start: Offset of the first byte for this record in the
            uncompressed content
        :param end: Offset of the byte just after the end of this record
            (ie, bytes = content[start:end])
        :param first: Is this the first Factory for the given block?
        """
        self.key = key
        self.parents = parents
        self.sha1 = None
        # Note: This attribute coupled with Manager._factories creates a
        #       reference cycle. Perhaps we would rather use a weakref(), or
        #       find an appropriate time to release the ref. After the first
        #       get_bytes_as call? After Manager.get_record_stream() returns
        #       the object?
        self._manager = manager
        self._bytes = None
        self.storage_kind = 'groupcompress-block'
        if not first:
            self.storage_kind = 'groupcompress-block-ref'
        self._first = first
        self._start = start
        self._end = end

    def __repr__(self):
        return '%s(%s, first=%s)' % (self.__class__.__name__,
            self.key, self._first)

    def get_bytes_as(self, storage_kind):
        if storage_kind == self.storage_kind:
            if self._first:
                # wire bytes, something...
                return self._manager._wire_bytes()
            else:
                return ''
        if storage_kind in ('fulltext', 'chunked'):
            if self._bytes is None:
                # Grab and cache the raw bytes for this entry
                # and break the ref-cycle with _manager since we don't need it
                # anymore
                self._manager._prepare_for_extract()
                block = self._manager._block
                self._bytes = block.extract(self.key, self._start, self._end)
                # There are code paths that first extract as fulltext, and then
                # extract as storage_kind (smart fetch). So we don't break the
                # refcycle here, but instead in manager.get_record_stream()
                # self._manager = None
            if storage_kind == 'fulltext':
                return self._bytes
            else:
                return [self._bytes]
        raise errors.UnavailableRepresentation(self.key, storage_kind,
                                               self.storage_kind)


class _LazyGroupContentManager(object):
    """This manages a group of _LazyGroupCompressFactory objects."""

    def __init__(self, block):
        self._block = block
        # We need to preserve the ordering
        self._factories = []
        self._last_byte = 0

    def add_factory(self, key, parents, start, end):
        if not self._factories:
            first = True
        else:
            first = False
        # Note that this creates a reference cycle....
        factory = _LazyGroupCompressFactory(key, parents, self,
            start, end, first=first)
        # max() works here, but as a function call, doing a compare seems to be
        # significantly faster, timeit says 250ms for max() and 100ms for the
        # comparison
        if end > self._last_byte:
            self._last_byte = end
        self._factories.append(factory)

    def get_record_stream(self):
        """Get a record for all keys added so far."""
        for factory in self._factories:
            yield factory
            # Break the ref-cycle
            factory._bytes = None
            factory._manager = None
        # TODO: Consider setting self._factories = None after the above loop,
        #       as it will break the reference cycle

    def _trim_block(self, last_byte):
        """Create a new GroupCompressBlock, with just some of the content."""
        # None of the factories need to be adjusted, because the content is
        # located in an identical place. Just that some of the unreferenced
        # trailing bytes are stripped
        trace.mutter('stripping trailing bytes from groupcompress block'
                     ' %d => %d', self._block._content_length, last_byte)
        new_block = GroupCompressBlock()
        self._block._ensure_content(last_byte)
        new_block.set_content(self._block._content[:last_byte])
        self._block = new_block

    def _rebuild_block(self):
        """Create a new GroupCompressBlock with only the referenced texts."""
        compressor = GroupCompressor()
        tstart = time.time()
        old_length = self._block._content_length
        end_point = 0
        for factory in self._factories:
            bytes = factory.get_bytes_as('fulltext')
            (found_sha1, start_point, end_point,
             type) = compressor.compress(factory.key, bytes, factory.sha1)
            # Now update this factory with the new offsets, etc
            factory.sha1 = found_sha1
            factory._start = start_point
            factory._end = end_point
        self._last_byte = end_point
        new_block = compressor.flush()
        # TODO: Should we check that new_block really *is* smaller than the old
        #       block? It seems hard to come up with a method that it would
        #       expand, since we do full compression again. Perhaps based on a
        #       request that ends up poorly ordered?
        delta = time.time() - tstart
        self._block = new_block
        trace.mutter('creating new compressed block on-the-fly in %.3fs'
                     ' %d bytes => %d bytes', delta, old_length,
                     self._block._content_length)

    def _prepare_for_extract(self):
        """A _LazyGroupCompressFactory is about to extract to fulltext."""
        # We expect that if one child is going to fulltext, all will be. This
        # helps prevent all of them from extracting a small amount at a time.
        # Which in itself isn't terribly expensive, but resizing 2MB 32kB at a
        # time (self._block._content) is a little expensive.
        self._block._ensure_content(self._last_byte)

    def _check_rebuild_block(self):
        """Check to see if our block should be repacked."""
        total_bytes_used = 0
        last_byte_used = 0
        for factory in self._factories:
            total_bytes_used += factory._end - factory._start
            last_byte_used = max(last_byte_used, factory._end)
        # If we are using most of the bytes from the block, we have nothing
        # else to check (currently more than 1/2)
        if total_bytes_used * 2 >= self._block._content_length:
            return
        # Can we just strip off the trailing bytes? If we are going to be
        # transmitting more than 50% of the front of the content, go ahead
        if total_bytes_used * 2 > last_byte_used:
            self._trim_block(last_byte_used)
            return

        # We are using a small amount of the data, and it isn't just packed
        # nicely at the front, so rebuild the content.
        # Note: This would be *nicer* as a strip-data-from-group, rather than
        #       building it up again from scratch
        #       It might be reasonable to consider the fulltext sizes for
        #       different bits when deciding this, too. As you may have a small
        #       fulltext, and a trivial delta, and you are just trading around
        #       for another fulltext. If we do a simple 'prune' you may end up
        #       expanding many deltas into fulltexts, as well.
        #       If we build a cheap enough 'strip', then we could try a strip,
        #       if that expands the content, we then rebuild.
        self._rebuild_block()

    def _wire_bytes(self):
        """Return a byte stream suitable for transmitting over the wire."""
        self._check_rebuild_block()
        # The outer block starts with:
        #   'groupcompress-block\n'
        #   <length of compressed key info>\n
        #   <length of uncompressed info>\n
        #   <length of gc block>\n
        #   <header bytes>
        #   <gc-block>
        lines = ['groupcompress-block\n']
        # The minimal info we need is the key, the start offset, and the
        # parents. The length and type are encoded in the record itself.
        # However, passing in the other bits makes it easier.  The list of
        # keys, and the start offset, the length
        # 1 line key
        # 1 line with parents, '' for ()
        # 1 line for start offset
        # 1 line for end byte
        header_lines = []
        for factory in self._factories:
            key_bytes = '\x00'.join(factory.key)
            parents = factory.parents
            if parents is None:
                parent_bytes = 'None:'
            else:
                parent_bytes = '\t'.join('\x00'.join(key) for key in parents)
            record_header = '%s\n%s\n%d\n%d\n' % (
                key_bytes, parent_bytes, factory._start, factory._end)
            header_lines.append(record_header)
            # TODO: Can we break the refcycle at this point and set
            #       factory._manager = None?
        header_bytes = ''.join(header_lines)
        del header_lines
        header_bytes_len = len(header_bytes)
        z_header_bytes = zlib.compress(header_bytes)
        del header_bytes
        z_header_bytes_len = len(z_header_bytes)
        block_bytes = self._block.to_bytes()
        lines.append('%d\n%d\n%d\n' % (z_header_bytes_len, header_bytes_len,
                                       len(block_bytes)))
        lines.append(z_header_bytes)
        lines.append(block_bytes)
        del z_header_bytes, block_bytes
        return ''.join(lines)
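
    # A sketch of one record in the compressed header built above (added for
    # clarity; the key names are illustrative only):
    #   'file-id\x00rev-b\n'    the key, with elements joined by NUL
    #   'file-id\x00rev-a\n'    the parents, joined by tab, or 'None:'
    #   '0\n'                   start offset in the uncompressed block
    #   '10\n'                  end offset in the uncompressed block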

    @classmethod
    def from_bytes(cls, bytes):
        # TODO: This does extra string copying, probably better to do it a
        #       different way
        (storage_kind, z_header_len, header_len,
         block_len, rest) = bytes.split('\n', 4)
        del bytes
        if storage_kind != 'groupcompress-block':
            raise ValueError('Unknown storage kind: %s' % (storage_kind,))
        z_header_len = int(z_header_len)
        if len(rest) < z_header_len:
            raise ValueError('Compressed header len shorter than all bytes')
        z_header = rest[:z_header_len]
        header_len = int(header_len)
        header = zlib.decompress(z_header)
        if len(header) != header_len:
            raise ValueError('invalid length for decompressed bytes')
        del z_header
        block_len = int(block_len)
        if len(rest) != z_header_len + block_len:
            raise ValueError('Invalid length for block')
        block_bytes = rest[z_header_len:]
        del rest
        # So now we have a valid GCB, we just need to parse the factories that
        # were sent to us
        header_lines = header.split('\n')
        del header
        last = header_lines.pop()
        if last != '':
            raise ValueError('header lines did not end with a trailing'
                             ' newline')
        if len(header_lines) % 4 != 0:
            raise ValueError('The header was not an even multiple of 4 lines')
        block = GroupCompressBlock.from_bytes(block_bytes)
        del block_bytes
        result = cls(block)
        for start in xrange(0, len(header_lines), 4):
            # intern()?
            key = tuple(header_lines[start].split('\x00'))
            parents_line = header_lines[start+1]
            if parents_line == 'None:':
                parents = None
            else:
                parents = tuple([tuple(segment.split('\x00'))
                                 for segment in parents_line.split('\t')
                                  if segment])
            start_offset = int(header_lines[start+2])
            end_offset = int(header_lines[start+3])
            result.add_factory(key, parents, start_offset, end_offset)
        return result


def network_block_to_records(storage_kind, bytes, line_end):
    if storage_kind != 'groupcompress-block':
        raise ValueError('Unknown storage kind: %s' % (storage_kind,))
    manager = _LazyGroupContentManager.from_bytes(bytes)
    return manager.get_record_stream()


class _CommonGroupCompressor(object):

    def __init__(self):
        """Create a GroupCompressor."""
        self.chunks = []
        self._last = None
        self.endpoint = 0
        self.input_bytes = 0
        self.labels_deltas = {}
        self._delta_index = None # Set by the children
        self._block = GroupCompressBlock()

    def compress(self, key, bytes, expected_sha, nostore_sha=None, soft=False):
        """Compress lines with label key.

        :param key: A key tuple. It is stored in the output
            for identification of the text during decompression. If the last
            element is 'None' it is replaced with the sha1 of the text -
            e.g. sha1:xxxxxxx.
        :param bytes: The bytes to be compressed
        :param expected_sha: If non-None, the sha the lines are believed to
            have. During compression the sha is calculated; a mismatch will
            cause an error.
        :param nostore_sha: If the computed sha1 sum matches, we will raise
            ExistingContent rather than adding the text.
        :param soft: Do a 'soft' compression. This means that we require larger
            ranges to match to be considered for a copy command.

        :return: The sha1 of lines, the start and end offsets in the delta, and
            the type ('fulltext' or 'delta').

        :seealso VersionedFiles.add_lines:
        """
        if not bytes: # empty, like a dir entry, etc
            if nostore_sha == _null_sha1:
                raise errors.ExistingContent()
            return _null_sha1, 0, 0, 'fulltext'
        # we assume someone knew what they were doing when they passed it in
        if expected_sha is not None:
            sha1 = expected_sha
        else:
            sha1 = osutils.sha_string(bytes)
        if nostore_sha is not None:
            if sha1 == nostore_sha:
                raise errors.ExistingContent()
        if key[-1] is None:
            key = key[:-1] + ('sha1:' + sha1,)

        start, end, type = self._compress(key, bytes, len(bytes) / 2, soft)
        return sha1, start, end, type

    def _compress(self, key, bytes, max_delta_size, soft=False):
        """Compress lines with label key.

        :param key: A key tuple. It is stored in the output for identification
            of the text during decompression.

        :param bytes: The bytes to be compressed

        :param max_delta_size: The size above which we issue a fulltext instead
            of a delta.

        :param soft: Do a 'soft' compression. This means that we require larger
            ranges to match to be considered for a copy command.

        :return: The start and end offsets in the delta, and the type
            ('fulltext' or 'delta').
        """
        raise NotImplementedError(self._compress)

    def extract(self, key):
        """Extract a key previously added to the compressor.

        :param key: The key to extract.
        :return: An iterable over bytes and the sha1.
        """
        (start_byte, start_chunk, end_byte, end_chunk) = self.labels_deltas[key]
        delta_chunks = self.chunks[start_chunk:end_chunk]
        stored_bytes = ''.join(delta_chunks)
        if stored_bytes[0] == 'f':
            fulltext_len, offset = decode_base128_int(stored_bytes[1:10])
            data_len = fulltext_len + 1 + offset
            if data_len != len(stored_bytes):
                raise ValueError('Index claimed fulltext len, but stored bytes'
                                 ' claim %s != %s'
                                 % (len(stored_bytes), data_len))
            bytes = stored_bytes[offset + 1:]
        else:
            # XXX: This is inefficient at best
            source = ''.join(self.chunks[:start_chunk])
            if stored_bytes[0] != 'd':
                raise ValueError('Unknown content kind, bytes claim %s'
                                 % (stored_bytes[0],))
            delta_len, offset = decode_base128_int(stored_bytes[1:10])
            data_len = delta_len + 1 + offset
            if data_len != len(stored_bytes):
                raise ValueError('Index claimed delta len, but stored bytes'
                                 ' claim %s != %s'
                                 % (len(stored_bytes), data_len))
            bytes = apply_delta(source, stored_bytes[offset + 1:])
        bytes_sha1 = osutils.sha_string(bytes)
        return bytes, bytes_sha1

    def flush(self):
        """Finish this group, creating a formatted stream.

        After calling this, the compressor should no longer be used
        """
        content = ''.join(self.chunks)
        self.chunks = None
        self._delta_index = None
        self._block.set_content(content)
        return self._block

    def pop_last(self):
        """Call this if you want to 'revoke' the last compression.

        After this, the data structures will be rolled back, but you cannot do
        more compression.
        """
        self._delta_index = None
        del self.chunks[self._last[0]:]
        self.endpoint = self._last[1]
        self._last = None

    def ratio(self):
        """Return the overall compression ratio."""
        return float(self.input_bytes) / float(self.endpoint)
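
    # A hedged usage sketch of the compressor API above (the names are purely
    # illustrative; GroupCompressor is expected to be bound to one of the
    # concrete subclasses defined below, elsewhere in this module):
    #   compressor = GroupCompressor()
    #   sha1, start, end, kind = compressor.compress(
    #       ('file-id', 'rev-a'), text_bytes, None)
    #   block = compressor.flush()   # -> a GroupCompressBlock
    #   print compressor.ratio()     # overall input/output size ratio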


class PythonGroupCompressor(_CommonGroupCompressor):

    def __init__(self):
        """Create a GroupCompressor.

        Used only if the pyrex version is not available.
        """
        super(PythonGroupCompressor, self).__init__()
        self._delta_index = LinesDeltaIndex([])
        # The actual content is managed by LinesDeltaIndex
        self.chunks = self._delta_index.lines

    def _compress(self, key, bytes, max_delta_size, soft=False):
        """see _CommonGroupCompressor._compress"""
        input_len = len(bytes)
        new_lines = osutils.split_lines(bytes)
        out_lines, index_lines = self._delta_index.make_delta(
            new_lines, bytes_length=input_len, soft=soft)
        delta_length = sum(map(len, out_lines))
        if delta_length > max_delta_size:
            # The delta is longer than the fulltext, insert a fulltext
            type = 'fulltext'
            out_lines = ['f', encode_base128_int(input_len)]
            out_lines.extend(new_lines)
            index_lines = [False, False]
            index_lines.extend([True] * len(new_lines))
        else:
            # this is a worthy delta, output it
            type = 'delta'
            out_lines[0] = 'd'
            # Update the delta_length to include those two encoded integers
            out_lines[1] = encode_base128_int(delta_length)
        # Before insertion
        start = self.endpoint
        chunk_start = len(self.chunks)
        self._delta_index.extend_lines(out_lines, index_lines)
        self.endpoint = self._delta_index.endpoint
        self.input_bytes += input_len
        chunk_end = len(self.chunks)
        self.labels_deltas[key] = (start, chunk_start,
                                   self.endpoint, chunk_end)
        return start, self.endpoint, type


class PyrexGroupCompressor(_CommonGroupCompressor):
    """Produce a serialised group of compressed texts.

    It contains code very similar to SequenceMatcher because it has a similar
    task. However, some key differences apply:
     - there is no junk, we want a minimal edit not a human readable diff.
     - we don't filter very common lines (because we don't know where a good
       range will start, and after the first text we want to be emitting
       minimal edits only).
     - we chain the left side, not the right side
     - we incrementally update the adjacency matrix as new lines are provided.
     - we look for matches in all of the left side, so the routine which does
       the analogous task of find_longest_match does not need to filter on the
       left side.
    """

    def __init__(self):
        super(PyrexGroupCompressor, self).__init__()
        self._delta_index = DeltaIndex()

    def _compress(self, key, bytes, max_delta_size, soft=False):
        """see _CommonGroupCompressor._compress"""
        input_len = len(bytes)
        # By having action/label/sha1/len, we can parse the group if the index
        # was ever destroyed, we have the key in 'label', we know the final
        # bytes are valid from sha1, and we know where to find the end of this
        # record because of 'len'. (the delta record itself will store the
        # total length for the expanded record)
        # 'len: %d\n' costs approximately 1% increase in total data
        # Having the labels at all costs us 9-10% increase, 38% increase for
        # inventory pages, and 5.8% increase for text pages
        # new_chunks = ['label:%s\nsha1:%s\n' % (label, sha1)]
        if self._delta_index._source_offset != self.endpoint:
            raise AssertionError('_source_offset != endpoint'
                ' somehow the DeltaIndex got out of sync with'
                ' the output lines')
        delta = self._delta_index.make_delta(bytes, max_delta_size)
        if (delta is None):
            type = 'fulltext'
            enc_length = encode_base128_int(len(bytes))
            len_mini_header = 1 + len(enc_length)
            self._delta_index.add_source(bytes, len_mini_header)
            new_chunks = ['f', enc_length, bytes]
        else:
            type = 'delta'
            enc_length = encode_base128_int(len(delta))
            len_mini_header = 1 + len(enc_length)
            new_chunks = ['d', enc_length, delta]
            self._delta_index.add_delta_source(delta, len_mini_header)
        # Before insertion
        start = self.endpoint
        chunk_start = len(self.chunks)
        # Now output these bytes
        self._output_chunks(new_chunks)
        self.input_bytes += input_len
        chunk_end = len(self.chunks)
        self.labels_deltas[key] = (start, chunk_start,
                                   self.endpoint, chunk_end)
        if not self._delta_index._source_offset == self.endpoint:
            raise AssertionError('the delta index is out of sync'
                ' with the output lines %s != %s'
                % (self._delta_index._source_offset, self.endpoint))
        return start, self.endpoint, type

    def _output_chunks(self, new_chunks):
        """Output some chunks.

        :param new_chunks: The chunks to output.
        """
        self._last = (len(self.chunks), self.endpoint)
        endpoint = self.endpoint
        self.chunks.extend(new_chunks)
        endpoint += sum(map(len, new_chunks))
        self.endpoint = endpoint


def make_pack_factory(graph, delta, keylength):
    """Create a factory for creating a pack based groupcompress.

    This is only functional enough to run interface tests, it doesn't try to
    provide a full pack environment.

    :param graph: Store a graph.
    :param delta: Delta compress contents.
    :param keylength: How long should keys be.
    """
    def factory(transport):
        parents = graph
        ref_length = 0
        if graph:
            ref_length = 1
        graph_index = BTreeBuilder(reference_lists=ref_length,
            key_elements=keylength)
        stream = transport.open_write_stream('newpack')
        writer = pack.ContainerWriter(stream.write)
        writer.begin()
        index = _GCGraphIndex(graph_index, lambda:True, parents=parents,
            add_callback=graph_index.add_nodes)
        access = _DirectPackAccess({})
        access.set_writer(writer, graph_index, (transport, 'newpack'))
        result = GroupCompressVersionedFiles(index, access, delta)
        result.stream = stream
        result.writer = writer
        return result
    return factory


def cleanup_pack_group(versioned_files):
    versioned_files.writer.end()
    versioned_files.stream.close()
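
# A minimal usage sketch for the two helpers above (illustrative only; the
# transport is assumed to be any writable bzrlib transport):
#   factory = make_pack_factory(graph=True, delta=False, keylength=1)
#   vf = factory(transport)
#   vf.add_lines(('rev-a',), (), ['some\n', 'content\n'])
#   cleanup_pack_group(vf)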
865
866
867
class GroupCompressVersionedFiles(VersionedFiles):
868
    """A group-compress based VersionedFiles implementation."""
869
870
    def __init__(self, index, access, delta=True):
871
        """Create a GroupCompressVersionedFiles object.
872
873
        :param index: The index object storing access and graph data.
874
        :param access: The access object storing raw data.
875
        :param delta: Whether to delta compress or just entropy compress.
876
        """
877
        self._index = index
878
        self._access = access
879
        self._delta = delta
880
        self._unadded_refs = {}
881
        self._group_cache = LRUSizeCache(max_size=50*1024*1024)
882
        self._fallback_vfs = []
883
884
    def add_lines(self, key, parents, lines, parent_texts=None,
885
        left_matching_blocks=None, nostore_sha=None, random_id=False,
886
        check_content=True):
887
        """Add a text to the store.
888
889
        :param key: The key tuple of the text to add.
890
        :param parents: The parents key tuples of the text to add.
891
        :param lines: A list of lines. Each line must be a bytestring. And all
892
            of them except the last must be terminated with \n and contain no
893
            other \n's. The last line may either contain no \n's or a single
894
            terminating \n. If the lines list does meet this constraint the add
895
            routine may error or may succeed - but you will be unable to read
896
            the data back accurately. (Checking the lines have been split
897
            correctly is expensive and extremely unlikely to catch bugs so it
898
            is not done at runtime unless check_content is True.)
899
        :param parent_texts: An optional dictionary containing the opaque
900
            representations of some or all of the parents of version_id to
901
            allow delta optimisations.  VERY IMPORTANT: the texts must be those
902
            returned by add_lines or data corruption can be caused.
903
        :param left_matching_blocks: a hint about which areas are common
904
            between the text and its left-hand-parent.  The format is
905
            the SequenceMatcher.get_matching_blocks format.
906
        :param nostore_sha: Raise ExistingContent and do not add the lines to
907
            the versioned file if the digest of the lines matches this.
908
        :param random_id: If True a random id has been selected rather than
909
            an id determined by some deterministic process such as a converter
910
            from a foreign VCS. When True the backend may choose not to check
911
            for uniqueness of the resulting key within the versioned file, so
912
            this should only be done when the result is expected to be unique
913
            anyway.
914
        :param check_content: If True, the lines supplied are verified to be
915
            bytestrings that are correctly formed lines.
916
        :return: The text sha1, the number of bytes in the text, and an opaque
917
                 representation of the inserted version which can be provided
918
                 back to future add_lines calls in the parent_texts dictionary.
919
        """
920
        self._index._check_write_ok()
921
        self._check_add(key, lines, random_id, check_content)
922
        if parents is None:
923
            # The caller might pass None if there is no graph data, but kndx
924
            # indexes can't directly store that, so we give them
925
            # an empty tuple instead.
926
            parents = ()
927
        # double handling for now. Make it work until then.
928
        length = sum(map(len, lines))
929
        record = ChunkedContentFactory(key, parents, None, lines)
930
        sha1 = list(self._insert_record_stream([record], random_id=random_id,
931
                                               nostore_sha=nostore_sha))[0]
932
        return sha1, length, None
933
934
    def add_fallback_versioned_files(self, a_versioned_files):
935
        """Add a source of texts for texts not present in this knit.
936
937
        :param a_versioned_files: A VersionedFiles object.
938
        """
939
        self._fallback_vfs.append(a_versioned_files)
940
941
    def annotate(self, key):
942
        """See VersionedFiles.annotate."""
943
        graph = Graph(self)
944
        parent_map = self.get_parent_map([key])
945
        if not parent_map:
946
            raise errors.RevisionNotPresent(key, self)
947
        if parent_map[key] is not None:
948
            search = graph._make_breadth_first_searcher([key])
949
            keys = set()
950
            while True:
951
                try:
952
                    present, ghosts = search.next_with_ghosts()
953
                except StopIteration:
954
                    break
955
                keys.update(present)
956
            parent_map = self.get_parent_map(keys)
957
        else:
958
            keys = [key]
959
            parent_map = {key:()}
960
        head_cache = _mod_graph.FrozenHeadsCache(graph)
961
        parent_cache = {}
962
        reannotate = annotate.reannotate
963
        for record in self.get_record_stream(keys, 'topological', True):
964
            key = record.key
965
            chunks = osutils.chunks_to_lines(record.get_bytes_as('chunked'))
966
            parent_lines = [parent_cache[parent] for parent in parent_map[key]]
967
            parent_cache[key] = list(
968
                reannotate(parent_lines, chunks, key, None, head_cache))
969
        return parent_cache[key]
970
971
    def check(self, progress_bar=None):
972
        """See VersionedFiles.check()."""
973
        keys = self.keys()
974
        for record in self.get_record_stream(keys, 'unordered', True):
975
            record.get_bytes_as('fulltext')
976
977
    def _check_add(self, key, lines, random_id, check_content):
978
        """check that version_id and lines are safe to add."""
979
        version_id = key[-1]
980
        if version_id is not None:
981
            if osutils.contains_whitespace(version_id):
982
                raise errors.InvalidRevisionId(version_id, self)
983
        self.check_not_reserved_id(version_id)
984
        # TODO: If random_id==False and the key is already present, we should
985
        # probably check that the existing content is identical to what is
986
        # being inserted, and otherwise raise an exception.  This would make
987
        # the bundle code simpler.
988
        if check_content:
989
            self._check_lines_not_unicode(lines)
990
            self._check_lines_are_lines(lines)
991
992
    def get_parent_map(self, keys):
993
        """Get a map of the graph parents of keys.
994
995
        :param keys: The keys to look up parents for.
996
        :return: A mapping from keys to parents. Absent keys are absent from
997
            the mapping.
998
        """
999
        return self._get_parent_map_with_sources(keys)[0]
1000
1001
    def _get_parent_map_with_sources(self, keys):
1002
        """Get a map of the parents of keys.
1003
1004
        :param keys: The keys to look up parents for.
1005
        :return: A tuple. The first element is a mapping from keys to parents.
1006
            Absent keys are absent from the mapping. The second element is a
1007
            list with the locations each key was found in. The first element
1008
            is the in-this-knit parents, the second the first fallback source,
1009
            and so on.
1010
        """
1011
        result = {}
1012
        sources = [self._index] + self._fallback_vfs
1013
        source_results = []
1014
        missing = set(keys)
1015
        for source in sources:
1016
            if not missing:
1017
                break
1018
            new_result = source.get_parent_map(missing)
1019
            source_results.append(new_result)
1020
            result.update(new_result)
1021
            missing.difference_update(set(new_result))
1022
        return result, source_results
1023
1024
    def _get_block(self, index_memo):
1025
        read_memo = index_memo[0:3]
1026
        # get the group:
1027
        try:
1028
            block = self._group_cache[read_memo]
1029
        except KeyError:
1030
            # read the group
1031
            zdata = self._access.get_raw_records([read_memo]).next()
1032
            # decompress - whole thing - this is not a bug, as it
1033
            # permits caching. We might want to store the partially
1034
            # decompresed group and decompress object, so that recent
1035
            # texts are not penalised by big groups.
1036
            block = GroupCompressBlock.from_bytes(zdata)
1037
            self._group_cache[read_memo] = block
1038
        # cheapo debugging:
1039
        # print len(zdata), len(plain)
1040
        # parse - requires split_lines, better to have byte offsets
1041
        # here (but not by much - we only split the region for the
1042
        # recipe, and we often want to end up with lines anyway.
1043
        return block
1044
1045
    def get_missing_compression_parent_keys(self):
1046
        """Return the keys of missing compression parents.
1047
1048
        Missing compression parents occur when a record stream was missing
1049
        basis texts, or a index was scanned that had missing basis texts.
1050
        """
1051
        # GroupCompress cannot currently reference texts that are not in the
1052
        # group, so this is valid for now
1053
        return frozenset()
1054
1055
    def get_record_stream(self, keys, ordering, include_delta_closure):
1056
        """Get a stream of records for keys.
1057
1058
        :param keys: The keys to include.
1059
        :param ordering: Either 'unordered' or 'topological'. A topologically
1060
            sorted stream has compression parents strictly before their
1061
            children.
1062
        :param include_delta_closure: If True then the closure across any
1063
            compression parents will be included (in the opaque data).
1064
        :return: An iterator of ContentFactory objects, each of which is only
1065
            valid until the iterator is advanced.
1066
        """
1067
        # keys might be a generator
1068
        orig_keys = list(keys)
1069
        keys = set(keys)
1070
        if not keys:
1071
            return
1072
        if (not self._index.has_graph
1073
            and ordering in ('topological', 'groupcompress')):
1074
            # Cannot topological order when no graph has been stored.
1075
            # but we allow 'as-requested' or 'unordered'
1076
            ordering = 'unordered'
1077
1078
        remaining_keys = keys
1079
        while True:
1080
            try:
1081
                keys = set(remaining_keys)
1082
                for content_factory in self._get_remaining_record_stream(keys,
1083
                        orig_keys, ordering, include_delta_closure):
1084
                    remaining_keys.discard(content_factory.key)
1085
                    yield content_factory
1086
                return
1087
            except errors.RetryWithNewPacks, e:
1088
                self._access.reload_or_raise(e)
1089
1090
    def _find_from_fallback(self, missing):
1091
        """Find whatever keys you can from the fallbacks.
1092
1093
        :param missing: A set of missing keys. This set will be mutated as keys
1094
            are found from a fallback_vfs
1095
        :return: (parent_map, key_to_source_map, source_results)
1096
            parent_map  the overall key => parent_keys
1097
            key_to_source_map   a dict from {key: source}
1098
            source_results      a list of (source: keys)
1099
        """
1100
        parent_map = {}
1101
        key_to_source_map = {}
1102
        source_results = []
1103
        for source in self._fallback_vfs:
1104
            if not missing:
1105
                break
1106
            source_parents = source.get_parent_map(missing)
1107
            parent_map.update(source_parents)
1108
            source_parents = list(source_parents)
1109
            source_results.append((source, source_parents))
1110
            key_to_source_map.update((key, source) for key in source_parents)
1111
            missing.difference_update(source_parents)
1112
        return parent_map, key_to_source_map, source_results
1113
1114
    def _get_ordered_source_keys(self, ordering, parent_map, key_to_source_map):
1115
        """Get the (source, [keys]) list.
1116
1117
        The returned objects should be in the order defined by 'ordering',
1118
        which can weave between different sources.
1119
        :param ordering: Must be one of 'topological' or 'groupcompress'
1120
        :return: List of [(source, [keys])] tuples, such that all keys are in
1121
            the defined order, regardless of source.
1122
        """
1123
        if ordering == 'topological':
1124
            present_keys = topo_sort(parent_map)
1125
        else:
1126
            # ordering == 'groupcompress'
1127
            # XXX: This only optimizes for the target ordering. We may need
1128
            #      to balance that with the time it takes to extract
1129
            #      ordering, by somehow grouping based on
1130
            #      locations[key][0:3]
1131
            present_keys = sort_gc_optimal(parent_map)
1132
        # Now group by source:
1133
        source_keys = []
1134
        current_source = None
1135
        for key in present_keys:
1136
            source = key_to_source_map.get(key, self)
1137
            if source is not current_source:
1138
                source_keys.append((source, []))
1139
                current_source = source
1140
            source_keys[-1][1].append(key)
1141
        return source_keys
1142
1143
    def _get_as_requested_source_keys(self, orig_keys, locations, unadded_keys,
1144
                                      key_to_source_map):
1145
        source_keys = []
1146
        current_source = None
1147
        for key in orig_keys:
1148
            if key in locations or key in unadded_keys:
1149
                source = self
1150
            elif key in key_to_source_map:
1151
                source = key_to_source_map[key]
1152
            else: # absent
1153
                continue
1154
            if source is not current_source:
1155
                source_keys.append((source, []))
1156
                current_source = source
1157
            source_keys[-1][1].append(key)
1158
        return source_keys
1159
1160
    def _get_io_ordered_source_keys(self, locations, unadded_keys,
1161
                                    source_result):
1162
        def get_group(key):
1163
            # This is the group the bytes are stored in, followed by the
1164
            # location in the group
1165
            return locations[key][0]
        present_keys = sorted(locations.iterkeys(), key=get_group)
        # We don't have an ordering for keys in the in-memory object, but
        # let's process the in-memory ones first.
        present_keys = list(unadded_keys) + present_keys
        # Now grab all of the ones from other sources
        source_keys = [(self, present_keys)]
        source_keys.extend(source_result)
        return source_keys

    def _get_remaining_record_stream(self, keys, orig_keys, ordering,
                                     include_delta_closure):
        """Get a stream of records for keys.

        :param keys: The keys to include.
        :param ordering: one of 'unordered', 'topological', 'groupcompress' or
            'as-requested'
        :param include_delta_closure: If True then the closure across any
            compression parents will be included (in the opaque data).
        :return: An iterator of ContentFactory objects, each of which is only
            valid until the iterator is advanced.
        """
        # Cheap: iterate
        locations = self._index.get_build_details(keys)
        unadded_keys = set(self._unadded_refs).intersection(keys)
        missing = keys.difference(locations)
        missing.difference_update(unadded_keys)
        (fallback_parent_map, key_to_source_map,
         source_result) = self._find_from_fallback(missing)
        if ordering in ('topological', 'groupcompress'):
            # would be better to not globally sort initially but instead
            # start with one key, recurse to its oldest parent, then grab
            # everything in the same group, etc.
            parent_map = dict((key, details[2]) for key, details in
                locations.iteritems())
            for key in unadded_keys:
                parent_map[key] = self._unadded_refs[key]
            parent_map.update(fallback_parent_map)
            source_keys = self._get_ordered_source_keys(ordering, parent_map,
                                                        key_to_source_map)
        elif ordering == 'as-requested':
            source_keys = self._get_as_requested_source_keys(orig_keys,
                locations, unadded_keys, key_to_source_map)
        else:
            # We want to yield the keys in a semi-optimal (read-wise) ordering.
            # Otherwise we thrash the _group_cache and destroy performance.
            source_keys = self._get_io_ordered_source_keys(locations,
                unadded_keys, source_result)
        for key in missing:
            yield AbsentContentFactory(key)
        manager = None
        last_read_memo = None
        # TODO: This works fairly well at batching up existing groups into a
        #       streamable format, and possibly allowing for taking one big
        #       group and splitting it when it isn't fully utilized.
        #       However, it doesn't allow us to find under-utilized groups and
        #       combine them into a bigger group on the fly.
        #       (Consider the issue with how chk_map inserts texts
        #       one-at-a-time.) This could be done at insert_record_stream()
        #       time, but it probably would decrease the number of
        #       bytes-on-the-wire for fetch.
        for source, keys in source_keys:
            if source is self:
                for key in keys:
                    if key in self._unadded_refs:
                        if manager is not None:
                            for factory in manager.get_record_stream():
                                yield factory
                            last_read_memo = manager = None
                        bytes, sha1 = self._compressor.extract(key)
                        parents = self._unadded_refs[key]
                        yield FulltextContentFactory(key, parents, sha1, bytes)
                    else:
                        index_memo, _, parents, (method, _) = locations[key]
                        read_memo = index_memo[0:3]
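                        # The first three fields of index_memo identify the
                        # stored block; while consecutive keys come from the
                        # same block we keep adding factories to the current
                        # manager instead of re-reading the block.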
                        if last_read_memo != read_memo:
                            # We are starting a new block. If we have a
                            # manager, we have found everything that fits for
                            # now, so yield records
                            if manager is not None:
                                for factory in manager.get_record_stream():
                                    yield factory
                            # Now start a new manager
                            block = self._get_block(index_memo)
                            manager = _LazyGroupContentManager(block)
                            last_read_memo = read_memo
                        start, end = index_memo[3:5]
                        manager.add_factory(key, parents, start, end)
            else:
                if manager is not None:
                    for factory in manager.get_record_stream():
                        yield factory
                    last_read_memo = manager = None
                for record in source.get_record_stream(keys, ordering,
                                                       include_delta_closure):
                    yield record
        if manager is not None:
            for factory in manager.get_record_stream():
                yield factory

    def get_sha1s(self, keys):
        """See VersionedFiles.get_sha1s()."""
        result = {}
        for record in self.get_record_stream(keys, 'unordered', True):
            if record.sha1 is not None:
                result[record.key] = record.sha1
            else:
                if record.storage_kind != 'absent':
                    result[record.key] = osutils.sha_string(
                        record.get_bytes_as('fulltext'))
        return result

    def insert_record_stream(self, stream):
        """Insert a record stream into this container.

        :param stream: A stream of records to insert.
        :return: None
        :seealso VersionedFiles.get_record_stream:
        """
        # XXX: Setting random_id=True makes
        # test_insert_record_stream_existing_keys fail for groupcompress and
        # groupcompress-nograph; this needs to be revisited while addressing
        # 'bzr branch' performance issues.
        for _ in self._insert_record_stream(stream, random_id=False):
            pass

    def _insert_record_stream(self, stream, random_id=False, nostore_sha=None,
                              reuse_blocks=True):
        """Internal core to insert a record stream into this container.

        This helper function has a different interface from
        insert_record_stream, so that add_lines can stay minimal while still
        returning the data it needs.

        :param stream: A stream of records to insert.
        :param nostore_sha: If the sha1 of a given text matches nostore_sha,
            raise ExistingContent, rather than committing the new text.
        :param reuse_blocks: If the source is streaming from
            groupcompress-blocks, just insert the blocks as-is, rather than
            expanding the texts and inserting again.
        :return: An iterator over the sha1 of the inserted records.
        :seealso insert_record_stream:
        :seealso add_lines:
        """
        adapters = {}
        def get_adapter(adapter_key):
            try:
                return adapters[adapter_key]
            except KeyError:
                adapter_factory = adapter_registry.get(adapter_key)
                adapter = adapter_factory(self)
                adapters[adapter_key] = adapter
                return adapter
        # This will go up to fulltexts for gc to gc fetching, which isn't
        # ideal.
        self._compressor = GroupCompressor()
        self._unadded_refs = {}
        keys_to_add = []
        def flush():
            bytes = self._compressor.flush().to_bytes()
            index, start, length = self._access.add_raw_records(
                [(None, len(bytes))], bytes)[0]
            nodes = []
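            # Each index value is '<group start> <group length> <text start>
            # <text end>': where the compressed group was just written
            # (start/length from add_raw_records), plus the record's offsets
            # within that group, as later parsed by
            # _GCGraphIndex._node_to_position().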
            for key, reads, refs in keys_to_add:
                nodes.append((key, "%d %d %s" % (start, length, reads), refs))
            self._index.add_records(nodes, random_id=random_id)
            self._unadded_refs = {}
            del keys_to_add[:]
            self._compressor = GroupCompressor()

        last_prefix = None
        max_fulltext_len = 0
        max_fulltext_prefix = None
        insert_manager = None
        block_start = None
        block_length = None
        # XXX: TODO: remove this, it is just for safety checking for now
        inserted_keys = set()
        for record in stream:
            # Raise an error when a record is missing.
            if record.storage_kind == 'absent':
                raise errors.RevisionNotPresent(record.key, self)
            if random_id:
                if record.key in inserted_keys:
                    trace.note('Insert claimed random_id=True,'
                               ' but then inserted %r two times', record.key)
                    continue
                inserted_keys.add(record.key)
            if reuse_blocks:
                # If the reuse_blocks flag is set, check to see if we can just
                # copy a groupcompress block as-is.
                if record.storage_kind == 'groupcompress-block':
                    # Insert the raw block into the target repo
                    insert_manager = record._manager
                    insert_manager._check_rebuild_block()
                    bytes = record._manager._block.to_bytes()
                    _, start, length = self._access.add_raw_records(
                        [(None, len(bytes))], bytes)[0]
                    del bytes
                    block_start = start
                    block_length = length
                if record.storage_kind in ('groupcompress-block',
                                           'groupcompress-block-ref'):
                    if insert_manager is None:
                        raise AssertionError('No insert_manager set')
                    value = "%d %d %d %d" % (block_start, block_length,
                                             record._start, record._end)
                    nodes = [(record.key, value, (record.parents,))]
                    # TODO: Consider buffering up many nodes to be added, not
                    #       sure how much overhead this has, but we're seeing
                    #       ~23s / 120s in add_records calls
                    self._index.add_records(nodes, random_id=random_id)
                    continue
            try:
                bytes = record.get_bytes_as('fulltext')
            except errors.UnavailableRepresentation:
                adapter_key = record.storage_kind, 'fulltext'
                adapter = get_adapter(adapter_key)
                bytes = adapter.get_bytes(record)
            if len(record.key) > 1:
                prefix = record.key[0]
                soft = (prefix == last_prefix)
            else:
                prefix = None
                soft = False
            if max_fulltext_len < len(bytes):
                max_fulltext_len = len(bytes)
                max_fulltext_prefix = prefix
            (found_sha1, start_point, end_point,
             type) = self._compressor.compress(record.key,
                                               bytes, record.sha1, soft=soft,
                                               nostore_sha=nostore_sha)
            # delta_ratio = float(len(bytes)) / (end_point - start_point)
            # Check if we want to continue to include that text
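            # Heuristic: stay in the current group while we are on the
            # file-id that produced the largest fulltext and the group is
            # still under twice that fulltext's size; otherwise cap groups at
            # 4MB, or at 2MB once we have moved on to a different file-id.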
            if (prefix == max_fulltext_prefix
                and end_point < 2 * max_fulltext_len):
                # As long as we are on the same file_id, we will fill at least
                # 2 * max_fulltext_len
                start_new_block = False
            elif end_point > 4*1024*1024:
                start_new_block = True
            elif (prefix is not None and prefix != last_prefix
                  and end_point > 2*1024*1024):
                start_new_block = True
            else:
                start_new_block = False
            last_prefix = prefix
            if start_new_block:
                self._compressor.pop_last()
                flush()
                max_fulltext_len = len(bytes)
                (found_sha1, start_point, end_point,
                 type) = self._compressor.compress(record.key, bytes,
                                                   record.sha1)
            if record.key[-1] is None:
                key = record.key[:-1] + ('sha1:' + found_sha1,)
            else:
                key = record.key
            self._unadded_refs[key] = record.parents
            yield found_sha1
            keys_to_add.append((key, '%d %d' % (start_point, end_point),
                (record.parents,)))
        if keys_to_add:
            flush()
        self._compressor = None

    def iter_lines_added_or_present_in_keys(self, keys, pb=None):
        """Iterate over the lines in the versioned files from keys.

        This may return lines from other keys. Each item the returned
        iterator yields is a tuple of a line and the key of a text that the
        line is present in (not introduced in).

        Ordering of results is in whatever order is most suitable for the
        underlying storage format.

        If a progress bar is supplied, it may be used to indicate progress.
        The caller is responsible for cleaning up progress bars (because this
        is an iterator).

        NOTES:
         * Lines are normalised by the underlying store: they will all have \n
           terminators.
         * Lines are returned in arbitrary order.

        :return: An iterator over (line, key).
        """
        if pb is None:
            # 'progress' is not among this module's top-level imports, so
            # import it locally for the DummyProgress fallback.
            from bzrlib import progress
            pb = progress.DummyProgress()
        keys = set(keys)
        total = len(keys)
        # we don't care about inclusions, the caller cares.
        # but we need to set up a list of records to visit.
        # we need key, position, length
        for key_idx, record in enumerate(self.get_record_stream(keys,
            'unordered', True)):
            # XXX: todo - optimise to use less than full texts.
            key = record.key
            pb.update('Walking content', key_idx, total)
            if record.storage_kind == 'absent':
                raise errors.RevisionNotPresent(key, self)
            lines = osutils.split_lines(record.get_bytes_as('fulltext'))
            for line in lines:
                yield line, key
        pb.update('Walking content', total, total)

    def keys(self):
        """See VersionedFiles.keys."""
        if 'evil' in debug.debug_flags:
            trace.mutter_callsite(2, "keys scales with size of history")
        sources = [self._index] + self._fallback_vfs
        result = set()
        for source in sources:
            result.update(source.keys())
        return result


class _GCGraphIndex(object):
    """Mapper from GroupCompressVersionedFiles needs into GraphIndex storage."""

    def __init__(self, graph_index, is_locked, parents=True,
        add_callback=None):
        """Construct a _GCGraphIndex on a graph_index.

        :param graph_index: An implementation of bzrlib.index.GraphIndex.
        :param is_locked: A callback that returns True if the index is locked
            and thus usable.
        :param parents: If True, record parents; if not, do not record
            parents.
        :param add_callback: If not None, allow additions to the index and call
            this callback with a list of added GraphIndex nodes:
            [(node, value, node_refs), ...]
        """
        self._add_callback = add_callback
        self._graph_index = graph_index
        self._parents = parents
        self.has_graph = parents
        self._is_locked = is_locked

    def add_records(self, records, random_id=False):
        """Add multiple records to the index.

        This function does not insert data into the immutable GraphIndex
        backing this index; instead it prepares data for insertion by the
        caller, checks that it is safe to insert, and then calls
        self._add_callback with the prepared GraphIndex nodes.

        :param records: a list of tuples:
                         (key, value, node_refs).
        :param random_id: If True the ids being added were randomly generated
            and no check for existence will be performed.
        """
        if not self._add_callback:
            raise errors.ReadOnlyError(self)
        # we hope there are no repositories with inconsistent parentage
        # anymore.

        changed = False
        keys = {}
        for (key, value, refs) in records:
            if not self._parents:
                if refs:
                    for ref in refs:
                        if ref:
                            raise errors.KnitCorrupt(self,
                                "attempt to add node with parents "
                                "in parentless index.")
                    refs = ()
                    changed = True
            keys[key] = (value, refs)
        # check for dups
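        # Keys already present in the index with matching refs are dropped
        # from the batch; an existing entry whose refs differ from what we
        # were asked to add indicates corruption, so raise KnitCorrupt.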
        if not random_id:
            present_nodes = self._get_entries(keys)
            for (index, key, value, node_refs) in present_nodes:
                if node_refs != keys[key][1]:
                    raise errors.KnitCorrupt(self, "inconsistent details in add_records"
                        ": %s %s" % ((value, node_refs), keys[key]))
                del keys[key]
                changed = True
        if changed:
            result = []
            if self._parents:
                for key, (value, node_refs) in keys.iteritems():
                    result.append((key, value, node_refs))
            else:
                for key, (value, node_refs) in keys.iteritems():
                    result.append((key, value))
            records = result
        self._add_callback(records)

    def _check_read(self):
        """Raise an exception if reads are not permitted."""
        if not self._is_locked():
            raise errors.ObjectNotLocked(self)

    def _check_write_ok(self):
        """Raise an exception if writes are not permitted."""
        if not self._is_locked():
            raise errors.ObjectNotLocked(self)

    def _get_entries(self, keys, check_present=False):
        """Get the entries for keys.

        Note: Callers are responsible for checking that the index is locked
        before calling this method.

        :param keys: An iterable of index key tuples.
        """
        keys = set(keys)
        found_keys = set()
        if self._parents:
            for node in self._graph_index.iter_entries(keys):
                yield node
                found_keys.add(node[1])
        else:
            # adapt parentless index to the rest of the code.
            for node in self._graph_index.iter_entries(keys):
                yield node[0], node[1], node[2], ()
                found_keys.add(node[1])
        if check_present:
            missing_keys = keys.difference(found_keys)
            if missing_keys:
                raise errors.RevisionNotPresent(missing_keys.pop(), self)

    def get_parent_map(self, keys):
        """Get a map of the parents of keys.

        :param keys: The keys to look up parents for.
        :return: A mapping from keys to parents. Absent keys are absent from
            the mapping.
        """
        self._check_read()
        nodes = self._get_entries(keys)
        result = {}
        if self._parents:
            for node in nodes:
                result[node[1]] = node[3][0]
        else:
            for node in nodes:
                result[node[1]] = None
        return result

    def get_build_details(self, keys):
        """Get the various build details for keys.

        Ghosts are omitted from the result.

        :param keys: An iterable of keys.
        :return: A dict of key:
            (index_memo, compression_parent, parents, record_details).
            index_memo
                opaque structure to pass to read_records to extract the raw
                data
            compression_parent
                Content that this record is built upon, may be None
            parents
                Logical parents of this node
            record_details
                extra information about the content which needs to be passed to
                Factory.parse_record
        """
        self._check_read()
        result = {}
        entries = self._get_entries(keys)
        for entry in entries:
            key = entry[1]
            if not self._parents:
                parents = None
            else:
                parents = entry[3][0]
            method = 'group'
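            # groupcompress records never have a compression parent in the
            # index, so compression_parent is always None and record_details
            # is simply ('group', None).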
            result[key] = (self._node_to_position(entry),
                                  None, parents, (method, None))
        return result

    def keys(self):
        """Get all the keys in the collection.

        The keys are not ordered.
        """
        self._check_read()
        return [node[1] for node in self._graph_index.iter_all_entries()]

    def _node_to_position(self, node):
        """Convert an index value to position details."""
        bits = node[2].split(' ')
        # It would be nice not to read the entire gzip.
        start = int(bits[0])
        stop = int(bits[1])
        basis_end = int(bits[2])
        delta_end = int(bits[3])
        return node[0], start, stop, basis_end, delta_end


from bzrlib._groupcompress_py import (
    apply_delta,
    apply_delta_to_source,
    encode_base128_int,
    decode_base128_int,
    LinesDeltaIndex,
    )
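# Prefer the compiled extension when it is available; it overrides the pure
# Python implementations imported above.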
try:
    from bzrlib._groupcompress_pyx import (
        apply_delta,
        apply_delta_to_source,
        DeltaIndex,
        encode_base128_int,
        decode_base128_int,
        )
    GroupCompressor = PyrexGroupCompressor
except ImportError:
    GroupCompressor = PythonGroupCompressor