# Copyright (C) 2008, 2009 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

"""Core compression logic for compressing streams of related files."""

import time
import zlib
try:
    import pylzma
except ImportError:
    pylzma = None

from bzrlib import (
    annotate,
    debug,
    errors,
    graph as _mod_graph,
    knit,
    osutils,
    pack,
    trace,
    )
from bzrlib.btree_index import BTreeBuilder
from bzrlib.lru_cache import LRUSizeCache
from bzrlib.tsort import topo_sort
from bzrlib.versionedfile import (
    adapter_registry,
    AbsentContentFactory,
    ChunkedContentFactory,
    FulltextContentFactory,
    VersionedFiles,
    )

# Minimum number of uncompressed bytes to try to fetch at once when retrieving
# groupcompress blocks.
BATCH_SIZE = 2**16

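# lzma output is currently hard-disabled: the leading 'False' keeps it off
# even when pylzma is importable.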
_USE_LZMA = False and (pylzma is not None)

# osutils.sha_string('')
_null_sha1 = 'da39a3ee5e6b4b0d3255bfef95601890afd80709'

def sort_gc_optimal(parent_map):
    """Sort and group the keys in parent_map into groupcompress order.

    groupcompress is defined (currently) as reverse-topological order, grouped
    by the key prefix.

    :return: A sorted-list of keys
    """
    # groupcompress ordering is approximately reverse topological,
    # properly grouped by file-id.
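    # For example (illustrative keys only): given
    #   {('f1', 'b'): (('f1', 'a'),), ('f1', 'a'): (), ('f2', 'a'): ()}
    # the keys group by prefix and each group comes out newest-first:
    #   [('f1', 'b'), ('f1', 'a'), ('f2', 'a')]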
    per_prefix_map = {}
    for key, value in parent_map.iteritems():
        if isinstance(key, str) or len(key) == 1:
            prefix = ''
        else:
            prefix = key[0]
        try:
            per_prefix_map[prefix][key] = value
        except KeyError:
            per_prefix_map[prefix] = {key: value}

    present_keys = []
    for prefix in sorted(per_prefix_map):
        present_keys.extend(reversed(topo_sort(per_prefix_map[prefix])))
    return present_keys


# The max zlib window size is 32kB, so if we set 'max_size' output of the
# decompressor to the requested bytes + 32kB, then we should guarantee
# num_bytes coming out.
_ZLIB_DECOMP_WINDOW = 32*1024

class GroupCompressBlock(object):
    """An object which maintains the internal structure of the compressed data.

    This tracks the meta info (start of text, length, type, etc.)
    """

    # Group Compress Block v1 Zlib
    GCB_HEADER = 'gcb1z\n'
    # Group Compress Block v1 Lzma
    GCB_LZ_HEADER = 'gcb1l\n'
    GCB_KNOWN_HEADERS = (GCB_HEADER, GCB_LZ_HEADER)
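
    # A serialised block (see to_bytes / _parse_bytes) looks like:
    #   'gcb1z\n' or 'gcb1l\n'     6-byte header selecting zlib or lzma
    #   '%d\n' % z_content_length  compressed payload length, base-10 ascii
    #   '%d\n' % content_length    uncompressed payload length
    #   <z_content>                the compressed payload itself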

    def __init__(self):
        # map by key? or just order in file?
        self._compressor_name = None
        self._z_content = None
        self._z_content_decompressor = None
        self._z_content_length = None
        self._content_length = None
        self._content = None
        self._content_chunks = None

    def __len__(self):
        # This is the maximum number of bytes this object will reference if
        # everything is decompressed. However, if we decompress less than
        # everything... (this would cause some problems for LRUSizeCache)
        return self._content_length + self._z_content_length

    def _ensure_content(self, num_bytes=None):
        """Make sure that content has been expanded enough.

        :param num_bytes: Ensure that we have extracted at least num_bytes of
            content. If None, consume everything
        """
        # TODO: If we re-use the same content block at different times during
        #       get_record_stream(), it is possible that the first pass will
        #       get inserted, triggering an extract/_ensure_content() which
        #       will get rid of _z_content. And then the next use of the block
        #       will try to access _z_content (to send it over the wire), and
        #       fail because it is already extracted. Consider never releasing
        #       _z_content because of this.
        if num_bytes is None:
            num_bytes = self._content_length
        elif (self._content_length is not None
              and num_bytes > self._content_length):
            raise AssertionError(
                'requested num_bytes (%d) > content length (%d)'
                % (num_bytes, self._content_length))
        # Expand the content if required
        if self._content is None:
            if self._content_chunks is not None:
                self._content = ''.join(self._content_chunks)
                self._content_chunks = None
        if self._content is None:
            if self._z_content is None:
                raise AssertionError('No content to decompress')
            if self._z_content == '':
                self._content = ''
            elif self._compressor_name == 'lzma':
                # We don't do partial lzma decomp yet
                self._content = pylzma.decompress(self._z_content)
            elif self._compressor_name == 'zlib':
                # Start a zlib decompressor
                if num_bytes is None:
                    self._content = zlib.decompress(self._z_content)
                else:
                    self._z_content_decompressor = zlib.decompressobj()
                    # Seed the decompressor with the uncompressed bytes, so
                    # that the rest of the code is simplified
                    self._content = self._z_content_decompressor.decompress(
                        self._z_content, num_bytes + _ZLIB_DECOMP_WINDOW)
            else:
                raise AssertionError('Unknown compressor: %r'
                                     % self._compressor_name)
        # Any bytes remaining to be decompressed will be in the decompressor's
        # 'unconsumed_tail'

        # Do we have enough bytes already?
        if num_bytes is not None and len(self._content) >= num_bytes:
            return
        if num_bytes is None and self._z_content_decompressor is None:
            # We must have already decompressed everything
            return
        # If we got this far, and don't have a decompressor, something is wrong
        if self._z_content_decompressor is None:
            raise AssertionError(
                'No decompressor to decompress %d bytes' % num_bytes)
        remaining_decomp = self._z_content_decompressor.unconsumed_tail
        if num_bytes is None:
            if remaining_decomp:
                # We don't know how much is left, but we'll decompress it all
                self._content += self._z_content_decompressor.decompress(
                    remaining_decomp)
                # Note: There's what I consider a bug in zlib.decompressobj:
                #       if you pass back the entire unconsumed_tail, but this
                #       time without a max-size, it doesn't reset
                #       unconsumed_tail to ''. However, we know we are done
                #       with the whole stream
                self._z_content_decompressor = None
            # XXX: Why is this the only place in this routine we set this?
            self._content_length = len(self._content)
        else:
            if not remaining_decomp:
                raise AssertionError('Nothing left to decompress')
            needed_bytes = num_bytes - len(self._content)
            # We always set max_size to 32kB over the minimum needed, so that
            # zlib will give us as much as we really want.
            # TODO: If this isn't good enough, we could make a loop here,
            #       that keeps expanding the request until we get enough
            self._content += self._z_content_decompressor.decompress(
                remaining_decomp, needed_bytes + _ZLIB_DECOMP_WINDOW)
            if len(self._content) < num_bytes:
                raise AssertionError('%d bytes wanted, only %d available'
                                     % (num_bytes, len(self._content)))
            if not self._z_content_decompressor.unconsumed_tail:
                # The stream is finished
                self._z_content_decompressor = None

    def _parse_bytes(self, bytes, pos):
        """Read the various lengths from the header.

        This also populates the various 'compressed' buffers.

        :param bytes: The complete serialised block
        :param pos: The offset just past the 6-byte header, where the length
            fields begin
        """
        # At present, we have 2 integers for the compressed and uncompressed
        # content. In base10 (ascii) 14 bytes can represent > 1TB, so to avoid
        # checking too far, cap the search to 14 bytes.
        pos2 = bytes.index('\n', pos, pos + 14)
        self._z_content_length = int(bytes[pos:pos2])
        pos = pos2 + 1
        pos2 = bytes.index('\n', pos, pos + 14)
        self._content_length = int(bytes[pos:pos2])
        pos = pos2 + 1
        if len(bytes) != (pos + self._z_content_length):
            # XXX: Define some GCCorrupt error?
            raise AssertionError('Invalid bytes: (%d) != %d + %d' %
                                 (len(bytes), pos, self._z_content_length))
        self._z_content = bytes[pos:]

    @classmethod
    def from_bytes(cls, bytes):
        out = cls()
        if bytes[:6] not in cls.GCB_KNOWN_HEADERS:
            raise ValueError('bytes did not start with any of %r'
                             % (cls.GCB_KNOWN_HEADERS,))
        # XXX: why not test the whole header?
        if bytes[4] == 'z':
            out._compressor_name = 'zlib'
        elif bytes[4] == 'l':
            out._compressor_name = 'lzma'
        else:
            raise ValueError('unknown compressor: %r' % (bytes,))
        out._parse_bytes(bytes, 6)
        return out

    def extract(self, key, start, end, sha1=None):
        """Extract the text for a specific key.

        :param key: The label used for this content
        :param sha1: TODO (should we validate only when sha1 is supplied?)
        :return: The bytes for the content
        """
        if start == end == 0:
            return ''
        self._ensure_content(end)
        # The bytes are 'f' or 'd' for the type, then a variable-length
        # base128 integer for the content size, then the actual content
        # We know that the variable-length integer won't be longer than 5
        # bytes (it takes 5 bytes to encode 2^32)
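        # (base128 here is the usual varint scheme: 7 data bits per byte,
        # least-significant group first, with the high bit set on every byte
        # except the last)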
        c = self._content[start]
        if c == 'f':
            type = 'fulltext'
        else:
            if c != 'd':
                raise ValueError('Unknown content control code: %s'
                                 % (c,))
            type = 'delta'
        content_len, len_len = decode_base128_int(
                            self._content[start + 1:start + 6])
        content_start = start + 1 + len_len
        if end != content_start + content_len:
            raise ValueError('end != len according to field header'
                ' %s != %s' % (end, content_start + content_len))
        if c == 'f':
            bytes = self._content[content_start:end]
        elif c == 'd':
            bytes = apply_delta_to_source(self._content, content_start, end)
        return bytes

    def set_chunked_content(self, content_chunks, length):
        """Set the content of this block to the given chunks."""
        # If we have lots of short lines, it may be more efficient to join
        # the content ahead of time. If the content is <10MiB, we don't really
        # care about the extra memory consumption, so we can just pack it and
        # be done. However, timing showed 18s => 17.9s for repacking 1k revs of
        # mysql, which is below the noise margin
        self._content_length = length
        self._content_chunks = content_chunks
        self._content = None
        self._z_content = None

    def set_content(self, content):
        """Set the content of this block."""
        self._content_length = len(content)
        self._content = content
        self._z_content = None

    def _create_z_content_using_lzma(self):
        if self._content_chunks is not None:
            self._content = ''.join(self._content_chunks)
            self._content_chunks = None
        if self._content is None:
            raise AssertionError('Nothing to compress')
        self._z_content = pylzma.compress(self._content)
        self._z_content_length = len(self._z_content)

    def _create_z_content_from_chunks(self):
        compressor = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION)
        compressed_chunks = map(compressor.compress, self._content_chunks)
        compressed_chunks.append(compressor.flush())
        self._z_content = ''.join(compressed_chunks)
        self._z_content_length = len(self._z_content)

    def _create_z_content(self):
        if self._z_content is not None:
            return
        if _USE_LZMA:
            self._create_z_content_using_lzma()
            return
        if self._content_chunks is not None:
            self._create_z_content_from_chunks()
            return
        self._z_content = zlib.compress(self._content)
        self._z_content_length = len(self._z_content)

    def to_bytes(self):
        """Encode the information into a byte stream."""
        self._create_z_content()
        if _USE_LZMA:
            header = self.GCB_LZ_HEADER
        else:
            header = self.GCB_HEADER
        chunks = [header,
                  '%d\n%d\n' % (self._z_content_length, self._content_length),
                  self._z_content,
                 ]
        return ''.join(chunks)

    def _dump(self, include_text=False):
        """Take this block, and spit out a human-readable structure.

        :param include_text: Inserts also include the text bytes; choose
            whether you want them displayed in the dump or not.
        :return: A dump of the given block. The layout is something like:
            [('f', length), ('d', delta_length, text_length, [delta_info])]
            delta_info := [('i', num_bytes, text), ('c', offset, num_bytes),
            ...]
        """
        self._ensure_content()
        result = []
        pos = 0
        while pos < self._content_length:
            kind = self._content[pos]
            pos += 1
            if kind not in ('f', 'd'):
                raise ValueError('invalid kind character: %r' % (kind,))
            content_len, len_len = decode_base128_int(
                                self._content[pos:pos + 5])
            pos += len_len
            if content_len + pos > self._content_length:
                raise ValueError('invalid content_len %d for record @ pos %d'
                                 % (content_len, pos - len_len - 1))
            if kind == 'f': # Fulltext
                if include_text:
                    text = self._content[pos:pos+content_len]
                    result.append(('f', content_len, text))
                else:
                    result.append(('f', content_len))
            elif kind == 'd': # Delta
                delta_content = self._content[pos:pos+content_len]
                delta_info = []
                # The first entry in a delta is the decompressed length
                decomp_len, delta_pos = decode_base128_int(delta_content)
                result.append(('d', content_len, decomp_len, delta_info))
                measured_len = 0
                while delta_pos < content_len:
                    c = ord(delta_content[delta_pos])
                    delta_pos += 1
                    if c & 0x80: # Copy
                        (offset, length,
                         delta_pos) = decode_copy_instruction(delta_content, c,
                                                              delta_pos)
                        if include_text:
                            text = self._content[offset:offset+length]
                            delta_info.append(('c', offset, length, text))
                        else:
                            delta_info.append(('c', offset, length))
                        measured_len += length
                    else: # Insert
                        if include_text:
                            txt = delta_content[delta_pos:delta_pos+c]
                        else:
                            txt = ''
                        delta_info.append(('i', c, txt))
                        measured_len += c
                        delta_pos += c
                if delta_pos != content_len:
                    raise ValueError('Delta consumed a bad number of bytes:'
                                     ' %d != %d' % (delta_pos, content_len))
                if measured_len != decomp_len:
                    raise ValueError('Delta claimed fulltext was %d bytes, but'
                                     ' extraction resulted in %d bytes'
                                     % (decomp_len, measured_len))
            pos += content_len
        return result


class _LazyGroupCompressFactory(object):
    """Yield content from a GroupCompressBlock on demand."""

    def __init__(self, key, parents, manager, start, end, first):
        """Create a _LazyGroupCompressFactory

        :param key: The key of just this record
        :param parents: The parents of this key (possibly None)
        :param manager: The _LazyGroupContentManager that owns the
            GroupCompressBlock this record lives in
        :param start: Offset of the first byte for this record in the
            uncompressed content
        :param end: Offset of the byte just after the end of this record
            (ie, bytes = content[start:end])
        :param first: Is this the first Factory for the given block?
        """
        self.key = key
        self.parents = parents
        self.sha1 = None
        # Note: This attribute coupled with Manager._factories creates a
        #       reference cycle. Perhaps we would rather use a weakref(), or
        #       find an appropriate time to release the ref. After the first
        #       get_bytes_as call? After Manager.get_record_stream() returns
        #       the object?
        self._manager = manager
        self._bytes = None
        self.storage_kind = 'groupcompress-block'
        if not first:
            self.storage_kind = 'groupcompress-block-ref'
        self._first = first
        self._start = start
        self._end = end

    def __repr__(self):
        return '%s(%s, first=%s)' % (self.__class__.__name__,
            self.key, self._first)

    def get_bytes_as(self, storage_kind):
        if storage_kind == self.storage_kind:
            if self._first:
                # The first factory for a block carries the wire bytes for
                # the whole group (header plus the gc block itself)
                return self._manager._wire_bytes()
            else:
                return ''
        if storage_kind in ('fulltext', 'chunked'):
            if self._bytes is None:
                # Grab and cache the raw bytes for this entry
                # and break the ref-cycle with _manager since we don't need it
                # anymore
                self._manager._prepare_for_extract()
                block = self._manager._block
                self._bytes = block.extract(self.key, self._start, self._end)
                # There are code paths that first extract as fulltext, and then
                # extract as storage_kind (smart fetch). So we don't break the
                # refcycle here, but instead in manager.get_record_stream()
            if storage_kind == 'fulltext':
                return self._bytes
            else:
                return [self._bytes]
        raise errors.UnavailableRepresentation(self.key, storage_kind,
                                               self.storage_kind)


class _LazyGroupContentManager(object):
    """This manages a group of _LazyGroupCompressFactory objects."""

    _max_cut_fraction = 0.75 # We allow a block to be trimmed to 75% of
                             # current size, and still be considered
                             # reusable
    _full_block_size = 4*1024*1024
    _full_mixed_block_size = 2*1024*1024
    _full_enough_block_size = 3*1024*1024 # size at which we won't repack
    _full_enough_mixed_block_size = 2*768*1024 # 1.5MB

    def __init__(self, block):
        self._block = block
        # We need to preserve the ordering
        self._factories = []
        self._last_byte = 0

    def add_factory(self, key, parents, start, end):
        if not self._factories:
            first = True
        else:
            first = False
        # Note that this creates a reference cycle....
        factory = _LazyGroupCompressFactory(key, parents, self,
            start, end, first=first)
        # max() works here, but as a function call, doing a compare seems to be
        # significantly faster, timeit says 250ms for max() and 100ms for the
        # comparison
        if end > self._last_byte:
            self._last_byte = end
        self._factories.append(factory)

    def get_record_stream(self):
        """Get a record for all keys added so far."""
        for factory in self._factories:
            yield factory
            # Break the ref-cycle
            factory._bytes = None
            factory._manager = None
        # TODO: Consider setting self._factories = None after the above loop,
        #       as it will break the reference cycle

    def _trim_block(self, last_byte):
        """Create a new GroupCompressBlock, with just some of the content."""
        # None of the factories need to be adjusted, because the content is
        # located in an identical place. Just that some of the unreferenced
        # trailing bytes are stripped
        trace.mutter('stripping trailing bytes from groupcompress block'
                     ' %d => %d', self._block._content_length, last_byte)
        new_block = GroupCompressBlock()
        self._block._ensure_content(last_byte)
        new_block.set_content(self._block._content[:last_byte])
        self._block = new_block

    def _rebuild_block(self):
        """Create a new GroupCompressBlock with only the referenced texts."""
        compressor = GroupCompressor()
        tstart = time.time()
        old_length = self._block._content_length
        end_point = 0
        for factory in self._factories:
            bytes = factory.get_bytes_as('fulltext')
            (found_sha1, start_point, end_point,
             type) = compressor.compress(factory.key, bytes, factory.sha1)
            # Now update this factory with the new offsets, etc
            factory.sha1 = found_sha1
            factory._start = start_point
            factory._end = end_point
        self._last_byte = end_point
        new_block = compressor.flush()
        # TODO: Should we check that new_block really *is* smaller than the old
        #       block? It seems hard to come up with a method that it would
        #       expand, since we do full compression again. Perhaps based on a
        #       request that ends up poorly ordered?
        delta = time.time() - tstart
        self._block = new_block
        trace.mutter('creating new compressed block on-the-fly in %.3fs'
                     ' %d bytes => %d bytes', delta, old_length,
                     self._block._content_length)

    def _prepare_for_extract(self):
        """A _LazyGroupCompressFactory is about to extract to fulltext."""
        # We expect that if one child is going to fulltext, all will be. This
        # helps prevent all of them from extracting a small amount at a time.
        # Which in itself isn't terribly expensive, but resizing 2MB 32kB at a
        # time (self._block._content) is a little expensive.
        self._block._ensure_content(self._last_byte)

    def _check_rebuild_action(self):
        """Check to see if our block should be repacked."""
        total_bytes_used = 0
        last_byte_used = 0
        for factory in self._factories:
            total_bytes_used += factory._end - factory._start
            if last_byte_used < factory._end:
                last_byte_used = factory._end
        # If we are using more than half of the bytes from the block, we have
        # nothing else to check
        if total_bytes_used * 2 >= self._block._content_length:
            return None, last_byte_used, total_bytes_used
        # We are using less than 50% of the content. Is the content we are
        # using at the beginning of the block? If so, we can just trim the
        # tail, rather than rebuilding from scratch.
        if total_bytes_used * 2 > last_byte_used:
            return 'trim', last_byte_used, total_bytes_used

        # We are using a small amount of the data, and it isn't just packed
        # nicely at the front, so rebuild the content.
        # Note: This would be *nicer* as a strip-data-from-group, rather than
        #       building it up again from scratch
        #       It might be reasonable to consider the fulltext sizes for
        #       different bits when deciding this, too. As you may have a small
        #       fulltext, and a trivial delta, and you are just trading around
        #       for another fulltext. If we do a simple 'prune' you may end up
        #       expanding many deltas into fulltexts, as well.
        #       If we build a cheap enough 'strip', then we could try a strip,
        #       if that expands the content, we then rebuild.
        return 'rebuild', last_byte_used, total_bytes_used

    def check_is_well_utilized(self):
        """Is the current block considered 'well utilized'?

        This heuristic asks if the current block considers itself to be a fully
        developed group, rather than just a loose collection of data.
        """
        if len(self._factories) == 1:
            # A block of length 1 could always be improved by combining it
            # with other groups - don't look deeper. Even groups larger than
            # the max size could compress well when combined with adjacent
            # versions of the same content.
            return False
        action, last_byte_used, total_bytes_used = self._check_rebuild_action()
        block_size = self._block._content_length
        if total_bytes_used < block_size * self._max_cut_fraction:
            # The referenced content is small enough, relative to the block,
            # that we consider the block under-utilized.
            return False
        # TODO: This code is meant to be the twin of _insert_record_stream's
        #       'start_new_block' logic. It would probably be better to factor
        #       out that logic into a shared location, so that it stays
        #       together better
        # We currently assume a block is properly utilized whenever it is >75%
        # of the size of a 'full' block. In normal operation, a block is
        # considered full when it hits 4MB of same-file content. So any block
        # >3MB is 'full enough'.
        # The only time this isn't true is when a given block has large-object
        # content. (a single file >4MB, etc.)
        # Under these circumstances, we allow a block to grow to
        # 2 x largest_content.  Which means that if a given block had a large
        # object, it may actually be under-utilized. However, given that this
        # is 'pack-on-the-fly' it is probably reasonable to not repack large
        # content blobs on-the-fly. Note that because we return False for all
        # 1-item blobs, we will repack them; we may wish to reevaluate our
        # treatment of large object blobs in the future.
        if block_size >= self._full_enough_block_size:
            return True
        # If a block is <3MB, it still may be considered 'full' if it contains
        # mixed content. The current rule is 2MB of mixed content is considered
        # full. So check to see if this block contains mixed content, and
        # set the threshold appropriately.
        common_prefix = None
        for factory in self._factories:
            prefix = factory.key[:-1]
            if common_prefix is None:
                common_prefix = prefix
            elif prefix != common_prefix:
                # Mixed content, check the size appropriately
                if block_size >= self._full_enough_mixed_block_size:
                    return True
                break
        # The content failed both the mixed check and the single-content check
        # so obviously it is not fully utilized
        # TODO: there is one other constraint that isn't being checked
        #       namely, that the entries in the block are in the appropriate
        #       order. For example, you could insert the entries in exactly
        #       reverse groupcompress order, and we would think that is ok.
        #       (all the right objects are in one group, and it is fully
        #       utilized, etc.) For now, we assume that case is rare,
        #       especially since we should always fetch in 'groupcompress'
        #       order.
        return False

    def _check_rebuild_block(self):
        action, last_byte_used, total_bytes_used = self._check_rebuild_action()
        if action is None:
            return
        if action == 'trim':
            self._trim_block(last_byte_used)
        elif action == 'rebuild':
            self._rebuild_block()
        else:
            raise ValueError('unknown rebuild action: %r' % (action,))

    def _wire_bytes(self):
        """Return a byte stream suitable for transmitting over the wire."""
        self._check_rebuild_block()
        # The outer block starts with:
        #   'groupcompress-block\n'
        #   <length of compressed key info>\n
        #   <length of uncompressed info>\n
        #   <length of gc block>\n
        #   <header bytes>
        #   <gc-block>
        lines = ['groupcompress-block\n']
        # The minimal info we need is the key, the start offset, and the
        # parents. The length and type are encoded in the record itself.
        # However, passing in the other bits makes things easier. Per record
        # we send:
        # 1 line key
        # 1 line with parents, '' for ()
        # 1 line for start offset
        # 1 line for end byte
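        # e.g. a record for key ('file-id', 'rev-id') with one parent
        # ('file-id', 'rev-a') covering bytes [10, 250) becomes:
        #   'file-id\x00rev-id\nfile-id\x00rev-a\n10\n250\n'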
        header_lines = []
        for factory in self._factories:
            key_bytes = '\x00'.join(factory.key)
            parents = factory.parents
            if parents is None:
                parent_bytes = 'None:'
            else:
                parent_bytes = '\t'.join('\x00'.join(key) for key in parents)
            record_header = '%s\n%s\n%d\n%d\n' % (
                key_bytes, parent_bytes, factory._start, factory._end)
            header_lines.append(record_header)
            # TODO: Can we break the refcycle at this point and set
            #       factory._manager = None?
        header_bytes = ''.join(header_lines)
        del header_lines
        header_bytes_len = len(header_bytes)
        z_header_bytes = zlib.compress(header_bytes)
        del header_bytes
        z_header_bytes_len = len(z_header_bytes)
        block_bytes = self._block.to_bytes()
        lines.append('%d\n%d\n%d\n' % (z_header_bytes_len, header_bytes_len,
                                       len(block_bytes)))
        lines.append(z_header_bytes)
        lines.append(block_bytes)
        del z_header_bytes, block_bytes
        return ''.join(lines)

    @classmethod
    def from_bytes(cls, bytes):
        # TODO: This does extra string copying, probably better to do it a
        #       different way
        (storage_kind, z_header_len, header_len,
         block_len, rest) = bytes.split('\n', 4)
        del bytes
        if storage_kind != 'groupcompress-block':
            raise ValueError('Unknown storage kind: %s' % (storage_kind,))
        z_header_len = int(z_header_len)
        if len(rest) < z_header_len:
            raise ValueError('Compressed header len shorter than all bytes')
        z_header = rest[:z_header_len]
        header_len = int(header_len)
        header = zlib.decompress(z_header)
        if len(header) != header_len:
            raise ValueError('invalid length for decompressed bytes')
        del z_header
        block_len = int(block_len)
        if len(rest) != z_header_len + block_len:
            raise ValueError('Invalid length for block')
        block_bytes = rest[z_header_len:]
        del rest
        # So now we have a valid GCB, we just need to parse the factories that
        # were sent to us
        header_lines = header.split('\n')
        del header
        last = header_lines.pop()
        if last != '':
            raise ValueError('header lines did not end with a trailing'
                             ' newline')
        if len(header_lines) % 4 != 0:
            raise ValueError('The header was not an even multiple of 4 lines')
        block = GroupCompressBlock.from_bytes(block_bytes)
        del block_bytes
        result = cls(block)
        for start in xrange(0, len(header_lines), 4):
            # intern()?
            key = tuple(header_lines[start].split('\x00'))
            parents_line = header_lines[start+1]
            if parents_line == 'None:':
                parents = None
            else:
                parents = tuple([tuple(segment.split('\x00'))
                                 for segment in parents_line.split('\t')
                                  if segment])
            start_offset = int(header_lines[start+2])
            end_offset = int(header_lines[start+3])
            result.add_factory(key, parents, start_offset, end_offset)
        return result


def network_block_to_records(storage_kind, bytes, line_end):
    if storage_kind != 'groupcompress-block':
        raise ValueError('Unknown storage kind: %s' % (storage_kind,))
    manager = _LazyGroupContentManager.from_bytes(bytes)
    return manager.get_record_stream()


class _CommonGroupCompressor(object):

    def __init__(self):
        """Create a GroupCompressor."""
        self.chunks = []
        self._last = None
        self.endpoint = 0
        self.input_bytes = 0
        self.labels_deltas = {}
        self._delta_index = None # Set by the children
        self._block = GroupCompressBlock()

    def compress(self, key, bytes, expected_sha, nostore_sha=None, soft=False):
        """Compress lines with label key.

        :param key: A key tuple. It is stored in the output
            for identification of the text during decompression. If the last
            element is 'None' it is replaced with the sha1 of the text -
            e.g. sha1:xxxxxxx.
        :param bytes: The bytes to be compressed
        :param expected_sha: If non-None, the sha the lines are believed to
            have. During compression the sha is calculated; a mismatch will
            cause an error.
        :param nostore_sha: If the computed sha1 sum matches, we will raise
            ExistingContent rather than adding the text.
        :param soft: Do a 'soft' compression. This means that we require larger
            ranges to match to be considered for a copy command.

        :return: The sha1 of lines, the start and end offsets in the delta, and
            the type ('fulltext' or 'delta').

        :seealso: VersionedFiles.add_lines
        """
        if not bytes: # empty, like a dir entry, etc
            if nostore_sha == _null_sha1:
                raise errors.ExistingContent()
            return _null_sha1, 0, 0, 'fulltext'
        # we assume someone knew what they were doing when they passed it in
        if expected_sha is not None:
            sha1 = expected_sha
        else:
            sha1 = osutils.sha_string(bytes)
        if nostore_sha is not None:
            if sha1 == nostore_sha:
                raise errors.ExistingContent()
        if key[-1] is None:
            key = key[:-1] + ('sha1:' + sha1,)
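            # e.g. ('file-id', None) becomes
            # ('file-id', 'sha1:da39a3ee5e6b4b0d3255bfef95601890afd80709')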

        start, end, type = self._compress(key, bytes, len(bytes) / 2, soft)
        return sha1, start, end, type

    def _compress(self, key, bytes, max_delta_size, soft=False):
        """Compress lines with label key.

        :param key: A key tuple. It is stored in the output for identification
            of the text during decompression.

        :param bytes: The bytes to be compressed

        :param max_delta_size: The size above which we issue a fulltext instead
            of a delta.

        :param soft: Do a 'soft' compression. This means that we require larger
            ranges to match to be considered for a copy command.

        :return: The sha1 of lines, the start and end offsets in the delta, and
            the type ('fulltext' or 'delta').
        """
        raise NotImplementedError(self._compress)

    def extract(self, key):
        """Extract a key previously added to the compressor.

        :param key: The key to extract.
        :return: An iterable over bytes and the sha1.
        """
        (start_byte, start_chunk, end_byte, end_chunk) = self.labels_deltas[key]
        delta_chunks = self.chunks[start_chunk:end_chunk]
        stored_bytes = ''.join(delta_chunks)
        if stored_bytes[0] == 'f':
            fulltext_len, offset = decode_base128_int(stored_bytes[1:10])
            data_len = fulltext_len + 1 + offset
            if data_len != len(stored_bytes):
                raise ValueError('Index claimed fulltext len, but stored bytes'
                                 ' claim %s != %s'
                                 % (len(stored_bytes), data_len))
            bytes = stored_bytes[offset + 1:]
        else:
            # XXX: This is inefficient at best
            source = ''.join(self.chunks[:start_chunk])
            if stored_bytes[0] != 'd':
                raise ValueError('Unknown content kind, bytes claim %s'
                                 % (stored_bytes[0],))
            delta_len, offset = decode_base128_int(stored_bytes[1:10])
            data_len = delta_len + 1 + offset
            if data_len != len(stored_bytes):
                raise ValueError('Index claimed delta len, but stored bytes'
                                 ' claim %s != %s'
                                 % (len(stored_bytes), data_len))
            bytes = apply_delta(source, stored_bytes[offset + 1:])
        bytes_sha1 = osutils.sha_string(bytes)
        return bytes, bytes_sha1

    def flush(self):
        """Finish this group, creating a formatted stream.

        After calling this, the compressor should no longer be used
        """
        # TODO: this causes us to 'bloat' to 2x the size of content in the
        #       group. This has an impact for 'commit' of large objects.
        #       One possibility is to use self._content_chunks, and be lazy and
        #       only fill out self._content as a full string when we actually
        #       need it. That would at least drop the peak memory consumption
        #       for 'commit' down to ~1x the size of the largest file, at a
        #       cost of increased complexity within this code. 2x is still <<
        #       3x the size of the largest file, so we are doing ok.
        self._block.set_chunked_content(self.chunks, self.endpoint)
        self.chunks = None
        self._delta_index = None
        return self._block
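
    # A minimal round trip (sketch; GroupCompressor is bound elsewhere in
    # this module to the pyrex or pure-python implementation):
    #   compressor = GroupCompressor()
    #   sha1, start, end, kind = compressor.compress(('key',), 'text\n', None)
    #   block = compressor.flush()
    #   block.extract(('key',), start, end) == 'text\n'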

    def pop_last(self):
        """Call this if you want to 'revoke' the last compression.

        After this, the data structures will be rolled back, but you cannot do
        more compression.
        """
        self._delta_index = None
        del self.chunks[self._last[0]:]
        self.endpoint = self._last[1]
        self._last = None

    def ratio(self):
        """Return the overall compression ratio."""
        return float(self.input_bytes) / float(self.endpoint)


class PythonGroupCompressor(_CommonGroupCompressor):

    def __init__(self):
        """Create a GroupCompressor.

        Used only if the pyrex version is not available.
        """
        super(PythonGroupCompressor, self).__init__()
        self._delta_index = LinesDeltaIndex([])
        # The actual content is managed by LinesDeltaIndex
        self.chunks = self._delta_index.lines

    def _compress(self, key, bytes, max_delta_size, soft=False):
        """see _CommonGroupCompressor._compress"""
        input_len = len(bytes)
        new_lines = osutils.split_lines(bytes)
        out_lines, index_lines = self._delta_index.make_delta(
            new_lines, bytes_length=input_len, soft=soft)
        delta_length = sum(map(len, out_lines))
        if delta_length > max_delta_size:
            # The delta is longer than we allow (max_delta_size), so insert a
            # fulltext instead
            type = 'fulltext'
            out_lines = ['f', encode_base128_int(input_len)]
            out_lines.extend(new_lines)
            index_lines = [False, False]
            index_lines.extend([True] * len(new_lines))
        else:
            # this is a worthy delta, output it
            type = 'delta'
            out_lines[0] = 'd'
            # Update the delta_length to include those two encoded integers
            out_lines[1] = encode_base128_int(delta_length)
        # Before insertion
        start = self.endpoint
        chunk_start = len(self.chunks)
        self._last = (chunk_start, self.endpoint)
        self._delta_index.extend_lines(out_lines, index_lines)
        self.endpoint = self._delta_index.endpoint
        self.input_bytes += input_len
        chunk_end = len(self.chunks)
        self.labels_deltas[key] = (start, chunk_start,
                                   self.endpoint, chunk_end)
        return start, self.endpoint, type


class PyrexGroupCompressor(_CommonGroupCompressor):
    """Produce a serialised group of compressed texts.

    It contains code very similar to SequenceMatcher because it has a similar
    task. However, some key differences apply:

     - there is no junk; we want a minimal edit, not a human-readable diff.
     - we don't filter very common lines (because we don't know where a good
       range will start, and after the first text we want to be emitting
       minimal edits only).
     - we chain the left side, not the right side.
     - we incrementally update the adjacency matrix as new lines are provided.
     - we look for matches in all of the left side, so the routine which does
       the analogous task of find_longest_match does not need to filter on the
       left side.
    """

    def __init__(self):
        super(PyrexGroupCompressor, self).__init__()
        self._delta_index = DeltaIndex()

    def _compress(self, key, bytes, max_delta_size, soft=False):
        """see _CommonGroupCompressor._compress"""
        input_len = len(bytes)
        # By having action/label/sha1/len, we can parse the group if the index
        # was ever destroyed, we have the key in 'label', we know the final
        # bytes are valid from sha1, and we know where to find the end of this
        # record because of 'len'. (the delta record itself will store the
        # total length for the expanded record)
        # 'len: %d\n' costs approximately 1% increase in total data
        # Having the labels at all costs us 9-10% increase, 38% increase for
        # inventory pages, and 5.8% increase for text pages
        # new_chunks = ['label:%s\nsha1:%s\n' % (label, sha1)]
        if self._delta_index._source_offset != self.endpoint:
            raise AssertionError('_source_offset != endpoint'
                ' somehow the DeltaIndex got out of sync with'
                ' the output lines')
        delta = self._delta_index.make_delta(bytes, max_delta_size)
        if delta is None:
            type = 'fulltext'
            enc_length = encode_base128_int(len(bytes))
            len_mini_header = 1 + len(enc_length)
            self._delta_index.add_source(bytes, len_mini_header)
            new_chunks = ['f', enc_length, bytes]
        else:
            type = 'delta'
            enc_length = encode_base128_int(len(delta))
            len_mini_header = 1 + len(enc_length)
            new_chunks = ['d', enc_length, delta]
            self._delta_index.add_delta_source(delta, len_mini_header)
        # Before insertion
        start = self.endpoint
        chunk_start = len(self.chunks)
        # Now output these bytes
        self._output_chunks(new_chunks)
        self.input_bytes += input_len
        chunk_end = len(self.chunks)
        self.labels_deltas[key] = (start, chunk_start,
                                   self.endpoint, chunk_end)
        if self._delta_index._source_offset != self.endpoint:
            raise AssertionError('the delta index is out of sync'
                ' with the output lines %s != %s'
                % (self._delta_index._source_offset, self.endpoint))
        return start, self.endpoint, type

    def _output_chunks(self, new_chunks):
        """Output some chunks.

        :param new_chunks: The chunks to output.
        """
        self._last = (len(self.chunks), self.endpoint)
        endpoint = self.endpoint
        self.chunks.extend(new_chunks)
        endpoint += sum(map(len, new_chunks))
        self.endpoint = endpoint


def make_pack_factory(graph, delta, keylength, inconsistency_fatal=True):
    """Create a factory for creating a pack based groupcompress.

    This is only functional enough to run interface tests; it doesn't try to
    provide a full pack environment.

    :param graph: Store a graph.
    :param delta: Delta compress contents.
    :param keylength: How long keys should be.
    """
    def factory(transport):
        parents = graph
        ref_length = 0
        if graph:
            ref_length = 1
        graph_index = BTreeBuilder(reference_lists=ref_length,
            key_elements=keylength)
        stream = transport.open_write_stream('newpack')
        writer = pack.ContainerWriter(stream.write)
        writer.begin()
        index = _GCGraphIndex(graph_index, lambda:True, parents=parents,
            add_callback=graph_index.add_nodes,
            inconsistency_fatal=inconsistency_fatal)
        access = knit._DirectPackAccess({})
        access.set_writer(writer, graph_index, (transport, 'newpack'))
        result = GroupCompressVersionedFiles(index, access, delta)
        result.stream = stream
        result.writer = writer
        return result
    return factory


def cleanup_pack_group(versioned_files):
    versioned_files.writer.end()
    versioned_files.stream.close()
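
# A pack group made by the factory must be finalised when done (sketch):
#   factory = make_pack_factory(graph=True, delta=False, keylength=1)
#   vf = factory(transport)
#   ... add texts to vf ...
#   cleanup_pack_group(vf)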


class _BatchingBlockFetcher(object):
    """Fetch group compress blocks in batches.
    
    :ivar total_bytes: int of expected number of bytes needed to fetch the
        currently pending batch.
    """

    def __init__(self, gcvf, locations):
        self.gcvf = gcvf
        self.locations = locations
        self.keys = []
        self.batch_memos = {}
        self.memos_to_get = []
        self.total_bytes = 0
        self.last_read_memo = None
        self.manager = None

    def add_key(self, key):
        """Add another to key to fetch.
        
        :return: The estimated number of bytes needed to fetch the batch so
            far.
        """
        self.keys.append(key)
        index_memo, _, _, _ = self.locations[key]
        read_memo = index_memo[0:3]
        # Three possibilities for this read_memo:
        #  - it's already part of this batch; or
        #  - it's not yet part of this batch, but is already cached; or
        #  - it's not yet part of this batch and will need to be fetched.
        if read_memo in self.batch_memos:
            # This read memo is already in this batch.
            return self.total_bytes
        try:
            cached_block = self.gcvf._group_cache[read_memo]
        except KeyError:
            # This read memo is new to this batch, and the data isn't cached
            # either.
            self.batch_memos[read_memo] = None
            self.memos_to_get.append(read_memo)
            byte_length = read_memo[2]
            self.total_bytes += byte_length
        else:
            # This read memo is new to this batch, but cached.
            # Keep a reference to the cached block in batch_memos because it's
            # certain that we'll use it when this batch is processed, but
            # there's a risk that it would fall out of _group_cache between now
            # and then.
            self.batch_memos[read_memo] = cached_block
        return self.total_bytes
        
    def _flush_manager(self):
        if self.manager is not None:
            for factory in self.manager.get_record_stream():
                yield factory
            self.manager = None
            self.last_read_memo = None

    def yield_factories(self, full_flush=False):
        """Yield factories for keys added since the last yield.  They will be
        returned in the order they were added via add_key.
        
        :param full_flush: by default, some results may not be returned in case
            they can be part of the next batch.  If full_flush is True, then
            all results are returned.
        """
        if self.manager is None and not self.keys:
            return
        # Fetch all memos in this batch.
        blocks = self.gcvf._get_blocks(self.memos_to_get)
        # Turn blocks into factories and yield them.
        memos_to_get_stack = list(self.memos_to_get)
        memos_to_get_stack.reverse()
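        # _get_blocks will yield blocks in memos_to_get order; the reversed
        # copy acts as a stack so the next expected memo is at the end.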
        for key in self.keys:
            index_memo, _, parents, _ = self.locations[key]
            read_memo = index_memo[:3]
            if self.last_read_memo != read_memo:
                # We are starting a new block. If we have a
                # manager, we have found everything that fits for
                # now, so yield records
                for factory in self._flush_manager():
                    yield factory
                # Now start a new manager.
                if memos_to_get_stack and memos_to_get_stack[-1] == read_memo:
                    # The next block from _get_blocks will be the block we
                    # need.
                    block_read_memo, block = blocks.next()
                    if block_read_memo != read_memo:
                        raise AssertionError(
                            "block_read_memo out of sync with read_memo"
                            "(%r != %r)" % (block_read_memo, read_memo))
                    self.batch_memos[read_memo] = block
                    memos_to_get_stack.pop()
                else:
                    block = self.batch_memos[read_memo]
                self.manager = _LazyGroupContentManager(block)
                self.last_read_memo = read_memo
            start, end = index_memo[3:5]
            self.manager.add_factory(key, parents, start, end)
        if full_flush:
            for factory in self._flush_manager():
                yield factory
        del self.keys[:]
        self.batch_memos.clear()
        del self.memos_to_get[:]
        self.total_bytes = 0


class GroupCompressVersionedFiles(VersionedFiles):
    """A group-compress based VersionedFiles implementation."""

    def __init__(self, index, access, delta=True, _unadded_refs=None):
        """Create a GroupCompressVersionedFiles object.

        :param index: The index object storing access and graph data.
        :param access: The access object storing raw data.
        :param delta: Whether to delta compress or just entropy compress.
        :param _unadded_refs: private parameter, don't use.
        """
        self._index = index
        self._access = access
        self._delta = delta
        if _unadded_refs is None:
            _unadded_refs = {}
        self._unadded_refs = _unadded_refs
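        # Cache blocks by read memo (up to 50MB) so that repeated extraction
        # from the same group avoids re-reading it from the pack.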
        self._group_cache = LRUSizeCache(max_size=50*1024*1024)
        self._fallback_vfs = []

    def without_fallbacks(self):
        """Return a clone of this object without any fallbacks configured."""
        return GroupCompressVersionedFiles(self._index, self._access,
            self._delta, _unadded_refs=dict(self._unadded_refs))

    def add_lines(self, key, parents, lines, parent_texts=None,
        left_matching_blocks=None, nostore_sha=None, random_id=False,
        check_content=True):
        """Add a text to the store.

        :param key: The key tuple of the text to add.
        :param parents: The parents key tuples of the text to add.
        :param lines: A list of lines. Each line must be a bytestring, and
            all of them except the last must be terminated with \n and
            contain no other \n's. The last line may either contain no \n's
            or a single terminating \n. If the lines list does not meet this
            constraint the add routine may error or may succeed - but you
            will be unable to read the data back accurately. (Checking the
            lines have been split correctly is expensive and extremely
            unlikely to catch bugs so it is not done at runtime unless
            check_content is True.)
        :param parent_texts: An optional dictionary containing the opaque
            representations of some or all of the parents of version_id to
            allow delta optimisations.  VERY IMPORTANT: the texts must be those
            returned by add_lines or data corruption can be caused.
        :param left_matching_blocks: a hint about which areas are common
            between the text and its left-hand-parent.  The format is
            the SequenceMatcher.get_matching_blocks format.
        :param nostore_sha: Raise ExistingContent and do not add the lines to
            the versioned file if the digest of the lines matches this.
        :param random_id: If True a random id has been selected rather than
            an id determined by some deterministic process such as a converter
            from a foreign VCS. When True the backend may choose not to check
            for uniqueness of the resulting key within the versioned file, so
            this should only be done when the result is expected to be unique
            anyway.
        :param check_content: If True, the lines supplied are verified to be
            bytestrings that are correctly formed lines.
        :return: The text sha1, the number of bytes in the text, and an opaque
                 representation of the inserted version which can be provided
                 back to future add_lines calls in the parent_texts dictionary.
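
        For example (a sketch; the shape of keys and parents depends on this
        store's keylength)::

            sha1, num_bytes, _ = vf.add_lines(('file-1', 'rev-1'), [],
                                              ['some text\n'])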
        """
        self._index._check_write_ok()
        self._check_add(key, lines, random_id, check_content)
        if parents is None:
            # The caller might pass None if there is no graph data, but kndx
            # indexes can't directly store that, so we give them
            # an empty tuple instead.
            parents = ()
        # TODO: avoid the double handling below (the lines are wrapped in a
        # record and fed back through the record-stream machinery); for now,
        # just make it work.
        length = sum(map(len, lines))
        record = ChunkedContentFactory(key, parents, None, lines)
        sha1 = list(self._insert_record_stream([record], random_id=random_id,
                                               nostore_sha=nostore_sha))[0]
        return sha1, length, None

    def _add_text(self, key, parents, text, nostore_sha=None, random_id=False):
        """See VersionedFiles._add_text()."""
        self._index._check_write_ok()
        self._check_add(key, None, random_id, check_content=False)
        if text.__class__ is not str:
            raise errors.BzrBadParameterUnicode("text")
        if parents is None:
            # The caller might pass None if there is no graph data, but kndx
            # indexes can't directly store that, so we give them
            # an empty tuple instead.
            parents = ()
        # TODO: avoid the double handling below; for now, just make it work.
        length = len(text)
        record = FulltextContentFactory(key, parents, None, text)
        sha1 = list(self._insert_record_stream([record], random_id=random_id,
                                               nostore_sha=nostore_sha))[0]
        return sha1, length, None

    def add_fallback_versioned_files(self, a_versioned_files):
        """Add a source of texts for texts not present in this knit.

        :param a_versioned_files: A VersionedFiles object.
        """
        self._fallback_vfs.append(a_versioned_files)

    def annotate(self, key):
        """See VersionedFiles.annotate."""
        ann = annotate.Annotator(self)
        return ann.annotate_flat(key)

    def get_annotator(self):
        return annotate.Annotator(self)

    def check(self, progress_bar=None, keys=None):
        """See VersionedFiles.check()."""
        if keys is None:
            keys = self.keys()
            for record in self.get_record_stream(keys, 'unordered', True):
                record.get_bytes_as('fulltext')
        else:
            return self.get_record_stream(keys, 'unordered', True)

    def _check_add(self, key, lines, random_id, check_content):
        """check that version_id and lines are safe to add."""
        version_id = key[-1]
        if version_id is not None:
            if osutils.contains_whitespace(version_id):
                raise errors.InvalidRevisionId(version_id, self)
        self.check_not_reserved_id(version_id)
        # TODO: If random_id==False and the key is already present, we should
        # probably check that the existing content is identical to what is
        # being inserted, and otherwise raise an exception.  This would make
        # the bundle code simpler.
        if check_content:
            self._check_lines_not_unicode(lines)
            self._check_lines_are_lines(lines)

    def get_known_graph_ancestry(self, keys):
        """Get a KnownGraph instance with the ancestry of keys."""
        # Note that this is identical to
        # KnitVersionedFiles.get_known_graph_ancestry, but they don't share
        # ancestry.
        parent_map, missing_keys = self._index.find_ancestry(keys)
        for fallback in self._fallback_vfs:
            if not missing_keys:
                break
            (f_parent_map, f_missing_keys) = fallback._index.find_ancestry(
                                                missing_keys)
            parent_map.update(f_parent_map)
            missing_keys = f_missing_keys
        kg = _mod_graph.KnownGraph(parent_map)
        return kg

    def get_parent_map(self, keys):
        """Get a map of the graph parents of keys.

        :param keys: The keys to look up parents for.
        :return: A mapping from keys to parents. Absent keys are absent from
            the mapping.
        """
        return self._get_parent_map_with_sources(keys)[0]

    def _get_parent_map_with_sources(self, keys):
        """Get a map of the parents of keys.

        :param keys: The keys to look up parents for.
        :return: A tuple. The first element is a mapping from keys to parents.
            Absent keys are absent from the mapping. The second element is a
            list of parent maps, one per source searched: the first entry is
            from this store's own index, the next from the first fallback
            source, and so on.
        """
        result = {}
        sources = [self._index] + self._fallback_vfs
        source_results = []
        missing = set(keys)
        for source in sources:
            if not missing:
                break
            new_result = source.get_parent_map(missing)
            source_results.append(new_result)
            result.update(new_result)
            missing.difference_update(set(new_result))
        return result, source_results

    def _get_blocks(self, read_memos):
        """Get GroupCompressBlocks for the given read_memos.

        :returns: a series of (read_memo, block) pairs, in the order they were
            originally passed.
        """
        cached = {}
        for read_memo in read_memos:
            try:
                block = self._group_cache[read_memo]
            except KeyError:
                pass
            else:
                cached[read_memo] = block
        not_cached = []
        not_cached_seen = set()
        for read_memo in read_memos:
            if read_memo in cached:
                # Don't fetch what we already have
                continue
            if read_memo in not_cached_seen:
                # Don't try to fetch the same data twice
                continue
            not_cached.append(read_memo)
            not_cached_seen.add(read_memo)
        raw_records = self._access.get_raw_records(not_cached)
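        # get_raw_records returns an iterator yielding data for the memos in
        # not_cached order; we pull from it lazily as we walk read_memos.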
        for read_memo in read_memos:
            try:
                yield read_memo, cached[read_memo]
            except KeyError:
                # Read the block, and cache it.
                zdata = raw_records.next()
                block = GroupCompressBlock.from_bytes(zdata)
                self._group_cache[read_memo] = block
                cached[read_memo] = block
                yield read_memo, block

    def get_missing_compression_parent_keys(self):
        """Return the keys of missing compression parents.

        Missing compression parents occur when a record stream was missing
        basis texts, or an index was scanned that had missing basis texts.
        """
        # GroupCompress cannot currently reference texts that are not in the
        # group, so this is valid for now
        return frozenset()

    def get_record_stream(self, keys, ordering, include_delta_closure):
        """Get a stream of records for keys.

        :param keys: The keys to include.
        :param ordering: Either 'unordered' or 'topological'. A topologically
            sorted stream has compression parents strictly before their
            children.
        :param include_delta_closure: If True then the closure across any
            compression parents will be included (in the opaque data).
        :return: An iterator of ContentFactory objects, each of which is only
            valid until the iterator is advanced.
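
        For example (a sketch)::

            for record in vf.get_record_stream(keys, 'unordered', True):
                if record.storage_kind != 'absent':
                    text = record.get_bytes_as('fulltext')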
        """
        # keys might be a generator
        orig_keys = list(keys)
        keys = set(orig_keys)
        if not keys:
            return
        if (not self._index.has_graph
            and ordering in ('topological', 'groupcompress')):
            # We cannot produce a topological or groupcompress order when no
            # graph has been stored, but 'as-requested' and 'unordered' work.
            ordering = 'unordered'

        remaining_keys = keys
        while True:
            try:
                keys = set(remaining_keys)
                for content_factory in self._get_remaining_record_stream(keys,
                        orig_keys, ordering, include_delta_closure):
                    remaining_keys.discard(content_factory.key)
                    yield content_factory
                return
            except errors.RetryWithNewPacks, e:
                self._access.reload_or_raise(e)

    def _find_from_fallback(self, missing):
        """Find whatever keys you can from the fallbacks.

        :param missing: A set of missing keys. This set will be mutated as keys
            are found from a fallback_vfs
        :return: (parent_map, key_to_source_map, source_results)
            parent_map          the overall key => parent_keys mapping
            key_to_source_map   a dict of {key: source}
            source_results      a list of (source, keys) tuples
        """
        parent_map = {}
        key_to_source_map = {}
        source_results = []
        for source in self._fallback_vfs:
            if not missing:
                break
            source_parents = source.get_parent_map(missing)
            parent_map.update(source_parents)
            source_parents = list(source_parents)
            source_results.append((source, source_parents))
            key_to_source_map.update((key, source) for key in source_parents)
            missing.difference_update(source_parents)
        return parent_map, key_to_source_map, source_results

    def _get_ordered_source_keys(self, ordering, parent_map, key_to_source_map):
        """Get the (source, [keys]) list.

        The returned objects should be in the order defined by 'ordering',
        which can weave between different sources.

        :param ordering: Must be one of 'topological' or 'groupcompress'
        :return: List of [(source, [keys])] tuples, such that all keys are in
            the defined order, regardless of source.
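            For example: [(self, [key_a, key_b]), (a_fallback_vf, [key_c])]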
        """
        if ordering == 'topological':
            present_keys = topo_sort(parent_map)
        else:
            # ordering == 'groupcompress'
            # XXX: This only optimizes for the target ordering. We may need
            #      to balance that with the time it takes to extract
            #      ordering, by somehow grouping based on
            #      locations[key][0:3]
            present_keys = sort_gc_optimal(parent_map)
        # Now group by source:
        source_keys = []
        current_source = None
        for key in present_keys:
            source = key_to_source_map.get(key, self)
            if source is not current_source:
                source_keys.append((source, []))
                current_source = source
            source_keys[-1][1].append(key)
        return source_keys

    def _get_as_requested_source_keys(self, orig_keys, locations, unadded_keys,
                                      key_to_source_map):
        source_keys = []
        current_source = None
        for key in orig_keys:
            if key in locations or key in unadded_keys:
                source = self
            elif key in key_to_source_map:
                source = key_to_source_map[key]
            else: # absent
                continue
            if source is not current_source:
                source_keys.append((source, []))
                current_source = source
            source_keys[-1][1].append(key)
        return source_keys

    def _get_io_ordered_source_keys(self, locations, unadded_keys,
                                    source_result):
        def get_group(key):
            # This is the group the bytes are stored in, followed by the
            # location in the group
            return locations[key][0]
        present_keys = sorted(locations.iterkeys(), key=get_group)
        # We don't have an ordering for keys in the in-memory object, but
        # let's process the in-memory ones first.
        present_keys = list(unadded_keys) + present_keys
        # Now grab all of the ones from other sources
        source_keys = [(self, present_keys)]
        source_keys.extend(source_result)
        return source_keys

    def _get_remaining_record_stream(self, keys, orig_keys, ordering,
                                     include_delta_closure):
        """Get a stream of records for keys.

        :param keys: The keys to include.
        :param ordering: one of 'unordered', 'topological', 'groupcompress' or
            'as-requested'
        :param include_delta_closure: If True then the closure across any
            compression parents will be included (in the opaque data).
        :return: An iterator of ContentFactory objects, each of which is only
            valid until the iterator is advanced.
        """
        # Cheap: iterate
        locations = self._index.get_build_details(keys)
        unadded_keys = set(self._unadded_refs).intersection(keys)
        missing = keys.difference(locations)
        missing.difference_update(unadded_keys)
        (fallback_parent_map, key_to_source_map,
         source_result) = self._find_from_fallback(missing)
        if ordering in ('topological', 'groupcompress'):
            # would be better to not globally sort initially but instead
            # start with one key, recurse to its oldest parent, then grab
            # everything in the same group, etc.
            parent_map = dict((key, details[2]) for key, details in
                locations.iteritems())
            for key in unadded_keys:
                parent_map[key] = self._unadded_refs[key]
            parent_map.update(fallback_parent_map)
            source_keys = self._get_ordered_source_keys(ordering, parent_map,
                                                        key_to_source_map)
        elif ordering == 'as-requested':
            source_keys = self._get_as_requested_source_keys(orig_keys,
                locations, unadded_keys, key_to_source_map)
        else:
            # We want to yield the keys in a semi-optimal (read-wise) ordering.
            # Otherwise we thrash the _group_cache and destroy performance
            source_keys = self._get_io_ordered_source_keys(locations,
                unadded_keys, source_result)
        for key in missing:
            yield AbsentContentFactory(key)
        # Batch up as many keys as we can until either:
        #  - we encounter an unadded ref, or
        #  - we run out of keys, or
        #  - the total bytes to retrieve for this batch > BATCH_SIZE
        batcher = _BatchingBlockFetcher(self, locations)
        for source, keys in source_keys:
            if source is self:
                for key in keys:
                    if key in self._unadded_refs:
                        # Flush batch, then yield unadded ref from
                        # self._compressor.
                        for factory in batcher.yield_factories(full_flush=True):
                            yield factory
                        bytes, sha1 = self._compressor.extract(key)
                        parents = self._unadded_refs[key]
                        yield FulltextContentFactory(key, parents, sha1, bytes)
                        continue
                    if batcher.add_key(key) > BATCH_SIZE:
                        # Ok, this batch is big enough.  Yield some results.
                        for factory in batcher.yield_factories():
                            yield factory
            else:
                for factory in batcher.yield_factories(full_flush=True):
                    yield factory
                for record in source.get_record_stream(keys, ordering,
                                                       include_delta_closure):
                    yield record
        for factory in batcher.yield_factories(full_flush=True):
            yield factory

    def get_sha1s(self, keys):
        """See VersionedFiles.get_sha1s()."""
        result = {}
        for record in self.get_record_stream(keys, 'unordered', True):
            if record.sha1 is not None:
                result[record.key] = record.sha1
            else:
                if record.storage_kind != 'absent':
                    result[record.key] = osutils.sha_string(
                        record.get_bytes_as('fulltext'))
        return result

    def insert_record_stream(self, stream):
        """Insert a record stream into this container.

        :param stream: A stream of records to insert.
        :return: None
        :seealso VersionedFiles.get_record_stream:
        """
        # XXX: Setting random_id=True makes
        # test_insert_record_stream_existing_keys fail for groupcompress and
        # groupcompress-nograph, this needs to be revisited while addressing
        # 'bzr branch' performance issues.
        for _ in self._insert_record_stream(stream, random_id=False):
            pass

    def _insert_record_stream(self, stream, random_id=False, nostore_sha=None,
                              reuse_blocks=True):
        """Internal core to insert a record stream into this container.

        This helper has a different interface from insert_record_stream so
        that add_lines can stay minimal while still returning the needed data.

        :param stream: A stream of records to insert.
        :param nostore_sha: If the sha1 of a given text matches nostore_sha,
            raise ExistingContent, rather than committing the new text.
        :param reuse_blocks: If the source is streaming from
            groupcompress-blocks, just insert the blocks as-is, rather than
            expanding the texts and inserting again.
        :return: An iterator over the sha1 of the inserted records.
        :seealso insert_record_stream:
        :seealso add_lines:
        """
        adapters = {}
        def get_adapter(adapter_key):
            try:
                return adapters[adapter_key]
            except KeyError:
                adapter_factory = adapter_registry.get(adapter_key)
                adapter = adapter_factory(self)
                adapters[adapter_key] = adapter
                return adapter
        # This will go up to fulltexts for gc to gc fetching, which isn't
        # ideal.
        self._compressor = GroupCompressor()
        self._unadded_refs = {}
        keys_to_add = []
        def flush():
            bytes = self._compressor.flush().to_bytes()
            index, start, length = self._access.add_raw_records(
                [(None, len(bytes))], bytes)[0]
            nodes = []
            for key, reads, refs in keys_to_add:
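                # The index value is '<group offset> <group length> <start>
                # <end>'; _GCGraphIndex._node_to_position parses it back.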
                nodes.append((key, "%d %d %s" % (start, length, reads), refs))
            self._index.add_records(nodes, random_id=random_id)
            self._unadded_refs = {}
            del keys_to_add[:]
            self._compressor = GroupCompressor()

        last_prefix = None
        max_fulltext_len = 0
        max_fulltext_prefix = None
        insert_manager = None
        block_start = None
        block_length = None
        # XXX: TODO: remove this, it is just for safety checking for now
        inserted_keys = set()
        reuse_this_block = reuse_blocks
        for record in stream:
            # Raise an error when a record is missing.
            if record.storage_kind == 'absent':
                raise errors.RevisionNotPresent(record.key, self)
            if random_id:
                if record.key in inserted_keys:
                    trace.note('Insert claimed random_id=True,'
                               ' but then inserted %r two times', record.key)
                    continue
                inserted_keys.add(record.key)
            if reuse_blocks:
                # If the reuse_blocks flag is set, check to see if we can just
                # copy a groupcompress block as-is.
                # We only check on the first record (groupcompress-block) not
                # on all of the (groupcompress-block-ref) entries.
                # The reuse_this_block flag is then kept for as long as we
                # keep seeing records that reference the same block.
                if record.storage_kind == 'groupcompress-block':
                    # Check to see if we really want to re-use this block
                    insert_manager = record._manager
                    reuse_this_block = insert_manager.check_is_well_utilized()
            else:
                reuse_this_block = False
            if reuse_this_block:
                # We still want to reuse this block
                if record.storage_kind == 'groupcompress-block':
                    # Insert the raw block into the target repo
                    insert_manager = record._manager
                    bytes = record._manager._block.to_bytes()
                    _, start, length = self._access.add_raw_records(
                        [(None, len(bytes))], bytes)[0]
                    del bytes
                    block_start = start
                    block_length = length
                if record.storage_kind in ('groupcompress-block',
                                           'groupcompress-block-ref'):
                    if insert_manager is None:
                        raise AssertionError('No insert_manager set')
                    if insert_manager is not record._manager:
                        raise AssertionError('insert_manager does not match'
                            ' the current record, we cannot be positive'
                            ' that the appropriate content was inserted.'
                            )
                    value = "%d %d %d %d" % (block_start, block_length,
                                             record._start, record._end)
                    nodes = [(record.key, value, (record.parents,))]
                    # TODO: Consider buffering up many nodes to be added, not
                    #       sure how much overhead this has, but we're seeing
                    #       ~23s / 120s in add_records calls
                    self._index.add_records(nodes, random_id=random_id)
                    continue
            try:
                bytes = record.get_bytes_as('fulltext')
            except errors.UnavailableRepresentation:
                adapter_key = record.storage_kind, 'fulltext'
                adapter = get_adapter(adapter_key)
                bytes = adapter.get_bytes(record)
            if len(record.key) > 1:
                prefix = record.key[0]
                soft = (prefix == last_prefix)
            else:
                prefix = None
                soft = False
            if max_fulltext_len < len(bytes):
                max_fulltext_len = len(bytes)
                max_fulltext_prefix = prefix
            (found_sha1, start_point, end_point,
             type) = self._compressor.compress(record.key,
                                               bytes, record.sha1, soft=soft,
                                               nostore_sha=nostore_sha)
            # delta_ratio = float(len(bytes)) / (end_point - start_point)
            # Check if we want to continue to include that text
            if (prefix == max_fulltext_prefix
                and end_point < 2 * max_fulltext_len):
                # As long as we are on the same file_id, we will fill at least
                # 2 * max_fulltext_len
                start_new_block = False
            elif end_point > 4*1024*1024:
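                # Never let a single group grow past 4MB of content.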
                start_new_block = True
            elif (prefix is not None and prefix != last_prefix
                  and end_point > 2*1024*1024):
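                # The prefix (file-id) changed and this group already holds
                # 2MB; split so unrelated texts don't share one group.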
                start_new_block = True
            else:
                start_new_block = False
            last_prefix = prefix
            if start_new_block:
                self._compressor.pop_last()
                flush()
                max_fulltext_len = len(bytes)
                (found_sha1, start_point, end_point,
                 type) = self._compressor.compress(record.key, bytes,
                                                   record.sha1)
            if record.key[-1] is None:
                key = record.key[:-1] + ('sha1:' + found_sha1,)
            else:
                key = record.key
            self._unadded_refs[key] = record.parents
            yield found_sha1
            keys_to_add.append((key, '%d %d' % (start_point, end_point),
                (record.parents,)))
        if keys_to_add:
            flush()
        self._compressor = None

    def iter_lines_added_or_present_in_keys(self, keys, pb=None):
        """Iterate over the lines in the versioned files from keys.

        This may return lines from other keys. Each item the returned
        iterator yields is a tuple of a line and a text version in which that
        line is present (not introduced).

        Ordering of results is in whatever order is most suitable for the
        underlying storage format.

        If a progress bar is supplied, it may be used to indicate progress.
        The caller is responsible for cleaning up progress bars (because this
        is an iterator).

        NOTES:
         * Lines are normalised by the underlying store: they will all have \n
           terminators.
         * Lines are returned in arbitrary order.

        :return: An iterator over (line, key).
        """
        keys = set(keys)
        total = len(keys)
        # We don't care about inclusions, the caller cares.
        # But we need to set up a list of records to visit:
        # key, position, length.
        for key_idx, record in enumerate(self.get_record_stream(keys,
            'unordered', True)):
            # XXX: todo - optimise to use less than full texts.
            key = record.key
            if pb is not None:
                pb.update('Walking content', key_idx, total)
            if record.storage_kind == 'absent':
                raise errors.RevisionNotPresent(key, self)
            lines = osutils.split_lines(record.get_bytes_as('fulltext'))
            for line in lines:
                yield line, key
        if pb is not None:
            pb.update('Walking content', total, total)

    def keys(self):
        """See VersionedFiles.keys."""
        if 'evil' in debug.debug_flags:
            trace.mutter_callsite(2, "keys scales with size of history")
        sources = [self._index] + self._fallback_vfs
        result = set()
        for source in sources:
            result.update(source.keys())
        return result


class _GCGraphIndex(object):
    """Mapper from GroupCompressVersionedFiles needs into GraphIndex storage."""

    def __init__(self, graph_index, is_locked, parents=True,
        add_callback=None, track_external_parent_refs=False,
        inconsistency_fatal=True, track_new_keys=False):
        """Construct a _GCGraphIndex on a graph_index.

        :param graph_index: An implementation of bzrlib.index.GraphIndex.
        :param is_locked: A callback, returns True if the index is locked and
            thus usable.
        :param parents: If True, record knits parents, if not do not record
            parents.
        :param add_callback: If not None, allow additions to the index and call
            this callback with a list of added GraphIndex nodes:
            [(node, value, node_refs), ...]
        :param track_external_parent_refs: As keys are added, keep track of the
            keys they reference, so that we can query get_missing_parents(),
            etc.
        :param inconsistency_fatal: When asked to add records that are already
            present, and the details are inconsistent with the existing
            record, raise an exception instead of warning (and skipping the
            record).
        """
        self._add_callback = add_callback
        self._graph_index = graph_index
        self._parents = parents
        self.has_graph = parents
        self._is_locked = is_locked
        self._inconsistency_fatal = inconsistency_fatal
        if track_external_parent_refs:
            self._key_dependencies = knit._KeyRefs(
                track_new_keys=track_new_keys)
        else:
            self._key_dependencies = None

    def add_records(self, records, random_id=False):
        """Add multiple records to the index.

        This function does not insert data into the immutable GraphIndex
        backing this _GCGraphIndex; instead it prepares data for insertion by
        the caller, checks that it is safe to insert, and then calls
        self._add_callback with the prepared GraphIndex nodes.

        :param records: a list of (key, value, node_refs) tuples to add.
        :param random_id: If True the ids being added were randomly generated
            and no check for existence will be performed.
        """
        if not self._add_callback:
            raise errors.ReadOnlyError(self)
        # we hope there are no repositories with inconsistent parentage
        # anymore.

        changed = False
        keys = {}
        for (key, value, refs) in records:
            if not self._parents:
                if refs:
                    for ref in refs:
                        if ref:
                            raise errors.KnitCorrupt(self,
                                "attempt to add node with parents "
                                "in parentless index.")
                    refs = ()
                    changed = True
            keys[key] = (value, refs)
        # check for dups
        if not random_id:
            present_nodes = self._get_entries(keys)
            for (index, key, value, node_refs) in present_nodes:
                if node_refs != keys[key][1]:
                    details = '%s %s %s' % (key, (value, node_refs), keys[key])
                    if self._inconsistency_fatal:
                        raise errors.KnitCorrupt(self, "inconsistent details"
                                                 " in add_records: %s" %
                                                 details)
                    else:
                        trace.warning("inconsistent details in skipped"
                                      " record: %s", details)
                del keys[key]
                changed = True
        if changed:
            result = []
            if self._parents:
                for key, (value, node_refs) in keys.iteritems():
                    result.append((key, value, node_refs))
            else:
                for key, (value, node_refs) in keys.iteritems():
                    result.append((key, value))
            records = result
        key_dependencies = self._key_dependencies
        if key_dependencies is not None:
            if self._parents:
                for key, value, refs in records:
                    parents = refs[0]
                    key_dependencies.add_references(key, parents)
            else:
                for key, value, refs in records:
                    key_dependencies.add_key(key)
        self._add_callback(records)

    def _check_read(self):
        """Raise an exception if reads are not permitted."""
        if not self._is_locked():
            raise errors.ObjectNotLocked(self)

    def _check_write_ok(self):
        """Raise an exception if writes are not permitted."""
        if not self._is_locked():
            raise errors.ObjectNotLocked(self)

    def _get_entries(self, keys, check_present=False):
        """Get the entries for keys.

        Note: Callers are responsible for checking that the index is locked
        before calling this method.

        :param keys: An iterable of index key tuples.
        """
        keys = set(keys)
        found_keys = set()
        if self._parents:
            for node in self._graph_index.iter_entries(keys):
                yield node
                found_keys.add(node[1])
        else:
            # adapt parentless index to the rest of the code.
            for node in self._graph_index.iter_entries(keys):
                yield node[0], node[1], node[2], ()
                found_keys.add(node[1])
        if check_present:
            missing_keys = keys.difference(found_keys)
            if missing_keys:
                raise errors.RevisionNotPresent(missing_keys.pop(), self)

    def find_ancestry(self, keys):
        """See CombinedGraphIndex.find_ancestry"""
        return self._graph_index.find_ancestry(keys, 0)

    def get_parent_map(self, keys):
        """Get a map of the parents of keys.

        :param keys: The keys to look up parents for.
        :return: A mapping from keys to parents. Absent keys are absent from
            the mapping.
        """
        self._check_read()
        nodes = self._get_entries(keys)
        result = {}
        if self._parents:
            for node in nodes:
                result[node[1]] = node[3][0]
        else:
            for node in nodes:
                result[node[1]] = None
        return result

    def get_missing_parents(self):
        """Return the keys of missing parents."""
        # Copied from _KnitGraphIndex.get_missing_parents
        # We may have false positives, so filter those out.
        self._key_dependencies.satisfy_refs_for_keys(
            self.get_parent_map(self._key_dependencies.get_unsatisfied_refs()))
        return frozenset(self._key_dependencies.get_unsatisfied_refs())

    def get_build_details(self, keys):
        """Get the various build details for keys.

        Ghosts are omitted from the result.

        :param keys: An iterable of keys.
        :return: A dict of key:
            (index_memo, compression_parent, parents, record_details).
            index_memo
                opaque structure to pass to read_records to extract the raw
                data
            compression_parent
                Content that this record is built upon, may be None
            parents
                Logical parents of this node
            record_details
                extra information about the content which needs to be passed to
                Factory.parse_record
        """
        self._check_read()
        result = {}
        entries = self._get_entries(keys)
        for entry in entries:
            key = entry[1]
            if not self._parents:
                parents = None
            else:
                parents = entry[3][0]
            method = 'group'
            result[key] = (self._node_to_position(entry),
                                  None, parents, (method, None))
        return result

    def keys(self):
        """Get all the keys in the collection.

        The keys are not ordered.
        """
        self._check_read()
        return [node[1] for node in self._graph_index.iter_all_entries()]

    def _node_to_position(self, node):
        """Convert an index value to position details."""
        bits = node[2].split(' ')
        # It would be nice not to read the entire gzip.
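        # The layout is '<group offset> <group length> <start> <end>', as
        # written by GroupCompressVersionedFiles._insert_record_stream.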
        start = int(bits[0])
        stop = int(bits[1])
        basis_end = int(bits[2])
        delta_end = int(bits[3])
        return node[0], start, stop, basis_end, delta_end

    def scan_unvalidated_index(self, graph_index):
        """Inform this _GCGraphIndex that there is an unvalidated index.

        This allows this _GCGraphIndex to keep track of any missing
        compression parents we may want to have filled in to make those
        indices valid.  It also allows _GCGraphIndex to track any new keys.

        :param graph_index: A GraphIndex
        """
        key_dependencies = self._key_dependencies
        if key_dependencies is None:
            return
        for node in graph_index.iter_all_entries():
            # Add parent refs from graph_index (and discard parent refs
            # that the graph_index has).
            key_dependencies.add_references(node[1], node[3][0])


from bzrlib._groupcompress_py import (
    apply_delta,
    apply_delta_to_source,
    encode_base128_int,
    decode_base128_int,
    decode_copy_instruction,
    LinesDeltaIndex,
    )
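# Prefer the compiled extension when it is available; otherwise keep the
# pure-Python implementations imported above.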
try:
    from bzrlib._groupcompress_pyx import (
        apply_delta,
        apply_delta_to_source,
        DeltaIndex,
        encode_base128_int,
        decode_base128_int,
        )
    GroupCompressor = PyrexGroupCompressor
except ImportError:
    GroupCompressor = PythonGroupCompressor