~bzr-pqm/bzr/bzr.dev

Viewing changes to bzrlib/repofmt/pack_repo.py

  • Committer: Canonical.com Patch Queue Manager
  • Date: 2009-04-09 23:12:55 UTC
  • mfrom: (3920.2.37 dpush)
  • Revision ID: pqm@pqm.ubuntu.com-20090409231255-o8w1g2q3igiyf8b2
(Jelmer) Add the dpush command.

--- bzrlib/repofmt/pack_repo.py
+++ bzrlib/repofmt/pack_repo.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2007-2010 Canonical Ltd
+# Copyright (C) 2005, 2006, 2007, 2008 Canonical Ltd
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -24,7 +24,6 @@
 
 from bzrlib import (
     chk_map,
-    cleanup,
     debug,
     graph,
     osutils,
@@ -37,7 +36,10 @@
     )
 from bzrlib.index import (
     CombinedGraphIndex,
+    GraphIndex,
+    GraphIndexBuilder,
     GraphIndexPrefixAdapter,
+    InMemoryGraphIndex,
     )
 from bzrlib.knit import (
     KnitPlainFactory,
@@ -53,9 +55,10 @@
     lockable_files,
     lockdir,
     revision as _mod_revision,
+    symbol_versioning,
     )
 
-from bzrlib.decorators import needs_write_lock, only_raises
+from bzrlib.decorators import needs_write_lock
 from bzrlib.btree_index import (
     BTreeGraphIndex,
     BTreeBuilder,
@@ -64,19 +67,16 @@
     GraphIndex,
     InMemoryGraphIndex,
     )
-from bzrlib.lock import LogicalLockResult
 from bzrlib.repofmt.knitrepo import KnitRepository
 from bzrlib.repository import (
     CommitBuilder,
     MetaDirRepositoryFormat,
     RepositoryFormat,
-    RepositoryWriteLockResult,
     RootCommitBuilder,
-    StreamSource,
     )
+import bzrlib.revision as _mod_revision
 from bzrlib.trace import (
     mutter,
-    note,
     warning,
     )
 
@@ -228,14 +228,10 @@
         return self.index_name('text', name)
 
     def _replace_index_with_readonly(self, index_type):
-        unlimited_cache = False
-        if index_type == 'chk':
-            unlimited_cache = True
         setattr(self, index_type + '_index',
             self.index_class(self.index_transport,
                 self.index_name(index_type, self.name),
-                self.index_sizes[self.index_offset(index_type)],
-                unlimited_cache=unlimited_cache))
+                self.index_sizes[self.index_offset(index_type)]))
 
 
 class ExistingPack(Pack):
@@ -272,11 +268,10 @@
 
     def __init__(self, name, revision_index, inventory_index, text_index,
         signature_index, upload_transport, pack_transport, index_transport,
-        pack_collection, chk_index=None):
+        pack_collection):
         """Create a ResumedPack object."""
         ExistingPack.__init__(self, pack_transport, name, revision_index,
-            inventory_index, text_index, signature_index,
-            chk_index=chk_index)
+            inventory_index, text_index, signature_index)
         self.upload_transport = upload_transport
         self.index_transport = index_transport
         self.index_sizes = [None, None, None, None]
@@ -286,9 +281,6 @@
             ('text', text_index),
             ('signature', signature_index),
             ]
-        if chk_index is not None:
-            indices.append(('chk', chk_index))
-            self.index_sizes.append(None)
         for index_type, index in indices:
             offset = self.index_offset(index_type)
             self.index_sizes[offset] = index._size
@@ -309,31 +301,21 @@
         self.upload_transport.delete(self.file_name())
         indices = [self.revision_index, self.inventory_index, self.text_index,
             self.signature_index]
-        if self.chk_index is not None:
-            indices.append(self.chk_index)
         for index in indices:
             index._transport.delete(index._name)
 
     def finish(self):
         self._check_references()
-        index_types = ['revision', 'inventory', 'text', 'signature']
-        if self.chk_index is not None:
-            index_types.append('chk')
-        for index_type in index_types:
+        new_name = '../packs/' + self.file_name()
+        self.upload_transport.rename(self.file_name(), new_name)
+        for index_type in ['revision', 'inventory', 'text', 'signature']:
             old_name = self.index_name(index_type, self.name)
             new_name = '../indices/' + old_name
             self.upload_transport.rename(old_name, new_name)
             self._replace_index_with_readonly(index_type)
-        new_name = '../packs/' + self.file_name()
-        self.upload_transport.rename(self.file_name(), new_name)
         self._state = 'finished'
 
     def _get_external_refs(self, index):
-        """Return compression parents for this index that are not present.
-
-        This returns any compression parents that are referenced by this index,
-        which are not contained *in* this index. They may be present elsewhere.
-        """
         return index.external_references(1)
 
 
@@ -430,8 +412,6 @@
         self._writer.begin()
         # what state is the pack in? (open, finished, aborted)
         self._state = 'open'
-        # no name until we finish writing the content
-        self.name = None
 
     def abort(self):
         """Cancel creating this pack."""
@@ -458,14 +438,6 @@
             self.signature_index.key_count() or
             (self.chk_index is not None and self.chk_index.key_count()))
 
-    def finish_content(self):
-        if self.name is not None:
-            return
-        self._writer.end()
-        if self._buffer[1]:
-            self._write_data('', flush=True)
-        self.name = self._hash.hexdigest()
-
     def finish(self, suspend=False):
         """Finish the new pack.
 
@@ -477,7 +449,10 @@
          - stores the index size tuple for the pack in the index_sizes
            attribute.
         """
-        self.finish_content()
+        self._writer.end()
+        if self._buffer[1]:
+            self._write_data('', flush=True)
+        self.name = self._hash.hexdigest()
         if not suspend:
             self._check_references()
         # write indices
@@ -589,6 +564,26 @@
                                              flush_func=flush_func)
         self.add_callback = None
 
+    def replace_indices(self, index_to_pack, indices):
+        """Replace the current mappings with fresh ones.
+
+        This should probably not be used eventually, rather incremental add and
+        removal of indices. It has been added during refactoring of existing
+        code.
+
+        :param index_to_pack: A mapping from index objects to
+            (transport, name) tuples for the pack file data.
+        :param indices: A list of indices.
+        """
+        # refresh the revision pack map dict without replacing the instance.
+        self.index_to_pack.clear()
+        self.index_to_pack.update(index_to_pack)
+        # XXX: API break - clearly a 'replace' method would be good?
+        self.combined_index._indices[:] = indices
+        # the current add nodes callback for the current writable index if
+        # there is one.
+        self.add_callback = None
+
     def add_index(self, index, pack):
         """Add index to the aggregate, which is an index for Pack pack.
 
@@ -601,7 +596,7 @@
         # expose it to the index map
         self.index_to_pack[index] = pack.access_tuple()
         # put it at the front of the linear index list
-        self.combined_index.insert_index(0, index, pack.name)
+        self.combined_index.insert_index(0, index)
 
     def add_writable_index(self, index, pack):
         """Add an index which is able to have data added to it.
@@ -627,18 +622,16 @@
         self.data_access.set_writer(None, None, (None, None))
         self.index_to_pack.clear()
         del self.combined_index._indices[:]
-        del self.combined_index._index_names[:]
         self.add_callback = None
 
-    def remove_index(self, index):
+    def remove_index(self, index, pack):
         """Remove index from the indices used to answer queries.
 
         :param index: An index from the pack parameter.
+        :param pack: A Pack instance.
         """
         del self.index_to_pack[index]
-        pos = self.combined_index._indices.index(index)
-        del self.combined_index._indices[pos]
-        del self.combined_index._index_names[pos]
+        self.combined_index._indices.remove(index)
         if (self.add_callback is not None and
             getattr(index, 'add_nodes', None) == self.add_callback):
             self.add_callback = None
@@ -1102,7 +1095,7 @@
             iterator is a tuple with:
             index, readv_vector, node_vector. readv_vector is a list ready to
             hand to the transport readv method, and node_vector is a list of
-            (key, eol_flag, references) for the node retrieved by the
+            (key, eol_flag, references) for the the node retrieved by the
             matching readv_vector.
         """
         # group by pack so we do one readv per pack
@@ -1299,7 +1292,7 @@
         # space (we only topo sort the revisions, which is smaller).
         topo_order = tsort.topo_sort(ancestors)
         rev_order = dict(zip(topo_order, range(len(topo_order))))
-        bad_texts.sort(key=lambda key:rev_order.get(key[0][1], 0))
+        bad_texts.sort(key=lambda key:rev_order[key[0][1]])
         transaction = repo.get_transaction()
         file_id_index = GraphIndexPrefixAdapter(
             self.new_pack.text_index,
@@ -1359,7 +1352,6 @@
     """
 
     pack_factory = NewPack
-    resumed_pack_factory = ResumedPack
 
     def __init__(self, repo, transport, index_transport, upload_transport,
                  pack_transport, index_builder_class, index_class,
@@ -1400,26 +1392,14 @@
         self.inventory_index = AggregateIndex(self.reload_pack_names, flush)
         self.text_index = AggregateIndex(self.reload_pack_names, flush)
         self.signature_index = AggregateIndex(self.reload_pack_names, flush)
-        all_indices = [self.revision_index, self.inventory_index,
-                self.text_index, self.signature_index]
         if use_chk_index:
             self.chk_index = AggregateIndex(self.reload_pack_names, flush)
-            all_indices.append(self.chk_index)
         else:
             # used to determine if we're using a chk_index elsewhere.
             self.chk_index = None
-        # Tell all the CombinedGraphIndex objects about each other, so they can
-        # share hints about which pack names to search first.
-        all_combined = [agg_idx.combined_index for agg_idx in all_indices]
-        for combined_idx in all_combined:
-            combined_idx.set_sibling_indices(
-                set(all_combined).difference([combined_idx]))
         # resumed packs
         self._resumed_packs = []
 
-    def __repr__(self):
-        return '%s(%r)' % (self.__class__.__name__, self.repo)
-
     def add_pack_to_memory(self, pack):
         """Make a Pack object available to the repository to satisfy queries.
 
@@ -1463,12 +1443,12 @@
         in synchronisation with certain steps. Otherwise the names collection
         is not flushed.
 
-        :return: Something evaluating true if packing took place.
+        :return: True if packing took place.
         """
         while True:
             try:
                 return self._do_autopack()
-            except errors.RetryAutopack:
+            except errors.RetryAutopack, e:
                 # If we get a RetryAutopack exception, we should abort the
                 # current action, and retry.
                 pass
@@ -1478,7 +1458,7 @@
         total_revisions = self.revision_index.combined_index.key_count()
         total_packs = len(self._names)
         if self._max_pack_count(total_revisions) >= total_packs:
-            return None
+            return False
         # determine which packs need changing
         pack_distribution = self.pack_distribution(total_revisions)
         existing_packs = []
@@ -1506,10 +1486,10 @@
             'containing %d revisions. Packing %d files into %d affecting %d'
             ' revisions', self, total_packs, total_revisions, num_old_packs,
             num_new_packs, num_revs_affected)
-        result = self._execute_pack_operations(pack_operations,
+        self._execute_pack_operations(pack_operations,
                                       reload_func=self._restart_autopack)
         mutter('Auto-packing repository %s completed', self)
-        return result
+        return True
 
     def _execute_pack_operations(self, pack_operations, _packer_class=Packer,
                                  reload_func=None):
@@ -1517,7 +1497,7 @@
 
         :param pack_operations: A list of [revision_count, packs_to_combine].
         :param _packer_class: The class of packer to use (default: Packer).
-        :return: The new pack names.
+        :return: None.
         """
         for revision_count, packs in pack_operations:
             # we may have no-ops from the setup logic
@@ -1539,12 +1519,10 @@
                 self._remove_pack_from_memory(pack)
         # record the newly available packs and stop advertising the old
         # packs
-        to_be_obsoleted = []
-        for _, packs in pack_operations:
-            to_be_obsoleted.extend(packs)
-        result = self._save_pack_names(clear_obsolete_packs=True,
-                                       obsolete_packs=to_be_obsoleted)
-        return result
+        self._save_pack_names(clear_obsolete_packs=True)
+        # Move the old packs out of the way now they are no longer referenced.
+        for revision_count, packs in pack_operations:
+            self._obsolete_packs(packs)
 
     def _flush_new_pack(self):
         if self._new_pack is not None:
@@ -1560,33 +1538,31 @@
 
     def _already_packed(self):
         """Is the collection already packed?"""
-        return not (self.repo._format.pack_compresses or (len(self._names) > 1))
+        return len(self._names) < 2
 
-    def pack(self, hint=None, clean_obsolete_packs=False):
+    def pack(self):
         """Pack the pack collection totally."""
         self.ensure_loaded()
         total_packs = len(self._names)
         if self._already_packed():
+            # This is arguably wrong because we might not be optimal, but for
+            # now lets leave it in. (e.g. reconcile -> one pack. But not
+            # optimal.
             return
         total_revisions = self.revision_index.combined_index.key_count()
         # XXX: the following may want to be a class, to pack with a given
         # policy.
         mutter('Packing repository %s, which has %d pack files, '
-            'containing %d revisions with hint %r.', self, total_packs,
-            total_revisions, hint)
+            'containing %d revisions into 1 packs.', self, total_packs,
+            total_revisions)
         # determine which packs need changing
+        pack_distribution = [1]
         pack_operations = [[0, []]]
         for pack in self.all_packs():
-            if hint is None or pack.name in hint:
-                # Either no hint was provided (so we are packing everything),
-                # or this pack was included in the hint.
-                pack_operations[-1][0] += pack.get_revision_count()
-                pack_operations[-1][1].append(pack)
+            pack_operations[-1][0] += pack.get_revision_count()
+            pack_operations[-1][1].append(pack)
         self._execute_pack_operations(pack_operations, OptimisingPacker)
 
-        if clean_obsolete_packs:
-            self._clear_obsolete_packs()
-
     def plan_autopack_combinations(self, existing_packs, pack_distribution):
         """Plan a pack operation.
 
@@ -1680,7 +1656,7 @@
             txt_index = self._make_index(name, '.tix')
             sig_index = self._make_index(name, '.six')
             if self.chk_index is not None:
-                chk_index = self._make_index(name, '.cix', unlimited_cache=True)
+                chk_index = self._make_index(name, '.cix')
             else:
                 chk_index = None
             result = ExistingPack(self._pack_transport, name, rev_index,
@@ -1704,15 +1680,9 @@
             inv_index = self._make_index(name, '.iix', resume=True)
             txt_index = self._make_index(name, '.tix', resume=True)
             sig_index = self._make_index(name, '.six', resume=True)
-            if self.chk_index is not None:
-                chk_index = self._make_index(name, '.cix', resume=True,
-                                             unlimited_cache=True)
-            else:
-                chk_index = None
-            result = self.resumed_pack_factory(name, rev_index, inv_index,
-                txt_index, sig_index, self._upload_transport,
-                self._pack_transport, self._index_transport, self,
-                chk_index=chk_index)
+            result = ResumedPack(name, rev_index, inv_index, txt_index,
+                sig_index, self._upload_transport, self._pack_transport,
+                self._index_transport, self)
         except errors.NoSuchFile, e:
             raise errors.UnresumableWriteGroup(self.repo, [name], str(e))
         self.add_pack_to_memory(result)
@@ -1742,7 +1712,7 @@
         return self._index_class(self.transport, 'pack-names', None
                 ).iter_all_entries()
 
-    def _make_index(self, name, suffix, resume=False, unlimited_cache=False):
+    def _make_index(self, name, suffix, resume=False):
         size_offset = self._suffix_offsets[suffix]
         index_name = name + suffix
         if resume:
@@ -1751,8 +1721,7 @@
         else:
             transport = self._index_transport
             index_size = self._names[name][size_offset]
-        return self._index_class(transport, index_name, index_size,
-                                 unlimited_cache=unlimited_cache)
+        return self._index_class(transport, index_name, index_size)
 
     def _max_pack_count(self, total_revisions):
         """Return the maximum number of packs to use for total revisions.
@@ -1786,13 +1755,8 @@
         :param return: None.
         """
         for pack in packs:
-            try:
-                pack.pack_transport.rename(pack.file_name(),
-                    '../obsolete_packs/' + pack.file_name())
-            except (errors.PathError, errors.TransportError), e:
-                # TODO: Should these be warnings or mutters?
-                mutter("couldn't rename obsolete pack, skipping it:\n%s"
-                       % (e,))
+            pack.pack_transport.rename(pack.file_name(),
+                '../obsolete_packs/' + pack.file_name())
             # TODO: Probably needs to know all possible indices for this pack
             # - or maybe list the directory and move all indices matching this
             # name whether we recognize it or not?
@@ -1800,12 +1764,8 @@
             if self.chk_index is not None:
                 suffixes.append('.cix')
             for suffix in suffixes:
-                try:
-                    self._index_transport.rename(pack.name + suffix,
-                        '../obsolete_packs/' + pack.name + suffix)
-                except (errors.PathError, errors.TransportError), e:
-                    mutter("couldn't rename obsolete index, skipping it:\n%s"
-                           % (e,))
+                self._index_transport.rename(pack.name + suffix,
+                    '../obsolete_packs/' + pack.name + suffix)
 
     def pack_distribution(self, total_revisions):
         """Generate a list of the number of revisions to put in each pack.
@@ -1837,31 +1797,26 @@
         self._remove_pack_indices(pack)
         self.packs.remove(pack)
 
-    def _remove_pack_indices(self, pack, ignore_missing=False):
-        """Remove the indices for pack from the aggregated indices.
-
-        :param ignore_missing: Suppress KeyErrors from calling remove_index.
-        """
-        for index_type in Pack.index_definitions.keys():
-            attr_name = index_type + '_index'
-            aggregate_index = getattr(self, attr_name)
-            if aggregate_index is not None:
-                pack_index = getattr(pack, attr_name)
-                try:
-                    aggregate_index.remove_index(pack_index)
-                except KeyError:
-                    if ignore_missing:
-                        continue
-                    raise
+    def _remove_pack_indices(self, pack):
+        """Remove the indices for pack from the aggregated indices."""
+        self.revision_index.remove_index(pack.revision_index, pack)
+        self.inventory_index.remove_index(pack.inventory_index, pack)
+        self.text_index.remove_index(pack.text_index, pack)
+        self.signature_index.remove_index(pack.signature_index, pack)
+        if self.chk_index is not None:
+            self.chk_index.remove_index(pack.chk_index, pack)
 
     def reset(self):
         """Clear all cached data."""
         # cached revision data
+        self.repo._revision_knit = None
         self.revision_index.clear()
         # cached signature data
+        self.repo._signature_knit = None
         self.signature_index.clear()
         # cached file text data
         self.text_index.clear()
+        self.repo._text_knit = None
         # cached inventory data
         self.inventory_index.clear()
         # cached chk data
@@ -1891,7 +1846,6 @@
         disk_nodes = set()
         for index, key, value in self._iter_disk_pack_index():
             disk_nodes.add((key, value))
-        orig_disk_nodes = set(disk_nodes)
 
         # do a two-way diff against our original content
         current_nodes = set()
@@ -1910,7 +1864,7 @@
         disk_nodes.difference_update(deleted_nodes)
         disk_nodes.update(new_nodes)
 
-        return disk_nodes, deleted_nodes, new_nodes, orig_disk_nodes
+        return disk_nodes, deleted_nodes, new_nodes
 
     def _syncronize_pack_names_from_disk_nodes(self, disk_nodes):
         """Given the correct set of pack files, update our saved info.
@@ -1956,7 +1910,7 @@
                 added.append(name)
         return removed, added, modified
 
-    def _save_pack_names(self, clear_obsolete_packs=False, obsolete_packs=None):
+    def _save_pack_names(self, clear_obsolete_packs=False):
         """Save the list of packs.
 
         This will take out the mutex around the pack names list for the
@@ -1966,16 +1920,11 @@
 
         :param clear_obsolete_packs: If True, clear out the contents of the
             obsolete_packs directory.
-        :param obsolete_packs: Packs that are obsolete once the new pack-names
-            file has been written.
-        :return: A list of the names saved that were not previously on disk.
         """
-        already_obsolete = []
         self.lock_names()
         try:
             builder = self._index_builder_class()
-            (disk_nodes, deleted_nodes, new_nodes,
-             orig_disk_nodes) = self._diff_pack_names()
+            disk_nodes, deleted_nodes, new_nodes = self._diff_pack_names()
             # TODO: handle same-name, index-size-changes here -
             # e.g. use the value from disk, not ours, *unless* we're the one
             # changing it.
@@ -1983,26 +1932,14 @@
                 builder.add_node(key, value)
             self.transport.put_file('pack-names', builder.finish(),
                 mode=self.repo.bzrdir._get_file_mode())
+            # move the baseline forward
             self._packs_at_load = disk_nodes
             if clear_obsolete_packs:
-                to_preserve = None
-                if obsolete_packs:
-                    to_preserve = set([o.name for o in obsolete_packs])
-                already_obsolete = self._clear_obsolete_packs(to_preserve)
+                self._clear_obsolete_packs()
         finally:
             self._unlock_names()
         # synchronise the memory packs list with what we just wrote:
         self._syncronize_pack_names_from_disk_nodes(disk_nodes)
-        if obsolete_packs:
-            # TODO: We could add one more condition here. "if o.name not in
-            #       orig_disk_nodes and o != the new_pack we haven't written to
-            #       disk yet. However, the new pack object is not easily
-            #       accessible here (it would have to be passed through the
-            #       autopacking code, etc.)
-            obsolete_packs = [o for o in obsolete_packs
-                              if o.name not in already_obsolete]
-            self._obsolete_packs(obsolete_packs)
-        return [new_node[0][0] for new_node in new_nodes]
 
     def reload_pack_names(self):
         """Sync our pack listing with what is present in the repository.
@@ -2022,12 +1959,8 @@
         if first_read:
             return True
         # out the new value.
-        (disk_nodes, deleted_nodes, new_nodes,
-         orig_disk_nodes) = self._diff_pack_names()
-        # _packs_at_load is meant to be the explicit list of names in
-        # 'pack-names' at then start. As such, it should not contain any
-        # pending names that haven't been written out yet.
-        self._packs_at_load = orig_disk_nodes
+        disk_nodes, _, _ = self._diff_pack_names()
+        self._packs_at_load = disk_nodes
         (removed, added,
          modified) = self._syncronize_pack_names_from_disk_nodes(disk_nodes)
         if removed or added or modified:
@@ -2042,28 +1975,15 @@
             raise
         raise errors.RetryAutopack(self.repo, False, sys.exc_info())
 
-    def _clear_obsolete_packs(self, preserve=None):
+    def _clear_obsolete_packs(self):
         """Delete everything from the obsolete-packs directory.
-
-        :return: A list of pack identifiers (the filename without '.pack') that
-            were found in obsolete_packs.
         """
-        found = []
         obsolete_pack_transport = self.transport.clone('obsolete_packs')
-        if preserve is None:
-            preserve = set()
         for filename in obsolete_pack_transport.list_dir('.'):
-            name, ext = osutils.splitext(filename)
-            if ext == '.pack':
-                found.append(name)
-            if name in preserve:
-                continue
             try:
                 obsolete_pack_transport.delete(filename)
             except (errors.PathError, errors.TransportError), e:
-                warning("couldn't delete obsolete pack, skipping it:\n%s"
-                        % (e,))
-        return found
+                warning("couldn't delete obsolete pack, skipping it:\n%s" % (e,))
 
     def _start_write_group(self):
         # Do not permit preparation for writing if we're not in a 'write lock'.
@@ -2096,38 +2016,32 @@
         # FIXME: just drop the transient index.
         # forget what names there are
         if self._new_pack is not None:
-            operation = cleanup.OperationWithCleanups(self._new_pack.abort)
-            operation.add_cleanup(setattr, self, '_new_pack', None)
-            # If we aborted while in the middle of finishing the write
-            # group, _remove_pack_indices could fail because the indexes are
-            # already gone.  But they're not there we shouldn't fail in this
-            # case, so we pass ignore_missing=True.
-            operation.add_cleanup(self._remove_pack_indices, self._new_pack,
-                ignore_missing=True)
-            operation.run_simple()
+            try:
+                self._new_pack.abort()
+            finally:
+                # XXX: If we aborted while in the middle of finishing the write
+                # group, _remove_pack_indices can fail because the indexes are
+                # already gone.  If they're not there we shouldn't fail in this
+                # case.  -- mbp 20081113
+                self._remove_pack_indices(self._new_pack)
+                self._new_pack = None
         for resumed_pack in self._resumed_packs:
-            operation = cleanup.OperationWithCleanups(resumed_pack.abort)
-            # See comment in previous finally block.
-            operation.add_cleanup(self._remove_pack_indices, resumed_pack,
-                ignore_missing=True)
-            operation.run_simple()
+            try:
+                resumed_pack.abort()
+            finally:
+                # See comment in previous finally block.
+                try:
+                    self._remove_pack_indices(resumed_pack)
+                except KeyError:
+                    pass
         del self._resumed_packs[:]
+        self.repo._text_knit = None
 
     def _remove_resumed_pack_indices(self):
         for resumed_pack in self._resumed_packs:
            self._remove_pack_indices(resumed_pack)
        del self._resumed_packs[:]
 
-    def _check_new_inventories(self):
-        """Detect missing inventories in this write group.
-
-        :returns: list of strs, summarising any problems found.  If the list is
-            empty no problems were found.
-        """
-        # The base implementation does no checks.  GCRepositoryPackCollection
-        # overrides this.
-        return []
-
     def _commit_write_group(self):
         all_missing = set()
         for prefix, versioned_file in (
@@ -2142,19 +2056,14 @@
             raise errors.BzrCheckError(
                 "Repository %s has missing compression parent(s) %r "
                  % (self.repo, sorted(all_missing)))
-        problems = self._check_new_inventories()
-        if problems:
-            problems_summary = '\n'.join(problems)
-            raise errors.BzrCheckError(
-                "Cannot add revision(s) to repository: " + problems_summary)
         self._remove_pack_indices(self._new_pack)
-        any_new_content = False
+        should_autopack = False
         if self._new_pack.data_inserted():
             # get all the data to disk and read to use
             self._new_pack.finish()
             self.allocate(self._new_pack)
             self._new_pack = None
-            any_new_content = True
+            should_autopack = True
         else:
             self._new_pack.abort()
             self._new_pack = None
@@ -2165,16 +2074,14 @@
             self._remove_pack_from_memory(resumed_pack)
             resumed_pack.finish()
             self.allocate(resumed_pack)
-            any_new_content = True
+            should_autopack = True
         del self._resumed_packs[:]
-        if any_new_content:
-            result = self.autopack()
-            if not result:
+        if should_autopack:
+            if not self.autopack():
                 # when autopack takes no steps, the names list is still
                 # unsaved.
-                return self._save_pack_names()
-            return result
-        return []
+                self._save_pack_names()
+        self.repo._text_knit = None
 
     def _suspend_write_group(self):
         tokens = [pack.name for pack in self._resumed_packs]
@@ -2188,6 +2095,7 @@
             self._new_pack.abort()
             self._new_pack = None
         self._remove_resumed_pack_indices()
+        self.repo._text_knit = None
         return tokens
 
     def _resume_write_group(self, tokens):
@@ -2242,8 +2150,7 @@
         self.revisions = KnitVersionedFiles(
             _KnitGraphIndex(self._pack_collection.revision_index.combined_index,
                 add_callback=self._pack_collection.revision_index.add_callback,
-                deltas=False, parents=True, is_locked=self.is_locked,
-                track_external_parent_refs=True),
+                deltas=False, parents=True, is_locked=self.is_locked),
             data_access=self._pack_collection.revision_index.data_access,
             max_delta_chain=0)
         self.signatures = KnitVersionedFiles(
@@ -2282,19 +2189,65 @@
         self._reconcile_fixes_text_parents = True
         self._reconcile_backsup_inventory = False
 
-    def _warn_if_deprecated(self, branch=None):
+    def _warn_if_deprecated(self):
         # This class isn't deprecated, but one sub-format is
         if isinstance(self._format, RepositoryFormatKnitPack5RichRootBroken):
-            super(KnitPackRepository, self)._warn_if_deprecated(branch)
+            from bzrlib import repository
+            if repository._deprecation_warning_done:
+                return
+            repository._deprecation_warning_done = True
+            warning("Format %s for %s is deprecated - please use"
+                    " 'bzr upgrade --1.6.1-rich-root'"
+                    % (self._format, self.bzrdir.transport.base))
 
     def _abort_write_group(self):
-        self.revisions._index._key_dependencies.clear()
         self._pack_collection._abort_write_group()
 
-    def _get_source(self, to_format):
-        if to_format.network_name() == self._format.network_name():
-            return KnitPackStreamSource(self, to_format)
-        return super(KnitPackRepository, self)._get_source(to_format)
+    def _find_inconsistent_revision_parents(self):
+        """Find revisions with incorrectly cached parents.
+
+        :returns: an iterator yielding tuples of (revison-id, parents-in-index,
+            parents-in-revision).
+        """
+        if not self.is_locked():
+            raise errors.ObjectNotLocked(self)
+        pb = ui.ui_factory.nested_progress_bar()
+        result = []
+        try:
+            revision_nodes = self._pack_collection.revision_index \
+                .combined_index.iter_all_entries()
+            index_positions = []
+            # Get the cached index values for all revisions, and also the
+            # location in each index of the revision text so we can perform
+            # linear IO.
+            for index, key, value, refs in revision_nodes:
+                node = (index, key, value, refs)
+                index_memo = self.revisions._index._node_to_position(node)
+                if index_memo[0] != index:
+                    raise AssertionError('%r != %r' % (index_memo[0], index))
+                index_positions.append((index_memo, key[0],
+                                       tuple(parent[0] for parent in refs[0])))
+                pb.update("Reading revision index", 0, 0)
+            index_positions.sort()
+            batch_size = 1000
+            pb.update("Checking cached revision graph", 0,
+                      len(index_positions))
+            for offset in xrange(0, len(index_positions), 1000):
+                pb.update("Checking cached revision graph", offset)
+                to_query = index_positions[offset:offset + batch_size]
+                if not to_query:
+                    break
+                rev_ids = [item[1] for item in to_query]
+                revs = self.get_revisions(rev_ids)
+                for revision, item in zip(revs, to_query):
+                    index_parents = item[2]
+                    rev_parents = tuple(revision.parent_ids)
+                    if index_parents != rev_parents:
+                        result.append((revision.revision_id, index_parents,
+                                       rev_parents))
+        finally:
+            pb.finished()
+        return result
 
     def _make_parents_provider(self):
         return graph.CachingParentsProvider(self)
@@ -2308,26 +2261,17 @@
         self._pack_collection._start_write_group()
 
     def _commit_write_group(self):
-        hint = self._pack_collection._commit_write_group()
-        self.revisions._index._key_dependencies.clear()
-        return hint
+        return self._pack_collection._commit_write_group()
 
     def suspend_write_group(self):
         # XXX check self._write_group is self.get_transaction()?
         tokens = self._pack_collection._suspend_write_group()
-        self.revisions._index._key_dependencies.clear()
         self._write_group = None
         return tokens
 
     def _resume_write_group(self, tokens):
         self._start_write_group()
-        try:
-            self._pack_collection._resume_write_group(tokens)
-        except errors.UnresumableWriteGroup:
-            self._abort_write_group()
-            raise
-        for pack in self._pack_collection._resumed_packs:
-            self.revisions._index.scan_unvalidated_index(pack.revision_index)
+        self._pack_collection._resume_write_group(tokens)
 
     def get_transaction(self):
         if self._write_lock_count:
@@ -2342,44 +2286,29 @@
         return self._write_lock_count
 
     def lock_write(self, token=None):
-        """Lock the repository for writes.
-
-        :return: A bzrlib.repository.RepositoryWriteLockResult.
-        """
         locked = self.is_locked()
         if not self._write_lock_count and locked:
             raise errors.ReadOnlyError(self)
         self._write_lock_count += 1
         if self._write_lock_count == 1:
             self._transaction = transactions.WriteTransaction()
-        if not locked:
-            if 'relock' in debug.debug_flags and self._prev_lock == 'w':
-                note('%r was write locked again', self)
-            self._prev_lock = 'w'
             for repo in self._fallback_repositories:
                 # Writes don't affect fallback repos
                 repo.lock_read()
+        if not locked:
             self._refresh_data()
-        return RepositoryWriteLockResult(self.unlock, None)
 
     def lock_read(self):
-        """Lock the repository for reads.
-
-        :return: A bzrlib.lock.LogicalLockResult.
-        """
         locked = self.is_locked()
         if self._write_lock_count:
             self._write_lock_count += 1
         else:
             self.control_files.lock_read()
-        if not locked:
-            if 'relock' in debug.debug_flags and self._prev_lock == 'r':
-                note('%r was read locked again', self)
-            self._prev_lock = 'r'
             for repo in self._fallback_repositories:
+                # Writes don't affect fallback repos
                 repo.lock_read()
+        if not locked:
             self._refresh_data()
-        return LogicalLockResult(self.unlock)
 
     def leave_lock_in_place(self):
         # not supported - raise an error
@@ -2390,13 +2319,13 @@
         raise NotImplementedError(self.dont_leave_lock_in_place)
 
     @needs_write_lock
-    def pack(self, hint=None, clean_obsolete_packs=False):
+    def pack(self):
         """Compress the data within the repository.
 
         This will pack all the data to a single pack. In future it may
         recompress deltas or do other such expensive operations.
         """
-        self._pack_collection.pack(hint=hint, clean_obsolete_packs=clean_obsolete_packs)
+        self._pack_collection.pack()
 
     @needs_write_lock
     def reconcile(self, other=None, thorough=False):
@@ -2410,7 +2339,6 @@
         packer = ReconcilePacker(collection, packs, extension, revs)
         return packer.pack(pb)
 
-    @only_raises(errors.LockNotHeld, errors.LockBroken)
     def unlock(self):
         if self._write_lock_count == 1 and self._write_group is not None:
             self.abort_write_group()
@@ -2425,87 +2353,14 @@
                 transaction = self._transaction
                 self._transaction = None
                 transaction.finish()
+                for repo in self._fallback_repositories:
+                    repo.unlock()
         else:
             self.control_files.unlock()
-
-        if not self.is_locked():
             for repo in self._fallback_repositories:
                 repo.unlock()
 
 
-class KnitPackStreamSource(StreamSource):
-    """A StreamSource used to transfer data between same-format KnitPack repos.
-
-    This source assumes:
-        1) Same serialization format for all objects
-        2) Same root information
-        3) XML format inventories
-        4) Atomic inserts (so we can stream inventory texts before text
-           content)
-        5) No chk_bytes
-    """
-
-    def __init__(self, from_repository, to_format):
-        super(KnitPackStreamSource, self).__init__(from_repository, to_format)
-        self._text_keys = None
-        self._text_fetch_order = 'unordered'
-
-    def _get_filtered_inv_stream(self, revision_ids):
-        from_repo = self.from_repository
-        parent_ids = from_repo._find_parent_ids_of_revisions(revision_ids)
-        parent_keys = [(p,) for p in parent_ids]
-        find_text_keys = from_repo._find_text_key_references_from_xml_inventory_lines
-        parent_text_keys = set(find_text_keys(
-            from_repo._inventory_xml_lines_for_keys(parent_keys)))
-        content_text_keys = set()
-        knit = KnitVersionedFiles(None, None)
-        factory = KnitPlainFactory()
-        def find_text_keys_from_content(record):
-            if record.storage_kind not in ('knit-delta-gz', 'knit-ft-gz'):
-                raise ValueError("Unknown content storage kind for"
-                    " inventory text: %s" % (record.storage_kind,))
-            # It's a knit record, it has a _raw_record field (even if it was
-            # reconstituted from a network stream).
-            raw_data = record._raw_record
-            # read the entire thing
-            revision_id = record.key[-1]
-            content, _ = knit._parse_record(revision_id, raw_data)
-            if record.storage_kind == 'knit-delta-gz':
-                line_iterator = factory.get_linedelta_content(content)
-            elif record.storage_kind == 'knit-ft-gz':
-                line_iterator = factory.get_fulltext_content(content)
-            content_text_keys.update(find_text_keys(
-                [(line, revision_id) for line in line_iterator]))
-        revision_keys = [(r,) for r in revision_ids]
-        def _filtered_inv_stream():
-            source_vf = from_repo.inventories
-            stream = source_vf.get_record_stream(revision_keys,
-                                                 'unordered', False)
-            for record in stream:
-                if record.storage_kind == 'absent':
-                    raise errors.NoSuchRevision(from_repo, record.key)
-                find_text_keys_from_content(record)
-                yield record
-            self._text_keys = content_text_keys - parent_text_keys
-        return ('inventories', _filtered_inv_stream())
-
-    def _get_text_stream(self):
-        # Note: We know we don't have to handle adding root keys, because both
-        # the source and target are the identical network name.
-        text_stream = self.from_repository.texts.get_record_stream(
-                        self._text_keys, self._text_fetch_order, False)
-        return ('texts', text_stream)
-
-    def get_stream(self, search):
-        revision_ids = search.get_keys()
-        for stream_info in self._fetch_revision_texts(revision_ids):
-            yield stream_info
-        self._revision_keys = [(rev_id,) for rev_id in revision_ids]
-        yield self._get_filtered_inv_stream(revision_ids)
-        yield self._get_text_stream()
-
-
-
 class RepositoryFormatPack(MetaDirRepositoryFormat):
     """Format logic for pack structured repositories.
 
@@ -2558,9 +2413,7 @@
         utf8_files = [('format', self.get_format_string())]
 
         self._upload_blank_content(a_bzrdir, dirs, files, utf8_files, shared)
-        repository = self.open(a_bzrdir=a_bzrdir, _found=True)
-        self._run_post_repo_init_hooks(repository, a_bzrdir, shared)
-        return repository
+        return self.open(a_bzrdir=a_bzrdir, _found=True)
 
     def open(self, a_bzrdir, _found=False, _override_transport=None):
         """See RepositoryFormat.open().
@@ -2615,6 +2468,9 @@
         """See RepositoryFormat.get_format_description()."""
         return "Packs containing knits without subtree support"
 
+    def check_conversion_target(self, target_format):
+        pass
+
 
 class RepositoryFormatKnitPack3(RepositoryFormatPack):
     """A subtrees parameterized Pack repository.
@@ -2629,7 +2485,6 @@
     repository_class = KnitPackRepository
     _commit_builder_class = PackRootCommitBuilder
     rich_root_data = True
-    experimental = True
     supports_tree_reference = True
     @property
     def _serializer(self):
@@ -2647,6 +2502,14 @@
 
     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
 
+    def check_conversion_target(self, target_format):
+        if not target_format.rich_root_data:
+            raise errors.BadConversionTarget(
+                'Does not support rich root data.', target_format)
+        if not getattr(target_format, 'supports_tree_reference', False):
+            raise errors.BadConversionTarget(
+                'Does not support nested trees', target_format)
+
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return "Bazaar pack repository format 1 with subtree support (needs bzr 0.92)\n"
@@ -2685,6 +2548,11 @@
 
     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
 
+    def check_conversion_target(self, target_format):
+        if not target_format.rich_root_data:
+            raise errors.BadConversionTarget(
+                'Does not support rich root data.', target_format)
+
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return ("Bazaar pack repository format 1 with rich root"
@@ -2731,6 +2599,9 @@
         """See RepositoryFormat.get_format_description()."""
         return "Packs 5 (adds stacking support, requires bzr 1.6)"
 
+    def check_conversion_target(self, target_format):
+        pass
+
 
 class RepositoryFormatKnitPack5RichRoot(RepositoryFormatPack):
     """A repository with rich roots and stacking.
@@ -2763,6 +2634,11 @@
 
     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
 
+    def check_conversion_target(self, target_format):
+        if not target_format.rich_root_data:
+            raise errors.BadConversionTarget(
+                'Does not support rich root data.', target_format)
+
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return "Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6.1)\n"
@@ -2809,6 +2685,11 @@
 
     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
 
+    def check_conversion_target(self, target_format):
+        if not target_format.rich_root_data:
+            raise errors.BadConversionTarget(
+                'Does not support rich root data.', target_format)
+
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return "Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6)\n"
@@ -2852,6 +2733,9 @@
         """See RepositoryFormat.get_format_description()."""
         return "Packs 6 (uses btree indexes, requires bzr 1.9)"
 
+    def check_conversion_target(self, target_format):
+        pass
+
 
 class RepositoryFormatKnitPack6RichRoot(RepositoryFormatPack):
     """A repository with rich roots, no subtrees, stacking and btree indexes.
@@ -2881,6 +2765,11 @@
 
     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
 
+    def check_conversion_target(self, target_format):
+        if not target_format.rich_root_data:
+            raise errors.BadConversionTarget(
+                'Does not support rich root data.', target_format)
+
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return "Bazaar RepositoryFormatKnitPack6RichRoot (bzr 1.9)\n"
@@ -2903,7 +2792,6 @@
     repository_class = KnitPackRepository
     _commit_builder_class = PackRootCommitBuilder
    rich_root_data = True
-    experimental = True
     supports_tree_reference = True
     supports_external_lookups = True
     # What index classes to use
@@ -2923,6 +2811,14 @@
 
     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
 
+    def check_conversion_target(self, target_format):
+        if not target_format.rich_root_data:
+            raise errors.BadConversionTarget(
+                'Does not support rich root data.', target_format)
+        if not getattr(target_format, 'supports_tree_reference', False):
+            raise errors.BadConversionTarget(
+                'Does not support nested trees', target_format)
+
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return ("Bazaar development format 2 with subtree support "