~bzr-pqm/bzr/bzr.dev

Viewing changes to bzrlib/repofmt/pack_repo.py

  • Committer: Canonical.com Patch Queue Manager
  • Date: 2009-04-09 20:23:07 UTC
  • mfrom: (4265.1.4 bbc-merge)
  • Revision ID: pqm@pqm.ubuntu.com-20090409202307-n0depb16qepoe21o
(jam) Change _fetch_uses_deltas = False for CHK repos until we can write a better fix.
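
The flag named in the commit message is a class attribute on the repository format. As a rough illustration of the kind of change described (a hedged sketch only: the class name below is hypothetical, and the attribute flip itself is not among the hunks shown on this page):

    # Sketch of the change the commit message describes (hypothetical class
    # name; Python 2, matching bzrlib of this era).
    class SomeCHKRepositoryFormat(RepositoryFormat):
        # Deltas cannot be transmitted safely for CHK repositories yet, so
        # fetch falls back to fulltexts until a better fix lands.
        _fetch_uses_deltas = False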

--- bzrlib/repofmt/pack_repo.py	(old)
+++ bzrlib/repofmt/pack_repo.py	(new)
@@ -36,7 +36,10 @@
     )
 from bzrlib.index import (
     CombinedGraphIndex,
+    GraphIndex,
+    GraphIndexBuilder,
     GraphIndexPrefixAdapter,
+    InMemoryGraphIndex,
     )
 from bzrlib.knit import (
     KnitPlainFactory,
@@ -52,6 +55,7 @@
     lockable_files,
     lockdir,
     revision as _mod_revision,
+    symbol_versioning,
     )
 
 from bzrlib.decorators import needs_write_lock
@@ -69,8 +73,8 @@
     MetaDirRepositoryFormat,
     RepositoryFormat,
     RootCommitBuilder,
-    StreamSource,
     )
+import bzrlib.revision as _mod_revision
 from bzrlib.trace import (
     mutter,
     warning,
@@ -264,11 +268,10 @@
 
     def __init__(self, name, revision_index, inventory_index, text_index,
         signature_index, upload_transport, pack_transport, index_transport,
-        pack_collection, chk_index=None):
+        pack_collection):
         """Create a ResumedPack object."""
         ExistingPack.__init__(self, pack_transport, name, revision_index,
-            inventory_index, text_index, signature_index,
-            chk_index=chk_index)
+            inventory_index, text_index, signature_index)
         self.upload_transport = upload_transport
         self.index_transport = index_transport
         self.index_sizes = [None, None, None, None]
@@ -278,9 +281,6 @@
             ('text', text_index),
             ('signature', signature_index),
             ]
-        if chk_index is not None:
-            indices.append(('chk', chk_index))
-            self.index_sizes.append(None)
         for index_type, index in indices:
             offset = self.index_offset(index_type)
             self.index_sizes[offset] = index._size
@@ -301,31 +301,21 @@
         self.upload_transport.delete(self.file_name())
         indices = [self.revision_index, self.inventory_index, self.text_index,
             self.signature_index]
-        if self.chk_index is not None:
-            indices.append(self.chk_index)
         for index in indices:
             index._transport.delete(index._name)
 
     def finish(self):
         self._check_references()
-        index_types = ['revision', 'inventory', 'text', 'signature']
-        if self.chk_index is not None:
-            index_types.append('chk')
-        for index_type in index_types:
+        new_name = '../packs/' + self.file_name()
+        self.upload_transport.rename(self.file_name(), new_name)
+        for index_type in ['revision', 'inventory', 'text', 'signature']:
             old_name = self.index_name(index_type, self.name)
             new_name = '../indices/' + old_name
             self.upload_transport.rename(old_name, new_name)
             self._replace_index_with_readonly(index_type)
-        new_name = '../packs/' + self.file_name()
-        self.upload_transport.rename(self.file_name(), new_name)
         self._state = 'finished'
 
     def _get_external_refs(self, index):
-        """Return compression parents for this index that are not present.
-
-        This returns any compression parents that are referenced by this index,
-        which are not contained *in* this index. They may be present elsewhere.
-        """
         return index.external_references(1)
 
 
@@ -422,8 +412,6 @@
         self._writer.begin()
         # what state is the pack in? (open, finished, aborted)
         self._state = 'open'
-        # no name until we finish writing the content
-        self.name = None
 
     def abort(self):
         """Cancel creating this pack."""
@@ -450,14 +438,6 @@
             self.signature_index.key_count() or
             (self.chk_index is not None and self.chk_index.key_count()))
 
-    def finish_content(self):
-        if self.name is not None:
-            return
-        self._writer.end()
-        if self._buffer[1]:
-            self._write_data('', flush=True)
-        self.name = self._hash.hexdigest()
-
     def finish(self, suspend=False):
         """Finish the new pack.
 
@@ -469,7 +449,10 @@
          - stores the index size tuple for the pack in the index_sizes
            attribute.
         """
-        self.finish_content()
+        self._writer.end()
+        if self._buffer[1]:
+            self._write_data('', flush=True)
+        self.name = self._hash.hexdigest()
         if not suspend:
             self._check_references()
         # write indices
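
Note on the hunk above: one side factors the writer shutdown and pack naming out of finish() into finish_content(); on both sides, finish(suspend=True) skips _check_references() so a suspended pack can be resumed later. A minimal usage sketch (illustrative only; assumes a populated NewPack):

    # Illustrative: the two ways finish() is driven.
    pack.finish()              # normal path: checks references, writes indices
    pack.finish(suspend=True)  # suspend path: defers the reference check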
@@ -1309,7 +1292,7 @@
         # space (we only topo sort the revisions, which is smaller).
         topo_order = tsort.topo_sort(ancestors)
         rev_order = dict(zip(topo_order, range(len(topo_order))))
-        bad_texts.sort(key=lambda key:rev_order.get(key[0][1], 0))
+        bad_texts.sort(key=lambda key:rev_order[key[0][1]])
         transaction = repo.get_transaction()
         file_id_index = GraphIndexPrefixAdapter(
             self.new_pack.text_index,
@@ -1369,7 +1352,6 @@
     """
 
     pack_factory = NewPack
-    resumed_pack_factory = ResumedPack
 
     def __init__(self, repo, transport, index_transport, upload_transport,
                  pack_transport, index_builder_class, index_class,
@@ -1461,12 +1443,12 @@
         in synchronisation with certain steps. Otherwise the names collection
         is not flushed.
 
-        :return: Something evaluating true if packing took place.
+        :return: True if packing took place.
         """
         while True:
             try:
                 return self._do_autopack()
-            except errors.RetryAutopack:
+            except errors.RetryAutopack, e:
                 # If we get a RetryAutopack exception, we should abort the
                 # current action, and retry.
                 pass
@@ -1476,7 +1458,7 @@
         total_revisions = self.revision_index.combined_index.key_count()
         total_packs = len(self._names)
         if self._max_pack_count(total_revisions) >= total_packs:
-            return None
+            return False
         # determine which packs need changing
         pack_distribution = self.pack_distribution(total_revisions)
         existing_packs = []
@@ -1504,10 +1486,10 @@
             'containing %d revisions. Packing %d files into %d affecting %d'
             ' revisions', self, total_packs, total_revisions, num_old_packs,
             num_new_packs, num_revs_affected)
-        result = self._execute_pack_operations(pack_operations,
+        self._execute_pack_operations(pack_operations,
                                       reload_func=self._restart_autopack)
         mutter('Auto-packing repository %s completed', self)
-        return result
+        return True
 
     def _execute_pack_operations(self, pack_operations, _packer_class=Packer,
                                  reload_func=None):
@@ -1515,7 +1497,7 @@
 
         :param pack_operations: A list of [revision_count, packs_to_combine].
         :param _packer_class: The class of packer to use (default: Packer).
-        :return: The new pack names.
+        :return: None.
         """
         for revision_count, packs in pack_operations:
             # we may have no-ops from the setup logic
@@ -1537,11 +1519,10 @@
                 self._remove_pack_from_memory(pack)
         # record the newly available packs and stop advertising the old
         # packs
-        result = self._save_pack_names(clear_obsolete_packs=True)
+        self._save_pack_names(clear_obsolete_packs=True)
         # Move the old packs out of the way now they are no longer referenced.
         for revision_count, packs in pack_operations:
             self._obsolete_packs(packs)
-        return result
 
     def _flush_new_pack(self):
         if self._new_pack is not None:
@@ -1557,28 +1538,29 @@
 
     def _already_packed(self):
         """Is the collection already packed?"""
-        return not (self.repo._format.pack_compresses or (len(self._names) > 1))
+        return len(self._names) < 2
 
-    def pack(self, hint=None):
+    def pack(self):
         """Pack the pack collection totally."""
         self.ensure_loaded()
         total_packs = len(self._names)
         if self._already_packed():
+            # This is arguably wrong because we might not be optimal, but for
+            # now lets leave it in. (e.g. reconcile -> one pack. But not
+            # optimal.
             return
         total_revisions = self.revision_index.combined_index.key_count()
         # XXX: the following may want to be a class, to pack with a given
         # policy.
         mutter('Packing repository %s, which has %d pack files, '
-            'containing %d revisions with hint %r.', self, total_packs,
-            total_revisions, hint)
+            'containing %d revisions into 1 packs.', self, total_packs,
+            total_revisions)
         # determine which packs need changing
+        pack_distribution = [1]
         pack_operations = [[0, []]]
         for pack in self.all_packs():
-            if hint is None or pack.name in hint:
-                # Either no hint was provided (so we are packing everything),
-                # or this pack was included in the hint.
-                pack_operations[-1][0] += pack.get_revision_count()
-                pack_operations[-1][1].append(pack)
+            pack_operations[-1][0] += pack.get_revision_count()
+            pack_operations[-1][1].append(pack)
         self._execute_pack_operations(pack_operations, OptimisingPacker)
 
     def plan_autopack_combinations(self, existing_packs, pack_distribution):
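
On the side of this hunk that accepts a hint, only packs whose names appear in the hint are repacked, and hint=None means pack everything. A usage sketch (illustrative only; the pack names are placeholders):

    # Illustrative: repack just two named pack files via the hint-taking
    # variant of pack().
    repo.lock_write()
    try:
        repo.pack(hint=['0a1b2c', '3d4e5f'])  # placeholder pack names
    finally:
        repo.unlock()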
@@ -1698,14 +1680,9 @@
             inv_index = self._make_index(name, '.iix', resume=True)
             txt_index = self._make_index(name, '.tix', resume=True)
             sig_index = self._make_index(name, '.six', resume=True)
-            if self.chk_index is not None:
-                chk_index = self._make_index(name, '.cix', resume=True)
-            else:
-                chk_index = None
-            result = self.resumed_pack_factory(name, rev_index, inv_index,
-                txt_index, sig_index, self._upload_transport,
-                self._pack_transport, self._index_transport, self,
-                chk_index=chk_index)
+            result = ResumedPack(name, rev_index, inv_index, txt_index,
+                sig_index, self._upload_transport, self._pack_transport,
+                self._index_transport, self)
         except errors.NoSuchFile, e:
             raise errors.UnresumableWriteGroup(self.repo, [name], str(e))
         self.add_pack_to_memory(result)
@@ -1832,11 +1809,14 @@
     def reset(self):
         """Clear all cached data."""
         # cached revision data
+        self.repo._revision_knit = None
         self.revision_index.clear()
         # cached signature data
+        self.repo._signature_knit = None
         self.signature_index.clear()
         # cached file text data
         self.text_index.clear()
+        self.repo._text_knit = None
         # cached inventory data
         self.inventory_index.clear()
         # cached chk data
@@ -1940,7 +1920,6 @@
 
         :param clear_obsolete_packs: If True, clear out the contents of the
             obsolete_packs directory.
-        :return: A list of the names saved that were not previously on disk.
         """
         self.lock_names()
         try:
@@ -1961,7 +1940,6 @@
             self._unlock_names()
         # synchronise the memory packs list with what we just wrote:
         self._syncronize_pack_names_from_disk_nodes(disk_nodes)
-        return [new_node[0][0] for new_node in new_nodes]
 
     def reload_pack_names(self):
         """Sync our pack listing with what is present in the repository.
@@ -2057,6 +2035,7 @@
                 except KeyError:
                     pass
         del self._resumed_packs[:]
+        self.repo._text_knit = None
 
     def _remove_resumed_pack_indices(self):
         for resumed_pack in self._resumed_packs:
@@ -2101,8 +2080,8 @@
             if not self.autopack():
                 # when autopack takes no steps, the names list is still
                 # unsaved.
-                return self._save_pack_names()
-        return []
+                self._save_pack_names()
+        self.repo._text_knit = None
 
     def _suspend_write_group(self):
         tokens = [pack.name for pack in self._resumed_packs]
@@ -2116,6 +2095,7 @@
             self._new_pack.abort()
             self._new_pack = None
         self._remove_resumed_pack_indices()
+        self.repo._text_knit = None
         return tokens
 
     def _resume_write_group(self, tokens):
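
These methods back the repository-level suspend/resume API; the tokens are simply the names of the suspended packs. A sketch of the round trip (illustrative only):

    # Illustrative: suspend a write group and resume it later.
    repo.lock_write()
    repo.start_write_group()
    # ... insert some records ...
    tokens = repo.suspend_write_group()  # names of the suspended packs
    # later, possibly from another process opening the same repository:
    repo.resume_write_group(tokens)
    repo.commit_write_group()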
@@ -2170,8 +2150,7 @@
         self.revisions = KnitVersionedFiles(
             _KnitGraphIndex(self._pack_collection.revision_index.combined_index,
                 add_callback=self._pack_collection.revision_index.add_callback,
-                deltas=False, parents=True, is_locked=self.is_locked,
-                track_external_parent_refs=True),
+                deltas=False, parents=True, is_locked=self.is_locked),
             data_access=self._pack_collection.revision_index.data_access,
             max_delta_chain=0)
         self.signatures = KnitVersionedFiles(
@@ -2222,13 +2201,53 @@
                     % (self._format, self.bzrdir.transport.base))
 
     def _abort_write_group(self):
-        self.revisions._index._key_dependencies.refs.clear()
         self._pack_collection._abort_write_group()
 
-    def _get_source(self, to_format):
-        if to_format.network_name() == self._format.network_name():
-            return KnitPackStreamSource(self, to_format)
-        return super(KnitPackRepository, self)._get_source(to_format)
+    def _find_inconsistent_revision_parents(self):
+        """Find revisions with incorrectly cached parents.
+
+        :returns: an iterator yielding tuples of (revison-id, parents-in-index,
+            parents-in-revision).
+        """
+        if not self.is_locked():
+            raise errors.ObjectNotLocked(self)
+        pb = ui.ui_factory.nested_progress_bar()
+        result = []
+        try:
+            revision_nodes = self._pack_collection.revision_index \
+                .combined_index.iter_all_entries()
+            index_positions = []
+            # Get the cached index values for all revisions, and also the
+            # location in each index of the revision text so we can perform
+            # linear IO.
+            for index, key, value, refs in revision_nodes:
+                node = (index, key, value, refs)
+                index_memo = self.revisions._index._node_to_position(node)
+                if index_memo[0] != index:
+                    raise AssertionError('%r != %r' % (index_memo[0], index))
+                index_positions.append((index_memo, key[0],
+                                       tuple(parent[0] for parent in refs[0])))
+                pb.update("Reading revision index", 0, 0)
+            index_positions.sort()
+            batch_size = 1000
+            pb.update("Checking cached revision graph", 0,
+                      len(index_positions))
+            for offset in xrange(0, len(index_positions), 1000):
+                pb.update("Checking cached revision graph", offset)
+                to_query = index_positions[offset:offset + batch_size]
+                if not to_query:
+                    break
+                rev_ids = [item[1] for item in to_query]
+                revs = self.get_revisions(rev_ids)
+                for revision, item in zip(revs, to_query):
+                    index_parents = item[2]
+                    rev_parents = tuple(revision.parent_ids)
+                    if index_parents != rev_parents:
+                        result.append((revision.revision_id, index_parents,
+                                       rev_parents))
+        finally:
+            pb.finished()
+        return result
 
     def _make_parents_provider(self):
         return graph.CachingParentsProvider(self)
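
The _find_inconsistent_revision_parents method added above reads the revision index in batches of 1000 keys and compares the cached parent pointers with each revision's real parent_ids; reconcile-style callers consume it like this (an illustrative sketch):

    # Illustrative: report revisions whose cached parents disagree with
    # the revision texts.
    for rev_id, index_parents, rev_parents in \
            repo._find_inconsistent_revision_parents():
        print "bad parents for %s: index=%r revision=%r" % (
            rev_id, index_parents, rev_parents)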
@@ -2242,25 +2261,17 @@
         self._pack_collection._start_write_group()
 
     def _commit_write_group(self):
-        self.revisions._index._key_dependencies.refs.clear()
         return self._pack_collection._commit_write_group()
 
     def suspend_write_group(self):
         # XXX check self._write_group is self.get_transaction()?
         tokens = self._pack_collection._suspend_write_group()
-        self.revisions._index._key_dependencies.refs.clear()
         self._write_group = None
         return tokens
 
     def _resume_write_group(self, tokens):
         self._start_write_group()
-        try:
-            self._pack_collection._resume_write_group(tokens)
-        except errors.UnresumableWriteGroup:
-            self._abort_write_group()
-            raise
-        for pack in self._pack_collection._resumed_packs:
-            self.revisions._index.scan_unvalidated_index(pack.revision_index)
+        self._pack_collection._resume_write_group(tokens)
 
     def get_transaction(self):
         if self._write_lock_count:
@@ -2281,10 +2292,10 @@
         self._write_lock_count += 1
         if self._write_lock_count == 1:
             self._transaction = transactions.WriteTransaction()
-        if not locked:
             for repo in self._fallback_repositories:
                 # Writes don't affect fallback repos
                 repo.lock_read()
+        if not locked:
             self._refresh_data()
 
     def lock_read(self):
@@ -2293,9 +2304,10 @@
             self._write_lock_count += 1
         else:
             self.control_files.lock_read()
-        if not locked:
             for repo in self._fallback_repositories:
+                # Writes don't affect fallback repos
                 repo.lock_read()
+        if not locked:
             self._refresh_data()
 
     def leave_lock_in_place(self):
@@ -2307,13 +2319,13 @@
         raise NotImplementedError(self.dont_leave_lock_in_place)
 
     @needs_write_lock
-    def pack(self, hint=None):
+    def pack(self):
         """Compress the data within the repository.
 
         This will pack all the data to a single pack. In future it may
         recompress deltas or do other such expensive operations.
         """
-        self._pack_collection.pack(hint=hint)
+        self._pack_collection.pack()
 
     @needs_write_lock
     def reconcile(self, other=None, thorough=False):
@@ -2341,87 +2353,14 @@
                 transaction = self._transaction
                 self._transaction = None
                 transaction.finish()
+                for repo in self._fallback_repositories:
+                    repo.unlock()
         else:
             self.control_files.unlock()
-
-        if not self.is_locked():
             for repo in self._fallback_repositories:
                 repo.unlock()
 
 
-class KnitPackStreamSource(StreamSource):
-    """A StreamSource used to transfer data between same-format KnitPack repos.
-
-    This source assumes:
-        1) Same serialization format for all objects
-        2) Same root information
-        3) XML format inventories
-        4) Atomic inserts (so we can stream inventory texts before text
-           content)
-        5) No chk_bytes
-    """
-
-    def __init__(self, from_repository, to_format):
-        super(KnitPackStreamSource, self).__init__(from_repository, to_format)
-        self._text_keys = None
-        self._text_fetch_order = 'unordered'
-
-    def _get_filtered_inv_stream(self, revision_ids):
-        from_repo = self.from_repository
-        parent_ids = from_repo._find_parent_ids_of_revisions(revision_ids)
-        parent_keys = [(p,) for p in parent_ids]
-        find_text_keys = from_repo._find_text_key_references_from_xml_inventory_lines
-        parent_text_keys = set(find_text_keys(
-            from_repo._inventory_xml_lines_for_keys(parent_keys)))
-        content_text_keys = set()
-        knit = KnitVersionedFiles(None, None)
-        factory = KnitPlainFactory()
-        def find_text_keys_from_content(record):
-            if record.storage_kind not in ('knit-delta-gz', 'knit-ft-gz'):
-                raise ValueError("Unknown content storage kind for"
-                    " inventory text: %s" % (record.storage_kind,))
-            # It's a knit record, it has a _raw_record field (even if it was
-            # reconstituted from a network stream).
-            raw_data = record._raw_record
-            # read the entire thing
-            revision_id = record.key[-1]
-            content, _ = knit._parse_record(revision_id, raw_data)
-            if record.storage_kind == 'knit-delta-gz':
-                line_iterator = factory.get_linedelta_content(content)
-            elif record.storage_kind == 'knit-ft-gz':
-                line_iterator = factory.get_fulltext_content(content)
-            content_text_keys.update(find_text_keys(
-                [(line, revision_id) for line in line_iterator]))
-        revision_keys = [(r,) for r in revision_ids]
-        def _filtered_inv_stream():
-            source_vf = from_repo.inventories
-            stream = source_vf.get_record_stream(revision_keys,
-                                                 'unordered', False)
-            for record in stream:
-                if record.storage_kind == 'absent':
-                    raise errors.NoSuchRevision(from_repo, record.key)
-                find_text_keys_from_content(record)
-                yield record
-            self._text_keys = content_text_keys - parent_text_keys
-        return ('inventories', _filtered_inv_stream())
-
-    def _get_text_stream(self):
-        # Note: We know we don't have to handle adding root keys, because both
-        # the source and target are the identical network name.
-        text_stream = self.from_repository.texts.get_record_stream(
-                        self._text_keys, self._text_fetch_order, False)
-        return ('texts', text_stream)
-
-    def get_stream(self, search):
-        revision_ids = search.get_keys()
-        for stream_info in self._fetch_revision_texts(revision_ids):
-            yield stream_info
-        self._revision_keys = [(rev_id,) for rev_id in revision_ids]
-        yield self._get_filtered_inv_stream(revision_ids)
-        yield self._get_text_stream()
-
-
-
 class RepositoryFormatPack(MetaDirRepositoryFormat):
     """Format logic for pack structured repositories.
 
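
The KnitPackStreamSource removed above is chosen by the (also removed) _get_source() override when source and target share a network name; like any StreamSource it yields named substreams. A consumption sketch (illustrative only; assumes a search result describing the revisions to fetch):

    # Illustrative: a stream source yields (substream_name, records) pairs,
    # here revision-related substreams, then 'inventories', then 'texts'.
    source = from_repo._get_source(to_repo._format)
    for substream_kind, substream in source.get_stream(search):
        for record in substream:
            pass  # a real sink would insert each record into the target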
@@ -2529,6 +2468,9 @@
         """See RepositoryFormat.get_format_description()."""
         return "Packs containing knits without subtree support"
 
+    def check_conversion_target(self, target_format):
+        pass
+
 
 class RepositoryFormatKnitPack3(RepositoryFormatPack):
     """A subtrees parameterized Pack repository.
@@ -2560,6 +2502,14 @@
 
     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
 
+    def check_conversion_target(self, target_format):
+        if not target_format.rich_root_data:
+            raise errors.BadConversionTarget(
+                'Does not support rich root data.', target_format)
+        if not getattr(target_format, 'supports_tree_reference', False):
+            raise errors.BadConversionTarget(
+                'Does not support nested trees', target_format)
+
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return "Bazaar pack repository format 1 with subtree support (needs bzr 0.92)\n"
@@ -2598,6 +2548,11 @@
 
     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
 
+    def check_conversion_target(self, target_format):
+        if not target_format.rich_root_data:
+            raise errors.BadConversionTarget(
+                'Does not support rich root data.', target_format)
+
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return ("Bazaar pack repository format 1 with rich root"
@@ -2644,6 +2599,9 @@
         """See RepositoryFormat.get_format_description()."""
         return "Packs 5 (adds stacking support, requires bzr 1.6)"
 
+    def check_conversion_target(self, target_format):
+        pass
+
 
 class RepositoryFormatKnitPack5RichRoot(RepositoryFormatPack):
     """A repository with rich roots and stacking.
@@ -2676,6 +2634,11 @@
 
     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
 
+    def check_conversion_target(self, target_format):
+        if not target_format.rich_root_data:
+            raise errors.BadConversionTarget(
+                'Does not support rich root data.', target_format)
+
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return "Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6.1)\n"
@@ -2722,6 +2685,11 @@
 
     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
 
+    def check_conversion_target(self, target_format):
+        if not target_format.rich_root_data:
+            raise errors.BadConversionTarget(
+                'Does not support rich root data.', target_format)
+
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return "Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6)\n"
@@ -2765,6 +2733,9 @@
         """See RepositoryFormat.get_format_description()."""
         return "Packs 6 (uses btree indexes, requires bzr 1.9)"
 
+    def check_conversion_target(self, target_format):
+        pass
+
 
 class RepositoryFormatKnitPack6RichRoot(RepositoryFormatPack):
     """A repository with rich roots, no subtrees, stacking and btree indexes.
@@ -2794,6 +2765,11 @@
 
     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
 
+    def check_conversion_target(self, target_format):
+        if not target_format.rich_root_data:
+            raise errors.BadConversionTarget(
+                'Does not support rich root data.', target_format)
+
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return "Bazaar RepositoryFormatKnitPack6RichRoot (bzr 1.9)\n"
@@ -2835,6 +2811,14 @@
 
     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
 
+    def check_conversion_target(self, target_format):
+        if not target_format.rich_root_data:
+            raise errors.BadConversionTarget(
+                'Does not support rich root data.', target_format)
+        if not getattr(target_format, 'supports_tree_reference', False):
+            raise errors.BadConversionTarget(
+                'Does not support nested trees', target_format)
+
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return ("Bazaar development format 2 with subtree support "