~bzr-pqm/bzr/bzr.dev

« back to all changes in this revision

Viewing changes to bzrlib/repofmt/pack_repo.py

  • Committer: Danny van Heumen
  • Date: 2010-03-09 21:42:11 UTC
  • mto: (4634.139.5 2.0)
  • mto: This revision was merged to the branch mainline in revision 5160.
  • Revision ID: danny@dannyvanheumen.nl-20100309214211-iqh42x6qcikgd9p3
Reverted now-useless TODO list.

Show diffs side-by-side

added added

removed removed

Lines of Context:
24
24
 
25
25
from bzrlib import (
26
26
    chk_map,
27
 
    cleanup,
28
27
    debug,
29
28
    graph,
30
29
    osutils,
55
54
    revision as _mod_revision,
56
55
    )
57
56
 
58
 
from bzrlib.decorators import needs_write_lock, only_raises
 
57
from bzrlib.decorators import needs_write_lock
59
58
from bzrlib.btree_index import (
60
59
    BTreeGraphIndex,
61
60
    BTreeBuilder,
74
73
    )
75
74
from bzrlib.trace import (
76
75
    mutter,
77
 
    note,
78
76
    warning,
79
77
    )
80
78
 
587
585
                                             flush_func=flush_func)
588
586
        self.add_callback = None
589
587
 
 
588
    def replace_indices(self, index_to_pack, indices):
 
589
        """Replace the current mappings with fresh ones.
 
590
 
 
591
        This should probably not be used eventually, rather incremental add and
 
592
        removal of indices. It has been added during refactoring of existing
 
593
        code.
 
594
 
 
595
        :param index_to_pack: A mapping from index objects to
 
596
            (transport, name) tuples for the pack file data.
 
597
        :param indices: A list of indices.
 
598
        """
 
599
        # refresh the revision pack map dict without replacing the instance.
 
600
        self.index_to_pack.clear()
 
601
        self.index_to_pack.update(index_to_pack)
 
602
        # XXX: API break - clearly a 'replace' method would be good?
 
603
        self.combined_index._indices[:] = indices
 
604
        # the current add nodes callback for the current writable index if
 
605
        # there is one.
 
606
        self.add_callback = None
 
607
 
590
608
    def add_index(self, index, pack):
591
609
        """Add index to the aggregate, which is an index for Pack pack.
592
610
 
599
617
        # expose it to the index map
600
618
        self.index_to_pack[index] = pack.access_tuple()
601
619
        # put it at the front of the linear index list
602
 
        self.combined_index.insert_index(0, index, pack.name)
 
620
        self.combined_index.insert_index(0, index)
603
621
 
604
622
    def add_writable_index(self, index, pack):
605
623
        """Add an index which is able to have data added to it.
625
643
        self.data_access.set_writer(None, None, (None, None))
626
644
        self.index_to_pack.clear()
627
645
        del self.combined_index._indices[:]
628
 
        del self.combined_index._index_names[:]
629
646
        self.add_callback = None
630
647
 
631
 
    def remove_index(self, index):
 
648
    def remove_index(self, index, pack):
632
649
        """Remove index from the indices used to answer queries.
633
650
 
634
651
        :param index: An index from the pack parameter.
 
652
        :param pack: A Pack instance.
635
653
        """
636
654
        del self.index_to_pack[index]
637
 
        pos = self.combined_index._indices.index(index)
638
 
        del self.combined_index._indices[pos]
639
 
        del self.combined_index._index_names[pos]
 
655
        self.combined_index._indices.remove(index)
640
656
        if (self.add_callback is not None and
641
657
            getattr(index, 'add_nodes', None) == self.add_callback):
642
658
            self.add_callback = None
1100
1116
            iterator is a tuple with:
1101
1117
            index, readv_vector, node_vector. readv_vector is a list ready to
1102
1118
            hand to the transport readv method, and node_vector is a list of
1103
 
            (key, eol_flag, references) for the node retrieved by the
 
1119
            (key, eol_flag, references) for the node retrieved by the
1104
1120
            matching readv_vector.
1105
1121
        """
1106
1122
        # group by pack so we do one readv per pack
1398
1414
        self.inventory_index = AggregateIndex(self.reload_pack_names, flush)
1399
1415
        self.text_index = AggregateIndex(self.reload_pack_names, flush)
1400
1416
        self.signature_index = AggregateIndex(self.reload_pack_names, flush)
1401
 
        all_indices = [self.revision_index, self.inventory_index,
1402
 
                self.text_index, self.signature_index]
1403
1417
        if use_chk_index:
1404
1418
            self.chk_index = AggregateIndex(self.reload_pack_names, flush)
1405
 
            all_indices.append(self.chk_index)
1406
1419
        else:
1407
1420
            # used to determine if we're using a chk_index elsewhere.
1408
1421
            self.chk_index = None
1409
 
        # Tell all the CombinedGraphIndex objects about each other, so they can
1410
 
        # share hints about which pack names to search first.
1411
 
        all_combined = [agg_idx.combined_index for agg_idx in all_indices]
1412
 
        for combined_idx in all_combined:
1413
 
            combined_idx.set_sibling_indices(
1414
 
                set(all_combined).difference([combined_idx]))
1415
1422
        # resumed packs
1416
1423
        self._resumed_packs = []
1417
1424
 
1418
 
    def __repr__(self):
1419
 
        return '%s(%r)' % (self.__class__.__name__, self.repo)
1420
 
 
1421
1425
    def add_pack_to_memory(self, pack):
1422
1426
        """Make a Pack object available to the repository to satisfy queries.
1423
1427
 
1560
1564
        """Is the collection already packed?"""
1561
1565
        return not (self.repo._format.pack_compresses or (len(self._names) > 1))
1562
1566
 
1563
 
    def pack(self, hint=None, clean_obsolete_packs=False):
 
1567
    def pack(self, hint=None):
1564
1568
        """Pack the pack collection totally."""
1565
1569
        self.ensure_loaded()
1566
1570
        total_packs = len(self._names)
1582
1586
                pack_operations[-1][1].append(pack)
1583
1587
        self._execute_pack_operations(pack_operations, OptimisingPacker)
1584
1588
 
1585
 
        if clean_obsolete_packs:
1586
 
            self._clear_obsolete_packs()
1587
 
 
1588
1589
    def plan_autopack_combinations(self, existing_packs, pack_distribution):
1589
1590
        """Plan a pack operation.
1590
1591
 
1835
1836
        self._remove_pack_indices(pack)
1836
1837
        self.packs.remove(pack)
1837
1838
 
1838
 
    def _remove_pack_indices(self, pack, ignore_missing=False):
1839
 
        """Remove the indices for pack from the aggregated indices.
1840
 
        
1841
 
        :param ignore_missing: Suppress KeyErrors from calling remove_index.
1842
 
        """
1843
 
        for index_type in Pack.index_definitions.keys():
1844
 
            attr_name = index_type + '_index'
1845
 
            aggregate_index = getattr(self, attr_name)
1846
 
            if aggregate_index is not None:
1847
 
                pack_index = getattr(pack, attr_name)
1848
 
                try:
1849
 
                    aggregate_index.remove_index(pack_index)
1850
 
                except KeyError:
1851
 
                    if ignore_missing:
1852
 
                        continue
1853
 
                    raise
 
1839
    def _remove_pack_indices(self, pack):
 
1840
        """Remove the indices for pack from the aggregated indices."""
 
1841
        self.revision_index.remove_index(pack.revision_index, pack)
 
1842
        self.inventory_index.remove_index(pack.inventory_index, pack)
 
1843
        self.text_index.remove_index(pack.text_index, pack)
 
1844
        self.signature_index.remove_index(pack.signature_index, pack)
 
1845
        if self.chk_index is not None:
 
1846
            self.chk_index.remove_index(pack.chk_index, pack)
1854
1847
 
1855
1848
    def reset(self):
1856
1849
        """Clear all cached data."""
2094
2087
        # FIXME: just drop the transient index.
2095
2088
        # forget what names there are
2096
2089
        if self._new_pack is not None:
2097
 
            operation = cleanup.OperationWithCleanups(self._new_pack.abort)
2098
 
            operation.add_cleanup(setattr, self, '_new_pack', None)
2099
 
            # If we aborted while in the middle of finishing the write
2100
 
            # group, _remove_pack_indices could fail because the indexes are
2101
 
            # already gone.  But if they're not there we shouldn't fail in this
2102
 
            # case, so we pass ignore_missing=True.
2103
 
            operation.add_cleanup(self._remove_pack_indices, self._new_pack,
2104
 
                ignore_missing=True)
2105
 
            operation.run_simple()
 
2090
            try:
 
2091
                self._new_pack.abort()
 
2092
            finally:
 
2093
                # XXX: If we aborted while in the middle of finishing the write
 
2094
                # group, _remove_pack_indices can fail because the indexes are
 
2095
                # already gone.  If they're not there we shouldn't fail in this
 
2096
                # case.  -- mbp 20081113
 
2097
                self._remove_pack_indices(self._new_pack)
 
2098
                self._new_pack = None
2106
2099
        for resumed_pack in self._resumed_packs:
2107
 
            operation = cleanup.OperationWithCleanups(resumed_pack.abort)
2108
 
            # See comment in previous finally block.
2109
 
            operation.add_cleanup(self._remove_pack_indices, resumed_pack,
2110
 
                ignore_missing=True)
2111
 
            operation.run_simple()
 
2100
            try:
 
2101
                resumed_pack.abort()
 
2102
            finally:
 
2103
                # See comment in previous finally block.
 
2104
                try:
 
2105
                    self._remove_pack_indices(resumed_pack)
 
2106
                except KeyError:
 
2107
                    pass
2112
2108
        del self._resumed_packs[:]
2113
2109
 
2114
2110
    def _remove_resumed_pack_indices(self):
2280
2276
        self._reconcile_fixes_text_parents = True
2281
2277
        self._reconcile_backsup_inventory = False
2282
2278
 
2283
 
    def _warn_if_deprecated(self, branch=None):
 
2279
    def _warn_if_deprecated(self):
2284
2280
        # This class isn't deprecated, but one sub-format is
2285
2281
        if isinstance(self._format, RepositoryFormatKnitPack5RichRootBroken):
2286
 
            super(KnitPackRepository, self)._warn_if_deprecated(branch)
 
2282
            from bzrlib import repository
 
2283
            if repository._deprecation_warning_done:
 
2284
                return
 
2285
            repository._deprecation_warning_done = True
 
2286
            warning("Format %s for %s is deprecated - please use"
 
2287
                    " 'bzr upgrade --1.6.1-rich-root'"
 
2288
                    % (self._format, self.bzrdir.transport.base))
2287
2289
 
2288
2290
    def _abort_write_group(self):
2289
2291
        self.revisions._index._key_dependencies.clear()
2347
2349
        if self._write_lock_count == 1:
2348
2350
            self._transaction = transactions.WriteTransaction()
2349
2351
        if not locked:
2350
 
            if 'relock' in debug.debug_flags and self._prev_lock == 'w':
2351
 
                note('%r was write locked again', self)
2352
 
            self._prev_lock = 'w'
2353
2352
            for repo in self._fallback_repositories:
2354
2353
                # Writes don't affect fallback repos
2355
2354
                repo.lock_read()
2362
2361
        else:
2363
2362
            self.control_files.lock_read()
2364
2363
        if not locked:
2365
 
            if 'relock' in debug.debug_flags and self._prev_lock == 'r':
2366
 
                note('%r was read locked again', self)
2367
 
            self._prev_lock = 'r'
2368
2364
            for repo in self._fallback_repositories:
2369
2365
                repo.lock_read()
2370
2366
            self._refresh_data()
2378
2374
        raise NotImplementedError(self.dont_leave_lock_in_place)
2379
2375
 
2380
2376
    @needs_write_lock
2381
 
    def pack(self, hint=None, clean_obsolete_packs=False):
 
2377
    def pack(self, hint=None):
2382
2378
        """Compress the data within the repository.
2383
2379
 
2384
2380
        This will pack all the data to a single pack. In future it may
2385
2381
        recompress deltas or do other such expensive operations.
2386
2382
        """
2387
 
        self._pack_collection.pack(hint=hint, clean_obsolete_packs=clean_obsolete_packs)
 
2383
        self._pack_collection.pack(hint=hint)
2388
2384
 
2389
2385
    @needs_write_lock
2390
2386
    def reconcile(self, other=None, thorough=False):
2398
2394
        packer = ReconcilePacker(collection, packs, extension, revs)
2399
2395
        return packer.pack(pb)
2400
2396
 
2401
 
    @only_raises(errors.LockNotHeld, errors.LockBroken)
2402
2397
    def unlock(self):
2403
2398
        if self._write_lock_count == 1 and self._write_group is not None:
2404
2399
            self.abort_write_group()
2546
2541
        utf8_files = [('format', self.get_format_string())]
2547
2542
 
2548
2543
        self._upload_blank_content(a_bzrdir, dirs, files, utf8_files, shared)
2549
 
        repository = self.open(a_bzrdir=a_bzrdir, _found=True)
2550
 
        self._run_post_repo_init_hooks(repository, a_bzrdir, shared)
2551
 
        return repository
 
2544
        return self.open(a_bzrdir=a_bzrdir, _found=True)
2552
2545
 
2553
2546
    def open(self, a_bzrdir, _found=False, _override_transport=None):
2554
2547
        """See RepositoryFormat.open().
2617
2610
    repository_class = KnitPackRepository
2618
2611
    _commit_builder_class = PackRootCommitBuilder
2619
2612
    rich_root_data = True
2620
 
    experimental = True
2621
2613
    supports_tree_reference = True
2622
2614
    @property
2623
2615
    def _serializer(self):
2891
2883
    repository_class = KnitPackRepository
2892
2884
    _commit_builder_class = PackRootCommitBuilder
2893
2885
    rich_root_data = True
2894
 
    experimental = True
2895
2886
    supports_tree_reference = True
2896
2887
    supports_external_lookups = True
2897
2888
    # What index classes to use