~bzr-pqm/bzr/bzr.dev

« back to all changes in this revision

Viewing changes to bzrlib/repofmt/pack_repo.py

  • Committer: Martin Pool
  • Date: 2010-01-29 14:09:05 UTC
  • mto: This revision was merged to the branch mainline in revision 4992.
  • Revision ID: mbp@sourcefrog.net-20100129140905-2uiarb6p8di1ywsr
Correction to URL

from review: https://code.edge.launchpad.net/~mbp/bzr/doc/+merge/18250

Show diffs side-by-side

added added

removed removed

Lines of Context:
24
24
 
25
25
from bzrlib import (
26
26
    chk_map,
27
 
    cleanup,
28
27
    debug,
29
28
    graph,
30
29
    osutils,
64
63
    GraphIndex,
65
64
    InMemoryGraphIndex,
66
65
    )
67
 
from bzrlib.lock import LogicalLockResult
68
66
from bzrlib.repofmt.knitrepo import KnitRepository
69
67
from bzrlib.repository import (
70
68
    CommitBuilder,
71
69
    MetaDirRepositoryFormat,
72
70
    RepositoryFormat,
73
 
    RepositoryWriteLockResult,
74
71
    RootCommitBuilder,
75
72
    StreamSource,
76
73
    )
589
586
                                             flush_func=flush_func)
590
587
        self.add_callback = None
591
588
 
 
589
    def replace_indices(self, index_to_pack, indices):
 
590
        """Replace the current mappings with fresh ones.
 
591
 
 
592
        This should probably not be used eventually, rather incremental add and
 
593
        removal of indices. It has been added during refactoring of existing
 
594
        code.
 
595
 
 
596
        :param index_to_pack: A mapping from index objects to
 
597
            (transport, name) tuples for the pack file data.
 
598
        :param indices: A list of indices.
 
599
        """
 
600
        # refresh the revision pack map dict without replacing the instance.
 
601
        self.index_to_pack.clear()
 
602
        self.index_to_pack.update(index_to_pack)
 
603
        # XXX: API break - clearly a 'replace' method would be good?
 
604
        self.combined_index._indices[:] = indices
 
605
        # the current add nodes callback for the current writable index if
 
606
        # there is one.
 
607
        self.add_callback = None
 
608
 
592
609
    def add_index(self, index, pack):
593
610
        """Add index to the aggregate, which is an index for Pack pack.
594
611
 
601
618
        # expose it to the index map
602
619
        self.index_to_pack[index] = pack.access_tuple()
603
620
        # put it at the front of the linear index list
604
 
        self.combined_index.insert_index(0, index, pack.name)
 
621
        self.combined_index.insert_index(0, index)
605
622
 
606
623
    def add_writable_index(self, index, pack):
607
624
        """Add an index which is able to have data added to it.
627
644
        self.data_access.set_writer(None, None, (None, None))
628
645
        self.index_to_pack.clear()
629
646
        del self.combined_index._indices[:]
630
 
        del self.combined_index._index_names[:]
631
647
        self.add_callback = None
632
648
 
633
 
    def remove_index(self, index):
 
649
    def remove_index(self, index, pack):
634
650
        """Remove index from the indices used to answer queries.
635
651
 
636
652
        :param index: An index from the pack parameter.
 
653
        :param pack: A Pack instance.
637
654
        """
638
655
        del self.index_to_pack[index]
639
 
        pos = self.combined_index._indices.index(index)
640
 
        del self.combined_index._indices[pos]
641
 
        del self.combined_index._index_names[pos]
 
656
        self.combined_index._indices.remove(index)
642
657
        if (self.add_callback is not None and
643
658
            getattr(index, 'add_nodes', None) == self.add_callback):
644
659
            self.add_callback = None
1400
1415
        self.inventory_index = AggregateIndex(self.reload_pack_names, flush)
1401
1416
        self.text_index = AggregateIndex(self.reload_pack_names, flush)
1402
1417
        self.signature_index = AggregateIndex(self.reload_pack_names, flush)
1403
 
        all_indices = [self.revision_index, self.inventory_index,
1404
 
                self.text_index, self.signature_index]
1405
1418
        if use_chk_index:
1406
1419
            self.chk_index = AggregateIndex(self.reload_pack_names, flush)
1407
 
            all_indices.append(self.chk_index)
1408
1420
        else:
1409
1421
            # used to determine if we're using a chk_index elsewhere.
1410
1422
            self.chk_index = None
1411
 
        # Tell all the CombinedGraphIndex objects about each other, so they can
1412
 
        # share hints about which pack names to search first.
1413
 
        all_combined = [agg_idx.combined_index for agg_idx in all_indices]
1414
 
        for combined_idx in all_combined:
1415
 
            combined_idx.set_sibling_indices(
1416
 
                set(all_combined).difference([combined_idx]))
1417
1423
        # resumed packs
1418
1424
        self._resumed_packs = []
1419
1425
 
1562
1568
        """Is the collection already packed?"""
1563
1569
        return not (self.repo._format.pack_compresses or (len(self._names) > 1))
1564
1570
 
1565
 
    def pack(self, hint=None, clean_obsolete_packs=False):
 
1571
    def pack(self, hint=None):
1566
1572
        """Pack the pack collection totally."""
1567
1573
        self.ensure_loaded()
1568
1574
        total_packs = len(self._names)
1584
1590
                pack_operations[-1][1].append(pack)
1585
1591
        self._execute_pack_operations(pack_operations, OptimisingPacker)
1586
1592
 
1587
 
        if clean_obsolete_packs:
1588
 
            self._clear_obsolete_packs()
1589
 
 
1590
1593
    def plan_autopack_combinations(self, existing_packs, pack_distribution):
1591
1594
        """Plan a pack operation.
1592
1595
 
1837
1840
        self._remove_pack_indices(pack)
1838
1841
        self.packs.remove(pack)
1839
1842
 
1840
 
    def _remove_pack_indices(self, pack, ignore_missing=False):
1841
 
        """Remove the indices for pack from the aggregated indices.
1842
 
        
1843
 
        :param ignore_missing: Suppress KeyErrors from calling remove_index.
1844
 
        """
1845
 
        for index_type in Pack.index_definitions.keys():
1846
 
            attr_name = index_type + '_index'
1847
 
            aggregate_index = getattr(self, attr_name)
1848
 
            if aggregate_index is not None:
1849
 
                pack_index = getattr(pack, attr_name)
1850
 
                try:
1851
 
                    aggregate_index.remove_index(pack_index)
1852
 
                except KeyError:
1853
 
                    if ignore_missing:
1854
 
                        continue
1855
 
                    raise
 
1843
    def _remove_pack_indices(self, pack):
 
1844
        """Remove the indices for pack from the aggregated indices."""
 
1845
        self.revision_index.remove_index(pack.revision_index, pack)
 
1846
        self.inventory_index.remove_index(pack.inventory_index, pack)
 
1847
        self.text_index.remove_index(pack.text_index, pack)
 
1848
        self.signature_index.remove_index(pack.signature_index, pack)
 
1849
        if self.chk_index is not None:
 
1850
            self.chk_index.remove_index(pack.chk_index, pack)
1856
1851
 
1857
1852
    def reset(self):
1858
1853
        """Clear all cached data."""
2096
2091
        # FIXME: just drop the transient index.
2097
2092
        # forget what names there are
2098
2093
        if self._new_pack is not None:
2099
 
            operation = cleanup.OperationWithCleanups(self._new_pack.abort)
2100
 
            operation.add_cleanup(setattr, self, '_new_pack', None)
2101
 
            # If we aborted while in the middle of finishing the write
2102
 
            # group, _remove_pack_indices could fail because the indexes are
2103
 
            # already gone.  But they're not there we shouldn't fail in this
2104
 
            # case, so we pass ignore_missing=True.
2105
 
            operation.add_cleanup(self._remove_pack_indices, self._new_pack,
2106
 
                ignore_missing=True)
2107
 
            operation.run_simple()
 
2094
            try:
 
2095
                self._new_pack.abort()
 
2096
            finally:
 
2097
                # XXX: If we aborted while in the middle of finishing the write
 
2098
                # group, _remove_pack_indices can fail because the indexes are
 
2099
                # already gone.  If they're not there we shouldn't fail in this
 
2100
                # case.  -- mbp 20081113
 
2101
                self._remove_pack_indices(self._new_pack)
 
2102
                self._new_pack = None
2108
2103
        for resumed_pack in self._resumed_packs:
2109
 
            operation = cleanup.OperationWithCleanups(resumed_pack.abort)
2110
 
            # See comment in previous finally block.
2111
 
            operation.add_cleanup(self._remove_pack_indices, resumed_pack,
2112
 
                ignore_missing=True)
2113
 
            operation.run_simple()
 
2104
            try:
 
2105
                resumed_pack.abort()
 
2106
            finally:
 
2107
                # See comment in previous finally block.
 
2108
                try:
 
2109
                    self._remove_pack_indices(resumed_pack)
 
2110
                except KeyError:
 
2111
                    pass
2114
2112
        del self._resumed_packs[:]
2115
2113
 
2116
2114
    def _remove_resumed_pack_indices(self):
2342
2340
        return self._write_lock_count
2343
2341
 
2344
2342
    def lock_write(self, token=None):
2345
 
        """Lock the repository for writes.
2346
 
 
2347
 
        :return: A bzrlib.repository.RepositoryWriteLockResult.
2348
 
        """
2349
2343
        locked = self.is_locked()
2350
2344
        if not self._write_lock_count and locked:
2351
2345
            raise errors.ReadOnlyError(self)
2360
2354
                # Writes don't affect fallback repos
2361
2355
                repo.lock_read()
2362
2356
            self._refresh_data()
2363
 
        return RepositoryWriteLockResult(self.unlock, None)
2364
2357
 
2365
2358
    def lock_read(self):
2366
 
        """Lock the repository for reads.
2367
 
 
2368
 
        :return: A bzrlib.lock.LogicalLockResult.
2369
 
        """
2370
2359
        locked = self.is_locked()
2371
2360
        if self._write_lock_count:
2372
2361
            self._write_lock_count += 1
2379
2368
            for repo in self._fallback_repositories:
2380
2369
                repo.lock_read()
2381
2370
            self._refresh_data()
2382
 
        return LogicalLockResult(self.unlock)
2383
2371
 
2384
2372
    def leave_lock_in_place(self):
2385
2373
        # not supported - raise an error
2390
2378
        raise NotImplementedError(self.dont_leave_lock_in_place)
2391
2379
 
2392
2380
    @needs_write_lock
2393
 
    def pack(self, hint=None, clean_obsolete_packs=False):
 
2381
    def pack(self, hint=None):
2394
2382
        """Compress the data within the repository.
2395
2383
 
2396
2384
        This will pack all the data to a single pack. In future it may
2397
2385
        recompress deltas or do other such expensive operations.
2398
2386
        """
2399
 
        self._pack_collection.pack(hint=hint, clean_obsolete_packs=clean_obsolete_packs)
 
2387
        self._pack_collection.pack(hint=hint)
2400
2388
 
2401
2389
    @needs_write_lock
2402
2390
    def reconcile(self, other=None, thorough=False):
2558
2546
        utf8_files = [('format', self.get_format_string())]
2559
2547
 
2560
2548
        self._upload_blank_content(a_bzrdir, dirs, files, utf8_files, shared)
2561
 
        repository = self.open(a_bzrdir=a_bzrdir, _found=True)
2562
 
        self._run_post_repo_init_hooks(repository, a_bzrdir, shared)
2563
 
        return repository
 
2549
        return self.open(a_bzrdir=a_bzrdir, _found=True)
2564
2550
 
2565
2551
    def open(self, a_bzrdir, _found=False, _override_transport=None):
2566
2552
        """See RepositoryFormat.open().
2629
2615
    repository_class = KnitPackRepository
2630
2616
    _commit_builder_class = PackRootCommitBuilder
2631
2617
    rich_root_data = True
2632
 
    experimental = True
2633
2618
    supports_tree_reference = True
2634
2619
    @property
2635
2620
    def _serializer(self):
2903
2888
    repository_class = KnitPackRepository
2904
2889
    _commit_builder_class = PackRootCommitBuilder
2905
2890
    rich_root_data = True
2906
 
    experimental = True
2907
2891
    supports_tree_reference = True
2908
2892
    supports_external_lookups = True
2909
2893
    # What index classes to use