# Copyright (C) 2007-2011 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

import re
import sys

from bzrlib.lazy_import import lazy_import
lazy_import(globals(), """
from itertools import izip
import time

from bzrlib import (
    cleanup,
    config,
    debug,
    graph,
    osutils,
    pack,
    ui,
    )
from bzrlib.index import (
    CombinedGraphIndex,
    GraphIndexPrefixAdapter,
    )
""")
from bzrlib import (
    btree_index,
    errors,
    )

from bzrlib.decorators import (
    needs_read_lock,
    needs_write_lock,
    only_raises,
    )
from bzrlib.lock import LogicalLockResult
from bzrlib.repository import (
    _LazyListJoin,
    RepositoryFormatMetaDir,
    RepositoryWriteLockResult,
    )
from bzrlib.vf_repository import (
    MetaDirVersionedFileRepository,
    MetaDirVersionedFileRepositoryFormat,
    VersionedFileCommitBuilder,
    VersionedFileRootCommitBuilder,
    )
from bzrlib.trace import (
    mutter,
    warning,
    )


class PackCommitBuilder(VersionedFileCommitBuilder):
    """Subclass of VersionedFileCommitBuilder to add texts with pack semantics.

    Specifically this uses one knit object rather than one knit object per
    added text, reducing memory and object pressure.
    """

    def __init__(self, repository, parents, config, timestamp=None,
                 timezone=None, committer=None, revprops=None,
                 revision_id=None, lossy=False):
        VersionedFileCommitBuilder.__init__(self, repository, parents, config,
            timestamp=timestamp, timezone=timezone, committer=committer,
            revprops=revprops, revision_id=revision_id, lossy=lossy)
        self._file_graph = graph.Graph(
            repository._pack_collection.text_index.combined_index)

    def _heads(self, file_id, revision_ids):
        keys = [(file_id, revision_id) for revision_id in revision_ids]
        return set([key[1] for key in self._file_graph.heads(keys)])
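
    # For illustration (not part of the original module): given a file that
    # was changed on two branches that were then merged, something like
    # _heads('file-id', ['rev-a', 'rev-b']) consults the per-file graph and
    # returns only the revisions that are heads, so the commit builder can
    # pick the correct last-modified revision(s) for the new text.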


class PackRootCommitBuilder(VersionedFileRootCommitBuilder):
    """A subclass of RootCommitBuilder to add texts with pack semantics.

    Specifically this uses one knit object rather than one knit object per
    added text, reducing memory and object pressure.
    """

    def __init__(self, repository, parents, config, timestamp=None,
                 timezone=None, committer=None, revprops=None,
                 revision_id=None, lossy=False):
        super(PackRootCommitBuilder, self).__init__(repository, parents,
            config, timestamp=timestamp, timezone=timezone,
            committer=committer, revprops=revprops, revision_id=revision_id,
            lossy=lossy)
        self._file_graph = graph.Graph(
            repository._pack_collection.text_index.combined_index)

    def _heads(self, file_id, revision_ids):
        keys = [(file_id, revision_id) for revision_id in revision_ids]
        return set([key[1] for key in self._file_graph.heads(keys)])


class Pack(object):
    """An in memory proxy for a pack and its indices.

    This is a base class that is not directly used, instead the classes
    ExistingPack and NewPack are used.
    """

    # A map of index 'type' to the file extension and position in the
    # index_sizes array.
    index_definitions = {
        'chk': ('.cix', 4),
        'revision': ('.rix', 0),
        'inventory': ('.iix', 1),
        'text': ('.tix', 2),
        'signature': ('.six', 3),
        }

    def __init__(self, revision_index, inventory_index, text_index,
        signature_index, chk_index=None):
        """Create a pack instance.

        :param revision_index: A GraphIndex for determining what revisions are
            present in the Pack and accessing the locations of their texts.
        :param inventory_index: A GraphIndex for determining what inventories are
            present in the Pack and accessing the locations of their
            texts/deltas.
        :param text_index: A GraphIndex for determining what file texts
            are present in the pack and accessing the locations of their
            texts/deltas (via (fileid, revisionid) tuples).
        :param signature_index: A GraphIndex for determining what signatures are
            present in the Pack and accessing the locations of their texts.
        :param chk_index: A GraphIndex for accessing content by CHK, if the
            pack has one.
        """
        self.revision_index = revision_index
        self.inventory_index = inventory_index
        self.text_index = text_index
        self.signature_index = signature_index
        self.chk_index = chk_index

    def access_tuple(self):
        """Return a tuple (transport, name) for the pack content."""
        return self.pack_transport, self.file_name()

    def _check_references(self):
        """Make sure our external references are present.

        Packs are allowed to have deltas whose base is not in the pack, but it
        must be present somewhere in this collection.  It is not allowed to
        have deltas based on a fallback repository.
        (See <https://bugs.launchpad.net/bzr/+bug/288751>)
        """
        missing_items = {}
        for (index_name, external_refs, index) in [
            ('texts',
                self._get_external_refs(self.text_index),
                self._pack_collection.text_index.combined_index),
            ('inventories',
                self._get_external_refs(self.inventory_index),
                self._pack_collection.inventory_index.combined_index),
            ]:
            missing = external_refs.difference(
                k for (idx, k, v, r) in
                index.iter_entries(external_refs))
            if missing:
                missing_items[index_name] = sorted(list(missing))
        if missing_items:
            from pprint import pformat
            raise errors.BzrCheckError(
                "Newly created pack file %r has delta references to "
                "items not in its repository:\n%s"
                % (self, pformat(missing_items)))

    def file_name(self):
        """Get the file name for the pack on disk."""
        return self.name + '.pack'

    def get_revision_count(self):
        return self.revision_index.key_count()

    def index_name(self, index_type, name):
        """Get the disk name of an index type for pack name 'name'."""
        return name + Pack.index_definitions[index_type][0]

    def index_offset(self, index_type):
        """Get the position in the index_sizes array for a given index type."""
        return Pack.index_definitions[index_type][1]

    def inventory_index_name(self, name):
        """The inv index is the name + .iix."""
        return self.index_name('inventory', name)

    def revision_index_name(self, name):
        """The revision index is the name + .rix."""
        return self.index_name('revision', name)

    def signature_index_name(self, name):
        """The signature index is the name + .six."""
        return self.index_name('signature', name)

    def text_index_name(self, name):
        """The text index is the name + .tix."""
        return self.index_name('text', name)
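
    # Illustrative example (hypothetical pack name, not from the original
    # source): for a pack named '0123456789abcdef0123456789abcdef',
    #
    #   >>> pack.index_name('revision', pack.name)
    #   '0123456789abcdef0123456789abcdef.rix'
    #   >>> pack.index_offset('text')    # slot in the index_sizes list
    #   2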

    def _replace_index_with_readonly(self, index_type):
        unlimited_cache = False
        if index_type == 'chk':
            unlimited_cache = True
        index = self.index_class(self.index_transport,
            self.index_name(index_type, self.name),
            self.index_sizes[self.index_offset(index_type)],
            unlimited_cache=unlimited_cache)
        if index_type == 'chk':
            index._leaf_factory = btree_index._gcchk_factory
        setattr(self, index_type + '_index', index)


class ExistingPack(Pack):
    """An in memory proxy for an existing .pack and its disk indices."""

    def __init__(self, pack_transport, name, revision_index, inventory_index,
        text_index, signature_index, chk_index=None):
        """Create an ExistingPack object.

        :param pack_transport: The transport where the pack file resides.
        :param name: The name of the pack on disk in the pack_transport.
        """
        Pack.__init__(self, revision_index, inventory_index, text_index,
            signature_index, chk_index)
        self.name = name
        self.pack_transport = pack_transport
        if None in (revision_index, inventory_index, text_index,
                signature_index, name, pack_transport):
            raise AssertionError()

    def __eq__(self, other):
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return "<%s.%s object at 0x%x, %s, %s" % (
            self.__class__.__module__, self.__class__.__name__, id(self),
            self.pack_transport, self.name)


class ResumedPack(ExistingPack):

    def __init__(self, name, revision_index, inventory_index, text_index,
        signature_index, upload_transport, pack_transport, index_transport,
        pack_collection, chk_index=None):
        """Create a ResumedPack object."""
        ExistingPack.__init__(self, pack_transport, name, revision_index,
            inventory_index, text_index, signature_index,
            chk_index=chk_index)
        self.upload_transport = upload_transport
        self.index_transport = index_transport
        self.index_sizes = [None, None, None, None]
        indices = [
            ('revision', revision_index),
            ('inventory', inventory_index),
            ('text', text_index),
            ('signature', signature_index),
            ]
        if chk_index is not None:
            indices.append(('chk', chk_index))
            self.index_sizes.append(None)
        for index_type, index in indices:
            offset = self.index_offset(index_type)
            self.index_sizes[offset] = index._size
        self.index_class = pack_collection._index_class
        self._pack_collection = pack_collection
        self._state = 'resumed'
        # XXX: perhaps check that the .pack file exists?

    def access_tuple(self):
        if self._state == 'finished':
            return Pack.access_tuple(self)
        elif self._state == 'resumed':
            return self.upload_transport, self.file_name()
        else:
            raise AssertionError(self._state)

    def abort(self):
        self.upload_transport.delete(self.file_name())
        indices = [self.revision_index, self.inventory_index, self.text_index,
            self.signature_index]
        if self.chk_index is not None:
            indices.append(self.chk_index)
        for index in indices:
            index._transport.delete(index._name)

    def finish(self):
        self._check_references()
        index_types = ['revision', 'inventory', 'text', 'signature']
        if self.chk_index is not None:
            index_types.append('chk')
        for index_type in index_types:
            old_name = self.index_name(index_type, self.name)
            new_name = '../indices/' + old_name
            self.upload_transport.move(old_name, new_name)
            self._replace_index_with_readonly(index_type)
        new_name = '../packs/' + self.file_name()
        self.upload_transport.move(self.file_name(), new_name)
        self._state = 'finished'

    def _get_external_refs(self, index):
        """Return compression parents for this index that are not present.

        This returns any compression parents that are referenced by this index,
        which are not contained *in* this index. They may be present elsewhere.
        """
        return index.external_references(1)


class NewPack(Pack):
    """An in memory proxy for a pack which is being created."""

    def __init__(self, pack_collection, upload_suffix='', file_mode=None):
        """Create a NewPack instance.

        :param pack_collection: A PackCollection into which this is being inserted.
        :param upload_suffix: An optional suffix to be given to any temporary
            files created during the pack creation. e.g. '.autopack'
        :param file_mode: Unix permissions for newly created file.
        """
        # The relative locations of the packs are constrained, but all are
        # passed in because the caller has them, so as to avoid object churn.
        index_builder_class = pack_collection._index_builder_class
        if pack_collection.chk_index is not None:
            chk_index = index_builder_class(reference_lists=0)
        else:
            chk_index = None
        Pack.__init__(self,
            # Revisions: parents list, no text compression.
            index_builder_class(reference_lists=1),
            # Inventory: We want to map compression only, but currently the
            # knit code hasn't been updated enough to understand that, so we
            # have a regular 2-list index giving parents and compression
            # source.
            index_builder_class(reference_lists=2),
            # Texts: compression and per file graph, for all fileids - so two
            # reference lists and two elements in the key tuple.
            index_builder_class(reference_lists=2, key_elements=2),
            # Signatures: Just blobs to store, no compression, no parents
            # list.
            index_builder_class(reference_lists=0),
            # CHK based storage - just blobs, no compression or parents.
            chk_index=chk_index
            )
        self._pack_collection = pack_collection
        # When we make readonly indices, we need this.
        self.index_class = pack_collection._index_class
        # where should the new pack be opened
        self.upload_transport = pack_collection._upload_transport
        # where are indices written out to
        self.index_transport = pack_collection._index_transport
        # where is the pack renamed to when it is finished?
        self.pack_transport = pack_collection._pack_transport
        # What file mode to upload the pack and indices with.
        self._file_mode = file_mode
        # tracks the content written to the .pack file.
        self._hash = osutils.md5()
        # a tuple with the length in bytes of the indices, once the pack
        # is finalised. (rev, inv, text, sigs, chk_if_in_use)
        self.index_sizes = None
        # How much data to cache when writing packs. Note that this is not
        # synchronised with reads, because it's not in the transport layer, so
        # is not safe unless the client knows it won't be reading from the pack
        # under creation.
        self._cache_limit = 0
        # the temporary pack file name.
        self.random_name = osutils.rand_chars(20) + upload_suffix
        # when was this pack started ?
        self.start_time = time.time()
        # open an output stream for the data added to the pack.
        self.write_stream = self.upload_transport.open_write_stream(
            self.random_name, mode=self._file_mode)
        if 'pack' in debug.debug_flags:
            mutter('%s: create_pack: pack stream open: %s%s t+%6.3fs',
                time.ctime(), self.upload_transport.base, self.random_name,
                time.time() - self.start_time)
        # A list of byte sequences to be written to the new pack, and the
        # aggregate size of them.  Stored as a list rather than separate
        # variables so that the _write_data closure below can update them.
        self._buffer = [[], 0]
        # create a callable for adding data
        #
        # robertc says- this is a closure rather than a method on the object
        # so that the variables are locals, and faster than accessing object
        # members.
        def _write_data(bytes, flush=False, _buffer=self._buffer,
            _write=self.write_stream.write, _update=self._hash.update):
            _buffer[0].append(bytes)
            _buffer[1] += len(bytes)
            # buffer cap
            if _buffer[1] > self._cache_limit or flush:
                bytes = ''.join(_buffer[0])
                _write(bytes)
                _update(bytes)
                _buffer[:] = [[], 0]
        # expose this on self, for the occasion when clients want to add data.
        self._write_data = _write_data
        # a pack writer object to serialise pack records.
        self._writer = pack.ContainerWriter(self._write_data)
        self._writer.begin()
        # what state is the pack in? (open, finished, aborted)
        self._state = 'open'
        # no name until we finish writing the content
        self.name = None
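
    # Illustrative sketch (assumed usage, not from the original source): the
    # closure above batches writes, so a caller that raises the cache limit
    #
    #     new_pack.set_write_cache_size(1024 * 1024)
    #     new_pack._write_data('many small records...')  # buffered in memory
    #     new_pack._write_data('', flush=True)           # one transport write
    #
    # sends the accumulated bytes to the transport in a single write call,
    # while the md5 hash is updated with exactly the same byte stream.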

    def abort(self):
        """Cancel creating this pack."""
        self._state = 'aborted'
        self.write_stream.close()
        # Remove the temporary pack file.
        self.upload_transport.delete(self.random_name)
        # The indices have no state on disk.

    def access_tuple(self):
        """Return a tuple (transport, name) for the pack content."""
        if self._state == 'finished':
            return Pack.access_tuple(self)
        elif self._state == 'open':
            return self.upload_transport, self.random_name
        else:
            raise AssertionError(self._state)

    def data_inserted(self):
        """True if data has been added to this pack."""
        return bool(self.get_revision_count() or
            self.inventory_index.key_count() or
            self.text_index.key_count() or
            self.signature_index.key_count() or
            (self.chk_index is not None and self.chk_index.key_count()))

    def finish_content(self):
        if self.name is not None:
            return
        self._writer.end()
        if self._buffer[1]:
            self._write_data('', flush=True)
        self.name = self._hash.hexdigest()
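
    # Note (illustrative): the pack's final name is the md5 of everything
    # written to it, a 32-character hex string such as
    # 'd41d8cd98f00b204e9800998ecf8427e' (the digest of empty content).
    # The same digest later serves as the suspend/resume token that
    # RepositoryPackCollection._resume_pack() validates with [a-f0-9]{32}.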

    def finish(self, suspend=False):
        """Finish the new pack.

        This:
        - finalises the content
        - assigns a name (the md5 of the content, currently)
        - writes out the associated indices
        - renames the pack into place.
        - stores the index size tuple for the pack in the index_sizes
          attribute.
        """
        self.finish_content()
        if not suspend:
            self._check_references()
        # write indices
        # XXX: It'd be better to write them all to temporary names, then
        # rename them all into place, so that the window when only some are
        # visible is smaller.  On the other hand none will be seen until
        # they're in the names list.
        self.index_sizes = [None, None, None, None]
        self._write_index('revision', self.revision_index, 'revision',
            suspend)
        self._write_index('inventory', self.inventory_index, 'inventory',
            suspend)
        self._write_index('text', self.text_index, 'file texts', suspend)
        self._write_index('signature', self.signature_index,
            'revision signatures', suspend)
        if self.chk_index is not None:
            self.index_sizes.append(None)
            self._write_index('chk', self.chk_index,
                'content hash bytes', suspend)
        self.write_stream.close(
            want_fdatasync=self._pack_collection.config_stack.get('repository.fdatasync'))
        # Note that this will clobber an existing pack with the same name,
        # without checking for hash collisions. While this is undesirable this
        # is something that can be rectified in a subsequent release. One way
        # to rectify it may be to leave the pack at the original name, writing
        # its pack-names entry as something like 'HASH: index-sizes
        # temporary-name'. Allocate that and check for collisions, if it is
        # collision free then rename it into place. If clients know this scheme
        # they can handle missing-file errors by:
        #  - try for HASH.pack
        #  - try for temporary-name
        #  - refresh the pack-list to see if the pack is now absent
        new_name = self.name + '.pack'
        if not suspend:
            new_name = '../packs/' + new_name
        self.upload_transport.move(self.random_name, new_name)
        self._state = 'finished'
        if 'pack' in debug.debug_flags:
            # XXX: size might be interesting?
            mutter('%s: create_pack: pack finished: %s%s->%s t+%6.3fs',
                time.ctime(), self.upload_transport.base, self.random_name,
                new_name, time.time() - self.start_time)

    def flush(self):
        """Flush any current data."""
        if self._buffer[1]:
            bytes = ''.join(self._buffer[0])
            self.write_stream.write(bytes)
            self._hash.update(bytes)
            self._buffer[:] = [[], 0]

    def _get_external_refs(self, index):
        return index._external_references()

    def set_write_cache_size(self, size):
        self._cache_limit = size

    def _write_index(self, index_type, index, label, suspend=False):
        """Write out an index.

        :param index_type: The type of index to write - e.g. 'revision'.
        :param index: The index object to serialise.
        :param label: What label to give the index e.g. 'revision'.
        """
        index_name = self.index_name(index_type, self.name)
        if suspend:
            transport = self.upload_transport
        else:
            transport = self.index_transport
        index_tempfile = index.finish()
        index_bytes = index_tempfile.read()
        write_stream = transport.open_write_stream(index_name,
            mode=self._file_mode)
        write_stream.write(index_bytes)
        write_stream.close(
            want_fdatasync=self._pack_collection.config_stack.get('repository.fdatasync'))
        self.index_sizes[self.index_offset(index_type)] = len(index_bytes)
        if 'pack' in debug.debug_flags:
            # XXX: size might be interesting?
            mutter('%s: create_pack: wrote %s index: %s%s t+%6.3fs',
                time.ctime(), label, self.upload_transport.base,
                self.random_name, time.time() - self.start_time)
        # Replace the writable index on this object with a readonly,
        # presently unloaded index. We should alter
        # the index layer to make its finish() error if add_node is
        # subsequently used. RBC
        self._replace_index_with_readonly(index_type)


class AggregateIndex(object):
    """An aggregated index for the RepositoryPackCollection.

    AggregateIndex is responsible for managing the PackAccess object,
    the Index-To-Pack mapping, and the list of all indices for a specific
    type of index such as 'revision index'.

    A CombinedIndex provides an index on a single key space built up
    from several on-disk indices.  The AggregateIndex builds on this
    to provide a knit access layer, and allows having up to one writable
    index within the collection.
    """
    # XXX: Probably 'can be written to' could/should be separated from 'acts
    # like a knit index' -- mbp 20071024

    def __init__(self, reload_func=None, flush_func=None):
        """Create an AggregateIndex.

        :param reload_func: A function to call if we find we are missing an
            index. Should have the form reload_func() => True if the list of
            active pack files has changed.
        """
        self._reload_func = reload_func
        self.index_to_pack = {}
        self.combined_index = CombinedGraphIndex([], reload_func=reload_func)
        self.data_access = _DirectPackAccess(self.index_to_pack,
                                             reload_func=reload_func,
                                             flush_func=flush_func)
        self.add_callback = None

    def add_index(self, index, pack):
        """Add index to the aggregate, which is an index for Pack pack.

        Future searches on the aggregate index will search this new index
        before all previously inserted indices.

        :param index: An Index for the pack.
        :param pack: A Pack instance.
        """
        # expose it to the index map
        self.index_to_pack[index] = pack.access_tuple()
        # put it at the front of the linear index list
        self.combined_index.insert_index(0, index, pack.name)

    def add_writable_index(self, index, pack):
        """Add an index which is able to have data added to it.

        There can be at most one writable index at any time.  Any
        modifications made to the knit are put into this index.

        :param index: An index from the pack parameter.
        :param pack: A Pack instance.
        """
        if self.add_callback is not None:
            raise AssertionError(
                "%s already has a writable index through %s" % \
                (self, self.add_callback))
        # allow writing: queue writes to a new index
        self.add_index(index, pack)
        # Updates the index to packs mapping as a side effect,
        self.data_access.set_writer(pack._writer, index, pack.access_tuple())
        self.add_callback = index.add_nodes
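
    # Illustrative sketch (assumed usage, not from the original source): a
    # collection wires the in-progress NewPack in as the one writable index,
    # and a second writable index is rejected:
    #
    #     agg = AggregateIndex()
    #     agg.add_writable_index(new_pack.revision_index, new_pack)
    #     agg.add_writable_index(other_pack.revision_index, other_pack)
    #     # AssertionError: ... already has a writable index through ...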

    def clear(self):
        """Reset all the aggregate data to nothing."""
        self.data_access.set_writer(None, None, (None, None))
        self.index_to_pack.clear()
        del self.combined_index._indices[:]
        del self.combined_index._index_names[:]
        self.add_callback = None

    def remove_index(self, index):
        """Remove index from the indices used to answer queries.

        :param index: An index from the pack parameter.
        """
        del self.index_to_pack[index]
        pos = self.combined_index._indices.index(index)
        del self.combined_index._indices[pos]
        del self.combined_index._index_names[pos]
        if (self.add_callback is not None and
            getattr(index, 'add_nodes', None) == self.add_callback):
            self.add_callback = None
            self.data_access.set_writer(None, None, (None, None))


class Packer(object):
    """Create a pack from packs."""

    def __init__(self, pack_collection, packs, suffix, revision_ids=None,
                 reload_func=None):
        """Create a Packer.

        :param pack_collection: A RepositoryPackCollection object where the
            new pack is being written to.
        :param packs: The packs to combine.
        :param suffix: The suffix to use on the temporary files for the pack.
        :param revision_ids: Revision ids to limit the pack to.
        :param reload_func: A function to call if a pack file/index goes
            missing. The side effect of calling this function should be to
            update self.packs. See also AggregateIndex
        """
        self.packs = packs
        self.suffix = suffix
        self.revision_ids = revision_ids
        # The pack object we are creating.
        self.new_pack = None
        self._pack_collection = pack_collection
        self._reload_func = reload_func
        # The index layer keys for the revisions being copied. None for 'all
        # revisions'.
        self._revision_keys = None
        # What text keys to copy. None for 'all texts'. This is set by
        # _copy_inventory_texts
        self._text_filter = None

    def pack(self, pb=None):
        """Create a new pack by reading data from other packs.

        This does little more than a bulk copy of data. One key difference
        is that data with the same item key across multiple packs is elided
        from the output. The new pack is written into the current pack store
        along with its indices, and the name added to the pack names. The
        source packs are not altered and are not required to be in the current
        pack collection.

        :param pb: An optional progress bar to use. A nested bar is created if
            this is None.
        :return: A Pack object, or None if nothing was copied.
        """
        # open a pack - using the same name as the last temporary file
        # - which has already been flushed, so it's safe.
        # XXX: - duplicate code warning with start_write_group; fix before
        #      considering 'done'.
        if self._pack_collection._new_pack is not None:
            raise errors.BzrError('call to %s.pack() while another pack is'
                                  ' being written.'
                                  % (self.__class__.__name__,))
        if self.revision_ids is not None:
            if len(self.revision_ids) == 0:
                # silly fetch request.
                return None
            else:
                self.revision_ids = frozenset(self.revision_ids)
                self.revision_keys = frozenset((revid,) for revid in
                    self.revision_ids)
        if pb is None:
            self.pb = ui.ui_factory.nested_progress_bar()
        else:
            self.pb = pb
        try:
            return self._create_pack_from_packs()
        finally:
            if pb is None:
                self.pb.finished()

    def open_pack(self):
        """Open a pack for the pack we are creating."""
        new_pack = self._pack_collection.pack_factory(self._pack_collection,
                upload_suffix=self.suffix,
                file_mode=self._pack_collection.repo.bzrdir._get_file_mode())
        # We know that we will process all nodes in order, and don't need to
        # query, so don't combine any indices spilled to disk until we are done
        new_pack.revision_index.set_optimize(combine_backing_indices=False)
        new_pack.inventory_index.set_optimize(combine_backing_indices=False)
        new_pack.text_index.set_optimize(combine_backing_indices=False)
        new_pack.signature_index.set_optimize(combine_backing_indices=False)
        return new_pack

    def _copy_revision_texts(self):
        """Copy revision data to the new pack."""
        raise NotImplementedError(self._copy_revision_texts)

    def _copy_inventory_texts(self):
        """Copy the inventory texts to the new pack.

        self._revision_keys is used to determine what inventories to copy.

        Sets self._text_filter appropriately.
        """
        raise NotImplementedError(self._copy_inventory_texts)

    def _copy_text_texts(self):
        raise NotImplementedError(self._copy_text_texts)

    def _create_pack_from_packs(self):
        raise NotImplementedError(self._create_pack_from_packs)

    def _log_copied_texts(self):
        if 'pack' in debug.debug_flags:
            mutter('%s: create_pack: file texts copied: %s%s %d items t+%6.3fs',
                time.ctime(), self._pack_collection._upload_transport.base,
                self.new_pack.random_name,
                self.new_pack.text_index.key_count(),
                time.time() - self.new_pack.start_time)

    def _use_pack(self, new_pack):
        """Return True if new_pack should be used.

        :param new_pack: The pack that has just been created.
        :return: True if the pack should be used.
        """
        return new_pack.data_inserted()


class RepositoryPackCollection(object):
    """Management of packs within a repository.

    :ivar _names: map of {pack_name: (index_size,)}
    """

    pack_factory = None
    resumed_pack_factory = None
    normal_packer_class = None
    optimising_packer_class = None

    def __init__(self, repo, transport, index_transport, upload_transport,
                 pack_transport, index_builder_class, index_class,
                 use_chk_index):
        """Create a new RepositoryPackCollection.

        :param transport: Addresses the repository base directory
            (typically .bzr/repository/).
        :param index_transport: Addresses the directory containing indices.
        :param upload_transport: Addresses the directory into which packs are written
            while they're being created.
        :param pack_transport: Addresses the directory of existing complete packs.
        :param index_builder_class: The index builder class to use.
        :param index_class: The index class to use.
        :param use_chk_index: Whether to setup and manage a CHK index.
        """
        # XXX: This should call self.reset()
        self.repo = repo
        self.transport = transport
        self._index_transport = index_transport
        self._upload_transport = upload_transport
        self._pack_transport = pack_transport
        self._index_builder_class = index_builder_class
        self._index_class = index_class
        self._suffix_offsets = {'.rix': 0, '.iix': 1, '.tix': 2, '.six': 3,
            '.cix': 4}
        self.packs = []
        # name:Pack mapping
        self._names = None
        self._packs_by_name = {}
        # the previous pack-names content
        self._packs_at_load = None
        # when a pack is being created by this object, the state of that pack.
        self._new_pack = None
        # aggregated revision index data
        flush = self._flush_new_pack
        self.revision_index = AggregateIndex(self.reload_pack_names, flush)
        self.inventory_index = AggregateIndex(self.reload_pack_names, flush)
        self.text_index = AggregateIndex(self.reload_pack_names, flush)
        self.signature_index = AggregateIndex(self.reload_pack_names, flush)
        all_indices = [self.revision_index, self.inventory_index,
                self.text_index, self.signature_index]
        if use_chk_index:
            self.chk_index = AggregateIndex(self.reload_pack_names, flush)
            all_indices.append(self.chk_index)
        else:
            # used to determine if we're using a chk_index elsewhere.
            self.chk_index = None
        # Tell all the CombinedGraphIndex objects about each other, so they can
        # share hints about which pack names to search first.
        all_combined = [agg_idx.combined_index for agg_idx in all_indices]
        for combined_idx in all_combined:
            combined_idx.set_sibling_indices(
                set(all_combined).difference([combined_idx]))
        # resumed packs
        self._resumed_packs = []
        self.config_stack = config.LocationStack(self.transport.base)

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, self.repo)

    def add_pack_to_memory(self, pack):
        """Make a Pack object available to the repository to satisfy queries.

        :param pack: A Pack object.
        """
        if pack.name in self._packs_by_name:
            raise AssertionError(
                'pack %s already in _packs_by_name' % (pack.name,))
        self.packs.append(pack)
        self._packs_by_name[pack.name] = pack
        self.revision_index.add_index(pack.revision_index, pack)
        self.inventory_index.add_index(pack.inventory_index, pack)
        self.text_index.add_index(pack.text_index, pack)
        self.signature_index.add_index(pack.signature_index, pack)
        if self.chk_index is not None:
            self.chk_index.add_index(pack.chk_index, pack)

    def all_packs(self):
        """Return a list of all the Pack objects this repository has.

        Note that an in-progress pack being created is not returned.

        :return: A list of Pack objects for all the packs in the repository.
        """
        result = []
        for name in self.names():
            result.append(self.get_pack_by_name(name))
        return result

    def autopack(self):
        """Pack the pack collection incrementally.

        This will not attempt global reorganisation or recompression,
        rather it will just ensure that the total number of packs does
        not grow without bound. It uses the _max_pack_count method to
        determine if autopacking is needed, and the pack_distribution
        method to determine the number of revisions in each pack.

        If autopacking takes place then the packs name collection will have
        been flushed to disk - packing requires updating the name collection
        in synchronisation with certain steps. Otherwise the names collection
        is not flushed.

        :return: Something evaluating true if packing took place.
        """
        while True:
            try:
                return self._do_autopack()
            except errors.RetryAutopack:
                # If we get a RetryAutopack exception, we should abort the
                # current action, and retry.
                pass

    def _do_autopack(self):
        # XXX: Should not be needed when the management of indices is sane.
        total_revisions = self.revision_index.combined_index.key_count()
        total_packs = len(self._names)
        if self._max_pack_count(total_revisions) >= total_packs:
            return None
        # determine which packs need changing
        pack_distribution = self.pack_distribution(total_revisions)
        existing_packs = []
        for pack in self.all_packs():
            revision_count = pack.get_revision_count()
            if revision_count == 0:
                # revision less packs are not generated by normal operation,
                # only by operations like sign-my-commits, and thus will not
                # tend to grow rapidly or without bound like commit containing
                # packs do - leave them alone as packing them really should
                # group their data with the relevant commit, and that may
                # involve rewriting ancient history - which autopack tries to
                # avoid. Alternatively we could not group the data but treat
                # each of these as having a single revision, and thus add
                # one revision for each to the total revision count, to get
                # a matching distribution.
                continue
            existing_packs.append((revision_count, pack))
        pack_operations = self.plan_autopack_combinations(
            existing_packs, pack_distribution)
        num_new_packs = len(pack_operations)
        num_old_packs = sum([len(po[1]) for po in pack_operations])
        num_revs_affected = sum([po[0] for po in pack_operations])
        mutter('Auto-packing repository %s, which has %d pack files, '
            'containing %d revisions. Packing %d files into %d affecting %d'
            ' revisions', self, total_packs, total_revisions, num_old_packs,
            num_new_packs, num_revs_affected)
        result = self._execute_pack_operations(pack_operations,
            packer_class=self.normal_packer_class,
            reload_func=self._restart_autopack)
        mutter('Auto-packing repository %s completed', self)
        return result

    def _execute_pack_operations(self, pack_operations, packer_class,
            reload_func=None):
        """Execute a series of pack operations.

        :param pack_operations: A list of [revision_count, packs_to_combine].
        :param packer_class: The class of packer to use
        :return: The new pack names.
        """
        for revision_count, packs in pack_operations:
            # we may have no-ops from the setup logic
            if len(packs) == 0:
                continue
            packer = packer_class(self, packs, '.autopack',
                                  reload_func=reload_func)
            try:
                result = packer.pack()
            except errors.RetryWithNewPacks:
                # An exception is propagating out of this context, make sure
                # this packer has cleaned up. Packer() doesn't set its new_pack
                # state into the RepositoryPackCollection object, so we only
                # have access to it directly here.
                if packer.new_pack is not None:
                    packer.new_pack.abort()
                raise
            if result is None:
                return
            for pack in packs:
                self._remove_pack_from_memory(pack)
        # record the newly available packs and stop advertising the old
        # packs
        to_be_obsoleted = []
        for _, packs in pack_operations:
            to_be_obsoleted.extend(packs)
        result = self._save_pack_names(clear_obsolete_packs=True,
                                       obsolete_packs=to_be_obsoleted)
        return result

    def _flush_new_pack(self):
        if self._new_pack is not None:
            self._new_pack.flush()

    def lock_names(self):
        """Acquire the mutex around the pack-names index.

        This cannot be used in the middle of a read-only transaction on the
        repository.
        """
        self.repo.control_files.lock_write()

    def _already_packed(self):
        """Is the collection already packed?"""
        return not (self.repo._format.pack_compresses or (len(self._names) > 1))

    def pack(self, hint=None, clean_obsolete_packs=False):
        """Pack the pack collection totally."""
        self.ensure_loaded()
        total_packs = len(self._names)
        if self._already_packed():
            return
        total_revisions = self.revision_index.combined_index.key_count()
        # XXX: the following may want to be a class, to pack with a given
        # policy.
        mutter('Packing repository %s, which has %d pack files, '
            'containing %d revisions with hint %r.', self, total_packs,
            total_revisions, hint)
        while True:
            try:
                self._try_pack_operations(hint)
            except RetryPackOperations:
                continue
            break

        if clean_obsolete_packs:
            self._clear_obsolete_packs()

    def _try_pack_operations(self, hint):
        """Calculate the pack operations based on the hint (if any), and
        execute them.
        """
        # determine which packs need changing
        pack_operations = [[0, []]]
        for pack in self.all_packs():
            if hint is None or pack.name in hint:
                # Either no hint was provided (so we are packing everything),
                # or this pack was included in the hint.
                pack_operations[-1][0] += pack.get_revision_count()
                pack_operations[-1][1].append(pack)
        self._execute_pack_operations(pack_operations,
            packer_class=self.optimising_packer_class,
            reload_func=self._restart_pack_operations)

    def plan_autopack_combinations(self, existing_packs, pack_distribution):
        """Plan a pack operation.

        :param existing_packs: The packs to pack. (A list of (revcount, Pack)
            tuples).
        :param pack_distribution: A list with the number of revisions desired
            in each pack.
        """
        if len(existing_packs) <= len(pack_distribution):
            return []
        existing_packs.sort(reverse=True)
        pack_operations = [[0, []]]
        # plan out what packs to keep, and what to reorganise
        while len(existing_packs):
            # take the largest pack, and if it's less than the head of the
            # distribution chart we will include its contents in the new pack
            # for that position. If it's larger, we remove its size from the
            # distribution chart
            next_pack_rev_count, next_pack = existing_packs.pop(0)
            if next_pack_rev_count >= pack_distribution[0]:
                # this is already packed 'better' than this, so we can
                # not waste time packing it.
                while next_pack_rev_count > 0:
                    next_pack_rev_count -= pack_distribution[0]
                    if next_pack_rev_count >= 0:
                        # more to go
                        del pack_distribution[0]
                    else:
                        # didn't use that entire bucket up
                        pack_distribution[0] = -next_pack_rev_count
            else:
                # add the revisions we're going to add to the next output pack
                pack_operations[-1][0] += next_pack_rev_count
                # allocate this pack to the next pack sub operation
                pack_operations[-1][1].append(next_pack)
                if pack_operations[-1][0] >= pack_distribution[0]:
                    # this pack is used up, shift left.
                    del pack_distribution[0]
                    pack_operations.append([0, []])
        # Now that we know which pack files we want to move, shove them all
        # into a single pack file.
        final_rev_count = 0
        final_pack_list = []
        for num_revs, pack_files in pack_operations:
            final_rev_count += num_revs
            final_pack_list.extend(pack_files)
        if len(final_pack_list) == 1:
            raise AssertionError('We somehow generated an autopack with a'
                ' single pack file being moved.')
        return [[final_rev_count, final_pack_list]]
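
    # Worked example (illustrative, hypothetical packs a, b and c): with 11
    # revisions in total, pack_distribution(11) == [10, 1].  Given
    # existing_packs [(8, a), (2, b), (1, c)], the loop above fills the
    # 10-revision bucket with a and b (8 + 2 revisions) and leaves c alone,
    # since it already matches the remaining 1-revision bucket exactly, so
    # the returned plan is [[10, [a, b]]].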

    def ensure_loaded(self):
        """Ensure we have read names from disk.

        :return: True if the disk names had not been previously read.
        """
        # NB: if you see an assertion error here, it's probably access against
        # an unlocked repo. Naughty.
        if not self.repo.is_locked():
            raise errors.ObjectNotLocked(self.repo)
        if self._names is None:
            self._names = {}
            self._packs_at_load = set()
            for index, key, value in self._iter_disk_pack_index():
                name = key[0]
                self._names[name] = self._parse_index_sizes(value)
                self._packs_at_load.add((key, value))
            result = True
        else:
            result = False
        # populate all the metadata.
        self.all_packs()
        return result

    def _parse_index_sizes(self, value):
        """Parse a string of index sizes."""
        return tuple([int(digits) for digits in value.split(' ')])
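
    # e.g. (illustrative values): _parse_index_sizes('59 43 112 7') returns
    # (59, 43, 112, 7) - one size per index, in the .rix/.iix/.tix/.six
    # order defined by _suffix_offsets.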

    def get_pack_by_name(self, name):
        """Get a Pack object by name.

        :param name: The name of the pack - e.g. '123456'
        :return: A Pack object.
        """
        try:
            return self._packs_by_name[name]
        except KeyError:
            rev_index = self._make_index(name, '.rix')
            inv_index = self._make_index(name, '.iix')
            txt_index = self._make_index(name, '.tix')
            sig_index = self._make_index(name, '.six')
            if self.chk_index is not None:
                chk_index = self._make_index(name, '.cix', is_chk=True)
            else:
                chk_index = None
            result = ExistingPack(self._pack_transport, name, rev_index,
                inv_index, txt_index, sig_index, chk_index)
            self.add_pack_to_memory(result)
            return result

    def _resume_pack(self, name):
        """Get a suspended Pack object by name.

        :param name: The name of the pack - e.g. '123456'
        :return: A Pack object.
        """
        if not re.match('[a-f0-9]{32}', name):
            # Tokens should be md5sums of the suspended pack file, i.e. 32 hex
            # digits.
            raise errors.UnresumableWriteGroup(
                self.repo, [name], 'Malformed write group token')
        try:
            rev_index = self._make_index(name, '.rix', resume=True)
            inv_index = self._make_index(name, '.iix', resume=True)
            txt_index = self._make_index(name, '.tix', resume=True)
            sig_index = self._make_index(name, '.six', resume=True)
            if self.chk_index is not None:
                chk_index = self._make_index(name, '.cix', resume=True,
                                             is_chk=True)
            else:
                chk_index = None
            result = self.resumed_pack_factory(name, rev_index, inv_index,
                txt_index, sig_index, self._upload_transport,
                self._pack_transport, self._index_transport, self,
                chk_index=chk_index)
        except errors.NoSuchFile, e:
            raise errors.UnresumableWriteGroup(self.repo, [name], str(e))
        self.add_pack_to_memory(result)
        self._resumed_packs.append(result)
        return result

    def allocate(self, a_new_pack):
        """Allocate name in the list of packs.

        :param a_new_pack: A NewPack instance to be added to the collection of
            packs for this repository.
        """
        self.ensure_loaded()
        if a_new_pack.name in self._names:
            raise errors.BzrError(
                'Pack %r already exists in %s' % (a_new_pack.name, self))
        self._names[a_new_pack.name] = tuple(a_new_pack.index_sizes)
        self.add_pack_to_memory(a_new_pack)

    def _iter_disk_pack_index(self):
        """Iterate over the contents of the pack-names index.

        This is used when loading the list from disk, and before writing to
        detect updates from others during our write operation.
        :return: An iterator of the index contents.
        """
        return self._index_class(self.transport, 'pack-names', None
                ).iter_all_entries()

    def _make_index(self, name, suffix, resume=False, is_chk=False):
        size_offset = self._suffix_offsets[suffix]
        index_name = name + suffix
        if resume:
            transport = self._upload_transport
            index_size = transport.stat(index_name).st_size
        else:
            transport = self._index_transport
            index_size = self._names[name][size_offset]
        index = self._index_class(transport, index_name, index_size,
                                  unlimited_cache=is_chk)
        if is_chk and self._index_class is btree_index.BTreeGraphIndex:
            index._leaf_factory = btree_index._gcchk_factory
        return index

    def _max_pack_count(self, total_revisions):
        """Return the maximum number of packs to use for total revisions.

        :param total_revisions: The total number of revisions in the
            repository.
        """
        if not total_revisions:
            return 1
        digits = str(total_revisions)
        result = 0
        for digit in digits:
            result += int(digit)
        return result

    def names(self):
        """Provide an order to the underlying names."""
        return sorted(self._names.keys())
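
    # For illustration: _max_pack_count(2466) == 2 + 4 + 6 + 6 == 18, which
    # equals len(pack_distribution(2466)) below - autopack aims for at most
    # one pack per decimal digit unit of the revision count.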

    def _obsolete_packs(self, packs):
        """Move a number of packs which have been obsoleted out of the way.

        Each pack and its associated indices are moved out of the way.

        Note: for correctness this function should only be called after a new
        pack names index has been written without these pack names, and with
        the names of packs that contain the data previously available via these
        packs.

        :param packs: The packs to obsolete.
        :param return: None.
        """
        for pack in packs:
            try:
                try:
                    pack.pack_transport.move(pack.file_name(),
                        '../obsolete_packs/' + pack.file_name())
                except errors.NoSuchFile:
                    # perhaps obsolete_packs was removed? Let's create it and
                    # try again
                    try:
                        pack.pack_transport.mkdir('../obsolete_packs/')
                    except errors.FileExists:
                        pass
                    pack.pack_transport.move(pack.file_name(),
                        '../obsolete_packs/' + pack.file_name())
            except (errors.PathError, errors.TransportError), e:
                # TODO: Should these be warnings or mutters?
                mutter("couldn't rename obsolete pack, skipping it:\n%s"
                       % (e,))
            # TODO: Probably needs to know all possible indices for this pack
            # - or maybe list the directory and move all indices matching this
            # name whether we recognize it or not?
            suffixes = ['.iix', '.six', '.tix', '.rix']
            if self.chk_index is not None:
                suffixes.append('.cix')
            for suffix in suffixes:
                try:
                    self._index_transport.move(pack.name + suffix,
                        '../obsolete_packs/' + pack.name + suffix)
                except (errors.PathError, errors.TransportError), e:
                    mutter("couldn't rename obsolete index, skipping it:\n%s"
                           % (e,))

    def pack_distribution(self, total_revisions):
        """Generate a list of the number of revisions to put in each pack.

        :param total_revisions: The total number of revisions in the
            repository.
        """
        if total_revisions == 0:
            return [0]
        digits = reversed(str(total_revisions))
        result = []
        for exponent, count in enumerate(digits):
            size = 10 ** exponent
            for pos in range(int(count)):
                result.append(size)
        return list(reversed(result))
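
    # Worked example (illustrative):
    #
    #   >>> collection.pack_distribution(2466)
    #   [1000, 1000, 100, 100, 100, 100, 10, 10, 10, 10, 10, 10, 1, 1, 1, 1, 1, 1]
    #
    # i.e. two 1000-revision packs, four 100s, six 10s and six 1s, one pack
    # per decimal digit unit of the total revision count.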

    def _pack_tuple(self, name):
        """Return a tuple with the transport and file name for a pack name."""
        return self._pack_transport, name + '.pack'

    def _remove_pack_from_memory(self, pack):
        """Remove pack from the packs accessed by this repository.

        Only affects memory state, until self._save_pack_names() is invoked.
        """
        self._names.pop(pack.name)
        self._packs_by_name.pop(pack.name)
        self._remove_pack_indices(pack)
        self.packs.remove(pack)

    def _remove_pack_indices(self, pack, ignore_missing=False):
        """Remove the indices for pack from the aggregated indices.

        :param ignore_missing: Suppress KeyErrors from calling remove_index.
        """
        for index_type in Pack.index_definitions.keys():
            attr_name = index_type + '_index'
            aggregate_index = getattr(self, attr_name)
            if aggregate_index is not None:
                pack_index = getattr(pack, attr_name)
                try:
                    aggregate_index.remove_index(pack_index)
                except KeyError:
                    if ignore_missing:
                        continue
                    raise

    def reset(self):
        """Clear all cached data."""
        # cached revision data
        self.revision_index.clear()
        # cached signature data
        self.signature_index.clear()
        # cached file text data
        self.text_index.clear()
        # cached inventory data
        self.inventory_index.clear()
        # cached chk data
        if self.chk_index is not None:
            self.chk_index.clear()
        # remove the open pack
        self._new_pack = None
        # information about packs.
        self._names = None
        self.packs = []
        self._packs_by_name = {}
        self._packs_at_load = None

    def _unlock_names(self):
        """Release the mutex around the pack-names index."""
        self.repo.control_files.unlock()

    def _diff_pack_names(self):
        """Read the pack names from disk, and compare it to the one in memory.

        :return: (disk_nodes, deleted_nodes, new_nodes, orig_disk_nodes)
            disk_nodes    The final set of nodes that should be referenced
            deleted_nodes Nodes which have been removed from when we started
            new_nodes     Nodes that are newly introduced
        """
        # load the disk nodes across
        disk_nodes = set()
        for index, key, value in self._iter_disk_pack_index():
            disk_nodes.add((key, value))
        orig_disk_nodes = set(disk_nodes)

        # do a two-way diff against our original content
        current_nodes = set()
        for name, sizes in self._names.iteritems():
            current_nodes.add(
                ((name, ), ' '.join(str(size) for size in sizes)))

        # Packs no longer present in the repository, which were present when we
        # locked the repository
        deleted_nodes = self._packs_at_load - current_nodes
        # Packs which this process is adding
        new_nodes = current_nodes - self._packs_at_load

        # Update the disk_nodes set to include the ones we are adding, and
        # remove the ones which were removed by someone else
        disk_nodes.difference_update(deleted_nodes)
        disk_nodes.update(new_nodes)

        return disk_nodes, deleted_nodes, new_nodes, orig_disk_nodes
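
    # Sketch of the merge (illustrative pack names): suppose pack-names held
    # {p1, p2, p3} when we loaded it, this process then obsoleted p3 and
    # created p5 (so current_nodes == {p1, p2, p5}), and another process
    # meanwhile added p4 (disk now {p1, p2, p3, p4}).  Then deleted_nodes ==
    # {p3}, new_nodes == {p5}, and the merged disk_nodes returned is
    # {p1, p2, p4, p5}.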

    def _syncronize_pack_names_from_disk_nodes(self, disk_nodes):
        """Given the correct set of pack files, update our saved info.

        :return: (removed, added, modified)
            removed     pack names removed from self._names
            added       pack names added to self._names
            modified    pack names that had changed value
        """
        removed = []
        added = []
        modified = []
        ## self._packs_at_load = disk_nodes
        new_names = dict(disk_nodes)
        # drop no longer present nodes
        for pack in self.all_packs():
            if (pack.name,) not in new_names:
                removed.append(pack.name)
                self._remove_pack_from_memory(pack)
        # add new nodes/refresh existing ones
        for key, value in disk_nodes:
            name = key[0]
            sizes = self._parse_index_sizes(value)
            if name in self._names:
                # existing
                if sizes != self._names[name]:
                    # the pack for name has had its indices replaced - rare but
                    # important to handle. XXX: probably can never happen today
                    # because the three-way merge code above does not handle it
                    # - you may end up adding the same key twice to the new
                    # disk index because the set values are the same, unless
                    # the only index shows up as deleted by the set difference
                    # - which it may. Until there is a specific test for this,
                    # assume it's broken. RBC 20071017.
                    self._remove_pack_from_memory(self.get_pack_by_name(name))
                    self._names[name] = sizes
                    self.get_pack_by_name(name)
                    modified.append(name)
            else:
                # new
                self._names[name] = sizes
                self.get_pack_by_name(name)
                added.append(name)
        return removed, added, modified

    def _save_pack_names(self, clear_obsolete_packs=False, obsolete_packs=None):
        """Save the list of packs.

        This will take out the mutex around the pack names list for the
        duration of the method call. If concurrent updates have been made, a
        three-way merge between the current list and the current in memory list
        is performed.

        :param clear_obsolete_packs: If True, clear out the contents of the
            obsolete_packs directory.
        :param obsolete_packs: Packs that are obsolete once the new pack-names
            file has been written.
        :return: A list of the names saved that were not previously on disk.
        """
        already_obsolete = []
        self.lock_names()
        try:
            builder = self._index_builder_class()
            (disk_nodes, deleted_nodes, new_nodes,
             orig_disk_nodes) = self._diff_pack_names()
            # TODO: handle same-name, index-size-changes here -
            # e.g. use the value from disk, not ours, *unless* we're the one
            # changing it.
            for key, value in disk_nodes:
                builder.add_node(key, value)
            self.transport.put_file('pack-names', builder.finish(),
                mode=self.repo.bzrdir._get_file_mode())
            self._packs_at_load = disk_nodes
            if clear_obsolete_packs:
                to_preserve = None
                if obsolete_packs:
                    to_preserve = set([o.name for o in obsolete_packs])
                already_obsolete = self._clear_obsolete_packs(to_preserve)
        finally:
            self._unlock_names()
        # synchronise the memory packs list with what we just wrote:
        self._syncronize_pack_names_from_disk_nodes(disk_nodes)
        if obsolete_packs:
            # TODO: We could add one more condition here. "if o.name not in
            #       orig_disk_nodes and o != the new_pack we haven't written to
            #       disk yet. However, the new pack object is not easily
            #       accessible here (it would have to be passed through the
            #       autopacking code, etc.)
            obsolete_packs = [o for o in obsolete_packs
                              if o.name not in already_obsolete]
            self._obsolete_packs(obsolete_packs)
        return [new_node[0][0] for new_node in new_nodes]

    def reload_pack_names(self):
        """Sync our pack listing with what is present in the repository.

        This should be called when we find out that something we thought was
        present is now missing. This happens when another process re-packs the
        repository, etc.

        :return: True if the in-memory list of packs has been altered at all.
        """
        # The ensure_loaded call is to handle the case where the first call
        # made involving the collection was to reload_pack_names, where we
        # don't have a view of disk contents. It's a bit of a bandaid, and
        # causes two reads of pack-names, but it's a rare corner case not
        # struck with regular push/pull etc.
        first_read = self.ensure_loaded()
        if first_read:
            return True
        # out the new value.
        (disk_nodes, deleted_nodes, new_nodes,
         orig_disk_nodes) = self._diff_pack_names()
        # _packs_at_load is meant to be the explicit list of names in
        # 'pack-names' at the start. As such, it should not contain any
        # pending names that haven't been written out yet.
        self._packs_at_load = orig_disk_nodes
        (removed, added,
         modified) = self._syncronize_pack_names_from_disk_nodes(disk_nodes)
        if removed or added or modified:
            return True
        return False

    def _restart_autopack(self):
        """Reload the pack names list, and restart the autopack code."""
        if not self.reload_pack_names():
            # Re-raise the original exception, because something went missing
            # and a restart didn't find it
            raise
        raise errors.RetryAutopack(self.repo, False, sys.exc_info())

    def _restart_pack_operations(self):
        """Reload the pack names list, and restart the pack operations code."""
        if not self.reload_pack_names():
            # Re-raise the original exception, because something went missing
            # and a restart didn't find it
            raise
        raise RetryPackOperations(self.repo, False, sys.exc_info())

    def _clear_obsolete_packs(self, preserve=None):
        """Delete everything from the obsolete-packs directory.

        :return: A list of pack identifiers (the filename without '.pack') that
            were found in obsolete_packs.
        """
        found = []
        obsolete_pack_transport = self.transport.clone('obsolete_packs')
        if preserve is None:
            preserve = set()
        try:
            obsolete_pack_files = obsolete_pack_transport.list_dir('.')
        except errors.NoSuchFile:
            return found
        for filename in obsolete_pack_files:
            name, ext = osutils.splitext(filename)
            if ext == '.pack':
                found.append(name)
            if name in preserve:
                continue
            try:
                obsolete_pack_transport.delete(filename)
            except (errors.PathError, errors.TransportError), e:
                warning("couldn't delete obsolete pack, skipping it:\n%s"
                        % (e,))
        return found

    def _start_write_group(self):
        # Do not permit preparation for writing if we're not in a 'write lock'.
        if not self.repo.is_write_locked():
            raise errors.NotWriteLocked(self)
        self._new_pack = self.pack_factory(self, upload_suffix='.pack',
            file_mode=self.repo.bzrdir._get_file_mode())
        # allow writing: queue writes to a new index
        self.revision_index.add_writable_index(self._new_pack.revision_index,
            self._new_pack)
        self.inventory_index.add_writable_index(self._new_pack.inventory_index,
            self._new_pack)
        self.text_index.add_writable_index(self._new_pack.text_index,
            self._new_pack)
        self._new_pack.text_index.set_optimize(combine_backing_indices=False)
        self.signature_index.add_writable_index(self._new_pack.signature_index,
            self._new_pack)
        if self.chk_index is not None:
            self.chk_index.add_writable_index(self._new_pack.chk_index,
                self._new_pack)
            self.repo.chk_bytes._index._add_callback = self.chk_index.add_callback
            self._new_pack.chk_index.set_optimize(combine_backing_indices=False)

        self.repo.inventories._index._add_callback = self.inventory_index.add_callback
        self.repo.revisions._index._add_callback = self.revision_index.add_callback
        self.repo.signatures._index._add_callback = self.signature_index.add_callback
        self.repo.texts._index._add_callback = self.text_index.add_callback
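
    # Lifecycle sketch (illustrative, assumed usage): the methods below pair
    # with _start_write_group() roughly as
    #
    #     collection._start_write_group()    # open a NewPack, wire callbacks
    #     ...insert revisions/inventories/texts/signatures...
    #     collection._commit_write_group()   # finish(), allocate(), autopack()
    #
    # with _abort_write_group() or _suspend_write_group() taking the place of
    # the commit when the operation fails or is deliberately parked.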

    def _abort_write_group(self):
        # FIXME: just drop the transient index.
        # forget what names there are
        if self._new_pack is not None:
            operation = cleanup.OperationWithCleanups(self._new_pack.abort)
            operation.add_cleanup(setattr, self, '_new_pack', None)
            # If we aborted while in the middle of finishing the write
            # group, _remove_pack_indices could fail because the indexes are
            # already gone.  But if they're not there we shouldn't fail in
            # this case, so we pass ignore_missing=True.
            operation.add_cleanup(self._remove_pack_indices, self._new_pack,
                ignore_missing=True)
            operation.run_simple()
        for resumed_pack in self._resumed_packs:
            operation = cleanup.OperationWithCleanups(resumed_pack.abort)
            # See comment in previous finally block.
            operation.add_cleanup(self._remove_pack_indices, resumed_pack,
                ignore_missing=True)
            operation.run_simple()
        del self._resumed_packs[:]

    def _remove_resumed_pack_indices(self):
        for resumed_pack in self._resumed_packs:
            self._remove_pack_indices(resumed_pack)
        del self._resumed_packs[:]

    def _check_new_inventories(self):
        """Detect missing inventories in this write group.

        :returns: list of strs, summarising any problems found.  If the list is
            empty no problems were found.
        """
        # The base implementation does no checks.  GCRepositoryPackCollection
        # overrides this.
        return []
1589
for prefix, versioned_file in (
1590
('revisions', self.repo.revisions),
1591
('inventories', self.repo.inventories),
1592
('texts', self.repo.texts),
1593
('signatures', self.repo.signatures),
1595
missing = versioned_file.get_missing_compression_parent_keys()
1596
all_missing.update([(prefix,) + key for key in missing])
1598
raise errors.BzrCheckError(
1599
"Repository %s has missing compression parent(s) %r "
1600
% (self.repo, sorted(all_missing)))
1601
problems = self._check_new_inventories()
1603
problems_summary = '\n'.join(problems)
1604
raise errors.BzrCheckError(
1605
"Cannot add revision(s) to repository: " + problems_summary)
1606
self._remove_pack_indices(self._new_pack)
1607
any_new_content = False
1608
if self._new_pack.data_inserted():
1609
# get all the data to disk and read to use
1610
self._new_pack.finish()
1611
self.allocate(self._new_pack)
1612
self._new_pack = None
1613
any_new_content = True
1615
self._new_pack.abort()
1616
self._new_pack = None
1617
for resumed_pack in self._resumed_packs:
1618
# XXX: this is a pretty ugly way to turn the resumed pack into a
1619
# properly committed pack.
1620
self._names[resumed_pack.name] = None
1621
self._remove_pack_from_memory(resumed_pack)
1622
resumed_pack.finish()
1623
self.allocate(resumed_pack)
1624
any_new_content = True
1625
del self._resumed_packs[:]
1627
result = self.autopack()
1629
# when autopack takes no steps, the names list is still
1631
return self._save_pack_names()
    def _suspend_write_group(self):
        tokens = [pack.name for pack in self._resumed_packs]
        self._remove_pack_indices(self._new_pack)
        if self._new_pack.data_inserted():
            # get all the data to disk and read to use
            self._new_pack.finish(suspend=True)
            tokens.append(self._new_pack.name)
            self._new_pack = None
        else:
            self._new_pack.abort()
            self._new_pack = None
        self._remove_resumed_pack_indices()
        return tokens

    def _resume_write_group(self, tokens):
        for token in tokens:
            self._resume_pack(token)

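    # Usage sketch (illustrative): suspend/resume lets a write group survive
    # beyond a single process; the tokens are the names of the suspended
    # packs, which a later caller feeds back in:
    #
    #   tokens = repo.suspend_write_group()
    #   # ... later, possibly from another process ...
    #   repo.resume_write_group(tokens)
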
class PackRepository(MetaDirVersionedFileRepository):
    """Repository with knit objects stored inside pack containers.

    The layering for a KnitPackRepository is:

    Graph        |  HPSS    | Repository public layer |
    ===================================================
    Tuple based apis below, string based, and key based apis above
    ---------------------------------------------------
    VersionedFiles
      Provides .texts, .revisions etc
      This adapts the N-tuple keys to physical knit records which only have a
      single string identifier (for historical reasons), which in older formats
      was always the revision_id, and in the mapped code for packs is always
      the last element of key tuples.
    ---------------------------------------------------
    GraphIndex
      A separate GraphIndex is used for each of the
      texts/inventories/revisions/signatures contained within each individual
      pack file. The GraphIndex layer works in N-tuples and is unaware of any
      semantic meaning of the contents.
    ===================================================
    """

    # These attributes are inherited from the Repository base class. Setting
    # them to None ensures that if the constructor is changed to not initialize
    # them, or a subclass fails to call the constructor, that an error will
    # occur rather than the system working but generating incorrect data.
    _commit_builder_class = None
    _serializer = None

    def __init__(self, _format, a_bzrdir, control_files, _commit_builder_class,
        _serializer):
        MetaDirRepository.__init__(self, _format, a_bzrdir, control_files)
        self._commit_builder_class = _commit_builder_class
        self._serializer = _serializer
        self._reconcile_fixes_text_parents = True
        if self._format.supports_external_lookups:
            self._unstacked_provider = graph.CachingParentsProvider(
                self._make_parents_provider_unstacked())
        else:
            self._unstacked_provider = graph.CachingParentsProvider(self)
        self._unstacked_provider.disable_cache()

    @needs_read_lock
    def _all_revision_ids(self):
        """See Repository.all_revision_ids()."""
        return [key[0] for key in self.revisions.keys()]

    def _abort_write_group(self):
        self.revisions._index._key_dependencies.clear()
        self._pack_collection._abort_write_group()

    def _make_parents_provider(self):
        if not self._format.supports_external_lookups:
            return self._unstacked_provider
        return graph.StackedParentsProvider(_LazyListJoin(
            [self._unstacked_provider], self._fallback_repositories))

    def _refresh_data(self):
        if not self.is_locked():
            return
        self._pack_collection.reload_pack_names()
        self._unstacked_provider.disable_cache()
        self._unstacked_provider.enable_cache()

    def _start_write_group(self):
        self._pack_collection._start_write_group()

    def _commit_write_group(self):
        hint = self._pack_collection._commit_write_group()
        self.revisions._index._key_dependencies.clear()
        # The commit may have added keys that were previously cached as
        # missing, so reset the cache.
        self._unstacked_provider.disable_cache()
        self._unstacked_provider.enable_cache()
        return hint

    def suspend_write_group(self):
        # XXX check self._write_group is self.get_transaction()?
        tokens = self._pack_collection._suspend_write_group()
        self.revisions._index._key_dependencies.clear()
        self._write_group = None
        return tokens

    def _resume_write_group(self, tokens):
        self._start_write_group()
        try:
            self._pack_collection._resume_write_group(tokens)
        except errors.UnresumableWriteGroup:
            self._abort_write_group()
            raise
        for pack in self._pack_collection._resumed_packs:
            self.revisions._index.scan_unvalidated_index(pack.revision_index)

    def get_transaction(self):
        if self._write_lock_count:
            return self._transaction
        else:
            return self.control_files.get_transaction()

    def is_locked(self):
        return self._write_lock_count or self.control_files.is_locked()

    def is_write_locked(self):
        return self._write_lock_count

    def lock_write(self, token=None):
        """Lock the repository for writes.

        :return: A bzrlib.repository.RepositoryWriteLockResult.
        """
        locked = self.is_locked()
        if not self._write_lock_count and locked:
            raise errors.ReadOnlyError(self)
        self._write_lock_count += 1
        if self._write_lock_count == 1:
            self._transaction = transactions.WriteTransaction()
        if not locked:
            if 'relock' in debug.debug_flags and self._prev_lock == 'w':
                note('%r was write locked again', self)
            self._prev_lock = 'w'
            self._unstacked_provider.enable_cache()
            for repo in self._fallback_repositories:
                # Writes don't affect fallback repos
                repo.lock_read()
            self._refresh_data()
        return RepositoryWriteLockResult(self.unlock, None)

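    # Illustrative sketch: write locks are counted, so nested lock_write()
    # calls are cheap and only the outermost unlock() finishes the
    # transaction and releases the physical lock:
    #
    #   repo.lock_write()   # count == 1, WriteTransaction created
    #   repo.lock_write()   # count == 2, no new transaction
    #   repo.unlock()       # count == 1, still locked
    #   repo.unlock()       # count == 0, transaction finished
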
    def lock_read(self):
        """Lock the repository for reads.

        :return: A bzrlib.lock.LogicalLockResult.
        """
        locked = self.is_locked()
        if self._write_lock_count:
            self._write_lock_count += 1
        else:
            self.control_files.lock_read()
        if not locked:
            if 'relock' in debug.debug_flags and self._prev_lock == 'r':
                note('%r was read locked again', self)
            self._prev_lock = 'r'
            self._unstacked_provider.enable_cache()
            for repo in self._fallback_repositories:
                repo.lock_read()
            self._refresh_data()
        return LogicalLockResult(self.unlock)

    def leave_lock_in_place(self):
        # not supported - raise an error
        raise NotImplementedError(self.leave_lock_in_place)

    def dont_leave_lock_in_place(self):
        # not supported - raise an error
        raise NotImplementedError(self.dont_leave_lock_in_place)

    def pack(self, hint=None, clean_obsolete_packs=False):
        """Compress the data within the repository.

        This will pack all the data to a single pack. In future it may
        recompress deltas or do other such expensive operations.
        """
        self._pack_collection.pack(hint=hint,
            clean_obsolete_packs=clean_obsolete_packs)

    def reconcile(self, other=None, thorough=False):
        """Reconcile this repository."""
        from bzrlib.reconcile import PackReconciler
        reconciler = PackReconciler(self, thorough=thorough)
        reconciler.reconcile()
        return reconciler

    def _reconcile_pack(self, collection, packs, extension, revs, pb):
        raise NotImplementedError(self._reconcile_pack)

    @only_raises(errors.LockNotHeld, errors.LockBroken)
    def unlock(self):
        if self._write_lock_count == 1 and self._write_group is not None:
            self.abort_write_group()
            self._unstacked_provider.disable_cache()
            self._transaction = None
            self._write_lock_count = 0
            raise errors.BzrError(
                'Must end write group before releasing write lock on %s'
                % self)
        if self._write_lock_count:
            self._write_lock_count -= 1
            if not self._write_lock_count:
                transaction = self._transaction
                self._transaction = None
                transaction.finish()
        else:
            self.control_files.unlock()

        if not self.is_locked():
            self._unstacked_provider.disable_cache()
            for repo in self._fallback_repositories:
                repo.unlock()

class RepositoryFormatPack(MetaDirVersionedFileRepositoryFormat):
    """Format logic for pack structured repositories.

    This repository format has:
     - a list of packs in pack-names
     - packs in packs/NAME.pack
     - indices in indices/NAME.{iix,six,tix,rix}
     - knit deltas in the packs, knit indices mapped to the indices.
     - thunk objects to support the knits programming API.
     - a format marker of its own
     - an optional 'shared-storage' flag
     - an optional 'no-working-trees' flag
     - a LockDir lock
    """

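    # Illustrative on-disk layout (the pack name here is made up):
    #
    #   .bzr/repository/pack-names
    #   .bzr/repository/packs/<name>.pack
    #   .bzr/repository/indices/<name>.rix   (revisions)
    #   .bzr/repository/indices/<name>.iix   (inventories)
    #   .bzr/repository/indices/<name>.tix   (texts)
    #   .bzr/repository/indices/<name>.six   (signatures)
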
    # Set this attribute in derived classes to control the repository class
    # created by open and initialize.
    repository_class = None
    # Set this attribute in derived classes to control the
    # _commit_builder_class that the repository objects will have passed to
    # their constructor.
    _commit_builder_class = None
    # Set this attribute in derived classes to control the _serializer that the
    # repository objects will have passed to their constructor.
    _serializer = None
    # Packs are not confused by ghosts.
    supports_ghosts = True
    # External references are not supported in pack repositories yet.
    supports_external_lookups = False
    # Most pack formats do not use chk lookups.
    supports_chks = False
    # What index classes to use
    index_builder_class = None
    index_class = None
    _fetch_uses_deltas = True
    fast_deltas = False
    supports_funky_characters = True
    revision_graph_can_have_wrong_parents = True

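    # Illustrative sketch: a concrete format fills in these hooks, along the
    # lines of the real pack formats in bzrlib.repofmt.knitpack_repo, e.g.:
    #
    #   class RepositoryFormatKnitPack1(RepositoryFormatPack):
    #       repository_class = KnitPackRepository
    #       _commit_builder_class = PackCommitBuilder
    #       index_builder_class = InMemoryGraphIndex
    #       index_class = GraphIndex
    #       # ... plus get_format_string() and get_format_description().
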
    def initialize(self, a_bzrdir, shared=False):
        """Create a pack based repository.

        :param a_bzrdir: bzrdir to contain the new repository; must already
            be initialized.
        :param shared: If true the repository will be initialized as a shared
            repository.
        """
        mutter('creating repository in %s.', a_bzrdir.transport.base)
        dirs = ['indices', 'obsolete_packs', 'packs', 'upload']
        builder = self.index_builder_class()
        files = [('pack-names', builder.finish())]
        utf8_files = [('format', self.get_format_string())]

        self._upload_blank_content(a_bzrdir, dirs, files, utf8_files, shared)
        repository = self.open(a_bzrdir=a_bzrdir, _found=True)
        self._run_post_repo_init_hooks(repository, a_bzrdir, shared)
        return repository

    def open(self, a_bzrdir, _found=False, _override_transport=None):
        """See RepositoryFormat.open().

        :param _override_transport: INTERNAL USE ONLY. Allows opening the
            repository at a slightly different url than normal.
            I.e. during 'upgrade'.
        """
        if not _found:
            format = RepositoryFormatMetaDir.find_format(a_bzrdir)
        if _override_transport is not None:
            repo_transport = _override_transport
        else:
            repo_transport = a_bzrdir.get_repository_transport(None)
        control_files = lockable_files.LockableFiles(repo_transport,
            'lock', lockdir.LockDir)
        return self.repository_class(_format=self,
            a_bzrdir=a_bzrdir,
            control_files=control_files,
            _commit_builder_class=self._commit_builder_class,
            _serializer=self._serializer)

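    # Usage sketch (illustrative): the format object both creates and opens
    # repositories:
    #
    #   repository = format.initialize(a_bzrdir)        # create on disk
    #   repository = format.open(a_bzrdir, _found=True) # open existing
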
class RetryPackOperations(errors.RetryWithNewPacks):
    """Raised when we are packing and we find a missing file.

    Meant as a signaling exception, to tell the RepositoryPackCollection.pack
    code it should try again.
    """

    internal_error = True

    _fmt = ("Pack files have changed, reload and try pack again."
            " context: %(context)s %(orig_error)s")

class _DirectPackAccess(object):
    """Access to data in one or more packs with less translation."""

    def __init__(self, index_to_packs, reload_func=None, flush_func=None):
        """Create a _DirectPackAccess object.

        :param index_to_packs: A dict mapping index objects to the transport
            and file names for obtaining data.
        :param reload_func: A function to call if we determine that the pack
            files have moved and we need to reload our caches. See
            bzrlib.repofmt.pack_repo.AggregateIndex for more details.
        """
        self._container_writer = None
        self._write_index = None
        self._indices = index_to_packs
        self._reload_func = reload_func
        self._flush_func = flush_func

    def add_raw_records(self, key_sizes, raw_data):
        """Add raw knit bytes to a storage area.

        The data is spooled to the container writer in one bytes-record per
        raw data item.

        :param key_sizes: An iterable of tuples containing the key and size of
            each raw data segment.
        :param raw_data: A bytestring containing the data.
        :return: A list of memos to retrieve the record later. Each memo is an
            opaque index memo. For _DirectPackAccess the memo is (index, pos,
            length), where the index field is the write_index object supplied
            to the PackAccess object.
        """
        if type(raw_data) is not str:
            raise AssertionError(
                'data must be plain bytes was %s' % type(raw_data))
        result = []
        offset = 0
        for key, size in key_sizes:
            p_offset, p_length = self._container_writer.add_bytes_record(
                raw_data[offset:offset+size], [])
            offset += size
            result.append((self._write_index, p_offset, p_length))
        return result

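    # Usage sketch (illustrative): callers concatenate the raw record bytes
    # and pass matching (key, size) pairs; the returned memos are later
    # handed to get_raw_records():
    #
    #   memos = access.add_raw_records(
    #       [(key1, len(bytes1)), (key2, len(bytes2))], bytes1 + bytes2)
    #   raw = list(access.get_raw_records(memos))
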
    def flush(self):
        """Flush pending writes on this access object.

        This will flush any buffered writes to a NewPack.
        """
        if self._flush_func is not None:
            self._flush_func()

    def get_raw_records(self, memos_for_retrieval):
        """Get the raw bytes for the records.

        :param memos_for_retrieval: An iterable containing the (index, pos,
            length) memo for retrieving the bytes. The Pack access method
            looks up the pack to use for a given record in its index_to_pack
            map.
        :return: An iterator over the bytes of the records.
        """
        # first pass, group into same-index requests
        request_lists = []
        current_index = None
        for (index, offset, length) in memos_for_retrieval:
            if current_index == index:
                current_list.append((offset, length))
            else:
                if current_index is not None:
                    request_lists.append((current_index, current_list))
                current_index = index
                current_list = [(offset, length)]
        # handle the last entry
        if current_index is not None:
            request_lists.append((current_index, current_list))
        for index, offsets in request_lists:
            try:
                transport, path = self._indices[index]
            except KeyError:
                # A KeyError here indicates that someone has triggered an index
                # reload, and this index has gone missing, we need to start
                # over.
                if self._reload_func is None:
                    # If we don't have a _reload_func there is nothing that can
                    # be done here, so just raise the error.
                    raise
                raise errors.RetryWithNewPacks(index,
                    reload_occurred=True, exc_info=sys.exc_info())
            try:
                reader = pack.make_readv_reader(transport, path, offsets)
                for names, read_func in reader.iter_records():
                    yield read_func(None)
            except errors.NoSuchFile:
                # A NoSuchFile error indicates that a pack file has gone
                # missing on disk, we need to trigger a reload, and start over.
                if self._reload_func is None:
                    raise
                raise errors.RetryWithNewPacks(transport.abspath(path),
                    reload_occurred=False, exc_info=sys.exc_info())

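    # Illustrative sketch: the grouping pass above turns, e.g.,
    #
    #   [(idx1, 0, 10), (idx1, 10, 5), (idx2, 0, 7)]
    #
    # into
    #
    #   [(idx1, [(0, 10), (10, 5)]), (idx2, [(0, 7)])]
    #
    # so that each pack file is read with a single readv request.
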
    def set_writer(self, writer, index, transport_packname):
        """Set a writer to use for adding data."""
        if index is not None:
            self._indices[index] = transport_packname
        self._container_writer = writer
        self._write_index = index

    def reload_or_raise(self, retry_exc):
        """Try calling the reload function, or re-raise the original exception.

        This should be called after _DirectPackAccess raises a
        RetryWithNewPacks exception. This function will handle the common logic
        of determining when the error is fatal versus being temporary.
        It will also make sure that the original exception is raised, rather
        than the RetryWithNewPacks exception.

        If this function returns, then the calling function should retry
        whatever operation was being performed. Otherwise an exception will
        be raised.

        :param retry_exc: A RetryWithNewPacks exception.
        """
        is_error = False
        if self._reload_func is None:
            is_error = True
        elif not self._reload_func():
            # The reload claimed that nothing changed
            if not retry_exc.reload_occurred:
                # If there wasn't an earlier reload, then we really were
                # expecting to find changes. We didn't find them, so this is a
                # hard error.
                is_error = True
        if is_error:
            exc_class, exc_value, exc_traceback = retry_exc.exc_info
            raise exc_class, exc_value, exc_traceback

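    # Usage sketch (illustrative; do_pack_operation is a hypothetical
    # stand-in): the expected caller pattern around RetryWithNewPacks is a
    # retry loop:
    #
    #   while True:
    #       try:
    #           return do_pack_operation()
    #       except errors.RetryWithNewPacks, retry_exc:
    #           access.reload_or_raise(retry_exc)
    #           # reload succeeded; loop and retry the operation
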