# Copyright (C) 2007-2011 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

from __future__ import absolute_import

import re
import sys

from bzrlib.lazy_import import lazy_import
lazy_import(globals(), """
from itertools import izip
import time

from bzrlib import (
    chk_map,
    cleanup,
    config,
    debug,
    graph,
    osutils,
    pack,
    transactions,
    tsort,
    ui,
    )
from bzrlib.index import (
    CombinedGraphIndex,
    GraphIndexPrefixAdapter,
    )
""")
from bzrlib import (
    btree_index,
    errors,
    lockable_files,
    lockdir,
    )

from bzrlib.decorators import (
    needs_read_lock,
    needs_write_lock,
    only_raises,
    )
from bzrlib.lock import LogicalLockResult
from bzrlib.repository import (
    _LazyListJoin,
    MetaDirRepository,
    RepositoryFormatMetaDir,
    RepositoryWriteLockResult,
    )
from bzrlib.vf_repository import (
    MetaDirVersionedFileRepository,
    MetaDirVersionedFileRepositoryFormat,
    VersionedFileCommitBuilder,
    VersionedFileRootCommitBuilder,
    )
from bzrlib.trace import (
    mutter,
    note,
    warning,
    )


class PackCommitBuilder(VersionedFileCommitBuilder):
    """Subclass of VersionedFileCommitBuilder to add texts with pack semantics.

    Specifically this uses one knit object rather than one knit object per
    added text, reducing memory and object pressure.
    """

    def __init__(self, repository, parents, config, timestamp=None,
                 timezone=None, committer=None, revprops=None,
                 revision_id=None, lossy=False):
        VersionedFileCommitBuilder.__init__(self, repository, parents, config,
            timestamp=timestamp, timezone=timezone, committer=committer,
            revprops=revprops, revision_id=revision_id, lossy=lossy)
        self._file_graph = graph.Graph(
            repository._pack_collection.text_index.combined_index)

    def _heads(self, file_id, revision_ids):
        keys = [(file_id, revision_id) for revision_id in revision_ids]
        return set([key[1] for key in self._file_graph.heads(keys)])


class PackRootCommitBuilder(VersionedFileRootCommitBuilder):
    """A subclass of RootCommitBuilder to add texts with pack semantics.

    Specifically this uses one knit object rather than one knit object per
    added text, reducing memory and object pressure.
    """

    def __init__(self, repository, parents, config, timestamp=None,
                 timezone=None, committer=None, revprops=None,
                 revision_id=None, lossy=False):
        super(PackRootCommitBuilder, self).__init__(repository, parents,
            config, timestamp=timestamp, timezone=timezone,
            committer=committer, revprops=revprops, revision_id=revision_id,
            lossy=lossy)
        self._file_graph = graph.Graph(
            repository._pack_collection.text_index.combined_index)

    def _heads(self, file_id, revision_ids):
        keys = [(file_id, revision_id) for revision_id in revision_ids]
        return set([key[1] for key in self._file_graph.heads(keys)])
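

# Hedged illustration (added, not from the original source): _heads() widens
# each revision_id into a (file_id, revision_id) key, asks the per-file graph
# for the heads, and strips the file_id back off. So if 'rev-merged' descends
# from both 'rev-a' and 'rev-b' in this file's graph:
#
#   builder._heads('file-id', ['rev-a', 'rev-b', 'rev-merged'])
#   => set(['rev-merged'])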


class Pack(object):
    """An in memory proxy for a pack and its indices.

    This is a base class that is not directly used, instead the classes
    ExistingPack and NewPack are used.
    """

    # A map of index 'type' to the file extension and position in the
    # index_sizes array.
    index_definitions = {
        'chk': ('.cix', 4),
        'revision': ('.rix', 0),
        'inventory': ('.iix', 1),
        'text': ('.tix', 2),
        'signature': ('.six', 3),
        }

    def __init__(self, revision_index, inventory_index, text_index,
        signature_index, chk_index=None):
        """Create a pack instance.

        :param revision_index: A GraphIndex for determining what revisions are
            present in the Pack and accessing the locations of their texts.
        :param inventory_index: A GraphIndex for determining what inventories are
            present in the Pack and accessing the locations of their
            texts/deltas.
        :param text_index: A GraphIndex for determining what file texts
            are present in the pack and accessing the locations of their
            texts/deltas (via (fileid, revisionid) tuples).
        :param signature_index: A GraphIndex for determining what signatures are
            present in the Pack and accessing the locations of their texts.
        :param chk_index: A GraphIndex for accessing content by CHK, if the
            pack has one.
        """
        self.revision_index = revision_index
        self.inventory_index = inventory_index
        self.text_index = text_index
        self.signature_index = signature_index
        self.chk_index = chk_index

    def access_tuple(self):
        """Return a tuple (transport, name) for the pack content."""
        return self.pack_transport, self.file_name()

    def _check_references(self):
        """Make sure our external references are present.

        Packs are allowed to have deltas whose base is not in the pack, but it
        must be present somewhere in this collection. It is not allowed to
        have deltas based on a fallback repository.
        (See <https://bugs.launchpad.net/bzr/+bug/288751>)
        """
        missing_items = {}
        for (index_name, external_refs, index) in [
            ('texts',
                self._get_external_refs(self.text_index),
                self._pack_collection.text_index.combined_index),
            ('inventories',
                self._get_external_refs(self.inventory_index),
                self._pack_collection.inventory_index.combined_index),
            ]:
            missing = external_refs.difference(
                k for (idx, k, v, r) in
                index.iter_entries(external_refs))
            if missing:
                missing_items[index_name] = sorted(list(missing))
        if missing_items:
            from pprint import pformat
            raise errors.BzrCheckError(
                "Newly created pack file %r has delta references to "
                "items not in its repository:\n%s"
                % (self, pformat(missing_items)))

    def file_name(self):
        """Get the file name for the pack on disk."""
        return self.name + '.pack'

    def get_revision_count(self):
        return self.revision_index.key_count()

    def index_name(self, index_type, name):
        """Get the disk name of an index type for pack name 'name'."""
        return name + Pack.index_definitions[index_type][0]

    def index_offset(self, index_type):
        """Get the position in an index_sizes array for a given index type."""
        return Pack.index_definitions[index_type][1]

    def inventory_index_name(self, name):
        """The inv index is the name + .iix."""
        return self.index_name('inventory', name)

    def revision_index_name(self, name):
        """The revision index is the name + .rix."""
        return self.index_name('revision', name)

    def signature_index_name(self, name):
        """The signature index is the name + .six."""
        return self.index_name('signature', name)

    def text_index_name(self, name):
        """The text index is the name + .tix."""
        return self.index_name('text', name)

    def _replace_index_with_readonly(self, index_type):
        unlimited_cache = False
        if index_type == 'chk':
            unlimited_cache = True
        index = self.index_class(self.index_transport,
            self.index_name(index_type, self.name),
            self.index_sizes[self.index_offset(index_type)],
            unlimited_cache=unlimited_cache)
        if index_type == 'chk':
            index._leaf_factory = btree_index._gcchk_factory
        setattr(self, index_type + '_index', index)
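

# Illustrative sketch (added; not part of the original module): the
# index_definitions table above fully determines both the on-disk file name
# and the index_sizes slot for every index of a pack.
def _pack_index_naming_example():
    """Hedged demo of Pack's naming scheme for a hypothetical pack name."""
    suffix, offset = Pack.index_definitions['text']
    assert suffix == '.tix' and offset == 2
    # index_name() simply concatenates: pack 'abc123' -> 'abc123.tix'
    assert 'abc123' + suffix == 'abc123.tix'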


class ExistingPack(Pack):
    """An in memory proxy for an existing .pack and its disk indices."""

    def __init__(self, pack_transport, name, revision_index, inventory_index,
        text_index, signature_index, chk_index=None):
        """Create an ExistingPack object.

        :param pack_transport: The transport where the pack file resides.
        :param name: The name of the pack on disk in the pack_transport.
        """
        Pack.__init__(self, revision_index, inventory_index, text_index,
            signature_index, chk_index)
        self.name = name
        self.pack_transport = pack_transport
        if None in (revision_index, inventory_index, text_index,
                signature_index, name, pack_transport):
            raise AssertionError()

    def __eq__(self, other):
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return "<%s.%s object at 0x%x, %s, %s>" % (
            self.__class__.__module__, self.__class__.__name__, id(self),
            self.pack_transport, self.name)


class ResumedPack(ExistingPack):

    def __init__(self, name, revision_index, inventory_index, text_index,
        signature_index, upload_transport, pack_transport, index_transport,
        pack_collection, chk_index=None):
        """Create a ResumedPack object."""
        ExistingPack.__init__(self, pack_transport, name, revision_index,
            inventory_index, text_index, signature_index,
            chk_index=chk_index)
        self.upload_transport = upload_transport
        self.index_transport = index_transport
        self.index_sizes = [None, None, None, None]
        indices = [
            ('revision', revision_index),
            ('inventory', inventory_index),
            ('text', text_index),
            ('signature', signature_index),
            ]
        if chk_index is not None:
            indices.append(('chk', chk_index))
            self.index_sizes.append(None)
        for index_type, index in indices:
            offset = self.index_offset(index_type)
            self.index_sizes[offset] = index._size
        self.index_class = pack_collection._index_class
        self._pack_collection = pack_collection
        self._state = 'resumed'
        # XXX: perhaps check that the .pack file exists?

    def access_tuple(self):
        if self._state == 'finished':
            return Pack.access_tuple(self)
        elif self._state == 'resumed':
            return self.upload_transport, self.file_name()
        else:
            raise AssertionError(self._state)

    def abort(self):
        self.upload_transport.delete(self.file_name())
        indices = [self.revision_index, self.inventory_index, self.text_index,
            self.signature_index]
        if self.chk_index is not None:
            indices.append(self.chk_index)
        for index in indices:
            index._transport.delete(index._name)

    def finish(self):
        self._check_references()
        index_types = ['revision', 'inventory', 'text', 'signature']
        if self.chk_index is not None:
            index_types.append('chk')
        for index_type in index_types:
            old_name = self.index_name(index_type, self.name)
            new_name = '../indices/' + old_name
            self.upload_transport.move(old_name, new_name)
            self._replace_index_with_readonly(index_type)
        new_name = '../packs/' + self.file_name()
        self.upload_transport.move(self.file_name(), new_name)
        self._state = 'finished'

    def _get_external_refs(self, index):
        """Return compression parents for this index that are not present.

        This returns any compression parents that are referenced by this index,
        which are not contained *in* this index. They may be present elsewhere.
        """
        return index.external_references(1)
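

# ResumedPack's small state machine (hedged summary, added for clarity): a
# pack starts out 'resumed', with its content still in upload/ and its
# indices beside it; finish() moves the indices into indices/ and the .pack
# into packs/, flipping the state to 'finished' so that access_tuple()
# serves the final location instead of the upload area.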


class NewPack(Pack):
    """An in memory proxy for a pack which is being created."""

    def __init__(self, pack_collection, upload_suffix='', file_mode=None):
        """Create a NewPack instance.

        :param pack_collection: A PackCollection into which this is being inserted.
        :param upload_suffix: An optional suffix to be given to any temporary
            files created during the pack creation. e.g. '.autopack'
        :param file_mode: Unix permissions for newly created file.
        """
        # The relative locations of the packs are constrained, but all are
        # passed in because the caller has them, so as to avoid object churn.
        index_builder_class = pack_collection._index_builder_class
        if pack_collection.chk_index is not None:
            chk_index = index_builder_class(reference_lists=0)
        else:
            chk_index = None
        Pack.__init__(self,
            # Revisions: parents list, no text compression.
            index_builder_class(reference_lists=1),
            # Inventory: We want to map compression only, but currently the
            # knit code hasn't been updated enough to understand that, so we
            # have a regular 2-list index giving parents and compression
            # source.
            index_builder_class(reference_lists=2),
            # Texts: compression and per file graph, for all fileids - so two
            # reference lists and two elements in the key tuple.
            index_builder_class(reference_lists=2, key_elements=2),
            # Signatures: Just blobs to store, no compression, no parents
            # list.
            index_builder_class(reference_lists=0),
            # CHK based storage - just blobs, no compression or parents.
            chk_index=chk_index
            )
        self._pack_collection = pack_collection
        # When we make readonly indices, we need this.
        self.index_class = pack_collection._index_class
        # where should the new pack be opened
        self.upload_transport = pack_collection._upload_transport
        # where are indices written out to
        self.index_transport = pack_collection._index_transport
        # where is the pack renamed to when it is finished?
        self.pack_transport = pack_collection._pack_transport
        # What file mode to upload the pack and indices with.
        self._file_mode = file_mode
        # tracks the content written to the .pack file.
        self._hash = osutils.md5()
        # a tuple with the length in bytes of the indices, once the pack
        # is finalised. (rev, inv, text, sigs, chk_if_in_use)
        self.index_sizes = None
        # How much data to cache when writing packs. Note that this is not
        # synchronised with reads, because it's not in the transport layer, so
        # is not safe unless the client knows it won't be reading from the pack
        # under creation.
        self._cache_limit = 0
        # the temporary pack file name.
        self.random_name = osutils.rand_chars(20) + upload_suffix
        # when was this pack started ?
        self.start_time = time.time()
        # open an output stream for the data added to the pack.
        self.write_stream = self.upload_transport.open_write_stream(
            self.random_name, mode=self._file_mode)
        if 'pack' in debug.debug_flags:
            mutter('%s: create_pack: pack stream open: %s%s t+%6.3fs',
                time.ctime(), self.upload_transport.base, self.random_name,
                time.time() - self.start_time)
        # A list of byte sequences to be written to the new pack, and the
        # aggregate size of them. Stored as a list rather than separate
        # variables so that the _write_data closure below can update them.
        self._buffer = [[], 0]
        # create a callable for adding data
        #
        # robertc says- this is a closure rather than a method on the object
        # so that the variables are locals, and faster than accessing object
        # members.
        def _write_data(bytes, flush=False, _buffer=self._buffer,
            _write=self.write_stream.write, _update=self._hash.update):
            _buffer[0].append(bytes)
            _buffer[1] += len(bytes)
            if _buffer[1] > self._cache_limit or flush:
                bytes = ''.join(_buffer[0])
                _write(bytes)
                _update(bytes)
                _buffer[:] = [[], 0]
        # expose this on self, for the occasion when clients want to add data.
        self._write_data = _write_data
        # a pack writer object to serialise pack records.
        self._writer = pack.ContainerWriter(self._write_data)
        self._writer.begin()
        # what state is the pack in? (open, finished, aborted)
        self._state = 'open'
        # no name until we finish writing the content
        self.name = None

    def abort(self):
        """Cancel creating this pack."""
        self._state = 'aborted'
        self.write_stream.close()
        # Remove the temporary pack file.
        self.upload_transport.delete(self.random_name)
        # The indices have no state on disk.

    def access_tuple(self):
        """Return a tuple (transport, name) for the pack content."""
        if self._state == 'finished':
            return Pack.access_tuple(self)
        elif self._state == 'open':
            return self.upload_transport, self.random_name
        else:
            raise AssertionError(self._state)

    def data_inserted(self):
        """True if data has been added to this pack."""
        return bool(self.get_revision_count() or
            self.inventory_index.key_count() or
            self.text_index.key_count() or
            self.signature_index.key_count() or
            (self.chk_index is not None and self.chk_index.key_count()))

    def finish_content(self):
        if self.name is not None:
            return
        self._writer.end()
        if self._buffer[1]:
            self._write_data('', flush=True)
        self.name = self._hash.hexdigest()

    def finish(self, suspend=False):
        """Finish the new pack.

        This:
        - finalises the content
        - assigns a name (the md5 of the content, currently)
        - writes out the associated indices
        - renames the pack into place.
        - stores the index size tuple for the pack in the index_sizes
          attribute.
        """
        self.finish_content()
        if not suspend:
            self._check_references()
        # write indices
        # XXX: It'd be better to write them all to temporary names, then
        # rename them all into place, so that the window when only some are
        # visible is smaller. On the other hand none will be seen until
        # they're in the names list.
        self.index_sizes = [None, None, None, None]
        self._write_index('revision', self.revision_index, 'revision',
            suspend)
        self._write_index('inventory', self.inventory_index, 'inventory',
            suspend)
        self._write_index('text', self.text_index, 'file texts', suspend)
        self._write_index('signature', self.signature_index,
            'revision signatures', suspend)
        if self.chk_index is not None:
            self.index_sizes.append(None)
            self._write_index('chk', self.chk_index,
                'content hash bytes', suspend)
        self.write_stream.close(
            want_fdatasync=self._pack_collection.config_stack.get('repository.fdatasync'))
        # Note that this will clobber an existing pack with the same name,
        # without checking for hash collisions. While this is undesirable this
        # is something that can be rectified in a subsequent release. One way
        # to rectify it may be to leave the pack at the original name, writing
        # its pack-names entry as something like 'HASH: index-sizes
        # temporary-name'. Allocate that and check for collisions, if it is
        # collision free then rename it into place. If clients know this scheme
        # they can handle missing-file errors by:
        #  - try for HASH.pack
        #  - try for temporary-name
        #  - refresh the pack-list to see if the pack is now absent
        new_name = self.name + '.pack'
        if not suspend:
            new_name = '../packs/' + new_name
        self.upload_transport.move(self.random_name, new_name)
        self._state = 'finished'
        if 'pack' in debug.debug_flags:
            # XXX: size might be interesting?
            mutter('%s: create_pack: pack finished: %s%s->%s t+%6.3fs',
                time.ctime(), self.upload_transport.base, self.random_name,
                new_name, time.time() - self.start_time)

    def flush(self):
        """Flush any current data."""
        if self._buffer[1]:
            bytes = ''.join(self._buffer[0])
            self.write_stream.write(bytes)
            self._hash.update(bytes)
            self._buffer[:] = [[], 0]

    def _get_external_refs(self, index):
        return index._external_references()

    def set_write_cache_size(self, size):
        self._cache_limit = size

    def _write_index(self, index_type, index, label, suspend=False):
        """Write out an index.

        :param index_type: The type of index to write - e.g. 'revision'.
        :param index: The index object to serialise.
        :param label: What label to give the index e.g. 'revision'.
        """
        index_name = self.index_name(index_type, self.name)
        if suspend:
            transport = self.upload_transport
        else:
            transport = self.index_transport
        index_tempfile = index.finish()
        index_bytes = index_tempfile.read()
        write_stream = transport.open_write_stream(index_name,
            mode=self._file_mode)
        write_stream.write(index_bytes)
        write_stream.close(
            want_fdatasync=self._pack_collection.config_stack.get('repository.fdatasync'))
        self.index_sizes[self.index_offset(index_type)] = len(index_bytes)
        if 'pack' in debug.debug_flags:
            # XXX: size might be interesting?
            mutter('%s: create_pack: wrote %s index: %s%s t+%6.3fs',
                time.ctime(), label, self.upload_transport.base,
                self.random_name, time.time() - self.start_time)
        # Replace the writable index on this object with a readonly,
        # presently unloaded index. We should alter
        # the index layer to make its finish() error if add_node is
        # subsequently used. RBC
        self._replace_index_with_readonly(index_type)
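

# The _write_data closure in NewPack.__init__ implements a simple
# write-through buffer: appends accumulate in a shared [chunks, size] pair
# and only hit the transport (and the md5) once the cache limit is exceeded
# or a flush is forced. A minimal standalone sketch of the same pattern
# follows; the names are hypothetical and it omits the hashing:
def _buffered_writer_example(write, cache_limit=0):
    buf = [[], 0]
    def write_data(data, flush=False):
        buf[0].append(data)
        buf[1] += len(data)
        if buf[1] > cache_limit or flush:
            write(''.join(buf[0]))
            buf[:] = [[], 0]
    return write_data
# e.g. w = _buffered_writer_example(stream.write, 4096); w('abc'); w('', flush=True)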


class AggregateIndex(object):
    """An aggregated index for the RepositoryPackCollection.

    AggregateIndex is responsible for managing the PackAccess object,
    Index-To-Pack mapping, and all indices list for a specific type of index
    such as 'revision index'.

    A CombinedIndex provides an index on a single key space built up
    from several on-disk indices. The AggregateIndex builds on this
    to provide a knit access layer, and allows having up to one writable
    index within the collection.
    """
    # XXX: Probably 'can be written to' could/should be separated from 'acts
    # like a knit index' -- mbp 20071024

    def __init__(self, reload_func=None, flush_func=None):
        """Create an AggregateIndex.

        :param reload_func: A function to call if we find we are missing an
            index. Should have the form reload_func() => True if the list of
            active pack files has changed.
        """
        self._reload_func = reload_func
        self.index_to_pack = {}
        self.combined_index = CombinedGraphIndex([], reload_func=reload_func)
        self.data_access = _DirectPackAccess(self.index_to_pack,
                                             reload_func=reload_func,
                                             flush_func=flush_func)
        self.add_callback = None

    def add_index(self, index, pack):
        """Add index to the aggregate, which is an index for Pack pack.

        Future searches on the aggregate index will search this new index
        before all previously inserted indices.

        :param index: An Index for the pack.
        :param pack: A Pack instance.
        """
        # expose it to the index map
        self.index_to_pack[index] = pack.access_tuple()
        # put it at the front of the linear index list
        self.combined_index.insert_index(0, index, pack.name)

    def add_writable_index(self, index, pack):
        """Add an index which is able to have data added to it.

        There can be at most one writable index at any time. Any
        modifications made to the knit are put into this index.

        :param index: An index from the pack parameter.
        :param pack: A Pack instance.
        """
        if self.add_callback is not None:
            raise AssertionError(
                "%s already has a writable index through %s" % \
                (self, self.add_callback))
        # allow writing: queue writes to a new index
        self.add_index(index, pack)
        # Updates the index to packs mapping as a side effect,
        self.data_access.set_writer(pack._writer, index, pack.access_tuple())
        self.add_callback = index.add_nodes

    def clear(self):
        """Reset all the aggregate data to nothing."""
        self.data_access.set_writer(None, None, (None, None))
        self.index_to_pack.clear()
        del self.combined_index._indices[:]
        del self.combined_index._index_names[:]
        self.add_callback = None

    def remove_index(self, index):
        """Remove index from the indices used to answer queries.

        :param index: An index from the pack parameter.
        """
        del self.index_to_pack[index]
        pos = self.combined_index._indices.index(index)
        del self.combined_index._indices[pos]
        del self.combined_index._index_names[pos]
        if (self.add_callback is not None and
            getattr(index, 'add_nodes', None) == self.add_callback):
            self.add_callback = None
            self.data_access.set_writer(None, None, (None, None))
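

# A hedged usage sketch for AggregateIndex (added; the surrounding
# collection normally does this wiring): every pack contributes a read-only
# index via add_index(), newest first, and at most one NewPack contributes a
# writable index via add_writable_index(); clear() resets both.
#
#   agg = AggregateIndex()
#   agg.add_index(existing_pack.revision_index, existing_pack)
#   agg.add_writable_index(new_pack.revision_index, new_pack)
#   ...
#   agg.clear()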


class Packer(object):
    """Create a pack from packs."""

    def __init__(self, pack_collection, packs, suffix, revision_ids=None,
                 reload_func=None):
        """Create a Packer.

        :param pack_collection: A RepositoryPackCollection object where the
            new pack is being written to.
        :param packs: The packs to combine.
        :param suffix: The suffix to use on the temporary files for the pack.
        :param revision_ids: Revision ids to limit the pack to.
        :param reload_func: A function to call if a pack file/index goes
            missing. The side effect of calling this function should be to
            update self.packs. See also AggregateIndex.
        """
        self.packs = packs
        self.suffix = suffix
        self.revision_ids = revision_ids
        # The pack object we are creating.
        self.new_pack = None
        self._pack_collection = pack_collection
        self._reload_func = reload_func
        # The index layer keys for the revisions being copied. None for 'all
        # revisions'.
        self._revision_keys = None
        # What text keys to copy. None for 'all texts'. This is set by
        # _copy_inventory_texts
        self._text_filter = None

    def pack(self, pb=None):
        """Create a new pack by reading data from other packs.

        This does little more than a bulk copy of data. One key difference
        is that data with the same item key across multiple packs is elided
        from the output. The new pack is written into the current pack store
        along with its indices, and the name added to the pack names. The
        source packs are not altered and are not required to be in the current
        pack collection.

        :param pb: An optional progress bar to use. A nested bar is created if
            this is None.
        :return: A Pack object, or None if nothing was copied.
        """
        # open a pack - using the same name as the last temporary file
        # - which has already been flushed, so it's safe.
        # XXX: - duplicate code warning with start_write_group; fix before
        #      considering 'done'.
        if self._pack_collection._new_pack is not None:
            raise errors.BzrError('call to %s.pack() while another pack is'
                                  ' being written.'
                                  % (self.__class__.__name__,))
        if self.revision_ids is not None:
            if len(self.revision_ids) == 0:
                # silly fetch request.
                return None
            else:
                self.revision_ids = frozenset(self.revision_ids)
                self.revision_keys = frozenset((revid,) for revid in
                    self.revision_ids)
        if pb is None:
            self.pb = ui.ui_factory.nested_progress_bar()
        else:
            self.pb = pb
        try:
            return self._create_pack_from_packs()
        finally:
            if pb is None:
                self.pb.finished()

    def open_pack(self):
        """Open a pack for the pack we are creating."""
        new_pack = self._pack_collection.pack_factory(self._pack_collection,
                upload_suffix=self.suffix,
                file_mode=self._pack_collection.repo.bzrdir._get_file_mode())
        # We know that we will process all nodes in order, and don't need to
        # query, so don't combine any indices spilled to disk until we are done
        new_pack.revision_index.set_optimize(combine_backing_indices=False)
        new_pack.inventory_index.set_optimize(combine_backing_indices=False)
        new_pack.text_index.set_optimize(combine_backing_indices=False)
        new_pack.signature_index.set_optimize(combine_backing_indices=False)
        return new_pack

    def _copy_revision_texts(self):
        """Copy revision data to the new pack."""
        raise NotImplementedError(self._copy_revision_texts)

    def _copy_inventory_texts(self):
        """Copy the inventory texts to the new pack.

        self._revision_keys is used to determine what inventories to copy.

        Sets self._text_filter appropriately.
        """
        raise NotImplementedError(self._copy_inventory_texts)

    def _copy_text_texts(self):
        raise NotImplementedError(self._copy_text_texts)

    def _create_pack_from_packs(self):
        raise NotImplementedError(self._create_pack_from_packs)

    def _log_copied_texts(self):
        if 'pack' in debug.debug_flags:
            mutter('%s: create_pack: file texts copied: %s%s %d items t+%6.3fs',
                time.ctime(), self._pack_collection._upload_transport.base,
                self.new_pack.random_name,
                self.new_pack.text_index.key_count(),
                time.time() - self.new_pack.start_time)

    def _use_pack(self, new_pack):
        """Return True if new_pack should be used.

        :param new_pack: The pack that has just been created.
        :return: True if the pack should be used.
        """
        return new_pack.data_inserted()


class RepositoryPackCollection(object):
    """Management of packs within a repository.

    :ivar _names: map of {pack_name: (index_size,)}
    """

    pack_factory = None
    resumed_pack_factory = None
    normal_packer_class = None
    optimising_packer_class = None

    def __init__(self, repo, transport, index_transport, upload_transport,
                 pack_transport, index_builder_class, index_class,
                 use_chk_index):
        """Create a new RepositoryPackCollection.

        :param transport: Addresses the repository base directory
            (typically .bzr/repository/).
        :param index_transport: Addresses the directory containing indices.
        :param upload_transport: Addresses the directory into which packs are written
            while they're being created.
        :param pack_transport: Addresses the directory of existing complete packs.
        :param index_builder_class: The index builder class to use.
        :param index_class: The index class to use.
        :param use_chk_index: Whether to setup and manage a CHK index.
        """
        # XXX: This should call self.reset()
        self.repo = repo
        self.transport = transport
        self._index_transport = index_transport
        self._upload_transport = upload_transport
        self._pack_transport = pack_transport
        self._index_builder_class = index_builder_class
        self._index_class = index_class
        self._suffix_offsets = {'.rix': 0, '.iix': 1, '.tix': 2, '.six': 3,
            '.cix': 4}
        self.packs = []
        # name:Pack mapping
        self._names = None
        self._packs_by_name = {}
        # the previous pack-names content
        self._packs_at_load = None
        # when a pack is being created by this object, the state of that pack.
        self._new_pack = None
        # aggregated revision index data
        flush = self._flush_new_pack
        self.revision_index = AggregateIndex(self.reload_pack_names, flush)
        self.inventory_index = AggregateIndex(self.reload_pack_names, flush)
        self.text_index = AggregateIndex(self.reload_pack_names, flush)
        self.signature_index = AggregateIndex(self.reload_pack_names, flush)
        all_indices = [self.revision_index, self.inventory_index,
                self.text_index, self.signature_index]
        if use_chk_index:
            self.chk_index = AggregateIndex(self.reload_pack_names, flush)
            all_indices.append(self.chk_index)
        else:
            # used to determine if we're using a chk_index elsewhere.
            self.chk_index = None
        # Tell all the CombinedGraphIndex objects about each other, so they can
        # share hints about which pack names to search first.
        all_combined = [agg_idx.combined_index for agg_idx in all_indices]
        for combined_idx in all_combined:
            combined_idx.set_sibling_indices(
                set(all_combined).difference([combined_idx]))
        # resumed packs
        self._resumed_packs = []
        self.config_stack = config.LocationStack(self.transport.base)

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, self.repo)

    def add_pack_to_memory(self, pack):
        """Make a Pack object available to the repository to satisfy queries.

        :param pack: A Pack object.
        """
        if pack.name in self._packs_by_name:
            raise AssertionError(
                'pack %s already in _packs_by_name' % (pack.name,))
        self.packs.append(pack)
        self._packs_by_name[pack.name] = pack
        self.revision_index.add_index(pack.revision_index, pack)
        self.inventory_index.add_index(pack.inventory_index, pack)
        self.text_index.add_index(pack.text_index, pack)
        self.signature_index.add_index(pack.signature_index, pack)
        if self.chk_index is not None:
            self.chk_index.add_index(pack.chk_index, pack)

    def all_packs(self):
        """Return a list of all the Pack objects this repository has.

        Note that an in-progress pack being created is not returned.

        :return: A list of Pack objects for all the packs in the repository.
        """
        result = []
        for name in self.names():
            result.append(self.get_pack_by_name(name))
        return result

    def autopack(self):
        """Pack the pack collection incrementally.

        This will not attempt global reorganisation or recompression,
        rather it will just ensure that the total number of packs does
        not grow without bound. It uses the _max_pack_count method to
        determine if autopacking is needed, and the pack_distribution
        method to determine the number of revisions in each pack.

        If autopacking takes place then the packs name collection will have
        been flushed to disk - packing requires updating the name collection
        in synchronisation with certain steps. Otherwise the names collection
        is not flushed.

        :return: Something evaluating true if packing took place.
        """
        while True:
            try:
                return self._do_autopack()
            except errors.RetryAutopack:
                # If we get a RetryAutopack exception, we should abort the
                # current action, and retry.
                pass

    def _do_autopack(self):
        # XXX: Should not be needed when the management of indices is sane.
        total_revisions = self.revision_index.combined_index.key_count()
        total_packs = len(self._names)
        if self._max_pack_count(total_revisions) >= total_packs:
            return None
        # determine which packs need changing
        pack_distribution = self.pack_distribution(total_revisions)
        existing_packs = []
        for pack in self.all_packs():
            revision_count = pack.get_revision_count()
            if revision_count == 0:
                # revision less packs are not generated by normal operation,
                # only by operations like sign-my-commits, and thus will not
                # tend to grow rapidly or without bound like commit containing
                # packs do - leave them alone as packing them really should
                # group their data with the relevant commit, and that may
                # involve rewriting ancient history - which autopack tries to
                # avoid. Alternatively we could not group the data but treat
                # each of these as having a single revision, and thus add
                # one revision for each to the total revision count, to get
                # a matching distribution.
                continue
            existing_packs.append((revision_count, pack))
        pack_operations = self.plan_autopack_combinations(
            existing_packs, pack_distribution)
        num_new_packs = len(pack_operations)
        num_old_packs = sum([len(po[1]) for po in pack_operations])
        num_revs_affected = sum([po[0] for po in pack_operations])
        mutter('Auto-packing repository %s, which has %d pack files, '
            'containing %d revisions. Packing %d files into %d affecting %d'
            ' revisions', self, total_packs, total_revisions, num_old_packs,
            num_new_packs, num_revs_affected)
        result = self._execute_pack_operations(pack_operations,
            packer_class=self.normal_packer_class,
            reload_func=self._restart_autopack)
        mutter('Auto-packing repository %s completed', self)
        return result

    def _execute_pack_operations(self, pack_operations, packer_class,
            reload_func=None):
        """Execute a series of pack operations.

        :param pack_operations: A list of [revision_count, packs_to_combine].
        :param packer_class: The class of packer to use
        :return: The new pack names.
        """
        for revision_count, packs in pack_operations:
            # we may have no-ops from the setup logic
            if len(packs) == 0:
                continue
            packer = packer_class(self, packs, '.autopack',
                                  reload_func=reload_func)
            try:
                result = packer.pack()
            except errors.RetryWithNewPacks:
                # An exception is propagating out of this context, make sure
                # this packer has cleaned up. Packer() doesn't set its new_pack
                # state into the RepositoryPackCollection object, so we only
                # have access to it directly here.
                if packer.new_pack is not None:
                    packer.new_pack.abort()
                raise
            if result is None:
                return
            for pack in packs:
                self._remove_pack_from_memory(pack)
        # record the newly available packs and stop advertising the old
        # packs
        to_be_obsoleted = []
        for _, packs in pack_operations:
            to_be_obsoleted.extend(packs)
        result = self._save_pack_names(clear_obsolete_packs=True,
                                       obsolete_packs=to_be_obsoleted)
        return result

    def _flush_new_pack(self):
        if self._new_pack is not None:
            self._new_pack.flush()

    def lock_names(self):
        """Acquire the mutex around the pack-names index.

        This cannot be used in the middle of a read-only transaction on the
        repository.
        """
        self.repo.control_files.lock_write()

    def _already_packed(self):
        """Is the collection already packed?"""
        return not (self.repo._format.pack_compresses or (len(self._names) > 1))

    def pack(self, hint=None, clean_obsolete_packs=False):
        """Pack the pack collection totally."""
        self.ensure_loaded()
        total_packs = len(self._names)
        if self._already_packed():
            return
        total_revisions = self.revision_index.combined_index.key_count()
        # XXX: the following may want to be a class, to pack with a given
        # policy.
        mutter('Packing repository %s, which has %d pack files, '
            'containing %d revisions with hint %r.', self, total_packs,
            total_revisions, hint)
        while True:
            try:
                self._try_pack_operations(hint)
            except RetryPackOperations:
                continue
            break

        if clean_obsolete_packs:
            self._clear_obsolete_packs()

    def _try_pack_operations(self, hint):
        """Calculate the pack operations based on the hint (if any), and
        execute them.
        """
        # determine which packs need changing
        pack_operations = [[0, []]]
        for pack in self.all_packs():
            if hint is None or pack.name in hint:
                # Either no hint was provided (so we are packing everything),
                # or this pack was included in the hint.
                pack_operations[-1][0] += pack.get_revision_count()
                pack_operations[-1][1].append(pack)
        self._execute_pack_operations(pack_operations,
            packer_class=self.optimising_packer_class,
            reload_func=self._restart_pack_operations)

    def plan_autopack_combinations(self, existing_packs, pack_distribution):
        """Plan a pack operation.

        :param existing_packs: The packs to pack. (A list of (revcount, Pack)
            tuples).
        :param pack_distribution: A list with the number of revisions desired
            in each pack.
        """
        if len(existing_packs) <= len(pack_distribution):
            return []
        existing_packs.sort(reverse=True)
        pack_operations = [[0, []]]
        # plan out what packs to keep, and what to reorganise
        while len(existing_packs):
            # take the largest pack, and if it's less than the head of the
            # distribution chart we will include its contents in the new pack
            # for that position. If it's larger, we remove its size from the
            # distribution chart
            next_pack_rev_count, next_pack = existing_packs.pop(0)
            if next_pack_rev_count >= pack_distribution[0]:
                # this is already packed 'better' than this, so we can
                # not waste time packing it.
                while next_pack_rev_count > 0:
                    next_pack_rev_count -= pack_distribution[0]
                    if next_pack_rev_count >= 0:
                        # more to go
                        del pack_distribution[0]
                    else:
                        # didn't use that entire bucket up
                        pack_distribution[0] = -next_pack_rev_count
            else:
                # add the revisions we're going to add to the next output pack
                pack_operations[-1][0] += next_pack_rev_count
                # allocate this pack to the next pack sub operation
                pack_operations[-1][1].append(next_pack)
                if pack_operations[-1][0] >= pack_distribution[0]:
                    # this pack is used up, shift left.
                    del pack_distribution[0]
                    pack_operations.append([0, []])
        # Now that we know which pack files we want to move, shove them all
        # into a single pack file.
        final_rev_count = 0
        final_pack_list = []
        for num_revs, pack_files in pack_operations:
            final_rev_count += num_revs
            final_pack_list.extend(pack_files)
        if len(final_pack_list) == 1:
            raise AssertionError('We somehow generated an autopack with a'
                ' single pack file being moved.')
        return [[final_rev_count, final_pack_list]]
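
    # Hedged worked example (added): with eleven single-revision packs and
    # pack_distribution(11) == [10, 1], the loop above accumulates the first
    # ten packs into one operation ([10, [p1..p10]]) and leaves the eleventh
    # alone (it already fits the trailing 1-revision bucket), so the method
    # returns [[10, [p1, ..., p10]]].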

    def ensure_loaded(self):
        """Ensure we have read names from disk.

        :return: True if the disk names had not been previously read.
        """
        # NB: if you see an assertion error here, it's probably access against
        # an unlocked repo. Naughty.
        if not self.repo.is_locked():
            raise errors.ObjectNotLocked(self.repo)
        if self._names is None:
            self._names = {}
            self._packs_at_load = set()
            for index, key, value in self._iter_disk_pack_index():
                name = key[0]
                self._names[name] = self._parse_index_sizes(value)
                self._packs_at_load.add((key, value))
            result = True
        else:
            result = False
        # populate all the metadata.
        self.all_packs()
        return result

    def _parse_index_sizes(self, value):
        """Parse a string of index sizes."""
        return tuple([int(digits) for digits in value.split(' ')])

    def get_pack_by_name(self, name):
        """Get a Pack object by name.

        :param name: The name of the pack - e.g. '123456'
        :return: A Pack object.
        """
        try:
            return self._packs_by_name[name]
        except KeyError:
            rev_index = self._make_index(name, '.rix')
            inv_index = self._make_index(name, '.iix')
            txt_index = self._make_index(name, '.tix')
            sig_index = self._make_index(name, '.six')
            if self.chk_index is not None:
                chk_index = self._make_index(name, '.cix', is_chk=True)
            else:
                chk_index = None
            result = ExistingPack(self._pack_transport, name, rev_index,
                inv_index, txt_index, sig_index, chk_index)
            self.add_pack_to_memory(result)
            return result

    def _resume_pack(self, name):
        """Get a suspended Pack object by name.

        :param name: The name of the pack - e.g. '123456'
        :return: A Pack object.
        """
        if not re.match('[a-f0-9]{32}', name):
            # Tokens should be md5sums of the suspended pack file, i.e. 32 hex
            # digits.
            raise errors.UnresumableWriteGroup(
                self.repo, [name], 'Malformed write group token')
        try:
            rev_index = self._make_index(name, '.rix', resume=True)
            inv_index = self._make_index(name, '.iix', resume=True)
            txt_index = self._make_index(name, '.tix', resume=True)
            sig_index = self._make_index(name, '.six', resume=True)
            if self.chk_index is not None:
                chk_index = self._make_index(name, '.cix', resume=True,
                                             is_chk=True)
            else:
                chk_index = None
            result = self.resumed_pack_factory(name, rev_index, inv_index,
                txt_index, sig_index, self._upload_transport,
                self._pack_transport, self._index_transport, self,
                chk_index=chk_index)
        except errors.NoSuchFile, e:
            raise errors.UnresumableWriteGroup(self.repo, [name], str(e))
        self.add_pack_to_memory(result)
        self._resumed_packs.append(result)
        return result

    def allocate(self, a_new_pack):
        """Allocate name in the list of packs.

        :param a_new_pack: A NewPack instance to be added to the collection of
            packs for this repository.
        """
        self.ensure_loaded()
        if a_new_pack.name in self._names:
            raise errors.BzrError(
                'Pack %r already exists in %s' % (a_new_pack.name, self))
        self._names[a_new_pack.name] = tuple(a_new_pack.index_sizes)
        self.add_pack_to_memory(a_new_pack)

    def _iter_disk_pack_index(self):
        """Iterate over the contents of the pack-names index.

        This is used when loading the list from disk, and before writing to
        detect updates from others during our write operation.
        :return: An iterator of the index contents.
        """
        return self._index_class(self.transport, 'pack-names', None
                ).iter_all_entries()

    def _make_index(self, name, suffix, resume=False, is_chk=False):
        size_offset = self._suffix_offsets[suffix]
        index_name = name + suffix
        if resume:
            transport = self._upload_transport
            index_size = transport.stat(index_name).st_size
        else:
            transport = self._index_transport
            index_size = self._names[name][size_offset]
        index = self._index_class(transport, index_name, index_size,
                                  unlimited_cache=is_chk)
        if is_chk and self._index_class is btree_index.BTreeGraphIndex:
            index._leaf_factory = btree_index._gcchk_factory
        return index

    def _max_pack_count(self, total_revisions):
        """Return the maximum number of packs to use for total revisions.

        :param total_revisions: The total number of revisions in the
            repository.
        """
        if not total_revisions:
            return 1
        digits = str(total_revisions)
        result = 0
        for digit in digits:
            result += int(digit)
        return result

    def names(self):
        """Provide an order to the underlying names."""
        return sorted(self._names.keys())

    def _obsolete_packs(self, packs):
        """Move a number of packs which have been obsoleted out of the way.

        Each pack and its associated indices are moved out of the way.

        Note: for correctness this function should only be called after a new
        pack names index has been written without these pack names, and with
        the names of packs that contain the data previously available via these
        packs.

        :param packs: The packs to obsolete.
        :return: None.
        """
        for pack in packs:
            try:
                try:
                    pack.pack_transport.move(pack.file_name(),
                        '../obsolete_packs/' + pack.file_name())
                except errors.NoSuchFile:
                    # perhaps obsolete_packs was removed? Let's create it and
                    # try again
                    try:
                        pack.pack_transport.mkdir('../obsolete_packs/')
                    except errors.FileExists:
                        pass
                    pack.pack_transport.move(pack.file_name(),
                        '../obsolete_packs/' + pack.file_name())
            except (errors.PathError, errors.TransportError), e:
                # TODO: Should these be warnings or mutters?
                mutter("couldn't rename obsolete pack, skipping it:\n%s"
                       % (e,))
            # TODO: Probably needs to know all possible indices for this pack
            # - or maybe list the directory and move all indices matching this
            # name whether we recognize it or not?
            suffixes = ['.iix', '.six', '.tix', '.rix']
            if self.chk_index is not None:
                suffixes.append('.cix')
            for suffix in suffixes:
                try:
                    self._index_transport.move(pack.name + suffix,
                        '../obsolete_packs/' + pack.name + suffix)
                except (errors.PathError, errors.TransportError), e:
                    mutter("couldn't rename obsolete index, skipping it:\n%s"
                           % (e,))

    def pack_distribution(self, total_revisions):
        """Generate a list of the number of revisions to put in each pack.

        :param total_revisions: The total number of revisions in the
            repository.
        """
        if total_revisions == 0:
            return [0]
        digits = reversed(str(total_revisions))
        result = []
        for exponent, count in enumerate(digits):
            size = 10 ** exponent
            for pos in range(int(count)):
                result.append(size)
        return list(reversed(result))
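
    # Hedged worked example (added): for 2466 revisions the decimal digits
    # yield pack_distribution(2466) ==
    #   [1000, 1000, 100, 100, 100, 100, 10, 10, 10, 10, 10, 10,
    #    1, 1, 1, 1, 1, 1]
    # (two thousands, four hundreds, six tens, six ones), and
    # _max_pack_count(2466) == 2 + 4 + 6 + 6 == 18, the length of that list.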

    def _pack_tuple(self, name):
        """Return a tuple with the transport and file name for a pack name."""
        return self._pack_transport, name + '.pack'

    def _remove_pack_from_memory(self, pack):
        """Remove pack from the packs accessed by this repository.

        Only affects memory state, until self._save_pack_names() is invoked.
        """
        self._names.pop(pack.name)
        self._packs_by_name.pop(pack.name)
        self._remove_pack_indices(pack)
        self.packs.remove(pack)

    def _remove_pack_indices(self, pack, ignore_missing=False):
        """Remove the indices for pack from the aggregated indices.

        :param ignore_missing: Suppress KeyErrors from calling remove_index.
        """
        for index_type in Pack.index_definitions.keys():
            attr_name = index_type + '_index'
            aggregate_index = getattr(self, attr_name)
            if aggregate_index is not None:
                pack_index = getattr(pack, attr_name)
                try:
                    aggregate_index.remove_index(pack_index)
                except KeyError:
                    if ignore_missing:
                        continue
                    raise

    def reset(self):
        """Clear all cached data."""
        # cached revision data
        self.revision_index.clear()
        # cached signature data
        self.signature_index.clear()
        # cached file text data
        self.text_index.clear()
        # cached inventory data
        self.inventory_index.clear()
        # cached chk data
        if self.chk_index is not None:
            self.chk_index.clear()
        # remove the open pack
        self._new_pack = None
        # information about packs.
        self._names = None
        self.packs = []
        self._packs_by_name = {}
        self._packs_at_load = None

    def _unlock_names(self):
        """Release the mutex around the pack-names index."""
        self.repo.control_files.unlock()

    def _diff_pack_names(self):
        """Read the pack names from disk, and compare it to the one in memory.

        :return: (disk_nodes, deleted_nodes, new_nodes)
            disk_nodes    The final set of nodes that should be referenced
            deleted_nodes Nodes which have been removed from when we started
            new_nodes     Nodes that are newly introduced
        """
        # load the disk nodes across
        disk_nodes = set()
        for index, key, value in self._iter_disk_pack_index():
            disk_nodes.add((key, value))
        orig_disk_nodes = set(disk_nodes)

        # do a two-way diff against our original content
        current_nodes = set()
        for name, sizes in self._names.iteritems():
            current_nodes.add(
                ((name, ), ' '.join(str(size) for size in sizes)))

        # Packs no longer present in the repository, which were present when we
        # locked the repository
        deleted_nodes = self._packs_at_load - current_nodes
        # Packs which this process is adding
        new_nodes = current_nodes - self._packs_at_load

        # Update the disk_nodes set to include the ones we are adding, and
        # remove the ones which were removed by someone else
        disk_nodes.difference_update(deleted_nodes)
        disk_nodes.update(new_nodes)

        return disk_nodes, deleted_nodes, new_nodes, orig_disk_nodes
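
    # Hedged worked example of the two-way diff (added): if pack-names held
    # {A, B} when we locked, this process has repacked A and B into C (so the
    # in-memory names are {C}), and another process has meanwhile added E (so
    # disk now holds {A, B, E}), then deleted_nodes == {A, B},
    # new_nodes == {C}, and the returned disk_nodes == {C, E}.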

    def _syncronize_pack_names_from_disk_nodes(self, disk_nodes):
        """Given the correct set of pack files, update our saved info.

        :return: (removed, added, modified)
            removed     pack names removed from self._names
            added       pack names added to self._names
            modified    pack names that had changed value
        """
        removed = []
        added = []
        modified = []
        ## self._packs_at_load = disk_nodes
        new_names = dict(disk_nodes)
        # drop no longer present nodes
        for pack in self.all_packs():
            if (pack.name,) not in new_names:
                removed.append(pack.name)
                self._remove_pack_from_memory(pack)
        # add new nodes/refresh existing ones
        for key, value in disk_nodes:
            name = key[0]
            sizes = self._parse_index_sizes(value)
            if name in self._names:
                # existing
                if sizes != self._names[name]:
                    # the pack for name has had its indices replaced - rare but
                    # important to handle. XXX: probably can never happen today
                    # because the three-way merge code above does not handle it
                    # - you may end up adding the same key twice to the new
                    # disk index because the set values are the same, unless
                    # the only index shows up as deleted by the set difference
                    # - which it may. Until there is a specific test for this,
                    # assume it's broken. RBC 20071017.
                    self._remove_pack_from_memory(self.get_pack_by_name(name))
                    self._names[name] = sizes
                    self.get_pack_by_name(name)
                    modified.append(name)
            else:
                # new
                self._names[name] = sizes
                self.get_pack_by_name(name)
                added.append(name)
        return removed, added, modified

    def _save_pack_names(self, clear_obsolete_packs=False, obsolete_packs=None):
        """Save the list of packs.

        This will take out the mutex around the pack names list for the
        duration of the method call. If concurrent updates have been made, a
        three-way merge between the current list and the current in memory list
        is performed.

        :param clear_obsolete_packs: If True, clear out the contents of the
            obsolete_packs directory.
        :param obsolete_packs: Packs that are obsolete once the new pack-names
            file has been written.
        :return: A list of the names saved that were not previously on disk.
        """
        already_obsolete = []
        self.lock_names()
        try:
            builder = self._index_builder_class()
            (disk_nodes, deleted_nodes, new_nodes,
             orig_disk_nodes) = self._diff_pack_names()
            # TODO: handle same-name, index-size-changes here -
            # e.g. use the value from disk, not ours, *unless* we're the one
            # changing it.
            for key, value in disk_nodes:
                builder.add_node(key, value)
            self.transport.put_file('pack-names', builder.finish(),
                mode=self.repo.bzrdir._get_file_mode())
            self._packs_at_load = disk_nodes
            if clear_obsolete_packs:
                to_preserve = None
                if obsolete_packs:
                    to_preserve = set([o.name for o in obsolete_packs])
                already_obsolete = self._clear_obsolete_packs(to_preserve)
        finally:
            self._unlock_names()
        # synchronise the memory packs list with what we just wrote:
        self._syncronize_pack_names_from_disk_nodes(disk_nodes)
        if obsolete_packs:
            # TODO: We could add one more condition here. "if o.name not in
            #       orig_disk_nodes and o != the new_pack we haven't written to
            #       disk yet. However, the new pack object is not easily
            #       accessible here (it would have to be passed through the
            #       autopacking code, etc.)
            obsolete_packs = [o for o in obsolete_packs
                              if o.name not in already_obsolete]
            self._obsolete_packs(obsolete_packs)
        return [new_node[0][0] for new_node in new_nodes]

    def reload_pack_names(self):
        """Sync our pack listing with what is present in the repository.

        This should be called when we find out that something we thought was
        present is now missing. This happens when another process re-packs the
        repository, etc.

        :return: True if the in-memory list of packs has been altered at all.
        """
        # The ensure_loaded call is to handle the case where the first call
        # made involving the collection was to reload_pack_names, where we
        # don't have a view of disk contents. It's a bit of a bandaid, and
        # causes two reads of pack-names, but it's a rare corner case not
        # struck with regular push/pull etc.
        first_read = self.ensure_loaded()
        if first_read:
            return True
        # Re-read the pack-names file and work out the new value.
        (disk_nodes, deleted_nodes, new_nodes,
         orig_disk_nodes) = self._diff_pack_names()
        # _packs_at_load is meant to be the explicit list of names in
        # 'pack-names' at the start. As such, it should not contain any
        # pending names that haven't been written out yet.
        self._packs_at_load = orig_disk_nodes
        (removed, added,
         modified) = self._syncronize_pack_names_from_disk_nodes(disk_nodes)
        if removed or added or modified:
            return True
        return False

    def _restart_autopack(self):
        """Reload the pack names list, and restart the autopack code."""
        if not self.reload_pack_names():
            # Re-raise the original exception, because something went missing
            # and a restart didn't find it
            raise
        raise errors.RetryAutopack(self.repo, False, sys.exc_info())

    def _restart_pack_operations(self):
        """Reload the pack names list, and restart the pack operations."""
        if not self.reload_pack_names():
            # Re-raise the original exception, because something went missing
            # and a restart didn't find it
            raise
        raise RetryPackOperations(self.repo, False, sys.exc_info())

    def _clear_obsolete_packs(self, preserve=None):
        """Delete everything from the obsolete-packs directory.

        :return: A list of pack identifiers (the filename without '.pack') that
            were found in obsolete_packs.
        """
        found = []
        obsolete_pack_transport = self.transport.clone('obsolete_packs')
        if preserve is None:
            preserve = set()
        try:
            obsolete_pack_files = obsolete_pack_transport.list_dir('.')
        except errors.NoSuchFile:
            return found
        for filename in obsolete_pack_files:
            name, ext = osutils.splitext(filename)
            if ext == '.pack':
                found.append(name)
            if name in preserve:
                continue
            try:
                obsolete_pack_transport.delete(filename)
            except (errors.PathError, errors.TransportError), e:
                warning("couldn't delete obsolete pack, skipping it:\n%s"
                        % (e,))
        return found

    def _start_write_group(self):
        # Do not permit preparation for writing if we're not in a 'write lock'.
        if not self.repo.is_write_locked():
            raise errors.NotWriteLocked(self)
        self._new_pack = self.pack_factory(self, upload_suffix='.pack',
            file_mode=self.repo.bzrdir._get_file_mode())
        # allow writing: queue writes to a new index
        self.revision_index.add_writable_index(self._new_pack.revision_index,
            self._new_pack)
        self.inventory_index.add_writable_index(self._new_pack.inventory_index,
            self._new_pack)
        self.text_index.add_writable_index(self._new_pack.text_index,
            self._new_pack)
        self._new_pack.text_index.set_optimize(combine_backing_indices=False)
        self.signature_index.add_writable_index(self._new_pack.signature_index,
            self._new_pack)
        if self.chk_index is not None:
            self.chk_index.add_writable_index(self._new_pack.chk_index,
                self._new_pack)
            self.repo.chk_bytes._index._add_callback = self.chk_index.add_callback
            self._new_pack.chk_index.set_optimize(combine_backing_indices=False)

        self.repo.inventories._index._add_callback = self.inventory_index.add_callback
        self.repo.revisions._index._add_callback = self.revision_index.add_callback
        self.repo.signatures._index._add_callback = self.signature_index.add_callback
        self.repo.texts._index._add_callback = self.text_index.add_callback

    def _abort_write_group(self):
        # FIXME: just drop the transient index.
        # forget what names there are
        if self._new_pack is not None:
            operation = cleanup.OperationWithCleanups(self._new_pack.abort)
            operation.add_cleanup(setattr, self, '_new_pack', None)
            # If we aborted while in the middle of finishing the write
            # group, _remove_pack_indices could fail because the indexes are
            # already gone. But if they're not there we shouldn't fail in
            # this case, so we pass ignore_missing=True.
            operation.add_cleanup(self._remove_pack_indices, self._new_pack,
                ignore_missing=True)
            operation.run_simple()
        for resumed_pack in self._resumed_packs:
            operation = cleanup.OperationWithCleanups(resumed_pack.abort)
            # See comment in previous finally block.
            operation.add_cleanup(self._remove_pack_indices, resumed_pack,
                ignore_missing=True)
            operation.run_simple()
        del self._resumed_packs[:]

    def _remove_resumed_pack_indices(self):
        for resumed_pack in self._resumed_packs:
            self._remove_pack_indices(resumed_pack)
        del self._resumed_packs[:]
1579
def _check_new_inventories(self):
1580
"""Detect missing inventories in this write group.
1582
:returns: list of strs, summarising any problems found. If the list is
1583
empty no problems were found.
1585
# The base implementation does no checks. GCRepositoryPackCollection
1589
def _commit_write_group(self):
1591
for prefix, versioned_file in (
1592
('revisions', self.repo.revisions),
1593
('inventories', self.repo.inventories),
1594
('texts', self.repo.texts),
1595
('signatures', self.repo.signatures),
1597
missing = versioned_file.get_missing_compression_parent_keys()
1598
all_missing.update([(prefix,) + key for key in missing])
1600
raise errors.BzrCheckError(
1601
"Repository %s has missing compression parent(s) %r "
1602
% (self.repo, sorted(all_missing)))
1603
problems = self._check_new_inventories()
1605
problems_summary = '\n'.join(problems)
1606
raise errors.BzrCheckError(
1607
"Cannot add revision(s) to repository: " + problems_summary)
1608
self._remove_pack_indices(self._new_pack)
1609
any_new_content = False
1610
if self._new_pack.data_inserted():
1611
# get all the data to disk and read to use
1612
self._new_pack.finish()
1613
self.allocate(self._new_pack)
1614
self._new_pack = None
1615
any_new_content = True
1617
self._new_pack.abort()
1618
self._new_pack = None
1619
for resumed_pack in self._resumed_packs:
1620
# XXX: this is a pretty ugly way to turn the resumed pack into a
1621
# properly committed pack.
1622
self._names[resumed_pack.name] = None
1623
self._remove_pack_from_memory(resumed_pack)
1624
resumed_pack.finish()
1625
self.allocate(resumed_pack)
1626
any_new_content = True
1627
del self._resumed_packs[:]
1629
result = self.autopack()
1631
# when autopack takes no steps, the names list is still
1633
return self._save_pack_names()

    def _suspend_write_group(self):
        tokens = [pack.name for pack in self._resumed_packs]
        self._remove_pack_indices(self._new_pack)
        if self._new_pack.data_inserted():
            # get all the data to disk and ready to use
            self._new_pack.finish(suspend=True)
            tokens.append(self._new_pack.name)
            self._new_pack = None
        else:
            self._new_pack.abort()
            self._new_pack = None
        self._remove_resumed_pack_indices()
        return tokens

    def _resume_write_group(self, tokens):
        for token in tokens:
            self._resume_pack(token)


class PackRepository(MetaDirVersionedFileRepository):
    """Repository with knit objects stored inside pack containers.

    The layering for a KnitPackRepository is:

    Graph | HPSS | Repository public layer |
    ===================================================
    Tuple based apis below, string based, and key based apis above
    ---------------------------------------------------
    VersionedFiles
      Provides .texts, .revisions etc
      This adapts the N-tuple keys to physical knit records which only have a
      single string identifier (for historical reasons), which in older formats
      was always the revision_id, and in the mapped code for packs is always
      the last element of key tuples.
    ---------------------------------------------------
    GraphIndex
      A separate GraphIndex is used for each of the
      texts/inventories/revisions/signatures contained within each individual
      pack file. The GraphIndex layer works in N-tuples and is unaware of any
      semantic meaning to the data.
    ===================================================

    """
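
    # Illustrative sketch (not part of the original docstring): the
    # tuple-keyed access described above looks roughly like this, where
    # 'some-revision-id' is a placeholder:
    #
    #   repo.lock_read()
    #   try:
    #       stream = repo.revisions.get_record_stream(
    #           [('some-revision-id',)], 'unordered', True)
    #       for record in stream:
    #           text = record.get_bytes_as('fulltext')
    #   finally:
    #       repo.unlock()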

    # These attributes are inherited from the Repository base class. Setting
    # them to None ensures that if the constructor is changed to not initialize
    # them, or a subclass fails to call the constructor, that an error will
    # occur rather than the system working but generating incorrect data.
    _commit_builder_class = None
    _serializer = None

    def __init__(self, _format, a_bzrdir, control_files, _commit_builder_class,
        _serializer):
        MetaDirRepository.__init__(self, _format, a_bzrdir, control_files)
        self._commit_builder_class = _commit_builder_class
        self._serializer = _serializer
        self._reconcile_fixes_text_parents = True
        if self._format.supports_external_lookups:
            self._unstacked_provider = graph.CachingParentsProvider(
                self._make_parents_provider_unstacked())
        else:
            self._unstacked_provider = graph.CachingParentsProvider(self)
        self._unstacked_provider.disable_cache()

    @needs_read_lock
    def _all_revision_ids(self):
        """See Repository.all_revision_ids()."""
        return [key[0] for key in self.revisions.keys()]

    def _abort_write_group(self):
        self.revisions._index._key_dependencies.clear()
        self._pack_collection._abort_write_group()

    def _make_parents_provider(self):
        if not self._format.supports_external_lookups:
            return self._unstacked_provider
        return graph.StackedParentsProvider(_LazyListJoin(
            [self._unstacked_provider], self._fallback_repositories))

    def _refresh_data(self):
        if not self.is_locked():
            return
        self._pack_collection.reload_pack_names()
        self._unstacked_provider.disable_cache()
        self._unstacked_provider.enable_cache()

    def _start_write_group(self):
        self._pack_collection._start_write_group()

    def _commit_write_group(self):
        hint = self._pack_collection._commit_write_group()
        self.revisions._index._key_dependencies.clear()
        # The commit may have added keys that were previously cached as
        # missing, so reset the cache.
        self._unstacked_provider.disable_cache()
        self._unstacked_provider.enable_cache()
        return hint

    def suspend_write_group(self):
        # XXX check self._write_group is self.get_transaction()?
        tokens = self._pack_collection._suspend_write_group()
        self.revisions._index._key_dependencies.clear()
        self._write_group = None
        return tokens

    def _resume_write_group(self, tokens):
        self._start_write_group()
        try:
            self._pack_collection._resume_write_group(tokens)
        except errors.UnresumableWriteGroup:
            self._abort_write_group()
            raise
        for pack in self._pack_collection._resumed_packs:
            self.revisions._index.scan_unvalidated_index(pack.revision_index)
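
    # Illustrative sketch (assumption, not original code): suspend/resume
    # lets a caller park an in-progress write group and pick it up later,
    # possibly from another repository instance, via opaque tokens:
    #
    #   repo.lock_write()
    #   repo.start_write_group()
    #   ...insert some data...
    #   tokens = repo.suspend_write_group()
    #   repo.unlock()
    #   ...later...
    #   repo2.lock_write()
    #   repo2.resume_write_group(tokens)
    #   repo2.commit_write_group()
    #   repo2.unlock()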

    def get_transaction(self):
        if self._write_lock_count:
            return self._transaction
        else:
            return self.control_files.get_transaction()

    def is_locked(self):
        return self._write_lock_count or self.control_files.is_locked()

    def is_write_locked(self):
        return self._write_lock_count

    def lock_write(self, token=None):
        """Lock the repository for writes.

        :return: A bzrlib.repository.RepositoryWriteLockResult.
        """
        locked = self.is_locked()
        if not self._write_lock_count and locked:
            raise errors.ReadOnlyError(self)
        self._write_lock_count += 1
        if self._write_lock_count == 1:
            self._transaction = transactions.WriteTransaction()
        if not locked:
            if 'relock' in debug.debug_flags and self._prev_lock == 'w':
                note('%r was write locked again', self)
            self._prev_lock = 'w'
            self._unstacked_provider.enable_cache()
            for repo in self._fallback_repositories:
                # Writes don't affect fallback repos
                repo.lock_read()
            self._refresh_data()
        return RepositoryWriteLockResult(self.unlock, None)

    def lock_read(self):
        """Lock the repository for reads.

        :return: A bzrlib.lock.LogicalLockResult.
        """
        locked = self.is_locked()
        if self._write_lock_count:
            self._write_lock_count += 1
        else:
            self.control_files.lock_read()
        if not locked:
            if 'relock' in debug.debug_flags and self._prev_lock == 'r':
                note('%r was read locked again', self)
            self._prev_lock = 'r'
            self._unstacked_provider.enable_cache()
            for repo in self._fallback_repositories:
                repo.lock_read()
            self._refresh_data()
        return LogicalLockResult(self.unlock)
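
    # Illustrative sketch (not original code): both lock methods return a
    # result object whose unlock attribute releases the lock, which pairs
    # naturally with try/finally:
    #
    #   lock_result = repo.lock_read()
    #   try:
    #       ...read from repo...
    #   finally:
    #       lock_result.unlock()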

    def leave_lock_in_place(self):
        # not supported - raise an error
        raise NotImplementedError(self.leave_lock_in_place)

    def dont_leave_lock_in_place(self):
        # not supported - raise an error
        raise NotImplementedError(self.dont_leave_lock_in_place)

    @needs_write_lock
    def pack(self, hint=None, clean_obsolete_packs=False):
        """Compress the data within the repository.

        This will pack all the data to a single pack. In future it may
        recompress deltas or do other such expensive operations.
        """
        self._pack_collection.pack(hint=hint, clean_obsolete_packs=clean_obsolete_packs)
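
    # Illustrative sketch (assumption, not original code): the hint returned
    # by commit_write_group() can be passed back here so that only the packs
    # touched by the recent write group are recombined:
    #
    #   repo.lock_write()
    #   try:
    #       repo.start_write_group()
    #       ...add data...
    #       hint = repo.commit_write_group()
    #       repo.pack(hint=hint)
    #   finally:
    #       repo.unlock()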

    @needs_write_lock
    def reconcile(self, other=None, thorough=False):
        """Reconcile this repository."""
        from bzrlib.reconcile import PackReconciler
        reconciler = PackReconciler(self, thorough=thorough)
        reconciler.reconcile()
        return reconciler

    def _reconcile_pack(self, collection, packs, extension, revs, pb):
        raise NotImplementedError(self._reconcile_pack)

    @only_raises(errors.LockNotHeld, errors.LockBroken)
    def unlock(self):
        if self._write_lock_count == 1 and self._write_group is not None:
            self.abort_write_group()
            self._unstacked_provider.disable_cache()
            self._transaction = None
            self._write_lock_count = 0
            raise errors.BzrError(
                'Must end write group before releasing write lock on %s'
                % self)
        if self._write_lock_count:
            self._write_lock_count -= 1
            if not self._write_lock_count:
                transaction = self._transaction
                self._transaction = None
                transaction.finish()
        else:
            self.control_files.unlock()

        if not self.is_locked():
            self._unstacked_provider.disable_cache()
            for repo in self._fallback_repositories:
                repo.unlock()


class RepositoryFormatPack(MetaDirVersionedFileRepositoryFormat):
    """Format logic for pack structured repositories.

    This repository format has:
     - a list of packs in pack-names
     - packs in packs/NAME.pack
     - indices in indices/NAME.{iix,six,tix,rix}
     - knit deltas in the packs, knit indices mapped to the indices.
     - thunk objects to support the knits programming API.
     - a format marker of its own
     - an optional 'shared-storage' flag
     - an optional 'no-working-trees' flag
     - a LockDir lock
    """

    # Set this attribute in derived classes to control the repository class
    # created by open and initialize.
    repository_class = None
    # Set this attribute in derived classes to control the
    # _commit_builder_class that the repository objects will have passed to
    # their constructor.
    _commit_builder_class = None
    # Set this attribute in derived classes to control the _serializer that
    # the repository objects will have passed to their constructor.
    _serializer = None
    # Packs are not confused by ghosts.
    supports_ghosts = True
    # External references are not supported in pack repositories yet.
    supports_external_lookups = False
    # Most pack formats do not use chk lookups.
    supports_chks = False
    # What index classes to use
    index_builder_class = None
    index_class = None
    _fetch_uses_deltas = True
    fast_deltas = False
    supports_funky_characters = True
    revision_graph_can_have_wrong_parents = True
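
    # Illustrative sketch (hypothetical format, not part of bzrlib): a
    # concrete subclass wires together the repository class, commit builder
    # and index classes roughly like:
    #
    #   class RepositoryFormatExamplePack(RepositoryFormatPack):
    #       repository_class = PackRepository
    #       _commit_builder_class = PackCommitBuilder
    #       index_builder_class = InMemoryGraphIndex
    #       index_class = GraphIndex
    #
    #       @classmethod
    #       def get_format_string(cls):
    #           return "Bazaar Example Pack Format (example)\n"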

    def initialize(self, a_bzrdir, shared=False):
        """Create a pack based repository.

        :param a_bzrdir: bzrdir to contain the new repository; must already
            be initialized.
        :param shared: If true the repository will be initialized as a shared
            repository.
        """
        mutter('creating repository in %s.', a_bzrdir.transport.base)
        dirs = ['indices', 'obsolete_packs', 'packs', 'upload']
        builder = self.index_builder_class()
        files = [('pack-names', builder.finish())]
        utf8_files = [('format', self.get_format_string())]

        self._upload_blank_content(a_bzrdir, dirs, files, utf8_files, shared)
        repository = self.open(a_bzrdir=a_bzrdir, _found=True)
        self._run_post_repo_init_hooks(repository, a_bzrdir, shared)
        return repository

    def open(self, a_bzrdir, _found=False, _override_transport=None):
        """See RepositoryFormat.open().

        :param _override_transport: INTERNAL USE ONLY. Allows opening the
                                    repository at a slightly different url
                                    than normal. I.e. during 'upgrade'.
        """
        if not _found:
            format = RepositoryFormatMetaDir.find_format(a_bzrdir)
        if _override_transport is not None:
            repo_transport = _override_transport
        else:
            repo_transport = a_bzrdir.get_repository_transport(None)
        control_files = lockable_files.LockableFiles(repo_transport,
                                'lock', lockdir.LockDir)
        return self.repository_class(_format=self,
                              a_bzrdir=a_bzrdir,
                              control_files=control_files,
                              _commit_builder_class=self._commit_builder_class,
                              _serializer=self._serializer)


class RetryPackOperations(errors.RetryWithNewPacks):
    """Raised when we are packing and we find a missing file.

    Meant as a signaling exception, to tell the RepositoryPackCollection.pack
    code it should try again.
    """

    internal_error = True

    _fmt = ("Pack files have changed, reload and try pack again."
            " context: %(context)s %(orig_error)s")


class _DirectPackAccess(object):
    """Access to data in one or more packs with less translation."""

    def __init__(self, index_to_packs, reload_func=None, flush_func=None):
        """Create a _DirectPackAccess object.

        :param index_to_packs: A dict mapping index objects to the transport
            and file names for obtaining data.
        :param reload_func: A function to call if we determine that the pack
            files have moved and we need to reload our caches. See
            bzrlib.repo_fmt.pack_repo.AggregateIndex for more details.
        :param flush_func: An optional function used to flush pending writes
            (see flush()).
        """
        self._container_writer = None
        self._write_index = None
        self._indices = index_to_packs
        self._reload_func = reload_func
        self._flush_func = flush_func

    def add_raw_records(self, key_sizes, raw_data):
        """Add raw knit bytes to a storage area.

        The data is spooled to the container writer in one bytes-record per
        raw data item.

        :param key_sizes: An iterable of tuples containing the key and size
            of each raw data segment.
        :param raw_data: A bytestring containing the data.
        :return: A list of memos to retrieve the record later. Each memo is an
            opaque index memo. For _DirectPackAccess the memo is (index, pos,
            length), where the index field is the write_index object supplied
            to the PackAccess object.
        """
        if type(raw_data) is not str:
            raise AssertionError(
                'data must be plain bytes, was %s' % type(raw_data))
        result = []
        offset = 0
        for key, size in key_sizes:
            p_offset, p_length = self._container_writer.add_bytes_record(
                raw_data[offset:offset+size], [])
            offset += size
            result.append((self._write_index, p_offset, p_length))
        return result
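
    # Illustrative sketch (not original code): two records of 10 and 20
    # bytes are passed as one concatenated bytestring; the returned memos
    # can later be handed to get_raw_records() to read them back.
    # `payload` is a placeholder 30-byte string:
    #
    #   memos = access.add_raw_records(
    #       [(('file-id', 'rev-1'), 10), (('file-id', 'rev-2'), 20)],
    #       payload)
    #   for raw_bytes in access.get_raw_records(memos):
    #       ...process raw_bytes...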

    def flush(self):
        """Flush pending writes on this access object.

        This will flush any buffered writes to a NewPack.
        """
        if self._flush_func is not None:
            self._flush_func()

    def get_raw_records(self, memos_for_retrieval):
        """Get the raw bytes for the given records.

        :param memos_for_retrieval: An iterable containing the (index, pos,
            length) memo for retrieving the bytes. The Pack access method
            looks up the pack to use for a given record in its index_to_pack
            map.
        :return: An iterator over the bytes of the records.
        """
        # first pass, group into same-index requests
        request_lists = []
        current_index = None
        for (index, offset, length) in memos_for_retrieval:
            if current_index == index:
                current_list.append((offset, length))
            else:
                if current_index is not None:
                    request_lists.append((current_index, current_list))
                current_index = index
                current_list = [(offset, length)]
        # handle the last entry
        if current_index is not None:
            request_lists.append((current_index, current_list))
        for index, offsets in request_lists:
            try:
                transport, path = self._indices[index]
            except KeyError:
                # A KeyError here indicates that someone has triggered an
                # index reload, and this index has gone missing, we need to
                # start over.
                if self._reload_func is None:
                    # If we don't have a _reload_func there is nothing that
                    # can be done here, so raise currently.
                    raise
                raise errors.RetryWithNewPacks(index,
                                               reload_occurred=True,
                                               exc_info=sys.exc_info())
            try:
                reader = pack.make_readv_reader(transport, path, offsets)
                for names, read_func in reader.iter_records():
                    yield read_func(None)
            except errors.NoSuchFile:
                # A NoSuchFile error indicates that a pack file has gone
                # missing on disk, we need to trigger a reload, and start over.
                if self._reload_func is None:
                    raise
                raise errors.RetryWithNewPacks(transport.abspath(path),
                                               reload_occurred=False,
                                               exc_info=sys.exc_info())

    def set_writer(self, writer, index, transport_packname):
        """Set a writer to use for adding data."""
        if index is not None:
            self._indices[index] = transport_packname
        self._container_writer = writer
        self._write_index = index

    def reload_or_raise(self, retry_exc):
        """Try calling the reload function, or re-raise the original exception.

        This should be called after _DirectPackAccess raises a
        RetryWithNewPacks exception. This function will handle the common logic
        of determining when the error is fatal versus being temporary.
        It will also make sure that the original exception is raised, rather
        than the RetryWithNewPacks exception.

        If this function returns, then the calling function should retry
        whatever operation was being performed. Otherwise an exception will
        be raised.

        :param retry_exc: A RetryWithNewPacks exception.
        """
        is_error = False
        if self._reload_func is None:
            is_error = True
        elif not self._reload_func():
            # The reload claimed that nothing changed
            if not retry_exc.reload_occurred:
                # If there wasn't an earlier reload, then we really were
                # expecting to find changes. We didn't find them, so this is a
                # hard error.
                is_error = True
        if is_error:
            exc_class, exc_value, exc_traceback = retry_exc.exc_info
            raise exc_class, exc_value, exc_traceback
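
    # Illustrative sketch (not original code) of the retry pattern this
    # method supports:
    #
    #   while True:
    #       try:
    #           return list(access.get_raw_records(memos))
    #       except errors.RetryWithNewPacks, retry_exc:
    #           # Either reloads the pack list and loops again, or re-raises
    #           # the original error if retrying cannot help.
    #           access.reload_or_raise(retry_exc)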