# Copyright (C) 2007-2011 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

import re
import sys

from bzrlib.lazy_import import lazy_import
lazy_import(globals(), """
from itertools import izip
import time

from bzrlib import (
    cleanup,
    debug,
    graph,
    osutils,
    pack,
    transactions,
    ui,
    )
from bzrlib.index import (
    CombinedGraphIndex,
    GraphIndexPrefixAdapter,
    )
from bzrlib.knit import (
    _DirectPackAccess,
    )
""")
from bzrlib import (
    btree_index,
    errors,
    )
from bzrlib.decorators import (
    needs_read_lock,
    needs_write_lock,
    only_raises,
    )
from bzrlib.lock import LogicalLockResult
from bzrlib.repository import (
    CommitBuilder,
    MetaDirRepository,
    MetaDirRepositoryFormat,
    RepositoryWriteLockResult,
    RootCommitBuilder,
    )
from bzrlib.trace import (
    mutter,
    note,
    warning,
    )


class PackCommitBuilder(CommitBuilder):
    """A subclass of CommitBuilder to add texts with pack semantics.

    Specifically this uses one knit object rather than one knit object per
    added text, reducing memory and object pressure.
    """

    def __init__(self, repository, parents, config, timestamp=None,
                 timezone=None, committer=None, revprops=None,
                 revision_id=None, lossy=False):
        CommitBuilder.__init__(self, repository, parents, config,
            timestamp=timestamp, timezone=timezone, committer=committer,
            revprops=revprops, revision_id=revision_id, lossy=lossy)
        self._file_graph = graph.Graph(
            repository._pack_collection.text_index.combined_index)

    def _heads(self, file_id, revision_ids):
        keys = [(file_id, revision_id) for revision_id in revision_ids]
        return set([key[1] for key in self._file_graph.heads(keys)])


class PackRootCommitBuilder(RootCommitBuilder):
    """A subclass of RootCommitBuilder to add texts with pack semantics.

    Specifically this uses one knit object rather than one knit object per
    added text, reducing memory and object pressure.
    """

    def __init__(self, repository, parents, config, timestamp=None,
                 timezone=None, committer=None, revprops=None,
                 revision_id=None, lossy=False):
        CommitBuilder.__init__(self, repository, parents, config,
            timestamp=timestamp, timezone=timezone, committer=committer,
            revprops=revprops, revision_id=revision_id, lossy=lossy)
        self._file_graph = graph.Graph(
            repository._pack_collection.text_index.combined_index)

    def _heads(self, file_id, revision_ids):
        keys = [(file_id, revision_id) for revision_id in revision_ids]
        return set([key[1] for key in self._file_graph.heads(keys)])
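
# Illustrative sketch (not from the original module): _heads() maps
# file-level revision ids into the (file_id, revision_id) keyspace of the
# text index and asks the per-file graph for the heads.  Assuming revisions
# 'A' and 'B' of file 'f-id' where 'B' descends from 'A':
#
#   builder._heads('f-id', ['A', 'B'])
#   # graph.heads([('f-id', 'A'), ('f-id', 'B')]) -> set([('f-id', 'B')])
#   # result: set(['B'])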


class Pack(object):
    """An in memory proxy for a pack and its indices.

    This is a base class that is not directly used, instead the classes
    ExistingPack and NewPack are used.
    """

    # A map of index 'type' to the file extension and position in the
    # index_sizes array.
    index_definitions = {
        'chk': ('.cix', 4),
        'revision': ('.rix', 0),
        'inventory': ('.iix', 1),
        'text': ('.tix', 2),
        'signature': ('.six', 3),
        }

    def __init__(self, revision_index, inventory_index, text_index,
        signature_index, chk_index=None):
        """Create a pack instance.

        :param revision_index: A GraphIndex for determining what revisions are
            present in the Pack and accessing the locations of their texts.
        :param inventory_index: A GraphIndex for determining what inventories are
            present in the Pack and accessing the locations of their
            texts/deltas.
        :param text_index: A GraphIndex for determining what file texts
            are present in the pack and accessing the locations of their
            texts/deltas (via (fileid, revisionid) tuples).
        :param signature_index: A GraphIndex for determining what signatures are
            present in the Pack and accessing the locations of their texts.
        :param chk_index: A GraphIndex for accessing content by CHK, if the
            pack has one.
        """
        self.revision_index = revision_index
        self.inventory_index = inventory_index
        self.text_index = text_index
        self.signature_index = signature_index
        self.chk_index = chk_index

    def access_tuple(self):
        """Return a tuple (transport, name) for the pack content."""
        return self.pack_transport, self.file_name()

    def _check_references(self):
        """Make sure our external references are present.

        Packs are allowed to have deltas whose base is not in the pack, but it
        must be present somewhere in this collection.  It is not allowed to
        have deltas based on a fallback repository.
        (See <https://bugs.launchpad.net/bzr/+bug/288751>)
        """
        missing_items = {}
        for (index_name, external_refs, index) in [
            ('texts',
                self._get_external_refs(self.text_index),
                self._pack_collection.text_index.combined_index),
            ('inventories',
                self._get_external_refs(self.inventory_index),
                self._pack_collection.inventory_index.combined_index),
            ]:
            missing = external_refs.difference(
                k for (idx, k, v, r) in
                index.iter_entries(external_refs))
            if missing:
                missing_items[index_name] = sorted(list(missing))
        if missing_items:
            from pprint import pformat
            raise errors.BzrCheckError(
                "Newly created pack file %r has delta references to "
                "items not in its repository:\n%s"
                % (self, pformat(missing_items)))

    def file_name(self):
        """Get the file name for the pack on disk."""
        return self.name + '.pack'

    def get_revision_count(self):
        return self.revision_index.key_count()

    def index_name(self, index_type, name):
        """Get the disk name of an index type for pack name 'name'."""
        return name + Pack.index_definitions[index_type][0]

    def index_offset(self, index_type):
        """Get the position in a index_size array for a given index type."""
        return Pack.index_definitions[index_type][1]

    def inventory_index_name(self, name):
        """The inv index is the name + .iix."""
        return self.index_name('inventory', name)

    def revision_index_name(self, name):
        """The revision index is the name + .rix."""
        return self.index_name('revision', name)

    def signature_index_name(self, name):
        """The signature index is the name + .six."""
        return self.index_name('signature', name)

    def text_index_name(self, name):
        """The text index is the name + .tix."""
        return self.index_name('text', name)
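
    # Example (illustrative, not part of the original module): for a pack
    # named 'd41d8cd9', the helpers above give
    #
    #   pack.index_name('text', 'd41d8cd9')   # -> 'd41d8cd9.tix'
    #   pack.index_offset('text')             # -> 2
    #
    # i.e. the slot of the text index inside the index_sizes list
    # (rev, inv, text, sig[, chk]).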

    def _replace_index_with_readonly(self, index_type):
        unlimited_cache = False
        if index_type == 'chk':
            unlimited_cache = True
        index = self.index_class(self.index_transport,
            self.index_name(index_type, self.name),
            self.index_sizes[self.index_offset(index_type)],
            unlimited_cache=unlimited_cache)
        if index_type == 'chk':
            index._leaf_factory = btree_index._gcchk_factory
        setattr(self, index_type + '_index', index)


class ExistingPack(Pack):
    """An in memory proxy for an existing .pack and its disk indices."""

    def __init__(self, pack_transport, name, revision_index, inventory_index,
        text_index, signature_index, chk_index=None):
        """Create an ExistingPack object.

        :param pack_transport: The transport where the pack file resides.
        :param name: The name of the pack on disk in the pack_transport.
        """
        Pack.__init__(self, revision_index, inventory_index, text_index,
            signature_index, chk_index)
        self.name = name
        self.pack_transport = pack_transport
        if None in (revision_index, inventory_index, text_index,
                signature_index, name, pack_transport):
            raise AssertionError()

    def __eq__(self, other):
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return "<%s.%s object at 0x%x, %s, %s>" % (
            self.__class__.__module__, self.__class__.__name__, id(self),
            self.pack_transport, self.name)


class ResumedPack(ExistingPack):

    def __init__(self, name, revision_index, inventory_index, text_index,
        signature_index, upload_transport, pack_transport, index_transport,
        pack_collection, chk_index=None):
        """Create a ResumedPack object."""
        ExistingPack.__init__(self, pack_transport, name, revision_index,
            inventory_index, text_index, signature_index,
            chk_index=chk_index)
        self.upload_transport = upload_transport
        self.index_transport = index_transport
        self.index_sizes = [None, None, None, None]
        indices = [
            ('revision', revision_index),
            ('inventory', inventory_index),
            ('text', text_index),
            ('signature', signature_index),
            ]
        if chk_index is not None:
            indices.append(('chk', chk_index))
            self.index_sizes.append(None)
        for index_type, index in indices:
            offset = self.index_offset(index_type)
            self.index_sizes[offset] = index._size
        self.index_class = pack_collection._index_class
        self._pack_collection = pack_collection
        self._state = 'resumed'
        # XXX: perhaps check that the .pack file exists?

    def access_tuple(self):
        if self._state == 'finished':
            return Pack.access_tuple(self)
        elif self._state == 'resumed':
            return self.upload_transport, self.file_name()
        else:
            raise AssertionError(self._state)

    def abort(self):
        self.upload_transport.delete(self.file_name())
        indices = [self.revision_index, self.inventory_index, self.text_index,
            self.signature_index]
        if self.chk_index is not None:
            indices.append(self.chk_index)
        for index in indices:
            index._transport.delete(index._name)

    def finish(self):
        self._check_references()
        index_types = ['revision', 'inventory', 'text', 'signature']
        if self.chk_index is not None:
            index_types.append('chk')
        for index_type in index_types:
            old_name = self.index_name(index_type, self.name)
            new_name = '../indices/' + old_name
            self.upload_transport.rename(old_name, new_name)
            self._replace_index_with_readonly(index_type)
        new_name = '../packs/' + self.file_name()
        self.upload_transport.rename(self.file_name(), new_name)
        self._state = 'finished'

    def _get_external_refs(self, index):
        """Return compression parents for this index that are not present.

        This returns any compression parents that are referenced by this index,
        which are not contained *in* this index. They may be present elsewhere.
        """
        return index.external_references(1)
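
# Illustrative sketch (not from the original module): external_references(1)
# walks reference list 1 (the compression parents) and reports keys that the
# index refers to but does not itself contain.  A resumed pack whose text
# ('f-id', 'k2') is a delta against ('f-id', 'k1') stored in another pack
# would report:
#
#   pack._get_external_refs(pack.text_index)   # -> set([('f-id', 'k1')])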


class NewPack(Pack):
    """An in memory proxy for a pack which is being created."""

    def __init__(self, pack_collection, upload_suffix='', file_mode=None):
        """Create a NewPack instance.

        :param pack_collection: A PackCollection into which this is being inserted.
        :param upload_suffix: An optional suffix to be given to any temporary
            files created during the pack creation. e.g '.autopack'
        :param file_mode: Unix permissions for newly created file.
        """
        # The relative locations of the packs are constrained, but all are
        # passed in because the caller has them, so as to avoid object churn.
        index_builder_class = pack_collection._index_builder_class
        if pack_collection.chk_index is not None:
            chk_index = index_builder_class(reference_lists=0)
        else:
            chk_index = None
        Pack.__init__(self,
            # Revisions: parents list, no text compression.
            index_builder_class(reference_lists=1),
            # Inventory: We want to map compression only, but currently the
            # knit code hasn't been updated enough to understand that, so we
            # have a regular 2-list index giving parents and compression
            # source.
            index_builder_class(reference_lists=2),
            # Texts: compression and per file graph, for all fileids - so two
            # reference lists and two elements in the key tuple.
            index_builder_class(reference_lists=2, key_elements=2),
            # Signatures: Just blobs to store, no compression, no parents
            # list.
            index_builder_class(reference_lists=0),
            # CHK based storage - just blobs, no compression or parents.
            chk_index=chk_index
            )
        self._pack_collection = pack_collection
        # When we make readonly indices, we need this.
        self.index_class = pack_collection._index_class
        # where should the new pack be opened
        self.upload_transport = pack_collection._upload_transport
        # where are indices written out to
        self.index_transport = pack_collection._index_transport
        # where is the pack renamed to when it is finished?
        self.pack_transport = pack_collection._pack_transport
        # What file mode to upload the pack and indices with.
        self._file_mode = file_mode
        # tracks the content written to the .pack file.
        self._hash = osutils.md5()
        # a tuple with the length in bytes of the indices, once the pack
        # is finalised. (rev, inv, text, sigs, chk_if_in_use)
        self.index_sizes = None
        # How much data to cache when writing packs. Note that this is not
        # synchronised with reads, because it's not in the transport layer, so
        # is not safe unless the client knows it won't be reading from the pack
        # under creation.
        self._cache_limit = 0
        # the temporary pack file name.
        self.random_name = osutils.rand_chars(20) + upload_suffix
        # when was this pack started ?
        self.start_time = time.time()
        # open an output stream for the data added to the pack.
        self.write_stream = self.upload_transport.open_write_stream(
            self.random_name, mode=self._file_mode)
        if 'pack' in debug.debug_flags:
            mutter('%s: create_pack: pack stream open: %s%s t+%6.3fs',
                time.ctime(), self.upload_transport.base, self.random_name,
                time.time() - self.start_time)
        # A list of byte sequences to be written to the new pack, and the
        # aggregate size of them.  Stored as a list rather than separate
        # variables so that the _write_data closure below can update them.
        self._buffer = [[], 0]
        # create a callable for adding data
        #
        # robertc says- this is a closure rather than a method on the object
        # so that the variables are locals, and faster than accessing object
        # members.
        def _write_data(bytes, flush=False, _buffer=self._buffer,
            _write=self.write_stream.write, _update=self._hash.update):
            _buffer[0].append(bytes)
            _buffer[1] += len(bytes)
            # buffer cap?
            if _buffer[1] > self._cache_limit or flush:
                bytes = ''.join(_buffer[0])
                _write(bytes)
                _update(bytes)
                _buffer[:] = [[], 0]
        # expose this on self, for the occasion when clients want to add data.
        self._write_data = _write_data
        # a pack writer object to serialise pack records.
        self._writer = pack.ContainerWriter(self._write_data)
        self._writer.begin()
        # what state is the pack in? (open, finished, aborted)
        self._state = 'open'
        # no name until we finish writing the content
        self.name = None

    def abort(self):
        """Cancel creating this pack."""
        self._state = 'aborted'
        self.write_stream.close()
        # Remove the temporary pack file.
        self.upload_transport.delete(self.random_name)
        # The indices have no state on disk.
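
    # Illustrative sketch (not from the original module): with the default
    # _cache_limit of 0 every non-empty call to _write_data flushes, while
    # set_write_cache_size(N) batches writes until N bytes accumulate:
    #
    #   new_pack.set_write_cache_size(1024 * 1024)
    #   new_pack._write_data('record bytes')   # buffered
    #   new_pack._write_data('', flush=True)   # forces the buffer out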

    def access_tuple(self):
        """Return a tuple (transport, name) for the pack content."""
        if self._state == 'finished':
            return Pack.access_tuple(self)
        elif self._state == 'open':
            return self.upload_transport, self.random_name
        else:
            raise AssertionError(self._state)

    def data_inserted(self):
        """True if data has been added to this pack."""
        return bool(self.get_revision_count() or
            self.inventory_index.key_count() or
            self.text_index.key_count() or
            self.signature_index.key_count() or
            (self.chk_index is not None and self.chk_index.key_count()))

    def finish_content(self):
        if self.name is not None:
            return
        self._writer.end()
        if self._buffer[1]:
            self._write_data('', flush=True)
        self.name = self._hash.hexdigest()

    def finish(self, suspend=False):
        """Finish the new pack.

        This:

        - finalises the content
        - assigns a name (the md5 of the content, currently)
        - writes out the associated indices
        - renames the pack into place.
        - stores the index size tuple for the pack in the index_sizes
          mapping.
        """
        self.finish_content()
        if not suspend:
            self._check_references()
        # write indices
        # XXX: It'd be better to write them all to temporary names, then
        # rename them all into place, so that the window when only some are
        # visible is smaller.  On the other hand none will be seen until
        # they're in the names list.
        self.index_sizes = [None, None, None, None]
        self._write_index('revision', self.revision_index, 'revision', suspend)
        self._write_index('inventory', self.inventory_index, 'inventory',
            suspend)
        self._write_index('text', self.text_index, 'file texts', suspend)
        self._write_index('signature', self.signature_index,
            'revision signatures', suspend)
        if self.chk_index is not None:
            self.index_sizes.append(None)
            self._write_index('chk', self.chk_index,
                'content hash bytes', suspend)
        self.write_stream.close()
        # Note that this will clobber an existing pack with the same name,
        # without checking for hash collisions. While this is undesirable this
        # is something that can be rectified in a subsequent release. One way
        # to rectify it may be to leave the pack at the original name, writing
        # its pack-names entry as something like 'HASH: index-sizes
        # temporary-name'. Allocate that and check for collisions, if it is
        # collision free then rename it into place. If clients know this scheme
        # they can handle missing-file errors by:
        #  - try for HASH.pack
        #  - try for temporary-name
        #  - refresh the pack-list to see if the pack is now absent
        new_name = self.name + '.pack'
        if not suspend:
            new_name = '../packs/' + new_name
        self.upload_transport.rename(self.random_name, new_name)
        self._state = 'finished'
        if 'pack' in debug.debug_flags:
            # XXX: size might be interesting?
            mutter('%s: create_pack: pack finished: %s%s->%s t+%6.3fs',
                time.ctime(), self.upload_transport.base, self.random_name,
                new_name, time.time() - self.start_time)

    def flush(self):
        """Flush any current data."""
        if self._buffer[1]:
            bytes = ''.join(self._buffer[0])
            self.write_stream.write(bytes)
            self._hash.update(bytes)
            self._buffer[:] = [[], 0]

    def _get_external_refs(self, index):
        return index._external_references()

    def set_write_cache_size(self, size):
        self._cache_limit = size

    def _write_index(self, index_type, index, label, suspend=False):
        """Write out an index.

        :param index_type: The type of index to write - e.g. 'revision'.
        :param index: The index object to serialise.
        :param label: What label to give the index e.g. 'revision'.
        """
        index_name = self.index_name(index_type, self.name)
        if suspend:
            transport = self.upload_transport
        else:
            transport = self.index_transport
        self.index_sizes[self.index_offset(index_type)] = transport.put_file(
            index_name, index.finish(), mode=self._file_mode)
        if 'pack' in debug.debug_flags:
            # XXX: size might be interesting?
            mutter('%s: create_pack: wrote %s index: %s%s t+%6.3fs',
                time.ctime(), label, self.upload_transport.base,
                self.random_name, time.time() - self.start_time)
        # Replace the writable index on this object with a readonly,
        # presently unloaded index. We should alter
        # the index layer to make its finish() error if add_node is
        # subsequently used. RBC
        self._replace_index_with_readonly(index_type)


class AggregateIndex(object):
    """An aggregated index for the RepositoryPackCollection.

    AggregateIndex is responsible for managing the PackAccess object,
    Index-To-Pack mapping, and all indices list for a specific type of index
    such as 'revision index'.

    A CombinedIndex provides an index on a single key space built up
    from several on-disk indices.  The AggregateIndex builds on this
    to provide a knit access layer, and allows having up to one writable
    index within the collection.
    """
    # XXX: Probably 'can be written to' could/should be separated from 'acts
    # like a knit index' -- mbp 20071024

    def __init__(self, reload_func=None, flush_func=None):
        """Create an AggregateIndex.

        :param reload_func: A function to call if we find we are missing an
            index. Should have the form reload_func() => True if the list of
            active pack files has changed.
        """
        self._reload_func = reload_func
        self.index_to_pack = {}
        self.combined_index = CombinedGraphIndex([], reload_func=reload_func)
        self.data_access = _DirectPackAccess(self.index_to_pack,
                                             reload_func=reload_func,
                                             flush_func=flush_func)
        self.add_callback = None

    def add_index(self, index, pack):
        """Add index to the aggregate, which is an index for Pack pack.

        Future searches on the aggregate index will search this new index
        before all previously inserted indices.

        :param index: An Index for the pack.
        :param pack: A Pack instance.
        """
        # expose it to the index map
        self.index_to_pack[index] = pack.access_tuple()
        # put it at the front of the linear index list
        self.combined_index.insert_index(0, index, pack.name)

    def add_writable_index(self, index, pack):
        """Add an index which is able to have data added to it.

        There can be at most one writable index at any time.  Any
        modifications made to the knit are put into this index.

        :param index: An index from the pack parameter.
        :param pack: A Pack instance.
        """
        if self.add_callback is not None:
            raise AssertionError(
                "%s already has a writable index through %s" % \
                (self, self.add_callback))
        # allow writing: queue writes to a new index
        self.add_index(index, pack)
        # Updates the index to packs mapping as a side effect,
        self.data_access.set_writer(pack._writer, index, pack.access_tuple())
        self.add_callback = index.add_nodes

    def clear(self):
        """Reset all the aggregate data to nothing."""
        self.data_access.set_writer(None, None, (None, None))
        self.index_to_pack.clear()
        del self.combined_index._indices[:]
        del self.combined_index._index_names[:]
        self.add_callback = None

    def remove_index(self, index):
        """Remove index from the indices used to answer queries.

        :param index: An index from the pack parameter.
        """
        del self.index_to_pack[index]
        pos = self.combined_index._indices.index(index)
        del self.combined_index._indices[pos]
        del self.combined_index._index_names[pos]
        if (self.add_callback is not None and
            getattr(index, 'add_nodes', None) == self.add_callback):
            self.add_callback = None
            self.data_access.set_writer(None, None, (None, None))
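
# Illustrative sketch (not from the original module): a collection keeps one
# AggregateIndex per content kind; newly added packs go to the front of the
# search order and at most one index accepts writes at a time:
#
#   agg = AggregateIndex(reload_func=None, flush_func=None)
#   agg.add_index(old_pack.revision_index, old_pack)
#   agg.add_writable_index(new_pack.revision_index, new_pack)
#   agg.combined_index.key_count()   # queries now span both packs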


class Packer(object):
    """Create a pack from packs."""

    def __init__(self, pack_collection, packs, suffix, revision_ids=None,
                 reload_func=None):
        """Create a Packer.

        :param pack_collection: A RepositoryPackCollection object where the
            new pack is being written to.
        :param packs: The packs to combine.
        :param suffix: The suffix to use on the temporary files for the pack.
        :param revision_ids: Revision ids to limit the pack to.
        :param reload_func: A function to call if a pack file/index goes
            missing. The side effect of calling this function should be to
            update self.packs. See also AggregateIndex
        """
        self.packs = packs
        self.suffix = suffix
        self.revision_ids = revision_ids
        # The pack object we are creating.
        self.new_pack = None
        self._pack_collection = pack_collection
        self._reload_func = reload_func
        # The index layer keys for the revisions being copied. None for 'all
        # revisions'.
        self._revision_keys = None
        # What text keys to copy. None for 'all texts'. This is set by
        # _copy_inventory_texts
        self._text_filter = None

    def pack(self, pb=None):
        """Create a new pack by reading data from other packs.

        This does little more than a bulk copy of data. One key difference
        is that data with the same item key across multiple packs is elided
        from the output. The new pack is written into the current pack store
        along with its indices, and the name added to the pack names. The
        source packs are not altered and are not required to be in the current
        pack collection.

        :param pb: An optional progress bar to use. A nested bar is created if
            this is None.
        :return: A Pack object, or None if nothing was copied.
        """
        # open a pack - using the same name as the last temporary file
        # - which has already been flushed, so it's safe.
        # XXX: - duplicate code warning with start_write_group; fix before
        #      considering 'done'.
        if self._pack_collection._new_pack is not None:
            raise errors.BzrError('call to %s.pack() while another pack is'
                                  ' being written.'
                                  % (self.__class__.__name__,))
        if self.revision_ids is not None:
            if len(self.revision_ids) == 0:
                # silly fetch request.
                return None
            else:
                self.revision_ids = frozenset(self.revision_ids)
                self.revision_keys = frozenset((revid,) for revid in
                    self.revision_ids)
        if pb is None:
            self.pb = ui.ui_factory.nested_progress_bar()
        else:
            self.pb = pb
        try:
            return self._create_pack_from_packs()
        finally:
            if pb is None:
                self.pb.finished()

    def open_pack(self):
        """Open a pack for the pack we are creating."""
        new_pack = self._pack_collection.pack_factory(self._pack_collection,
                upload_suffix=self.suffix,
                file_mode=self._pack_collection.repo.bzrdir._get_file_mode())
        # We know that we will process all nodes in order, and don't need to
        # query, so don't combine any indices spilled to disk until we are done
        new_pack.revision_index.set_optimize(combine_backing_indices=False)
        new_pack.inventory_index.set_optimize(combine_backing_indices=False)
        new_pack.text_index.set_optimize(combine_backing_indices=False)
        new_pack.signature_index.set_optimize(combine_backing_indices=False)
        return new_pack

    def _copy_revision_texts(self):
        """Copy revision data to the new pack."""
        raise NotImplementedError(self._copy_revision_texts)

    def _copy_inventory_texts(self):
        """Copy the inventory texts to the new pack.

        self._revision_keys is used to determine what inventories to copy.

        Sets self._text_filter appropriately.
        """
        raise NotImplementedError(self._copy_inventory_texts)

    def _copy_text_texts(self):
        raise NotImplementedError(self._copy_text_texts)

    def _create_pack_from_packs(self):
        raise NotImplementedError(self._create_pack_from_packs)

    def _log_copied_texts(self):
        if 'pack' in debug.debug_flags:
            mutter('%s: create_pack: file texts copied: %s%s %d items t+%6.3fs',
                time.ctime(), self._pack_collection._upload_transport.base,
                self.new_pack.random_name,
                self.new_pack.text_index.key_count(),
                time.time() - self.new_pack.start_time)

    def _use_pack(self, new_pack):
        """Return True if new_pack should be used.

        :param new_pack: The pack that has just been created.
        :return: True if the pack should be used.
        """
        return new_pack.data_inserted()


class RepositoryPackCollection(object):
    """Management of packs within a repository.

    :ivar _names: map of {pack_name: (index_size,)}
    """

    pack_factory = None
    resumed_pack_factory = None
    normal_packer_class = None
    optimising_packer_class = None

    def __init__(self, repo, transport, index_transport, upload_transport,
                 pack_transport, index_builder_class, index_class,
                 use_chk_index):
        """Create a new RepositoryPackCollection.

        :param transport: Addresses the repository base directory
            (typically .bzr/repository/).
        :param index_transport: Addresses the directory containing indices.
        :param upload_transport: Addresses the directory into which packs are written
            while they're being created.
        :param pack_transport: Addresses the directory of existing complete packs.
        :param index_builder_class: The index builder class to use.
        :param index_class: The index class to use.
        :param use_chk_index: Whether to setup and manage a CHK index.
        """
        # XXX: This should call self.reset()
        self.repo = repo
        self.transport = transport
        self._index_transport = index_transport
        self._upload_transport = upload_transport
        self._pack_transport = pack_transport
        self._index_builder_class = index_builder_class
        self._index_class = index_class
        self._suffix_offsets = {'.rix': 0, '.iix': 1, '.tix': 2, '.six': 3,
            '.cix': 4}
        self.packs = []
        # name:Pack mapping
        self._names = None
        self._packs_by_name = {}
        # the previous pack-names content
        self._packs_at_load = None
        # when a pack is being created by this object, the state of that pack.
        self._new_pack = None
        # aggregated revision index data
        flush = self._flush_new_pack
        self.revision_index = AggregateIndex(self.reload_pack_names, flush)
        self.inventory_index = AggregateIndex(self.reload_pack_names, flush)
        self.text_index = AggregateIndex(self.reload_pack_names, flush)
        self.signature_index = AggregateIndex(self.reload_pack_names, flush)
        all_indices = [self.revision_index, self.inventory_index,
                self.text_index, self.signature_index]
        if use_chk_index:
            self.chk_index = AggregateIndex(self.reload_pack_names, flush)
            all_indices.append(self.chk_index)
        else:
            # used to determine if we're using a chk_index elsewhere.
            self.chk_index = None
        # Tell all the CombinedGraphIndex objects about each other, so they can
        # share hints about which pack names to search first.
        all_combined = [agg_idx.combined_index for agg_idx in all_indices]
        for combined_idx in all_combined:
            combined_idx.set_sibling_indices(
                set(all_combined).difference([combined_idx]))
        # resumed packs
        self._resumed_packs = []

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, self.repo)

    def add_pack_to_memory(self, pack):
        """Make a Pack object available to the repository to satisfy queries.

        :param pack: A Pack object.
        """
        if pack.name in self._packs_by_name:
            raise AssertionError(
                'pack %s already in _packs_by_name' % (pack.name,))
        self.packs.append(pack)
        self._packs_by_name[pack.name] = pack
        self.revision_index.add_index(pack.revision_index, pack)
        self.inventory_index.add_index(pack.inventory_index, pack)
        self.text_index.add_index(pack.text_index, pack)
        self.signature_index.add_index(pack.signature_index, pack)
        if self.chk_index is not None:
            self.chk_index.add_index(pack.chk_index, pack)

    def all_packs(self):
        """Return a list of all the Pack objects this repository has.

        Note that an in-progress pack being created is not returned.

        :return: A list of Pack objects for all the packs in the repository.
        """
        result = []
        for name in self.names():
            result.append(self.get_pack_by_name(name))
        return result

    def autopack(self):
        """Pack the pack collection incrementally.

        This will not attempt global reorganisation or recompression,
        rather it will just ensure that the total number of packs does
        not grow without bound. It uses the _max_pack_count method to
        determine if autopacking is needed, and the pack_distribution
        method to determine the number of revisions in each pack.

        If autopacking takes place then the packs name collection will have
        been flushed to disk - packing requires updating the name collection
        in synchronisation with certain steps. Otherwise the names collection
        is not flushed.

        :return: Something evaluating true if packing took place.
        """
        while True:
            try:
                return self._do_autopack()
            except errors.RetryAutopack:
                # If we get a RetryAutopack exception, we should abort the
                # current action, and retry.
                pass

    def _do_autopack(self):
        # XXX: Should not be needed when the management of indices is sane.
        total_revisions = self.revision_index.combined_index.key_count()
        total_packs = len(self._names)
        if self._max_pack_count(total_revisions) >= total_packs:
            return None
        # determine which packs need changing
        pack_distribution = self.pack_distribution(total_revisions)
        existing_packs = []
        for pack in self.all_packs():
            revision_count = pack.get_revision_count()
            if revision_count == 0:
                # revision less packs are not generated by normal operation,
                # only by operations like sign-my-commits, and thus will not
                # tend to grow rapidly or without bound like commit containing
                # packs do - leave them alone as packing them really should
                # group their data with the relevant commit, and that may
                # involve rewriting ancient history - which autopack tries to
                # avoid. Alternatively we could not group the data but treat
                # each of these as having a single revision, and thus add
                # one revision for each to the total revision count, to get
                # a matching distribution.
                continue
            existing_packs.append((revision_count, pack))
        pack_operations = self.plan_autopack_combinations(
            existing_packs, pack_distribution)
        num_new_packs = len(pack_operations)
        num_old_packs = sum([len(po[1]) for po in pack_operations])
        num_revs_affected = sum([po[0] for po in pack_operations])
        mutter('Auto-packing repository %s, which has %d pack files, '
            'containing %d revisions. Packing %d files into %d affecting %d'
            ' revisions', self, total_packs, total_revisions, num_old_packs,
            num_new_packs, num_revs_affected)
        result = self._execute_pack_operations(pack_operations,
            packer_class=self.normal_packer_class,
            reload_func=self._restart_autopack)
        mutter('Auto-packing repository %s completed', self)
        return result

    def _execute_pack_operations(self, pack_operations, packer_class,
            reload_func=None):
        """Execute a series of pack operations.

        :param pack_operations: A list of [revision_count, packs_to_combine].
        :param packer_class: The class of packer to use
        :return: The new pack names.
        """
        for revision_count, packs in pack_operations:
            # we may have no-ops from the setup logic
            if len(packs) == 0:
                continue
            packer = packer_class(self, packs, '.autopack',
                                  reload_func=reload_func)
            try:
                result = packer.pack()
            except errors.RetryWithNewPacks:
                # An exception is propagating out of this context, make sure
                # this packer has cleaned up. Packer() doesn't set its new_pack
                # state into the RepositoryPackCollection object, so we only
                # have access to it directly here.
                if packer.new_pack is not None:
                    packer.new_pack.abort()
                raise
            for pack in packs:
                self._remove_pack_from_memory(pack)
        # record the newly available packs and stop advertising the old
        # packs
        to_be_obsoleted = []
        for _, packs in pack_operations:
            to_be_obsoleted.extend(packs)
        result = self._save_pack_names(clear_obsolete_packs=True,
                                       obsolete_packs=to_be_obsoleted)
        return result

    def _flush_new_pack(self):
        if self._new_pack is not None:
            self._new_pack.flush()

    def lock_names(self):
        """Acquire the mutex around the pack-names index.

        This cannot be used in the middle of a read-only transaction on the
        repository.
        """
        self.repo.control_files.lock_write()

    def _already_packed(self):
        """Is the collection already packed?"""
        return not (self.repo._format.pack_compresses or (len(self._names) > 1))

    def pack(self, hint=None, clean_obsolete_packs=False):
        """Pack the pack collection totally."""
        self.ensure_loaded()
        total_packs = len(self._names)
        if self._already_packed():
            return
        total_revisions = self.revision_index.combined_index.key_count()
        # XXX: the following may want to be a class, to pack with a given
        # policy.
        mutter('Packing repository %s, which has %d pack files, '
            'containing %d revisions with hint %r.', self, total_packs,
            total_revisions, hint)
        while True:
            try:
                self._try_pack_operations(hint)
            except RetryPackOperations:
                continue
            break

        if clean_obsolete_packs:
            self._clear_obsolete_packs()

    def _try_pack_operations(self, hint):
        """Calculate the pack operations based on the hint (if any), and
        execute them.
        """
        # determine which packs need changing
        pack_operations = [[0, []]]
        for pack in self.all_packs():
            if hint is None or pack.name in hint:
                # Either no hint was provided (so we are packing everything),
                # or this pack was included in the hint.
                pack_operations[-1][0] += pack.get_revision_count()
                pack_operations[-1][1].append(pack)
        self._execute_pack_operations(pack_operations,
            packer_class=self.optimising_packer_class,
            reload_func=self._restart_pack_operations)

    def plan_autopack_combinations(self, existing_packs, pack_distribution):
        """Plan a pack operation.

        :param existing_packs: The packs to pack. (A list of (revcount, Pack)
            tuples).
        :param pack_distribution: A list with the number of revisions desired
            in each pack.
        """
        if len(existing_packs) <= len(pack_distribution):
            return []
        existing_packs.sort(reverse=True)
        pack_operations = [[0, []]]
        # plan out what packs to keep, and what to reorganise
        while len(existing_packs):
            # take the largest pack, and if it's less than the head of the
            # distribution chart we will include its contents in the new pack
            # for that position. If it's larger, we remove its size from the
            # distribution chart
            next_pack_rev_count, next_pack = existing_packs.pop(0)
            if next_pack_rev_count >= pack_distribution[0]:
                # this is already packed 'better' than this, so we can
                # not waste time packing it.
                while next_pack_rev_count > 0:
                    next_pack_rev_count -= pack_distribution[0]
                    if next_pack_rev_count >= 0:
                        # more to go
                        del pack_distribution[0]
                    else:
                        # didn't use that entire bucket up
                        pack_distribution[0] = -next_pack_rev_count
            else:
                # add the revisions we're going to add to the next output pack
                pack_operations[-1][0] += next_pack_rev_count
                # allocate this pack to the next pack sub operation
                pack_operations[-1][1].append(next_pack)
                if pack_operations[-1][0] >= pack_distribution[0]:
                    # this pack is used up, shift left.
                    del pack_distribution[0]
                    pack_operations.append([0, []])
        # Now that we know which pack files we want to move, shove them all
        # into a single pack file.
        final_rev_count = 0
        final_pack_list = []
        for num_revs, pack_files in pack_operations:
            final_rev_count += num_revs
            final_pack_list.extend(pack_files)
        if len(final_pack_list) == 1:
            raise AssertionError('We somehow generated an autopack with a'
                ' single pack file being moved.')
        return [[final_rev_count, final_pack_list]]
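
    # Illustrative sketch (not from the original module): given a
    # distribution of [1000, 100, 10, 1] and packs of 1000, 50, 40, 4 and 1
    # revisions, the 1000-revision pack already fills its bucket and is kept,
    # while the small packs are combined into one new pack:
    #
    #   collection.plan_autopack_combinations(
    #       [(1000, p1), (50, p2), (40, p3), (4, p4), (1, p5)],
    #       [1000, 100, 10, 1])
    #   # -> [[95, [p2, p3, p4, p5]]]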

    def ensure_loaded(self):
        """Ensure we have read names from disk.

        :return: True if the disk names had not been previously read.
        """
        # NB: if you see an assertion error here, it's probably access against
        # an unlocked repo. Naughty.
        if not self.repo.is_locked():
            raise errors.ObjectNotLocked(self.repo)
        if self._names is None:
            self._names = {}
            self._packs_at_load = set()
            for index, key, value in self._iter_disk_pack_index():
                name = key[0]
                self._names[name] = self._parse_index_sizes(value)
                self._packs_at_load.add((key, value))
            result = True
        else:
            result = False
        # populate all the metadata.
        self.all_packs()
        return result

    def _parse_index_sizes(self, value):
        """Parse a string of index sizes."""
        return tuple([int(digits) for digits in value.split(' ')])
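
    # Example (illustrative, not part of the original module): the value
    # stored against a pack name in 'pack-names' is a space-separated list of
    # index lengths in bytes, in (rev, inv, text, sig[, chk]) order:
    #
    #   collection._parse_index_sizes('1024 2048 4096 512')
    #   # -> (1024, 2048, 4096, 512)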

    def get_pack_by_name(self, name):
        """Get a Pack object by name.

        :param name: The name of the pack - e.g. '123456'
        :return: A Pack object.
        """
        try:
            return self._packs_by_name[name]
        except KeyError:
            rev_index = self._make_index(name, '.rix')
            inv_index = self._make_index(name, '.iix')
            txt_index = self._make_index(name, '.tix')
            sig_index = self._make_index(name, '.six')
            if self.chk_index is not None:
                chk_index = self._make_index(name, '.cix', is_chk=True)
            else:
                chk_index = None
            result = ExistingPack(self._pack_transport, name, rev_index,
                inv_index, txt_index, sig_index, chk_index)
            self.add_pack_to_memory(result)
            return result

    def _resume_pack(self, name):
        """Get a suspended Pack object by name.

        :param name: The name of the pack - e.g. '123456'
        :return: A Pack object.
        """
        if not re.match('[a-f0-9]{32}', name):
            # Tokens should be md5sums of the suspended pack file, i.e. 32 hex
            # digits.
            raise errors.UnresumableWriteGroup(
                self.repo, [name], 'Malformed write group token')
        try:
            rev_index = self._make_index(name, '.rix', resume=True)
            inv_index = self._make_index(name, '.iix', resume=True)
            txt_index = self._make_index(name, '.tix', resume=True)
            sig_index = self._make_index(name, '.six', resume=True)
            if self.chk_index is not None:
                chk_index = self._make_index(name, '.cix', resume=True,
                                             is_chk=True)
            else:
                chk_index = None
            result = self.resumed_pack_factory(name, rev_index, inv_index,
                txt_index, sig_index, self._upload_transport,
                self._pack_transport, self._index_transport, self,
                chk_index=chk_index)
        except errors.NoSuchFile, e:
            raise errors.UnresumableWriteGroup(self.repo, [name], str(e))
        self.add_pack_to_memory(result)
        self._resumed_packs.append(result)
        return result

    def allocate(self, a_new_pack):
        """Allocate name in the list of packs.

        :param a_new_pack: A NewPack instance to be added to the collection of
            packs for this repository.
        """
        self.ensure_loaded()
        if a_new_pack.name in self._names:
            raise errors.BzrError(
                'Pack %r already exists in %s' % (a_new_pack.name, self))
        self._names[a_new_pack.name] = tuple(a_new_pack.index_sizes)
        self.add_pack_to_memory(a_new_pack)

    def _iter_disk_pack_index(self):
        """Iterate over the contents of the pack-names index.

        This is used when loading the list from disk, and before writing to
        detect updates from others during our write operation.
        :return: An iterator of the index contents.
        """
        return self._index_class(self.transport, 'pack-names', None
                ).iter_all_entries()

    def _make_index(self, name, suffix, resume=False, is_chk=False):
        size_offset = self._suffix_offsets[suffix]
        index_name = name + suffix
        if resume:
            transport = self._upload_transport
            index_size = transport.stat(index_name).st_size
        else:
            transport = self._index_transport
            index_size = self._names[name][size_offset]
        index = self._index_class(transport, index_name, index_size,
                                  unlimited_cache=is_chk)
        if is_chk and self._index_class is btree_index.BTreeGraphIndex:
            index._leaf_factory = btree_index._gcchk_factory
        return index

    def _max_pack_count(self, total_revisions):
        """Return the maximum number of packs to use for total revisions.

        :param total_revisions: The total number of revisions in the
            repository.
        """
        if not total_revisions:
            return 1
        digits = str(total_revisions)
        result = 0
        for digit in digits:
            result += int(digit)
        return result

    def names(self):
        """Provide an order to the underlying names."""
        return sorted(self._names.keys())
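
    # Example (illustrative, not part of the original module): the cap is the
    # digit sum of the revision count, which matches pack_distribution below:
    #
    #   collection._max_pack_count(2435)   # -> 2 + 4 + 3 + 5 == 14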

    def _obsolete_packs(self, packs):
        """Move a number of packs which have been obsoleted out of the way.

        Each pack and its associated indices are moved out of the way.

        Note: for correctness this function should only be called after a new
        pack names index has been written without these pack names, and with
        the names of packs that contain the data previously available via these
        packs.

        :param packs: The packs to obsolete.
        :param return: None.
        """
        for pack in packs:
            try:
                pack.pack_transport.rename(pack.file_name(),
                    '../obsolete_packs/' + pack.file_name())
            except (errors.PathError, errors.TransportError), e:
                # TODO: Should these be warnings or mutters?
                mutter("couldn't rename obsolete pack, skipping it:\n%s"
                       % (e,))
            # TODO: Probably needs to know all possible indices for this pack
            # - or maybe list the directory and move all indices matching this
            # name whether we recognize it or not?
            suffixes = ['.iix', '.six', '.tix', '.rix']
            if self.chk_index is not None:
                suffixes.append('.cix')
            for suffix in suffixes:
                try:
                    self._index_transport.rename(pack.name + suffix,
                        '../obsolete_packs/' + pack.name + suffix)
                except (errors.PathError, errors.TransportError), e:
                    mutter("couldn't rename obsolete index, skipping it:\n%s"
                           % (e,))

    def pack_distribution(self, total_revisions):
        """Generate a list of the number of revisions to put in each pack.

        :param total_revisions: The total number of revisions in the
            repository.
        """
        if total_revisions == 0:
            return [0]
        digits = reversed(str(total_revisions))
        result = []
        for exponent, count in enumerate(digits):
            size = 10 ** exponent
            for pos in range(int(count)):
                result.append(size)
        return list(reversed(result))
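
    # Example (illustrative, not part of the original module): each decimal
    # digit of the revision count contributes that many packs of the matching
    # power of ten:
    #
    #   collection.pack_distribution(2435)
    #   # -> [1000, 1000, 100, 100, 100, 100, 10, 10, 10, 1, 1, 1, 1, 1]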

    def _pack_tuple(self, name):
        """Return a tuple with the transport and file name for a pack name."""
        return self._pack_transport, name + '.pack'

    def _remove_pack_from_memory(self, pack):
        """Remove pack from the packs accessed by this repository.

        Only affects memory state, until self._save_pack_names() is invoked.
        """
        self._names.pop(pack.name)
        self._packs_by_name.pop(pack.name)
        self._remove_pack_indices(pack)
        self.packs.remove(pack)

    def _remove_pack_indices(self, pack, ignore_missing=False):
        """Remove the indices for pack from the aggregated indices.

        :param ignore_missing: Suppress KeyErrors from calling remove_index.
        """
        for index_type in Pack.index_definitions.keys():
            attr_name = index_type + '_index'
            aggregate_index = getattr(self, attr_name)
            if aggregate_index is not None:
                pack_index = getattr(pack, attr_name)
                try:
                    aggregate_index.remove_index(pack_index)
                except KeyError:
                    if ignore_missing:
                        continue
                    raise

    def reset(self):
        """Clear all cached data."""
        # cached revision data
        self.revision_index.clear()
        # cached signature data
        self.signature_index.clear()
        # cached file text data
        self.text_index.clear()
        # cached inventory data
        self.inventory_index.clear()
        # cached chk data
        if self.chk_index is not None:
            self.chk_index.clear()
        # remove the open pack
        self._new_pack = None
        # information about packs.
        self._names = None
        self.packs = []
        self._packs_by_name = {}
        self._packs_at_load = None

    def _unlock_names(self):
        """Release the mutex around the pack-names index."""
        self.repo.control_files.unlock()

    def _diff_pack_names(self):
        """Read the pack names from disk, and compare it to the one in memory.

        :return: (disk_nodes, deleted_nodes, new_nodes)
            disk_nodes    The final set of nodes that should be referenced
            deleted_nodes Nodes which have been removed from when we started
            new_nodes     Nodes that are newly introduced
        """
        # load the disk nodes across
        disk_nodes = set()
        for index, key, value in self._iter_disk_pack_index():
            disk_nodes.add((key, value))
        orig_disk_nodes = set(disk_nodes)

        # do a two-way diff against our original content
        current_nodes = set()
        for name, sizes in self._names.iteritems():
            current_nodes.add(
                ((name, ), ' '.join(str(size) for size in sizes)))

        # Packs no longer present in the repository, which were present when we
        # locked the repository
        deleted_nodes = self._packs_at_load - current_nodes
        # Packs which this process is adding
        new_nodes = current_nodes - self._packs_at_load

        # Update the disk_nodes set to include the ones we are adding, and
        # remove the ones which were removed by someone else
        disk_nodes.difference_update(deleted_nodes)
        disk_nodes.update(new_nodes)

        return disk_nodes, deleted_nodes, new_nodes, orig_disk_nodes
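
    # Illustrative sketch (not from the original module): the merge is plain
    # set arithmetic over ((name,), sizes) nodes.  If we loaded {A, B},
    # another process replaced B with C on disk, and we added D in memory,
    # then deleted = {B}, new = {D} and the final disk set is {A, C, D}:
    #
    #   disk = set(['A', 'C']); at_load = set(['A', 'B'])
    #   current = set(['A', 'D'])
    #   disk -= at_load - current; disk |= current - at_load
    #   # disk == set(['A', 'C', 'D'])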

    def _syncronize_pack_names_from_disk_nodes(self, disk_nodes):
        """Given the correct set of pack files, update our saved info.

        :return: (removed, added, modified)
            removed     pack names removed from self._names
            added       pack names added to self._names
            modified    pack names that had changed value
        """
        removed = []
        added = []
        modified = []
        ## self._packs_at_load = disk_nodes
        new_names = dict(disk_nodes)
        # drop no longer present nodes
        for pack in self.all_packs():
            if (pack.name,) not in new_names:
                removed.append(pack.name)
                self._remove_pack_from_memory(pack)
        # add new nodes/refresh existing ones
        for key, value in disk_nodes:
            name = key[0]
            sizes = self._parse_index_sizes(value)
            if name in self._names:
                # existing
                if sizes != self._names[name]:
                    # the pack for name has had its indices replaced - rare but
                    # important to handle. XXX: probably can never happen today
                    # because the three-way merge code above does not handle it
                    # - you may end up adding the same key twice to the new
                    # disk index because the set values are the same, unless
                    # the only index shows up as deleted by the set difference
                    # - which it may. Until there is a specific test for this,
                    # assume it's broken. RBC 20071017.
                    self._remove_pack_from_memory(self.get_pack_by_name(name))
                    self._names[name] = sizes
                    self.get_pack_by_name(name)
                    modified.append(name)
            else:
                # new
                self._names[name] = sizes
                self.get_pack_by_name(name)
                added.append(name)
        return removed, added, modified

    def _save_pack_names(self, clear_obsolete_packs=False, obsolete_packs=None):
        """Save the list of packs.

        This will take out the mutex around the pack names list for the
        duration of the method call. If concurrent updates have been made, a
        three-way merge between the current list and the current in memory list
        is performed.

        :param clear_obsolete_packs: If True, clear out the contents of the
            obsolete_packs directory.
        :param obsolete_packs: Packs that are obsolete once the new pack-names
            file has been written.
        :return: A list of the names saved that were not previously on disk.
        """
        already_obsolete = []
        self.lock_names()
        try:
            builder = self._index_builder_class()
            (disk_nodes, deleted_nodes, new_nodes,
             orig_disk_nodes) = self._diff_pack_names()
            # TODO: handle same-name, index-size-changes here -
            # e.g. use the value from disk, not ours, *unless* we're the one
            # changing it.
            for key, value in disk_nodes:
                builder.add_node(key, value)
            self.transport.put_file('pack-names', builder.finish(),
                mode=self.repo.bzrdir._get_file_mode())
            self._packs_at_load = disk_nodes
            if clear_obsolete_packs:
                to_preserve = None
                if obsolete_packs:
                    to_preserve = set([o.name for o in obsolete_packs])
                already_obsolete = self._clear_obsolete_packs(to_preserve)
        finally:
            self._unlock_names()
        # synchronise the memory packs list with what we just wrote:
        self._syncronize_pack_names_from_disk_nodes(disk_nodes)
        if obsolete_packs:
            # TODO: We could add one more condition here. "if o.name not in
            #       orig_disk_nodes and o != the new_pack we haven't written to
            #       disk yet. However, the new pack object is not easily
            #       accessible here (it would have to be passed through the
            #       autopacking code, etc.)
            obsolete_packs = [o for o in obsolete_packs
                              if o.name not in already_obsolete]
            self._obsolete_packs(obsolete_packs)
        return [new_node[0][0] for new_node in new_nodes]

    def reload_pack_names(self):
        """Sync our pack listing with what is present in the repository.

        This should be called when we find out that something we thought was
        present is now missing. This happens when another process re-packs the
        repository, etc.

        :return: True if the in-memory list of packs has been altered at all.
        """
        # The ensure_loaded call is to handle the case where the first call
        # made involving the collection was to reload_pack_names, where we
        # don't have a view of disk contents. It's a bit of a bandaid, and
        # causes two reads of pack-names, but it's a rare corner case not
        # struck with regular push/pull etc.
        first_read = self.ensure_loaded()
        if first_read:
            return True
        # Re-read the pack-names file to work out the new value.
        (disk_nodes, deleted_nodes, new_nodes,
         orig_disk_nodes) = self._diff_pack_names()
        # _packs_at_load is meant to be the explicit list of names in
        # 'pack-names' at the start. As such, it should not contain any
        # pending names that haven't been written out yet.
        self._packs_at_load = orig_disk_nodes
        (removed, added,
         modified) = self._syncronize_pack_names_from_disk_nodes(disk_nodes)
        if removed or added or modified:
            return True
        return False

    def _restart_autopack(self):
        """Reload the pack names list, and restart the autopack code."""
        if not self.reload_pack_names():
            # Re-raise the original exception, because something went missing
            # and a restart didn't find it
            raise
        raise errors.RetryAutopack(self.repo, False, sys.exc_info())

    def _restart_pack_operations(self):
        """Reload the pack names list, and restart the pack operations."""
        if not self.reload_pack_names():
            # Re-raise the original exception, because something went missing
            # and a restart didn't find it
            raise
        raise RetryPackOperations(self.repo, False, sys.exc_info())

    def _clear_obsolete_packs(self, preserve=None):
        """Delete everything from the obsolete-packs directory.

        :return: A list of pack identifiers (the filename without '.pack') that
            were found in obsolete_packs.
        """
        found = []
        obsolete_pack_transport = self.transport.clone('obsolete_packs')
        if preserve is None:
            preserve = set()
        for filename in obsolete_pack_transport.list_dir('.'):
            name, ext = osutils.splitext(filename)
            if ext == '.pack':
                found.append(name)
            if name in preserve:
                continue
            try:
                obsolete_pack_transport.delete(filename)
            except (errors.PathError, errors.TransportError), e:
                warning("couldn't delete obsolete pack, skipping it:\n%s"
                        % (e,))
        return found

    def _start_write_group(self):
        # Do not permit preparation for writing if we're not in a 'write lock'.
        if not self.repo.is_write_locked():
            raise errors.NotWriteLocked(self)
        self._new_pack = self.pack_factory(self, upload_suffix='.pack',
            file_mode=self.repo.bzrdir._get_file_mode())
        # allow writing: queue writes to a new index
        self.revision_index.add_writable_index(self._new_pack.revision_index,
            self._new_pack)
        self.inventory_index.add_writable_index(self._new_pack.inventory_index,
            self._new_pack)
        self.text_index.add_writable_index(self._new_pack.text_index,
            self._new_pack)
        self._new_pack.text_index.set_optimize(combine_backing_indices=False)
        self.signature_index.add_writable_index(self._new_pack.signature_index,
            self._new_pack)
        if self.chk_index is not None:
            self.chk_index.add_writable_index(self._new_pack.chk_index,
                self._new_pack)
            self.repo.chk_bytes._index._add_callback = self.chk_index.add_callback
            self._new_pack.chk_index.set_optimize(combine_backing_indices=False)

        self.repo.inventories._index._add_callback = self.inventory_index.add_callback
        self.repo.revisions._index._add_callback = self.revision_index.add_callback
        self.repo.signatures._index._add_callback = self.signature_index.add_callback
        self.repo.texts._index._add_callback = self.text_index.add_callback

    def _abort_write_group(self):
        # FIXME: just drop the transient index.
        # forget what names there are
        if self._new_pack is not None:
            operation = cleanup.OperationWithCleanups(self._new_pack.abort)
            operation.add_cleanup(setattr, self, '_new_pack', None)
            # If we aborted while in the middle of finishing the write
            # group, _remove_pack_indices could fail because the indexes are
            # already gone.  If they're not there we shouldn't fail in this
            # case, so we pass ignore_missing=True.
            operation.add_cleanup(self._remove_pack_indices, self._new_pack,
                ignore_missing=True)
            operation.run_simple()
        for resumed_pack in self._resumed_packs:
            operation = cleanup.OperationWithCleanups(resumed_pack.abort)
            # See comment in previous finally block.
            operation.add_cleanup(self._remove_pack_indices, resumed_pack,
                ignore_missing=True)
            operation.run_simple()
        del self._resumed_packs[:]

    def _remove_resumed_pack_indices(self):
        for resumed_pack in self._resumed_packs:
            self._remove_pack_indices(resumed_pack)
        del self._resumed_packs[:]

    def _check_new_inventories(self):
        """Detect missing inventories in this write group.

        :returns: list of strs, summarising any problems found.  If the list is
            empty no problems were found.
        """
        # The base implementation does no checks.  GCRepositoryPackCollection
        # overrides this.
        return []

    def _commit_write_group(self):
        all_missing = set()
        for prefix, versioned_file in (
                ('revisions', self.repo.revisions),
                ('inventories', self.repo.inventories),
                ('texts', self.repo.texts),
                ('signatures', self.repo.signatures),
                ):
            missing = versioned_file.get_missing_compression_parent_keys()
            all_missing.update([(prefix,) + key for key in missing])
        if all_missing:
            raise errors.BzrCheckError(
                "Repository %s has missing compression parent(s) %r "
                % (self.repo, sorted(all_missing)))
        problems = self._check_new_inventories()
        if problems:
            problems_summary = '\n'.join(problems)
            raise errors.BzrCheckError(
                "Cannot add revision(s) to repository: " + problems_summary)
        self._remove_pack_indices(self._new_pack)
        any_new_content = False
        if self._new_pack.data_inserted():
            # get all the data to disk and ready to use
            self._new_pack.finish()
            self.allocate(self._new_pack)
            self._new_pack = None
            any_new_content = True
        else:
            self._new_pack.abort()
            self._new_pack = None
        for resumed_pack in self._resumed_packs:
            # XXX: this is a pretty ugly way to turn the resumed pack into a
            # properly committed pack.
            self._names[resumed_pack.name] = None
            self._remove_pack_from_memory(resumed_pack)
            resumed_pack.finish()
            self.allocate(resumed_pack)
            any_new_content = True
        del self._resumed_packs[:]
        if any_new_content:
            result = self.autopack()
            if not result:
                # when autopack takes no steps, the names list is still
                # unsaved.
                return self._save_pack_names()
            return result
        return []

    def _suspend_write_group(self):
        tokens = [pack.name for pack in self._resumed_packs]
        self._remove_pack_indices(self._new_pack)
        if self._new_pack.data_inserted():
            # get all the data to disk and ready to use
            self._new_pack.finish(suspend=True)
            tokens.append(self._new_pack.name)
            self._new_pack = None
        else:
            self._new_pack.abort()
            self._new_pack = None
        self._remove_resumed_pack_indices()
        return tokens

    def _resume_write_group(self, tokens):
        for token in tokens:
            self._resume_pack(token)
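
# Illustrative sketch (not from the original module): suspend/resume tokens
# are simply the md5 pack names, so a suspended write group can be picked up
# again later:
#
#   tokens = collection._suspend_write_group()   # e.g. ['3f0c...']
#   # ... later, possibly in another process ...
#   collection._resume_write_group(tokens)       # re-opens the same packs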


class PackRepository(MetaDirRepository):
    """Repository with knit objects stored inside pack containers.

    The layering for a KnitPackRepository is:

    Graph        |  HPSS    | Repository public layer |
    ===================================================
    Tuple based apis below, string based, and key based apis above
    ---------------------------------------------------
    KnitVersionedFiles
      Provides .texts, .revisions etc
      This adapts the N-tuple keys to physical knit records which only have a
      single string identifier (for historical reasons), which in older formats
      was always the revision_id, and in the mapped code for packs is always
      the last element of key tuples.
    ---------------------------------------------------
    GraphIndex
      A separate GraphIndex is used for each of the
      texts/inventories/revisions/signatures contained within each individual
      pack file. The GraphIndex layer works in N-tuples and is unaware of any
      key mutation.
    ===================================================
    """

    # These attributes are inherited from the Repository base class. Setting
    # them to None ensures that if the constructor is changed to not initialize
    # them, or a subclass fails to call the constructor, that an error will
    # occur rather than the system working but generating incorrect data.
    _commit_builder_class = None
    _serializer = None

    def __init__(self, _format, a_bzrdir, control_files, _commit_builder_class,
        _serializer):
        MetaDirRepository.__init__(self, _format, a_bzrdir, control_files)
        self._commit_builder_class = _commit_builder_class
        self._serializer = _serializer
        self._reconcile_fixes_text_parents = True

    @needs_read_lock
    def _all_revision_ids(self):
        """See Repository.all_revision_ids()."""
        return [key[0] for key in self.revisions.keys()]

    def _abort_write_group(self):
        self.revisions._index._key_dependencies.clear()
        self._pack_collection._abort_write_group()

    def _make_parents_provider(self):
        return graph.CachingParentsProvider(self)

    def _refresh_data(self):
        if not self.is_locked():
            return
        self._pack_collection.reload_pack_names()

    def _start_write_group(self):
        self._pack_collection._start_write_group()

    def _commit_write_group(self):
        hint = self._pack_collection._commit_write_group()
        self.revisions._index._key_dependencies.clear()
        return hint

    def suspend_write_group(self):
        # XXX check self._write_group is self.get_transaction()?
        tokens = self._pack_collection._suspend_write_group()
        self.revisions._index._key_dependencies.clear()
        self._write_group = None
        return tokens

    def _resume_write_group(self, tokens):
        self._start_write_group()
        try:
            self._pack_collection._resume_write_group(tokens)
        except errors.UnresumableWriteGroup:
            self._abort_write_group()
            raise
        for pack in self._pack_collection._resumed_packs:
            self.revisions._index.scan_unvalidated_index(pack.revision_index)

    def get_transaction(self):
        if self._write_lock_count:
            return self._transaction
        else:
            return self.control_files.get_transaction()

    def is_locked(self):
        return self._write_lock_count or self.control_files.is_locked()

    def is_write_locked(self):
        return self._write_lock_count

    def lock_write(self, token=None):
        """Lock the repository for writes.

        :return: A bzrlib.repository.RepositoryWriteLockResult.
        """
        locked = self.is_locked()
        if not self._write_lock_count and locked:
            raise errors.ReadOnlyError(self)
        self._write_lock_count += 1
        if self._write_lock_count == 1:
            self._transaction = transactions.WriteTransaction()
        if not locked:
            if 'relock' in debug.debug_flags and self._prev_lock == 'w':
                note('%r was write locked again', self)
            self._prev_lock = 'w'
            for repo in self._fallback_repositories:
                # Writes don't affect fallback repos
                repo.lock_read()
            self._refresh_data()
        return RepositoryWriteLockResult(self.unlock, None)

    def lock_read(self):
        """Lock the repository for reads.

        :return: A bzrlib.lock.LogicalLockResult.
        """
        locked = self.is_locked()
        if self._write_lock_count:
            self._write_lock_count += 1
        else:
            self.control_files.lock_read()
        if not locked:
            if 'relock' in debug.debug_flags and self._prev_lock == 'r':
                note('%r was read locked again', self)
            self._prev_lock = 'r'
            for repo in self._fallback_repositories:
                repo.lock_read()
            self._refresh_data()
        return LogicalLockResult(self.unlock)
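
    # Illustrative sketch (assumed caller): both lock methods count nested
    # acquisitions, and each unlock undoes exactly one level:
    #
    #     result = repo.lock_write()   # count 1; new WriteTransaction
    #     repo.lock_write()            # count 2; transaction is reused
    #     repo.unlock()                # count 2 -> 1
    #     result.unlock()              # count 1 -> 0; lock released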

    def leave_lock_in_place(self):
        # not supported - raise an error
        raise NotImplementedError(self.leave_lock_in_place)

    def dont_leave_lock_in_place(self):
        # not supported - raise an error
        raise NotImplementedError(self.dont_leave_lock_in_place)

    @needs_write_lock
    def pack(self, hint=None, clean_obsolete_packs=False):
        """Compress the data within the repository.

        This will pack all the data to a single pack. In future it may
        recompress deltas or do other such expensive operations.
        """
        self._pack_collection.pack(hint=hint,
            clean_obsolete_packs=clean_obsolete_packs)
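
    # Illustrative usage (assumed caller): repack everything, or only the
    # packs named in a commit hint:
    #
    #     repo.pack()
    #     repo.pack(hint=hint, clean_obsolete_packs=True)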

    @needs_write_lock
    def reconcile(self, other=None, thorough=False):
        """Reconcile this repository."""
        from bzrlib.reconcile import PackReconciler
        reconciler = PackReconciler(self, thorough=thorough)
        reconciler.reconcile()
        return reconciler

    def _reconcile_pack(self, collection, packs, extension, revs, pb):
        raise NotImplementedError(self._reconcile_pack)

    @only_raises(errors.LockNotHeld, errors.LockBroken)
    def unlock(self):
        if self._write_lock_count == 1 and self._write_group is not None:
            self.abort_write_group()
            self._transaction = None
            self._write_lock_count = 0
            raise errors.BzrError(
                'Must end write group before releasing write lock on %s'
                % self)
        if self._write_lock_count:
            self._write_lock_count -= 1
            if not self._write_lock_count:
                transaction = self._transaction
                self._transaction = None
                transaction.finish()
        else:
            self.control_files.unlock()

        if not self.is_locked():
            for repo in self._fallback_repositories:
                repo.unlock()


class RepositoryFormatPack(MetaDirRepositoryFormat):
    """Format logic for pack structured repositories.

    This repository format has:
     - a list of packs in pack-names
     - packs in packs/NAME.pack
     - indices in indices/NAME.{iix,six,tix,rix}
     - knit deltas in the packs, knit indices mapped to the indices.
     - thunk objects to support the knits programming API.
     - a format marker of its own
     - an optional 'shared-storage' flag
     - an optional 'no-working-trees' flag
     - a LockDir lock
    """

    # Set this attribute in derived classes to control the repository class
    # created by open and initialize.
    repository_class = None
    # Set this attribute in derived classes to control the
    # _commit_builder_class that the repository objects will have passed to
    # their constructor.
    _commit_builder_class = None
    # Set this attribute in derived classes to control the _serializer that the
    # repository objects will have passed to their constructor.
    _serializer = None
    # Packs are not confused by ghosts.
    supports_ghosts = True
    # External references are not supported in pack repositories yet.
    supports_external_lookups = False
    # Most pack formats do not use chk lookups.
    supports_chks = False
    # What index classes to use
    index_builder_class = None
    index_class = None
    _fetch_uses_deltas = True
    fast_deltas = False
    supports_full_versioned_files = True
    supports_funky_characters = True
    revision_graph_can_have_wrong_parents = True

    def initialize(self, a_bzrdir, shared=False):
        """Create a pack based repository.

        :param a_bzrdir: bzrdir to contain the new repository; must already
            be initialized.
        :param shared: If true the repository will be initialized as a shared
            repository.
        """
        mutter('creating repository in %s.', a_bzrdir.transport.base)
        dirs = ['indices', 'obsolete_packs', 'packs', 'upload']
        builder = self.index_builder_class()
        files = [('pack-names', builder.finish())]
        utf8_files = [('format', self.get_format_string())]

        self._upload_blank_content(a_bzrdir, dirs, files, utf8_files, shared)
        repository = self.open(a_bzrdir=a_bzrdir, _found=True)
        self._run_post_repo_init_hooks(repository, a_bzrdir, shared)
        return repository

    def open(self, a_bzrdir, _found=False, _override_transport=None):
        """See RepositoryFormat.open().

        :param _override_transport: INTERNAL USE ONLY. Allows opening the
                                    repository at a slightly different url
                                    than normal. I.e. during 'upgrade'.
        """
        if not _found:
            format = RepositoryFormat.find_format(a_bzrdir)
        if _override_transport is not None:
            repo_transport = _override_transport
        else:
            repo_transport = a_bzrdir.get_repository_transport(None)
        control_files = lockable_files.LockableFiles(repo_transport,
                                'lock', lockdir.LockDir)
        return self.repository_class(_format=self,
                              a_bzrdir=a_bzrdir,
                              control_files=control_files,
                              _commit_builder_class=self._commit_builder_class,
                              _serializer=self._serializer)
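
    # Illustrative sketch (hypothetical subclass): concrete formats fill in
    # the class attributes declared above, along the lines of:
    #
    #     class RepositoryFormatMyPack(RepositoryFormatPack):
    #         repository_class = PackRepository
    #         _commit_builder_class = PackCommitBuilder
    #         index_builder_class = InMemoryGraphIndex
    #         index_class = GraphIndex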


class RetryPackOperations(errors.RetryWithNewPacks):
    """Raised when we are packing and we find a missing file.

    Meant as a signaling exception, to tell the RepositoryPackCollection.pack
    code it should try again.
    """

    internal_error = True

    _fmt = ("Pack files have changed, reload and try pack again."
            " context: %(context)s %(orig_error)s")


class _DirectPackAccess(object):
    """Access to data in one or more packs with less translation."""

    def __init__(self, index_to_packs, reload_func=None, flush_func=None):
        """Create a _DirectPackAccess object.

        :param index_to_packs: A dict mapping index objects to the transport
            and file names for obtaining data.
        :param reload_func: A function to call if we determine that the pack
            files have moved and we need to reload our caches. See
            bzrlib.repofmt.pack_repo.AggregateIndex for more details.
        """
        self._container_writer = None
        self._write_index = None
        self._indices = index_to_packs
        self._reload_func = reload_func
        self._flush_func = flush_func

    def add_raw_records(self, key_sizes, raw_data):
        """Add raw knit bytes to a storage area.

        The data is spooled to the container writer in one bytes-record per
        raw data item.

        :param sizes: An iterable of tuples containing the key and size of each
            raw data segment.
        :param raw_data: A bytestring containing the data.
        :return: A list of memos to retrieve the record later. Each memo is an
            opaque index memo. For _DirectPackAccess the memo is (index, pos,
            length), where the index field is the write_index object supplied
            to the PackAccess object.
        """
        if type(raw_data) is not str:
            raise AssertionError(
                'data must be plain bytes was %s' % type(raw_data))
        result = []
        offset = 0
        for key, size in key_sizes:
            p_offset, p_length = self._container_writer.add_bytes_record(
                raw_data[offset:offset+size], [])
            offset += size
            result.append((self._write_index, p_offset, p_length))
        return result
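
    # Illustrative round trip (hypothetical key and data; assumes the write
    # index was registered via set_writer and buffered bytes were flushed):
    #
    #     memos = access.add_raw_records([(('file-id', 'rev-1'), 5)], '12345')
    #     chunks = list(access.get_raw_records(memos))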

    def flush(self):
        """Flush pending writes on this access object.

        This will flush any buffered writes to a NewPack.
        """
        if self._flush_func is not None:
            self._flush_func()

    def get_raw_records(self, memos_for_retrieval):
        """Get the raw bytes for the records.

        :param memos_for_retrieval: An iterable containing the (index, pos,
            length) memo for retrieving the bytes. The Pack access method
            looks up the pack to use for a given record in its index_to_pack
            map.
        :return: An iterator over the bytes of the records.
        """
        # first pass, group into same-index requests
        request_lists = []
        current_index = None
        for (index, offset, length) in memos_for_retrieval:
            if current_index == index:
                current_list.append((offset, length))
            else:
                if current_index is not None:
                    request_lists.append((current_index, current_list))
                current_index = index
                current_list = [(offset, length)]
        # handle the last entry
        if current_index is not None:
            request_lists.append((current_index, current_list))
        for index, offsets in request_lists:
            try:
                transport, path = self._indices[index]
            except KeyError:
                # A KeyError here indicates that someone has triggered an index
                # reload, and this index has gone missing, we need to start
                # over.
                if self._reload_func is None:
                    # If we don't have a _reload_func there is nothing that can
                    # be done here, so we fail.
                    raise
                raise errors.RetryWithNewPacks(index,
                                               reload_occurred=True,
                                               exc_info=sys.exc_info())
            try:
                reader = pack.make_readv_reader(transport, path, offsets)
                for names, read_func in reader.iter_records():
                    yield read_func(None)
            except errors.NoSuchFile:
                # A NoSuchFile error indicates that a pack file has gone
                # missing on disk, we need to trigger a reload, and start over.
                if self._reload_func is None:
                    raise
                raise errors.RetryWithNewPacks(transport.abspath(path),
                                               reload_occurred=False,
                                               exc_info=sys.exc_info())

    def set_writer(self, writer, index, transport_packname):
        """Set a writer to use for adding data."""
        if index is not None:
            self._indices[index] = transport_packname
        self._container_writer = writer
        self._write_index = index
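
    # Illustrative wiring (assumed attribute names): the aggregate index
    # layer points this access object at the pack being written, roughly:
    #
    #     access.set_writer(new_pack._writer, index, new_pack.access_tuple())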

    def reload_or_raise(self, retry_exc):
        """Try calling the reload function, or re-raise the original exception.

        This should be called after _DirectPackAccess raises a
        RetryWithNewPacks exception. This function will handle the common logic
        of determining when the error is fatal versus being temporary.
        It will also make sure that the original exception is raised, rather
        than the RetryWithNewPacks exception.

        If this function returns, then the calling function should retry
        whatever operation was being performed. Otherwise an exception will
        be raised.

        :param retry_exc: A RetryWithNewPacks exception.
        """
        is_error = False
        if self._reload_func is None:
            is_error = True
        elif not self._reload_func():
            # The reload claimed that nothing changed
            if not retry_exc.reload_occurred:
                # If there wasn't an earlier reload, then we really were
                # expecting to find changes. We didn't find them, so this is a
                # hard error.
                is_error = True
        if is_error:
            exc_class, exc_value, exc_traceback = retry_exc.exc_info
            raise exc_class, exc_value, exc_traceback
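
    # Illustrative retry loop (assumed caller) around the RetryWithNewPacks
    # protocol implemented above:
    #
    #     while True:
    #         try:
    #             return list(access.get_raw_records(memos))
    #         except errors.RetryWithNewPacks, retry_exc:
    #             access.reload_or_raise(retry_exc)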