# Copyright (C) 2005, 2006, 2007, 2008 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

from bzrlib.lazy_import import lazy_import
lazy_import(globals(), """
from itertools import izip
import time

from bzrlib import (
    debug,
    graph,
    osutils,
    pack,
    ui,
    )
from bzrlib.index import (
    CombinedGraphIndex,
    GraphIndexPrefixAdapter,
    )
from bzrlib.knit import (
    KnitPlainFactory,
    KnitVersionedFiles,
    _DirectPackAccess,
    _KnitGraphIndex,
    )
from bzrlib import tsort
""")
from bzrlib import (
    errors,
    )
from bzrlib.decorators import needs_write_lock
from bzrlib.btree_index import (
    BTreeBuilder,
    BTreeGraphIndex,
    )
from bzrlib.index import (
    GraphIndex,
    InMemoryGraphIndex,
    )
from bzrlib.repofmt.knitrepo import KnitRepository
from bzrlib.repository import (
    CommitBuilder,
    MetaDirRepositoryFormat,
    RootCommitBuilder,
    )
import bzrlib.revision as _mod_revision
from bzrlib.trace import (
    mutter,
    )


class PackCommitBuilder(CommitBuilder):
    """A subclass of CommitBuilder to add texts with pack semantics.

    Specifically this uses one knit object rather than one knit object per
    added text, reducing memory and object pressure.
    """

    def __init__(self, repository, parents, config, timestamp=None,
                 timezone=None, committer=None, revprops=None,
                 revision_id=None):
        CommitBuilder.__init__(self, repository, parents, config,
            timestamp=timestamp, timezone=timezone, committer=committer,
            revprops=revprops, revision_id=revision_id)
        self._file_graph = graph.Graph(
            repository._pack_collection.text_index.combined_index)

    def _heads(self, file_id, revision_ids):
        keys = [(file_id, revision_id) for revision_id in revision_ids]
        return set([key[1] for key in self._file_graph.heads(keys)])


class PackRootCommitBuilder(RootCommitBuilder):
    """A subclass of RootCommitBuilder to add texts with pack semantics.

    Specifically this uses one knit object rather than one knit object per
    added text, reducing memory and object pressure.
    """

    def __init__(self, repository, parents, config, timestamp=None,
                 timezone=None, committer=None, revprops=None,
                 revision_id=None):
        CommitBuilder.__init__(self, repository, parents, config,
            timestamp=timestamp, timezone=timezone, committer=committer,
            revprops=revprops, revision_id=revision_id)
        self._file_graph = graph.Graph(
            repository._pack_collection.text_index.combined_index)

    def _heads(self, file_id, revision_ids):
        keys = [(file_id, revision_id) for revision_id in revision_ids]
        return set([key[1] for key in self._file_graph.heads(keys)])
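    # Illustrative sketch (not from the original source): _heads translates
    # revision ids into the (file_id, revision_id) keyspace of the text
    # index, asks the per-file graph for the heads, and strips the file_id
    # back off. With hypothetical ids:
    #
    #   builder._heads('file-1', ['rev-a', 'rev-b'])
    #   # queries self._file_graph.heads(
    #   #     [('file-1', 'rev-a'), ('file-1', 'rev-b')])
    #   # and returns set(['rev-b']) if only rev-b is a head of that file.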


class Pack(object):
    """An in memory proxy for a pack and its indices.

    This is a base class that is not directly used, instead the classes
    ExistingPack and NewPack are used.
    """

    def __init__(self, revision_index, inventory_index, text_index,
        signature_index):
        """Create a pack instance.

        :param revision_index: A GraphIndex for determining what revisions are
            present in the Pack and accessing the locations of their texts.
        :param inventory_index: A GraphIndex for determining what inventories are
            present in the Pack and accessing the locations of their
            texts.
        :param text_index: A GraphIndex for determining what file texts
            are present in the pack and accessing the locations of their
            texts/deltas (via (fileid, revisionid) tuples).
        :param signature_index: A GraphIndex for determining what signatures are
            present in the Pack and accessing the locations of their texts.
        """
        self.revision_index = revision_index
        self.inventory_index = inventory_index
        self.text_index = text_index
        self.signature_index = signature_index

    def access_tuple(self):
        """Return a tuple (transport, name) for the pack content."""
        return self.pack_transport, self.file_name()

    def file_name(self):
        """Get the file name for the pack on disk."""
        return self.name + '.pack'

    def get_revision_count(self):
        return self.revision_index.key_count()

    def inventory_index_name(self, name):
        """The inv index is the name + .iix."""
        return self.index_name('inventory', name)

    def revision_index_name(self, name):
        """The revision index is the name + .rix."""
        return self.index_name('revision', name)

    def signature_index_name(self, name):
        """The signature index is the name + .six."""
        return self.index_name('signature', name)

    def text_index_name(self, name):
        """The text index is the name + .tix."""
        return self.index_name('text', name)
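    # Illustrative sketch (hypothetical pack name): a finished pack named
    # from its content md5, e.g. name = 'a1b2c3', is stored on disk as
    #   a1b2c3.pack   (the pack data)
    #   a1b2c3.rix    (revision index)
    #   a1b2c3.iix    (inventory index)
    #   a1b2c3.tix    (text index)
    #   a1b2c3.six    (signature index)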


class ExistingPack(Pack):
    """An in memory proxy for an existing .pack and its disk indices."""

    def __init__(self, pack_transport, name, revision_index, inventory_index,
        text_index, signature_index):
        """Create an ExistingPack object.

        :param pack_transport: The transport where the pack file resides.
        :param name: The name of the pack on disk in the pack_transport.
        """
        Pack.__init__(self, revision_index, inventory_index, text_index,
            signature_index)
        self.name = name
        self.pack_transport = pack_transport
        if None in (revision_index, inventory_index, text_index,
                signature_index, name, pack_transport):
            raise AssertionError()

    def __eq__(self, other):
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return "<bzrlib.repofmt.pack_repo.Pack object at 0x%x, %s, %s>" % (
            id(self), self.pack_transport, self.name)


class NewPack(Pack):
    """An in memory proxy for a pack which is being created."""

    # A map of index 'type' to the file extension and position in the
    # index_sizes array.
    index_definitions = {
        'revision': ('.rix', 0),
        'inventory': ('.iix', 1),
        'text': ('.tix', 2),
        'signature': ('.six', 3),
        }

    def __init__(self, pack_collection, upload_suffix='', file_mode=None):
        """Create a NewPack instance.

        :param pack_collection: A PackCollection into which this is being inserted.
        :param upload_suffix: An optional suffix to be given to any temporary
            files created during the pack creation. e.g. '.autopack'
        :param file_mode: Unix permissions for newly created file.
        """
        # The relative locations of the packs are constrained, but all are
        # passed in because the caller has them, so as to avoid object churn.
        index_builder_class = pack_collection._index_builder_class
        Pack.__init__(self,
            # Revisions: parents list, no text compression.
            index_builder_class(reference_lists=1),
            # Inventory: We want to map compression only, but currently the
            # knit code hasn't been updated enough to understand that, so we
            # have a regular 2-list index giving parents and compression
            # source.
            index_builder_class(reference_lists=2),
            # Texts: compression and per file graph, for all fileids - so two
            # reference lists and two elements in the key tuple.
            index_builder_class(reference_lists=2, key_elements=2),
            # Signatures: Just blobs to store, no compression, no parents
            # list.
            index_builder_class(reference_lists=0),
            )
        self._pack_collection = pack_collection
        # When we make readonly indices, we need this.
        self.index_class = pack_collection._index_class
        # where should the new pack be opened
        self.upload_transport = pack_collection._upload_transport
        # where are indices written out to
        self.index_transport = pack_collection._index_transport
        # where is the pack renamed to when it is finished?
        self.pack_transport = pack_collection._pack_transport
        # What file mode to upload the pack and indices with.
        self._file_mode = file_mode
        # tracks the content written to the .pack file.
        self._hash = osutils.md5()
        # a four-tuple with the length in bytes of the indices, once the pack
        # is finalised. (rev, inv, text, sigs)
        self.index_sizes = None
        # How much data to cache when writing packs. Note that this is not
        # synchronised with reads, because it's not in the transport layer, so
        # is not safe unless the client knows it won't be reading from the pack
        # under creation.
        self._cache_limit = 0
        # the temporary pack file name.
        self.random_name = osutils.rand_chars(20) + upload_suffix
        # when was this pack started ?
        self.start_time = time.time()
        # open an output stream for the data added to the pack.
        self.write_stream = self.upload_transport.open_write_stream(
            self.random_name, mode=self._file_mode)
        if 'pack' in debug.debug_flags:
            mutter('%s: create_pack: pack stream open: %s%s t+%6.3fs',
                time.ctime(), self.upload_transport.base, self.random_name,
                time.time() - self.start_time)
        # A list of byte sequences to be written to the new pack, and the
        # aggregate size of them.  Stored as a list rather than separate
        # variables so that the _write_data closure below can update them.
        self._buffer = [[], 0]
        # create a callable for adding data
        #
        # robertc says- this is a closure rather than a method on the object
        # so that the variables are locals, and faster than accessing object
        # members.
        def _write_data(bytes, flush=False, _buffer=self._buffer,
            _write=self.write_stream.write, _update=self._hash.update):
            _buffer[0].append(bytes)
            _buffer[1] += len(bytes)
            if _buffer[1] > self._cache_limit or flush:
                bytes = ''.join(_buffer[0])
                _write(bytes)
                _update(bytes)
                _buffer[:] = [[], 0]
        # expose this on self, for the occasion when clients want to add data.
        self._write_data = _write_data
        # a pack writer object to serialise pack records.
        self._writer = pack.ContainerWriter(self._write_data)
        self._writer.begin()
        # what state is the pack in? (open, finished, aborted)
        self._state = 'open'
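        # Illustrative sketch (not from the original source): with the
        # default _cache_limit of 0 every _write_data call flushes straight
        # to the write stream; after set_write_cache_size(1024*1024), as
        # Packer does, writes accumulate until a megabyte is buffered or a
        # flush is forced:
        #
        #   self._write_data('chunk-1')        # buffered
        #   self._write_data('chunk-2')        # buffered
        #   self._write_data('', flush=True)   # writes 'chunk-1chunk-2' and
        #                                      # feeds it to the md5 hash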

    def abort(self):
        """Cancel creating this pack."""
        self._state = 'aborted'
        self.write_stream.close()
        # Remove the temporary pack file.
        self.upload_transport.delete(self.random_name)
        # The indices have no state on disk.

    def access_tuple(self):
        """Return a tuple (transport, name) for the pack content."""
        if self._state == 'finished':
            return Pack.access_tuple(self)
        elif self._state == 'open':
            return self.upload_transport, self.random_name
        else:
            raise AssertionError(self._state)

    def _check_references(self):
        """Make sure our external references are present.

        Packs are allowed to have deltas whose base is not in the pack, but it
        must be present somewhere in this collection.  It is not allowed to
        have deltas based on a fallback repository.
        (See <https://bugs.launchpad.net/bzr/+bug/288751>)
        """
        missing_items = {}
        for (index_name, external_refs, index) in [
            ('texts',
                self.text_index._external_references(),
                self._pack_collection.text_index.combined_index),
            ('inventories',
                self.inventory_index._external_references(),
                self._pack_collection.inventory_index.combined_index),
            ]:
            missing = external_refs.difference(
                k for (idx, k, v, r) in
                index.iter_entries(external_refs))
            if missing:
                missing_items[index_name] = sorted(list(missing))
        if missing_items:
            from pprint import pformat
            raise errors.BzrCheckError(
                "Newly created pack file %r has delta references to "
                "items not in its repository:\n%s"
                % (self, pformat(missing_items)))

    def data_inserted(self):
        """True if data has been added to this pack."""
        return bool(self.get_revision_count() or
            self.inventory_index.key_count() or
            self.text_index.key_count() or
            self.signature_index.key_count())

    def finish(self):
        """Finish the new pack.

        This:

        - finalises the content
        - assigns a name (the md5 of the content, currently)
        - writes out the associated indices
        - renames the pack into place.
        - stores the index size tuple for the pack in the index_sizes
          attribute.
        """
        self._writer.end()
        if self._buffer[1]:
            self._write_data('', flush=True)
        self.name = self._hash.hexdigest()
        self._check_references()
        # write indices
        # XXX: It'd be better to write them all to temporary names, then
        # rename them all into place, so that the window when only some are
        # visible is smaller.  On the other hand none will be seen until
        # they're in the names list.
        self.index_sizes = [None, None, None, None]
        self._write_index('revision', self.revision_index, 'revision')
        self._write_index('inventory', self.inventory_index, 'inventory')
        self._write_index('text', self.text_index, 'file texts')
        self._write_index('signature', self.signature_index,
            'revision signatures')
        self.write_stream.close()
        # Note that this will clobber an existing pack with the same name,
        # without checking for hash collisions. While this is undesirable this
        # is something that can be rectified in a subsequent release. One way
        # to rectify it may be to leave the pack at the original name, writing
        # its pack-names entry as something like 'HASH: index-sizes
        # temporary-name'. Allocate that and check for collisions, if it is
        # collision free then rename it into place. If clients know this scheme
        # they can handle missing-file errors by:
        #  - try for HASH.pack
        #  - try for temporary-name
        #  - refresh the pack-list to see if the pack is now absent
        self.upload_transport.rename(self.random_name,
                '../packs/' + self.name + '.pack')
        self._state = 'finished'
        if 'pack' in debug.debug_flags:
            # XXX: size might be interesting?
            mutter('%s: create_pack: pack renamed into place: %s%s->%s%s t+%6.3fs',
                time.ctime(), self.upload_transport.base, self.random_name,
                self.pack_transport, self.name,
                time.time() - self.start_time)

    def flush(self):
        """Flush any current data."""
        if self._buffer[1]:
            bytes = ''.join(self._buffer[0])
            self.write_stream.write(bytes)
            self._hash.update(bytes)
            self._buffer[:] = [[], 0]

    def index_name(self, index_type, name):
        """Get the disk name of an index type for pack name 'name'."""
        return name + NewPack.index_definitions[index_type][0]

    def index_offset(self, index_type):
        """Get the position in an index_size array for a given index type."""
        return NewPack.index_definitions[index_type][1]

    def _replace_index_with_readonly(self, index_type):
        setattr(self, index_type + '_index',
            self.index_class(self.index_transport,
                self.index_name(index_type, self.name),
                self.index_sizes[self.index_offset(index_type)]))

    def set_write_cache_size(self, size):
        self._cache_limit = size

    def _write_index(self, index_type, index, label):
        """Write out an index.

        :param index_type: The type of index to write - e.g. 'revision'.
        :param index: The index object to serialise.
        :param label: What label to give the index e.g. 'revision'.
        """
        index_name = self.index_name(index_type, self.name)
        self.index_sizes[self.index_offset(index_type)] = \
            self.index_transport.put_file(index_name, index.finish(),
            mode=self._file_mode)
        if 'pack' in debug.debug_flags:
            # XXX: size might be interesting?
            mutter('%s: create_pack: wrote %s index: %s%s t+%6.3fs',
                time.ctime(), label, self.upload_transport.base,
                self.random_name, time.time() - self.start_time)
        # Replace the writable index on this object with a readonly,
        # presently unloaded index. We should alter
        # the index layer to make its finish() error if add_node is
        # subsequently used. RBC
        self._replace_index_with_readonly(index_type)


class AggregateIndex(object):
    """An aggregated index for the RepositoryPackCollection.

    AggregateIndex is responsible for managing the PackAccess object,
    Index-To-Pack mapping, and all indices list for a specific type of index
    such as 'revision index'.

    A CombinedIndex provides an index on a single key space built up
    from several on-disk indices.  The AggregateIndex builds on this
    to provide a knit access layer, and allows having up to one writable
    index within the collection.
    """
    # XXX: Probably 'can be written to' could/should be separated from 'acts
    # like a knit index' -- mbp 20071024

    def __init__(self, reload_func=None):
        """Create an AggregateIndex.

        :param reload_func: A function to call if we find we are missing an
            index. Should have the form reload_func() => True if the list of
            active pack files has changed.
        """
        self._reload_func = reload_func
        self.index_to_pack = {}
        self.combined_index = CombinedGraphIndex([], reload_func=reload_func)
        self.data_access = _DirectPackAccess(self.index_to_pack,
                                             reload_func=reload_func)
        self.add_callback = None

    def replace_indices(self, index_to_pack, indices):
        """Replace the current mappings with fresh ones.

        Eventually this should be replaced by incremental addition and
        removal of indices; it was added during refactoring of existing
        code.

        :param index_to_pack: A mapping from index objects to
            (transport, name) tuples for the pack file data.
        :param indices: A list of indices.
        """
        # refresh the revision pack map dict without replacing the instance.
        self.index_to_pack.clear()
        self.index_to_pack.update(index_to_pack)
        # XXX: API break - clearly a 'replace' method would be good?
        self.combined_index._indices[:] = indices
        # the current add nodes callback for the current writable index if
        # any.
        self.add_callback = None

    def add_index(self, index, pack):
        """Add index to the aggregate, which is an index for Pack pack.

        Future searches on the aggregate index will search this new index
        before all previously inserted indices.

        :param index: An Index for the pack.
        :param pack: A Pack instance.
        """
        # expose it to the index map
        self.index_to_pack[index] = pack.access_tuple()
        # put it at the front of the linear index list
        self.combined_index.insert_index(0, index)

    def add_writable_index(self, index, pack):
        """Add an index which is able to have data added to it.

        There can be at most one writable index at any time.  Any
        modifications made to the knit are put into this index.

        :param index: An index from the pack parameter.
        :param pack: A Pack instance.
        """
        if self.add_callback is not None:
            raise AssertionError(
                "%s already has a writable index through %s" %
                (self, self.add_callback))
        # allow writing: queue writes to a new index
        self.add_index(index, pack)
        # Updates the index to packs mapping as a side effect.
        self.data_access.set_writer(pack._writer, index, pack.access_tuple())
        self.add_callback = index.add_nodes
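    # Illustrative usage sketch (hypothetical objects): a pack collection
    # keeps one AggregateIndex per index type, with at most one writable
    # index - the one belonging to the pack currently being written:
    #
    #   agg = AggregateIndex()
    #   agg.add_index(existing_pack.revision_index, existing_pack)
    #   agg.add_writable_index(new_pack.revision_index, new_pack)
    #   # agg.combined_index now searches new_pack's index first, and
    #   # agg.add_callback routes new nodes into new_pack's builder.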

    def clear(self):
        """Reset all the aggregate data to nothing."""
        self.data_access.set_writer(None, None, (None, None))
        self.index_to_pack.clear()
        del self.combined_index._indices[:]
        self.add_callback = None

    def remove_index(self, index, pack):
        """Remove index from the indices used to answer queries.

        :param index: An index from the pack parameter.
        :param pack: A Pack instance.
        """
        del self.index_to_pack[index]
        self.combined_index._indices.remove(index)
        if (self.add_callback is not None and
            getattr(index, 'add_nodes', None) == self.add_callback):
            self.add_callback = None
            self.data_access.set_writer(None, None, (None, None))


class Packer(object):
    """Create a pack from packs."""

    def __init__(self, pack_collection, packs, suffix, revision_ids=None,
                 reload_func=None):
        """Create a Packer.

        :param pack_collection: A RepositoryPackCollection object where the
            new pack is being written to.
        :param packs: The packs to combine.
        :param suffix: The suffix to use on the temporary files for the pack.
        :param revision_ids: Revision ids to limit the pack to.
        :param reload_func: A function to call if a pack file/index goes
            missing. The side effect of calling this function should be to
            update self.packs. See also AggregateIndex
        """
        self.packs = packs
        self.suffix = suffix
        self.revision_ids = revision_ids
        # The pack object we are creating.
        self.new_pack = None
        self._pack_collection = pack_collection
        self._reload_func = reload_func
        # The index layer keys for the revisions being copied. None for 'all
        # revisions'.
        self._revision_keys = None
        # What text keys to copy. None for 'all texts'. This is set by
        # _copy_inventory_texts
        self._text_filter = None
        self._extra_init()

    def _extra_init(self):
        """A template hook to allow extending the constructor trivially."""

    def _pack_map_and_index_list(self, index_attribute):
        """Convert a list of packs to an index pack map and index list.

        :param index_attribute: The attribute that the desired index is found
            on.
        :return: A tuple (map, list) where map contains the dict from
            index:pack_tuple, and list contains the indices in the preferred
            access order.
        """
        indices = []
        pack_map = {}
        for pack_obj in self.packs:
            index = getattr(pack_obj, index_attribute)
            indices.append(index)
            pack_map[index] = pack_obj
        return pack_map, indices

    def _index_contents(self, indices, key_filter=None):
        """Get an iterable of the index contents from a pack_map.

        :param indices: The list of indices to query
        :param key_filter: An optional filter to limit the keys returned.
        """
        all_index = CombinedGraphIndex(indices)
        if key_filter is None:
            return all_index.iter_all_entries()
        else:
            return all_index.iter_entries(key_filter)
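    # Illustrative sketch (hypothetical packs): with self.packs = [p1, p2],
    #   pack_map, indices = self._pack_map_and_index_list('revision_index')
    # yields pack_map == {p1.revision_index: p1, p2.revision_index: p2} and
    # indices == [p1.revision_index, p2.revision_index], so that
    # self._index_contents(indices) iterates both indices as one keyspace.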

    def pack(self, pb=None):
        """Create a new pack by reading data from other packs.

        This does little more than a bulk copy of data. One key difference
        is that data with the same item key across multiple packs is elided
        from the output. The new pack is written into the current pack store
        along with its indices, and the name added to the pack names. The
        source packs are not altered and are not required to be in the current
        pack collection.

        :param pb: An optional progress bar to use. A nested bar is created if
            this is None.
        :return: A Pack object, or None if nothing was copied.
        """
        # open a pack - using the same name as the last temporary file
        # - which has already been flushed, so it's safe.
        # XXX: - duplicate code warning with start_write_group; fix before
        #      considering 'done'.
        if self._pack_collection._new_pack is not None:
            raise errors.BzrError('call to %s.pack() while another pack is'
                                  ' being written.'
                                  % (self.__class__.__name__,))
        if self.revision_ids is not None:
            if len(self.revision_ids) == 0:
                # silly fetch request.
                return None
            else:
                self.revision_ids = frozenset(self.revision_ids)
                self.revision_keys = frozenset((revid,) for revid in
                    self.revision_ids)
        if pb is None:
            self.pb = ui.ui_factory.nested_progress_bar()
        else:
            self.pb = pb
        try:
            return self._create_pack_from_packs()
        finally:
            if pb is None:
                self.pb.finished()

    def open_pack(self):
        """Open a pack for the pack we are creating."""
        return NewPack(self._pack_collection, upload_suffix=self.suffix,
            file_mode=self._pack_collection.repo.bzrdir._get_file_mode())

    def _update_pack_order(self, entries, index_to_pack_map):
        """Determine how we want our packs to be ordered.

        This changes the sort order of the self.packs list so that packs unused
        by 'entries' will be at the end of the list, so that future requests
        can avoid probing them.  Used packs will be at the front of the
        self.packs list, in the order of their first use in 'entries'.

        :param entries: A list of (index, ...) tuples
        :param index_to_pack_map: A mapping from index objects to pack objects.
        """
        packs = []
        seen_indexes = set()
        for entry in entries:
            index = entry[0]
            if index not in seen_indexes:
                packs.append(index_to_pack_map[index])
                seen_indexes.add(index)
        if len(packs) == len(self.packs):
            if 'pack' in debug.debug_flags:
                mutter('Not changing pack list, all packs used.')
            return
        seen_packs = set(packs)
        for pack in self.packs:
            if pack not in seen_packs:
                packs.append(pack)
                seen_packs.add(pack)
        if 'pack' in debug.debug_flags:
            old_names = [p.access_tuple()[1] for p in self.packs]
            new_names = [p.access_tuple()[1] for p in packs]
            mutter('Reordering packs\nfrom: %s\n to: %s',
                   old_names, new_names)
        self.packs = packs
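    # Illustrative sketch (hypothetical packs): given
    # self.packs == [p1, p2, p3] and entries touching only p3's index and
    # then p1's, the list becomes [p3, p1, p2] - used packs first, in order
    # of first use, unused packs at the end.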

    def _copy_revision_texts(self):
        """Copy revision data to the new pack."""
        # select revisions
        if self.revision_ids:
            revision_keys = [(revision_id,) for revision_id in self.revision_ids]
        else:
            revision_keys = None
        # select revision keys
        revision_index_map, revision_indices = self._pack_map_and_index_list(
            'revision_index')
        revision_nodes = self._index_contents(revision_indices, revision_keys)
        revision_nodes = list(revision_nodes)
        self._update_pack_order(revision_nodes, revision_index_map)
        # copy revision keys and adjust values
        self.pb.update("Copying revision texts", 1)
        total_items, readv_group_iter = self._revision_node_readv(revision_nodes)
        list(self._copy_nodes_graph(revision_index_map, self.new_pack._writer,
            self.new_pack.revision_index, readv_group_iter, total_items))
        if 'pack' in debug.debug_flags:
            mutter('%s: create_pack: revisions copied: %s%s %d items t+%6.3fs',
                time.ctime(), self._pack_collection._upload_transport.base,
                self.new_pack.random_name,
                self.new_pack.revision_index.key_count(),
                time.time() - self.new_pack.start_time)
        self._revision_keys = revision_keys

    def _copy_inventory_texts(self):
        """Copy the inventory texts to the new pack.

        self._revision_keys is used to determine what inventories to copy.

        Sets self._text_filter appropriately.
        """
        # select inventory keys
        inv_keys = self._revision_keys # currently the same keyspace, and note that
        # querying for keys here could introduce a bug where an inventory item
        # is missed, so do not change it to query separately without cross
        # checking like the text key check below.
        inventory_index_map, inventory_indices = self._pack_map_and_index_list(
            'inventory_index')
        inv_nodes = self._index_contents(inventory_indices, inv_keys)
        # copy inventory keys and adjust values
        # XXX: Should be a helper function to allow different inv representation
        # at this point.
        self.pb.update("Copying inventory texts", 2)
        total_items, readv_group_iter = self._least_readv_node_readv(inv_nodes)
        # Only grab the output lines if we will be processing them
        output_lines = bool(self.revision_ids)
        inv_lines = self._copy_nodes_graph(inventory_index_map,
            self.new_pack._writer, self.new_pack.inventory_index,
            readv_group_iter, total_items, output_lines=output_lines)
        if self.revision_ids:
            self._process_inventory_lines(inv_lines)
        else:
            # eat the iterator to cause it to execute.
            list(inv_lines)
            self._text_filter = None
        if 'pack' in debug.debug_flags:
            mutter('%s: create_pack: inventories copied: %s%s %d items t+%6.3fs',
                time.ctime(), self._pack_collection._upload_transport.base,
                self.new_pack.random_name,
                self.new_pack.inventory_index.key_count(),
                time.time() - self.new_pack.start_time)

    def _copy_text_texts(self):
        # select text keys
        text_index_map, text_nodes = self._get_text_nodes()
        if self._text_filter is not None:
            # We could return the keys copied as part of the return value from
            # _copy_nodes_graph but this doesn't work all that well with the
            # need to get line output too, so we check separately, and as we're
            # going to buffer everything anyway, we check beforehand, which
            # saves reading knit data over the wire when we know there are
            # missing records.
            text_nodes = set(text_nodes)
            present_text_keys = set(_node[1] for _node in text_nodes)
            missing_text_keys = set(self._text_filter) - present_text_keys
            if missing_text_keys:
                # TODO: raise a specific error that can handle many missing
                # keys.
                a_missing_key = missing_text_keys.pop()
                raise errors.RevisionNotPresent(a_missing_key[1],
                    a_missing_key[0])
        # copy text keys and adjust values
        self.pb.update("Copying content texts", 3)
        total_items, readv_group_iter = self._least_readv_node_readv(text_nodes)
        list(self._copy_nodes_graph(text_index_map, self.new_pack._writer,
            self.new_pack.text_index, readv_group_iter, total_items))
        self._log_copied_texts()

    def _create_pack_from_packs(self):
        self.pb.update("Opening pack", 0, 5)
        self.new_pack = self.open_pack()
        new_pack = self.new_pack
        # buffer data - we won't be reading-back during the pack creation and
        # this makes a significant difference on sftp pushes.
        new_pack.set_write_cache_size(1024*1024)
        if 'pack' in debug.debug_flags:
            plain_pack_list = ['%s%s' % (a_pack.pack_transport.base, a_pack.name)
                for a_pack in self.packs]
            if self.revision_ids is not None:
                rev_count = len(self.revision_ids)
            else:
                rev_count = 'all'
            mutter('%s: create_pack: creating pack from source packs: '
                '%s%s %s revisions wanted %s t=0',
                time.ctime(), self._pack_collection._upload_transport.base, new_pack.random_name,
                plain_pack_list, rev_count)
        self._copy_revision_texts()
        self._copy_inventory_texts()
        self._copy_text_texts()
        # select signature keys
        signature_filter = self._revision_keys # same keyspace
        signature_index_map, signature_indices = self._pack_map_and_index_list(
            'signature_index')
        signature_nodes = self._index_contents(signature_indices,
            signature_filter)
        # copy signature keys and adjust values
        self.pb.update("Copying signature texts", 4)
        self._copy_nodes(signature_nodes, signature_index_map, new_pack._writer,
            new_pack.signature_index)
        if 'pack' in debug.debug_flags:
            mutter('%s: create_pack: revision signatures copied: %s%s %d items t+%6.3fs',
                time.ctime(), self._pack_collection._upload_transport.base, new_pack.random_name,
                new_pack.signature_index.key_count(),
                time.time() - new_pack.start_time)
        new_pack._check_references()
        if not self._use_pack(new_pack):
            new_pack.abort()
            return None
        self.pb.update("Finishing pack", 5)
        new_pack.finish()
        self._pack_collection.allocate(new_pack)
        return new_pack

    def _copy_nodes(self, nodes, index_map, writer, write_index):
        """Copy knit nodes between packs with no graph references."""
        pb = ui.ui_factory.nested_progress_bar()
        try:
            return self._do_copy_nodes(nodes, index_map, writer,
                write_index, pb)
        finally:
            pb.finished()

    def _do_copy_nodes(self, nodes, index_map, writer, write_index, pb):
        # for record verification
        knit = KnitVersionedFiles(None, None)
        # plan a readv on each source pack:
        # group by pack
        nodes = sorted(nodes)
        # how to map this into knit.py - or knit.py into this?
        # we don't want the typical knit logic, we want grouping by pack
        # at this point - perhaps a helper library for the following code
        # duplication points?
        request_groups = {}
        for index, key, value in nodes:
            if index not in request_groups:
                request_groups[index] = []
            request_groups[index].append((key, value))
        record_index = 0
        pb.update("Copied record", record_index, len(nodes))
        for index, items in request_groups.iteritems():
            pack_readv_requests = []
            for key, value in items:
                # ---- KnitGraphIndex.get_position
                bits = value[1:].split(' ')
                offset, length = int(bits[0]), int(bits[1])
                pack_readv_requests.append((offset, length, (key, value[0])))
            # linear scan up the pack
            pack_readv_requests.sort()
            # copy the data
            pack_obj = index_map[index]
            transport, path = pack_obj.access_tuple()
            try:
                reader = pack.make_readv_reader(transport, path,
                    [offset[0:2] for offset in pack_readv_requests])
            except errors.NoSuchFile:
                if self._reload_func is not None:
                    self._reload_func()
                raise
            for (names, read_func), (_1, _2, (key, eol_flag)) in \
                izip(reader.iter_records(), pack_readv_requests):
                raw_data = read_func(None)
                # check the header only
                df, _ = knit._parse_record_header(key, raw_data)
                df.close()
                pos, size = writer.add_bytes_record(raw_data, names)
                write_index.add_node(key, eol_flag + "%d %d" % (pos, size))
                pb.update("Copied record", record_index)
                record_index += 1

    def _copy_nodes_graph(self, index_map, writer, write_index,
        readv_group_iter, total_items, output_lines=False):
        """Copy knit nodes between packs.

        :param output_lines: Return lines present in the copied data as
            an iterator of line,version_id.
        """
        pb = ui.ui_factory.nested_progress_bar()
        try:
            for result in self._do_copy_nodes_graph(index_map, writer,
                write_index, output_lines, pb, readv_group_iter, total_items):
                yield result
        except Exception:
            # Python 2.4 does not permit try:finally: in a generator.
            pb.finished()
            raise
        else:
            pb.finished()

    def _do_copy_nodes_graph(self, index_map, writer, write_index,
        output_lines, pb, readv_group_iter, total_items):
        # for record verification
        knit = KnitVersionedFiles(None, None)
        # for line extraction when requested (inventories only)
        if output_lines:
            factory = KnitPlainFactory()
        record_index = 0
        pb.update("Copied record", record_index, total_items)
        for index, readv_vector, node_vector in readv_group_iter:
            # copy the data
            pack_obj = index_map[index]
            transport, path = pack_obj.access_tuple()
            try:
                reader = pack.make_readv_reader(transport, path, readv_vector)
            except errors.NoSuchFile:
                if self._reload_func is not None:
                    self._reload_func()
                raise
            for (names, read_func), (key, eol_flag, references) in \
                izip(reader.iter_records(), node_vector):
                raw_data = read_func(None)
                if output_lines:
                    # read the entire thing
                    content, _ = knit._parse_record(key[-1], raw_data)
                    if len(references[-1]) == 0:
                        line_iterator = factory.get_fulltext_content(content)
                    else:
                        line_iterator = factory.get_linedelta_content(content)
                    for line in line_iterator:
                        yield line, key
                else:
                    # check the header only
                    df, _ = knit._parse_record_header(key, raw_data)
                    df.close()
                pos, size = writer.add_bytes_record(raw_data, names)
                write_index.add_node(key, eol_flag + "%d %d" % (pos, size), references)
                pb.update("Copied record", record_index)
                record_index += 1

    def _get_text_nodes(self):
        text_index_map, text_indices = self._pack_map_and_index_list(
            'text_index')
        return text_index_map, self._index_contents(text_indices,
            self._text_filter)

    def _least_readv_node_readv(self, nodes):
        """Generate request groups for nodes using the least readv's.

        :param nodes: An iterable of graph index nodes.
        :return: Total node count and an iterator of the data needed to perform
            readvs to obtain the data for nodes. Each item yielded by the
            iterator is a tuple with:
            index, readv_vector, node_vector. readv_vector is a list ready to
            hand to the transport readv method, and node_vector is a list of
            (key, eol_flag, references) for the node retrieved by the
            matching readv_vector.
        """
        # group by pack so we do one readv per pack
        nodes = sorted(nodes)
        total = len(nodes)
        request_groups = {}
        for index, key, value, references in nodes:
            if index not in request_groups:
                request_groups[index] = []
            request_groups[index].append((key, value, references))
        result = []
        for index, items in request_groups.iteritems():
            pack_readv_requests = []
            for key, value, references in items:
                # ---- KnitGraphIndex.get_position
                bits = value[1:].split(' ')
                offset, length = int(bits[0]), int(bits[1])
                pack_readv_requests.append(
                    ((offset, length), (key, value[0], references)))
            # linear scan up the pack to maximum range combining.
            pack_readv_requests.sort()
            # split out the readv and the node data.
            pack_readv = [readv for readv, node in pack_readv_requests]
            node_vector = [node for readv, node in pack_readv_requests]
            result.append((index, pack_readv, node_vector))
        return total, result
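    # Illustrative sketch (hypothetical nodes): given index entries from two
    # packs,
    #   (idx_a, ('k1',), 'N0 10', refs1), (idx_a, ('k2',), 'N10 20', refs2),
    #   (idx_b, ('k3',), 'N0 5', refs3)
    # the result is one readv group per pack (group order is not
    # significant):
    #   [(idx_a, [(0, 10), (10, 20)], [(('k1',), 'N', refs1),
    #                                  (('k2',), 'N', refs2)]),
    #    (idx_b, [(0, 5)], [(('k3',), 'N', refs3)])]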

    def _log_copied_texts(self):
        if 'pack' in debug.debug_flags:
            mutter('%s: create_pack: file texts copied: %s%s %d items t+%6.3fs',
                time.ctime(), self._pack_collection._upload_transport.base,
                self.new_pack.random_name,
                self.new_pack.text_index.key_count(),
                time.time() - self.new_pack.start_time)

    def _process_inventory_lines(self, inv_lines):
        """Use up the inv_lines generator and set up a text key filter."""
        repo = self._pack_collection.repo
        fileid_revisions = repo._find_file_ids_from_xml_inventory_lines(
            inv_lines, self.revision_keys)
        text_filter = []
        for fileid, file_revids in fileid_revisions.iteritems():
            text_filter.extend([(fileid, file_revid) for file_revid in file_revids])
        self._text_filter = text_filter

    def _revision_node_readv(self, revision_nodes):
        """Return the total revisions and the readv's to issue.

        :param revision_nodes: The revision index contents for the packs being
            incorporated into the new pack.
        :return: As per _least_readv_node_readv.
        """
        return self._least_readv_node_readv(revision_nodes)

    def _use_pack(self, new_pack):
        """Return True if new_pack should be used.

        :param new_pack: The pack that has just been created.
        :return: True if the pack should be used.
        """
        return new_pack.data_inserted()


class OptimisingPacker(Packer):
    """A packer which spends more time to create better disk layouts."""

    def _revision_node_readv(self, revision_nodes):
        """Return the total revisions and the readv's to issue.

        This sort places revisions in topological order with the ancestors
        after the children.

        :param revision_nodes: The revision index contents for the packs being
            incorporated into the new pack.
        :return: As per _least_readv_node_readv.
        """
        # build an ancestors dict
        ancestors = {}
        by_key = {}
        for index, key, value, references in revision_nodes:
            ancestors[key] = references[0]
            by_key[key] = (index, value, references)
        order = tsort.topo_sort(ancestors)
        total = len(order)
        # Single IO is pathological, but it will work as a starting point.
        requests = []
        for key in reversed(order):
            index, value, references = by_key[key]
            # ---- KnitGraphIndex.get_position
            bits = value[1:].split(' ')
            offset, length = int(bits[0]), int(bits[1])
            requests.append(
                (index, [(offset, length)], [(key, value[0], references)]))
        # TODO: combine requests in the same index that are in ascending order.
        return total, requests
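    # Illustrative sketch: for an ancestry A <- B <- C, tsort.topo_sort
    # yields [A, B, C] (parents first); iterating reversed(order) emits
    # C, B, A, so each revision's readv request is issued before those of
    # its ancestors, one single-range request per revision.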

    def open_pack(self):
        """Open a pack for the pack we are creating."""
        new_pack = super(OptimisingPacker, self).open_pack()
        # Turn on the optimization flags for all the index builders.
        new_pack.revision_index.set_optimize(for_size=True)
        new_pack.inventory_index.set_optimize(for_size=True)
        new_pack.text_index.set_optimize(for_size=True)
        new_pack.signature_index.set_optimize(for_size=True)
        return new_pack


class ReconcilePacker(Packer):
    """A packer which regenerates indices etc as it copies.

    This is used by ``bzr reconcile`` to cause parent text pointers to be
    regenerated.
    """

    def _extra_init(self):
        self._data_changed = False

    def _process_inventory_lines(self, inv_lines):
        """Generate a text key reference map for reconciling against."""
        repo = self._pack_collection.repo
        refs = repo._find_text_key_references_from_xml_inventory_lines(
            inv_lines)
        self._text_refs = refs
        # during reconcile we:
        #  - convert unreferenced texts to full texts
        #  - correct texts which reference a text not copied to be full texts
        #  - copy all others as-is but with corrected parents.
        #  - so at this point we don't know enough to decide what becomes a full
        #    text.
        self._text_filter = None

    def _copy_text_texts(self):
        """Generate what texts we should have and then copy."""
        self.pb.update("Copying content texts", 3)
        # we have three major tasks here:
        # 1) generate the ideal index
        repo = self._pack_collection.repo
        ancestors = dict([(key[0], tuple(ref[0] for ref in refs[0])) for
            _1, key, _2, refs in
            self.new_pack.revision_index.iter_all_entries()])
        ideal_index = repo._generate_text_key_index(self._text_refs, ancestors)
        # 2) generate a text_nodes list that contains all the deltas that can
        #    be used as-is, with corrected parents.
        ok_nodes = []
        bad_texts = []
        discarded_nodes = []
        NULL_REVISION = _mod_revision.NULL_REVISION
        text_index_map, text_nodes = self._get_text_nodes()
        for node in text_nodes:
            # node is (index, key, value, refs)
            try:
                ideal_parents = tuple(ideal_index[node[1]])
            except KeyError:
                discarded_nodes.append(node)
                self._data_changed = True
            else:
                if ideal_parents == (NULL_REVISION,):
                    ideal_parents = ()
                if ideal_parents == node[3][0]:
                    # no change needed.
                    ok_nodes.append(node)
                elif ideal_parents[0:1] == node[3][0][0:1]:
                    # the left most parent is the same, or there are no parents
                    # today. Either way, we can preserve the representation as
                    # long as we change the refs to be inserted.
                    ok_nodes.append((node[0], node[1], node[2],
                        (ideal_parents, node[3][1])))
                    self._data_changed = True
                else:
                    # Reinsert this text completely
                    bad_texts.append((node[1], ideal_parents))
                    self._data_changed = True
        # we're finished with some data.
        del ideal_index
        del text_nodes
        # 3) bulk copy the ok data
        total_items, readv_group_iter = self._least_readv_node_readv(ok_nodes)
        list(self._copy_nodes_graph(text_index_map, self.new_pack._writer,
            self.new_pack.text_index, readv_group_iter, total_items))
        # 4) adhoc copy all the other texts.
        # We have to topologically insert all texts otherwise we can fail to
        # reconcile when parts of a single delta chain are preserved intact,
        # and other parts are not. E.g. Discarded->d1->d2->d3. d1 will be
        # reinserted, and if d3 has incorrect parents it will also be
        # reinserted. If we insert d3 first, d2 is present (as it was bulk
        # copied), so we will try to delta, but d2 is not currently able to be
        # extracted because its basis d1 is not present. Topologically sorting
        # addresses this. The following generates a sort for all the texts that
        # are being inserted without having to reference the entire text key
        # space (we only topo sort the revisions, which is smaller).
        topo_order = tsort.topo_sort(ancestors)
        rev_order = dict(zip(topo_order, range(len(topo_order))))
        bad_texts.sort(key=lambda key: rev_order[key[0][1]])
        transaction = repo.get_transaction()
        file_id_index = GraphIndexPrefixAdapter(
            self.new_pack.text_index,
            ('blank', ), 1,
            add_nodes_callback=self.new_pack.text_index.add_nodes)
        data_access = _DirectPackAccess(
            {self.new_pack.text_index: self.new_pack.access_tuple()})
        data_access.set_writer(self.new_pack._writer, self.new_pack.text_index,
            self.new_pack.access_tuple())
        output_texts = KnitVersionedFiles(
            _KnitGraphIndex(self.new_pack.text_index,
                add_callback=self.new_pack.text_index.add_nodes,
                deltas=True, parents=True, is_locked=repo.is_locked),
            data_access=data_access, max_delta_chain=200)
        for key, parent_keys in bad_texts:
            # We refer to the new pack to delta data being output.
            # A possible improvement would be to catch errors on short reads
            # and only flush then.
            self.new_pack.flush()
            parents = []
            for parent_key in parent_keys:
                if parent_key[0] != key[0]:
                    # Graph parents must match the fileid
                    raise errors.BzrError('Mismatched key parent %r:%r' %
                        (key, parent_keys))
                parents.append(parent_key[1])
            text_lines = osutils.split_lines(repo.texts.get_record_stream(
                [key], 'unordered', True).next().get_bytes_as('fulltext'))
            output_texts.add_lines(key, parent_keys, text_lines,
                random_id=True, check_content=False)
        # 5) check that nothing inserted has a reference outside the keyspace.
        missing_text_keys = self.new_pack.text_index._external_references()
        if missing_text_keys:
            raise errors.BzrCheckError('Reference to missing compression parents %r'
                % (missing_text_keys,))
        self._log_copied_texts()

    def _use_pack(self, new_pack):
        """Override _use_pack to check for reconcile having changed content."""
        # XXX: we might be better checking this at the copy time.
        original_inventory_keys = set()
        inv_index = self._pack_collection.inventory_index.combined_index
        for entry in inv_index.iter_all_entries():
            original_inventory_keys.add(entry[1])
        new_inventory_keys = set()
        for entry in new_pack.inventory_index.iter_all_entries():
            new_inventory_keys.add(entry[1])
        if new_inventory_keys != original_inventory_keys:
            self._data_changed = True
        return new_pack.data_inserted() and self._data_changed


class RepositoryPackCollection(object):
    """Management of packs within a repository.

    :ivar _names: map of {pack_name: (index_size,)}
    """

    def __init__(self, repo, transport, index_transport, upload_transport,
                 pack_transport, index_builder_class, index_class):
        """Create a new RepositoryPackCollection.

        :param transport: Addresses the repository base directory
            (typically .bzr/repository/).
        :param index_transport: Addresses the directory containing indices.
        :param upload_transport: Addresses the directory into which packs are written
            while they're being created.
        :param pack_transport: Addresses the directory of existing complete packs.
        :param index_builder_class: The index builder class to use.
        :param index_class: The index class to use.
        """
        self.repo = repo
        self.transport = transport
        self._index_transport = index_transport
        self._upload_transport = upload_transport
        self._pack_transport = pack_transport
        self._index_builder_class = index_builder_class
        self._index_class = index_class
        self._suffix_offsets = {'.rix': 0, '.iix': 1, '.tix': 2, '.six': 3}
        self.packs = []
        # name:Pack mapping
        self._packs_by_name = {}
        # the previous pack-names content
        self._packs_at_load = None
        # when a pack is being created by this object, the state of that pack.
        self._new_pack = None
        # aggregated revision index data
        self.revision_index = AggregateIndex(self.reload_pack_names)
        self.inventory_index = AggregateIndex(self.reload_pack_names)
        self.text_index = AggregateIndex(self.reload_pack_names)
        self.signature_index = AggregateIndex(self.reload_pack_names)

    def add_pack_to_memory(self, pack):
        """Make a Pack object available to the repository to satisfy queries.

        :param pack: A Pack object.
        """
        if pack.name in self._packs_by_name:
            raise AssertionError()
        self.packs.append(pack)
        self._packs_by_name[pack.name] = pack
        self.revision_index.add_index(pack.revision_index, pack)
        self.inventory_index.add_index(pack.inventory_index, pack)
        self.text_index.add_index(pack.text_index, pack)
        self.signature_index.add_index(pack.signature_index, pack)

    def all_packs(self):
        """Return a list of all the Pack objects this repository has.

        Note that an in-progress pack being created is not returned.

        :return: A list of Pack objects for all the packs in the repository.
        """
        result = []
        for name in self.names():
            result.append(self.get_pack_by_name(name))
        return result

    def autopack(self):
        """Pack the pack collection incrementally.

        This will not attempt global reorganisation or recompression,
        rather it will just ensure that the total number of packs does
        not grow without bound. It uses the _max_pack_count method to
        determine if autopacking is needed, and the pack_distribution
        method to determine the number of revisions in each pack.

        If autopacking takes place then the packs name collection will have
        been flushed to disk - packing requires updating the name collection
        in synchronisation with certain steps. Otherwise the names collection
        is not flushed.

        :return: True if packing took place.
        """
        while True:
            try:
                return self._do_autopack()
            except errors.RetryAutopack, e:
                # If we get a RetryAutopack exception, we should abort the
                # current action, and retry.
                pass

    def _do_autopack(self):
        # XXX: Should not be needed when the management of indices is sane.
        total_revisions = self.revision_index.combined_index.key_count()
        total_packs = len(self._names)
        if self._max_pack_count(total_revisions) >= total_packs:
            return False
        # XXX: the following may want to be a class, to pack with a given
        # policy.
        # determine which packs need changing
        pack_distribution = self.pack_distribution(total_revisions)
        existing_packs = []
        for pack in self.all_packs():
            revision_count = pack.get_revision_count()
            if revision_count == 0:
                # revision-less packs are not generated by normal operation,
                # only by operations like sign-my-commits, and thus will not
                # tend to grow rapidly or without bound like commit containing
                # packs do - leave them alone as packing them really should
                # group their data with the relevant commit, and that may
                # involve rewriting ancient history - which autopack tries to
                # avoid. Alternatively we could not group the data but treat
                # each of these as having a single revision, and thus add
                # one revision for each to the total revision count, to get
                # a matching distribution.
                continue
            existing_packs.append((revision_count, pack))
        pack_operations = self.plan_autopack_combinations(
            existing_packs, pack_distribution)
        num_new_packs = len(pack_operations)
        num_old_packs = sum([len(po[1]) for po in pack_operations])
        num_revs_affected = sum([po[0] for po in pack_operations])
        mutter('Auto-packing repository %s, which has %d pack files, '
            'containing %d revisions. Packing %d files into %d affecting %d'
            ' revisions', self, total_packs, total_revisions, num_old_packs,
            num_new_packs, num_revs_affected)
        self._execute_pack_operations(pack_operations,
                                      reload_func=self._restart_autopack)
        return True

    def _execute_pack_operations(self, pack_operations, _packer_class=Packer,
                                 reload_func=None):
        """Execute a series of pack operations.

        :param pack_operations: A list of [revision_count, packs_to_combine].
        :param _packer_class: The class of packer to use (default: Packer).
        :return: None.
        """
        for revision_count, packs in pack_operations:
            # we may have no-ops from the setup logic
            if len(packs) == 0:
                continue
            packer = _packer_class(self, packs, '.autopack',
                                   reload_func=reload_func)
            try:
                packer.pack()
            except errors.RetryWithNewPacks:
                # An exception is propagating out of this context, make sure
                # this packer has cleaned up. Packer() doesn't set its new_pack
                # state into the RepositoryPackCollection object, so we only
                # have access to it directly here.
                if packer.new_pack is not None:
                    packer.new_pack.abort()
                raise
            for pack in packs:
                self._remove_pack_from_memory(pack)
        # record the newly available packs and stop advertising the old
        # packs
        self._save_pack_names(clear_obsolete_packs=True)
        # Move the old packs out of the way now they are no longer referenced.
        for revision_count, packs in pack_operations:
            self._obsolete_packs(packs)

    def lock_names(self):
        """Acquire the mutex around the pack-names index.

        This cannot be used in the middle of a read-only transaction on the
        repository.
        """
        self.repo.control_files.lock_write()

    def pack(self):
        """Pack the pack collection totally."""
        self.ensure_loaded()
        total_packs = len(self._names)
        if total_packs < 2:
            # This is arguably wrong because we might not be optimal, but for
            # now let's leave it in. (e.g. reconcile -> one pack. But not
            # normal operation)
            return
        total_revisions = self.revision_index.combined_index.key_count()
        # XXX: the following may want to be a class, to pack with a given
        # policy.
        mutter('Packing repository %s, which has %d pack files, '
            'containing %d revisions into 1 pack.', self, total_packs,
            total_revisions)
        # determine which packs need changing
        pack_distribution = [1]
        pack_operations = [[0, []]]
        for pack in self.all_packs():
            pack_operations[-1][0] += pack.get_revision_count()
            pack_operations[-1][1].append(pack)
        self._execute_pack_operations(pack_operations, OptimisingPacker)

    def plan_autopack_combinations(self, existing_packs, pack_distribution):
        """Plan a pack operation.

        :param existing_packs: The packs to pack. (A list of (revcount, Pack)
            tuples).
        :param pack_distribution: A list with the number of revisions desired
            in each pack.
        """
        if len(existing_packs) <= len(pack_distribution):
            return []
        existing_packs.sort(reverse=True)
        pack_operations = [[0, []]]
        # plan out what packs to keep, and what to reorganise
        while len(existing_packs):
            # take the largest pack, and if it's less than the head of the
            # distribution chart we will include its contents in the new pack
            # for that position. If it's larger, we remove its size from the
            # distribution chart
            next_pack_rev_count, next_pack = existing_packs.pop(0)
            if next_pack_rev_count >= pack_distribution[0]:
                # this is already packed 'better' than this, so we can
                # not waste time packing it.
                while next_pack_rev_count > 0:
                    next_pack_rev_count -= pack_distribution[0]
                    if next_pack_rev_count >= 0:
                        # more to spread out.
                        del pack_distribution[0]
                    else:
                        # didn't use that entire bucket up
                        pack_distribution[0] = -next_pack_rev_count
            else:
                # add the revisions we're going to add to the next output pack
                pack_operations[-1][0] += next_pack_rev_count
                # allocate this pack to the next pack sub operation
                pack_operations[-1][1].append(next_pack)
                if pack_operations[-1][0] >= pack_distribution[0]:
                    # this pack is used up, shift left.
                    del pack_distribution[0]
                    pack_operations.append([0, []])
        # Now that we know which pack files we want to move, shove them all
        # into a single pack file.
        final_rev_count = 0
        final_pack_list = []
        for num_revs, pack_files in pack_operations:
            final_rev_count += num_revs
            final_pack_list.extend(pack_files)
        if len(final_pack_list) == 1:
            raise AssertionError('We somehow generated an autopack with a'
                ' single pack file being moved.')
        return [[final_rev_count, final_pack_list]]
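    # Illustrative worked example: eleven single-revision packs give
    # _max_pack_count(11) == 2 < 11 packs, so autopack triggers with
    # pack_distribution(11) == [10, 1]. The first ten packs fill the
    # 10-bucket and are combined; the eleventh exactly consumes the
    # 1-bucket and is left alone. The plan is [[10, [p1, ..., p10]]].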

    def ensure_loaded(self):
        # NB: if you see an assertion error here, it's probably access against
        # an unlocked repo. Naughty.
        if not self.repo.is_locked():
            raise errors.ObjectNotLocked(self.repo)
        if self._names is None:
            self._names = {}
            self._packs_at_load = set()
            for index, key, value in self._iter_disk_pack_index():
                name = key[0]
                self._names[name] = self._parse_index_sizes(value)
                self._packs_at_load.add((key, value))
        # populate all the metadata.
        self.all_packs()

    def _parse_index_sizes(self, value):
        """Parse a string of index sizes."""
        return tuple([int(digits) for digits in value.split(' ')])
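    # Illustrative sketch: each pack-names value stores the four index
    # lengths in .rix/.iix/.tix/.six order (see _suffix_offsets), so
    #   self._parse_index_sizes('1234 56 78 9') == (1234, 56, 78, 9)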

    def get_pack_by_name(self, name):
        """Get a Pack object by name.

        :param name: The name of the pack - e.g. '123456'
        :return: A Pack object.
        """
        try:
            return self._packs_by_name[name]
        except KeyError:
            rev_index = self._make_index(name, '.rix')
            inv_index = self._make_index(name, '.iix')
            txt_index = self._make_index(name, '.tix')
            sig_index = self._make_index(name, '.six')
            result = ExistingPack(self._pack_transport, name, rev_index,
                inv_index, txt_index, sig_index)
            self.add_pack_to_memory(result)
            return result

    def allocate(self, a_new_pack):
        """Allocate name in the list of packs.

        :param a_new_pack: A NewPack instance to be added to the collection of
            packs for this repository.
        """
        self.ensure_loaded()
        if a_new_pack.name in self._names:
            raise errors.BzrError(
                'Pack %r already exists in %s' % (a_new_pack.name, self))
        self._names[a_new_pack.name] = tuple(a_new_pack.index_sizes)
        self.add_pack_to_memory(a_new_pack)

    def _iter_disk_pack_index(self):
        """Iterate over the contents of the pack-names index.

        This is used when loading the list from disk, and before writing to
        detect updates from others during our write operation.

        :return: An iterator of the index contents.
        """
        return self._index_class(self.transport, 'pack-names', None
                ).iter_all_entries()

    def _make_index(self, name, suffix):
        size_offset = self._suffix_offsets[suffix]
        index_name = name + suffix
        index_size = self._names[name][size_offset]
        return self._index_class(
            self._index_transport, index_name, index_size)

    def _max_pack_count(self, total_revisions):
        """Return the maximum number of packs to use for total revisions.

        :param total_revisions: The total number of revisions in the
            repository.
        """
        if not total_revisions:
            return 1
        digits = str(total_revisions)
        result = 0
        for digit in digits:
            result += int(digit)
        return result

    def names(self):
        """Provide an order to the underlying names."""
        return sorted(self._names.keys())

    def _obsolete_packs(self, packs):
        """Move a number of packs which have been obsoleted out of the way.

        Each pack and its associated indices are moved out of the way.

        Note: for correctness this function should only be called after a new
        pack names index has been written without these pack names, and with
        the names of packs that contain the data previously available via these
        packs.

        :param packs: The packs to obsolete.
        :return: None.
        """
        for pack in packs:
            pack.pack_transport.rename(pack.file_name(),
                '../obsolete_packs/' + pack.file_name())
            # TODO: Probably needs to know all possible indices for this pack
            # - or maybe list the directory and move all indices matching this
            # name whether we recognize it or not?
            for suffix in ('.iix', '.six', '.tix', '.rix'):
                self._index_transport.rename(pack.name + suffix,
                    '../obsolete_packs/' + pack.name + suffix)

    def pack_distribution(self, total_revisions):
        """Generate a list of the number of revisions to put in each pack.

        :param total_revisions: The total number of revisions in the
            repository.
        """
        if total_revisions == 0:
            return [0]
        digits = reversed(str(total_revisions))
        result = []
        for exponent, count in enumerate(digits):
            size = 10 ** exponent
            for pos in range(int(count)):
                result.append(size)
        return list(reversed(result))
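    # Illustrative worked example: total_revisions = 2335 has digits 2, 3,
    # 3 and 5, so _max_pack_count(2335) == 2+3+3+5 == 13 and
    # pack_distribution(2335) ==
    #     [1000, 1000, 100, 100, 100, 10, 10, 10, 1, 1, 1, 1, 1]
    # - one bucket per digit, largest first.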

    def _pack_tuple(self, name):
        """Return a tuple with the transport and file name for a pack name."""
        return self._pack_transport, name + '.pack'

    def _remove_pack_from_memory(self, pack):
        """Remove pack from the packs accessed by this repository.

        Only affects memory state, until self._save_pack_names() is invoked.
        """
        self._names.pop(pack.name)
        self._packs_by_name.pop(pack.name)
        self._remove_pack_indices(pack)
        self.packs.remove(pack)
1597

    def _remove_pack_indices(self, pack):
        """Remove the indices for pack from the aggregated indices."""
        self.revision_index.remove_index(pack.revision_index, pack)
        self.inventory_index.remove_index(pack.inventory_index, pack)
        self.text_index.remove_index(pack.text_index, pack)
        self.signature_index.remove_index(pack.signature_index, pack)

    def reset(self):
        """Clear all cached data."""
        # cached revision data
        self.repo._revision_knit = None
        self.revision_index.clear()
        # cached signature data
        self.repo._signature_knit = None
        self.signature_index.clear()
        # cached file text data
        self.text_index.clear()
        self.repo._text_knit = None
        # cached inventory data
        self.inventory_index.clear()
        # remove the open pack
        self._new_pack = None
        # information about packs.
        self._names = None
        self.packs = []
        self._packs_by_name = {}
        self._packs_at_load = None

    def _unlock_names(self):
        """Release the mutex around the pack-names index."""
        self.repo.control_files.unlock()

    def _diff_pack_names(self):
        """Read the pack names from disk, and compare it to the one in memory.

        :return: (disk_nodes, deleted_nodes, new_nodes)
            disk_nodes    The final set of nodes that should be referenced
            deleted_nodes Nodes which have been removed from when we started
            new_nodes     Nodes that are newly introduced
        """
        # load the disk nodes across
        disk_nodes = set()
        for index, key, value in self._iter_disk_pack_index():
            disk_nodes.add((key, value))

        # do a two-way diff against our original content
        current_nodes = set()
        for name, sizes in self._names.iteritems():
            current_nodes.add(
                ((name, ), ' '.join(str(size) for size in sizes)))

        # Packs no longer present in the repository, which were present when we
        # locked the repository
        deleted_nodes = self._packs_at_load - current_nodes
        # Packs which this process is adding
        new_nodes = current_nodes - self._packs_at_load

        # Update the disk_nodes set to include the ones we are adding, and
        # remove the ones which were removed by someone else
        disk_nodes.difference_update(deleted_nodes)
        disk_nodes.update(new_nodes)

        return disk_nodes, deleted_nodes, new_nodes
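
    # Worked example (illustrative comment, not in the original source): if
    # pack-names listed {a, b} when we locked, another process has since
    # repacked a and b into c (disk now lists only c), and this process
    # allocated a new pack d, then current_nodes is {a, b, d}, deleted_nodes
    # is empty, new_nodes is {d}, and the merged disk_nodes is {c, d} - the
    # other process's repack and our addition both survive.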

    def _syncronize_pack_names_from_disk_nodes(self, disk_nodes):
        """Given the correct set of pack files, update our saved info.

        :return: (removed, added, modified)
            removed     pack names removed from self._names
            added       pack names added to self._names
            modified    pack names that had changed value
        """
        removed = []
        added = []
        modified = []
        ## self._packs_at_load = disk_nodes
        new_names = dict(disk_nodes)
        # drop no longer present nodes
        for pack in self.all_packs():
            if (pack.name,) not in new_names:
                removed.append(pack.name)
                self._remove_pack_from_memory(pack)
        # add new nodes/refresh existing ones
        for key, value in disk_nodes:
            name = key[0]
            sizes = self._parse_index_sizes(value)
            if name in self._names:
                # existing
                if sizes != self._names[name]:
                    # the pack for name has had its indices replaced - rare but
                    # important to handle. XXX: probably can never happen today
                    # because the three-way merge code above does not handle it
                    # - you may end up adding the same key twice to the new
                    # disk index because the set values are the same, unless
                    # the only index shows up as deleted by the set difference
                    # - which it may. Until there is a specific test for this,
                    # assume it's broken. RBC 20071017.
                    self._remove_pack_from_memory(self.get_pack_by_name(name))
                    self._names[name] = sizes
                    self.get_pack_by_name(name)
                    modified.append(name)
            else:
                # new
                self._names[name] = sizes
                self.get_pack_by_name(name)
                added.append(name)
        return removed, added, modified

    def _save_pack_names(self, clear_obsolete_packs=False):
        """Save the list of packs.

        This will take out the mutex around the pack names list for the
        duration of the method call. If concurrent updates have been made, a
        three-way merge between the current list and the current in memory list
        is performed.

        :param clear_obsolete_packs: If True, clear out the contents of the
            obsolete_packs directory.
        """
        self.lock_names()
        try:
            builder = self._index_builder_class()
            disk_nodes, deleted_nodes, new_nodes = self._diff_pack_names()
            # TODO: handle same-name, index-size-changes here -
            # e.g. use the value from disk, not ours, *unless* we're the one
            # changing it.
            for key, value in disk_nodes:
                builder.add_node(key, value)
            self.transport.put_file('pack-names', builder.finish(),
                mode=self.repo.bzrdir._get_file_mode())
            # move the baseline forward
            self._packs_at_load = disk_nodes
            if clear_obsolete_packs:
                self._clear_obsolete_packs()
        finally:
            self._unlock_names()
        # synchronise the memory packs list with what we just wrote:
        self._syncronize_pack_names_from_disk_nodes(disk_nodes)

    def reload_pack_names(self):
        """Sync our pack listing with what is present in the repository.

        This should be called when we find out that something we thought was
        present is now missing. This happens when another process re-packs the
        repository, etc.
        """
        # This is functionally similar to _save_pack_names, but we don't write
        # out the new value.
        disk_nodes, _, _ = self._diff_pack_names()
        self._packs_at_load = disk_nodes
        (removed, added,
         modified) = self._syncronize_pack_names_from_disk_nodes(disk_nodes)
        if removed or added or modified:
            return True
        return False

    def _restart_autopack(self):
        """Reload the pack names list, and restart the autopack code."""
        if not self.reload_pack_names():
            # Re-raise the original exception, because something went missing
            # and a restart didn't find it
            raise
        raise errors.RetryAutopack(self.repo, False, sys.exc_info())
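
    # Note (illustrative comment, not in the original source): the bare
    # 'raise' above re-raises the exception currently being handled, and
    # sys.exc_info() captures that same exception for RetryAutopack, so
    # _restart_autopack is only meaningful when called from inside an
    # 'except' block.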

    def _clear_obsolete_packs(self):
        """Delete everything from the obsolete-packs directory."""
        obsolete_pack_transport = self.transport.clone('obsolete_packs')
        for filename in obsolete_pack_transport.list_dir('.'):
            try:
                obsolete_pack_transport.delete(filename)
            except (errors.PathError, errors.TransportError), e:
                warning("couldn't delete obsolete pack, skipping it:\n%s" % (e,))

    def _start_write_group(self):
        # Do not permit preparation for writing if we're not in a 'write lock'.
        if not self.repo.is_write_locked():
            raise errors.NotWriteLocked(self)
        self._new_pack = NewPack(self, upload_suffix='.pack',
            file_mode=self.repo.bzrdir._get_file_mode())
        # allow writing: queue writes to a new index
        self.revision_index.add_writable_index(self._new_pack.revision_index,
            self._new_pack)
        self.inventory_index.add_writable_index(self._new_pack.inventory_index,
            self._new_pack)
        self.text_index.add_writable_index(self._new_pack.text_index,
            self._new_pack)
        self.signature_index.add_writable_index(self._new_pack.signature_index,
            self._new_pack)

        self.repo.inventories._index._add_callback = self.inventory_index.add_callback
        self.repo.revisions._index._add_callback = self.revision_index.add_callback
        self.repo.signatures._index._add_callback = self.signature_index.add_callback
        self.repo.texts._index._add_callback = self.text_index.add_callback
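
    # Note (illustrative comment, not in the original source): making the
    # NewPack's indices writable routes new entries through the aggregate
    # indices' add_callbacks, so data inserted during the write group is
    # visible via the combined indices before the pack is saved to disk.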

    def _abort_write_group(self):
        # FIXME: just drop the transient index.
        # forget what names there are
        if self._new_pack is not None:
            try:
                self._new_pack.abort()
            finally:
                # XXX: If we aborted while in the middle of finishing the write
                # group, _remove_pack_indices can fail because the indexes are
                # already gone. If they're not there we shouldn't fail in this
                # case. -- mbp 20081113
                self._remove_pack_indices(self._new_pack)
                self._new_pack = None
        self.repo._text_knit = None

    def _commit_write_group(self):
        self._remove_pack_indices(self._new_pack)
        if self._new_pack.data_inserted():
            # get all the data to disk and ready to use
            self._new_pack.finish()
            self.allocate(self._new_pack)
            self._new_pack = None
            if not self.autopack():
                # when autopack takes no steps, the names list is still
                # unsaved.
                self._save_pack_names()
        else:
            self._new_pack.abort()
            self._new_pack = None
        self.repo._text_knit = None
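
    # Note (illustrative comment, not in the original source): the write-group
    # life cycle for this collection is _start_write_group -> client code
    # inserting data into self._new_pack -> _commit_write_group (finish,
    # allocate, autopack), or _abort_write_group if anything fails.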


class KnitPackRepository(KnitRepository):
    """Repository with knit objects stored inside pack containers.

    The layering for a KnitPackRepository is:

    Graph | HPSS | Repository public layer |
    ===================================================
    Tuple based apis below, string based, and key based apis above
    ---------------------------------------------------
    KnitVersionedFiles
      Provides .texts, .revisions etc
      This adapts the N-tuple keys to physical knit records which only have a
      single string identifier (for historical reasons), which in older formats
      was always the revision_id, and in the mapped code for packs is always
      the last element of key tuples.
    ---------------------------------------------------
    GraphIndex
      A separate GraphIndex is used for each of the
      texts/inventories/revisions/signatures contained within each individual
      pack file. The GraphIndex layer works in N-tuples and is unaware of any
      semantic value of the names - ie the pack file names.
    ===================================================
    """

    def __init__(self, _format, a_bzrdir, control_files, _commit_builder_class,
        _serializer):
        KnitRepository.__init__(self, _format, a_bzrdir, control_files,
            _commit_builder_class, _serializer)
        index_transport = self._transport.clone('indices')
        self._pack_collection = RepositoryPackCollection(self, self._transport,
            index_transport,
            self._transport.clone('upload'),
            self._transport.clone('packs'),
            _format.index_builder_class,
            _format.index_class)
        self.inventories = KnitVersionedFiles(
            _KnitGraphIndex(self._pack_collection.inventory_index.combined_index,
                add_callback=self._pack_collection.inventory_index.add_callback,
                deltas=True, parents=True, is_locked=self.is_locked),
            data_access=self._pack_collection.inventory_index.data_access,
            max_delta_chain=200)
        self.revisions = KnitVersionedFiles(
            _KnitGraphIndex(self._pack_collection.revision_index.combined_index,
                add_callback=self._pack_collection.revision_index.add_callback,
                deltas=False, parents=True, is_locked=self.is_locked),
            data_access=self._pack_collection.revision_index.data_access,
            max_delta_chain=0)
        self.signatures = KnitVersionedFiles(
            _KnitGraphIndex(self._pack_collection.signature_index.combined_index,
                add_callback=self._pack_collection.signature_index.add_callback,
                deltas=False, parents=False, is_locked=self.is_locked),
            data_access=self._pack_collection.signature_index.data_access,
            max_delta_chain=0)
        self.texts = KnitVersionedFiles(
            _KnitGraphIndex(self._pack_collection.text_index.combined_index,
                add_callback=self._pack_collection.text_index.add_callback,
                deltas=True, parents=True, is_locked=self.is_locked),
            data_access=self._pack_collection.text_index.data_access,
            max_delta_chain=200)
        # True when the repository object is 'write locked' (as opposed to the
        # physical lock only taken out around changes to the pack-names list.)
        # Another way to represent this would be a decorator around the control
        # files object that presents logical locks as physical ones - if this
        # gets ugly consider that alternative design. RBC 20071011
        self._write_lock_count = 0
        self._transaction = None
        # for tests
        self._reconcile_does_inventory_gc = True
        self._reconcile_fixes_text_parents = True
        self._reconcile_backsup_inventory = False
        self._fetch_order = 'unordered'
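
    # Note (illustrative comment, not in the original source): revisions and
    # signatures use max_delta_chain=0, i.e. they are always stored as full
    # texts, while inventories and file texts may form delta chains up to
    # 200 entries deep before a full text is inserted.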

    def _warn_if_deprecated(self):
        # This class isn't deprecated, but one sub-format is
        if isinstance(self._format, RepositoryFormatKnitPack5RichRootBroken):
            from bzrlib import repository
            if repository._deprecation_warning_done:
                return
            repository._deprecation_warning_done = True
            warning("Format %s for %s is deprecated - please use"
                    " 'bzr upgrade --1.6.1-rich-root'"
                    % (self._format, self.bzrdir.transport.base))

    def _abort_write_group(self):
        self._pack_collection._abort_write_group()

    def _find_inconsistent_revision_parents(self):
        """Find revisions with incorrectly cached parents.

        :returns: a list of tuples of (revision-id, parents-in-index,
            parents-in-revision).
        """
        if not self.is_locked():
            raise errors.ObjectNotLocked(self)
        pb = ui.ui_factory.nested_progress_bar()
        result = []
        try:
            revision_nodes = self._pack_collection.revision_index \
                .combined_index.iter_all_entries()
            index_positions = []
            # Get the cached index values for all revisions, and also the location
            # in each index of the revision text so we can perform linear IO.
            for index, key, value, refs in revision_nodes:
                pos, length = value[1:].split(' ')
                index_positions.append((index, int(pos), key[0],
                    tuple(parent[0] for parent in refs[0])))
                pb.update("Reading revision index.", 0, 0)
            index_positions.sort()
            batch_count = len(index_positions) / 1000 + 1
            pb.update("Checking cached revision graph.", 0, batch_count)
            for offset in xrange(batch_count):
                pb.update("Checking cached revision graph.", offset)
                to_query = index_positions[offset * 1000:(offset + 1) * 1000]
                if not to_query:
                    break
                rev_ids = [item[2] for item in to_query]
                revs = self.get_revisions(rev_ids)
                for revision, item in zip(revs, to_query):
                    index_parents = item[3]
                    rev_parents = tuple(revision.parent_ids)
                    if index_parents != rev_parents:
                        result.append((revision.revision_id, index_parents, rev_parents))
        finally:
            pb.finished()
        return result
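
    # Note (illustrative comment, not in the original source): sorting
    # index_positions and querying revisions in batches of 1000 keeps the
    # pack reads roughly sequential rather than seeking once per revision.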

    @symbol_versioning.deprecated_method(symbol_versioning.one_one)
    def get_parents(self, revision_ids):
        """See graph._StackedParentsProvider.get_parents."""
        parent_map = self.get_parent_map(revision_ids)
        return [parent_map.get(r, None) for r in revision_ids]

    def _make_parents_provider(self):
        return graph.CachingParentsProvider(self)

    def _refresh_data(self):
        if self._write_lock_count == 1 or (
            self.control_files._lock_count == 1 and
            self.control_files._lock_mode == 'r'):
            # forget what names there are
            self._pack_collection.reset()
            # XXX: Better to do an in-memory merge when acquiring a new lock -
            # factor out code from _save_pack_names.
            self._pack_collection.ensure_loaded()

    def _start_write_group(self):
        self._pack_collection._start_write_group()

    def _commit_write_group(self):
        return self._pack_collection._commit_write_group()

    def get_transaction(self):
        if self._write_lock_count:
            return self._transaction
        else:
            return self.control_files.get_transaction()

    def is_locked(self):
        return self._write_lock_count or self.control_files.is_locked()

    def is_write_locked(self):
        return self._write_lock_count

    def lock_write(self, token=None):
        if not self._write_lock_count and self.is_locked():
            raise errors.ReadOnlyError(self)
        self._write_lock_count += 1
        if self._write_lock_count == 1:
            self._transaction = transactions.WriteTransaction()
            for repo in self._fallback_repositories:
                # Writes don't affect fallback repos
                repo.lock_read()
        self._refresh_data()

    def lock_read(self):
        if self._write_lock_count:
            self._write_lock_count += 1
        else:
            self.control_files.lock_read()
        for repo in self._fallback_repositories:
            # Writes don't affect fallback repos
            repo.lock_read()
        self._refresh_data()

    def leave_lock_in_place(self):
        # not supported - raise an error
        raise NotImplementedError(self.leave_lock_in_place)

    def dont_leave_lock_in_place(self):
        # not supported - raise an error
        raise NotImplementedError(self.dont_leave_lock_in_place)
"""Compress the data within the repository.
2021
This will pack all the data to a single pack. In future it may
2022
recompress deltas or do other such expensive operations.
2024
self._pack_collection.pack()
2027

    @needs_write_lock
    def reconcile(self, other=None, thorough=False):
        """Reconcile this repository."""
        from bzrlib.reconcile import PackReconciler
        reconciler = PackReconciler(self, thorough=thorough)
        reconciler.reconcile()
        return reconciler

    def unlock(self):
        if self._write_lock_count == 1 and self._write_group is not None:
            self.abort_write_group()
            self._transaction = None
            self._write_lock_count = 0
            raise errors.BzrError(
                'Must end write group before releasing write lock on %s'
                % self)
        if self._write_lock_count:
            self._write_lock_count -= 1
            if not self._write_lock_count:
                transaction = self._transaction
                self._transaction = None
                transaction.finish()
                for repo in self._fallback_repositories:
                    repo.unlock()
        else:
            self.control_files.unlock()
            for repo in self._fallback_repositories:
                repo.unlock()


class RepositoryFormatPack(MetaDirRepositoryFormat):
    """Format logic for pack structured repositories.

    This repository format has:
     - a list of packs in pack-names
     - packs in packs/NAME.pack
     - indices in indices/NAME.{iix,six,tix,rix}
     - knit deltas in the packs, knit indices mapped to the indices.
     - thunk objects to support the knits programming API.
     - a format marker of its own
     - an optional 'shared-storage' flag
     - an optional 'no-working-trees' flag
     - a LockDir lock
    """

    # Set this attribute in derived classes to control the repository class
    # created by open and initialize.
    repository_class = None
    # Set this attribute in derived classes to control the
    # _commit_builder_class that the repository objects will have passed to
    # their constructor.
    _commit_builder_class = None
    # Set this attribute in derived classes to control the _serializer that the
    # repository objects will have passed to their constructor.
    _serializer = None
    # External references are not supported in pack repositories yet.
    supports_external_lookups = False
    # What index classes to use
    index_builder_class = None
    index_class = None

    def initialize(self, a_bzrdir, shared=False):
        """Create a pack based repository.

        :param a_bzrdir: bzrdir to contain the new repository; must already
            be initialized.
        :param shared: If true the repository will be initialized as a shared
            repository.
        """
        mutter('creating repository in %s.', a_bzrdir.transport.base)
        dirs = ['indices', 'obsolete_packs', 'packs', 'upload']
        builder = self.index_builder_class()
        files = [('pack-names', builder.finish())]
        utf8_files = [('format', self.get_format_string())]

        self._upload_blank_content(a_bzrdir, dirs, files, utf8_files, shared)
        return self.open(a_bzrdir=a_bzrdir, _found=True)

    def open(self, a_bzrdir, _found=False, _override_transport=None):
        """See RepositoryFormat.open().

        :param _override_transport: INTERNAL USE ONLY. Allows opening the
            repository at a slightly different url than normal. I.e. during
            'upgrade'.
        """
        if not _found:
            format = RepositoryFormat.find_format(a_bzrdir)
        if _override_transport is not None:
            repo_transport = _override_transport
        else:
            repo_transport = a_bzrdir.get_repository_transport(None)
        control_files = lockable_files.LockableFiles(repo_transport,
            'lock', lockdir.LockDir)
        return self.repository_class(_format=self,
            a_bzrdir=a_bzrdir,
            control_files=control_files,
            _commit_builder_class=self._commit_builder_class,
            _serializer=self._serializer)
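
# Illustrative sketch (not part of the original module): the concrete formats
# below parameterize RepositoryFormatPack purely through class attributes,
# along the lines of:
#
#   class MyPackFormat(RepositoryFormatPack):      # hypothetical name
#       repository_class = KnitPackRepository
#       _commit_builder_class = PackCommitBuilder
#       index_builder_class = InMemoryGraphIndex
#       index_class = GraphIndex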


class RepositoryFormatKnitPack1(RepositoryFormatPack):
    """A no-subtrees parameterized Pack repository.

    This format was introduced in 0.92.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackCommitBuilder
    @property
    def _serializer(self):
        return xml5.serializer_v5
    # What index classes to use
    index_builder_class = InMemoryGraphIndex
    index_class = GraphIndex

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir('pack-0.92')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Bazaar pack repository format 1 (needs bzr 0.92)\n"

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return "Packs containing knits without subtree support"

    def check_conversion_target(self, target_format):
        pass


class RepositoryFormatKnitPack3(RepositoryFormatPack):
    """A subtrees parameterized Pack repository.

    This repository format uses the xml7 serializer to get:
     - support for recording full info about the tree root
     - support for recording tree-references

    This format was introduced in 0.92.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackRootCommitBuilder
    rich_root_data = True
    supports_tree_reference = True
    @property
    def _serializer(self):
        return xml7.serializer_v7
    # What index classes to use
    index_builder_class = InMemoryGraphIndex
    index_class = GraphIndex

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir(
            'pack-0.92-subtree')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def check_conversion_target(self, target_format):
        if not target_format.rich_root_data:
            raise errors.BadConversionTarget(
                'Does not support rich root data.', target_format)
        if not getattr(target_format, 'supports_tree_reference', False):
            raise errors.BadConversionTarget(
                'Does not support nested trees', target_format)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Bazaar pack repository format 1 with subtree support (needs bzr 0.92)\n"

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return "Packs containing knits with subtree support\n"


class RepositoryFormatKnitPack4(RepositoryFormatPack):
    """A rich-root, no subtrees parameterized Pack repository.

    This repository format uses the xml6 serializer to get:
     - support for recording full info about the tree root

    This format was introduced in 1.0.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackRootCommitBuilder
    rich_root_data = True
    supports_tree_reference = False
    @property
    def _serializer(self):
        return xml6.serializer_v6
    # What index classes to use
    index_builder_class = InMemoryGraphIndex
    index_class = GraphIndex

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir(
            'rich-root-pack')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def check_conversion_target(self, target_format):
        if not target_format.rich_root_data:
            raise errors.BadConversionTarget(
                'Does not support rich root data.', target_format)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return ("Bazaar pack repository format 1 with rich root"
                " (needs bzr 1.0)\n")

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return "Packs containing knits with rich root support\n"


class RepositoryFormatKnitPack5(RepositoryFormatPack):
    """Repository that supports external references to allow stacking.

    New in release 1.6.

    Supports external lookups, which results in non-truncated ghosts after
    reconcile compared to pack-0.92 formats.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackCommitBuilder
    supports_external_lookups = True
    # What index classes to use
    index_builder_class = InMemoryGraphIndex
    index_class = GraphIndex

    @property
    def _serializer(self):
        return xml5.serializer_v5

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir('1.6')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Bazaar RepositoryFormatKnitPack5 (bzr 1.6)\n"

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return "Packs 5 (adds stacking support, requires bzr 1.6)"

    def check_conversion_target(self, target_format):
        pass


class RepositoryFormatKnitPack5RichRoot(RepositoryFormatPack):
    """A repository with rich roots and stacking.

    New in release 1.6.1.

    Supports stacking on other repositories, allowing data to be accessed
    without being stored locally.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackRootCommitBuilder
    rich_root_data = True
    supports_tree_reference = False # no subtrees
    supports_external_lookups = True
    # What index classes to use
    index_builder_class = InMemoryGraphIndex
    index_class = GraphIndex

    @property
    def _serializer(self):
        return xml6.serializer_v6

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir(
            '1.6.1-rich-root')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def check_conversion_target(self, target_format):
        if not target_format.rich_root_data:
            raise errors.BadConversionTarget(
                'Does not support rich root data.', target_format)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6.1)\n"

    def get_format_description(self):
        return "Packs 5 rich-root (adds stacking support, requires bzr 1.6.1)"


class RepositoryFormatKnitPack5RichRootBroken(RepositoryFormatPack):
    """A repository with rich roots and external references.

    New in release 1.6.

    Supports external lookups, which results in non-truncated ghosts after
    reconcile compared to pack-0.92 formats.

    This format was deprecated because the serializer it uses accidentally
    supported subtrees, when the format was not intended to. This meant that
    someone could accidentally fetch from an incorrect repository.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackRootCommitBuilder
    rich_root_data = True
    supports_tree_reference = False # no subtrees
    supports_external_lookups = True
    # What index classes to use
    index_builder_class = InMemoryGraphIndex
    index_class = GraphIndex

    @property
    def _serializer(self):
        return xml7.serializer_v7

    def _get_matching_bzrdir(self):
        matching = bzrdir.format_registry.make_bzrdir(
            '1.6.1-rich-root')
        matching.repository_format = self
        return matching

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def check_conversion_target(self, target_format):
        if not target_format.rich_root_data:
            raise errors.BadConversionTarget(
                'Does not support rich root data.', target_format)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6)\n"

    def get_format_description(self):
        return ("Packs 5 rich-root (adds stacking support, requires bzr 1.6)"
                " (deprecated)")


class RepositoryFormatKnitPack6(RepositoryFormatPack):
    """A repository with stacking and btree indexes,
    without rich roots or subtrees.

    This is equivalent to pack-1.6 with B+Tree indices.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackCommitBuilder
    supports_external_lookups = True
    # What index classes to use
    index_builder_class = BTreeBuilder
    index_class = BTreeGraphIndex

    @property
    def _serializer(self):
        return xml5.serializer_v5

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir('1.9')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Bazaar RepositoryFormatKnitPack6 (bzr 1.9)\n"

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return "Packs 6 (uses btree indexes, requires bzr 1.9)"

    def check_conversion_target(self, target_format):
        pass
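
# Note (illustrative comment, not in the original source): the 1.9-era
# formats differ from their 1.6 counterparts only in the index classes -
# BTreeBuilder and BTreeGraphIndex replace InMemoryGraphIndex and
# GraphIndex - while the pack container layout itself is unchanged.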


class RepositoryFormatKnitPack6RichRoot(RepositoryFormatPack):
    """A repository with rich roots, no subtrees, stacking and btree indexes.

    1.6-rich-root with B+Tree indices.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackRootCommitBuilder
    rich_root_data = True
    supports_tree_reference = False # no subtrees
    supports_external_lookups = True
    # What index classes to use
    index_builder_class = BTreeBuilder
    index_class = BTreeGraphIndex

    @property
    def _serializer(self):
        return xml6.serializer_v6

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir(
            '1.9-rich-root')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def check_conversion_target(self, target_format):
        if not target_format.rich_root_data:
            raise errors.BadConversionTarget(
                'Does not support rich root data.', target_format)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Bazaar RepositoryFormatKnitPack6RichRoot (bzr 1.9)\n"

    def get_format_description(self):
        return "Packs 6 rich-root (uses btree indexes, requires bzr 1.9)"


class RepositoryFormatPackDevelopment2(RepositoryFormatPack):
    """A no-subtrees development repository.

    This format should be retained until the second release after bzr 1.7.

    This is pack-1.6.1 with B+Tree indices.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackCommitBuilder
    supports_external_lookups = True
    # What index classes to use
    index_builder_class = BTreeBuilder
    index_class = BTreeGraphIndex

    @property
    def _serializer(self):
        return xml5.serializer_v5

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir('development2')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Bazaar development format 2 (needs bzr.dev from before 1.8)\n"

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return ("Development repository format, currently the same as "
            "1.6.1 with B+Trees.\n")

    def check_conversion_target(self, target_format):
        pass


class RepositoryFormatPackDevelopment2Subtree(RepositoryFormatPack):
    """A subtrees development repository.

    This format should be retained until the second release after bzr 1.7.

    1.6.1-subtree[as it might have been] with B+Tree indices.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackRootCommitBuilder
    rich_root_data = True
    supports_tree_reference = True
    supports_external_lookups = True
    # What index classes to use
    index_builder_class = BTreeBuilder
    index_class = BTreeGraphIndex

    @property
    def _serializer(self):
        return xml7.serializer_v7

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir(
            'development2-subtree')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def check_conversion_target(self, target_format):
        if not target_format.rich_root_data:
            raise errors.BadConversionTarget(
                'Does not support rich root data.', target_format)
        if not getattr(target_format, 'supports_tree_reference', False):
            raise errors.BadConversionTarget(
                'Does not support nested trees', target_format)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return ("Bazaar development format 2 with subtree support "
            "(needs bzr.dev from before 1.8)\n")

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return ("Development repository format, currently the same as "
            "1.6.1-subtree with B+Tree indices.\n")