# Copyright (C) 2005, 2006, 2007, 2008 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from bzrlib.lazy_import import lazy_import
lazy_import(globals(), """
from itertools import izip
import md5
import time

from bzrlib import (
    debug,
    graph,
    pack,
    ui,
    )
from bzrlib.index import (
    GraphIndex,
    GraphIndexBuilder,
    InMemoryGraphIndex,
    CombinedGraphIndex,
    GraphIndexPrefixAdapter,
    )
from bzrlib.knit import (
    KnitPlainFactory,
    KnitVersionedFiles,
    _KnitGraphIndex,
    _DirectPackAccess,
    )
from bzrlib.osutils import rand_chars, split_lines
from bzrlib import tsort
""")
from bzrlib import errors

from bzrlib.decorators import needs_write_lock
from bzrlib.repofmt.knitrepo import KnitRepository
from bzrlib.repository import (
    CommitBuilder,
    MetaDirRepositoryFormat,
    RootCommitBuilder,
    )
import bzrlib.revision as _mod_revision
from bzrlib.trace import (
    mutter,
    warning,
    )
class PackCommitBuilder(CommitBuilder):
    """A subclass of CommitBuilder to add texts with pack semantics.

    Specifically this uses one knit object rather than one knit object per
    added text, reducing memory and object pressure.
    """

    def __init__(self, repository, parents, config, timestamp=None,
                 timezone=None, committer=None, revprops=None,
                 revision_id=None):
        CommitBuilder.__init__(self, repository, parents, config,
            timestamp=timestamp, timezone=timezone, committer=committer,
            revprops=revprops, revision_id=revision_id)
        self._file_graph = graph.Graph(
            repository._pack_collection.text_index.combined_index)

    def _heads(self, file_id, revision_ids):
        keys = [(file_id, revision_id) for revision_id in revision_ids]
        return set([key[1] for key in self._file_graph.heads(keys)])
class PackRootCommitBuilder(RootCommitBuilder):
    """A subclass of RootCommitBuilder to add texts with pack semantics.

    Specifically this uses one knit object rather than one knit object per
    added text, reducing memory and object pressure.
    """

    def __init__(self, repository, parents, config, timestamp=None,
                 timezone=None, committer=None, revprops=None,
                 revision_id=None):
        CommitBuilder.__init__(self, repository, parents, config,
            timestamp=timestamp, timezone=timezone, committer=committer,
            revprops=revprops, revision_id=revision_id)
        self._file_graph = graph.Graph(
            repository._pack_collection.text_index.combined_index)

    def _heads(self, file_id, revision_ids):
        keys = [(file_id, revision_id) for revision_id in revision_ids]
        return set([key[1] for key in self._file_graph.heads(keys)])
"""An in memory proxy for a pack and its indices.
117
This is a base class that is not directly used, instead the classes
118
ExistingPack and NewPack are used.
121
def __init__(self, revision_index, inventory_index, text_index,
123
"""Create a pack instance.
125
:param revision_index: A GraphIndex for determining what revisions are
126
present in the Pack and accessing the locations of their texts.
127
:param inventory_index: A GraphIndex for determining what inventories are
128
present in the Pack and accessing the locations of their
130
:param text_index: A GraphIndex for determining what file texts
131
are present in the pack and accessing the locations of their
132
texts/deltas (via (fileid, revisionid) tuples).
133
:param signature_index: A GraphIndex for determining what signatures are
134
present in the Pack and accessing the locations of their texts.
136
self.revision_index = revision_index
137
self.inventory_index = inventory_index
138
self.text_index = text_index
139
self.signature_index = signature_index
141
def access_tuple(self):
142
"""Return a tuple (transport, name) for the pack content."""
143
return self.pack_transport, self.file_name()
146
"""Get the file name for the pack on disk."""
147
return self.name + '.pack'
149
def get_revision_count(self):
150
return self.revision_index.key_count()
152
def inventory_index_name(self, name):
153
"""The inv index is the name + .iix."""
154
return self.index_name('inventory', name)
156
def revision_index_name(self, name):
157
"""The revision index is the name + .rix."""
158
return self.index_name('revision', name)
160
def signature_index_name(self, name):
161
"""The signature index is the name + .six."""
162
return self.index_name('signature', name)
164
def text_index_name(self, name):
165
"""The text index is the name + .tix."""
166
return self.index_name('text', name)
168
def _external_compression_parents_of_texts(self):
171
for node in self.text_index.iter_all_entries():
173
refs.update(node[3][1])
177
class ExistingPack(Pack):
    """An in memory proxy for an existing .pack and its disk indices."""

    def __init__(self, pack_transport, name, revision_index, inventory_index,
        text_index, signature_index):
        """Create an ExistingPack object.

        :param pack_transport: The transport where the pack file resides.
        :param name: The name of the pack on disk in the pack_transport.
        """
        Pack.__init__(self, revision_index, inventory_index, text_index,
            signature_index)
        self.name = name
        self.pack_transport = pack_transport
        if None in (revision_index, inventory_index, text_index,
                signature_index, name, pack_transport):
            raise AssertionError()

    def __eq__(self, other):
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return "<bzrlib.repofmt.pack_repo.Pack object at 0x%x, %s, %s>" % (
            id(self), self.pack_transport, self.name)
"""An in memory proxy for a pack which is being created."""
209
# A map of index 'type' to the file extension and position in the
211
index_definitions = {
212
'revision': ('.rix', 0),
213
'inventory': ('.iix', 1),
215
'signature': ('.six', 3),
218
    def __init__(self, upload_transport, index_transport, pack_transport,
        upload_suffix='', file_mode=None):
        """Create a NewPack instance.

        :param upload_transport: A writable transport for the pack to be
            incrementally uploaded to.
        :param index_transport: A writable transport for the pack's indices to
            be written to when the pack is finished.
        :param pack_transport: A writable transport for the pack to be renamed
            to when the upload is complete. This *must* be the same as
            upload_transport.clone('../packs').
        :param upload_suffix: An optional suffix to be given to any temporary
            files created during the pack creation. e.g '.autopack'
        :param file_mode: An optional file mode to create the new files with.
        """
        # The relative locations of the packs are constrained, but all are
        # passed in because the caller has them, so as to avoid object churn.
        Pack.__init__(self,
            # Revisions: parents list, no text compression.
            InMemoryGraphIndex(reference_lists=1),
            # Inventory: We want to map compression only, but currently the
            # knit code hasn't been updated enough to understand that, so we
            # have a regular 2-list index giving parents and compression
            # source parents.
            InMemoryGraphIndex(reference_lists=2),
            # Texts: compression and per file graph, for all fileids - so two
            # reference lists and two elements in the key tuple.
            InMemoryGraphIndex(reference_lists=2, key_elements=2),
            # Signatures: Just blobs to store, no compression, no parents
            # listing.
            InMemoryGraphIndex(reference_lists=0),
            )
        # where should the new pack be opened
        self.upload_transport = upload_transport
        # where are indices written out to
        self.index_transport = index_transport
        # where is the pack renamed to when it is finished?
        self.pack_transport = pack_transport
        # What file mode to upload the pack and indices with.
        self._file_mode = file_mode
        # tracks the content written to the .pack file.
        self._hash = md5.new()
        # a four-tuple with the length in bytes of the indices, once the pack
        # is finalised. (rev, inv, text, sigs)
        self.index_sizes = None
        # How much data to cache when writing packs. Note that this is not
        # synchronised with reads, because it's not in the transport layer, so
        # is not safe unless the client knows it won't be reading from the pack
        # under creation.
        self._cache_limit = 0
        # the temporary pack file name.
        self.random_name = rand_chars(20) + upload_suffix
        # when was this pack started ?
        self.start_time = time.time()
        # open an output stream for the data added to the pack.
        self.write_stream = self.upload_transport.open_write_stream(
            self.random_name, mode=self._file_mode)
        if 'pack' in debug.debug_flags:
            mutter('%s: create_pack: pack stream open: %s%s t+%6.3fs',
                time.ctime(), self.upload_transport.base, self.random_name,
                time.time() - self.start_time)
        # A list of byte sequences to be written to the new pack, and the
        # aggregate size of them.  Stored as a list rather than separate
        # variables so that the _write_data closure below can update them.
        self._buffer = [[], 0]
        # create a callable for adding data
        #
        # robertc says- this is a closure rather than a method on the object
        # so that the variables are locals, and faster than accessing object
        # members.
        def _write_data(bytes, flush=False, _buffer=self._buffer,
            _write=self.write_stream.write, _update=self._hash.update):
            _buffer[0].append(bytes)
            _buffer[1] += len(bytes)
            # buffer cap
            if _buffer[1] > self._cache_limit or flush:
                bytes = ''.join(_buffer[0])
                _write(bytes)
                _update(bytes)
                _buffer[:] = [[], 0]
        # expose this on self, for the occasion when clients want to add data.
        self._write_data = _write_data
        # a pack writer object to serialise pack records.
        self._writer = pack.ContainerWriter(self._write_data)
        self._writer.begin()
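        # Note: with the default _cache_limit of 0 every _write_data call
        # flushes straight through to the write stream; set_write_cache_size()
        # raises the cap so that bulk operations batch their writes (see
        # _create_pack_from_packs, which uses a 1MB cache).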
        # what state is the pack in? (open, finished, aborted)
        self._state = 'open'

    def abort(self):
        """Cancel creating this pack."""
        self._state = 'aborted'
        self.write_stream.close()
        # Remove the temporary pack file.
        self.upload_transport.delete(self.random_name)
        # The indices have no state on disk.
    def access_tuple(self):
        """Return a tuple (transport, name) for the pack content."""
        if self._state == 'finished':
            return Pack.access_tuple(self)
        elif self._state == 'open':
            return self.upload_transport, self.random_name
        else:
            raise AssertionError(self._state)
    def data_inserted(self):
        """True if data has been added to this pack."""
        return bool(self.get_revision_count() or
            self.inventory_index.key_count() or
            self.text_index.key_count() or
            self.signature_index.key_count())
"""Finish the new pack.
334
- finalises the content
335
- assigns a name (the md5 of the content, currently)
336
- writes out the associated indices
337
- renames the pack into place.
338
- stores the index size tuple for the pack in the index_sizes
343
self._write_data('', flush=True)
344
self.name = self._hash.hexdigest()
346
        # XXX: It'd be better to write them all to temporary names, then
        # rename them all into place, so that the window when only some are
        # visible is smaller.  On the other hand none will be seen until
        # they're in the names list.
        self.index_sizes = [None, None, None, None]
        self._write_index('revision', self.revision_index, 'revision')
        self._write_index('inventory', self.inventory_index, 'inventory')
        self._write_index('text', self.text_index, 'file texts')
        self._write_index('signature', self.signature_index,
            'revision signatures')
        self.write_stream.close()
        # Note that this will clobber an existing pack with the same name,
        # without checking for hash collisions. While this is undesirable this
        # is something that can be rectified in a subsequent release. One way
        # to rectify it may be to leave the pack at the original name, writing
        # its pack-names entry as something like 'HASH: index-sizes
        # temporary-name'. Allocate that and check for collisions, if it is
        # collision free then rename it into place. If clients know this scheme
        # they can handle missing-file errors by:
        #  - try for HASH.pack
        #  - try for temporary-name
        #  - refresh the pack-list to see if the pack is now absent
        self.upload_transport.rename(self.random_name,
                '../packs/' + self.name + '.pack')
        self._state = 'finished'
        if 'pack' in debug.debug_flags:
            # XXX: size might be interesting?
            mutter('%s: create_pack: pack renamed into place: %s%s->%s%s t+%6.3fs',
                time.ctime(), self.upload_transport.base, self.random_name,
                self.pack_transport, self.name,
                time.time() - self.start_time)
"""Flush any current data."""
381
bytes = ''.join(self._buffer[0])
382
self.write_stream.write(bytes)
383
self._hash.update(bytes)
384
self._buffer[:] = [[], 0]
386
    def index_name(self, index_type, name):
        """Get the disk name of an index type for pack name 'name'."""
        return name + NewPack.index_definitions[index_type][0]

    def index_offset(self, index_type):
        """Get the position in an index_sizes array for a given index type."""
        return NewPack.index_definitions[index_type][1]
    def _replace_index_with_readonly(self, index_type):
        setattr(self, index_type + '_index',
            GraphIndex(self.index_transport,
                self.index_name(index_type, self.name),
                self.index_sizes[self.index_offset(index_type)]))

    def set_write_cache_size(self, size):
        self._cache_limit = size
    def _write_index(self, index_type, index, label):
        """Write out an index.

        :param index_type: The type of index to write - e.g. 'revision'.
        :param index: The index object to serialise.
        :param label: What label to give the index e.g. 'revision'.
        """
        index_name = self.index_name(index_type, self.name)
        self.index_sizes[self.index_offset(index_type)] = \
            self.index_transport.put_file(index_name, index.finish(),
            mode=self._file_mode)
        if 'pack' in debug.debug_flags:
            # XXX: size might be interesting?
            mutter('%s: create_pack: wrote %s index: %s%s t+%6.3fs',
                time.ctime(), label, self.upload_transport.base,
                self.random_name, time.time() - self.start_time)
        # Replace the writable index on this object with a readonly,
        # presently unloaded index. We should alter
        # the index layer to make its finish() error if add_node is
        # subsequently used. RBC
        self._replace_index_with_readonly(index_type)
class AggregateIndex(object):
    """An aggregated index for the RepositoryPackCollection.

    AggregateIndex is responsible for managing the PackAccess object,
    Index-To-Pack mapping, and all indices list for a specific type of index
    such as 'revision index'.

    A CombinedIndex provides an index on a single key space built up
    from several on-disk indices.  The AggregateIndex builds on this
    to provide a knit access layer, and allows having up to one writable
    index within the collection.
    """
    # XXX: Probably 'can be written to' could/should be separated from 'acts
    # like a knit index' -- mbp 20071024

    def __init__(self):
        """Create an AggregateIndex."""
        self.index_to_pack = {}
        self.combined_index = CombinedGraphIndex([])
        self.data_access = _DirectPackAccess(self.index_to_pack)
        self.add_callback = None
    def replace_indices(self, index_to_pack, indices):
        """Replace the current mappings with fresh ones.

        This should probably not be used eventually, rather incremental add
        and removal of indices. It has been added during refactoring of
        existing code.

        :param index_to_pack: A mapping from index objects to
            (transport, name) tuples for the pack file data.
        :param indices: A list of indices.
        """
        # refresh the revision pack map dict without replacing the instance.
        self.index_to_pack.clear()
        self.index_to_pack.update(index_to_pack)
        # XXX: API break - clearly a 'replace' method would be good?
        self.combined_index._indices[:] = indices
        # the current add nodes callback for the current writable index if
        # any.
        self.add_callback = None
    def add_index(self, index, pack):
        """Add index to the aggregate, which is an index for Pack pack.

        Future searches on the aggregate index will search this new index
        before all previously inserted indices.

        :param index: An Index for the pack.
        :param pack: A Pack instance.
        """
        # expose it to the index map
        self.index_to_pack[index] = pack.access_tuple()
        # put it at the front of the linear index list
        self.combined_index.insert_index(0, index)
    def add_writable_index(self, index, pack):
        """Add an index which is able to have data added to it.

        There can be at most one writable index at any time.  Any
        modifications made to the knit are put into this index.

        :param index: An index from the pack parameter.
        :param pack: A Pack instance.
        """
        if self.add_callback is not None:
            raise AssertionError(
                "%s already has a writable index through %s" % \
                (self, self.add_callback))
        # allow writing: queue writes to a new index
        self.add_index(index, pack)
        # Updates the index to packs mapping as a side effect,
        self.data_access.set_writer(pack._writer, index, pack.access_tuple())
        self.add_callback = index.add_nodes
"""Reset all the aggregate data to nothing."""
503
self.data_access.set_writer(None, None, (None, None))
504
self.index_to_pack.clear()
505
del self.combined_index._indices[:]
506
self.add_callback = None
508
    def remove_index(self, index, pack):
        """Remove index from the indices used to answer queries.

        :param index: An index from the pack parameter.
        :param pack: A Pack instance.
        """
        del self.index_to_pack[index]
        self.combined_index._indices.remove(index)
        if (self.add_callback is not None and
            getattr(index, 'add_nodes', None) == self.add_callback):
            self.add_callback = None
            self.data_access.set_writer(None, None, (None, None))
class Packer(object):
    """Create a pack from packs."""

    def __init__(self, pack_collection, packs, suffix, revision_ids=None):
        """Create a Packer.

        :param pack_collection: A RepositoryPackCollection object where the
            new pack is being written to.
        :param packs: The packs to combine.
        :param suffix: The suffix to use on the temporary files for the pack.
        :param revision_ids: Revision ids to limit the pack to.
        """
        self.packs = packs
        self.suffix = suffix
        self.revision_ids = revision_ids
        # The pack object we are creating.
        self.new_pack = None
        self._pack_collection = pack_collection
        # The index layer keys for the revisions being copied. None for 'all
        # revisions'.
        self._revision_keys = None
        # What text keys to copy. None for 'all texts'. This is set by
        # _copy_inventory_texts
        self._text_filter = None
        self._extra_init()

    def _extra_init(self):
        """A template hook to allow extending the constructor trivially."""
    def pack(self, pb=None):
        """Create a new pack by reading data from other packs.

        This does little more than a bulk copy of data. One key difference
        is that data with the same item key across multiple packs is elided
        from the output. The new pack is written into the current pack store
        along with its indices, and the name added to the pack names. The
        source packs are not altered and are not required to be in the current
        pack collection.

        :param pb: An optional progress bar to use. A nested bar is created if
            this is None.
        :return: A Pack object, or None if nothing was copied.
        """
        # open a pack - using the same name as the last temporary file
        # - which has already been flushed, so it's safe.
        # XXX: - duplicate code warning with start_write_group; fix before
        #      considering 'done'.
        if self._pack_collection._new_pack is not None:
            raise errors.BzrError('call to create_pack_from_packs while '
                'another pack is being written.')
        if self.revision_ids is not None:
            if len(self.revision_ids) == 0:
                # silly fetch request.
                return None
            else:
                self.revision_ids = frozenset(self.revision_ids)
                self.revision_keys = frozenset((revid,) for revid in
                    self.revision_ids)
        if pb is None:
            self.pb = ui.ui_factory.nested_progress_bar()
        else:
            self.pb = pb
        try:
            return self._create_pack_from_packs()
        finally:
            if pb is None:
                self.pb.finished()
"""Open a pack for the pack we are creating."""
592
return NewPack(self._pack_collection._upload_transport,
593
self._pack_collection._index_transport,
594
self._pack_collection._pack_transport, upload_suffix=self.suffix,
595
file_mode=self._pack_collection.repo.bzrdir._get_file_mode())
597
    def _copy_revision_texts(self):
        """Copy revision data to the new pack."""
        # select revisions
        if self.revision_ids:
            revision_keys = [(revision_id,) for revision_id in self.revision_ids]
        else:
            revision_keys = None
        # select revision keys
        revision_index_map = self._pack_collection._packs_list_to_pack_map_and_index_list(
            self.packs, 'revision_index')[0]
        revision_nodes = self._pack_collection._index_contents(revision_index_map, revision_keys)
        # copy revision keys and adjust values
        self.pb.update("Copying revision texts", 1)
        total_items, readv_group_iter = self._revision_node_readv(revision_nodes)
        list(self._copy_nodes_graph(revision_index_map, self.new_pack._writer,
            self.new_pack.revision_index, readv_group_iter, total_items))
        if 'pack' in debug.debug_flags:
            mutter('%s: create_pack: revisions copied: %s%s %d items t+%6.3fs',
                time.ctime(), self._pack_collection._upload_transport.base,
                self.new_pack.random_name,
                self.new_pack.revision_index.key_count(),
                time.time() - self.new_pack.start_time)
        self._revision_keys = revision_keys
    def _copy_inventory_texts(self):
        """Copy the inventory texts to the new pack.

        self._revision_keys is used to determine what inventories to copy.

        Sets self._text_filter appropriately.
        """
        # select inventory keys
        inv_keys = self._revision_keys # currently the same keyspace, and note that
        # querying for keys here could introduce a bug where an inventory item
        # is missed, so do not change it to query separately without cross
        # checking like the text key check below.
        inventory_index_map = self._pack_collection._packs_list_to_pack_map_and_index_list(
            self.packs, 'inventory_index')[0]
        inv_nodes = self._pack_collection._index_contents(inventory_index_map, inv_keys)
        # copy inventory keys and adjust values
        # XXX: Should be a helper function to allow different inv representation
        # at this point.
        self.pb.update("Copying inventory texts", 2)
        total_items, readv_group_iter = self._least_readv_node_readv(inv_nodes)
        # Only grab the output lines if we will be processing them
        output_lines = bool(self.revision_ids)
        inv_lines = self._copy_nodes_graph(inventory_index_map,
            self.new_pack._writer, self.new_pack.inventory_index,
            readv_group_iter, total_items, output_lines=output_lines)
        if self.revision_ids:
            self._process_inventory_lines(inv_lines)
        else:
            # eat the iterator to cause it to execute.
            list(inv_lines)
            self._text_filter = None
        if 'pack' in debug.debug_flags:
            mutter('%s: create_pack: inventories copied: %s%s %d items t+%6.3fs',
                time.ctime(), self._pack_collection._upload_transport.base,
                self.new_pack.random_name,
                self.new_pack.inventory_index.key_count(),
                time.time() - self.new_pack.start_time)
    def _copy_text_texts(self):
        text_index_map, text_nodes = self._get_text_nodes()
        if self._text_filter is not None:
            # We could return the keys copied as part of the return value from
            # _copy_nodes_graph but this doesn't work all that well with the
            # need to get line output too, so we check separately, and as we're
            # going to buffer everything anyway, we check beforehand, which
            # saves reading knit data over the wire when we know there are
            # missing records.
            text_nodes = set(text_nodes)
            present_text_keys = set(_node[1] for _node in text_nodes)
            missing_text_keys = set(self._text_filter) - present_text_keys
            if missing_text_keys:
                # TODO: raise a specific error that can handle many missing
                # keys.
                a_missing_key = missing_text_keys.pop()
                raise errors.RevisionNotPresent(a_missing_key[1],
                    a_missing_key[0])
        # copy text keys and adjust values
        self.pb.update("Copying content texts", 3)
        total_items, readv_group_iter = self._least_readv_node_readv(text_nodes)
        list(self._copy_nodes_graph(text_index_map, self.new_pack._writer,
            self.new_pack.text_index, readv_group_iter, total_items))
        self._log_copied_texts()
    def _check_references(self):
        """Make sure our external references are present."""
        external_refs = self.new_pack._external_compression_parents_of_texts()
        if external_refs:
            index = self._pack_collection.text_index.combined_index
            found_items = list(index.iter_entries(external_refs))
            if len(found_items) != len(external_refs):
                found_keys = set(k for idx, k, refs, value in found_items)
                missing_items = external_refs - found_keys
                missing_file_id, missing_revision_id = missing_items.pop()
                raise errors.RevisionNotPresent(missing_revision_id,
                    missing_file_id)
    def _create_pack_from_packs(self):
        self.pb.update("Opening pack", 0, 5)
        self.new_pack = self.open_pack()
        new_pack = self.new_pack
        # buffer data - we won't be reading-back during the pack creation and
        # this makes a significant difference on sftp pushes.
        new_pack.set_write_cache_size(1024*1024)
        if 'pack' in debug.debug_flags:
            plain_pack_list = ['%s%s' % (a_pack.pack_transport.base, a_pack.name)
                for a_pack in self.packs]
            if self.revision_ids is not None:
                rev_count = len(self.revision_ids)
            else:
                rev_count = 'all'
            mutter('%s: create_pack: creating pack from source packs: '
                '%s%s %s revisions wanted %s t=0',
                time.ctime(), self._pack_collection._upload_transport.base, new_pack.random_name,
                plain_pack_list, rev_count)
        self._copy_revision_texts()
        self._copy_inventory_texts()
        self._copy_text_texts()
        # select signature keys
        signature_filter = self._revision_keys # same keyspace
        signature_index_map = self._pack_collection._packs_list_to_pack_map_and_index_list(
            self.packs, 'signature_index')[0]
        signature_nodes = self._pack_collection._index_contents(signature_index_map,
            signature_filter)
        # copy signature keys and adjust values
        self.pb.update("Copying signature texts", 4)
        self._copy_nodes(signature_nodes, signature_index_map, new_pack._writer,
            new_pack.signature_index)
        if 'pack' in debug.debug_flags:
            mutter('%s: create_pack: revision signatures copied: %s%s %d items t+%6.3fs',
                time.ctime(), self._pack_collection._upload_transport.base, new_pack.random_name,
                new_pack.signature_index.key_count(),
                time.time() - new_pack.start_time)
        self._check_references()
        if not self._use_pack(new_pack):
            new_pack.abort()
            return None
        self.pb.update("Finishing pack", 5)
        new_pack.finish()
        self._pack_collection.allocate(new_pack)
        return new_pack
    def _copy_nodes(self, nodes, index_map, writer, write_index):
        """Copy knit nodes between packs with no graph references."""
        pb = ui.ui_factory.nested_progress_bar()
        try:
            return self._do_copy_nodes(nodes, index_map, writer,
                write_index, pb)
        finally:
            pb.finished()
    def _do_copy_nodes(self, nodes, index_map, writer, write_index, pb):
        # for record verification
        knit = KnitVersionedFiles(None, None)
        # plan a readv on each source pack:
        # group by pack
        nodes = sorted(nodes)
        # how to map this into knit.py - or knit.py into this?
        # we don't want the typical knit logic, we want grouping by pack
        # at this point - perhaps a helper library for the following code
        # duplication points?
        request_groups = {}
        for index, key, value in nodes:
            if index not in request_groups:
                request_groups[index] = []
            request_groups[index].append((key, value))
        record_index = 0
        pb.update("Copied record", record_index, len(nodes))
        for index, items in request_groups.iteritems():
            pack_readv_requests = []
            for key, value in items:
                # ---- KnitGraphIndex.get_position
                bits = value[1:].split(' ')
                offset, length = int(bits[0]), int(bits[1])
                pack_readv_requests.append((offset, length, (key, value[0])))
            # linear scan up the pack
            pack_readv_requests.sort()
            # copy the data
            transport, path = index_map[index]
            reader = pack.make_readv_reader(transport, path,
                [offset[0:2] for offset in pack_readv_requests])
            for (names, read_func), (_1, _2, (key, eol_flag)) in \
                izip(reader.iter_records(), pack_readv_requests):
                raw_data = read_func(None)
                # check the header only
                df, _ = knit._parse_record_header(key, raw_data)
                df.close()
                pos, size = writer.add_bytes_record(raw_data, names)
                write_index.add_node(key, eol_flag + "%d %d" % (pos, size))
                pb.update("Copied record", record_index)
                record_index += 1
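    # Note: _do_copy_nodes moves records as raw pack bytes - only the record
    # header is parsed, for verification - so copying between packs never
    # recompresses or re-deltas the stored texts.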
    def _copy_nodes_graph(self, index_map, writer, write_index,
        readv_group_iter, total_items, output_lines=False):
        """Copy knit nodes between packs.

        :param output_lines: Return lines present in the copied data as
            an iterator of line,version_id.
        """
        pb = ui.ui_factory.nested_progress_bar()
        try:
            for result in self._do_copy_nodes_graph(index_map, writer,
                write_index, output_lines, pb, readv_group_iter, total_items):
                yield result
        except Exception:
            # Python 2.4 does not permit try:finally: in a generator.
            pb.finished()
            raise
        else:
            pb.finished()
    def _do_copy_nodes_graph(self, index_map, writer, write_index,
        output_lines, pb, readv_group_iter, total_items):
        # for record verification
        knit = KnitVersionedFiles(None, None)
        # for line extraction when requested (inventories only)
        if output_lines:
            factory = KnitPlainFactory()
        record_index = 0
        pb.update("Copied record", record_index, total_items)
        for index, readv_vector, node_vector in readv_group_iter:
            # copy the data
            transport, path = index_map[index]
            reader = pack.make_readv_reader(transport, path, readv_vector)
            for (names, read_func), (key, eol_flag, references) in \
                izip(reader.iter_records(), node_vector):
                raw_data = read_func(None)
                if output_lines:
                    # read the entire thing
                    content, _ = knit._parse_record(key[-1], raw_data)
                    if len(references[-1]) == 0:
                        line_iterator = factory.get_fulltext_content(content)
                    else:
                        line_iterator = factory.get_linedelta_content(content)
                    for line in line_iterator:
                        yield line, key
                else:
                    # check the header only
                    df, _ = knit._parse_record_header(key, raw_data)
                    df.close()
                pos, size = writer.add_bytes_record(raw_data, names)
                write_index.add_node(key, eol_flag + "%d %d" % (pos, size), references)
                pb.update("Copied record", record_index)
                record_index += 1
    def _get_text_nodes(self):
        text_index_map = self._pack_collection._packs_list_to_pack_map_and_index_list(
            self.packs, 'text_index')[0]
        return text_index_map, self._pack_collection._index_contents(text_index_map,
            self._text_filter)
    def _least_readv_node_readv(self, nodes):
        """Generate request groups for nodes using the least readv's.

        :param nodes: An iterable of graph index nodes.
        :return: Total node count and an iterator of the data needed to perform
            readvs to obtain the data for nodes. Each item yielded by the
            iterator is a tuple with:
            index, readv_vector, node_vector. readv_vector is a list ready to
            hand to the transport readv method, and node_vector is a list of
            (key, eol_flag, references) for the node retrieved by the
            matching readv_vector.
        """
        # group by pack so we do one readv per pack
        nodes = sorted(nodes)
        request_groups = {}
        for index, key, value, references in nodes:
            if index not in request_groups:
                request_groups[index] = []
            request_groups[index].append((key, value, references))
        result = []
        for index, items in request_groups.iteritems():
            pack_readv_requests = []
            for key, value, references in items:
                # ---- KnitGraphIndex.get_position
                bits = value[1:].split(' ')
                offset, length = int(bits[0]), int(bits[1])
                pack_readv_requests.append(
                    ((offset, length), (key, value[0], references)))
            # linear scan up the pack to maximum range combining.
            pack_readv_requests.sort()
            # split out the readv and the node data.
            pack_readv = [readv for readv, node in pack_readv_requests]
            node_vector = [node for readv, node in pack_readv_requests]
            result.append((index, pack_readv, node_vector))
        return len(nodes), iter(result)
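    # Note: the index 'value' for a record is its eol flag byte followed by
    # 'offset length' (see the add_node calls above), so value[1:].split(' ')
    # recovers the byte range within the pack and value[0] the flag.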
    def _log_copied_texts(self):
        if 'pack' in debug.debug_flags:
            mutter('%s: create_pack: file texts copied: %s%s %d items t+%6.3fs',
                time.ctime(), self._pack_collection._upload_transport.base,
                self.new_pack.random_name,
                self.new_pack.text_index.key_count(),
                time.time() - self.new_pack.start_time)
    def _process_inventory_lines(self, inv_lines):
        """Use up the inv_lines generator and setup a text key filter."""
        repo = self._pack_collection.repo
        fileid_revisions = repo._find_file_ids_from_xml_inventory_lines(
            inv_lines, self.revision_keys)
        text_filter = []
        for fileid, file_revids in fileid_revisions.iteritems():
            text_filter.extend([(fileid, file_revid) for file_revid in file_revids])
        self._text_filter = text_filter
    def _revision_node_readv(self, revision_nodes):
        """Return the total revisions and the readv's to issue.

        :param revision_nodes: The revision index contents for the packs being
            incorporated into the new pack.
        :return: As per _least_readv_node_readv.
        """
        return self._least_readv_node_readv(revision_nodes)
    def _use_pack(self, new_pack):
        """Return True if new_pack should be used.

        :param new_pack: The pack that has just been created.
        :return: True if the pack should be used.
        """
        return new_pack.data_inserted()
class OptimisingPacker(Packer):
    """A packer which spends more time to create better disk layouts."""

    def _revision_node_readv(self, revision_nodes):
        """Return the total revisions and the readv's to issue.

        This sort places revisions in topological order with the ancestors
        after the children.

        :param revision_nodes: The revision index contents for the packs being
            incorporated into the new pack.
        :return: As per _least_readv_node_readv.
        """
        # build an ancestors dict
        ancestors = {}
        by_key = {}
        for index, key, value, references in revision_nodes:
            ancestors[key] = references[0]
            by_key[key] = (index, value, references)
        order = tsort.topo_sort(ancestors)
        total = len(order)
        # Single IO is pathological, but it will work as a starting point.
        requests = []
        for key in reversed(order):
            index, value, references = by_key[key]
            # ---- KnitGraphIndex.get_position
            bits = value[1:].split(' ')
            offset, length = int(bits[0]), int(bits[1])
            requests.append(
                (index, [(offset, length)], [(key, value[0], references)]))
        # TODO: combine requests in the same index that are in ascending order.
        return total, requests
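    # topo_sort yields ancestors before descendants, so iterating the order in
    # reverse writes each child before its parents - the layout the docstring
    # above describes.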
class ReconcilePacker(Packer):
    """A packer which regenerates indices etc as it copies.

    This is used by ``bzr reconcile`` to cause parent text pointers to be
    regenerated.
    """

    def _extra_init(self):
        self._data_changed = False

    def _process_inventory_lines(self, inv_lines):
        """Generate a text key reference map for reconciling with."""
        repo = self._pack_collection.repo
        refs = repo._find_text_key_references_from_xml_inventory_lines(
            inv_lines)
        self._text_refs = refs
        # during reconcile we:
        #  - convert unreferenced texts to full texts
        #  - correct texts which reference a text not copied to be full texts
        #  - copy all others as-is but with corrected parents.
        #  - so at this point we don't know enough to decide what becomes a full
        #    text.
        self._text_filter = None
    def _copy_text_texts(self):
        """Generate what texts we should have and then copy."""
        self.pb.update("Copying content texts", 3)
        # we have three major tasks here:
        # 1) generate the ideal index
        repo = self._pack_collection.repo
        ancestors = dict([(key[0], tuple(ref[0] for ref in refs[0])) for
            _1, key, _2, refs in
            self.new_pack.revision_index.iter_all_entries()])
        ideal_index = repo._generate_text_key_index(self._text_refs, ancestors)
        # 2) generate a text_nodes list that contains all the deltas that can
        #    be used as-is, with corrected parents.
        ok_nodes = []
        bad_texts = []
        discarded_nodes = []
        NULL_REVISION = _mod_revision.NULL_REVISION
        text_index_map, text_nodes = self._get_text_nodes()
        for node in text_nodes:
            try:
                ideal_parents = tuple(ideal_index[node[1]])
            except KeyError:
                discarded_nodes.append(node)
                self._data_changed = True
            else:
                if ideal_parents == (NULL_REVISION,):
                    ideal_parents = ()
                if ideal_parents == node[3][0]:
                    # no change needed.
                    ok_nodes.append(node)
                elif ideal_parents[0:1] == node[3][0][0:1]:
                    # the left most parent is the same, or there are no parents
                    # today. Either way, we can preserve the representation as
                    # long as we change the refs to be inserted.
                    self._data_changed = True
                    ok_nodes.append((node[0], node[1], node[2],
                        (ideal_parents, node[3][1])))
                else:
                    # Reinsert this text completely
                    bad_texts.append((node[1], ideal_parents))
                    self._data_changed = True
        # we're finished with some data.
        del ideal_index
        del text_nodes
        # 3) bulk copy the ok data
        total_items, readv_group_iter = self._least_readv_node_readv(ok_nodes)
        list(self._copy_nodes_graph(text_index_map, self.new_pack._writer,
            self.new_pack.text_index, readv_group_iter, total_items))
        # 4) adhoc copy all the other texts.
        # We have to topologically insert all texts otherwise we can fail to
        # reconcile when parts of a single delta chain are preserved intact,
        # and other parts are not. E.g. Discarded->d1->d2->d3. d1 will be
        # reinserted, and if d3 has incorrect parents it will also be
        # reinserted. If we insert d3 first, d2 is present (as it was bulk
        # copied), so we will try to delta, but d2 is not currently able to be
        # extracted because its basis d1 is not present. Topologically sorting
        # addresses this. The following generates a sort for all the texts that
        # are being inserted without having to reference the entire text key
        # space (we only topo sort the revisions, which is smaller).
        topo_order = tsort.topo_sort(ancestors)
        rev_order = dict(zip(topo_order, range(len(topo_order))))
        bad_texts.sort(key=lambda key:rev_order[key[0][1]])
        transaction = repo.get_transaction()
        file_id_index = GraphIndexPrefixAdapter(
            self.new_pack.text_index,
            ('blank', ), 1,
            add_nodes_callback=self.new_pack.text_index.add_nodes)
        data_access = _DirectPackAccess(
            {self.new_pack.text_index:self.new_pack.access_tuple()})
        data_access.set_writer(self.new_pack._writer, self.new_pack.text_index,
            self.new_pack.access_tuple())
        output_texts = KnitVersionedFiles(
            _KnitGraphIndex(self.new_pack.text_index,
                add_callback=self.new_pack.text_index.add_nodes,
                deltas=True, parents=True, is_locked=repo.is_locked),
            data_access=data_access, max_delta_chain=200)
        for key, parent_keys in bad_texts:
            # We refer to the new pack to delta data being output.
            # A possible improvement would be to catch errors on short reads
            # and only flush then.
            self.new_pack.flush()
            parents = []
            for parent_key in parent_keys:
                if parent_key[0] != key[0]:
                    # Graph parents must match the fileid
                    raise errors.BzrError('Mismatched key parent %r:%r' %
                        (key, parent_keys))
                parents.append(parent_key[1])
            text_lines = split_lines(repo.texts.get_record_stream(
                [key], 'unordered', True).next().get_bytes_as('fulltext'))
            output_texts.add_lines(key, parent_keys, text_lines,
                random_id=True, check_content=False)
        # 5) check that nothing inserted has a reference outside the keyspace.
        missing_text_keys = self.new_pack._external_compression_parents_of_texts()
        if missing_text_keys:
            raise errors.BzrError('Reference to missing compression parents %r'
                % (missing_text_keys,))
        self._log_copied_texts()
    def _use_pack(self, new_pack):
        """Override _use_pack to check for reconcile having changed content."""
        # XXX: we might be better checking this at the copy time.
        original_inventory_keys = set()
        inv_index = self._pack_collection.inventory_index.combined_index
        for entry in inv_index.iter_all_entries():
            original_inventory_keys.add(entry[1])
        new_inventory_keys = set()
        for entry in new_pack.inventory_index.iter_all_entries():
            new_inventory_keys.add(entry[1])
        if new_inventory_keys != original_inventory_keys:
            self._data_changed = True
        return new_pack.data_inserted() and self._data_changed
class RepositoryPackCollection(object):
    """Management of packs within a repository.

    :ivar _names: map of {pack_name: (index_size,)}
    """

    def __init__(self, repo, transport, index_transport, upload_transport,
            pack_transport):
        """Create a new RepositoryPackCollection.

        :param transport: Addresses the repository base directory
            (typically .bzr/repository/).
        :param index_transport: Addresses the directory containing indices.
        :param upload_transport: Addresses the directory into which packs are written
            while they're being created.
        :param pack_transport: Addresses the directory of existing complete packs.
        """
        self.repo = repo
        self.transport = transport
        self._index_transport = index_transport
        self._upload_transport = upload_transport
        self._pack_transport = pack_transport
        self._suffix_offsets = {'.rix': 0, '.iix': 1, '.tix': 2, '.six': 3}
        self.packs = []
        # name:Pack mapping
        self._packs_by_name = {}
        # the previous pack-names content
        self._packs_at_load = None
        # when a pack is being created by this object, the state of that pack.
        self._new_pack = None
        # aggregated revision index data
        self.revision_index = AggregateIndex()
        self.inventory_index = AggregateIndex()
        self.text_index = AggregateIndex()
        self.signature_index = AggregateIndex()
    def add_pack_to_memory(self, pack):
        """Make a Pack object available to the repository to satisfy queries.

        :param pack: A Pack object.
        """
        if pack.name in self._packs_by_name:
            raise AssertionError()
        self.packs.append(pack)
        self._packs_by_name[pack.name] = pack
        self.revision_index.add_index(pack.revision_index, pack)
        self.inventory_index.add_index(pack.inventory_index, pack)
        self.text_index.add_index(pack.text_index, pack)
        self.signature_index.add_index(pack.signature_index, pack)
    def all_packs(self):
        """Return a list of all the Pack objects this repository has.

        Note that an in-progress pack being created is not returned.

        :return: A list of Pack objects for all the packs in the repository.
        """
        result = []
        for name in self.names():
            result.append(self.get_pack_by_name(name))
        return result
"""Pack the pack collection incrementally.
1166
This will not attempt global reorganisation or recompression,
1167
rather it will just ensure that the total number of packs does
1168
not grow without bound. It uses the _max_pack_count method to
1169
determine if autopacking is needed, and the pack_distribution
1170
method to determine the number of revisions in each pack.
1172
If autopacking takes place then the packs name collection will have
1173
been flushed to disk - packing requires updating the name collection
1174
in synchronisation with certain steps. Otherwise the names collection
1177
:return: True if packing took place.
1179
# XXX: Should not be needed when the management of indices is sane.
1180
total_revisions = self.revision_index.combined_index.key_count()
1181
total_packs = len(self._names)
1182
if self._max_pack_count(total_revisions) >= total_packs:
1184
# XXX: the following may want to be a class, to pack with a given
1186
mutter('Auto-packing repository %s, which has %d pack files, '
1187
'containing %d revisions into %d packs.', self, total_packs,
1188
total_revisions, self._max_pack_count(total_revisions))
1189
# determine which packs need changing
1190
pack_distribution = self.pack_distribution(total_revisions)
1192
for pack in self.all_packs():
1193
revision_count = pack.get_revision_count()
1194
if revision_count == 0:
1195
# revision less packs are not generated by normal operation,
1196
# only by operations like sign-my-commits, and thus will not
1197
# tend to grow rapdily or without bound like commit containing
1198
# packs do - leave them alone as packing them really should
1199
# group their data with the relevant commit, and that may
1200
# involve rewriting ancient history - which autopack tries to
1201
# avoid. Alternatively we could not group the data but treat
1202
# each of these as having a single revision, and thus add
1203
# one revision for each to the total revision count, to get
1204
# a matching distribution.
1206
existing_packs.append((revision_count, pack))
1207
pack_operations = self.plan_autopack_combinations(
1208
existing_packs, pack_distribution)
1209
self._execute_pack_operations(pack_operations)
1212
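    # For example, a repository holding 10 revisions in 10 single-revision
    # packs has _max_pack_count(10) == 1 (the digit sum of 10), so autopack
    # combines all ten packs into one.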
    def _execute_pack_operations(self, pack_operations, _packer_class=Packer):
        """Execute a series of pack operations.

        :param pack_operations: A list of [revision_count, packs_to_combine].
        :param _packer_class: The class of packer to use (default: Packer).
        """
        for revision_count, packs in pack_operations:
            # we may have no-ops from the setup logic
            if len(packs) == 0:
                continue
            _packer_class(self, packs, '.autopack').pack()
            for pack in packs:
                self._remove_pack_from_memory(pack)
        # record the newly available packs and stop advertising the old
        # packs.
        self._save_pack_names(clear_obsolete_packs=True)
        # Move the old packs out of the way now they are no longer referenced.
        for revision_count, packs in pack_operations:
            self._obsolete_packs(packs)
    def lock_names(self):
        """Acquire the mutex around the pack-names index.

        This cannot be used in the middle of a read-only transaction on the
        repository.
        """
        self.repo.control_files.lock_write()
"""Pack the pack collection totally."""
1243
self.ensure_loaded()
1244
total_packs = len(self._names)
1246
# This is arguably wrong because we might not be optimal, but for
1247
# now lets leave it in. (e.g. reconcile -> one pack. But not
1250
total_revisions = self.revision_index.combined_index.key_count()
1251
# XXX: the following may want to be a class, to pack with a given
1253
mutter('Packing repository %s, which has %d pack files, '
1254
'containing %d revisions into 1 packs.', self, total_packs,
1256
# determine which packs need changing
1257
pack_distribution = [1]
1258
pack_operations = [[0, []]]
1259
for pack in self.all_packs():
1260
pack_operations[-1][0] += pack.get_revision_count()
1261
pack_operations[-1][1].append(pack)
1262
self._execute_pack_operations(pack_operations, OptimisingPacker)
1264
    def plan_autopack_combinations(self, existing_packs, pack_distribution):
        """Plan a pack operation.

        :param existing_packs: The packs to pack. (A list of (revcount, Pack)
            tuples).
        :param pack_distribution: A list with the number of revisions desired
            in each pack.
        """
        if len(existing_packs) <= len(pack_distribution):
            return []
        existing_packs.sort(reverse=True)
        pack_operations = [[0, []]]
        # plan out what packs to keep, and what to reorganise
        while len(existing_packs):
            # take the largest pack, and if it's less than the head of the
            # distribution chart we will include its contents in the new pack
            # for that position. If it's larger, we remove its size from the
            # distribution chart
            next_pack_rev_count, next_pack = existing_packs.pop(0)
            if next_pack_rev_count >= pack_distribution[0]:
                # this is already packed 'better' than this, so we can
                # not waste time packing it.
                while next_pack_rev_count > 0:
                    next_pack_rev_count -= pack_distribution[0]
                    if next_pack_rev_count >= 0:
                        # more to go
                        del pack_distribution[0]
                    else:
                        # didn't use that entire bucket up
                        pack_distribution[0] = -next_pack_rev_count
            else:
                # add the revisions we're going to add to the next output pack
                pack_operations[-1][0] += next_pack_rev_count
                # allocate this pack to the next pack sub operation
                pack_operations[-1][1].append(next_pack)
                if pack_operations[-1][0] >= pack_distribution[0]:
                    # this pack is used up, shift left.
                    del pack_distribution[0]
                    pack_operations.append([0, []])
        return pack_operations
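    # For example, with pack_distribution [1000] and existing packs of 500,
    # 400 and 100 revisions, all three land in the first bucket and the plan
    # combines them into a single 1000-revision pack.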
    def ensure_loaded(self):
        # NB: if you see an assertion error here, it's probably access against
        # an unlocked repo. Naughty.
        if not self.repo.is_locked():
            raise errors.ObjectNotLocked(self.repo)
        if self._names is None:
            self._names = {}
            self._packs_at_load = set()
            for index, key, value in self._iter_disk_pack_index():
                name = key[0]
                self._names[name] = self._parse_index_sizes(value)
                self._packs_at_load.add((key, value))
        # populate all the metadata.
        self.all_packs()
    def _parse_index_sizes(self, value):
        """Parse a string of index sizes."""
        return tuple([int(digits) for digits in value.split(' ')])
    def get_pack_by_name(self, name):
        """Get a Pack object by name.

        :param name: The name of the pack - e.g. '123456'
        :return: A Pack object.
        """
        try:
            return self._packs_by_name[name]
        except KeyError:
            rev_index = self._make_index(name, '.rix')
            inv_index = self._make_index(name, '.iix')
            txt_index = self._make_index(name, '.tix')
            sig_index = self._make_index(name, '.six')
            result = ExistingPack(self._pack_transport, name, rev_index,
                inv_index, txt_index, sig_index)
            self.add_pack_to_memory(result)
            return result
    def allocate(self, a_new_pack):
        """Allocate name in the list of packs.

        :param a_new_pack: A NewPack instance to be added to the collection of
            packs for this repository.
        """
        self.ensure_loaded()
        if a_new_pack.name in self._names:
            raise errors.BzrError(
                'Pack %r already exists in %s' % (a_new_pack.name, self))
        self._names[a_new_pack.name] = tuple(a_new_pack.index_sizes)
        self.add_pack_to_memory(a_new_pack)
    def _iter_disk_pack_index(self):
        """Iterate over the contents of the pack-names index.

        This is used when loading the list from disk, and before writing to
        detect updates from others during our write operation.

        :return: An iterator of the index contents.
        """
        return GraphIndex(self.transport, 'pack-names', None
                ).iter_all_entries()
    def _make_index(self, name, suffix):
        size_offset = self._suffix_offsets[suffix]
        index_name = name + suffix
        index_size = self._names[name][size_offset]
        return GraphIndex(
            self._index_transport, index_name, index_size)
    def _max_pack_count(self, total_revisions):
        """Return the maximum number of packs to use for total revisions.

        :param total_revisions: The total number of revisions in the
            repository.
        """
        if not total_revisions:
            return 1
        digits = str(total_revisions)
        result = 0
        for digit in digits:
            result += int(digit)
        return result
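    # For example, 2049 revisions gives a digit sum of 2 + 0 + 4 + 9 = 15, so
    # up to fifteen packs are tolerated before autopack kicks in.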
"""Provide an order to the underlying names."""
1389
return sorted(self._names.keys())
1391
    def _obsolete_packs(self, packs):
        """Move a number of packs which have been obsoleted out of the way.

        Each pack and its associated indices are moved out of the way.

        Note: for correctness this function should only be called after a new
        pack names index has been written without these pack names, and with
        the names of packs that contain the data previously available via these
        packs.

        :param packs: The packs to obsolete.
        :return: None.
        """
        for pack in packs:
            pack.pack_transport.rename(pack.file_name(),
                '../obsolete_packs/' + pack.file_name())
            # TODO: Probably needs to know all possible indices for this pack
            # - or maybe list the directory and move all indices matching this
            # name whether we recognize it or not?
            for suffix in ('.iix', '.six', '.tix', '.rix'):
                self._index_transport.rename(pack.name + suffix,
                    '../obsolete_packs/' + pack.name + suffix)
    def pack_distribution(self, total_revisions):
        """Generate a list of the number of revisions to put in each pack.

        :param total_revisions: The total number of revisions in the
            repository.
        """
        if total_revisions == 0:
            return [0]
        digits = reversed(str(total_revisions))
        result = []
        for exponent, count in enumerate(digits):
            size = 10 ** exponent
            for pos in range(int(count)):
                result.append(size)
        return list(reversed(result))
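    # For example, pack_distribution(2049) returns
    # [1000, 1000, 10, 10, 10, 10, 1, 1, 1, 1, 1, 1, 1, 1, 1] - one bucket per
    # decimal digit, mirroring the digit-sum limit used by _max_pack_count.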
    def _pack_tuple(self, name):
        """Return a tuple with the transport and file name for a pack name."""
        return self._pack_transport, name + '.pack'
    def _remove_pack_from_memory(self, pack):
        """Remove pack from the packs accessed by this repository.

        Only affects memory state, until self._save_pack_names() is invoked.
        """
        self._names.pop(pack.name)
        self._packs_by_name.pop(pack.name)
        self._remove_pack_indices(pack)
    def _remove_pack_indices(self, pack):
        """Remove the indices for pack from the aggregated indices."""
        self.revision_index.remove_index(pack.revision_index, pack)
        self.inventory_index.remove_index(pack.inventory_index, pack)
        self.text_index.remove_index(pack.text_index, pack)
        self.signature_index.remove_index(pack.signature_index, pack)
"""Clear all cached data."""
1452
# cached revision data
1453
self.repo._revision_knit = None
1454
self.revision_index.clear()
1455
# cached signature data
1456
self.repo._signature_knit = None
1457
self.signature_index.clear()
1458
# cached file text data
1459
self.text_index.clear()
1460
self.repo._text_knit = None
1461
# cached inventory data
1462
self.inventory_index.clear()
1463
# remove the open pack
1464
self._new_pack = None
1465
# information about packs.
1468
self._packs_by_name = {}
1469
self._packs_at_load = None
1471
    def _make_index_map(self, index_suffix):
        """Return information on existing indices.

        :param index_suffix: Index suffix added to pack name.

        :returns: (pack_map, indices) where indices is a list of GraphIndex
            objects, and pack_map is a mapping from those objects to the
            pack tuple they describe.
        """
        # TODO: stop using this; it creates new indices unnecessarily.
        self.ensure_loaded()
        suffix_map = {'.rix': 'revision_index',
            '.six': 'signature_index',
            '.iix': 'inventory_index',
            '.tix': 'text_index',
            }
        return self._packs_list_to_pack_map_and_index_list(self.all_packs(),
            suffix_map[index_suffix])
    def _packs_list_to_pack_map_and_index_list(self, packs, index_attribute):
        """Convert a list of packs to an index pack map and index list.

        :param packs: The packs list to process.
        :param index_attribute: The attribute that the desired index is found
            on.
        :return: A tuple (map, list) where map contains the dict from
            index:pack_tuple, and list contains the indices in the same order
            as the packs list.
        """
        indices = []
        pack_map = {}
        for pack in packs:
            index = getattr(pack, index_attribute)
            indices.append(index)
            pack_map[index] = (pack.pack_transport, pack.file_name())
        return pack_map, indices
    def _index_contents(self, pack_map, key_filter=None):
        """Get an iterable of the index contents from a pack_map.

        :param pack_map: A map from indices to pack details.
        :param key_filter: An optional filter to limit the
            keys returned.
        """
        indices = [index for index in pack_map.iterkeys()]
        all_index = CombinedGraphIndex(indices)
        if key_filter is None:
            return all_index.iter_all_entries()
        else:
            return all_index.iter_entries(key_filter)
    def _unlock_names(self):
        """Release the mutex around the pack-names index."""
        self.repo.control_files.unlock()
def _save_pack_names(self, clear_obsolete_packs=False):
1527
"""Save the list of packs.
1529
This will take out the mutex around the pack names list for the
1530
duration of the method call. If concurrent updates have been made, a
1531
three-way merge between the current list and the current in memory list
1534
:param clear_obsolete_packs: If True, clear out the contents of the
1535
obsolete_packs directory.
1539
builder = GraphIndexBuilder()
1540
# load the disk nodes across
1542
for index, key, value in self._iter_disk_pack_index():
1543
disk_nodes.add((key, value))
1544
# do a two-way diff against our original content
1545
current_nodes = set()
1546
for name, sizes in self._names.iteritems():
1548
((name, ), ' '.join(str(size) for size in sizes)))
1549
deleted_nodes = self._packs_at_load - current_nodes
1550
new_nodes = current_nodes - self._packs_at_load
1551
disk_nodes.difference_update(deleted_nodes)
1552
disk_nodes.update(new_nodes)
1553
# TODO: handle same-name, index-size-changes here -
1554
# e.g. use the value from disk, not ours, *unless* we're the one
1556
for key, value in disk_nodes:
1557
builder.add_node(key, value)
1558
self.transport.put_file('pack-names', builder.finish(),
1559
mode=self.repo.bzrdir._get_file_mode())
1560
# move the baseline forward
1561
self._packs_at_load = disk_nodes
1562
if clear_obsolete_packs:
1563
self._clear_obsolete_packs()
1565
self._unlock_names()
1566
# synchronise the memory packs list with what we just wrote:
1567
new_names = dict(disk_nodes)
1568
# drop no longer present nodes
1569
for pack in self.all_packs():
1570
if (pack.name,) not in new_names:
1571
self._remove_pack_from_memory(pack)
1572
# add new nodes/refresh existing ones
1573
for key, value in disk_nodes:
1575
sizes = self._parse_index_sizes(value)
1576
if name in self._names:
1578
if sizes != self._names[name]:
1579
# the pack for name has had its indices replaced - rare but
1580
# important to handle. XXX: probably can never happen today
1581
# because the three-way merge code above does not handle it
1582
# - you may end up adding the same key twice to the new
1583
# disk index because the set values are the same, unless
1584
# the only index shows up as deleted by the set difference
1585
# - which it may. Until there is a specific test for this,
1586
# assume its broken. RBC 20071017.
1587
self._remove_pack_from_memory(self.get_pack_by_name(name))
1588
self._names[name] = sizes
1589
self.get_pack_by_name(name)
1592
self._names[name] = sizes
1593
self.get_pack_by_name(name)
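
    # Illustrative sketch (editorial addition): the merge above is plain set
    # arithmetic over (key, value) tuples, e.g.:
    #
    #   at_load = set([(('a',), '12 34'), (('b',), '5 6')])
    #   current = set([(('b',), '5 6'), (('c',), '7 8')])  # we dropped a, added c
    #   disk = set([(('a',), '12 34'), (('b',), '5 6'), (('d',), '9 9')])
    #   disk.difference_update(at_load - current)   # remove our deletion: a
    #   disk.update(current - at_load)              # add our creation: c
    #   # disk is now the packs b, c and d: our edits merged with a
    #   # concurrent writer's new pack d.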

    def _clear_obsolete_packs(self):
        """Delete everything from the obsolete-packs directory."""
        obsolete_pack_transport = self.transport.clone('obsolete_packs')
        for filename in obsolete_pack_transport.list_dir('.'):
            try:
                obsolete_pack_transport.delete(filename)
            except (errors.PathError, errors.TransportError), e:
                warning("couldn't delete obsolete pack, skipping it:\n%s" % (e,))

    def _start_write_group(self):
        # Do not permit preparation for writing if we're not in a 'write lock'.
        if not self.repo.is_write_locked():
            raise errors.NotWriteLocked(self)
        self._new_pack = NewPack(self._upload_transport, self._index_transport,
            self._pack_transport, upload_suffix='.pack',
            file_mode=self.repo.bzrdir._get_file_mode())
        # allow writing: queue writes to a new index
        self.revision_index.add_writable_index(self._new_pack.revision_index,
            self._new_pack)
        self.inventory_index.add_writable_index(self._new_pack.inventory_index,
            self._new_pack)
        self.text_index.add_writable_index(self._new_pack.text_index,
            self._new_pack)
        self.signature_index.add_writable_index(self._new_pack.signature_index,
            self._new_pack)

        self.repo.inventories._index._add_callback = self.inventory_index.add_callback
        self.repo.revisions._index._add_callback = self.revision_index.add_callback
        self.repo.signatures._index._add_callback = self.signature_index.add_callback
        self.repo.texts._index._add_callback = self.text_index.add_callback

    def _abort_write_group(self):
        # FIXME: just drop the transient index.
        # forget what names there are
        if self._new_pack is not None:
            self._new_pack.abort()
            self._remove_pack_indices(self._new_pack)
            self._new_pack = None
        self.repo._text_knit = None

    def _commit_write_group(self):
        self._remove_pack_indices(self._new_pack)
        if self._new_pack.data_inserted():
            # get all the data to disk and ready to use
            self._new_pack.finish()
            self.allocate(self._new_pack)
            self._new_pack = None
            if not self.autopack():
                # when autopack takes no steps, the names list is still
                # unsaved.
                self._save_pack_names()
        else:
            self._new_pack.abort()
            self._new_pack = None
        self.repo._text_knit = None
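
    # Illustrative sketch (editorial addition): the write-group lifecycle the
    # three methods above implement, seen from a hypothetical repository user:
    #
    #   repo.lock_write()
    #   repo.start_write_group()      # _start_write_group: opens a NewPack
    #   try:
    #       # ... add texts, inventories, revisions ...
    #       repo.commit_write_group() # _commit_write_group: finish + autopack
    #   except:
    #       repo.abort_write_group()  # _abort_write_group: discard the pack
    #       raise
    #   finally:
    #       repo.unlock()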


class KnitPackRepository(KnitRepository):
    """Repository with knit objects stored inside pack containers.

    The layering for a KnitPackRepository is:

    Graph        |  HPSS    | Repository public layer |
    ===================================================
    Tuple based apis below, string based, and key based apis above
    ---------------------------------------------------
    KnitVersionedFiles
      Provides .texts, .revisions etc
      This adapts the N-tuple keys to physical knit records which only have a
      single string identifier (for historical reasons), which in older formats
      was always the revision_id, and in the mapped code for packs is always
      the last element of key tuples.
    ---------------------------------------------------
    GraphIndex
      A separate GraphIndex is used for each of the
      texts/inventories/revisions/signatures contained within each individual
      pack file. The GraphIndex layer works in N-tuples and is unaware of any
      semantic meaning being placed on them.
    ===================================================
    """

    def __init__(self, _format, a_bzrdir, control_files, _commit_builder_class,
        _serializer):
        KnitRepository.__init__(self, _format, a_bzrdir, control_files,
            _commit_builder_class, _serializer)
        index_transport = self._transport.clone('indices')
        self._pack_collection = RepositoryPackCollection(self, self._transport,
            index_transport,
            self._transport.clone('upload'),
            self._transport.clone('packs'))
        self.inventories = KnitVersionedFiles(
            _KnitGraphIndex(self._pack_collection.inventory_index.combined_index,
                add_callback=self._pack_collection.inventory_index.add_callback,
                deltas=True, parents=True, is_locked=self.is_locked),
            data_access=self._pack_collection.inventory_index.data_access,
            max_delta_chain=200)
        self.revisions = KnitVersionedFiles(
            _KnitGraphIndex(self._pack_collection.revision_index.combined_index,
                add_callback=self._pack_collection.revision_index.add_callback,
                deltas=False, parents=True, is_locked=self.is_locked),
            data_access=self._pack_collection.revision_index.data_access,
            max_delta_chain=0)
        self.signatures = KnitVersionedFiles(
            _KnitGraphIndex(self._pack_collection.signature_index.combined_index,
                add_callback=self._pack_collection.signature_index.add_callback,
                deltas=False, parents=False, is_locked=self.is_locked),
            data_access=self._pack_collection.signature_index.data_access,
            max_delta_chain=0)
        self.texts = KnitVersionedFiles(
            _KnitGraphIndex(self._pack_collection.text_index.combined_index,
                add_callback=self._pack_collection.text_index.add_callback,
                deltas=True, parents=True, is_locked=self.is_locked),
            data_access=self._pack_collection.text_index.data_access,
            max_delta_chain=200)
        # True when the repository object is 'write locked' (as opposed to the
        # physical lock only taken out around changes to the pack-names list.)
        # Another way to represent this would be a decorator around the control
        # files object that presents logical locks as physical ones - if this
        # gets ugly consider that alternative design. RBC 20071011
        self._write_lock_count = 0
        self._transaction = None
        # for tests
        self._reconcile_does_inventory_gc = True
        self._reconcile_fixes_text_parents = True
        self._reconcile_backsup_inventory = False
        self._fetch_order = 'unordered'
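
    # Editorial note (not in the original source): the four KnitVersionedFiles
    # facades above differ only in tuning - inventories and texts use delta
    # compression (deltas=True, max_delta_chain=200), revisions and signatures
    # are stored as fulltexts (max_delta_chain=0), and only signatures carry
    # no parent graph (parents=False).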

    def _warn_if_deprecated(self):
        # This class isn't deprecated, but one sub-format is
        if isinstance(self._format, RepositoryFormatKnitPack5RichRootBroken):
            from bzrlib import repository
            if repository._deprecation_warning_done:
                return
            repository._deprecation_warning_done = True
            warning("Format %s for %s is deprecated - please use"
                " 'bzr upgrade --1.6.1-rich-root'"
                % (self._format, self.bzrdir.transport.base))

    def _abort_write_group(self):
        self._pack_collection._abort_write_group()

    def _find_inconsistent_revision_parents(self):
        """Find revisions with incorrectly cached parents.

        :returns: a list of tuples of (revision-id, parents-in-index,
            parents-in-revision).
        """
        if not self.is_locked():
            raise errors.ObjectNotLocked(self)
        pb = ui.ui_factory.nested_progress_bar()
        result = []
        try:
            revision_nodes = self._pack_collection.revision_index \
                .combined_index.iter_all_entries()
            index_positions = []
            # Get the cached index values for all revisions, and also the location
            # in each index of the revision text so we can perform linear IO.
            for index, key, value, refs in revision_nodes:
                pos, length = value[1:].split(' ')
                index_positions.append((index, int(pos), key[0],
                    tuple(parent[0] for parent in refs[0])))
                pb.update("Reading revision index.", 0, 0)
            index_positions.sort()
            batch_count = len(index_positions) / 1000 + 1
            pb.update("Checking cached revision graph.", 0, batch_count)
            for offset in xrange(batch_count):
                pb.update("Checking cached revision graph.", offset)
                to_query = index_positions[offset * 1000:(offset + 1) * 1000]
                if not to_query:
                    break
                rev_ids = [item[2] for item in to_query]
                revs = self.get_revisions(rev_ids)
                for revision, item in zip(revs, to_query):
                    index_parents = item[3]
                    rev_parents = tuple(revision.parent_ids)
                    if index_parents != rev_parents:
                        result.append((revision.revision_id, index_parents,
                            rev_parents))
        finally:
            pb.finished()
        return result
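
    # Illustrative sketch (editorial addition): the batching above slices
    # index_positions into runs of 1000 so get_revisions() sees chunked,
    # roughly disk-ordered requests:
    #
    #   items = range(2500)
    #   batch_count = len(items) / 1000 + 1     # 3, under Python 2 division
    #   for offset in xrange(batch_count):
    #       batch = items[offset * 1000:(offset + 1) * 1000]
    #       # yields batches of 1000, 1000 and 500 items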

    @symbol_versioning.deprecated_method(symbol_versioning.one_one)
    def get_parents(self, revision_ids):
        """See graph._StackedParentsProvider.get_parents."""
        parent_map = self.get_parent_map(revision_ids)
        return [parent_map.get(r, None) for r in revision_ids]

    def _make_parents_provider(self):
        return graph.CachingParentsProvider(self)

    def _refresh_data(self):
        if self._write_lock_count == 1 or (
            self.control_files._lock_count == 1 and
            self.control_files._lock_mode == 'r'):
            # forget what names there are
            self._pack_collection.reset()
            # XXX: Better to do an in-memory merge when acquiring a new lock -
            # factor out code from _save_pack_names.
            self._pack_collection.ensure_loaded()

    def _start_write_group(self):
        self._pack_collection._start_write_group()

    def _commit_write_group(self):
        return self._pack_collection._commit_write_group()

    def get_transaction(self):
        if self._write_lock_count:
            return self._transaction
        else:
            return self.control_files.get_transaction()

    def is_locked(self):
        return self._write_lock_count or self.control_files.is_locked()

    def is_write_locked(self):
        return self._write_lock_count

    def lock_write(self, token=None):
        if not self._write_lock_count and self.is_locked():
            raise errors.ReadOnlyError(self)
        self._write_lock_count += 1
        if self._write_lock_count == 1:
            self._transaction = transactions.WriteTransaction()
            for repo in self._fallback_repositories:
                # Writes don't affect fallback repos
                repo.lock_read()
        self._refresh_data()

    def lock_read(self):
        if self._write_lock_count:
            self._write_lock_count += 1
        else:
            self.control_files.lock_read()
            for repo in self._fallback_repositories:
                # Writes don't affect fallback repos
                repo.lock_read()
        self._refresh_data()

    def leave_lock_in_place(self):
        # not supported - raise an error
        raise NotImplementedError(self.leave_lock_in_place)

    def dont_leave_lock_in_place(self):
        # not supported - raise an error
        raise NotImplementedError(self.dont_leave_lock_in_place)

    @needs_write_lock
    def pack(self):
        """Compress the data within the repository.

        This will pack all the data to a single pack. In future it may
        recompress deltas or do other such expensive operations.
        """
        self._pack_collection.pack()

    @needs_write_lock
    def reconcile(self, other=None, thorough=False):
        """Reconcile this repository."""
        from bzrlib.reconcile import PackReconciler
        reconciler = PackReconciler(self, thorough=thorough)
        reconciler.reconcile()
        return reconciler

    def unlock(self):
        if self._write_lock_count == 1 and self._write_group is not None:
            self.abort_write_group()
            self._transaction = None
            self._write_lock_count = 0
            raise errors.BzrError(
                'Must end write group before releasing write lock on %s'
                % self)
        if self._write_lock_count:
            self._write_lock_count -= 1
            if not self._write_lock_count:
                transaction = self._transaction
                self._transaction = None
                transaction.finish()
                for repo in self._fallback_repositories:
                    repo.unlock()
        else:
            self.control_files.unlock()
            for repo in self._fallback_repositories:
                repo.unlock()
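
    # Illustrative sketch (editorial addition): lock_write/lock_read count
    # logical locks instead of taking a physical lock per call, so nesting is
    # cheap and unlock() only tears state down on the last exit:
    #
    #   repo.lock_write()   # _write_lock_count 0 -> 1, new WriteTransaction
    #   repo.lock_read()    # a write lock is held, so the count becomes 2
    #   repo.unlock()       # 2 -> 1, transaction stays alive
    #   repo.unlock()       # 1 -> 0, transaction.finish() runs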


class RepositoryFormatPack(MetaDirRepositoryFormat):
    """Format logic for pack structured repositories.

    This repository format has:
     - a list of packs in pack-names
     - packs in packs/NAME.pack
     - indices in indices/NAME.{iix,six,tix,rix}
     - knit deltas in the packs, knit indices mapped to the indices.
     - thunk objects to support the knits programming API.
     - a format marker of its own
     - an optional 'shared-storage' flag
     - an optional 'no-working-trees' flag
     - a LockDir lock
    """

    # Set this attribute in derived classes to control the repository class
    # created by open and initialize.
    repository_class = None
    # Set this attribute in derived classes to control the
    # _commit_builder_class that the repository objects will have passed to
    # their constructor.
    _commit_builder_class = None
    # Set this attribute in derived classes to control the _serializer that the
    # repository objects will have passed to their constructor.
    _serializer = None
    # External references are not supported in pack repositories yet.
    supports_external_lookups = False

    def initialize(self, a_bzrdir, shared=False):
        """Create a pack based repository.

        :param a_bzrdir: bzrdir to contain the new repository; must already
            be initialized.
        :param shared: If true the repository will be initialized as a shared
            repository.
        """
        mutter('creating repository in %s.', a_bzrdir.transport.base)
        dirs = ['indices', 'obsolete_packs', 'packs', 'upload']
        builder = GraphIndexBuilder()
        files = [('pack-names', builder.finish())]
        utf8_files = [('format', self.get_format_string())]

        self._upload_blank_content(a_bzrdir, dirs, files, utf8_files, shared)
        return self.open(a_bzrdir=a_bzrdir, _found=True)
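
    # Illustrative sketch (editorial addition): creating and reopening a pack
    # repository through a concrete format subclass; ``a_bzrdir`` is a
    # hypothetical, already-initialized BzrDir.
    #
    #   format = RepositoryFormatKnitPack1()
    #   repo = format.initialize(a_bzrdir)    # writes dirs, pack-names, format
    #   repo = format.open(a_bzrdir, _found=True)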

    def open(self, a_bzrdir, _found=False, _override_transport=None):
        """See RepositoryFormat.open().

        :param _override_transport: INTERNAL USE ONLY. Allows opening the
                                    repository at a slightly different url
                                    than normal. I.e. during 'upgrade'.
        """
        if not _found:
            format = RepositoryFormat.find_format(a_bzrdir)
        if _override_transport is not None:
            repo_transport = _override_transport
        else:
            repo_transport = a_bzrdir.get_repository_transport(None)
        control_files = lockable_files.LockableFiles(repo_transport,
                                'lock', lockdir.LockDir)
        return self.repository_class(_format=self,
                              a_bzrdir=a_bzrdir,
                              control_files=control_files,
                              _commit_builder_class=self._commit_builder_class,
                              _serializer=self._serializer)


class RepositoryFormatKnitPack1(RepositoryFormatPack):
    """A no-subtrees parameterized Pack repository.

    This format was introduced in 0.92.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackCommitBuilder
    _serializer = xml5.serializer_v5

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir('pack-0.92')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Bazaar pack repository format 1 (needs bzr 0.92)\n"

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return "Packs containing knits without subtree support"

    def check_conversion_target(self, target_format):
        pass
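
# Editorial note (not in the original source): a repository on disk is matched
# to its format class by the exact bytes of get_format_string(), which
# initialize() writes into the repository's 'format' file and find_format()
# reads back - so the string, trailing newline included, must never change
# once a format has shipped. For example:
#
#   fmt = RepositoryFormatKnitPack1()
#   fmt.get_format_string() == "Bazaar pack repository format 1 (needs bzr 0.92)\n"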


class RepositoryFormatKnitPack3(RepositoryFormatPack):
    """A subtrees parameterized Pack repository.

    This repository format uses the xml7 serializer to get:
     - support for recording full info about the tree root
     - support for recording tree-references

    This format was introduced in 0.92.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackRootCommitBuilder
    rich_root_data = True
    supports_tree_reference = True
    _serializer = xml7.serializer_v7

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir(
            'pack-0.92-subtree')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def check_conversion_target(self, target_format):
        if not target_format.rich_root_data:
            raise errors.BadConversionTarget(
                'Does not support rich root data.', target_format)
        if not getattr(target_format, 'supports_tree_reference', False):
            raise errors.BadConversionTarget(
                'Does not support nested trees', target_format)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Bazaar pack repository format 1 with subtree support (needs bzr 0.92)\n"

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return "Packs containing knits with subtree support\n"


class RepositoryFormatKnitPack4(RepositoryFormatPack):
    """A rich-root, no subtrees parameterized Pack repository.

    This repository format uses the xml6 serializer to get:
     - support for recording full info about the tree root

    This format was introduced in 1.0.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackRootCommitBuilder
    rich_root_data = True
    supports_tree_reference = False
    _serializer = xml6.serializer_v6

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir(
            'rich-root-pack')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def check_conversion_target(self, target_format):
        if not target_format.rich_root_data:
            raise errors.BadConversionTarget(
                'Does not support rich root data.', target_format)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return ("Bazaar pack repository format 1 with rich root"
                " (needs bzr 1.0)\n")

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return "Packs containing knits with rich root support\n"


class RepositoryFormatKnitPack5(RepositoryFormatPack):
    """Repository that supports external references to allow stacking.

    New in release 1.6.

    Supports external lookups, which results in non-truncated ghosts after
    reconcile compared to pack-0.92 formats.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackCommitBuilder
    _serializer = xml5.serializer_v5
    supports_external_lookups = True

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir('development1')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Bazaar RepositoryFormatKnitPack5 (bzr 1.6)\n"

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return "Packs 5 (adds stacking support, requires bzr 1.6)"

    def check_conversion_target(self, target_format):
        pass


class RepositoryFormatKnitPack5RichRoot(RepositoryFormatPack):
    """A repository with rich roots and stacking.

    New in release 1.6.1.

    Supports stacking on other repositories, allowing data to be accessed
    without being stored locally.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackRootCommitBuilder
    rich_root_data = True
    supports_tree_reference = False # no subtrees
    _serializer = xml6.serializer_v6
    supports_external_lookups = True

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir(
            '1.6.1-rich-root')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def check_conversion_target(self, target_format):
        if not target_format.rich_root_data:
            raise errors.BadConversionTarget(
                'Does not support rich root data.', target_format)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6.1)\n"

    def get_format_description(self):
        return "Packs 5 rich-root (adds stacking support, requires bzr 1.6.1)"


class RepositoryFormatKnitPack5RichRootBroken(RepositoryFormatPack):
    """A repository with rich roots and external references.

    New in release 1.6.

    Supports external lookups, which results in non-truncated ghosts after
    reconcile compared to pack-0.92 formats.

    This format was deprecated because the serializer it uses accidentally
    supported subtrees, when the format was not intended to. This meant that
    someone could accidentally fetch from an incorrect repository.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackRootCommitBuilder
    rich_root_data = True
    supports_tree_reference = False # no subtrees
    _serializer = xml7.serializer_v7

    supports_external_lookups = True

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir(
            'development1-subtree')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def check_conversion_target(self, target_format):
        if not target_format.rich_root_data:
            raise errors.BadConversionTarget(
                'Does not support rich root data.', target_format)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6)\n"

    def get_format_description(self):
        return ("Packs 5 rich-root (adds stacking support, requires bzr 1.6)"
                " (deprecated)")


class RepositoryFormatPackDevelopment1(RepositoryFormatPack):
    """A no-subtrees development repository.

    This format should be retained until the second release after bzr 1.5.

    Supports external lookups, which results in non-truncated ghosts after
    reconcile compared to pack-0.92 formats.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackCommitBuilder
    _serializer = xml5.serializer_v5
    supports_external_lookups = True

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir('development1')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Bazaar development format 1 (needs bzr.dev from before 1.6)\n"

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return ("Development repository format, currently the same as "
            "pack-0.92 with external reference support.\n")

    def check_conversion_target(self, target_format):
        pass


class RepositoryFormatPackDevelopment1Subtree(RepositoryFormatPack):
    """A subtrees development repository.

    This format should be retained until the second release after bzr 1.5.

    Supports external lookups, which results in non-truncated ghosts after
    reconcile compared to pack-0.92 formats.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackRootCommitBuilder
    rich_root_data = True
    supports_tree_reference = True
    _serializer = xml7.serializer_v7
    supports_external_lookups = True

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir(
            'development1-subtree')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def check_conversion_target(self, target_format):
        if not target_format.rich_root_data:
            raise errors.BadConversionTarget(
                'Does not support rich root data.', target_format)
        if not getattr(target_format, 'supports_tree_reference', False):
            raise errors.BadConversionTarget(
                'Does not support nested trees', target_format)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return ("Bazaar development format 1 with subtree support "
            "(needs bzr.dev from before 1.6)\n")

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return ("Development repository format, currently the same as "
            "pack-0.92-subtree with external reference support.\n")