~bzr-pqm/bzr/bzr.dev

Viewing changes to repofmt.py

  • Committer: John Arbash Meinel
  • Date: 2009-02-26 22:09:34 UTC
  • mfrom: (0.20.20 trunk)
  • mto: (0.20.21 trunk)
  • mto: This revision was merged to the branch mainline in revision 4280.
  • Revision ID: john@arbash-meinel.com-20090226220934-lnqvbe6uqle8eoum
Bring in the missing update from 'trunk'

@@ -20,7 +20,14 @@
 import md5
 import time
 
-from bzrlib import debug, errors, pack, repository
+from bzrlib import (
+    debug,
+    errors,
+    knit,
+    pack,
+    repository,
+    ui,
+    )
 from bzrlib.btree_index import (
     BTreeBuilder,
     BTreeGraphIndex,
@@ -276,25 +283,49 @@
                 raise errors.BzrError('call to %s.pack() while another pack is'
                                       ' being written.'
                                       % (self.__class__.__name__,))
+            new_pack = self.pack_factory(self, 'autopack',
+                                         self.repo.bzrdir._get_file_mode())
+            new_pack.set_write_cache_size(1024*1024)
             # TODO: A better alternative is to probably use Packer.open_pack(), and
             #       then create a GroupCompressVersionedFiles() around the
             #       target pack to insert into.
-            self._start_write_group()
+            pb = ui.ui_factory.nested_progress_bar()
             try:
-                for index_name, vf_name in to_copy:
+                for idx, (index_name, vf_name) in enumerate(to_copy):
+                    pb.update('repacking %s' % (vf_name,), idx + 1, len(to_copy))
                     keys = set()
-                    new_index = getattr(self._new_pack, index_name)
+                    new_index = getattr(new_pack, index_name)
                     new_index.set_optimize(for_size=True)
                     for pack in packs:
                         source_index = getattr(pack, index_name)
                         keys.update(e[1] for e in source_index.iter_all_entries())
-                    vf = getattr(self.repo, vf_name)
-                    stream = vf.get_record_stream(keys, 'gc-optimal', True)
-                    vf.insert_record_stream(stream)
+                    source_vf = getattr(self.repo, vf_name)
+                    target_access = knit._DirectPackAccess({})
+                    target_access.set_writer(new_pack._writer, new_index,
+                                             new_pack.access_tuple())
+                    target_vf = GroupCompressVersionedFiles(
+                        _GCGraphIndex(new_index,
+                                      add_callback=new_index.add_nodes,
+                                      parents=source_vf._index._parents,
+                                      is_locked=self.repo.is_locked),
+                        access=target_access,
+                        delta=source_vf._delta)
+                    stream = source_vf.get_record_stream(keys, 'gc-optimal', True)
+                    target_vf.insert_record_stream(stream)
+                new_pack._check_references() # shouldn't be needed
             except:
-                self._abort_write_group()
+                pb.finished()
+                new_pack.abort()
+                raise
             else:
-                self._commit_write_group()
+                pb.finished()
+                if not new_pack.data_inserted():
+                    raise AssertionError('We copied from pack files,'
+                                         ' but had no data copied')
+                    # we need to abort somehow, because we don't want to remove
+                    # the other packs
+                new_pack.finish()
+                self.allocate(new_pack)
             for pack in packs:
                 self._remove_pack_from_memory(pack)
         # record the newly available packs and stop advertising the old
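
The heart of this hunk is a stream-copy repack: collect every key from the source pack indices, pull one 'gc-optimal' record stream per versioned file, push it into a GroupCompressVersionedFiles wrapped around the new pack, and only publish the pack (finish()/allocate()) once data_inserted() confirms something was copied; on any error the half-written pack is aborted instead. Below is a minimal runnable sketch of that control flow; ToyStore and repack are hypothetical in-memory stand-ins, not bzrlib's VersionedFiles/NewPack API.

# Hypothetical stand-ins, not bzrlib API: just enough to show the flow.

class ToyStore(object):
    """In-memory stand-in for a VersionedFiles-like object."""

    def __init__(self, records=None):
        self._records = dict(records or {})

    def keys(self):
        return set(self._records)

    def get_record_stream(self, keys, ordering, include_delta_closure):
        # A real store would honour 'gc-optimal' ordering; this toy one
        # just yields the requested records in arbitrary order.
        for key in keys:
            yield key, self._records[key]

    def insert_record_stream(self, stream):
        for key, value in stream:
            self._records[key] = value


def repack(source_stores, new_store):
    """Stream every record from source_stores into a fresh new_store."""
    try:
        for idx, store in enumerate(source_stores):
            # Mirrors pb.update(...) above: one progress tick per source.
            print('repacking %d/%d' % (idx + 1, len(source_stores)))
            stream = store.get_record_stream(store.keys(), 'gc-optimal', True)
            new_store.insert_record_stream(stream)
    except:
        # On failure, discard the half-written target and re-raise,
        # as new_pack.abort() does in the diff.
        new_store._records.clear()
        raise
    else:
        # Like the data_inserted() check: refuse to publish an empty pack.
        if not new_store._records:
            raise AssertionError('We copied from pack files,'
                                 ' but had no data copied')
    return new_store


combined = repack([ToyStore({('a',): 'x'}), ToyStore({('b',): 'y'})],
                  ToyStore())
print(sorted(combined.keys()))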
@@ -382,7 +413,7 @@
         #       because the source can be smart about extracting multiple
         #       in-a-row (and sharing strings). Topological is better for
         #       remote, because we access less data.
-        self._fetch_order = 'topological'
+        self._fetch_order = 'unordered'
         self._fetch_gc_optimal = True
         self._fetch_uses_deltas = False
 
@@ -446,7 +477,7 @@
             self._reconcile_does_inventory_gc = True
             self._reconcile_fixes_text_parents = True
             self._reconcile_backsup_inventory = False
-            self._fetch_order = 'topological'
+            self._fetch_order = 'unordered'
             self._fetch_gc_optimal = True
             self._fetch_uses_deltas = False
 
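
Both _fetch_order changes flip the same switch: a gc-optimized repository stops demanding records parents-before-children during fetch and lets the source emit them in whatever order is cheapest to extract, matching the comment above that topological order only pays off for remote access. The sketch below, plain Python rather than bzrlib code, shows what the 'topological' constraint amounts to; topological_order is a hypothetical helper.

# Plain illustration, not bzrlib API.  'topological' forces every record
# to appear after all of its parents; 'unordered' lets the source pick
# any order it likes (e.g. raw pack-file order, so adjacent texts can
# share extraction work).

def topological_order(parents):
    """Yield keys so that each key comes after all of its parents."""
    emitted = set()
    pending = sorted(parents)
    while pending:
        remaining = []
        for key in pending:
            if all(p in emitted for p in parents[key]):
                emitted.add(key)
                yield key
            else:
                remaining.append(key)
        if len(remaining) == len(pending):
            raise ValueError('cycle in parent graph')
        pending = remaining

graph = {'A': [], 'B': ['A'], 'C': ['A'], 'D': ['B', 'C']}
print(list(topological_order(graph)))  # parents first: A, B, C, D
print(sorted(graph))                   # 'unordered': any order is legal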