        self.addCleanup(tree.unlock)
        pack = tree.branch.repository._pack_collection.get_pack_by_name(
            tree.branch.repository._pack_collection.names()[0])
        # revision access tends to be tip->ancestor, so ordering that way on
        # disk is a good idea.
        for _1, key, val, refs in pack.revision_index.iter_all_entries():
            if key == ('1',):

        self.assertRaises(errors.NoSuchRevision,
            missing_ghost.get_inventory, 'ghost')

    def make_write_ready_repo(self):
        repo = self.make_repository('.', format=self.get_format())
        repo.lock_write()
        repo.start_write_group()
        return repo
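
    # The tests below each mark one index (inventories, revisions, signatures,
    # texts) as missing a compression parent and check that commit_write_group
    # keeps raising BzrCheckError until the write group is aborted. Any key
    # works as the missing parent; the ('junk',) values are just placeholders.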
    def test_missing_inventories_compression_parent_prevents_commit(self):
        repo = self.make_write_ready_repo()
        key = ('junk',)  # arbitrary missing key
        repo.inventories._index._missing_compression_parents.add(key)
        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
        repo.abort_write_group()
        repo.unlock()

    def test_missing_revisions_compression_parent_prevents_commit(self):
        repo = self.make_write_ready_repo()
        key = ('junk',)
        repo.revisions._index._missing_compression_parents.add(key)
        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
        repo.abort_write_group()
        repo.unlock()

    def test_missing_signatures_compression_parent_prevents_commit(self):
        repo = self.make_write_ready_repo()
        key = ('junk',)
        repo.signatures._index._missing_compression_parents.add(key)
        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
        repo.abort_write_group()
        repo.unlock()

    def test_missing_text_compression_parent_prevents_commit(self):
        repo = self.make_write_ready_repo()
        key = ('some', 'junk')
        repo.texts._index._missing_compression_parents.add(key)
        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
        repo.abort_write_group()
        repo.unlock()

    def test_supports_external_lookups(self):
        repo = self.make_repository('.', format=self.get_format())
        self.assertEqual(self.format_supports_external_lookups,
            repo._format.supports_external_lookups)

        self.assertRaises(Exception, repo.abort_write_group)
        if token is not None:
            repo.leave_lock_in_place()
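
    # Suspending a write group leaves the partially written pack in the
    # repository's upload directory; the returned token is the md5 of the
    # pack's contents, and the pack file is named '<token>.pack'.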
    def test_suspend_write_group(self):
        self.vfs_transport_factory = memory.MemoryServer
        repo = self.make_repository('repo')
        token = repo.lock_write()
        self.addCleanup(repo.unlock)
        repo.start_write_group()
        repo.texts.add_lines(('file-id', 'revid'), (), ['lines'])
        wg_tokens = repo.suspend_write_group()
        expected_pack_name = wg_tokens[0] + '.pack'
        upload_transport = repo._pack_collection._upload_transport
        limbo_files = upload_transport.list_dir('')
        self.assertTrue(expected_pack_name in limbo_files, limbo_files)
        md5 = osutils.md5(upload_transport.get_bytes(expected_pack_name))
        self.assertEqual(wg_tokens[0], md5.hexdigest())
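
    # Aborting a resumed write group must clean up the suspended pack: both
    # the upload and pack directories should end up empty.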
    def test_resume_write_group_then_abort(self):
        # Create a repo, start a write group, insert some data, suspend.
        self.vfs_transport_factory = memory.MemoryServer
        repo = self.make_repository('repo')
        token = repo.lock_write()
        self.addCleanup(repo.unlock)
        repo.start_write_group()
        text_key = ('file-id', 'revid')
        repo.texts.add_lines(text_key, (), ['lines'])
        wg_tokens = repo.suspend_write_group()
        # Get a fresh repository object for the repo on the filesystem.
        same_repo = repo.bzrdir.open_repository()
        # Resume the write group and abort it; nothing should be left behind.
        same_repo.lock_write()
        self.addCleanup(same_repo.unlock)
        same_repo.resume_write_group(wg_tokens)
        same_repo.abort_write_group()
        self.assertEqual(
            [], same_repo._pack_collection._upload_transport.list_dir(''))
        self.assertEqual(
            [], same_repo._pack_collection._pack_transport.list_dir(''))
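
    # A write-group token names a pack in the repository's own upload
    # directory; a token that escapes it through a relative path (here,
    # pointing into another repository) must raise UnresumableWriteGroup.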
    def test_resume_malformed_token(self):
        self.vfs_transport_factory = memory.MemoryServer
        # Make a repository with a suspended write group
        repo = self.make_repository('repo')
        token = repo.lock_write()
        self.addCleanup(repo.unlock)
        repo.start_write_group()
        text_key = ('file-id', 'revid')
        repo.texts.add_lines(text_key, (), ['lines'])
        wg_tokens = repo.suspend_write_group()
        # Make a new repository
        new_repo = self.make_repository('new_repo')
        token = new_repo.lock_write()
        self.addCleanup(new_repo.unlock)
        hacked_wg_token = (
            '../../../../repo/.bzr/repository/upload/' + wg_tokens[0])
        self.assertRaises(
            errors.UnresumableWriteGroup,
            new_repo.resume_write_group, [hacked_wg_token])


class TestPackRepositoryStacking(TestCaseWithTransport):

        self.hpss_calls = []
        tree.commit('commit triggering pack')
        tree.branch.push(remote_branch)
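        # Depending on the server, the push either issues an explicit
        # PackRepository.autopack RPC or streams the data via
        # Repository.insert_stream, which autopacks on the remote side.
        # Count both kinds of call and accept either outcome.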
        autopack_calls = len([call for call in self.hpss_calls if call ==
            'PackRepository.autopack'])
        streaming_calls = len([call for call in self.hpss_calls if call ==
            'Repository.insert_stream'])
        if autopack_calls:
            # Non streaming server
            self.assertEqual(1, autopack_calls)
            self.assertEqual(0, streaming_calls)
        else:
            # Streaming was used, which autopacks on the remote end.
            self.assertEqual(0, autopack_calls)
            # NB: The 2 calls are because of the sanity check that the server
            # supports the verb (see remote.py:RemoteSink.insert_stream for
            # details).
            self.assertEqual(2, streaming_calls)


def load_tests(basic_tests, module, test_loader):