def _find_children_info(store, interesting_keys, uninteresting_keys, pb):
    """Read the associated records, and determine what is interesting."""
    uninteresting_keys = set(uninteresting_keys)
    chks_to_read = uninteresting_keys.union(interesting_keys)
    next_uninteresting = set()
    next_interesting = set()
    uninteresting_items = set()
    interesting_items = set()
    interesting_to_yield = []
    for record in store.get_record_stream(chks_to_read, 'unordered', True):
        # records_read.add(record.key())
        if pb is not None:
            pb.tick()
        bytes = record.get_bytes_as('fulltext')
        # We don't care about search_key_func for this code, because we only
        # care about external references.
        node = _deserialise(bytes, record.key, search_key_func=None)
        if record.key in uninteresting_keys:
            if type(node) is InternalNode:
                next_uninteresting.update(node.refs())
            else:
                # We know we are at a LeafNode, so we can pass None for the
                # store
                uninteresting_items.update(node.iteritems(None))
        else:
            interesting_to_yield.append(record.key)
            if type(node) is InternalNode:
                next_interesting.update(node.refs())
            else:
                interesting_items.update(node.iteritems(None))
    return (next_uninteresting, uninteresting_items,
            next_interesting, interesting_to_yield, interesting_items)


def _find_all_uninteresting(store, interesting_root_keys,
                            uninteresting_root_keys, pb):
    """Determine the full set of uninteresting keys."""
    # What about duplicates between interesting_root_keys and
    # uninteresting_root_keys?
    if not uninteresting_root_keys:
        # Shortcut case. We know there is nothing uninteresting to filter out
        # So we just let the rest of the algorithm do the work
        # We know there is nothing uninteresting, and we didn't have to read
        # any interesting records yet.
        return (set(), set(), set(interesting_root_keys), [], set())
    all_uninteresting_chks = set(uninteresting_root_keys)
    all_uninteresting_items = set()

    # First step, find the direct children of both the interesting and
    # uninteresting set
    (uninteresting_keys, uninteresting_items,
     interesting_keys, interesting_to_yield,
     interesting_items) = _find_children_info(store, interesting_root_keys,
                                              uninteresting_root_keys,
                                              pb=pb)
    all_uninteresting_chks.update(uninteresting_keys)
    all_uninteresting_items.update(uninteresting_items)
    del uninteresting_items
    # Note: Exact matches between interesting and uninteresting do not need
    #       to be searched further. Non-exact matches need to be searched in
    #       case there is a future exact-match.
    uninteresting_keys.difference_update(interesting_keys)

    # Second, find the full set of uninteresting bits reachable by the
    # uninteresting roots
    chks_to_read = uninteresting_keys
    while chks_to_read:
        next_chks = set()
        for record in store.get_record_stream(chks_to_read, 'unordered', False):
            # TODO: Handle 'absent'
            if pb is not None:
                pb.tick()
            bytes = record.get_bytes_as('fulltext')
            # We don't care about search_key_func for this code, because we
            # only care about external references.
            node = _deserialise(bytes, record.key, search_key_func=None)
            if type(node) is InternalNode:
                # uninteresting_prefix_chks.update(node._items.iteritems())
                chks = node._items.values()
                # TODO: Should we remove the entries that are already in
                #       uninteresting_chks?
                next_chks.update(chks)
                all_uninteresting_chks.update(chks)
            else:
                all_uninteresting_items.update(node._items.iteritems())
        chks_to_read = next_chks
    return (all_uninteresting_chks, all_uninteresting_items,
            interesting_keys, interesting_to_yield, interesting_items)


class CHKMapDifference(object):
    """Iterate the stored pages and key,value pairs for (new - old).

    This class provides a generator over the stored CHK pages and the
    (key, value) pairs that are in any of the new maps and not in any of the
    old maps.

    Note that it may yield chk pages that are common (especially root nodes),
    but it won't yield (key,value) pairs that are common.
    """

    def __init__(self, store, new_root_keys, old_root_keys,
                 search_key_func, pb=None):
        self._store = store
        self._new_root_keys = new_root_keys
        self._old_root_keys = old_root_keys
        self._pb = pb
        # All uninteresting chks that we have seen. By the time they are added
        # here, they should be either fully ignored, or queued up for
        # processing
        self._all_old_chks = set(self._old_root_keys)
        # All items that we have seen from the old_root_keys
        self._all_old_items = set()
        # These are interesting items which were either read, or already in the
        # interesting queue (so we don't need to walk them again)
        self._processed_new_refs = set()
        self._search_key_func = search_key_func

        # The uninteresting and interesting nodes to be searched
        self._old_queue = []
        self._new_queue = []
        # Holds the (key, value) items found when processing the root nodes,
        # waiting for the uninteresting nodes to be walked
        self._new_item_queue = []

    def _read_nodes_from_store(self, keys):
        # We chose not to use _page_cache, because we think in terms of records
        # to be yielded. Also, we expect to touch each page only 1 time during
        # this code. (We may want to evaluate saving the raw bytes into the
        # page cache, which would allow a working tree update after the fetch
        # to not have to read the bytes again.)
        stream = self._store.get_record_stream(keys, 'unordered', True)
        for record in stream:
            if self._pb is not None:
                self._pb.tick()
            if record.storage_kind == 'absent':
                raise errors.NoSuchRevision(self._store, record.key)
            bytes = record.get_bytes_as('fulltext')
            node = _deserialise(bytes, record.key,
                                search_key_func=self._search_key_func)
            if type(node) is InternalNode:
                # Note we don't have to do node.refs() because we know that
                # there are no children that have been pushed into this node
                prefix_refs = node._items.items()
                items = []
            else:
                prefix_refs = []
                items = node._items.items()
            yield record, node, prefix_refs, items
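
    # Each tuple yielded by _read_nodes_from_store has the shape
    # (record, node, prefix_refs, items): prefix_refs is the list of
    # (prefix, child chk key) pairs for an InternalNode (and is empty for a
    # LeafNode), while items is the list of (key, value) pairs for a LeafNode
    # (and is empty for an InternalNode).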

    def _read_old_roots(self):
        old_chks_to_enqueue = []
        all_old_chks = self._all_old_chks
        for record, node, prefix_refs, items in \
                self._read_nodes_from_store(self._old_root_keys):
            # Uninteresting node
            prefix_refs = [p_r for p_r in prefix_refs
                                if p_r[1] not in all_old_chks]
            new_refs = [p_r[1] for p_r in prefix_refs]
            all_old_chks.update(new_refs)
            self._all_old_items.update(items)
            # Queue up the uninteresting references
            # Don't actually put them in the 'to-read' queue until we have
            # finished checking the interesting references
            old_chks_to_enqueue.extend(prefix_refs)
        return old_chks_to_enqueue

    def _enqueue_old(self, new_prefixes, old_chks_to_enqueue):
        # At this point, we have read all the uninteresting and interesting
        # items, so we can queue up the uninteresting stuff, knowing that we've
        # handled the interesting ones
        for prefix, ref in old_chks_to_enqueue:
            not_interesting = True
            for i in xrange(len(prefix), 0, -1):
                if prefix[:i] in new_prefixes:
                    not_interesting = False
                    break
            if not_interesting:
                # This prefix is not part of the remaining 'interesting set'
                continue
            self._old_queue.append(ref)
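
    # For example (values are illustrative): with new_prefixes containing
    # 'a' and 'ab', an old reference filed under prefix 'abc' is still
    # enqueued, because its shorter prefix 'ab' is interesting, while one
    # filed under 'd' is skipped, because no prefix of 'd' matches anything
    # in the new set.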

    def _read_all_roots(self):
        """Read the root pages.

        This is structured as a generator, so that the root records can be
        yielded up to whoever needs them without any buffering.
        """
        # This is the bootstrap phase
        if not self._old_root_keys:
            # With no old_root_keys we can just shortcut and be ready
            # for _flush_new_queue
            self._new_queue = list(self._new_root_keys)
            return
        old_chks_to_enqueue = self._read_old_roots()
        # filter out any root keys that are already known to be uninteresting
        new_keys = set(self._new_root_keys).difference(self._all_old_chks)
        # These are prefixes that are present in new_keys that we are
        # thinking to yield
        new_prefixes = set()
        # We are about to yield all of these, so we don't want them getting
        # added a second time
        processed_new_refs = self._processed_new_refs
        processed_new_refs.update(new_keys)
        for record, node, prefix_refs, items in \
                self._read_nodes_from_store(new_keys):
            # At this level, we now know all the uninteresting references
            # So we filter and queue up whatever is remaining
            prefix_refs = [p_r for p_r in prefix_refs
                           if p_r[1] not in self._all_old_chks
                              and p_r[1] not in processed_new_refs]
            refs = [p_r[1] for p_r in prefix_refs]
            new_prefixes.update([p_r[0] for p_r in prefix_refs])
            self._new_queue.extend(refs)
            # TODO: We can potentially get multiple items here, however the
            #       current design allows for this, as callers will do the work
            #       to make the results unique. We might profile whether we
            #       gain anything by ensuring unique return values for items
            new_items = [item for item in items
                               if item not in self._all_old_items]
            self._new_item_queue.extend(new_items)
            new_prefixes.update([self._search_key_func(item[0])
                                 for item in new_items])
            processed_new_refs.update(refs)
            yield record
        # For new_prefixes we have the full length prefixes queued up.
        # However, we also need possible prefixes. (If we have a known ref to
        # 'ab', then we also need to include 'a'.) So expand the
        # new_prefixes to include all shorter prefixes
        for prefix in list(new_prefixes):
            new_prefixes.update([prefix[:i] for i in xrange(1, len(prefix))])
        self._enqueue_old(new_prefixes, old_chks_to_enqueue)
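
    # Worked example of the expansion above: if new_prefixes starts as
    # set(['abc']), the loop adds 'a' and 'ab' as well, so an old reference
    # filed under 'a' or 'ab' is still treated as potentially interesting.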

    def _flush_new_queue(self):
        # No need to maintain the heap invariant anymore, just pull things out
        # and process them
        refs = set(self._new_queue)
        self._new_queue = []
        # First pass, flush all interesting items and convert to using direct refs
        all_old_chks = self._all_old_chks
        processed_new_refs = self._processed_new_refs
        all_old_items = self._all_old_items
        new_items = [item for item in self._new_item_queue
                           if item not in all_old_items]
        self._new_item_queue = []
        if new_items:
            yield None, new_items
        refs = refs.difference(all_old_chks)
        while refs:
            next_refs = set()
            next_refs_update = next_refs.update
            # Inlining _read_nodes_from_store improves 'bzr branch bzr.dev'
            # from 1m54s to 1m51s. Consider it.
            for record, _, p_refs, items in self._read_nodes_from_store(refs):
                items = [item for item in items
                               if item not in all_old_items]
                yield record, items
                next_refs_update([p_r[1] for p_r in p_refs])
            next_refs = next_refs.difference(all_old_chks)
            next_refs = next_refs.difference(processed_new_refs)
            processed_new_refs.update(next_refs)
            refs = next_refs

    def _process_next_old(self):
        # Since we don't filter uninteresting any further than during
        # _read_all_roots, process the whole queue in a single pass.
        refs = self._old_queue
        self._old_queue = []
        all_old_chks = self._all_old_chks
        for record, _, prefix_refs, items in self._read_nodes_from_store(refs):
            self._all_old_items.update(items)
            refs = [r for _, r in prefix_refs if r not in all_old_chks]
            self._old_queue.extend(refs)
            all_old_chks.update(refs)

    def _process_queues(self):
        while self._old_queue:
            self._process_next_old()
        return self._flush_new_queue()

    def process(self):
        for record in self._read_all_roots():
            yield record, []
        for record, items in self._process_queues():
            yield record, items
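
    # process() therefore yields (record, items) pairs: first the root
    # records from _read_all_roots() with empty item lists, then the records
    # and (key, value) items from _process_queues(). A record may be None
    # when only items are being reported, and pages that old and new share
    # (especially root pages) may still be yielded, but shared (key, value)
    # pairs are filtered out.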


def iter_interesting_nodes(store, interesting_root_keys,
                           uninteresting_root_keys, pb=None):
    """Given root keys, find interesting nodes.

    Evaluate nodes referenced by interesting_root_keys. Ones that are also
    referenced from uninteresting_root_keys are not considered interesting.

    :param interesting_root_keys: keys which should be part of the
        "interesting" nodes (which will be yielded)
    :param uninteresting_root_keys: keys which should be filtered out of the
        result set.
    :return: Yields
        (interesting record, {interesting key:values})
    """
    # TODO: consider that it may be more memory efficient to use the 20-byte
    #       sha1 string, rather than tuples of hexidecimal sha1 strings.
    # TODO: Try to factor out a lot of the get_record_stream() calls into a
    #       helper function similar to _read_bytes. This function should be
    #       able to use nodes from the _page_cache as well as actually
    #       requesting bytes from the store.
    (all_uninteresting_chks, all_uninteresting_items, interesting_keys,
     interesting_to_yield, interesting_items) = _find_all_uninteresting(store,
        interesting_root_keys, uninteresting_root_keys, pb)

    # Now that we know everything uninteresting, we can yield information from
    # our first request
    interesting_items.difference_update(all_uninteresting_items)
    interesting_to_yield = set(interesting_to_yield) - all_uninteresting_chks
    if interesting_items:
        yield None, interesting_items
    if interesting_to_yield:
        # We request these records again, rather than buffering the root
        # records, most likely they are still in the _group_cache anyway.
        for record in store.get_record_stream(interesting_to_yield,
                                              'unordered', False):
            yield record, []
    all_uninteresting_chks.update(interesting_to_yield)
    interesting_keys.difference_update(all_uninteresting_chks)

    chks_to_read = interesting_keys
    counter = 0
    while chks_to_read:
        next_chks = set()
        for record in store.get_record_stream(chks_to_read, 'unordered', False):
            counter += 1
            if pb is not None:
                pb.update('find chk pages', counter)
            # TODO: Handle 'absent'?
            bytes = record.get_bytes_as('fulltext')
            # We don't care about search_key_func for this code, because we
            # only care about external references.
            node = _deserialise(bytes, record.key, search_key_func=None)
            if type(node) is InternalNode:
                # all_uninteresting_chks grows large, as it lists all nodes we
                # don't want to process (including already seen interesting
                # nodes)
                # small.difference_update(large) scales O(large), but
                # small.difference(large) scales O(small).
                # Also, we know we just _deserialised this node, so we can
                # access the dict directly.
                chks = set(node._items.itervalues()).difference(
                            all_uninteresting_chks)
                # Is set() and .difference_update better than:
                # chks = [chk for chk in node.refs()
                #              if chk not in all_uninteresting_chks]
                next_chks.update(chks)
                # These are now uninteresting everywhere else
                all_uninteresting_chks.update(chks)
                interesting_items = []
            else:
                interesting_items = [item for item in node._items.iteritems()
                                     if item not in all_uninteresting_items]
                # TODO: Do we need to filter out items that we have already
                #       seen on other pages? We don't really want to buffer the
                #       whole thing, but it does mean that callers need to
                #       understand they may get duplicate values.
                # all_uninteresting_items.update(interesting_items)
            yield record, interesting_items
        chks_to_read = next_chks


def iter_interesting_nodes(store, interesting_root_keys,
                           uninteresting_root_keys, pb=None):
    """Given root keys, find interesting nodes.

    This newer definition supersedes the generator above; it delegates the
    traversal to CHKMapDifference.

    :return: Yields
        (interesting record, {interesting key:values})
    """
    iterator = CHKMapDifference(store, interesting_root_keys,
                                uninteresting_root_keys,
                                search_key_func=store._search_key_func,
                                pb=pb)
    return iterator.process()
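

# The helper below is an illustrative, hypothetical usage sketch only; it is
# not part of the original module. It assumes 'store' is a CHK-bearing
# VersionedFiles store and the root keys are key tuples for serialised CHKMap
# root pages, i.e. the same arguments the functions above already expect.
def _example_collect_new_items(store, new_root_key, old_root_key):
    """Hypothetical example: gather the (key, value) pairs in new but not old."""
    result = {}
    for record, items in iter_interesting_nodes(store, [new_root_key],
                                                [old_root_key]):
        # 'record' may be None when only items are being reported; when it is
        # not None it may be a page that new and old have in common, but the
        # items themselves are never common to both maps.
        for key, value in items:
            result[key] = value
    return result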