        # prefix is the key in self._items to use, key_filter is the key_filter
        # entries that would match this node
        keys = {}
        shortcut = False
        if key_filter is None:
            # yielding all nodes, yield whatever we have, and queue up a read
            # for whatever we are missing
            shortcut = True
            for prefix, node in self._items.iteritems():
                if node.__class__ is StaticTuple:
                    keys[node] = (prefix, None)
                else:
                    yield node, None
        elif len(key_filter) == 1:
            # Technically, this path could also be handled by the first check
            # in 'self._node_width' in length_filters. However, we can handle
            # this case without spending any time building up the
            # prefix_to_keys, etc state.

            # This is a bit ugly, but TIMEIT showed it to be by far the fastest
            # 0.626us   list(key_filter)[0]
            #       is a func() for list(), 2 mallocs, and a getitem
            # 0.489us   [k for k in key_filter][0]
            #       still has the mallocs, avoids the func() call
            # 0.350us   iter(key_filter).next()
            #       has a func() call, and mallocs an iterator
            # 0.125us   for key in key_filter: pass
            #       no func() overhead, might malloc an iterator
            # 0.105us   for key in key_filter: break
            #       no func() overhead, might malloc an iterator, probably
            #       avoids checking an 'else' clause as part of the for
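            # (Illustrative only, not from the original measurements: numbers
            # of this kind can be reproduced with the stdlib timeit module,
            # e.g.
            #   python -m timeit -s "kf = frozenset([('k',)])" "for key in kf: break"
            # exact figures vary by machine and Python build.)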
            for key in key_filter:
                break
            search_prefix = self._search_prefix_filter(key)
            if len(search_prefix) == self._node_width:
                # This item will match exactly, so just do a dict lookup, and
                # see what we can return
                shortcut = True
                try:
                    node = self._items[search_prefix]
                except KeyError:
                    # A given key can only match 1 child node, if it isn't
                    # there, then we can just return nothing
                    return
                if node.__class__ is StaticTuple:
                    keys[node] = (search_prefix, [key])
                else:
                    # This is loaded, and the only thing that can match,
                    # so return it directly
                    yield node, [key]
                    return
        if not shortcut:
            # First, convert all keys into a list of search prefixes
            # Aggregate common prefixes, and track the keys they come from
            prefix_to_keys = {}
            length_filters = {}
            for key in key_filter:
                search_prefix = self._search_prefix_filter(key)
                length_filter = length_filters.setdefault(
                                    len(search_prefix), set())
                length_filter.add(search_prefix)
                prefix_to_keys.setdefault(search_prefix, []).append(key)
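            # (Illustrative, hypothetical values: with _node_width = 2 and
            # search prefixes 'aa' and 'b', this builds
            #   length_filters == {2: set(['aa']), 1: set(['b'])}
            #   prefix_to_keys == {'aa': [key1], 'b': [key2]}
            # so the exact-width fast path below only applies when every
            # prefix is exactly _node_width characters long.)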
            if (self._node_width in length_filters
                and len(length_filters) == 1):
                # all of the search prefixes match exactly _node_width. This
                # means that everything is an exact match, and we can do a
                # lookup into self._items, rather than iterating over the items
                # dict
                search_prefixes = length_filters[self._node_width]
                for search_prefix in search_prefixes:
                    try:
                        node = self._items[search_prefix]
                    except KeyError:
                        # We can ignore this one
                        continue
                    node_key_filter = prefix_to_keys[search_prefix]
                    if node.__class__ is StaticTuple:
                        keys[node] = (search_prefix, node_key_filter)
                    else:
                        yield node, node_key_filter
            else:
                # The slow way. We walk every item in self._items, and check to
                # see if there are any matches
                length_filters = length_filters.items()
                for prefix, node in self._items.iteritems():
                    node_key_filter = []
                    for length, length_filter in length_filters:
                        sub_prefix = prefix[:length]
                        if sub_prefix in length_filter:
                            node_key_filter.extend(prefix_to_keys[sub_prefix])
                    if node_key_filter: # this key matched something, yield it
                        if node.__class__ is StaticTuple:
                            keys[node] = (prefix, node_key_filter)
                        else:
                            yield node, node_key_filter

        # Look in the page cache for some more bytes
        found_keys = set()


class CHKMapDifference(object):
    """Iterate the stored pages and key,value pairs for (new - old).

    This class provides a generator over the stored CHK pages and the
    (key, value) pairs that are in any of the new maps and not in any of the
    old maps.

    Note that it may yield chk pages that are common (especially root nodes),
    but it won't yield (key,value) pairs that are common.
    """
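    # A minimal usage sketch (assuming 'store' is a CHK-capable record store
    # and the root keys are ('sha1:...',) key tuples):
    #   diff = CHKMapDifference(store, new_root_keys, old_root_keys,
    #                           search_key_func=store._search_key_func)
    #   for record, items in diff.process():
    #       ...
    # iter_interesting_nodes() at the end of this module wraps this pattern.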

    def __init__(self, store, new_root_keys, old_root_keys,
                 search_key_func, pb=None):
        # TODO: Should we add a StaticTuple barrier here? It would be nice to
        #       force callers to use StaticTuple, because there will often be
        #       lots of keys passed in here. And even if we cast it locally,
        #       that just means that we will have *both* a StaticTuple and a
        #       tuple() in memory, referring to the same object. (so a net
        #       increase in memory, not a decrease.)
        self._store = store
        self._new_root_keys = new_root_keys
        self._old_root_keys = old_root_keys
        self._pb = pb
        # All uninteresting chks that we have seen. By the time they are added
        # here, they should be either fully ignored, or queued up for
        # processing
        # TODO: This might grow to a large size if there are lots of merge
        #       parents, etc. However, it probably doesn't scale to O(history)
        #       like _processed_new_refs does.
        self._all_old_chks = set(self._old_root_keys)
        # All items that we have seen from the old_root_keys
        self._all_old_items = set()
        # These are interesting items which were either read, or already in the
        # interesting queue (so we don't need to walk them again)
        # TODO: processed_new_refs becomes O(all_chks), consider switching to
        #       SimpleSet here
        self._processed_new_refs = set()
        self._search_key_func = search_key_func

        # The uninteresting and interesting nodes to be searched
        self._old_queue = []
        self._new_queue = []
        # Holds the (key, value) items found when processing the root nodes,
        # waiting for the uninteresting nodes to be walked
        self._new_item_queue = []

    def _read_nodes_from_store(self, keys):
        # We chose not to use _page_cache, because we think in terms of records
        # to be yielded. Also, we expect to touch each page only 1 time during
        # this code. (We may want to evaluate saving the raw bytes into the
        # page cache, which would allow a working tree update after the fetch
        # to not have to read the bytes again.)
        as_st = StaticTuple.from_sequence
        stream = self._store.get_record_stream(keys, 'unordered', True)
        for record in stream:
            if self._pb is not None:
                self._pb.tick()
            if record.storage_kind == 'absent':
                raise errors.NoSuchRevision(self._store, record.key)
            bytes = record.get_bytes_as('fulltext')
            node = _deserialise(bytes, record.key,
                                search_key_func=self._search_key_func)
            if type(node) is InternalNode:
                # Note we don't have to do node.refs() because we know that
                # there are no children that have been pushed into this node
                # Note: Using as_st() here seemed to save 1.2MB, which would
                #       indicate that we keep 100k prefix_refs around while
                #       processing. They *should* be shorter lived than that...
                #       It does cost us ~10s of processing time
                #prefix_refs = [as_st(item) for item in node._items.iteritems()]
                prefix_refs = node._items.items()
                items = []
            else:
                prefix_refs = []
                # Note: We don't use a StaticTuple here. Profiling showed a
                #       minor memory improvement (0.8MB out of 335MB peak 0.2%)
                #       But a significant slowdown (15s / 145s, or 10%)
                items = node._items.items()
            yield record, node, prefix_refs, items
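            # (Illustrative of the yielded shape: for an InternalNode the
            # tuple is roughly (record, node, [(prefix, child_key), ...], []),
            # while for a LeafNode prefix_refs is empty and items holds the
            # (key, value) pairs stored on that page.)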

    def _read_old_roots(self):
        old_chks_to_enqueue = []
        all_old_chks = self._all_old_chks
        for record, node, prefix_refs, items in \
                self._read_nodes_from_store(self._old_root_keys):
            # Uninteresting node
            prefix_refs = [p_r for p_r in prefix_refs
                                if p_r[1] not in all_old_chks]
            new_refs = [p_r[1] for p_r in prefix_refs]
            all_old_chks.update(new_refs)
            # TODO: This might be a good time to turn items into StaticTuple
            #       instances and possibly intern them. However, this does not
            #       impact 'initial branch' performance, so I'm not worrying
            #       about it now
            self._all_old_items.update(items)
            # Queue up the uninteresting references
            # Don't actually put them in the 'to-read' queue until we have
            # finished checking the interesting references
            old_chks_to_enqueue.extend(prefix_refs)
        return old_chks_to_enqueue

    def _enqueue_old(self, new_prefixes, old_chks_to_enqueue):
        # At this point, we have read all the uninteresting and interesting
        # items, so we can queue up the uninteresting stuff, knowing that we've
        # handled the interesting ones
        for prefix, ref in old_chks_to_enqueue:
            not_interesting = True
            for i in xrange(len(prefix), 0, -1):
                if prefix[:i] in new_prefixes:
                    not_interesting = False
                    break
            if not_interesting:
                # This prefix is not part of the remaining 'interesting set'
                continue
            self._old_queue.append(ref)
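        # (Illustrative: if new_prefixes contains 'a' and 'ab', an old ref
        # recorded under prefix 'abc' overlaps via 'ab' and is enqueued for
        # walking, while one recorded under 'cd' overlaps nothing new and is
        # skipped.)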

    def _read_all_roots(self):
        """Read the root pages.

        This is structured as a generator, so that the root records can be
        yielded up to whoever needs them without any buffering.
        """
        # This is the bootstrap phase
        if not self._old_root_keys:
            # With no old_root_keys we can just shortcut and be ready
            # for _flush_new_queue
            self._new_queue = list(self._new_root_keys)
            return
        old_chks_to_enqueue = self._read_old_roots()
        # filter out any root keys that are already known to be uninteresting
        new_keys = set(self._new_root_keys).difference(self._all_old_chks)
        # These are prefixes that are present in new_keys that we are about to
        # process
        new_prefixes = set()
        # We are about to yield all of these, so we don't want them getting
        # added a second time
        processed_new_refs = self._processed_new_refs
        processed_new_refs.update(new_keys)
        for record, node, prefix_refs, items in \
                self._read_nodes_from_store(new_keys):
            # At this level, we now know all the uninteresting references
            # So we filter and queue up whatever is remaining
            prefix_refs = [p_r for p_r in prefix_refs
                           if p_r[1] not in self._all_old_chks
                              and p_r[1] not in processed_new_refs]
            refs = [p_r[1] for p_r in prefix_refs]
            new_prefixes.update([p_r[0] for p_r in prefix_refs])
            self._new_queue.extend(refs)
            # TODO: We can potentially get multiple items here, however the
            #       current design allows for this, as callers will do the work
            #       to make the results unique. We might profile whether we
            #       gain anything by ensuring unique return values for items
            # TODO: This might be a good time to cast to StaticTuple, as
            #       self._new_item_queue will hold the contents of multiple
            #       records for an extended lifetime
            new_items = [item for item in items
                               if item not in self._all_old_items]
            self._new_item_queue.extend(new_items)
            new_prefixes.update([self._search_key_func(item[0])
                                 for item in new_items])
            processed_new_refs.update(refs)
            yield record
        # For new_prefixes we have the full length prefixes queued up.
        # However, we also need possible prefixes. (If we have a known ref to
        # 'ab', then we also need to include 'a'.) So expand the
        # new_prefixes to include all shorter prefixes
        for prefix in list(new_prefixes):
            new_prefixes.update([prefix[:i] for i in xrange(1, len(prefix))])
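            # (Illustrative: for prefix 'abc' this adds 'a' and 'ab'; 'abc'
            # itself is already in new_prefixes.)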
        self._enqueue_old(new_prefixes, old_chks_to_enqueue)

    def _flush_new_queue(self):
        # No need to maintain the heap invariant anymore, just pull things out
        refs = set(self._new_queue)
        self._new_queue = []
        # First pass, flush all interesting items and convert to using direct refs
        all_old_chks = self._all_old_chks
        processed_new_refs = self._processed_new_refs
        all_old_items = self._all_old_items
        new_items = [item for item in self._new_item_queue
                           if item not in all_old_items]
        self._new_item_queue = []
        if new_items:
            yield None, new_items
        refs = refs.difference(all_old_chks)
        processed_new_refs.update(refs)
        while refs:
            # TODO: Using a SimpleSet for self._processed_new_refs saved as
            #       much as 10MB of peak memory. However, it requires
            #       implementing a non-pyrex version.
            next_refs = set()
            next_refs_update = next_refs.update
            # Inlining _read_nodes_from_store improves 'bzr branch bzr.dev'
            # from 1m54s to 1m51s. Consider it.
            for record, _, p_refs, items in self._read_nodes_from_store(refs):
                if all_old_items:
                    # using the 'if' check saves about 145s => 141s, when
                    # streaming initial branch of Launchpad data.
                    items = [item for item in items
                             if item not in all_old_items]
                yield record, items
                next_refs_update([p_r[1] for p_r in p_refs])
            # set1.difference(set/dict) walks all of set1, and checks if it
            # exists in 'other'.
            # set1.difference(iterable) walks all of iterable, and does a
            # 'difference_update' on a clone of set1. Pick wisely based on the
            # expected sizes of objects.
            # in our case it is expected that 'new_refs' will always be quite
            # small, so difference() on next_refs is the cheaper choice.
            next_refs = next_refs.difference(all_old_chks)
            next_refs = next_refs.difference(processed_new_refs)
            processed_new_refs.update(next_refs)
            refs = next_refs

    def _process_next_old(self):
        # Since we don't filter uninteresting any further than during
        # _read_all_roots, process the whole queue in a single pass.
        refs = self._old_queue
        self._old_queue = []
        all_old_chks = self._all_old_chks
        for record, _, prefix_refs, items in self._read_nodes_from_store(refs):
            # TODO: Use StaticTuple here?
            self._all_old_items.update(items)
            refs = [r for _, r in prefix_refs if r not in all_old_chks]
            self._old_queue.extend(refs)
            all_old_chks.update(refs)

    def _process_queues(self):
        while self._old_queue:
            self._process_next_old()
        return self._flush_new_queue()

    def process(self):
        for record in self._read_all_roots():
            yield record, []
        for record, items in self._process_queues():
            yield record, items


def iter_interesting_nodes(store, interesting_root_keys,
                           uninteresting_root_keys, pb=None):
    """Given root keys, find interesting nodes.

    :return: Yield
        (interesting record, {interesting key:values})
    """
    iterator = CHKMapDifference(store, interesting_root_keys,
                                uninteresting_root_keys,
                                search_key_func=store._search_key_func,
                                pb=pb)
    return iterator.process()
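# A consumption sketch (hypothetical names; 'store' is assumed to be a CHK
# store wired up by the surrounding bzrlib code, and the root keys are
# ('sha1:...',) tuples):
#   for record, items in iter_interesting_nodes(store, [new_root], [old_root]):
#       if record is not None:
#           copy_record_somewhere(record)      # hypothetical helper
#       for key, value in items:
#           handle_new_item(key, value)        # hypothetical helper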