        # prefix is the key in self._items to use, key_filter is the key_filter
        # entries that would match this node
        keys = {}
        shortcut = False
        if key_filter is None:
            # yielding all nodes, yield whatever we have, and queue up a read
            # for whatever we are missing
            shortcut = True
            for prefix, node in self._items.iteritems():
                if node.__class__ is tuple:
                    keys[node] = (prefix, None)
                else:
                    yield node, None
        elif len(key_filter) == 1:
            # Technically, this path could also be handled by the first check
            # in 'self._node_width' in length_filters. However, we can handle
            # this case without spending any time building up the
            # prefix_to_keys, etc state.

            # This is a bit ugly, but TIMEIT showed it to be by far the fastest
            #   0.626us   list(key_filter)[0]
            #       is a func() for list(), 2 mallocs, and a getitem
            #   0.489us   [k for k in key_filter][0]
            #       still has the mallocs, avoids the func() call
            #   0.350us   iter(key_filter).next()
            #       has a func() call, and mallocs an iterator
            #   0.125us   for key in key_filter: pass
            #       no func() overhead, might malloc an iterator
            #   0.105us   for key in key_filter: break
            #       no func() overhead, might malloc an iterator, probably
            #       avoids checking an 'else' clause as part of the for
            for key in key_filter:
                break
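            # After the single-iteration loop above, 'key' holds the one
            # element of key_filter, which is all we need for this fast path.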
            search_prefix = self._search_prefix_filter(key)
            if len(search_prefix) == self._node_width:
                # This item will match exactly, so just do a dict lookup, and
                # see what we can return
                shortcut = True
                try:
                    node = self._items[search_prefix]
                except KeyError:
                    # A given key can only match 1 child node, if it isn't
                    # there, then we can just return nothing
                    return
                if node.__class__ is tuple:
                    keys[node] = (search_prefix, [key])
                else:
                    # This is loaded, and the only thing that can match,
                    # return
                    yield node, [key]
                    return
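        # If the search prefix does not cover the full node width it may match
        # more than one child, so we fall through to the generic prefix
        # matching below (shortcut stays False in that case).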
        if not shortcut:
            # First, convert all keys into a list of search prefixes
            # Aggregate common prefixes, and track the keys they come from
            prefix_to_keys = {}
            length_filters = {}
            for key in key_filter:
                search_prefix = self._search_prefix_filter(key)
                length_filter = length_filters.setdefault(
                                    len(search_prefix), set())
                length_filter.add(search_prefix)
                prefix_to_keys.setdefault(search_prefix, []).append(key)
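            # At this point prefix_to_keys maps each search prefix to the keys
            # that produced it, and length_filters maps each prefix length to
            # the set of prefixes with that length.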
            if (self._node_width in length_filters
                and len(length_filters) == 1):
                # all of the search prefixes match exactly _node_width. This
                # means that everything is an exact match, and we can do a
                # lookup into self._items, rather than iterating over the items
                search_prefixes = length_filters[self._node_width]
                for search_prefix in search_prefixes:
                    try:
                        node = self._items[search_prefix]
                    except KeyError:
                        # We can ignore this one
                        continue
                    node_key_filter = prefix_to_keys[search_prefix]
                    if node.__class__ is tuple:
                        keys[node] = (search_prefix, node_key_filter)
                    else:
                        yield node, node_key_filter
            else:
                # The slow way. We walk every item in self._items, and check to
                # see if there are any matches
                length_filters = length_filters.items()
                for prefix, node in self._items.iteritems():
                    node_key_filter = []
                    for length, length_filter in length_filters:
                        sub_prefix = prefix[:length]
                        if sub_prefix in length_filter:
                            node_key_filter.extend(prefix_to_keys[sub_prefix])
                    if node_key_filter: # this key matched something, yield it
                        if node.__class__ is tuple:
                            keys[node] = (prefix, node_key_filter)
                        else:
                            yield node, node_key_filter
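        # Children that are not loaded yet (still plain key tuples) were
        # queued in 'keys' above; the page cache is checked for them next.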
        # Look in the page cache for some more bytes
        found_keys = set()
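        # found_keys tracks which of the queued keys can be satisfied from the
        # page cache.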