~bzr-pqm/bzr/bzr.dev

Viewing changes to bzrlib/btree_index.py

  • Committer: John Arbash Meinel
  • Date: 2008-08-28 01:59:58 UTC
  • mto: This revision was merged to the branch mainline in revision 3653.
  • Revision ID: john@arbash-meinel.com-20080828015958-bvdt8spf2ls57s39
Clean out the global state, good for prototyping and tuning, bad for production code.
(as recommended by Robert)
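
The globals deleted below (leaf_value_hits, leaf_node_hits, miss_attempts, and so on) were plain hit/miss tallies shared by every user of the module. A minimal sketch of that pattern, plus one possible production-friendly alternative; the CacheStats helper and function names here are purely illustrative and not part of bzrlib:

    # Illustrative sketch only -- not bzrlib code.

    # Prototype style (what this revision removes): a module-level list
    # that every caller mutates. Handy for ad-hoc tuning, but it is
    # hidden global state in production.
    leaf_node_hits = [0, 0]   # [hits, misses]

    def get_node_prototype(cache, idx):
        try:
            node = cache[idx]
            leaf_node_hits[0] += 1
        except KeyError:
            node = None
            leaf_node_hits[1] += 1
        return node

    # One alternative that keeps the numbers without the global: the
    # caller owns a stats object and passes it in explicitly.
    class CacheStats(object):
        def __init__(self):
            self.hits = 0
            self.misses = 0

    def get_node(cache, idx, stats):
        try:
            node = cache[idx]
            stats.hits += 1
        except KeyError:
            node = None
            stats.misses += 1
        return node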

@@ -53,13 +53,6 @@
 # 4K per page: 4MB - 1000 entries
 _NODE_CACHE_SIZE = 1000
 
-leaf_value_hits = [0, 0]
-internal_node_hits = [0, 0]
-leaf_node_hits = [0, 0]
-miss_attempts = 0  # Missed this entry while looking up
-bisect_shortcut = [0, 0]
-dupes = [0]
-
 
 class _BuilderRow(object):
     """The stored state accumulated while writing out a row in the index.
@@ -622,7 +615,7 @@
             found[node_pos] = node
         return found
 
-    def _get_nodes(self, cache, node_indexes, counter):
+    def _get_nodes(self, cache, node_indexes):
         found = {}
         needed = []
         for idx in node_indexes:
@@ -631,10 +624,8 @@
                 continue
             try:
                 found[idx] = cache[idx]
-                counter[0] += 1
             except KeyError:
                 needed.append(idx)
-                counter[1] += 1
         found.update(self._cache_nodes(needed, cache))
         return found
 
@@ -643,13 +634,11 @@
 
         After getting it, the node will be cached.
         """
-        return self._get_nodes(self._internal_node_cache, node_indexes,
-                               internal_node_hits)
+        return self._get_nodes(self._internal_node_cache, node_indexes)
 
     def _get_leaf_nodes(self, node_indexes):
         """Get a bunch of nodes, from cache or disk."""
-        found = self._get_nodes(self._leaf_node_cache, node_indexes,
-                                leaf_node_hits)
+        found = self._get_nodes(self._leaf_node_cache, node_indexes)
         if self._leaf_value_cache is not None:
             for node in found.itervalues():
                 for key, value in node.keys.iteritems():
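
For context on the hunk above: _get_nodes splits the requested indexes into cache hits and misses, then fetches all the misses with a single _cache_nodes call. A standalone sketch of that shape, assuming a dict-like cache and a hypothetical fetch_many callable that returns {index: node}:

    def get_nodes(cache, node_indexes, fetch_many):
        # Serve what we can from the cache, batch-read the rest.
        found = {}
        needed = []
        for idx in node_indexes:
            try:
                found[idx] = cache[idx]
            except KeyError:
                needed.append(idx)
        if needed:
            fetched = fetch_many(needed)   # e.g. one ranged read from disk
            cache.update(fetched)
            found.update(fetched)
        return found
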
@@ -715,17 +704,13 @@
         # iter_steps = len(in_keys) + len(fixed_keys)
         # bisect_steps = len(in_keys) * math.log(len(fixed_keys), 2)
         if len(in_keys) == 1: # Bisect will always be faster for M = 1
-            bisect_shortcut[0] += 1
             return [(bisect_right(fixed_keys, in_keys[0]), in_keys)]
         # elif bisect_steps < iter_steps:
-        #     bisect_shortcut[0] += len(in_keys)
         #     offsets = {}
         #     for key in in_keys:
         #         offsets.setdefault(bisect_right(fixed_keys, key),
         #                            []).append(key)
         #     return [(o, offsets[o]) for o in sorted(offsets)]
-        else:
-            bisect_shortcut[1] += len(in_keys)
         in_keys_iter = iter(in_keys)
         fixed_keys_iter = enumerate(fixed_keys)
         cur_in_key = in_keys_iter.next()
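
The commented-out branch in this hunk is the bisect variant of the same search: each incoming key is assigned to a child offset with bisect_right, costing roughly len(in_keys) * log2(len(fixed_keys)) comparisons versus len(in_keys) + len(fixed_keys) for the linear merge that follows. A self-contained sketch of that bucketing; the function name is made up here:

    from bisect import bisect_right

    def bucket_by_bisect(in_keys, fixed_keys):
        # Map each search key to the index of the child node whose key
        # range contains it, grouping keys that land in the same child.
        offsets = {}
        for key in in_keys:
            offsets.setdefault(bisect_right(fixed_keys, key), []).append(key)
        return [(o, offsets[o]) for o in sorted(offsets)]

    # Example: bucket_by_bisect(['a', 'c', 'z'], ['b', 'm'])
    # -> [(0, ['a']), (1, ['c']), (2, ['z'])]
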
@@ -794,7 +779,6 @@
         if not keys:
             return
 
-        global leaf_value_hits, miss_attempts, dupes
         if not self.key_count():
             return
 
@@ -805,7 +789,6 @@
             for key in keys:
                 value = self._leaf_value_cache.get(key, None)
                 if value is not None:
-                    leaf_value_hits[0] += 1
                     # This key is known not to be here, skip it
                     value, refs = value
                     if self.node_ref_lists:
@@ -813,7 +796,6 @@
                     else:
                         yield (self, key, value)
                 else:
-                    leaf_value_hits[1] += 1
                     needed_keys.append(key)
 
         last_key = None
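
The two hunks above show iter_entries answering from _leaf_value_cache where it can and queuing everything else for a disk lookup. A hedged sketch of that generator shape, with a hypothetical fetch_from_disk callable standing in for the index read:

    def iter_cached_then_fetch(keys, value_cache, fetch_from_disk):
        # Yield cached values immediately; resolve the rest in one batch.
        needed_keys = []
        for key in keys:
            value = value_cache.get(key, None)
            if value is not None:
                yield key, value
            else:
                needed_keys.append(key)
        for key, value in fetch_from_disk(needed_keys):
            yield key, value
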
@@ -857,8 +839,6 @@
                         yield (self, next_sub_key, value, refs)
                     else:
                         yield (self, next_sub_key, value)
-                else:
-                    miss_attempts += 1
 
     def iter_entries_prefix(self, keys):
         """Iterate over keys within the index using prefix matching.