~bzr-pqm/bzr/bzr.dev

Viewing changes to bzrlib/knit.py

  • Committer: Jelmer Vernooij
  • Date: 2009-03-12 14:02:53 UTC
  • mfrom: (4135 +trunk)
  • mto: This revision was merged to the branch mainline in revision 4137.
  • Revision ID: jelmer@samba.org-20090312140253-bmldbzlmsitfdrzf
Merge bzr.dev.

@@ -103,6 +103,7 @@
     ConstantMapper,
     ContentFactory,
     ChunkedContentFactory,
+    sort_groupcompress,
     VersionedFile,
     VersionedFiles,
     )
@@ -966,7 +967,7 @@
         else:
             options.append('fulltext')
             # isinstance is slower and we have no hierarchy.
-            if self._factory.__class__ == KnitPlainFactory:
+            if self._factory.__class__ is KnitPlainFactory:
                 # Use the already joined bytes saving iteration time in
                 # _record_to_data.
                 size, bytes = self._record_to_data(key, digest,
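
The switch from `==` to `is` in the hunk above makes the exact-class test explicit: class objects are singletons, so identity comparison is both the cheapest and the most precise check. A minimal standalone sketch (class names here are illustrative, not bzrlib code):

    class PlainFactory(object):
        pass

    class AnnotatedFactory(PlainFactory):
        pass

    factory = PlainFactory()
    # `is` compares object identity -- a single pointer comparison that
    # cannot be confused by an overloaded __eq__ on a metaclass.
    print(factory.__class__ is PlainFactory)             # True
    print(AnnotatedFactory().__class__ is PlainFactory)  # False: exact class only
    # isinstance() also accepts subclasses, which this code path does not
    # want (and, per the comment above, it is slower when there is no
    # hierarchy to honour).
    print(isinstance(AnnotatedFactory(), PlainFactory))  # True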
@@ -1297,9 +1298,6 @@
         if cur_keys:
             result.append((cur_keys, cur_non_local))
             sizes.append(cur_size)
-        trace.mutter('Collapsed %d keys into %d requests w/ %d file_ids'
-                     ' w/ sizes: %s', total_keys, len(result),
-                     len(prefix_split_keys), sizes)
         return result
 
     def get_record_stream(self, keys, ordering, include_delta_closure):
@@ -1319,7 +1317,7 @@
         if not keys:
             return
         if not self._index.has_graph:
-            # Cannot topological order when no graph has been stored.
+            # Cannot sort when no graph has been stored.
             ordering = 'unordered'
 
         remaining_keys = keys
@@ -1381,9 +1379,12 @@
                     needed_from_fallback.add(key)
         # Double index lookups here : need a unified api ?
         global_map, parent_maps = self._get_parent_map_with_sources(keys)
-        if ordering == 'topological':
-            # Global topological sort
-            present_keys = tsort.topo_sort(global_map)
+        if ordering in ('topological', 'groupcompress'):
+            if ordering == 'topological':
+                # Global topological sort
+                present_keys = tsort.topo_sort(global_map)
+            else:
+                present_keys = sort_groupcompress(global_map)
             # Now group by source:
             source_keys = []
             current_source = None
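
Both branches above turn global_map, a dict of key -> parent keys, into a single ordered list: tsort.topo_sort for 'topological' ordering, the newly imported sort_groupcompress for 'groupcompress'. As a rough sketch of what a topological sort over such a parent map does (a simplified stand-in assuming a DAG, not bzrlib's actual implementation):

    def topo_sort_sketch(parent_map):
        # Order keys so every parent precedes its children.  parent_map
        # maps key -> tuple of parent keys; parents absent from the map
        # (ghosts, fallback keys) are treated as already satisfied.
        result = []
        done = set()
        for start in parent_map:
            stack = [start]
            while stack:
                key = stack[-1]
                if key in done:
                    stack.pop()
                    continue
                pending = [p for p in parent_map.get(key, ())
                           if p in parent_map and p not in done]
                if pending:
                    stack.extend(pending)   # visit parents first
                else:
                    done.add(key)           # all parents emitted
                    result.append(key)
                    stack.pop()
        return result

    graph = {('rev-c',): (('rev-a',), ('rev-b',)),
             ('rev-b',): (('rev-a',),),
             ('rev-a',): ()}
    print(topo_sort_sketch(graph))  # [('rev-a',), ('rev-b',), ('rev-c',)]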
@@ -1399,7 +1400,7 @@
         else:
             if ordering != 'unordered':
                 raise AssertionError('valid values for ordering are:'
-                    ' "unordered" or "topological" not: %r'
+                    ' "unordered", "groupcompress" or "topological" not: %r'
                     % (ordering,))
             # Just group by source; remote sources first.
             present_keys = []
@@ -1517,6 +1518,7 @@
         # key = basis_parent, value = index entry to add
         buffered_index_entries = {}
         for record in stream:
+            buffered = False
             parents = record.parents
             if record.storage_kind in delta_types:
                 # TODO: eventually the record itself should track
@@ -1568,7 +1570,6 @@
                 access_memo = self._access.add_raw_records(
                     [(record.key, len(bytes))], bytes)[0]
                 index_entry = (record.key, options, access_memo, parents)
-                buffered = False
                 if 'fulltext' not in options:
                     # Not a fulltext, so we need to make sure the compression
                     # parent will also be present.
@@ -1609,15 +1610,16 @@
                 except errors.RevisionAlreadyPresent:
                     pass
             # Add any records whose basis parent is now available.
-            added_keys = [record.key]
-            while added_keys:
-                key = added_keys.pop(0)
-                if key in buffered_index_entries:
-                    index_entries = buffered_index_entries[key]
-                    self._index.add_records(index_entries)
-                    added_keys.extend(
-                        [index_entry[0] for index_entry in index_entries])
-                    del buffered_index_entries[key]
+            if not buffered:
+                added_keys = [record.key]
+                while added_keys:
+                    key = added_keys.pop(0)
+                    if key in buffered_index_entries:
+                        index_entries = buffered_index_entries[key]
+                        self._index.add_records(index_entries)
+                        added_keys.extend(
+                            [index_entry[0] for index_entry in index_entries])
+                        del buffered_index_entries[key]
         if buffered_index_entries:
            # There were index entries buffered at the end of the stream,
            # So these need to be added (if the index supports holding such
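
The three hunks above are one logical fix: `buffered = False` now resets at the top of the per-record loop rather than only on the path that writes an index entry, and the flush of dependent entries runs only `if not buffered:`. A record that was itself held back waiting for its compression parent can therefore no longer prematurely flush entries buffered against its own key. A self-contained sketch of this buffer-until-basis pattern, with simplified stand-in names (`present` plays the role of the index; none of this is bzrlib's API):

    def add_stream(stream, present, buffered_entries):
        # stream yields (key, basis) pairs; buffered_entries maps a basis
        # key -> list of keys parked until that basis arrives.
        for key, basis in stream:
            buffered = False                  # reset per record (first hunk)
            if basis is not None and basis not in present:
                # Basis not stored yet: park this entry under its basis.
                buffered_entries.setdefault(basis, []).append(key)
                buffered = True
            else:
                present.add(key)
            if not buffered:                  # flush only from committed records
                added = [key]
                while added:
                    k = added.pop(0)
                    for child in buffered_entries.pop(k, []):
                        present.add(child)    # basis now present: commit child
                        added.append(child)   # ...and cascade to its dependents

    present, pending = set(), {}
    # 'b' arrives before its basis 'a' and is held back, then flushed:
    add_stream([('b', 'a'), ('a', None), ('c', 'b')], present, pending)
    print(sorted(present))  # ['a', 'b', 'c']
    print(pending)          # {} -- nothing left buffered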
@@ -1679,7 +1681,7 @@
                         key_records.append((key, details[0]))
                 records_iter = enumerate(self._read_records_iter(key_records))
                 for (key_idx, (key, data, sha_value)) in records_iter:
-                    pb.update('Walking content.', key_idx, total)
+                    pb.update('Walking content', key_idx, total)
                     compression_parent = build_details[key][1]
                     if compression_parent is None:
                         # fulltext
@@ -1715,7 +1717,7 @@
                 source_keys.add(key)
                 yield line, key
             keys.difference_update(source_keys)
-        pb.update('Walking content.', total, total)
+        pb.update('Walking content', total, total)
 
     def _make_line_delta(self, delta_seq, new_content):
         """Generate a line delta from delta_seq and new_content."""