4988.10.5
by John Arbash Meinel
Merge bzr.dev 5021 to resolve NEWS |
1 |
# Copyright (C) 2006-2010 Canonical Ltd
|
1563.2.4
by Robert Collins
First cut at including the knit implementation of versioned_file. |
2 |
#
|
3 |
# This program is free software; you can redistribute it and/or modify
|
|
4 |
# it under the terms of the GNU General Public License as published by
|
|
5 |
# the Free Software Foundation; either version 2 of the License, or
|
|
6 |
# (at your option) any later version.
|
|
7 |
#
|
|
8 |
# This program is distributed in the hope that it will be useful,
|
|
9 |
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
10 |
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
11 |
# GNU General Public License for more details.
|
|
12 |
#
|
|
13 |
# You should have received a copy of the GNU General Public License
|
|
14 |
# along with this program; if not, write to the Free Software
|
|
4183.7.1
by Sabin Iacob
update FSF mailing address |
15 |
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
1563.2.4
by Robert Collins
First cut at including the knit implementation of versioned_file. |
16 |
|
17 |
"""Knit versionedfile implementation.
|
|
18 |
||
19 |
A knit is a versioned file implementation that supports efficient append only
|
|
20 |
updates.
|
|
1563.2.6
by Robert Collins
Start check tests for knits (pending), and remove dead code. |
21 |
|
22 |
Knit file layout:
|
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
23 |
lifeless: the data file is made up of "delta records". each delta record has a delta header
|
24 |
that contains; (1) a version id, (2) the size of the delta (in lines), and (3) the digest of
|
|
25 |
the -expanded data- (ie, the delta applied to the parent). the delta also ends with a
|
|
1563.2.6
by Robert Collins
Start check tests for knits (pending), and remove dead code. |
26 |
end-marker; simply "end VERSION"
|
27 |
||
28 |
delta can be line or full contents.a
|
|
29 |
... the 8's there are the index number of the annotation.
|
|
30 |
version robertc@robertcollins.net-20051003014215-ee2990904cc4c7ad 7 c7d23b2a5bd6ca00e8e266cec0ec228158ee9f9e
|
|
31 |
59,59,3
|
|
32 |
8
|
|
33 |
8 if ie.executable:
|
|
34 |
8 e.set('executable', 'yes')
|
|
35 |
130,130,2
|
|
36 |
8 if elt.get('executable') == 'yes':
|
|
37 |
8 ie.executable = True
|
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
38 |
end robertc@robertcollins.net-20051003014215-ee2990904cc4c7ad
|
1563.2.6
by Robert Collins
Start check tests for knits (pending), and remove dead code. |
39 |
|
40 |
||
41 |
whats in an index:
|
|
42 |
09:33 < jrydberg> lifeless: each index is made up of a tuple of; version id, options, position, size, parents
|
|
43 |
09:33 < jrydberg> lifeless: the parents are currently dictionary compressed
|
|
44 |
09:33 < jrydberg> lifeless: (meaning it currently does not support ghosts)
|
|
45 |
09:33 < lifeless> right
|
|
46 |
09:33 < jrydberg> lifeless: the position and size is the range in the data file
|
|
47 |
||
48 |
||
49 |
so the index sequence is the dictionary compressed sequence number used
|
|
50 |
in the deltas to provide line annotation
|
|
51 |
||
1563.2.4
by Robert Collins
First cut at including the knit implementation of versioned_file. |
52 |
"""
|
53 |
||
1563.2.6
by Robert Collins
Start check tests for knits (pending), and remove dead code. |
54 |
|
1563.2.11
by Robert Collins
Consolidate reweave and join as we have no separate usage, make reweave tests apply to all versionedfile implementations and deprecate the old reweave apis. |
55 |
from cStringIO import StringIO |
4398.8.8
by John Arbash Meinel
Respond to Andrew's review comments. |
56 |
from itertools import izip |
1756.2.17
by Aaron Bentley
Fixes suggested by John Meinel |
57 |
import operator |
1563.2.6
by Robert Collins
Start check tests for knits (pending), and remove dead code. |
58 |
import os |
3789.2.1
by John Arbash Meinel
_DirectPackAccess can now raise RetryWithNewPacks when we think something has happened. |
59 |
import sys |
1594.2.19
by Robert Collins
More coalescing tweaks, and knit feedback. |
60 |
|
2592.3.66
by Robert Collins
Allow adaption of KnitData to pack files. |
61 |
from bzrlib.lazy_import import lazy_import |
62 |
lazy_import(globals(), """ |
|
63 |
from bzrlib import (
|
|
2770.1.1
by Aaron Bentley
Initial implmentation of plain knit annotation |
64 |
annotate,
|
3535.5.1
by John Arbash Meinel
cleanup a few imports to be lazily loaded. |
65 |
debug,
|
66 |
diff,
|
|
3224.1.10
by John Arbash Meinel
Introduce the heads_provider for reannotate. |
67 |
graph as _mod_graph,
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
68 |
index as _mod_index,
|
2998.2.2
by John Arbash Meinel
implement a faster path for copying from packs back to knits. |
69 |
lru_cache,
|
2592.3.66
by Robert Collins
Allow adaption of KnitData to pack files. |
70 |
pack,
|
3535.5.1
by John Arbash Meinel
cleanup a few imports to be lazily loaded. |
71 |
progress,
|
4789.28.3
by John Arbash Meinel
Add a static_tuple.as_tuples() helper. |
72 |
static_tuple,
|
2745.1.2
by Robert Collins
Ensure mutter_callsite is not directly called on a lazy_load object, to make the stacklevel parameter work correctly. |
73 |
trace,
|
3224.5.1
by Andrew Bennetts
Lots of assorted hackery to reduce the number of imports for common operations. Improves 'rocks', 'st' and 'help' times by ~50ms on my laptop. |
74 |
tsort,
|
3535.5.1
by John Arbash Meinel
cleanup a few imports to be lazily loaded. |
75 |
tuned_gzip,
|
4961.2.13
by Martin Pool
Further progress bar string-pulling |
76 |
ui,
|
2592.3.66
by Robert Collins
Allow adaption of KnitData to pack files. |
77 |
)
|
78 |
""") |
|
1911.2.3
by John Arbash Meinel
Moving everything into a new location so that we can cache more than just revision ids |
79 |
from bzrlib import ( |
80 |
errors, |
|
2249.5.12
by John Arbash Meinel
Change the APIs for VersionedFile, Store, and some of Repository into utf-8 |
81 |
osutils, |
2104.4.2
by John Arbash Meinel
Small cleanup and NEWS entry about fixing bug #65714 |
82 |
patiencediff, |
2158.3.1
by Dmitry Vasiliev
KnitIndex tests/fixes/optimizations |
83 |
)
|
84 |
from bzrlib.errors import ( |
|
85 |
FileExists, |
|
86 |
NoSuchFile, |
|
87 |
KnitError, |
|
88 |
InvalidRevisionId, |
|
89 |
KnitCorrupt, |
|
90 |
KnitHeaderError, |
|
91 |
RevisionNotPresent, |
|
92 |
RevisionAlreadyPresent, |
|
3787.1.1
by Robert Collins
Embed the failed text in sha1 knit errors. |
93 |
SHA1KnitCorrupt, |
2158.3.1
by Dmitry Vasiliev
KnitIndex tests/fixes/optimizations |
94 |
)
|
95 |
from bzrlib.osutils import ( |
|
96 |
contains_whitespace, |
|
97 |
contains_linebreaks, |
|
2850.1.1
by Robert Collins
* ``KnitVersionedFile.add*`` will no longer cache added records even when |
98 |
sha_string, |
2158.3.1
by Dmitry Vasiliev
KnitIndex tests/fixes/optimizations |
99 |
sha_strings, |
3350.3.8
by Robert Collins
Basic stream insertion, no fast path yet for knit to knit. |
100 |
split_lines, |
2158.3.1
by Dmitry Vasiliev
KnitIndex tests/fixes/optimizations |
101 |
)
|
3350.3.3
by Robert Collins
Functional get_record_stream interface tests covering full interface. |
102 |
from bzrlib.versionedfile import ( |
3350.3.12
by Robert Collins
Generate streams with absent records. |
103 |
AbsentContentFactory, |
3350.3.8
by Robert Collins
Basic stream insertion, no fast path yet for knit to knit. |
104 |
adapter_registry, |
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
105 |
ConstantMapper, |
3350.3.3
by Robert Collins
Functional get_record_stream interface tests covering full interface. |
106 |
ContentFactory, |
3890.2.1
by John Arbash Meinel
Start working on a ChunkedContentFactory. |
107 |
ChunkedContentFactory, |
4111.1.1
by Robert Collins
Add a groupcompress sort order. |
108 |
sort_groupcompress, |
3350.3.3
by Robert Collins
Functional get_record_stream interface tests covering full interface. |
109 |
VersionedFile, |
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
110 |
VersionedFiles, |
3350.3.3
by Robert Collins
Functional get_record_stream interface tests covering full interface. |
111 |
)
|
1563.2.4
by Robert Collins
First cut at including the knit implementation of versioned_file. |
112 |
|
113 |
||
114 |
# TODO: Split out code specific to this format into an associated object.
|
|
115 |
||
116 |
# TODO: Can we put in some kind of value to check that the index and data
|
|
117 |
# files belong together?
|
|
118 |
||
1759.2.1
by Jelmer Vernooij
Fix some types (found using aspell). |
119 |
# TODO: accommodate binaries, perhaps by storing a byte count
|
1563.2.4
by Robert Collins
First cut at including the knit implementation of versioned_file. |
120 |
|
121 |
# TODO: function to check whole file
|
|
122 |
||
123 |
# TODO: atomically append data, then measure backwards from the cursor
|
|
124 |
# position after writing to work out where it was located. we may need to
|
|
125 |
# bypass python file buffering.
|
|
126 |
||
127 |
# File-name suffixes for the two on-disk halves of a knit: the content
# ("data") file and its index.
DATA_SUFFIX = '.knit'
INDEX_SUFFIX = '.kndx'
# Minimum buffer size (5 MB) used when batching get_record_stream
# fulltext requests.
_STREAM_MIN_BUFFER_SIZE = 5*1024*1024
1563.2.4
by Robert Collins
First cut at including the knit implementation of versioned_file. |
130 |
|
131 |
||
3350.3.4
by Robert Collins
Finish adapters for annotated knits to unannotated knits and full texts. |
132 |
class KnitAdapter(object):
    """Base class for knit record adaption."""

    def __init__(self, basis_vf):
        """Create an adapter which accesses full texts from basis_vf.

        :param basis_vf: A versioned file to access basis texts of deltas
            from.  May be None for adapters that do not need to access
            basis texts.
        """
        self._basis_vf = basis_vf
        # A bare KnitVersionedFiles plus the two content factories are kept
        # purely for their record parsing/serialising helpers.
        self._annotate_factory = KnitAnnotateFactory()
        self._plain_factory = KnitPlainFactory()
        self._data = KnitVersionedFiles(None, None)
3350.3.4
by Robert Collins
Finish adapters for annotated knits to unannotated knits and full texts. |
145 |
|
146 |
||
147 |
class FTAnnotatedToUnannotated(KnitAdapter):
    """An adapter from FT annotated knits to unannotated ones."""

    def get_bytes(self, factory):
        """Re-serialise factory's annotated fulltext record without annotations."""
        raw = factory._raw_record
        rec, record_contents = self._data._parse_record_unchecked(raw)
        fulltext = self._annotate_factory.parse_fulltext(record_contents, rec[1])
        size, bytes = self._data._record_to_data(
            (rec[1],), rec[3], fulltext.text())
        return bytes
157 |
||
158 |
||
159 |
class DeltaAnnotatedToUnannotated(KnitAdapter):
    """An adapter for deltas from annotated to unannotated."""

    def get_bytes(self, factory):
        """Re-serialise factory's annotated delta record without annotations."""
        raw = factory._raw_record
        rec, record_contents = self._data._parse_record_unchecked(raw)
        annotated_delta = self._annotate_factory.parse_line_delta(
            record_contents, rec[1], plain=True)
        plain_contents = self._plain_factory.lower_line_delta(annotated_delta)
        size, bytes = self._data._record_to_data(
            (rec[1],), rec[3], plain_contents)
        return bytes
171 |
||
172 |
||
173 |
class FTAnnotatedToFullText(KnitAdapter):
    """An adapter from FT annotated knit records to plain fulltexts."""
    # NOTE: the original docstring said "to unannotated ones", a copy-paste
    # from FTAnnotatedToUnannotated; this adapter produces a fulltext string.

    def get_bytes(self, factory):
        """Return the fulltext for factory's annotated fulltext record.

        :param factory: A content factory whose _raw_record holds an
            annotated fulltext knit record.
        :return: The reconstructed fulltext as a single string.
        """
        annotated_compressed_bytes = factory._raw_record
        rec, contents = \
            self._data._parse_record_unchecked(annotated_compressed_bytes)
        # The delta slot of parse_record is unused for fulltext records
        # (matching the `content, _` convention used by DeltaPlainToFullText).
        content, _ = self._annotate_factory.parse_record(factory.key[-1],
            contents, factory._build_details, None)
        return ''.join(content.text())
|
183 |
||
184 |
||
185 |
class DeltaAnnotatedToFullText(KnitAdapter):
    """An adapter from annotated knit deltas to plain fulltexts."""

    def get_bytes(self, factory):
        """Expand factory's annotated delta record into a fulltext string.

        Fetches the compression parent's text from self._basis_vf and
        applies the parsed delta to it.

        :raises errors.RevisionNotPresent: If the compression parent is
            absent from the basis versioned files.
        """
        annotated_compressed_bytes = factory._raw_record
        rec, contents = \
            self._data._parse_record_unchecked(annotated_compressed_bytes)
        # plain=True: parse the annotated delta for application to plain
        # content (TODO confirm exact parse_line_delta semantics).
        delta = self._annotate_factory.parse_line_delta(contents, rec[1],
            plain=True)
        compression_parent = factory.parents[0]
        basis_entry = self._basis_vf.get_record_stream(
            [compression_parent], 'unordered', True).next()
        if basis_entry.storage_kind == 'absent':
            raise errors.RevisionNotPresent(compression_parent, self._basis_vf)
        basis_chunks = basis_entry.get_bytes_as('chunked')
        basis_lines = osutils.chunks_to_lines(basis_chunks)
        # Manually apply the delta because we have one annotated content and
        # one plain.
        basis_content = PlainKnitContent(basis_lines, compression_parent)
        basis_content.apply_delta(delta, rec[1])
        # _build_details[1] is the no-eol flag: True when the final newline
        # is not really part of the text (see knit_network_to_record).
        basis_content._should_strip_eol = factory._build_details[1]
        return ''.join(basis_content.text())
|
207 |
||
208 |
||
3350.3.5
by Robert Collins
Create adapters from plain compressed knit content. |
209 |
class FTPlainToFullText(KnitAdapter):
    """An adapter from FT plain knit records to fulltexts."""
    # NOTE: the original docstring said "to unannotated ones", a copy-paste
    # error; this adapter produces a fulltext string from a plain record.

    def get_bytes(self, factory):
        """Return the fulltext for factory's plain fulltext record.

        :param factory: A content factory whose _raw_record holds a plain
            (unannotated) fulltext knit record.
        :return: The reconstructed fulltext as a single string.
        """
        compressed_bytes = factory._raw_record
        rec, contents = \
            self._data._parse_record_unchecked(compressed_bytes)
        # The delta slot of parse_record is unused for fulltext records
        # (matching the `content, _` convention used by DeltaPlainToFullText).
        content, _ = self._plain_factory.parse_record(factory.key[-1],
            contents, factory._build_details, None)
        return ''.join(content.text())
|
219 |
||
220 |
||
221 |
class DeltaPlainToFullText(KnitAdapter):
    """An adapter from plain (unannotated) knit deltas to fulltexts."""

    def get_bytes(self, factory):
        """Expand factory's plain delta record into a fulltext string.

        Fetches the compression parent's text from self._basis_vf and lets
        the plain factory apply the delta against it.

        :raises errors.RevisionNotPresent: If the compression parent is
            absent from the basis versioned files.
        """
        compressed_bytes = factory._raw_record
        rec, contents = \
            self._data._parse_record_unchecked(compressed_bytes)
        delta = self._plain_factory.parse_line_delta(contents, rec[1])
        compression_parent = factory.parents[0]
        # XXX: string splitting overhead.
        basis_entry = self._basis_vf.get_record_stream(
            [compression_parent], 'unordered', True).next()
        if basis_entry.storage_kind == 'absent':
            raise errors.RevisionNotPresent(compression_parent, self._basis_vf)
        basis_chunks = basis_entry.get_bytes_as('chunked')
        basis_lines = osutils.chunks_to_lines(basis_chunks)
        basis_content = PlainKnitContent(basis_lines, compression_parent)
        # Manually apply the delta because we have one annotated content and
        # one plain.
        # NOTE(review): both contents here appear plain; the comment above
        # looks copied from DeltaAnnotatedToFullText — confirm.
        content, _ = self._plain_factory.parse_record(rec[1], contents,
            factory._build_details, basis_content)
        return ''.join(content.text())
|
243 |
||
244 |
||
3350.3.3
by Robert Collins
Functional get_record_stream interface tests covering full interface. |
245 |
class KnitContentFactory(ContentFactory):
    """Content factory for streaming from knits.

    :seealso ContentFactory:
    """

    def __init__(self, key, parents, build_details, sha1, raw_record,
        annotated, knit=None, network_bytes=None):
        """Create a KnitContentFactory for key.

        :param key: The key.
        :param parents: The parents.
        :param build_details: The build details as returned from
            get_build_details.
        :param sha1: The sha1 expected from the full text of this object.
        :param raw_record: The bytes of the knit data from disk.
        :param annotated: True if the raw data is annotated.
        :param knit: Optional backing knit used by get_bytes_as for
            'chunked'/'fulltext' extraction of delta records.
        :param network_bytes: None to calculate the network bytes on demand,
            not-none if they are already known.
        """
        ContentFactory.__init__(self)
        self.sha1 = sha1
        self.key = key
        self.parents = parents
        # A 'line-delta' record is stored as a delta against its compression
        # parent; anything else is a fulltext ('ft').
        if build_details[0] == 'line-delta':
            kind = 'delta'
        else:
            kind = 'ft'
        if annotated:
            annotated_kind = 'annotated-'
        else:
            annotated_kind = ''
        # e.g. 'knit-ft-gz', 'knit-annotated-delta-gz'.
        self.storage_kind = 'knit-%s%s-gz' % (annotated_kind, kind)
        self._raw_record = raw_record
        self._network_bytes = network_bytes
        self._build_details = build_details
        self._knit = knit

    def _create_network_bytes(self):
        """Create a fully serialised network version for transmission."""
        # Wire layout: storage_kind, key, parents, Noeol, raw_record
        # (parsed back by knit_network_to_record).
        key_bytes = '\x00'.join(self.key)
        if self.parents is None:
            parent_bytes = 'None:'
        else:
            # Parents are tab-separated keys, each key \x00-joined.
            parent_bytes = '\t'.join('\x00'.join(key) for key in self.parents)
        # _build_details[1] is the no-eol flag: 'N' when the final newline is
        # not really part of the text, a space otherwise.
        if self._build_details[1]:
            noeol = 'N'
        else:
            noeol = ' '
        network_bytes = "%s\n%s\n%s\n%s%s" % (self.storage_kind, key_bytes,
            parent_bytes, noeol, self._raw_record)
        self._network_bytes = network_bytes

    def get_bytes_as(self, storage_kind):
        """Return this record's content in the requested representation.

        Handles three cases: the record's own knit storage kind (building
        the network serialisation on demand), adapter-based conversion of
        fulltext ('-ft-') records to 'chunked'/'fulltext', and extraction
        through the backing knit when one was supplied.

        :raises errors.UnavailableRepresentation: If storage_kind cannot
            be satisfied for this record.
        """
        if storage_kind == self.storage_kind:
            if self._network_bytes is None:
                self._create_network_bytes()
            return self._network_bytes
        if ('-ft-' in self.storage_kind and
            storage_kind in ('chunked', 'fulltext')):
            adapter_key = (self.storage_kind, 'fulltext')
            adapter_factory = adapter_registry.get(adapter_key)
            adapter = adapter_factory(None)
            bytes = adapter.get_bytes(self)
            if storage_kind == 'chunked':
                return [bytes]
            else:
                return bytes
        if self._knit is not None:
            # Not redundant with direct conversion above - that only handles
            # fulltext cases.
            if storage_kind == 'chunked':
                return self._knit.get_lines(self.key[0])
            elif storage_kind == 'fulltext':
                return self._knit.get_text(self.key[0])
        raise errors.UnavailableRepresentation(self.key, storage_kind,
            self.storage_kind)
|
3350.3.3
by Robert Collins
Functional get_record_stream interface tests covering full interface. |
323 |
|
324 |
||
4005.3.6
by Robert Collins
Support delta_closure=True with NetworkRecordStream to transmit deltas over the wire when full text extraction is required on the far end. |
325 |
class LazyKnitContentFactory(ContentFactory):
    """A ContentFactory which can either generate full text or a wire form.

    :seealso ContentFactory:
    """

    def __init__(self, key, parents, generator, first):
        """Create a LazyKnitContentFactory.

        :param key: The key of the record.
        :param parents: The parents of the record.
        :param generator: A _ContentMapGenerator containing the record for this
            key.
        :param first: Is this the first content object returned from generator?
            if it is, its storage kind is knit-delta-closure, otherwise it is
            knit-delta-closure-ref
        """
        self.key = key
        self.parents = parents
        self.sha1 = None
        self._generator = generator
        self._first = first
        if first:
            self.storage_kind = "knit-delta-closure"
        else:
            self.storage_kind = "knit-delta-closure-ref"

    def get_bytes_as(self, storage_kind):
        """Return the record content in the requested representation."""
        if storage_kind == self.storage_kind:
            if not self._first:
                # All the keys etc are contained in the bytes returned in the
                # first record; later records are bare references.
                return ''
            return self._generator._wire_bytes()
        if storage_kind in ('chunked', 'fulltext'):
            chunks = self._generator._get_one_work(self.key).text()
            if storage_kind == 'chunked':
                return chunks
            return ''.join(chunks)
        raise errors.UnavailableRepresentation(self.key, storage_kind,
            self.storage_kind)
|
367 |
||
368 |
||
369 |
def knit_delta_closure_to_records(storage_kind, bytes, line_end):
    """Convert a network record to a iterator over stream records.

    :param storage_kind: The storage kind of the record.
        Must be 'knit-delta-closure'.
    :param bytes: The bytes of the record on the network.
    """
    # Delegate parsing and record generation to the content map generator.
    return _NetworkContentMapGenerator(bytes, line_end).get_record_stream()
|
378 |
||
379 |
||
4005.3.2
by Robert Collins
First passing NetworkRecordStream test - a fulltext from any record type which isn't a chunked or fulltext can be serialised and deserialised successfully. |
380 |
def knit_network_to_record(storage_kind, bytes, line_end):
    """Convert a network record to a record object.

    :param storage_kind: The storage kind of the record.
    :param bytes: The bytes of the record on the network.
    :param line_end: Offset into bytes where the serialised payload
        (after the storage-kind line) begins.
    :return: A one-element list holding a KnitContentFactory for the record.
    """
    # Wire layout (see KnitContentFactory._create_network_bytes):
    # storage_kind '\n' key '\n' parents '\n' noeol-flag raw_record
    start = line_end
    line_end = bytes.find('\n', start)
    # Key elements are \x00-separated.
    key = tuple(bytes[start:line_end].split('\x00'))
    start = line_end + 1
    line_end = bytes.find('\n', start)
    parent_line = bytes[start:line_end]
    if parent_line == 'None:':
        # 'None:' is the serialised form of parents=None.
        parents = None
    else:
        # Parents are tab-separated keys, each key \x00-separated.
        parents = tuple(
            [tuple(segment.split('\x00')) for segment in parent_line.split('\t')
             if segment])
    start = line_end + 1
    # Single-byte flag: 'N' means the text has no real trailing newline.
    noeol = bytes[start] == 'N'
    if 'ft' in storage_kind:
        method = 'fulltext'
    else:
        method = 'line-delta'
    build_details = (method, noeol)
    # Skip the flag byte; the remainder is the raw knit record.
    start = start + 1
    raw_record = bytes[start:]
    annotated = 'annotated' in storage_kind
    return [KnitContentFactory(key, parents, build_details, None, raw_record,
        annotated, network_bytes=bytes)]
|
4005.3.2
by Robert Collins
First passing NetworkRecordStream test - a fulltext from any record type which isn't a chunked or fulltext can be serialised and deserialised successfully. |
410 |
|
411 |
||
1563.2.4
by Robert Collins
First cut at including the knit implementation of versioned_file. |
412 |
class KnitContent(object):
    """Content of a knit version to which deltas can be applied.

    This is always stored in memory as a list of lines with \n at the end,
    plus a flag saying if the final ending is really there or not, because that
    corresponds to the on-disk knit representation.
    """

    def __init__(self):
        # True when the stored final line carries a synthetic trailing
        # newline that callers must not see.
        self._should_strip_eol = False

    def apply_delta(self, delta, new_version_id):
        """Apply delta to this object to become new_version_id."""
        raise NotImplementedError(self.apply_delta)

    def line_delta_iter(self, new_lines):
        """Generate line-based delta from this content to new_lines."""
        target_texts = new_lines.text()
        source_texts = self.text()
        matcher = patiencediff.PatienceSequenceMatcher(
            None, source_texts, target_texts)
        for tag, i1, i2, j1, j2 in matcher.get_opcodes():
            if tag == 'equal':
                continue
            # ofrom, oto, length, data
            yield i1, i2, j2 - j1, new_lines._lines[j1:j2]

    def line_delta(self, new_lines):
        """Return the line delta to new_lines as a concrete list."""
        return [hunk for hunk in self.line_delta_iter(new_lines)]

    @staticmethod
    def get_line_delta_blocks(knit_delta, source, target):
        """Extract SequenceMatcher.get_matching_blocks() from a knit delta"""
        target_len = len(target)
        source_pos = 0
        target_pos = 0
        for delta_start, delta_end, delta_len, _ in knit_delta:
            gap = delta_start - source_pos
            matched = gap
            if matched > 0:
                # knit deltas do not provide reliable info about whether the
                # last line of a file matches, due to eol handling.
                if (source[source_pos + matched - 1]
                        != target[target_pos + matched - 1]):
                    matched -= 1
                if matched > 0:
                    yield source_pos, target_pos, matched
            target_pos += delta_len + gap
            source_pos = delta_end
        # Handle the run of matching lines after the last delta hunk.
        matched = target_len - target_pos
        if matched > 0:
            if (source[source_pos + matched - 1]
                    != target[target_pos + matched - 1]):
                matched -= 1
            if matched > 0:
                yield source_pos, target_pos, matched
        # Terminating sentinel block, mirroring SequenceMatcher's contract.
        yield source_pos + (target_len - target_pos), target_len, 0
466 |
||
1563.2.4
by Robert Collins
First cut at including the knit implementation of versioned_file. |
467 |
|
2794.1.2
by Robert Collins
Nuke versioned file add/get delta support, allowing easy simplification of unannotated Content, reducing memory copies and friction during commit on unannotated texts. |
468 |
class AnnotatedKnitContent(KnitContent): |
469 |
"""Annotated content."""
|
|
470 |
||
471 |
def __init__(self, lines): |
|
3224.1.15
by John Arbash Meinel
Finish removing method and noeol from general knowledge, |
472 |
KnitContent.__init__(self) |
2794.1.2
by Robert Collins
Nuke versioned file add/get delta support, allowing easy simplification of unannotated Content, reducing memory copies and friction during commit on unannotated texts. |
473 |
self._lines = lines |
474 |
||
3316.2.13
by Robert Collins
* ``VersionedFile.annotate_iter`` is deprecated. While in principal this |
475 |
def annotate(self): |
476 |
"""Return a list of (origin, text) for each content line."""
|
|
3468.2.4
by Martin Pool
Test and fix #234748 problems in trailing newline diffs |
477 |
lines = self._lines[:] |
478 |
if self._should_strip_eol: |
|
479 |
origin, last_line = lines[-1] |
|
480 |
lines[-1] = (origin, last_line.rstrip('\n')) |
|
481 |
return lines |
|
2794.1.2
by Robert Collins
Nuke versioned file add/get delta support, allowing easy simplification of unannotated Content, reducing memory copies and friction during commit on unannotated texts. |
482 |
|
2921.2.1
by Robert Collins
* Knit text reconstruction now avoids making copies of the lines list for |
483 |
def apply_delta(self, delta, new_version_id): |
2921.2.2
by Robert Collins
Review feedback. |
484 |
"""Apply delta to this object to become new_version_id."""
|
2921.2.1
by Robert Collins
* Knit text reconstruction now avoids making copies of the lines list for |
485 |
offset = 0 |
486 |
lines = self._lines |
|
487 |
for start, end, count, delta_lines in delta: |
|
488 |
lines[offset+start:offset+end] = delta_lines |
|
489 |
offset = offset + (start - end) + count |
|
490 |
||
2794.1.2
by Robert Collins
Nuke versioned file add/get delta support, allowing easy simplification of unannotated Content, reducing memory copies and friction during commit on unannotated texts. |
491 |
def text(self): |
2911.1.1
by Martin Pool
Better messages when problems are detected inside a knit |
492 |
try: |
3224.1.22
by John Arbash Meinel
Cleanup the extra debugging info, and some >80 char lines. |
493 |
lines = [text for origin, text in self._lines] |
2911.1.1
by Martin Pool
Better messages when problems are detected inside a knit |
494 |
except ValueError, e: |
495 |
# most commonly (only?) caused by the internal form of the knit
|
|
496 |
# missing annotation information because of a bug - see thread
|
|
497 |
# around 20071015
|
|
498 |
raise KnitCorrupt(self, |
|
499 |
"line in annotated knit missing annotation information: %s" |
|
500 |
% (e,)) |
|
3224.1.15
by John Arbash Meinel
Finish removing method and noeol from general knowledge, |
501 |
if self._should_strip_eol: |
3350.3.4
by Robert Collins
Finish adapters for annotated knits to unannotated knits and full texts. |
502 |
lines[-1] = lines[-1].rstrip('\n') |
3224.1.15
by John Arbash Meinel
Finish removing method and noeol from general knowledge, |
503 |
return lines |
504 |
||
2794.1.2
by Robert Collins
Nuke versioned file add/get delta support, allowing easy simplification of unannotated Content, reducing memory copies and friction during commit on unannotated texts. |
505 |
def copy(self): |
506 |
return AnnotatedKnitContent(self._lines[:]) |
|
507 |
||
508 |
||
509 |
class PlainKnitContent(KnitContent):
    """Unannotated content.

    When annotate[_iter] is called on this content, the same version is reported
    for all lines. Generally, annotate[_iter] is not useful on PlainKnitContent
    objects.
    """

    def __init__(self, lines, version_id):
        KnitContent.__init__(self)
        self._lines = lines
        self._version_id = version_id

    def annotate(self):
        """Return a list of (origin, text) for each content line."""
        version = self._version_id
        return [(version, text) for text in self._lines]

    def apply_delta(self, delta, new_version_id):
        """Apply delta to this object to become new_version_id."""
        shift = 0
        target = self._lines
        for start, end, count, replacement in delta:
            target[shift + start:shift + end] = replacement
            # Keep positions aligned after replacing (end - start) lines
            # with count lines.
            shift += count - (end - start)
        self._version_id = new_version_id

    def copy(self):
        """Return a new PlainKnitContent with a copied line list."""
        return PlainKnitContent(list(self._lines), self._version_id)

    def text(self):
        """Return the plain text lines."""
        content = self._lines
        if self._should_strip_eol:
            # Copy before trimming so the stored lines stay untouched.
            content = list(content)
            content[-1] = content[-1].rstrip('\n')
        return content
|
544 |
||
545 |
||
546 |
class _KnitFactory(object): |
|
547 |
"""Base class for common Factory functions."""
|
|
548 |
||
549 |
def parse_record(self, version_id, record, record_details, |
|
550 |
base_content, copy_base_content=True): |
|
551 |
"""Parse a record into a full content object.
|
|
552 |
||
553 |
:param version_id: The official version id for this content
|
|
554 |
:param record: The data returned by read_records_iter()
|
|
555 |
:param record_details: Details about the record returned by
|
|
556 |
get_build_details
|
|
557 |
:param base_content: If get_build_details returns a compression_parent,
|
|
558 |
you must return a base_content here, else use None
|
|
559 |
:param copy_base_content: When building from the base_content, decide
|
|
560 |
you can either copy it and return a new object, or modify it in
|
|
561 |
place.
|
|
562 |
:return: (content, delta) A Content object and possibly a line-delta,
|
|
563 |
delta may be None
|
|
564 |
"""
|
|
565 |
method, noeol = record_details |
|
566 |
if method == 'line-delta': |
|
567 |
if copy_base_content: |
|
568 |
content = base_content.copy() |
|
569 |
else: |
|
570 |
content = base_content |
|
571 |
delta = self.parse_line_delta(record, version_id) |
|
572 |
content.apply_delta(delta, version_id) |
|
573 |
else: |
|
574 |
content = self.parse_fulltext(record, version_id) |
|
575 |
delta = None |
|
576 |
content._should_strip_eol = noeol |
|
577 |
return (content, delta) |
|
578 |
||
579 |
||
580 |
class KnitAnnotateFactory(_KnitFactory):
    """Factory for creating annotated Content objects."""

    annotated = True

    def make(self, lines, version_id):
        """Wrap plain lines as AnnotatedKnitContent, attributing every line
        to version_id."""
        num_lines = len(lines)
        return AnnotatedKnitContent(zip([version_id] * num_lines, lines))

    def parse_fulltext(self, content, version_id):
        """Convert fulltext to internal representation

        fulltext content is of the format
        revid(utf8) plaintext\n
        internal representation is of the format:
        (revid, plaintext)
        """
        # TODO: jam 20070209 The tests expect this to be returned as tuples,
        #       but the code itself doesn't really depend on that.
        #       Figure out a way to not require the overhead of turning the
        #       list back into tuples.
        lines = [tuple(line.split(' ', 1)) for line in content]
        return AnnotatedKnitContent(lines)

    def parse_line_delta_iter(self, lines):
        # NOTE(review): parse_line_delta requires a version_id argument that
        # is not supplied here, so this would raise TypeError if ever called;
        # left unchanged because fixing it needs knowledge of the callers.
        return iter(self.parse_line_delta(lines))

    def parse_line_delta(self, lines, version_id, plain=False):
        """Convert a line based delta into internal representation.

        line delta is in the form of:
        intstart intend intcount
        1..count lines:
        revid(utf8) newline\n
        internal representation is
        (start, end, count, [1..count tuples (revid, newline)])

        :param plain: If True, the lines are returned as a plain
            list without annotations, not as a list of (origin, content)
            tuples, i.e. (start, end, count, [1..count newline])
        """
        # An unused local cache ({} + cache_and_return helper) was removed
        # here: neither parsing loop below ever called it.
        result = []
        lines = iter(lines)
        next = lines.next
        # walk through the lines parsing.
        # Note that the plain test is explicitly pulled out of the
        # loop to minimise any performance impact
        if plain:
            for header in lines:
                start, end, count = [int(n) for n in header.split(',')]
                contents = [next().split(' ', 1)[1] for i in xrange(count)]
                result.append((start, end, count, contents))
        else:
            for header in lines:
                start, end, count = [int(n) for n in header.split(',')]
                contents = [tuple(next().split(' ', 1)) for i in xrange(count)]
                result.append((start, end, count, contents))
        return result

    def get_fulltext_content(self, lines):
        """Extract just the content lines from a fulltext."""
        return (line.split(' ', 1)[1] for line in lines)

    def get_linedelta_content(self, lines):
        """Extract just the content from a line delta.

        This doesn't return all of the extra information stored in a delta.
        Only the actual content lines.
        """
        lines = iter(lines)
        next = lines.next
        for header in lines:
            header = header.split(',')
            count = int(header[2])
            for i in xrange(count):
                origin, text = next().split(' ', 1)
                yield text

    def lower_fulltext(self, content):
        """convert a fulltext content record into a serializable form.

        see parse_fulltext which this inverts.
        """
        return ['%s %s' % (o, t) for o, t in content._lines]

    def lower_line_delta(self, delta):
        """convert a delta into a serializable form.

        See parse_line_delta which this inverts.
        """
        # TODO: jam 20070209 We only do the caching thing to make sure that
        #       the origin is a valid utf-8 line, eventually we could remove it
        out = []
        for start, end, c, lines in delta:
            out.append('%d,%d,%d\n' % (start, end, c))
            out.extend(origin + ' ' + text
                       for origin, text in lines)
        return out

    def annotate(self, knit, key):
        """Return (full-key origin, line) pairs for the text of key."""
        content = knit._get_content(key)
        # adjust for the fact that serialised annotations are only key suffixes
        # for this factory.
        if type(key) is tuple:
            prefix = key[:-1]
            origins = content.annotate()
            result = []
            for origin, line in origins:
                result.append((prefix + (origin,), line))
            return result
        else:
            # XXX: This smells a bit. Why would key ever be a non-tuple here?
            # Aren't keys defined to be tuples? -- spiv 20080618
            return content.annotate()
2770.1.1
by Aaron Bentley
Initial implmentation of plain knit annotation |
700 |
|
1563.2.4
by Robert Collins
First cut at including the knit implementation of versioned_file. |
701 |
|
3224.1.15
by John Arbash Meinel
Finish removing method and noeol from general knowledge, |
702 |
class KnitPlainFactory(_KnitFactory):
    """Factory for creating plain Content objects."""

    annotated = False

    def make(self, lines, version_id):
        """Wrap plain lines as PlainKnitContent for version_id."""
        return PlainKnitContent(lines, version_id)

    def parse_fulltext(self, content, version_id):
        """This parses an unannotated fulltext.

        Note that this is not a noop - the internal representation
        has (versionid, line) - its just a constant versionid.
        """
        return self.make(content, version_id)

    def parse_line_delta_iter(self, lines, version_id):
        """Yield (start, end, count, lines) hunks from a serialised delta."""
        pos = 0
        total = len(lines)
        while pos < total:
            start, end, count = [int(field) for field in lines[pos].split(',')]
            pos += 1
            yield start, end, count, lines[pos:pos + count]
            pos += count

    def parse_line_delta(self, lines, version_id):
        """Return the parsed delta hunks as a concrete list."""
        return [hunk for hunk in self.parse_line_delta_iter(lines, version_id)]

    def get_fulltext_content(self, lines):
        """Extract just the content lines from a fulltext."""
        return iter(lines)

    def get_linedelta_content(self, lines):
        """Extract just the content from a line delta.

        This doesn't return all of the extra information stored in a delta.
        Only the actual content lines.
        """
        line_iter = iter(lines)
        fetch = line_iter.next
        for header in line_iter:
            count = int(header.split(',')[2])
            for _ in xrange(count):
                yield fetch()

    def lower_fulltext(self, content):
        """Serialise a fulltext content record: just its text lines."""
        return content.text()

    def lower_line_delta(self, delta):
        """Serialise delta hunks into the on-disk header + lines form."""
        serialised = []
        for start, end, count, hunk_lines in delta:
            serialised.append('%d,%d,%d\n' % (start, end, count))
            serialised.extend(hunk_lines)
        return serialised

    def annotate(self, knit, key):
        """Annotate key by rebuilding per-line origins from the knit graph."""
        annotator = _KnitAnnotator(knit)
        return annotator.annotate_flat(key)
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
762 |
|
763 |
||
764 |
||
765 |
def make_file_factory(annotated, mapper):
    """Create a factory for creating a file based KnitVersionedFiles.

    This is only functional enough to run interface tests, it doesn't try to
    provide a full pack environment.

    :param annotated: knit annotations are wanted.
    :param mapper: The mapper from keys to paths.
    """
    def construct(transport):
        # The three callables stand in for scope/allow-writes/allow-reads
        # hooks that a full repository would supply.
        knit_index = _KndxIndex(
            transport, mapper, lambda: None, lambda: True, lambda: True)
        key_access = _KnitKeyAccess(transport, mapper)
        return KnitVersionedFiles(knit_index, key_access, annotated=annotated)
    return construct
|
779 |
||
780 |
||
781 |
def make_pack_factory(graph, delta, keylength):
    """Create a factory for creating a pack based VersionedFiles.

    This is only functional enough to run interface tests, it doesn't try to
    provide a full pack environment.

    :param graph: Store a graph.
    :param delta: Delta compress contents.
    :param keylength: How long should keys be.
    """
    def construct(transport):
        parents = graph or delta
        # One reference list per kind of edge we track.
        reference_lists = 0
        if graph:
            reference_lists += 1
        if delta:
            reference_lists += 1
            max_delta_chain = 200
        else:
            max_delta_chain = 0
        graph_index = _mod_index.InMemoryGraphIndex(
            reference_lists=reference_lists, key_elements=keylength)
        stream = transport.open_write_stream('newpack')
        writer = pack.ContainerWriter(stream.write)
        writer.begin()
        knit_index = _KnitGraphIndex(graph_index, lambda: True, parents=parents,
            deltas=delta, add_callback=graph_index.add_nodes)
        access = _DirectPackAccess({})
        access.set_writer(writer, graph_index, (transport, 'newpack'))
        result = KnitVersionedFiles(knit_index, access,
            max_delta_chain=max_delta_chain)
        # Stash the open resources so cleanup_pack_knit can release them.
        result.stream = stream
        result.writer = writer
        return result
    return construct
|
816 |
||
817 |
||
818 |
def cleanup_pack_knit(versioned_files):
    """Release the pack resources attached by make_pack_factory.

    Closes the write stream first, then finalises the container writer,
    matching the original shutdown order.
    """
    versioned_files.stream.close()
    versioned_files.writer.end()
|
821 |
||
822 |
||
4039.3.5
by John Arbash Meinel
Add direct tests for _get_total_build_size. |
823 |
def _get_total_build_size(self, keys, positions): |
4039.3.4
by John Arbash Meinel
Properly determine the total number of bytes needed for a given key. |
824 |
"""Determine the total bytes to build these keys.
|
825 |
||
826 |
(helper function because _KnitGraphIndex and _KndxIndex work the same, but
|
|
827 |
don't inherit from a common base.)
|
|
828 |
||
829 |
:param keys: Keys that we want to build
|
|
830 |
:param positions: dict of {key, (info, index_memo, comp_parent)} (such
|
|
831 |
as returned by _get_components_positions)
|
|
832 |
:return: Number of bytes to build those keys
|
|
833 |
"""
|
|
834 |
all_build_index_memos = {} |
|
835 |
build_keys = keys |
|
836 |
while build_keys: |
|
837 |
next_keys = set() |
|
838 |
for key in build_keys: |
|
839 |
# This is mostly for the 'stacked' case
|
|
840 |
# Where we will be getting the data from a fallback
|
|
841 |
if key not in positions: |
|
842 |
continue
|
|
843 |
_, index_memo, compression_parent = positions[key] |
|
844 |
all_build_index_memos[key] = index_memo |
|
845 |
if compression_parent not in all_build_index_memos: |
|
846 |
next_keys.add(compression_parent) |
|
847 |
build_keys = next_keys |
|
848 |
return sum([index_memo[2] for index_memo |
|
849 |
in all_build_index_memos.itervalues()]) |
|
850 |
||
851 |
||
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
852 |
class KnitVersionedFiles(VersionedFiles): |
853 |
"""Storage for many versioned files using knit compression.
|
|
854 |
||
855 |
Backend storage is managed by indices and data objects.
|
|
3582.1.14
by Martin Pool
Clearer comments about KnitVersionedFile stacking |
856 |
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
857 |
:ivar _index: A _KnitGraphIndex or similar that can describe the
|
858 |
parents, graph, compression and data location of entries in this
|
|
859 |
KnitVersionedFiles. Note that this is only the index for
|
|
3582.1.16
by Martin Pool
Review feedback and news entry |
860 |
*this* vfs; if there are fallbacks they must be queried separately.
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
861 |
"""
|
862 |
||
863 |
def __init__(self, index, data_access, max_delta_chain=200, |
|
3789.2.1
by John Arbash Meinel
_DirectPackAccess can now raise RetryWithNewPacks when we think something has happened. |
864 |
annotated=False, reload_func=None): |
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
865 |
"""Create a KnitVersionedFiles with index and data_access.
|
866 |
||
867 |
:param index: The index for the knit data.
|
|
868 |
:param data_access: The access object to store and retrieve knit
|
|
869 |
records.
|
|
870 |
:param max_delta_chain: The maximum number of deltas to permit during
|
|
871 |
insertion. Set to 0 to prohibit the use of deltas.
|
|
872 |
:param annotated: Set to True to cause annotations to be calculated and
|
|
873 |
stored during insertion.
|
|
3789.2.1
by John Arbash Meinel
_DirectPackAccess can now raise RetryWithNewPacks when we think something has happened. |
874 |
:param reload_func: An function that can be called if we think we need
|
875 |
to reload the pack listing and try again. See
|
|
876 |
'bzrlib.repofmt.pack_repo.AggregateIndex' for the signature.
|
|
1563.2.25
by Robert Collins
Merge in upstream. |
877 |
"""
|
3316.2.3
by Robert Collins
Remove manual notification of transaction finishing on versioned files. |
878 |
self._index = index |
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
879 |
self._access = data_access |
880 |
self._max_delta_chain = max_delta_chain |
|
881 |
if annotated: |
|
882 |
self._factory = KnitAnnotateFactory() |
|
883 |
else: |
|
884 |
self._factory = KnitPlainFactory() |
|
3350.8.1
by Robert Collins
KnitVersionedFiles.add_fallback_versioned_files exists. |
885 |
self._fallback_vfs = [] |
3789.2.1
by John Arbash Meinel
_DirectPackAccess can now raise RetryWithNewPacks when we think something has happened. |
886 |
self._reload_func = reload_func |
3350.8.1
by Robert Collins
KnitVersionedFiles.add_fallback_versioned_files exists. |
887 |
|
3702.1.1
by Martin Pool
Add repr for KnitVersionedFiles |
888 |
def __repr__(self):
    """Return a debug representation naming the index and access objects."""
    cls_name = self.__class__.__name__
    return "%s(%r, %r)" % (cls_name, self._index, self._access)
|
893 |
||
3350.8.1
by Robert Collins
KnitVersionedFiles.add_fallback_versioned_files exists. |
894 |
def add_fallback_versioned_files(self, a_versioned_files):
    """Register a fallback source consulted for texts not present locally.

    :param a_versioned_files: A VersionedFiles object.
    """
    fallbacks = self._fallback_vfs
    fallbacks.append(a_versioned_files)
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
900 |
|
901 |
def add_lines(self, key, parents, lines, parent_texts=None,
    left_matching_blocks=None, nostore_sha=None, random_id=False,
    check_content=True):
    """See VersionedFiles.add_lines()."""
    self._index._check_write_ok()
    self._check_add(key, lines, random_id, check_content)
    if parents is None:
        # kndx indexes cannot represent "no graph data" directly, so an
        # empty tuple stands in for None.
        parents = ()
    joined = ''.join(lines)
    return self._add(key, lines, parents,
        parent_texts, left_matching_blocks, nostore_sha, random_id,
        line_bytes=joined)
|
916 |
||
4398.8.6
by John Arbash Meinel
Switch the api from VF.add_text to VF._add_text and trim some extra 'features'. |
917 |
def _add_text(self, key, parents, text, nostore_sha=None, random_id=False):
    """See VersionedFiles._add_text()."""
    self._index._check_write_ok()
    self._check_add(key, None, random_id, check_content=False)
    # Only plain byte strings are accepted here; unicode must be encoded
    # by the caller first.
    if text.__class__ is not str:
        raise errors.BzrBadParameterUnicode("text")
    # Normalise a parentless request to the empty tuple, as kndx indexes
    # cannot store None.
    effective_parents = parents if parents is not None else ()
    return self._add(key, None, effective_parents,
        None, None, nostore_sha, random_id,
        line_bytes=text)
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
931 |
|
932 |
def _add(self, key, lines, parents, parent_texts,
    left_matching_blocks, nostore_sha, random_id,
    line_bytes):
    """Add a set of lines on top of version specified by parents.

    Any versions not present will be converted into ghosts.

    :param lines: A list of strings where each one is a single line (has a
        single newline at the end of the string) This is now optional
        (callers can pass None). It is left in its location for backwards
        compatibility. It should ''.join(lines) must == line_bytes
    :param line_bytes: A single string containing the content

    We pass both lines and line_bytes because different routes bring the
    values to this function. And for memory efficiency, we don't want to
    have to split/join on-demand.
    """
    # first thing, if the content is something we don't need to store, find
    # that out.
    digest = sha_string(line_bytes)
    if nostore_sha == digest:
        # Caller already has this exact text; signal that instead of
        # storing a duplicate.
        raise errors.ExistingContent

    present_parents = []
    if parent_texts is None:
        parent_texts = {}
    # Do a single query to ascertain parent presence; we only compress
    # against parents in the same kvf.
    present_parent_map = self._index.get_parent_map(parents)
    for parent in parents:
        if parent in present_parent_map:
            present_parents.append(parent)

    # Currently we can only compress against the left most present parent.
    if (len(present_parents) == 0 or
        present_parents[0] != parents[0]):
        delta = False
    else:
        # To speed the extract of texts the delta chain is limited
        # to a fixed number of deltas.  This should minimize both
        # I/O and the time spend applying deltas.
        delta = self._check_should_delta(present_parents[0])

    text_length = len(line_bytes)
    options = []
    no_eol = False
    # Note: line_bytes is not modified to add a newline, that is tracked
    #       via the no_eol flag. 'lines' *is* modified, because that is the
    #       general values needed by the Content code.
    if line_bytes and line_bytes[-1] != '\n':
        options.append('no-eol')
        no_eol = True
        # Copy the existing list, or create a new one
        if lines is None:
            lines = osutils.split_lines(line_bytes)
        else:
            lines = lines[:]
        # Replace the last line with one that ends in a final newline
        lines[-1] = lines[-1] + '\n'
    if lines is None:
        lines = osutils.split_lines(line_bytes)

    # Keys must be tuples of plain strings; the last element may be None
    # to request a content-addressed ('sha1:...') name.
    for element in key[:-1]:
        if type(element) is not str:
            raise TypeError("key contains non-strings: %r" % (key,))
    if key[-1] is None:
        key = key[:-1] + ('sha1:' + digest,)
    elif type(key[-1]) is not str:
        raise TypeError("key contains non-strings: %r" % (key,))
    # Knit hunks are still last-element only
    version_id = key[-1]
    content = self._factory.make(lines, version_id)
    if no_eol:
        # Hint to the content object that its text() call should strip the
        # EOL.
        content._should_strip_eol = True
    if delta or (self._factory.annotated and len(present_parents) > 0):
        # Merge annotations from parent texts if needed.
        delta_hunks = self._merge_annotations(content, present_parents,
            parent_texts, delta, self._factory.annotated,
            left_matching_blocks)

    if delta:
        options.append('line-delta')
        store_lines = self._factory.lower_line_delta(delta_hunks)
        size, bytes = self._record_to_data(key, digest,
            store_lines)
    else:
        options.append('fulltext')
        # isinstance is slower and we have no hierarchy.
        if self._factory.__class__ is KnitPlainFactory:
            # Use the already joined bytes saving iteration time in
            # _record_to_data.
            dense_lines = [line_bytes]
            if no_eol:
                dense_lines.append('\n')
            size, bytes = self._record_to_data(key, digest,
                lines, dense_lines)
        else:
            # get mixed annotation + content and feed it into the
            # serialiser.
            store_lines = self._factory.lower_fulltext(content)
            size, bytes = self._record_to_data(key, digest,
                store_lines)

    # Persist the serialised record, then index it under the chosen
    # compression options.
    access_memo = self._access.add_raw_records([(key, size)], bytes)[0]
    self._index.add_records(
        ((key, options, access_memo, parents),),
        random_id=random_id)
    return digest, text_length, content
|
1042 |
||
1043 |
def annotate(self, key):
    """Return annotated lines for key. See VersionedFiles.annotate."""
    factory = self._factory
    return factory.annotate(self, key)
|
1046 |
||
4454.3.65
by John Arbash Meinel
Tests that VF implementations support .get_annotator() |
1047 |
def get_annotator(self):
    """Return a _KnitAnnotator bound to this versioned-files store."""
    return _KnitAnnotator(self)
|
1049 |
||
4332.3.26
by Robert Collins
Allow passing keys to check to VersionedFile.check(). |
1050 |
def check(self, progress_bar=None, keys=None):
    """See VersionedFiles.check()."""
    if keys is None:
        return self._logical_check()
    # At the moment, check does no extra work over get_record_stream
    return self.get_record_stream(keys, 'unordered', True)
|
1057 |
||
1058 |
def _logical_check(self):
    """Check that every delta record has a locally present basis parent.

    This doesn't actually test extraction of everything (that would make
    'bzr check' far too slow); it only catches the obvious corruption of
    a delta whose basis is missing. Fallback stores are checked
    recursively.
    """
    keys = self._index.keys()
    parent_map = self.get_parent_map(keys)
    for key in keys:
        if self._index.get_method(key) == 'fulltext':
            continue
        compression_parent = parent_map[key][0]
        if compression_parent not in parent_map:
            raise errors.KnitCorrupt(self,
                "Missing basis parent %s for %s" % (
                compression_parent, key))
    for fallback in self._fallback_vfs:
        fallback.check()
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1074 |
|
1075 |
def _check_add(self, key, lines, random_id, check_content):
    """check that version_id and lines are safe to add."""
    version_id = key[-1]
    if version_id is not None:
        # A None last element means a content-addressed key will be
        # generated later, so there is nothing to validate yet.
        if contains_whitespace(version_id):
            raise InvalidRevisionId(version_id, self)
        self.check_not_reserved_id(version_id)
    # TODO: If random_id==False and the key is already present, we should
    # probably check that the existing content is identical to what is
    # being inserted, and otherwise raise an exception.  This would make
    # the bundle code simpler.
    if not check_content:
        return
    self._check_lines_not_unicode(lines)
    self._check_lines_are_lines(lines)
|
1089 |
||
1090 |
def _check_header(self, key, line):
    """Split a knit record header line and validate its version id."""
    record = self._split_header(line)
    self._check_header_version(record, key[-1])
    return record
|
1094 |
||
1095 |
def _check_header_version(self, rec, version_id):
    """Checks the header version on original format knit records.

    These have the last component of the key embedded in the record.
    """
    found = rec[1]
    if found != version_id:
        raise KnitCorrupt(self,
            'unexpected version, wanted %r, got %r' % (version_id, found))
|
1103 |
||
1104 |
def _check_should_delta(self, parent):
    """Iterate back through the parent listing, looking for a fulltext.

    This is used when we want to decide whether to add a delta or a new
    fulltext. It searches for _max_delta_chain parents. When it finds a
    fulltext parent, it sees if the total size of the deltas leading up to
    it is large enough to indicate that we want a new full text anyway.

    Return True if we should create a new delta, False if we should use a
    full text.
    """
    delta_size = 0
    fulltext_size = None
    # Walk at most _max_delta_chain steps up the compression chain.
    for count in xrange(self._max_delta_chain):
        try:
            # Note that this only looks in the index of this particular
            # KnitVersionedFiles, not in the fallbacks.  This ensures that
            # we won't store a delta spanning physical repository
            # boundaries.
            build_details = self._index.get_build_details([parent])
            parent_details = build_details[parent]
        except (RevisionNotPresent, KeyError), e:
            # Some basis is not locally present: always fulltext
            return False
        index_memo, compression_parent, _, _ = parent_details
        _, _, size = index_memo
        if compression_parent is None:
            # Found the fulltext at the root of the chain; stop walking.
            fulltext_size = size
            break
        delta_size += size
        # We don't explicitly check for presence because this is in an
        # inner loop, and if it's missing it'll fail anyhow.
        parent = compression_parent
    else:
        # We couldn't find a fulltext, so we must create a new one
        return False
    # Simple heuristic - if the total I/O wold be greater as a delta than
    # the originally installed fulltext, we create a new fulltext.
    return fulltext_size > delta_size
2147.1.1
by John Arbash Meinel
Factor the common knit delta selection into a helper func, and allow the fulltext to be chosen based on cumulative delta size |
1143 |
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1144 |
def _build_details_to_components(self, build_details):
    """Convert a build_details tuple to a position tuple."""
    # Reorder as: record_details, index_memo, compression_parent
    record_details = build_details[3]
    index_memo = build_details[0]
    compression_parent = build_details[1]
    return record_details, index_memo, compression_parent
|
1148 |
||
3350.6.10
by Martin Pool
VersionedFiles review cleanups |
1149 |
def _get_components_positions(self, keys, allow_missing=False):
    """Produce a map of position data for the components of keys.

    This data is intended to be used for retrieving the knit records.

    A dict of key to (record_details, index_memo, next, parents) is
    returned.
    method is the way referenced data should be applied.
    index_memo is the handle to pass to the data access to actually get the
    data
    next is the build-parent of the version, or None for fulltexts.
    parents is the version_ids of the parents of this version

    :param allow_missing: If True do not raise an error on a missing component,
        just ignore it.
    """
    component_data = {}
    pending_components = keys
    # Worklist loop: each pass resolves the current keys and queues their
    # compression parents until whole build chains are covered.
    while pending_components:
        build_details = self._index.get_build_details(pending_components)
        current_components = set(pending_components)
        pending_components = set()
        for key, details in build_details.iteritems():
            (index_memo, compression_parent, parents,
             record_details) = details
            method = record_details[0]
            if compression_parent is not None:
                pending_components.add(compression_parent)
            component_data[key] = self._build_details_to_components(details)
        missing = current_components.difference(build_details)
        if missing and not allow_missing:
            raise errors.RevisionNotPresent(missing.pop(), self)
    return component_data
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
1182 |
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1183 |
def _get_content(self, key, parent_texts={}):
    """Return a content object that makes up the specified version.

    NOTE(review): the mutable default for parent_texts is only read,
    never mutated here, so the shared-default pitfall does not apply.
    """
    cached = parent_texts.get(key, None)
    if cached is not None:
        # A cached text is only trustworthy if the key is still known.
        if not self.get_parent_map([key]):
            raise RevisionNotPresent(key, self)
        return cached
    return _VFContentMapGenerator(self, [key])._get_content(key)
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1194 |
|
4593.5.20
by John Arbash Meinel
Expose KnownGraph off of VersionedFiles |
1195 |
def get_known_graph_ancestry(self, keys):
    """Get a KnownGraph instance with the ancestry of keys."""
    parent_map, missing_keys = self._index.find_ancestry(keys)
    # Consult each fallback in turn for whatever is still missing.
    for fallback in self._fallback_vfs:
        if not missing_keys:
            break
        f_parent_map, f_missing_keys = fallback._index.find_ancestry(
            missing_keys)
        parent_map.update(f_parent_map)
        missing_keys = f_missing_keys
    return _mod_graph.KnownGraph(parent_map)
|
1207 |
||
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1208 |
def get_parent_map(self, keys):
    """Get a map of the graph parents of keys.

    :param keys: The keys to look up parents for.
    :return: A mapping from keys to parents. Absent keys are absent from
        the mapping.
    """
    parent_map, _ = self._get_parent_map_with_sources(keys)
    return parent_map
3350.8.6
by Robert Collins
get_record_stream stacking for delta access. |
1216 |
|
3350.8.14
by Robert Collins
Review feedback. |
1217 |
def _get_parent_map_with_sources(self, keys):
    """Get a map of the parents of keys.

    :param keys: The keys to look up parents for.
    :return: A tuple. The first element is a mapping from keys to parents.
        Absent keys are absent from the mapping. The second element is a
        list with the locations each key was found in. The first element
        is the in-this-knit parents, the second the first fallback source,
        and so on.
    """
    result = {}
    source_results = []
    missing = set(keys)
    # Query this knit's own index first, then each fallback, stopping as
    # soon as every key has been resolved.
    for source in [self._index] + self._fallback_vfs:
        if not missing:
            break
        new_result = source.get_parent_map(missing)
        source_results.append(new_result)
        result.update(new_result)
        missing.difference_update(set(new_result))
    return result, source_results
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1239 |
|
3350.8.3
by Robert Collins
VF.get_sha1s needed changing to be stackable. |
1240 |
def _get_record_map(self, keys, allow_missing=False):
    """Produce a dictionary of knit records.

    :return: {key:(record, record_details, digest, next)}
        record
            data returned from read_records (a KnitContentobject)
        record_details
            opaque information to pass to parse_record
        digest
            SHA1 digest of the full text after all steps are done
        next
            build-parent of the version, i.e. the leftmost ancestor.
            Will be None if the record is not a delta.
    :param keys: The keys to build a map for
    :param allow_missing: If some records are missing, rather than
        error, just return the data that could be generated.
    """
    unparsed = self._get_record_map_unparsed(keys,
        allow_missing=allow_missing)
    return self._raw_map_to_record_map(unparsed)
|
1260 |
||
1261 |
def _raw_map_to_record_map(self, raw_map):
    """Parse the contents of _get_record_map_unparsed.

    :return: see _get_record_map.
    """
    parsed = {}
    for key, (data, record_details, next) in raw_map.items():
        content, digest = self._parse_record(key[-1], data)
        parsed[key] = (content, record_details, digest, next)
    return parsed
|
1272 |
||
1273 |
def _get_record_map_unparsed(self, keys, allow_missing=False): |
|
1274 |
"""Get the raw data for reconstructing keys without parsing it.
|
|
4032.1.1
by John Arbash Meinel
Merge the removal of all trailing whitespace, and resolve conflicts. |
1275 |
|
4005.3.6
by Robert Collins
Support delta_closure=True with NetworkRecordStream to transmit deltas over the wire when full text extraction is required on the far end. |
1276 |
:return: A dict suitable for parsing via _raw_map_to_record_map.
|
1277 |
key-> raw_bytes, (method, noeol), compression_parent
|
|
1278 |
"""
|
|
3789.2.11
by John Arbash Meinel
KnitVersionedFile.get_record_stream now retries *and* fails correctly. |
1279 |
# This retries the whole request if anything fails. Potentially we
|
1280 |
# could be a bit more selective. We could track the keys whose records
|
|
1281 |
# we have successfully found, and then only request the new records
|
|
1282 |
# from there. However, _get_components_positions grabs the whole build
|
|
1283 |
# chain, which means we'll likely try to grab the same records again
|
|
4005.3.7
by Robert Collins
Review feedback. |
1284 |
# anyway. Also, can the build chains change as part of a pack
|
3789.2.11
by John Arbash Meinel
KnitVersionedFile.get_record_stream now retries *and* fails correctly. |
1285 |
# operation? We wouldn't want to end up with a broken chain.
|
3789.2.10
by John Arbash Meinel
The first function for KnitVersionedFiles can now retry on request. |
1286 |
while True: |
1287 |
try: |
|
1288 |
position_map = self._get_components_positions(keys, |
|
1289 |
allow_missing=allow_missing) |
|
3789.2.11
by John Arbash Meinel
KnitVersionedFile.get_record_stream now retries *and* fails correctly. |
1290 |
# key = component_id, r = record_details, i_m = index_memo,
|
1291 |
# n = next
|
|
3789.2.10
by John Arbash Meinel
The first function for KnitVersionedFiles can now retry on request. |
1292 |
records = [(key, i_m) for key, (r, i_m, n) |
3789.2.11
by John Arbash Meinel
KnitVersionedFile.get_record_stream now retries *and* fails correctly. |
1293 |
in position_map.iteritems()] |
4039.3.1
by John Arbash Meinel
Group records to read by pack file and sort by offset. |
1294 |
# Sort by the index memo, so that we request records from the
|
1295 |
# same pack file together, and in forward-sorted order
|
|
1296 |
records.sort(key=operator.itemgetter(1)) |
|
4005.3.6
by Robert Collins
Support delta_closure=True with NetworkRecordStream to transmit deltas over the wire when full text extraction is required on the far end. |
1297 |
raw_record_map = {} |
1298 |
for key, data in self._read_records_iter_unchecked(records): |
|
3789.2.10
by John Arbash Meinel
The first function for KnitVersionedFiles can now retry on request. |
1299 |
(record_details, index_memo, next) = position_map[key] |
4005.3.6
by Robert Collins
Support delta_closure=True with NetworkRecordStream to transmit deltas over the wire when full text extraction is required on the far end. |
1300 |
raw_record_map[key] = data, record_details, next |
1301 |
return raw_record_map |
|
3789.2.10
by John Arbash Meinel
The first function for KnitVersionedFiles can now retry on request. |
1302 |
except errors.RetryWithNewPacks, e: |
1303 |
self._access.reload_or_raise(e) |
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1304 |
|
4039.3.6
by John Arbash Meinel
Turn _split_by_prefix into a classmethod, and add direct tests. |
1305 |
@classmethod
|
1306 |
def _split_by_prefix(cls, keys): |
|
3763.4.1
by John Arbash Meinel
Possible fix for bug #269456. |
1307 |
"""For the given keys, split them up based on their prefix.
|
1308 |
||
1309 |
To keep memory pressure somewhat under control, split the
|
|
1310 |
requests back into per-file-id requests, otherwise "bzr co"
|
|
1311 |
extracts the full tree into memory before writing it to disk.
|
|
1312 |
This should be revisited if _get_content_maps() can ever cross
|
|
1313 |
file-id boundaries.
|
|
1314 |
||
4039.3.6
by John Arbash Meinel
Turn _split_by_prefix into a classmethod, and add direct tests. |
1315 |
The keys for a given file_id are kept in the same relative order.
|
1316 |
Ordering between file_ids is not, though prefix_order will return the
|
|
1317 |
order that the key was first seen.
|
|
4039.3.2
by John Arbash Meinel
Batch get_record_stream(fulltexts) into 5MB requests. |
1318 |
|
3763.4.1
by John Arbash Meinel
Possible fix for bug #269456. |
1319 |
:param keys: An iterable of key tuples
|
4039.3.2
by John Arbash Meinel
Batch get_record_stream(fulltexts) into 5MB requests. |
1320 |
:return: (split_map, prefix_order)
|
1321 |
split_map A dictionary mapping prefix => keys
|
|
1322 |
prefix_order The order that we saw the various prefixes
|
|
3763.4.1
by John Arbash Meinel
Possible fix for bug #269456. |
1323 |
"""
|
1324 |
split_by_prefix = {} |
|
4039.3.2
by John Arbash Meinel
Batch get_record_stream(fulltexts) into 5MB requests. |
1325 |
prefix_order = [] |
3763.4.1
by John Arbash Meinel
Possible fix for bug #269456. |
1326 |
for key in keys: |
1327 |
if len(key) == 1: |
|
4039.3.2
by John Arbash Meinel
Batch get_record_stream(fulltexts) into 5MB requests. |
1328 |
prefix = '' |
1329 |
else: |
|
1330 |
prefix = key[0] |
|
1331 |
||
1332 |
if prefix in split_by_prefix: |
|
1333 |
split_by_prefix[prefix].append(key) |
|
1334 |
else: |
|
1335 |
split_by_prefix[prefix] = [key] |
|
1336 |
prefix_order.append(prefix) |
|
1337 |
return split_by_prefix, prefix_order |
|
1338 |
||
4039.3.7
by John Arbash Meinel
Some direct tests for _group_keys_for_io |
1339 |
def _group_keys_for_io(self, keys, non_local_keys, positions, |
1340 |
_min_buffer_size=_STREAM_MIN_BUFFER_SIZE): |
|
4039.3.2
by John Arbash Meinel
Batch get_record_stream(fulltexts) into 5MB requests. |
1341 |
"""For the given keys, group them into 'best-sized' requests.
|
1342 |
||
1343 |
The idea is to avoid making 1 request per file, but to never try to
|
|
1344 |
unpack an entire 1.5GB source tree in a single pass. Also when
|
|
1345 |
possible, we should try to group requests to the same pack file
|
|
1346 |
together.
|
|
1347 |
||
4039.3.7
by John Arbash Meinel
Some direct tests for _group_keys_for_io |
1348 |
:return: list of (keys, non_local) tuples that indicate what keys
|
1349 |
should be fetched next.
|
|
4039.3.2
by John Arbash Meinel
Batch get_record_stream(fulltexts) into 5MB requests. |
1350 |
"""
|
1351 |
# TODO: Ideally we would group on 2 factors. We want to extract texts
|
|
1352 |
# from the same pack file together, and we want to extract all
|
|
1353 |
# the texts for a given build-chain together. Ultimately it
|
|
1354 |
# probably needs a better global view.
|
|
4039.3.3
by John Arbash Meinel
Add some debugging code. |
1355 |
total_keys = len(keys) |
4039.3.2
by John Arbash Meinel
Batch get_record_stream(fulltexts) into 5MB requests. |
1356 |
prefix_split_keys, prefix_order = self._split_by_prefix(keys) |
1357 |
prefix_split_non_local_keys, _ = self._split_by_prefix(non_local_keys) |
|
1358 |
cur_keys = [] |
|
1359 |
cur_non_local = set() |
|
1360 |
cur_size = 0 |
|
4039.3.3
by John Arbash Meinel
Add some debugging code. |
1361 |
result = [] |
1362 |
sizes = [] |
|
4039.3.2
by John Arbash Meinel
Batch get_record_stream(fulltexts) into 5MB requests. |
1363 |
for prefix in prefix_order: |
1364 |
keys = prefix_split_keys[prefix] |
|
1365 |
non_local = prefix_split_non_local_keys.get(prefix, []) |
|
4039.3.4
by John Arbash Meinel
Properly determine the total number of bytes needed for a given key. |
1366 |
|
1367 |
this_size = self._index._get_total_build_size(keys, positions) |
|
4039.3.2
by John Arbash Meinel
Batch get_record_stream(fulltexts) into 5MB requests. |
1368 |
cur_size += this_size |
1369 |
cur_keys.extend(keys) |
|
1370 |
cur_non_local.update(non_local) |
|
4039.3.7
by John Arbash Meinel
Some direct tests for _group_keys_for_io |
1371 |
if cur_size > _min_buffer_size: |
4039.3.3
by John Arbash Meinel
Add some debugging code. |
1372 |
result.append((cur_keys, cur_non_local)) |
1373 |
sizes.append(cur_size) |
|
4039.3.2
by John Arbash Meinel
Batch get_record_stream(fulltexts) into 5MB requests. |
1374 |
cur_keys = [] |
4039.3.4
by John Arbash Meinel
Properly determine the total number of bytes needed for a given key. |
1375 |
cur_non_local = set() |
4039.3.2
by John Arbash Meinel
Batch get_record_stream(fulltexts) into 5MB requests. |
1376 |
cur_size = 0 |
1377 |
if cur_keys: |
|
4039.3.3
by John Arbash Meinel
Add some debugging code. |
1378 |
result.append((cur_keys, cur_non_local)) |
1379 |
sizes.append(cur_size) |
|
1380 |
return result |
|
3763.4.1
by John Arbash Meinel
Possible fix for bug #269456. |
1381 |
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1382 |
def get_record_stream(self, keys, ordering, include_delta_closure): |
1383 |
"""Get a stream of records for keys.
|
|
1384 |
||
1385 |
:param keys: The keys to include.
|
|
3350.3.3
by Robert Collins
Functional get_record_stream interface tests covering full interface. |
1386 |
:param ordering: Either 'unordered' or 'topological'. A topologically
|
1387 |
sorted stream has compression parents strictly before their
|
|
1388 |
children.
|
|
1389 |
:param include_delta_closure: If True then the closure across any
|
|
1390 |
compression parents will be included (in the opaque data).
|
|
1391 |
:return: An iterator of ContentFactory objects, each of which is only
|
|
1392 |
valid until the iterator is advanced.
|
|
1393 |
"""
|
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1394 |
# keys might be a generator
|
1395 |
keys = set(keys) |
|
3350.8.6
by Robert Collins
get_record_stream stacking for delta access. |
1396 |
if not keys: |
1397 |
return
|
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1398 |
if not self._index.has_graph: |
4111.1.1
by Robert Collins
Add a groupcompress sort order. |
1399 |
# Cannot sort when no graph has been stored.
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1400 |
ordering = 'unordered' |
3789.2.1
by John Arbash Meinel
_DirectPackAccess can now raise RetryWithNewPacks when we think something has happened. |
1401 |
|
1402 |
remaining_keys = keys |
|
1403 |
while True: |
|
1404 |
try: |
|
1405 |
keys = set(remaining_keys) |
|
1406 |
for content_factory in self._get_remaining_record_stream(keys, |
|
1407 |
ordering, include_delta_closure): |
|
1408 |
remaining_keys.discard(content_factory.key) |
|
1409 |
yield content_factory |
|
1410 |
return
|
|
1411 |
except errors.RetryWithNewPacks, e: |
|
3789.2.11
by John Arbash Meinel
KnitVersionedFile.get_record_stream now retries *and* fails correctly. |
1412 |
self._access.reload_or_raise(e) |
3789.2.1
by John Arbash Meinel
_DirectPackAccess can now raise RetryWithNewPacks when we think something has happened. |
1413 |
|
1414 |
def _get_remaining_record_stream(self, keys, ordering, |
|
1415 |
include_delta_closure): |
|
3789.2.4
by John Arbash Meinel
Add a multiple-record test, though it isn't quite what we want for the readv tests. |
1416 |
"""This function is the 'retry' portion for get_record_stream."""
|
3350.3.3
by Robert Collins
Functional get_record_stream interface tests covering full interface. |
1417 |
if include_delta_closure: |
3350.6.10
by Martin Pool
VersionedFiles review cleanups |
1418 |
positions = self._get_components_positions(keys, allow_missing=True) |
3350.3.3
by Robert Collins
Functional get_record_stream interface tests covering full interface. |
1419 |
else: |
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1420 |
build_details = self._index.get_build_details(keys) |
3350.6.11
by Martin Pool
Review cleanups and documentation from Robert's mail on 2080618 |
1421 |
# map from key to
|
1422 |
# (record_details, access_memo, compression_parent_key)
|
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1423 |
positions = dict((key, self._build_details_to_components(details)) |
1424 |
for key, details in build_details.iteritems()) |
|
1425 |
absent_keys = keys.difference(set(positions)) |
|
1426 |
# There may be more absent keys : if we're missing the basis component
|
|
1427 |
# and are trying to include the delta closure.
|
|
4005.3.6
by Robert Collins
Support delta_closure=True with NetworkRecordStream to transmit deltas over the wire when full text extraction is required on the far end. |
1428 |
# XXX: We should not ever need to examine remote sources because we do
|
1429 |
# not permit deltas across versioned files boundaries.
|
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1430 |
if include_delta_closure: |
3350.8.6
by Robert Collins
get_record_stream stacking for delta access. |
1431 |
needed_from_fallback = set() |
3350.6.11
by Martin Pool
Review cleanups and documentation from Robert's mail on 2080618 |
1432 |
# Build up reconstructable_keys dict. key:True in this dict means
|
1433 |
# the key can be reconstructed.
|
|
3350.6.10
by Martin Pool
VersionedFiles review cleanups |
1434 |
reconstructable_keys = {} |
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1435 |
for key in keys: |
1436 |
# the delta chain
|
|
1437 |
try: |
|
1438 |
chain = [key, positions[key][2]] |
|
1439 |
except KeyError: |
|
3350.8.6
by Robert Collins
get_record_stream stacking for delta access. |
1440 |
needed_from_fallback.add(key) |
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1441 |
continue
|
1442 |
result = True |
|
1443 |
while chain[-1] is not None: |
|
3350.6.10
by Martin Pool
VersionedFiles review cleanups |
1444 |
if chain[-1] in reconstructable_keys: |
1445 |
result = reconstructable_keys[chain[-1]] |
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1446 |
break
|
1447 |
else: |
|
1448 |
try: |
|
1449 |
chain.append(positions[chain[-1]][2]) |
|
1450 |
except KeyError: |
|
1451 |
# missing basis component
|
|
3350.8.10
by Robert Collins
Stacked insert_record_stream. |
1452 |
needed_from_fallback.add(chain[-1]) |
1453 |
result = True |
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1454 |
break
|
1455 |
for chain_key in chain[:-1]: |
|
3350.6.10
by Martin Pool
VersionedFiles review cleanups |
1456 |
reconstructable_keys[chain_key] = result |
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1457 |
if not result: |
3350.8.6
by Robert Collins
get_record_stream stacking for delta access. |
1458 |
needed_from_fallback.add(key) |
1459 |
# Double index lookups here : need a unified api ?
|
|
3350.8.14
by Robert Collins
Review feedback. |
1460 |
global_map, parent_maps = self._get_parent_map_with_sources(keys) |
4111.1.1
by Robert Collins
Add a groupcompress sort order. |
1461 |
if ordering in ('topological', 'groupcompress'): |
1462 |
if ordering == 'topological': |
|
1463 |
# Global topological sort
|
|
1464 |
present_keys = tsort.topo_sort(global_map) |
|
1465 |
else: |
|
1466 |
present_keys = sort_groupcompress(global_map) |
|
3350.8.6
by Robert Collins
get_record_stream stacking for delta access. |
1467 |
# Now group by source:
|
1468 |
source_keys = [] |
|
1469 |
current_source = None |
|
1470 |
for key in present_keys: |
|
1471 |
for parent_map in parent_maps: |
|
1472 |
if key in parent_map: |
|
1473 |
key_source = parent_map |
|
1474 |
break
|
|
1475 |
if current_source is not key_source: |
|
1476 |
source_keys.append((key_source, [])) |
|
1477 |
current_source = key_source |
|
1478 |
source_keys[-1][1].append(key) |
|
1479 |
else: |
|
3606.7.7
by John Arbash Meinel
Add tests for the fetching behavior. |
1480 |
if ordering != 'unordered': |
1481 |
raise AssertionError('valid values for ordering are:' |
|
4111.1.1
by Robert Collins
Add a groupcompress sort order. |
1482 |
' "unordered", "groupcompress" or "topological" not: %r' |
3606.7.7
by John Arbash Meinel
Add tests for the fetching behavior. |
1483 |
% (ordering,)) |
3350.8.6
by Robert Collins
get_record_stream stacking for delta access. |
1484 |
# Just group by source; remote sources first.
|
1485 |
present_keys = [] |
|
1486 |
source_keys = [] |
|
1487 |
for parent_map in reversed(parent_maps): |
|
1488 |
source_keys.append((parent_map, [])) |
|
1489 |
for key in parent_map: |
|
1490 |
present_keys.append(key) |
|
1491 |
source_keys[-1][1].append(key) |
|
3878.1.1
by John Arbash Meinel
KVF.get_record_stream('unordered') now returns the records based on I/O ordering. |
1492 |
# We have been requested to return these records in an order that
|
3878.1.2
by John Arbash Meinel
Move the sorting into each index, and customize it for Kndx access. |
1493 |
# suits us. So we ask the index to give us an optimally sorted
|
1494 |
# order.
|
|
3878.1.1
by John Arbash Meinel
KVF.get_record_stream('unordered') now returns the records based on I/O ordering. |
1495 |
for source, sub_keys in source_keys: |
1496 |
if source is parent_maps[0]: |
|
3878.1.2
by John Arbash Meinel
Move the sorting into each index, and customize it for Kndx access. |
1497 |
# Only sort the keys for this VF
|
1498 |
self._index._sort_keys_by_io(sub_keys, positions) |
|
3350.8.6
by Robert Collins
get_record_stream stacking for delta access. |
1499 |
absent_keys = keys - set(global_map) |
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1500 |
for key in absent_keys: |
1501 |
yield AbsentContentFactory(key) |
|
1502 |
# restrict our view to the keys we can answer.
|
|
1503 |
# XXX: Memory: TODO: batch data here to cap buffered data at (say) 1MB.
|
|
3350.8.6
by Robert Collins
get_record_stream stacking for delta access. |
1504 |
# XXX: At that point we need to consider the impact of double reads by
|
1505 |
# utilising components multiple times.
|
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1506 |
if include_delta_closure: |
1507 |
# XXX: get_content_maps performs its own index queries; allow state
|
|
1508 |
# to be passed in.
|
|
3763.4.1
by John Arbash Meinel
Possible fix for bug #269456. |
1509 |
non_local_keys = needed_from_fallback - absent_keys |
4039.3.2
by John Arbash Meinel
Batch get_record_stream(fulltexts) into 5MB requests. |
1510 |
for keys, non_local_keys in self._group_keys_for_io(present_keys, |
1511 |
non_local_keys, |
|
1512 |
positions): |
|
1513 |
generator = _VFContentMapGenerator(self, keys, non_local_keys, |
|
4537.3.1
by John Arbash Meinel
Start working on tests that get_record_stream gives reasonable results w/ stacking. |
1514 |
global_map, |
1515 |
ordering=ordering) |
|
4005.3.6
by Robert Collins
Support delta_closure=True with NetworkRecordStream to transmit deltas over the wire when full text extraction is required on the far end. |
1516 |
for record in generator.get_record_stream(): |
1517 |
yield record |
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1518 |
else: |
3350.8.6
by Robert Collins
get_record_stream stacking for delta access. |
1519 |
for source, keys in source_keys: |
1520 |
if source is parent_maps[0]: |
|
1521 |
# this KnitVersionedFiles
|
|
1522 |
records = [(key, positions[key][1]) for key in keys] |
|
4082.1.1
by Andrew Bennetts
Use _read_records_iter_unchecked in _get_remaining_record_stream. |
1523 |
for key, raw_data in self._read_records_iter_unchecked(records): |
3350.8.6
by Robert Collins
get_record_stream stacking for delta access. |
1524 |
(record_details, index_memo, _) = positions[key] |
1525 |
yield KnitContentFactory(key, global_map[key], |
|
4082.1.1
by Andrew Bennetts
Use _read_records_iter_unchecked in _get_remaining_record_stream. |
1526 |
record_details, None, raw_data, self._factory.annotated, None) |
3350.8.6
by Robert Collins
get_record_stream stacking for delta access. |
1527 |
else: |
1528 |
vf = self._fallback_vfs[parent_maps.index(source) - 1] |
|
1529 |
for record in vf.get_record_stream(keys, ordering, |
|
1530 |
include_delta_closure): |
|
1531 |
yield record |
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1532 |
|
1533 |
def get_sha1s(self, keys): |
|
1534 |
"""See VersionedFiles.get_sha1s()."""
|
|
3350.8.3
by Robert Collins
VF.get_sha1s needed changing to be stackable. |
1535 |
missing = set(keys) |
1536 |
record_map = self._get_record_map(missing, allow_missing=True) |
|
1537 |
result = {} |
|
1538 |
for key, details in record_map.iteritems(): |
|
1539 |
if key not in missing: |
|
1540 |
continue
|
|
1541 |
# record entry 2 is the 'digest'.
|
|
1542 |
result[key] = details[2] |
|
1543 |
missing.difference_update(set(result)) |
|
1544 |
for source in self._fallback_vfs: |
|
1545 |
if not missing: |
|
1546 |
break
|
|
1547 |
new_result = source.get_sha1s(missing) |
|
1548 |
result.update(new_result) |
|
1549 |
missing.difference_update(set(new_result)) |
|
1550 |
return result |
|
3052.2.2
by Robert Collins
* Operations pulling data from a smart server where the underlying |
1551 |
|
3350.3.8
by Robert Collins
Basic stream insertion, no fast path yet for knit to knit. |
1552 |
    def insert_record_stream(self, stream):
        """Insert a record stream into this container.

        Records that can be stored as knit deltas/fulltexts are inserted
        raw; other record kinds fall back to ``add_lines``.  Delta records
        whose compression parent is not yet present are buffered and added
        once the parent arrives (or flagged as missing at the end).

        :param stream: A stream of records to insert.
        :return: None
        :seealso VersionedFiles.get_record_stream:
        """
        # Memoizing accessor for storage-kind adapters: build each adapter
        # at most once per call and reuse it across records.
        def get_adapter(adapter_key):
            try:
                return adapters[adapter_key]
            except KeyError:
                adapter_factory = adapter_registry.get(adapter_key)
                adapter = adapter_factory(self)
                adapters[adapter_key] = adapter
                return adapter
        # Storage kinds that are deltas (and therefore have a compression
        # parent we may need to wait for).
        delta_types = set()
        if self._factory.annotated:
            # self is annotated, we need annotated knits to use directly.
            annotated = "annotated-"
            convertibles = []
        else:
            # self is not annotated, but we can strip annotations cheaply.
            annotated = ""
            convertibles = set(["knit-annotated-ft-gz"])
            if self._max_delta_chain:
                delta_types.add("knit-annotated-delta-gz")
                convertibles.add("knit-annotated-delta-gz")
        # The set of types we can cheaply adapt without needing basis texts.
        native_types = set()
        if self._max_delta_chain:
            native_types.add("knit-%sdelta-gz" % annotated)
            delta_types.add("knit-%sdelta-gz" % annotated)
        native_types.add("knit-%sft-gz" % annotated)
        knit_types = native_types.union(convertibles)
        adapters = {}
        # Buffer all index entries that we can't add immediately because their
        # basis parent is missing. We don't buffer all because generating
        # annotations may require access to some of the new records. However we
        # can't generate annotations from new deltas until their basis parent
        # is present anyway, so we get away with not needing an index that
        # includes the new keys.
        #
        # See <http://launchpad.net/bugs/300177> about ordering of compression
        # parents in the records - to be conservative, we insist that all
        # parents must be present to avoid expanding to a fulltext.
        #
        # key = basis_parent, value = index entry to add
        buffered_index_entries = {}
        for record in stream:
            kind = record.storage_kind
            if kind.startswith('knit-') and kind.endswith('-gz'):
                # Check that the ID in the header of the raw knit bytes matches
                # the record metadata.
                raw_data = record._raw_record
                df, rec = self._parse_record_header(record.key, raw_data)
                df.close()
            buffered = False
            parents = record.parents
            if record.storage_kind in delta_types:
                # TODO: eventually the record itself should track
                #       compression_parent
                compression_parent = parents[0]
            else:
                compression_parent = None
            # Raise an error when a record is missing.
            if record.storage_kind == 'absent':
                raise RevisionNotPresent([record.key], self)
            elif ((record.storage_kind in knit_types)
                  and (compression_parent is None
                       or not self._fallback_vfs
                       or self._index.has_key(compression_parent)
                       or not self.has_key(compression_parent))):
                # we can insert the knit record literally if either it has no
                # compression parent OR we already have its basis in this kvf
                # OR the basis is not present even in the fallbacks.  In the
                # last case it will either turn up later in the stream and all
                # will be well, or it won't turn up at all and we'll raise an
                # error at the end.
                #
                # TODO: self.has_key is somewhat redundant with
                # self._index.has_key; we really want something that directly
                # asks if it's only present in the fallbacks. -- mbp 20081119
                if record.storage_kind not in native_types:
                    # Adapt to our native serialization; prefer the delta
                    # form, and fall back to fulltext when no delta adapter
                    # is registered for this storage kind.
                    try:
                        adapter_key = (record.storage_kind, "knit-delta-gz")
                        adapter = get_adapter(adapter_key)
                    except KeyError:
                        adapter_key = (record.storage_kind, "knit-ft-gz")
                        adapter = get_adapter(adapter_key)
                    bytes = adapter.get_bytes(record)
                else:
                    # It's a knit record, it has a _raw_record field (even if
                    # it was reconstituted from a network stream).
                    bytes = record._raw_record
                options = [record._build_details[0]]
                if record._build_details[1]:
                    options.append('no-eol')
                # Just blat it across.
                # Note: This does end up adding data on duplicate keys. As
                # modern repositories use atomic insertions this should not
                # lead to excessive growth in the event of interrupted fetches.
                # 'knit' repositories may suffer excessive growth, but as a
                # deprecated format this is tolerable. It can be fixed if
                # needed by in the kndx index support raising on a duplicate
                # add with identical parents and options.
                access_memo = self._access.add_raw_records(
                    [(record.key, len(bytes))], bytes)[0]
                index_entry = (record.key, options, access_memo, parents)
                if 'fulltext' not in options:
                    # Not a fulltext, so we need to make sure the compression
                    # parent will also be present.
                    # Note that pack backed knits don't need to buffer here
                    # because they buffer all writes to the transaction level,
                    # but we don't expose that difference at the index level. If
                    # the query here has sufficient cost to show up in
                    # profiling we should do that.
                    #
                    # They're required to be physically in this
                    # KnitVersionedFiles, not in a fallback.
                    if not self._index.has_key(compression_parent):
                        pending = buffered_index_entries.setdefault(
                            compression_parent, [])
                        pending.append(index_entry)
                        buffered = True
                if not buffered:
                    self._index.add_records([index_entry])
            elif record.storage_kind == 'chunked':
                self.add_lines(record.key, parents,
                    osutils.chunks_to_lines(record.get_bytes_as('chunked')))
            else:
                # Not suitable for direct insertion as a
                # delta, either because it's not the right format, or this
                # KnitVersionedFiles doesn't permit deltas (_max_delta_chain ==
                # 0) or because it depends on a base only present in the
                # fallback kvfs.
                self._access.flush()
                try:
                    # Try getting a fulltext directly from the record.
                    bytes = record.get_bytes_as('fulltext')
                except errors.UnavailableRepresentation:
                    adapter_key = record.storage_kind, 'fulltext'
                    adapter = get_adapter(adapter_key)
                    bytes = adapter.get_bytes(record)
                lines = split_lines(bytes)
                try:
                    self.add_lines(record.key, parents, lines)
                except errors.RevisionAlreadyPresent:
                    pass
            # Add any records whose basis parent is now available.
            if not buffered:
                # Walk the transitive closure: inserting this record may
                # unblock buffered children, whose insertion may in turn
                # unblock their own buffered children.
                added_keys = [record.key]
                while added_keys:
                    key = added_keys.pop(0)
                    if key in buffered_index_entries:
                        index_entries = buffered_index_entries[key]
                        self._index.add_records(index_entries)
                        added_keys.extend(
                            [index_entry[0] for index_entry in index_entries])
                        del buffered_index_entries[key]
        if buffered_index_entries:
            # There were index entries buffered at the end of the stream,
            # So these need to be added (if the index supports holding such
            # entries for later insertion)
            all_entries = []
            for key in buffered_index_entries:
                index_entries = buffered_index_entries[key]
                all_entries.extend(index_entries)
            self._index.add_records(
                all_entries, missing_compression_parents=True)
|
4009.3.2
by Andrew Bennetts
Add test_insert_record_stream_delta_missing_basis_can_be_added_later. |
1721 |
|
1722 |
def get_missing_compression_parent_keys(self): |
|
4009.3.3
by Andrew Bennetts
Add docstrings. |
1723 |
"""Return an iterable of keys of missing compression parents.
|
1724 |
||
1725 |
Check this after calling insert_record_stream to find out if there are
|
|
1726 |
any missing compression parents. If there are, the records that
|
|
4009.3.12
by Robert Collins
Polish on inserting record streams with missing compression parents. |
1727 |
depend on them are not able to be inserted safely. For atomic
|
1728 |
KnitVersionedFiles built on packs, the transaction should be aborted or
|
|
1729 |
suspended - commit will fail at this point. Nonatomic knits will error
|
|
1730 |
earlier because they have no staging area to put pending entries into.
|
|
4009.3.3
by Andrew Bennetts
Add docstrings. |
1731 |
"""
|
4009.3.7
by Andrew Bennetts
Most tests passing. |
1732 |
return self._index.get_missing_compression_parents() |
3350.3.8
by Robert Collins
Basic stream insertion, no fast path yet for knit to knit. |
1733 |
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1734 |
    def iter_lines_added_or_present_in_keys(self, keys, pb=None):
        """Iterate over the lines in the versioned files from keys.

        This may return lines from other keys. Each item the returned
        iterator yields is a tuple of a line and a text version that that line
        is present in (not introduced in).

        Ordering of results is in whatever order is most suitable for the
        underlying storage format.

        If a progress bar is supplied, it may be used to indicate progress.
        The caller is responsible for cleaning up progress bars (because this
        is an iterator).

        NOTES:
         * Lines are normalised by the underlying store: they will all have \\n
           terminators.
         * Lines are returned in arbitrary order.
         * If a requested key did not change any lines (or didn't have any
           lines), it may not be mentioned at all in the result.

        :param pb: Progress bar supplied by caller.
        :return: An iterator over (line, key).
        """
        if pb is None:
            pb = ui.ui_factory.nested_progress_bar()
        keys = set(keys)
        total = len(keys)
        done = False
        # Retry loop: a pack repack can invalidate our read handles mid-read;
        # reload_or_raise either refreshes them (and we start over) or raises.
        while not done:
            try:
                # we don't care about inclusions, the caller cares.
                # but we need to setup a list of records to visit.
                # we need key, position, length
                key_records = []
                build_details = self._index.get_build_details(keys)
                # build_details may mention keys we did not ask for; only
                # schedule reads for the requested ones.
                for key, details in build_details.iteritems():
                    if key in keys:
                        key_records.append((key, details[0]))
                records_iter = enumerate(self._read_records_iter(key_records))
                for (key_idx, (key, data, sha_value)) in records_iter:
                    pb.update('Walking content', key_idx, total)
                    # details[1] in build_details is the compression parent;
                    # None means the record is stored as a fulltext.
                    compression_parent = build_details[key][1]
                    if compression_parent is None:
                        # fulltext
                        line_iterator = self._factory.get_fulltext_content(data)
                    else:
                        # Delta
                        line_iterator = self._factory.get_linedelta_content(data)
                    # Now that we are yielding the data for this key, remove it
                    # from the list
                    keys.remove(key)
                    # XXX: It might be more efficient to yield (key,
                    # line_iterator) in the future. However for now, this is a
                    # simpler change to integrate into the rest of the
                    # codebase. RBC 20071110
                    for line in line_iterator:
                        yield line, key
                done = True
            except errors.RetryWithNewPacks, e:
                self._access.reload_or_raise(e)
        # If there are still keys we've not yet found, we look in the fallback
        # vfs, and hope to find them there.  Note that if the keys are found
        # but had no changes or no content, the fallback may not return
        # anything.
        if keys and not self._fallback_vfs:
            # XXX: strictly the second parameter is meant to be the file id
            # but it's not easily accessible here.
            raise RevisionNotPresent(keys, repr(self))
        for source in self._fallback_vfs:
            if not keys:
                break
            source_keys = set()
            for line, key in source.iter_lines_added_or_present_in_keys(keys):
                source_keys.add(key)
                yield line, key
            keys.difference_update(source_keys)
        pb.update('Walking content', total, total)
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1812 |
|
1813 |
def _make_line_delta(self, delta_seq, new_content): |
|
1814 |
"""Generate a line delta from delta_seq and new_content."""
|
|
1815 |
diff_hunks = [] |
|
1816 |
for op in delta_seq.get_opcodes(): |
|
1817 |
if op[0] == 'equal': |
|
1818 |
continue
|
|
1819 |
diff_hunks.append((op[1], op[2], op[4]-op[3], new_content._lines[op[3]:op[4]])) |
|
1820 |
return diff_hunks |
|
1563.2.4
by Robert Collins
First cut at including the knit implementation of versioned_file. |
1821 |
|
1596.2.34
by Robert Collins
Optimise knit add to only diff once per parent, not once per parent + once for the delta generation. |
1822 |
def _merge_annotations(self, content, parents, parent_texts={}, |
2520.4.140
by Aaron Bentley
Use matching blocks from mpdiff for knit delta creation |
1823 |
delta=None, annotated=None, |
1824 |
left_matching_blocks=None): |
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1825 |
"""Merge annotations for content and generate deltas.
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
1826 |
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1827 |
This is done by comparing the annotations based on changes to the text
|
1828 |
and generating a delta on the resulting full texts. If annotations are
|
|
1829 |
not being created then a simple delta is created.
|
|
1596.2.27
by Robert Collins
Note potential improvements in knit adds. |
1830 |
"""
|
2520.4.146
by Aaron Bentley
Avoid get_matching_blocks for un-annotated text |
1831 |
if left_matching_blocks is not None: |
1832 |
delta_seq = diff._PrematchedMatcher(left_matching_blocks) |
|
1833 |
else: |
|
1834 |
delta_seq = None |
|
1596.2.34
by Robert Collins
Optimise knit add to only diff once per parent, not once per parent + once for the delta generation. |
1835 |
if annotated: |
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1836 |
for parent_key in parents: |
1837 |
merge_content = self._get_content(parent_key, parent_texts) |
|
1838 |
if (parent_key == parents[0] and delta_seq is not None): |
|
2520.4.146
by Aaron Bentley
Avoid get_matching_blocks for un-annotated text |
1839 |
seq = delta_seq |
2520.4.140
by Aaron Bentley
Use matching blocks from mpdiff for knit delta creation |
1840 |
else: |
1841 |
seq = patiencediff.PatienceSequenceMatcher( |
|
1842 |
None, merge_content.text(), content.text()) |
|
1596.2.34
by Robert Collins
Optimise knit add to only diff once per parent, not once per parent + once for the delta generation. |
1843 |
for i, j, n in seq.get_matching_blocks(): |
1844 |
if n == 0: |
|
1845 |
continue
|
|
3460.2.1
by Robert Collins
* Inserting a bundle which changes the contents of a file with no trailing |
1846 |
# this copies (origin, text) pairs across to the new
|
1847 |
# content for any line that matches the last-checked
|
|
2520.4.146
by Aaron Bentley
Avoid get_matching_blocks for un-annotated text |
1848 |
# parent.
|
1596.2.34
by Robert Collins
Optimise knit add to only diff once per parent, not once per parent + once for the delta generation. |
1849 |
content._lines[j:j+n] = merge_content._lines[i:i+n] |
3350.6.10
by Martin Pool
VersionedFiles review cleanups |
1850 |
# XXX: Robert says the following block is a workaround for a
|
1851 |
# now-fixed bug and it can probably be deleted. -- mbp 20080618
|
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1852 |
if content._lines and content._lines[-1][1][-1] != '\n': |
1853 |
# The copied annotation was from a line without a trailing EOL,
|
|
1854 |
# reinstate one for the content object, to ensure correct
|
|
1855 |
# serialization.
|
|
1856 |
line = content._lines[-1][1] + '\n' |
|
1857 |
content._lines[-1] = (content._lines[-1][0], line) |
|
1596.2.36
by Robert Collins
add a get_delta api to versioned_file. |
1858 |
if delta: |
2520.4.146
by Aaron Bentley
Avoid get_matching_blocks for un-annotated text |
1859 |
if delta_seq is None: |
1596.2.36
by Robert Collins
add a get_delta api to versioned_file. |
1860 |
reference_content = self._get_content(parents[0], parent_texts) |
1861 |
new_texts = content.text() |
|
1862 |
old_texts = reference_content.text() |
|
2104.4.2
by John Arbash Meinel
Small cleanup and NEWS entry about fixing bug #65714 |
1863 |
delta_seq = patiencediff.PatienceSequenceMatcher( |
2100.2.1
by wang
Replace python's difflib by patiencediff because the worst case |
1864 |
None, old_texts, new_texts) |
1596.2.36
by Robert Collins
add a get_delta api to versioned_file. |
1865 |
return self._make_line_delta(delta_seq, content) |
1866 |
||
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1867 |
def _parse_record(self, version_id, data): |
1868 |
"""Parse an original format knit record.
|
|
1869 |
||
1870 |
These have the last element of the key only present in the stored data.
|
|
1871 |
"""
|
|
1872 |
rec, record_contents = self._parse_record_unchecked(data) |
|
1873 |
self._check_header_version(rec, version_id) |
|
1874 |
return record_contents, rec[3] |
|
1875 |
||
1876 |
    def _parse_record_header(self, key, raw_data):
        """Parse a record header for consistency.

        Only enough of the record is decompressed to validate the header
        line; the still-open decompressor stream is handed back so the
        caller can read the rest (or close it).

        :param key: The key the record is expected to be for.
        :param raw_data: The gzip-compressed record bytes.
        :return: the header and the decompressor stream.
            as (stream, header_record)
        """
        df = tuned_gzip.GzipFile(mode='rb', fileobj=StringIO(raw_data))
        try:
            # Current serialise
            rec = self._check_header(key, df.readline())
        except Exception, e:
            # Deliberately broad: any failure decoding or checking the header
            # is reported uniformly as a corrupt knit.
            raise KnitCorrupt(self,
                "While reading {%s} got %s(%s)"
                % (key, e.__class__.__name__, str(e)))
        return df, rec
|
1891 |
||
1892 |
    def _parse_record_unchecked(self, data):
        """Decompress and structurally validate a raw knit record.

        Unlike _parse_record this does not check the version id against an
        expected value; it only verifies the record's own line count and
        end-marker.

        :param data: The gzip-compressed record bytes.
        :return: A (header_fields, content_lines) tuple, where header_fields
            is the split header line and content_lines excludes the header
            and the trailing end-marker line.
        :raises KnitCorrupt: If decompression fails, the line count does not
            match the header, or the end-marker is wrong.
        """
        # profiling notes:
        # 4168 calls in 2880 217 internal
        # 4168 calls to _parse_record_header in 2121
        # 4168 calls to readlines in 330
        df = tuned_gzip.GzipFile(mode='rb', fileobj=StringIO(data))
        try:
            record_contents = df.readlines()
        except Exception, e:
            # Deliberately broad: any decompression/read failure is reported
            # uniformly as a corrupt record.
            raise KnitCorrupt(self, "Corrupt compressed record %r, got %s(%s)" %
                (data, e.__class__.__name__, str(e)))
        # First line is the header, last line is the "end VERSION" marker;
        # everything between is content.
        header = record_contents.pop(0)
        rec = self._split_header(header)
        last_line = record_contents.pop()
        # rec[2] is the line count declared in the header; rec[1] is the
        # version id.
        if len(record_contents) != int(rec[2]):
            raise KnitCorrupt(self,
                              'incorrect number of lines %s != %s'
                              ' for version {%s} %s'
                              % (len(record_contents), int(rec[2]),
                                 rec[1], record_contents))
        if last_line != 'end %s\n' % rec[1]:
            raise KnitCorrupt(self,
                              'unexpected version end line %r, wanted %r'
                              % (last_line, rec[1]))
        df.close()
        return rec, record_contents
|
1918 |
||
1919 |
    def _read_records_iter(self, records):
        """Read text records from data file and yield result.

        The result will be returned in whatever is the fastest to read.
        Not by the order requested. Also, multiple requests for the same
        record will only yield 1 response.

        :param records: A list of (key, access_memo) entries
        :return: Yields (key, contents, digest) in the order
                 read, not the order requested
        """
        if not records:
            return

        # XXX: This smells wrong, IO may not be getting ordered right.
        # De-duplicate, then sort by access_memo so reads are grouped on disk.
        needed_records = sorted(set(records), key=operator.itemgetter(1))
        if not needed_records:
            return

        # The transport optimizes the fetching as well
        # (ie, reads continuous ranges.)
        raw_data = self._access.get_raw_records(
            [index_memo for key, index_memo in needed_records])

        # get_raw_records is assumed to yield data in the same order as the
        # memos passed in -- pair each raw blob back with its key.
        for (key, index_memo), data in \
                izip(iter(needed_records), raw_data):
            content, digest = self._parse_record(key[-1], data)
            yield key, content, digest
|
1946 |
||
1947 |
def _read_records_iter_raw(self, records): |
|
1948 |
"""Read text records from data file and yield raw data.
|
|
1949 |
||
1950 |
This unpacks enough of the text record to validate the id is
|
|
1951 |
as expected but thats all.
|
|
1952 |
||
4005.3.6
by Robert Collins
Support delta_closure=True with NetworkRecordStream to transmit deltas over the wire when full text extraction is required on the far end. |
1953 |
Each item the iterator yields is (key, bytes,
|
1954 |
expected_sha1_of_full_text).
|
|
1955 |
"""
|
|
1956 |
for key, data in self._read_records_iter_unchecked(records): |
|
1957 |
# validate the header (note that we can only use the suffix in
|
|
1958 |
# current knit records).
|
|
1959 |
df, rec = self._parse_record_header(key, data) |
|
1960 |
df.close() |
|
1961 |
yield key, data, rec[3] |
|
1962 |
||
1963 |
    def _read_records_iter_unchecked(self, records):
        """Read text records from data file and yield raw data.

        No validation is done.

        Yields tuples of (key, data).

        :param records: A list of (key, access_memo) entries.
        """
        # setup an iterator of the external records:
        # uses readv so nice and fast we hope.
        if len(records):
            # grab the disk data needed.
            needed_offsets = [index_memo for key, index_memo
                in records]
            raw_records = self._access.get_raw_records(needed_offsets)

        # get_raw_records is assumed to yield blobs in the same order as the
        # memos passed in, so keys and data pair up positionally.
        for key, index_memo in records:
            data = raw_records.next()
            yield key, data
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1981 |
|
1982 |
def _record_to_data(self, key, digest, lines, dense_lines=None): |
|
1983 |
"""Convert key, digest, lines into a raw data block.
|
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
1984 |
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1985 |
:param key: The key of the record. Currently keys are always serialised
|
1986 |
using just the trailing component.
|
|
1987 |
:param dense_lines: The bytes of lines but in a denser form. For
|
|
1988 |
instance, if lines is a list of 1000 bytestrings each ending in \n,
|
|
1989 |
dense_lines may be a list with one line in it, containing all the
|
|
1990 |
1000's lines and their \n's. Using dense_lines if it is already
|
|
1991 |
known is a win because the string join to create bytes in this
|
|
1992 |
function spends less time resizing the final string.
|
|
1993 |
:return: (len, a StringIO instance with the raw data ready to read.)
|
|
1994 |
"""
|
|
4398.8.3
by John Arbash Meinel
Rewrite some of the internals of KnitVersionedFiles._add() |
1995 |
chunks = ["version %s %d %s\n" % (key[-1], len(lines), digest)] |
1996 |
chunks.extend(dense_lines or lines) |
|
1997 |
chunks.append("end %s\n" % key[-1]) |
|
1998 |
for chunk in chunks: |
|
4398.8.8
by John Arbash Meinel
Respond to Andrew's review comments. |
1999 |
if type(chunk) is not str: |
4398.8.3
by John Arbash Meinel
Rewrite some of the internals of KnitVersionedFiles._add() |
2000 |
raise AssertionError( |
2001 |
'data must be plain bytes was %s' % type(chunk)) |
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
2002 |
if lines and lines[-1][-1] != '\n': |
2003 |
raise ValueError('corrupt lines value %r' % lines) |
|
4398.8.3
by John Arbash Meinel
Rewrite some of the internals of KnitVersionedFiles._add() |
2004 |
compressed_bytes = tuned_gzip.chunks_to_gzip(chunks) |
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
2005 |
return len(compressed_bytes), compressed_bytes |
2006 |
||
2007 |
def _split_header(self, line): |
|
2008 |
rec = line.split() |
|
2009 |
if len(rec) != 4: |
|
2010 |
raise KnitCorrupt(self, |
|
2011 |
'unexpected number of elements in record header') |
|
2012 |
return rec |
|
2013 |
||
2014 |
def keys(self): |
|
2015 |
"""See VersionedFiles.keys."""
|
|
2016 |
if 'evil' in debug.debug_flags: |
|
2017 |
trace.mutter_callsite(2, "keys scales with size of history") |
|
3350.8.4
by Robert Collins
Vf.keys() stacking support. |
2018 |
sources = [self._index] + self._fallback_vfs |
2019 |
result = set() |
|
2020 |
for source in sources: |
|
2021 |
result.update(source.keys()) |
|
2022 |
return result |
|
2023 |
||
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
2024 |
|
4005.3.6
by Robert Collins
Support delta_closure=True with NetworkRecordStream to transmit deltas over the wire when full text extraction is required on the far end. |
2025 |
class _ContentMapGenerator(object):
    """Generate texts or expose raw deltas for a set of texts."""

    def __init__(self, ordering='unordered'):
        # Ordering hint passed through to fallback get_record_stream calls.
        self._ordering = ordering

    def _get_content(self, key):
        """Get the content object for key."""
        # Note that _get_content is only called when the _ContentMapGenerator
        # has been constructed with just one key requested for reconstruction.
        if key in self.nonlocal_keys:
            record = self.get_record_stream().next()
            # Create a content object on the fly
            lines = osutils.chunks_to_lines(record.get_bytes_as('chunked'))
            return PlainKnitContent(lines, record.key)
        else:
            # local keys we can ask for directly
            return self._get_one_work(key)

    def get_record_stream(self):
        """Get a record stream for the keys requested during __init__."""
        for record in self._work():
            yield record

    def _work(self):
        """Produce maps of text and KnitContents as dicts.

        :return: (text_map, content_map) where text_map contains the texts for
            the requested versions and content_map contains the KnitContents.
        """
        # NB: By definition we never need to read remote sources unless texts
        # are requested from them: we don't delta across stores - and we
        # explicitly do not want to, to prevent data loss situations.
        if self.global_map is None:
            self.global_map = self.vf.get_parent_map(self.keys)
        nonlocal_keys = self.nonlocal_keys

        missing_keys = set(nonlocal_keys)
        # Read from remote versioned file instances and provide to our caller.
        for source in self.vf._fallback_vfs:
            if not missing_keys:
                break
            # Loop over fallback repositories asking them for texts - ignore
            # any missing from a particular fallback.
            for record in source.get_record_stream(missing_keys,
                self._ordering, True):
                if record.storage_kind == 'absent':
                    # Not in this particular stream, may be in one of the
                    # other fallback vfs objects.
                    continue
                missing_keys.remove(record.key)
                yield record

        if self._raw_record_map is None:
            raise AssertionError('_raw_record_map should have been filled')
        # Emit lazy factories for the local keys; 'first' marks the factory
        # whose wire form carries the full serialised record map.
        first = True
        for key in self.keys:
            if key in self.nonlocal_keys:
                continue
            yield LazyKnitContentFactory(key, self.global_map[key], self, first)
            first = False

    def _get_one_work(self, requested_key):
        """Reconstruct the content for requested_key from the record map.

        Parses and applies the delta chains for all local keys (not just the
        requested one) and returns the KnitContent for requested_key.
        """
        # Now, if we have calculated everything already, just return the
        # desired text.
        if requested_key in self._contents_map:
            return self._contents_map[requested_key]
        # To simplify things, parse everything at once - code that wants one text
        # probably wants them all.
        # FUTURE: This function could be improved for the 'extract many' case
        # by tracking each component and only doing the copy when the number of
        # children than need to apply delta's to it is > 1 or it is part of the
        # final output.
        multiple_versions = len(self.keys) != 1
        if self._record_map is None:
            self._record_map = self.vf._raw_map_to_record_map(
                self._raw_record_map)
        record_map = self._record_map
        # raw_record_map is key:
        # Have read and parsed records at this point.
        for key in self.keys:
            if key in self.nonlocal_keys:
                # already handled
                continue
            # Walk the delta chain back from key until a fulltext base (next
            # is None) or an already-reconstructed content is found.
            components = []
            cursor = key
            while cursor is not None:
                try:
                    record, record_details, digest, next = record_map[cursor]
                except KeyError:
                    raise RevisionNotPresent(cursor, self)
                components.append((cursor, record, record_details, digest))
                cursor = next
                if cursor in self._contents_map:
                    # no need to plan further back
                    components.append((cursor, None, None, None))
                    break

            # Apply the chain oldest-first to build up the content.
            content = None
            for (component_id, record, record_details,
                 digest) in reversed(components):
                if component_id in self._contents_map:
                    content = self._contents_map[component_id]
                else:
                    content, delta = self._factory.parse_record(key[-1],
                        record, record_details, content,
                        copy_base_content=multiple_versions)
                    if multiple_versions:
                        self._contents_map[component_id] = content

            # digest here is the digest from the last applied component.
            text = content.text()
            actual_sha = sha_strings(text)
            if actual_sha != digest:
                raise SHA1KnitCorrupt(self, actual_sha, digest, key, text)
        if multiple_versions:
            return self._contents_map[requested_key]
        else:
            return content

    def _wire_bytes(self):
        """Get the bytes to put on the wire for 'key'.

        The first collection of bytes asked for returns the serialised
        raw_record_map and the additional details (key, parent) for key.
        Subsequent calls return just the additional details (key, parent).
        The wire storage_kind given for the first key is 'knit-delta-closure',
        For subsequent keys it is 'knit-delta-closure-ref'.

        :param key: A key from the content generator.
        :return: Bytes to put on the wire.
        """
        lines = []
        # kind marker for dispatch on the far side,
        lines.append('knit-delta-closure')
        # Annotated or not
        if self.vf._factory.annotated:
            lines.append('annotated')
        else:
            lines.append('')
        # then the list of keys
        lines.append('\t'.join(['\x00'.join(key) for key in self.keys
            if key not in self.nonlocal_keys]))
        # then the _raw_record_map in serialised form:
        map_byte_list = []
        # for each item in the map:
        # 1 line with key
        # 1 line with parents if the key is to be yielded (None: for None, '' for ())
        # one line with method
        # one line with noeol
        # one line with next ('' for None)
        # one line with byte count of the record bytes
        # the record bytes
        for key, (record_bytes, (method, noeol), next) in \
                self._raw_record_map.iteritems():
            key_bytes = '\x00'.join(key)
            parents = self.global_map.get(key, None)
            if parents is None:
                parent_bytes = 'None:'
            else:
                parent_bytes = '\t'.join('\x00'.join(key) for key in parents)
            method_bytes = method
            if noeol:
                noeol_bytes = "T"
            else:
                noeol_bytes = "F"
            if next:
                next_bytes = '\x00'.join(next)
            else:
                next_bytes = ''
            map_byte_list.append('%s\n%s\n%s\n%s\n%s\n%d\n%s' % (
                key_bytes, parent_bytes, method_bytes, noeol_bytes, next_bytes,
                len(record_bytes), record_bytes))
        map_bytes = ''.join(map_byte_list)
        lines.append(map_bytes)
        bytes = '\n'.join(lines)
        return bytes
|
2202 |
||
2203 |
||
2204 |
class _VFContentMapGenerator(_ContentMapGenerator):
    """Content map generator reading from a VersionedFiles object."""

    def __init__(self, versioned_files, keys, nonlocal_keys=None,
        global_map=None, raw_record_map=None, ordering='unordered'):
        """Create a _ContentMapGenerator.

        :param versioned_files: The versioned files that the texts are being
            extracted from.
        :param keys: The keys to produce content maps for.
        :param nonlocal_keys: An iterable of keys (possibly intersecting keys)
            which are known to not be in this knit, but rather in one of the
            fallback knits.
        :param global_map: The result of get_parent_map(keys) (or a supermap).
            This is required if get_record_stream() is to be used.
        :param raw_record_map: An unparsed raw record map to use for answering
            contents.
        """
        _ContentMapGenerator.__init__(self, ordering=ordering)
        # The vf to source data from.
        self.vf = versioned_files
        # The keys desired.
        self.keys = list(keys)
        # Keys known to live in fallback vfs objects rather than this one.
        if nonlocal_keys is None:
            self.nonlocal_keys = set()
        else:
            self.nonlocal_keys = frozenset(nonlocal_keys)
        # Parents data for keys to be returned in get_record_stream.
        self.global_map = global_map
        # The chunked lists for self.keys in text form.
        self._text_map = {}
        # A cache of KnitContent objects used in extracting texts.
        self._contents_map = {}
        # All the knit records needed to assemble the requested keys as full
        # texts; parsed lazily from _raw_record_map by _get_one_work.
        self._record_map = None
        # The factory for parsing records.
        self._factory = self.vf._factory
        if raw_record_map is not None:
            self._raw_record_map = raw_record_map
        else:
            self._raw_record_map = self.vf._get_record_map_unparsed(keys,
                allow_missing=True)
|
2248 |
||
2249 |
||
2250 |
class _NetworkContentMapGenerator(_ContentMapGenerator):
    """Content map generator sourced from a network stream."""

    def __init__(self, bytes, line_end):
        """Construct a _NetworkContentMapGenerator from a bytes block.

        :param bytes: The serialised 'knit-delta-closure' payload (the format
            produced by _ContentMapGenerator._wire_bytes).
        :param line_end: Offset into bytes at which parsing should start (just
            past the kind-marker line).
        """
        self._bytes = bytes
        self.global_map = {}
        self._raw_record_map = {}
        self._contents_map = {}
        self._record_map = None
        self.nonlocal_keys = []
        # Get access to record parsing facilities
        self.vf = KnitVersionedFiles(None, None)
        start = line_end
        # Annotated or not
        line_end = bytes.find('\n', start)
        line = bytes[start:line_end]
        start = line_end + 1
        if line == 'annotated':
            self._factory = KnitAnnotateFactory()
        else:
            self._factory = KnitPlainFactory()
        # list of keys to emit in get_record_stream
        line_end = bytes.find('\n', start)
        line = bytes[start:line_end]
        start = line_end + 1
        # Keys are tab-separated; components within a key are NUL-separated.
        self.keys = [
            tuple(segment.split('\x00')) for segment in line.split('\t')
            if segment]
        # now a loop until the end. XXX: It would be nice if this was just a
        # bunch of the same records as get_record_stream(..., False) gives, but
        # there is a decent sized gap stopping that at the moment.
        end = len(bytes)
        while start < end:
            # 1 line with key
            line_end = bytes.find('\n', start)
            key = tuple(bytes[start:line_end].split('\x00'))
            start = line_end + 1
            # 1 line with parents (None: for None, '' for ())
            line_end = bytes.find('\n', start)
            line = bytes[start:line_end]
            if line == 'None:':
                parents = None
            else:
                parents = tuple(
                    [tuple(segment.split('\x00')) for segment in line.split('\t')
                     if segment])
            self.global_map[key] = parents
            start = line_end + 1
            # one line with method
            line_end = bytes.find('\n', start)
            line = bytes[start:line_end]
            method = line
            start = line_end + 1
            # one line with noeol
            line_end = bytes.find('\n', start)
            line = bytes[start:line_end]
            noeol = line == "T"
            start = line_end + 1
            # one line with next ('' for None)
            line_end = bytes.find('\n', start)
            line = bytes[start:line_end]
            if not line:
                next = None
            else:
                next = tuple(bytes[start:line_end].split('\x00'))
            start = line_end + 1
            # one line with byte count of the record bytes
            line_end = bytes.find('\n', start)
            line = bytes[start:line_end]
            count = int(line)
            start = line_end + 1
            # the record bytes (raw, not newline-terminated; skipped by count)
            record_bytes = bytes[start:start+count]
            start = start + count
            # put it in the map
            self._raw_record_map[key] = (record_bytes, (method, noeol), next)

    def get_record_stream(self):
        """Get a record stream for the keys requested by the bytestream."""
        first = True
        for key in self.keys:
            yield LazyKnitContentFactory(key, self.global_map[key], self, first)
            first = False

    def _wire_bytes(self):
        # Already serialised: just return the bytes we were built from.
        return self._bytes
|
2337 |
||
2338 |
||
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
2339 |
class _KndxIndex(object): |
2340 |
"""Manages knit index files
|
|
2341 |
||
3350.6.10
by Martin Pool
VersionedFiles review cleanups |
2342 |
The index is kept in memory and read on startup, to enable
|
1563.2.4
by Robert Collins
First cut at including the knit implementation of versioned_file. |
2343 |
fast lookups of revision information. The cursor of the index
|
2344 |
file is always pointing to the end, making it easy to append
|
|
2345 |
entries.
|
|
2346 |
||
2347 |
_cache is a cache for fast mapping from version id to a Index
|
|
2348 |
object.
|
|
2349 |
||
2350 |
_history is a cache for fast mapping from indexes to version ids.
|
|
2351 |
||
2352 |
The index data format is dictionary compressed when it comes to
|
|
2353 |
parent references; a index entry may only have parents that with a
|
|
2354 |
lover index number. As a result, the index is topological sorted.
|
|
1563.2.11
by Robert Collins
Consolidate reweave and join as we have no separate usage, make reweave tests apply to all versionedfile implementations and deprecate the old reweave apis. |
2355 |
|
2356 |
Duplicate entries may be written to the index for a single version id
|
|
2357 |
if this is done then the latter one completely replaces the former:
|
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
2358 |
this allows updates to correct version and parent information.
|
1563.2.11
by Robert Collins
Consolidate reweave and join as we have no separate usage, make reweave tests apply to all versionedfile implementations and deprecate the old reweave apis. |
2359 |
Note that the two entries may share the delta, and that successive
|
2360 |
annotations and references MUST point to the first entry.
|
|
1641.1.2
by Robert Collins
Change knit index files to be robust in the presence of partial writes. |
2361 |
|
2362 |
The index file on disc contains a header, followed by one line per knit
|
|
2363 |
record. The same revision can be present in an index file more than once.
|
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
2364 |
The first occurrence gets assigned a sequence number starting from 0.
|
2365 |
||
1641.1.2
by Robert Collins
Change knit index files to be robust in the presence of partial writes. |
2366 |
The format of a single line is
|
2367 |
REVISION_ID FLAGS BYTE_OFFSET LENGTH( PARENT_ID|PARENT_SEQUENCE_ID)* :\n
|
|
2368 |
REVISION_ID is a utf8-encoded revision id
|
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
2369 |
FLAGS is a comma separated list of flags about the record. Values include
|
1641.1.2
by Robert Collins
Change knit index files to be robust in the presence of partial writes. |
2370 |
no-eol, line-delta, fulltext.
|
2371 |
BYTE_OFFSET is the ascii representation of the byte offset in the data file
|
|
4775.1.1
by Martin Pool
Remove several 'the the' typos |
2372 |
that the compressed data starts at.
|
1641.1.2
by Robert Collins
Change knit index files to be robust in the presence of partial writes. |
2373 |
LENGTH is the ascii representation of the length of the data file.
|
2374 |
PARENT_ID a utf-8 revision id prefixed by a '.' that is a parent of
|
|
2375 |
REVISION_ID.
|
|
2376 |
PARENT_SEQUENCE_ID the ascii representation of the sequence number of a
|
|
2377 |
revision id already in the knit that is a parent of REVISION_ID.
|
|
2378 |
The ' :' marker is the end of record marker.
|
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
2379 |
|
1641.1.2
by Robert Collins
Change knit index files to be robust in the presence of partial writes. |
2380 |
partial writes:
|
2158.3.1
by Dmitry Vasiliev
KnitIndex tests/fixes/optimizations |
2381 |
when a write is interrupted to the index file, it will result in a line
|
2382 |
that does not end in ' :'. If the ' :' is not present at the end of a line,
|
|
2383 |
or at the end of the file, then the record that is missing it will be
|
|
2384 |
ignored by the parser.
|
|
1641.1.2
by Robert Collins
Change knit index files to be robust in the presence of partial writes. |
2385 |
|
1759.2.1
by Jelmer Vernooij
Fix some types (found using aspell). |
2386 |
When writing new records to the index file, the data is preceded by '\n'
|
1641.1.2
by Robert Collins
Change knit index files to be robust in the presence of partial writes. |
2387 |
to ensure that records always start on new lines even if the last write was
|
2388 |
interrupted. As a result its normal for the last line in the index to be
|
|
2389 |
missing a trailing newline. One can be added with no harmful effects.
|
|
3350.6.11
by Martin Pool
Review cleanups and documentation from Robert's mail on 2080618 |
2390 |
|
2391 |
:ivar _kndx_cache: dict from prefix to the old state of KnitIndex objects,
|
|
2392 |
where prefix is e.g. the (fileid,) for .texts instances or () for
|
|
2393 |
constant-mapped things like .revisions, and the old state is
|
|
2394 |
tuple(history_vector, cache_dict). This is used to prevent having an
|
|
2395 |
ABI change with the C extension that reads .kndx files.
|
|
1563.2.4
by Robert Collins
First cut at including the knit implementation of versioned_file. |
2396 |
"""
|
2397 |
||
1666.1.6
by Robert Collins
Make knit the default format. |
2398 |
HEADER = "# bzr knit index 8\n" |
1563.2.4
by Robert Collins
First cut at including the knit implementation of versioned_file. |
2399 |
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
2400 |
def __init__(self, transport, mapper, get_scope, allow_writes, is_locked): |
2401 |
"""Create a _KndxIndex on transport using mapper."""
|
|
2402 |
self._transport = transport |
|
2403 |
self._mapper = mapper |
|
2404 |
self._get_scope = get_scope |
|
2405 |
self._allow_writes = allow_writes |
|
2406 |
self._is_locked = is_locked |
|
2407 |
self._reset_cache() |
|
2408 |
self.has_graph = True |
|
2409 |
||
4009.3.12
by Robert Collins
Polish on inserting record streams with missing compression parents. |
2410 |
def add_records(self, records, random_id=False, missing_compression_parents=False): |
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
2411 |
"""Add multiple records to the index.
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
2412 |
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
2413 |
:param records: a list of tuples:
|
2414 |
(key, options, access_memo, parents).
|
|
2415 |
:param random_id: If True the ids being added were randomly generated
|
|
2416 |
and no check for existence will be performed.
|
|
4009.3.12
by Robert Collins
Polish on inserting record streams with missing compression parents. |
2417 |
:param missing_compression_parents: If True the records being added are
|
2418 |
only compressed against texts already in the index (or inside
|
|
2419 |
records). If False the records all refer to unavailable texts (or
|
|
2420 |
texts inside records) as compression parents.
|
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
2421 |
"""
|
4009.3.12
by Robert Collins
Polish on inserting record streams with missing compression parents. |
2422 |
if missing_compression_parents: |
2423 |
# It might be nice to get the edge of the records. But keys isn't
|
|
2424 |
# _wrong_.
|
|
2425 |
keys = sorted(record[0] for record in records) |
|
2426 |
raise errors.RevisionNotPresent(keys, self) |
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
2427 |
paths = {} |
2428 |
for record in records: |
|
2429 |
key = record[0] |
|
2430 |
prefix = key[:-1] |
|
2431 |
path = self._mapper.map(key) + '.kndx' |
|
2432 |
path_keys = paths.setdefault(path, (prefix, [])) |
|
2433 |
path_keys[1].append(record) |
|
2434 |
for path in sorted(paths): |
|
2435 |
prefix, path_keys = paths[path] |
|
2436 |
self._load_prefixes([prefix]) |
|
2437 |
lines = [] |
|
2438 |
orig_history = self._kndx_cache[prefix][1][:] |
|
2439 |
orig_cache = self._kndx_cache[prefix][0].copy() |
|
2440 |
||
2441 |
try: |
|
2442 |
for key, options, (_, pos, size), parents in path_keys: |
|
2443 |
if parents is None: |
|
2444 |
# kndx indices cannot be parentless.
|
|
2445 |
parents = () |
|
2446 |
line = "\n%s %s %s %s %s :" % ( |
|
2447 |
key[-1], ','.join(options), pos, size, |
|
2448 |
self._dictionary_compress(parents)) |
|
4398.8.8
by John Arbash Meinel
Respond to Andrew's review comments. |
2449 |
if type(line) is not str: |
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
2450 |
raise AssertionError( |
2451 |
'data must be utf8 was %s' % type(line)) |
|
2452 |
lines.append(line) |
|
2453 |
self._cache_key(key, options, pos, size, parents) |
|
2454 |
if len(orig_history): |
|
2455 |
self._transport.append_bytes(path, ''.join(lines)) |
|
2456 |
else: |
|
2457 |
self._init_index(path, lines) |
|
2458 |
except: |
|
2459 |
# If any problems happen, restore the original values and re-raise
|
|
2460 |
self._kndx_cache[prefix] = (orig_cache, orig_history) |
|
2461 |
raise
|
|
2462 |
||
4011.5.7
by Andrew Bennetts
Remove leading underscore from _scan_unvalidate_index, explicitly NotImplementedError it for _KndxIndex. |
2463 |
def scan_unvalidated_index(self, graph_index): |
2464 |
"""See _KnitGraphIndex.scan_unvalidated_index."""
|
|
4011.5.11
by Robert Collins
Polish the KnitVersionedFiles.scan_unvalidated_index api. |
2465 |
# Because kndx files do not support atomic insertion via separate index
|
2466 |
# files, they do not support this method.
|
|
4011.5.7
by Andrew Bennetts
Remove leading underscore from _scan_unvalidate_index, explicitly NotImplementedError it for _KndxIndex. |
2467 |
raise NotImplementedError(self.scan_unvalidated_index) |
2468 |
||
2469 |
def get_missing_compression_parents(self): |
|
2470 |
"""See _KnitGraphIndex.get_missing_compression_parents."""
|
|
4011.5.11
by Robert Collins
Polish the KnitVersionedFiles.scan_unvalidated_index api. |
2471 |
# Because kndx files do not support atomic insertion via separate index
|
2472 |
# files, they do not support this method.
|
|
2473 |
raise NotImplementedError(self.get_missing_compression_parents) |
|
4032.1.1
by John Arbash Meinel
Merge the removal of all trailing whitespace, and resolve conflicts. |
2474 |
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
2475 |
    def _cache_key(self, key, options, pos, size, parent_keys):
        """Cache a version record in the history array and index cache.

        This is inlined into _load_data for performance. KEEP IN SYNC.
        (It saves 60ms, 25% of the __init__ overhead on local 4000 record
         indexes).

        :param key: The record's key; key[:-1] is the prefix, key[-1] the
            version id.
        :param options: The option strings recorded for this version.
        :param pos: Data-file position of the record.
        :param size: Size of the record.
        :param parent_keys: Parent keys; every parent must share key's prefix.
        :raises ValueError: if any parent has a different prefix than key.
        """
        prefix = key[:-1]
        version_id = key[-1]
        # last-element only for compatibility with the C load_data.
        parents = tuple(parent[-1] for parent in parent_keys)
        for parent in parent_keys:
            if parent[:-1] != prefix:
                # kndx storage is partitioned per prefix; cross-prefix
                # parents cannot be represented.
                raise ValueError("mismatched prefixes for %r, %r" % (
                    key, parent_keys))
        cache, history = self._kndx_cache[prefix]
        # only want the _history index to reference the 1st index entry
        # for version_id
        if version_id not in cache:
            index = len(history)
            history.append(version_id)
        else:
            # Already seen: keep the history slot that was first assigned.
            index = cache[version_id][5]
        cache[version_id] = (version_id,
                            options,
                            pos,
                            size,
                            parents,
                            index)
|
1563.2.4
by Robert Collins
First cut at including the knit implementation of versioned_file. |
2504 |
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
2505 |
def check_header(self, fp): |
2506 |
line = fp.readline() |
|
2507 |
if line == '': |
|
2508 |
# An empty file can actually be treated as though the file doesn't
|
|
2509 |
# exist yet.
|
|
2510 |
raise errors.NoSuchFile(self) |
|
2511 |
if line != self.HEADER: |
|
2512 |
raise KnitHeaderError(badline=line, filename=self) |
|
2513 |
||
2514 |
    def _check_read(self):
        """Raise if reads are not currently permitted.

        :raises errors.ObjectNotLocked: if the index is not locked.
        """
        if not self._is_locked():
            raise errors.ObjectNotLocked(self)
        if self._get_scope() != self._scope:
            # Lock scope changed since we last parsed; cached kndx data may
            # be stale, so discard it.
            self._reset_cache()
|
2519 |
||
3316.2.3
by Robert Collins
Remove manual notification of transaction finishing on versioned files. |
2520 |
    def _check_write_ok(self):
        """Raise if writes are not currently permitted.

        :raises errors.ObjectNotLocked: if the index is not locked.
        :raises errors.ReadOnlyObjectDirtiedError: if the index is locked
            but not writable.
        """
        if not self._is_locked():
            raise errors.ObjectNotLocked(self)
        if self._get_scope() != self._scope:
            # Lock scope changed since we last parsed; cached kndx data may
            # be stale, so discard it.
            self._reset_cache()
        if self._mode != 'w':
            raise errors.ReadOnlyObjectDirtiedError(self)
|
2528 |
||
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
2529 |
def get_build_details(self, keys): |
2530 |
"""Get the method, index_memo and compression parent for keys.
|
|
3218.1.1
by Robert Collins
Reduce index query pressure for text construction by batching the individual queries into single batch queries. |
2531 |
|
3224.1.29
by John Arbash Meinel
Properly handle annotating when ghosts are present. |
2532 |
Ghosts are omitted from the result.
|
2533 |
||
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
2534 |
:param keys: An iterable of keys.
|
3350.6.10
by Martin Pool
VersionedFiles review cleanups |
2535 |
:return: A dict of key:(index_memo, compression_parent, parents,
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
2536 |
record_details).
|
3224.1.14
by John Arbash Meinel
Switch to making content_details opaque, step 1 |
2537 |
index_memo
|
2538 |
opaque structure to pass to read_records to extract the raw
|
|
2539 |
data
|
|
2540 |
compression_parent
|
|
2541 |
Content that this record is built upon, may be None
|
|
2542 |
parents
|
|
2543 |
Logical parents of this node
|
|
3224.1.15
by John Arbash Meinel
Finish removing method and noeol from general knowledge, |
2544 |
record_details
|
3224.1.14
by John Arbash Meinel
Switch to making content_details opaque, step 1 |
2545 |
extra information about the content which needs to be passed to
|
3224.1.15
by John Arbash Meinel
Finish removing method and noeol from general knowledge, |
2546 |
Factory.parse_record
|
3218.1.1
by Robert Collins
Reduce index query pressure for text construction by batching the individual queries into single batch queries. |
2547 |
"""
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
2548 |
parent_map = self.get_parent_map(keys) |
3218.1.1
by Robert Collins
Reduce index query pressure for text construction by batching the individual queries into single batch queries. |
2549 |
result = {} |
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
2550 |
for key in keys: |
2551 |
if key not in parent_map: |
|
2552 |
continue # Ghost |
|
2553 |
method = self.get_method(key) |
|
2554 |
parents = parent_map[key] |
|
3218.1.1
by Robert Collins
Reduce index query pressure for text construction by batching the individual queries into single batch queries. |
2555 |
if method == 'fulltext': |
2556 |
compression_parent = None |
|
2557 |
else: |
|
3224.1.6
by John Arbash Meinel
Refactor the annotation logic into a helper class. |
2558 |
compression_parent = parents[0] |
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
2559 |
noeol = 'no-eol' in self.get_options(key) |
2560 |
index_memo = self.get_position(key) |
|
2561 |
result[key] = (index_memo, compression_parent, |
|
3224.1.14
by John Arbash Meinel
Switch to making content_details opaque, step 1 |
2562 |
parents, (method, noeol)) |
3218.1.1
by Robert Collins
Reduce index query pressure for text construction by batching the individual queries into single batch queries. |
2563 |
return result |
2564 |
||
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
2565 |
def get_method(self, key): |
2566 |
"""Return compression method of specified key."""
|
|
2567 |
options = self.get_options(key) |
|
2568 |
if 'fulltext' in options: |
|
2569 |
return 'fulltext' |
|
2570 |
elif 'line-delta' in options: |
|
2571 |
return 'line-delta' |
|
2572 |
else: |
|
2573 |
raise errors.KnitIndexUnknownMethod(self, options) |
|
2574 |
||
2575 |
def get_options(self, key): |
|
2576 |
"""Return a list representing options.
|
|
2577 |
||
2578 |
e.g. ['foo', 'bar']
|
|
2579 |
"""
|
|
2580 |
prefix, suffix = self._split_key(key) |
|
2581 |
self._load_prefixes([prefix]) |
|
3350.8.9
by Robert Collins
define behaviour for add_lines with stacked storage. |
2582 |
try: |
2583 |
return self._kndx_cache[prefix][0][suffix][1] |
|
2584 |
except KeyError: |
|
2585 |
raise RevisionNotPresent(key, self) |
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
2586 |
|
4593.5.35
by John Arbash Meinel
Start working on a per-vf implementation test of find_ancestry. |
2587 |
def find_ancestry(self, keys): |
2588 |
"""See CombinedGraphIndex.find_ancestry()"""
|
|
2589 |
prefixes = set(key[:-1] for key in keys) |
|
2590 |
self._load_prefixes(prefixes) |
|
2591 |
result = {} |
|
2592 |
parent_map = {} |
|
2593 |
missing_keys = set() |
|
2594 |
pending_keys = list(keys) |
|
2595 |
# This assumes that keys will not reference parents in a different
|
|
2596 |
# prefix, which is accurate so far.
|
|
2597 |
while pending_keys: |
|
2598 |
key = pending_keys.pop() |
|
2599 |
if key in parent_map: |
|
2600 |
continue
|
|
2601 |
prefix = key[:-1] |
|
2602 |
try: |
|
2603 |
suffix_parents = self._kndx_cache[prefix][0][key[-1]][4] |
|
2604 |
except KeyError: |
|
2605 |
missing_keys.add(key) |
|
2606 |
else: |
|
2607 |
parent_keys = tuple([prefix + (suffix,) |
|
2608 |
for suffix in suffix_parents]) |
|
2609 |
parent_map[key] = parent_keys |
|
2610 |
pending_keys.extend([p for p in parent_keys |
|
2611 |
if p not in parent_map]) |
|
2612 |
return parent_map, missing_keys |
|
2613 |
||
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
2614 |
def get_parent_map(self, keys): |
2615 |
"""Get a map of the parents of keys.
|
|
2616 |
||
2617 |
:param keys: The keys to look up parents for.
|
|
2618 |
:return: A mapping from keys to parents. Absent keys are absent from
|
|
2619 |
the mapping.
|
|
2620 |
"""
|
|
2621 |
# Parse what we need to up front, this potentially trades off I/O
|
|
2622 |
# locality (.kndx and .knit in the same block group for the same file
|
|
2623 |
# id) for less checking in inner loops.
|
|
3350.6.10
by Martin Pool
VersionedFiles review cleanups |
2624 |
prefixes = set(key[:-1] for key in keys) |
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
2625 |
self._load_prefixes(prefixes) |
2626 |
result = {} |
|
2627 |
for key in keys: |
|
2628 |
prefix = key[:-1] |
|
2629 |
try: |
|
2630 |
suffix_parents = self._kndx_cache[prefix][0][key[-1]][4] |
|
2631 |
except KeyError: |
|
2632 |
pass
|
|
2633 |
else: |
|
2634 |
result[key] = tuple(prefix + (suffix,) for |
|
2635 |
suffix in suffix_parents) |
|
2636 |
return result |
|
2637 |
||
2638 |
def get_position(self, key): |
|
2639 |
"""Return details needed to access the version.
|
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
2640 |
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
2641 |
:return: a tuple (key, data position, size) to hand to the access
|
2642 |
logic to get the record.
|
|
2643 |
"""
|
|
2644 |
prefix, suffix = self._split_key(key) |
|
2645 |
self._load_prefixes([prefix]) |
|
2646 |
entry = self._kndx_cache[prefix][0][suffix] |
|
2647 |
return key, entry[2], entry[3] |
|
2648 |
||
3830.3.12
by Martin Pool
Review cleanups: unify has_key impls, add missing_keys(), clean up exception blocks |
2649 |
    # Shared membership test from the index module (presumably answers via
    # get_parent_map -- see _mod_index for the exact contract).
    has_key = _mod_index._has_key_from_parent_map
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
2650 |
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
2651 |
def _init_index(self, path, extra_lines=[]): |
2652 |
"""Initialize an index."""
|
|
2653 |
sio = StringIO() |
|
2654 |
sio.write(self.HEADER) |
|
2655 |
sio.writelines(extra_lines) |
|
2656 |
sio.seek(0) |
|
2657 |
self._transport.put_file_non_atomic(path, sio, |
|
2658 |
create_parent_dir=True) |
|
2659 |
# self._create_parent_dir)
|
|
2660 |
# mode=self._file_mode,
|
|
2661 |
# dir_mode=self._dir_mode)
|
|
2662 |
||
2663 |
    def keys(self):
        """Get all the keys in the collection.

        The keys are not ordered.

        :return: A set of tuple keys (prefix + (suffix,)).
        """
        result = set()
        # Identify all key prefixes.
        # XXX: A bit hacky, needs polish.
        if type(self._mapper) is ConstantMapper:
            # A constant mapping stores everything under one path, so the
            # only prefix is the empty tuple.
            prefixes = [()]
        else:
            relpaths = set()
            for quoted_relpath in self._transport.iter_files_recursive():
                # Strip the extension so each index/data file maps back to
                # the same prefix path.
                path, ext = os.path.splitext(quoted_relpath)
                relpaths.add(path)
            prefixes = [self._mapper.unmap(path) for path in relpaths]
        self._load_prefixes(prefixes)
        for prefix in prefixes:
            # _kndx_cache[prefix][1] is the history list of suffixes.
            for suffix in self._kndx_cache[prefix][1]:
                result.add(prefix + (suffix,))
        return result
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
2684 |
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
2685 |
    def _load_prefixes(self, prefixes):
        """Load the indices for prefixes.

        Parses the .kndx file for each prefix that is not yet present in
        self._kndx_cache.  A missing .kndx file is cached as an empty
        index ({}, []).
        """
        self._check_read()
        for prefix in prefixes:
            if prefix not in self._kndx_cache:
                # the load_data interface writes to these variables.
                self._cache = {}
                self._history = []
                self._filename = prefix
                try:
                    path = self._mapper.map(prefix) + '.kndx'
                    fp = self._transport.get(path)
                    try:
                        # _load_data may raise NoSuchFile if the target knit is
                        # completely empty.
                        _load_data(self, fp)
                    finally:
                        fp.close()
                    self._kndx_cache[prefix] = (self._cache, self._history)
                    # Drop the scratch attributes so state lives only in
                    # _kndx_cache between calls.
                    del self._cache
                    del self._filename
                    del self._history
                except NoSuchFile:
                    self._kndx_cache[prefix] = ({}, [])
                    if type(self._mapper) is ConstantMapper:
                        # preserve behaviour for revisions.kndx etc.
                        self._init_index(path)
                    del self._cache
                    del self._filename
                    del self._history
|
2715 |
||
3830.3.12
by Martin Pool
Review cleanups: unify has_key impls, add missing_keys(), clean up exception blocks |
2716 |
    # Shared helper from the index module (presumably derives missing keys
    # from get_parent_map -- see _mod_index for the exact contract).
    missing_keys = _mod_index._missing_keys_from_parent_map
2717 |
||
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
2718 |
def _partition_keys(self, keys): |
2719 |
"""Turn keys into a dict of prefix:suffix_list."""
|
|
2720 |
result = {} |
|
2721 |
for key in keys: |
|
2722 |
prefix_keys = result.setdefault(key[:-1], []) |
|
2723 |
prefix_keys.append(key[-1]) |
|
2724 |
return result |
|
2725 |
||
2726 |
    def _dictionary_compress(self, keys):
        """Dictionary compress keys.

        :param keys: The keys to generate references to.
        :return: A string representation of keys. keys which are present are
            dictionary compressed, and others are emitted as fulltext with a
            '.' prefix.
        :raises ValueError: if the keys do not all share one prefix.
        """
        if not keys:
            return ''
        result_list = []
        prefix = keys[0][:-1]
        cache = self._kndx_cache[prefix][0]
        for key in keys:
            if key[:-1] != prefix:
                # kndx indices cannot refer across partitioned storage.
                raise ValueError("mismatched prefixes for %r" % keys)
            if key[-1] in cache:
                # -- inlined lookup() --
                # cache entry slot 5 is the history index for this version.
                result_list.append(str(cache[key[-1]][5]))
                # -- end lookup () --
            else:
                result_list.append('.' + key[-1])
        return ' '.join(result_list)
2750 |
||
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
2751 |
def _reset_cache(self): |
2752 |
# Possibly this should be a LRU cache. A dictionary from key_prefix to
|
|
2753 |
# (cache_dict, history_vector) for parsed kndx files.
|
|
2754 |
self._kndx_cache = {} |
|
2755 |
self._scope = self._get_scope() |
|
2756 |
allow_writes = self._allow_writes() |
|
2757 |
if allow_writes: |
|
2758 |
self._mode = 'w' |
|
1563.2.4
by Robert Collins
First cut at including the knit implementation of versioned_file. |
2759 |
else: |
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
2760 |
self._mode = 'r' |
2761 |
||
3878.1.2
by John Arbash Meinel
Move the sorting into each index, and customize it for Kndx access. |
2762 |
def _sort_keys_by_io(self, keys, positions): |
2763 |
"""Figure out an optimal order to read the records for the given keys.
|
|
2764 |
||
2765 |
Sort keys, grouped by index and sorted by position.
|
|
2766 |
||
2767 |
:param keys: A list of keys whose records we want to read. This will be
|
|
2768 |
sorted 'in-place'.
|
|
2769 |
:param positions: A dict, such as the one returned by
|
|
2770 |
_get_components_positions()
|
|
2771 |
:return: None
|
|
2772 |
"""
|
|
3878.1.3
by John Arbash Meinel
Add a comment about what data we are sorting by. |
2773 |
def get_sort_key(key): |
3878.1.2
by John Arbash Meinel
Move the sorting into each index, and customize it for Kndx access. |
2774 |
index_memo = positions[key][1] |
2775 |
# Group by prefix and position. index_memo[0] is the key, so it is
|
|
2776 |
# (file_id, revision_id) and we don't want to sort on revision_id,
|
|
2777 |
# index_memo[1] is the position, and index_memo[2] is the size,
|
|
2778 |
# which doesn't matter for the sort
|
|
2779 |
return index_memo[0][:-1], index_memo[1] |
|
3878.1.3
by John Arbash Meinel
Add a comment about what data we are sorting by. |
2780 |
return keys.sort(key=get_sort_key) |
3878.1.2
by John Arbash Meinel
Move the sorting into each index, and customize it for Kndx access. |
2781 |
|
4039.3.5
by John Arbash Meinel
Add direct tests for _get_total_build_size. |
2782 |
    # Bind the module-level _get_total_build_size helper as a method of this
    # index class.
    _get_total_build_size = _get_total_build_size
4039.3.2
by John Arbash Meinel
Batch get_record_stream(fulltexts) into 5MB requests. |
2783 |
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
2784 |
def _split_key(self, key): |
2785 |
"""Split key into a prefix and suffix."""
|
|
2786 |
return key[:-1], key[-1] |
|
2787 |
||
2788 |
||
4309.1.1
by Andrew Bennetts
Track which keys referenced the missing parents. |
2789 |
class _KeyRefs(object):
    """Track keys and the unsatisfied references between them.

    refs maps each referenced key to the set of keys that refer to it; the
    entry is discarded ("satisfied") once the referenced key is added.
    Optionally also remembers every key that has been added.
    """

    def __init__(self, track_new_keys=False):
        # dict mapping 'key' to 'set of keys referring to that key'
        self.refs = {}
        if track_new_keys:
            # set remembering all new keys
            self.new_keys = set()
        else:
            # new-key tracking disabled
            self.new_keys = None

    def clear(self):
        """Forget all tracked references and (if tracked) new keys."""
        if self.refs:
            self.refs.clear()
        if self.new_keys:
            self.new_keys.clear()

    def add_references(self, key, refs):
        """Record that key refers to every key in refs, then add key."""
        # Record the new references
        for referenced in refs:
            try:
                needed_by = self.refs[referenced]
            except KeyError:
                needed_by = self.refs[referenced] = set()
            needed_by.add(key)
        # Discard references satisfied by the new key
        self.add_key(key)

    def get_new_keys(self):
        """Return the set of added keys, or None if not tracking them."""
        return self.new_keys

    def get_unsatisfied_refs(self):
        """Return an iterator over keys referenced but not yet added."""
        return self.refs.iterkeys()

    def _satisfy_refs_for_key(self, key):
        """Drop any outstanding references to key."""
        try:
            del self.refs[key]
        except KeyError:
            # No keys depended on this key.  That's ok.
            pass

    def add_key(self, key):
        # satisfy refs for key, and remember that we've seen this key.
        self._satisfy_refs_for_key(key)
        if self.new_keys is not None:
            self.new_keys.add(key)

    def satisfy_refs_for_keys(self, keys):
        """Drop outstanding references to keys without marking them as new."""
        for key in keys:
            self._satisfy_refs_for_key(key)

    def get_referrers(self):
        """Return the set of keys holding at least one unsatisfied reference."""
        result = set()
        for referrers in self.refs.itervalues():
            result.update(referrers)
        return result
|
2845 |
||
4309.1.1
by Andrew Bennetts
Track which keys referenced the missing parents. |
2846 |
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
2847 |
class _KnitGraphIndex(object): |
2848 |
"""A KnitVersionedFiles index layered on GraphIndex."""
|
|
2849 |
||
2850 |
    def __init__(self, graph_index, is_locked, deltas=False, parents=True,
        add_callback=None, track_external_parent_refs=False):
        """Construct a KnitGraphIndex on a graph_index.

        :param graph_index: An implementation of bzrlib.index.GraphIndex.
        :param is_locked: A callback, returns True if the index is locked and
            thus usable.
        :param deltas: Allow delta-compressed records.
        :param parents: If True, record knits parents, if not do not record
            parents.
        :param add_callback: If not None, allow additions to the index and call
            this callback with a list of added GraphIndex nodes:
            [(node, value, node_refs), ...]
        :param track_external_parent_refs: If True, record all external parent
            references parents from added records. These can be retrieved
            later by calling get_missing_parents().
        """
        self._add_callback = add_callback
        self._graph_index = graph_index
        self._deltas = deltas
        self._parents = parents
        if deltas and not parents:
            # XXX: TODO: Delta tree and parent graph should be conceptually
            # separate.
            raise KnitCorrupt(self, "Cannot do delta compression without "
                "parent tracking.")
        self.has_graph = parents
        self._is_locked = is_locked
        # Compression parents referenced by unvalidated records; filled in by
        # scan_unvalidated_index.
        self._missing_compression_parents = set()
        if track_external_parent_refs:
            self._key_dependencies = _KeyRefs()
        else:
            self._key_dependencies = None
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
2885 |
|
3517.4.13
by Martin Pool
Add repr methods |
2886 |
def __repr__(self): |
2887 |
return "%s(%r)" % (self.__class__.__name__, self._graph_index) |
|
2888 |
||
def add_records(self, records, random_id=False,
        missing_compression_parents=False):
    """Add multiple records to the index.

    This function does not insert data into the Immutable GraphIndex
    backing the KnitGraphIndex, instead it prepares data for insertion by
    the caller and checks that it is safe to insert then calls
    self._add_callback with the prepared GraphIndex nodes.

    :param records: a list of tuples:
                     (key, options, access_memo, parents).
    :param random_id: If True the ids being added were randomly generated
        and no check for existence will be performed.
    :param missing_compression_parents: If False the records being added
        are only compressed against texts already in the index (or inside
        records). If True the records may refer to unavailable texts (or
        texts inside records) as compression parents.
    """
    if not self._add_callback:
        raise errors.ReadOnlyError(self)
    # we hope there are no repositories with inconsistent parentage
    # anymore.

    keys = {}
    compression_parents = set()
    key_dependencies = self._key_dependencies
    for (key, options, access_memo, parents) in records:
        if self._parents:
            parents = tuple(parents)
            if key_dependencies is not None:
                key_dependencies.add_references(key, parents)
        index, pos, size = access_memo
        # The value encodes the no-eol flag in the first byte, then the
        # position and size within the data file.
        if 'no-eol' in options:
            value = 'N'
        else:
            value = ' '
        value += "%d %d" % (pos, size)
        if not self._deltas:
            if 'line-delta' in options:
                raise KnitCorrupt(self, "attempt to add line-delta in non-delta knit")
        if self._parents:
            if self._deltas:
                if 'line-delta' in options:
                    node_refs = (parents, (parents[0],))
                    if missing_compression_parents:
                        compression_parents.add(parents[0])
                else:
                    node_refs = (parents, ())
            else:
                node_refs = (parents, )
        else:
            if parents:
                raise KnitCorrupt(self, "attempt to add node with parents "
                    "in parentless index.")
            node_refs = ()
        keys[key] = (value, node_refs)
    # check for dups
    if not random_id:
        present_nodes = self._get_entries(keys)
        for (index, key, value, node_refs) in present_nodes:
            parents = node_refs[:1]
            # Sometimes these are passed as a list rather than a tuple
            passed = static_tuple.as_tuples(keys[key])
            passed_parents = passed[1][:1]
            if (value[0] != keys[key][0][0] or
                parents != passed_parents):
                node_refs = static_tuple.as_tuples(node_refs)
                raise KnitCorrupt(self, "inconsistent details in add_records"
                    ": %s %s" % ((value, node_refs), passed))
            # Already present and consistent: nothing to (re)insert.
            del keys[key]
    result = []
    if self._parents:
        # .items() rather than .iteritems(): behaves identically in this
        # loop under Python 2 and keeps the method usable on Python 3.
        for key, (value, node_refs) in keys.items():
            result.append((key, value, node_refs))
    else:
        for key, (value, node_refs) in keys.items():
            result.append((key, value))
    self._add_callback(result)
    if missing_compression_parents:
        # This may appear to be incorrect (it does not check for
        # compression parents that are in the existing graph index),
        # but such records won't have been buffered, so this is
        # actually correct: every entry when
        # missing_compression_parents==True either has a missing parent, or
        # a parent that is one of the keys in records.
        compression_parents.difference_update(keys)
        self._missing_compression_parents.update(compression_parents)
    # Adding records may have satisfied missing compression parents.
    self._missing_compression_parents.difference_update(keys)
def scan_unvalidated_index(self, graph_index):
    """Inform this _KnitGraphIndex that there is an unvalidated index.

    This allows this _KnitGraphIndex to keep track of any missing
    compression parents we may want to have filled in to make those
    indices valid.

    :param graph_index: A GraphIndex
    """
    if self._deltas:
        # Compression parents referenced from outside graph_index that we
        # do not already know about become newly missing.
        unresolved = graph_index.external_references(ref_list_num=1)
        unresolved.difference_update(self.get_parent_map(unresolved))
        self._missing_compression_parents.update(unresolved)
    if self._key_dependencies is not None:
        # Add parent refs from graph_index (and discard parent refs that
        # the graph_index has).
        for node in graph_index.iter_all_entries():
            self._key_dependencies.add_references(node[1], node[3][0])
def get_missing_compression_parents(self):
    """Return the keys of missing compression parents.

    Missing compression parents occur when a record stream was missing
    basis texts, or a index was scanned that had missing basis texts.
    """
    # Return an immutable snapshot so callers cannot mutate our state.
    missing = self._missing_compression_parents
    return frozenset(missing)
def get_missing_parents(self):
    """Return the keys of missing parents."""
    # If updating this, you should also update
    # groupcompress._GCGraphIndex.get_missing_parents
    # We may have false positives, so filter those out.
    key_deps = self._key_dependencies
    known = self.get_parent_map(key_deps.get_unsatisfied_refs())
    key_deps.satisfy_refs_for_keys(known)
    return frozenset(key_deps.get_unsatisfied_refs())
def _check_read(self):
    """raise if reads are not permitted."""
    if self._is_locked():
        return
    raise errors.ObjectNotLocked(self)

def _check_write_ok(self):
    """Assert if writes are not permitted."""
    if self._is_locked():
        return
    raise errors.ObjectNotLocked(self)
def _compression_parent(self, an_entry):
    """Return the key an_entry is compressed against, or None.

    The second reference list carries the compression parent (deltas
    imply parent tracking at the moment).
    """
    delta_refs = an_entry[3][1]
    if not delta_refs:
        return None
    if len(delta_refs) != 1:
        raise AssertionError(
            "Too many compression parents: %r" % delta_refs)
    return delta_refs[0]
def get_build_details(self, keys):
    """Get the method, index_memo and compression parent for version_ids.

    Ghosts are omitted from the result.

    :param keys: An iterable of keys.
    :return: A dict of key:
        (index_memo, compression_parent, parents, record_details).
        index_memo
            opaque structure to pass to read_records to extract the raw
            data
        compression_parent
            Content that this record is built upon, may be None
        parents
            Logical parents of this node
        record_details
            extra information about the content which needs to be passed to
            Factory.parse_record
    """
    self._check_read()
    details = {}
    for entry in self._get_entries(keys, False):
        key = entry[1]
        if self._parents:
            parents = entry[3][0]
        else:
            parents = ()
        if self._deltas:
            compression_parent_key = self._compression_parent(entry)
        else:
            compression_parent_key = None
        # 'N' in the first value byte flags a record with no trailing EOL.
        noeol = entry[2][0] == 'N'
        method = 'line-delta' if compression_parent_key else 'fulltext'
        details[key] = (self._node_to_position(entry),
                        compression_parent_key, parents,
                        (method, noeol))
    return details
def _get_entries(self, keys, check_present=False):
    """Get the entries for keys.

    :param keys: An iterable of index key tuples.
    :param check_present: When True, raise RevisionNotPresent for any key
        that was requested but not found.
    """
    wanted = set(keys)
    seen = set()
    if self._parents:
        for node in self._graph_index.iter_entries(wanted):
            yield node
            seen.add(node[1])
    else:
        # adapt parentless index to the rest of the code.
        for node in self._graph_index.iter_entries(wanted):
            yield node[0], node[1], node[2], ()
            seen.add(node[1])
    if check_present:
        absent = wanted.difference(seen)
        if absent:
            raise RevisionNotPresent(absent.pop(), self)
def get_method(self, key):
    """Return compression method of specified key."""
    node = self._get_node(key)
    return self._get_method(node)

def _get_method(self, node):
    # Non-delta indices only ever store fulltexts; otherwise a record with
    # a compression parent is a line-delta.
    if not self._deltas:
        return 'fulltext'
    return 'line-delta' if self._compression_parent(node) else 'fulltext'

def _get_node(self, key):
    entries = list(self._get_entries([key]))
    if not entries:
        raise RevisionNotPresent(key, self)
    return entries[0]
def get_options(self, key):
    """Return a list representing options.

    e.g. ['foo', 'bar']
    """
    node = self._get_node(key)
    opts = [self._get_method(node)]
    # The value's first byte flags a record without a trailing EOL.
    if node[2].startswith('N'):
        opts.append('no-eol')
    return opts
def find_ancestry(self, keys):
    """See CombinedGraphIndex.find_ancestry()"""
    # Delegate to the backing graph index, walking reference list 0.
    return self._graph_index.find_ancestry(keys, 0)
def get_parent_map(self, keys):
    """Get a map of the parents of keys.

    :param keys: The keys to look up parents for.
    :return: A mapping from keys to parents. Absent keys are absent from
        the mapping.
    """
    self._check_read()
    entries = self._get_entries(keys)
    if self._parents:
        return dict((entry[1], entry[3][0]) for entry in entries)
    # Parentless indices map every present key to None.
    return dict((entry[1], None) for entry in entries)
def get_position(self, key):
    """Return details needed to access the version.

    :return: a tuple (index, data position, size) to hand to the access
        logic to get the record.
    """
    return self._node_to_position(self._get_node(key))
# Reuse the shared parent-map based implementation from bzrlib.index:
# answers "is this key present?" via get_parent_map.
has_key = _mod_index._has_key_from_parent_map
def keys(self):
    """Get all the keys in the collection.

    The keys are not ordered.
    """
    self._check_read()
    all_entries = self._graph_index.iter_all_entries()
    return [entry[1] for entry in all_entries]
# Reuse the shared parent-map based implementation from bzrlib.index:
# returns the subset of requested keys that are absent.
missing_keys = _mod_index._missing_keys_from_parent_map
def _node_to_position(self, node):
    """Convert an index value to position details.

    The value is a one-byte no-eol flag followed by "pos size"; returns
    (index, position, size).
    """
    fields = node[2][1:].split(' ')
    return node[0], int(fields[0]), int(fields[1])
def _sort_keys_by_io(self, keys, positions):
    """Figure out an optimal order to read the records for the given keys.

    Sort keys, grouped by index and sorted by position.

    :param keys: A list of keys whose records we want to read. This will be
        sorted 'in-place'.
    :param positions: A dict, such as the one returned by
        _get_components_positions()
    :return: None
    """
    def _memo_for(key):
        # index_memo is at offset [1]. It is made up of (GraphIndex,
        # position, size). GI is an object, which will be unique for each
        # pack file. This causes us to group by pack file, then sort by
        # position. Size doesn't matter, but it isn't worth breaking up
        # the tuple.
        return positions[key][1]
    keys.sort(key=_memo_for)
# Expose the module-level _get_total_build_size helper as a method-style
# attribute on this class (same name, bound at class-definition time).
_get_total_build_size = _get_total_build_size
class _KnitKeyAccess(object):
    """Access to records in .knit files."""

    def __init__(self, transport, mapper):
        """Create a _KnitKeyAccess with transport and mapper.

        :param transport: The transport the access object is rooted at.
        :param mapper: The mapper used to map keys to .knit files.
        """
        self._transport = transport
        self._mapper = mapper

    def add_raw_records(self, key_sizes, raw_data):
        """Add raw knit bytes to a storage area.

        Each raw data segment is appended to the .knit file its key maps
        to, one write per segment.

        :param key_sizes: An iterable of tuples containing the key and size
            of each raw data segment.
        :param raw_data: A bytestring containing the data.
        :return: A list of memos to retrieve the record later. Each memo is
            an opaque index memo. For _KnitKeyAccess the memo is (key, pos,
            length), where the key is the record key.
        """
        if type(raw_data) is not str:
            raise AssertionError(
                'data must be plain bytes was %s' % type(raw_data))
        memos = []
        cursor = 0
        # TODO: This can be tuned for writing to sftp and other servers
        # where append() is relatively expensive by grouping the writes to
        # each key prefix.
        for key, size in key_sizes:
            path = self._mapper.map(key)
            segment = raw_data[cursor:cursor + size]
            try:
                start = self._transport.append_bytes(path + '.knit', segment)
            except errors.NoSuchFile:
                # The containing directory did not exist yet: create it and
                # retry the append.
                self._transport.mkdir(osutils.dirname(path))
                start = self._transport.append_bytes(path + '.knit', segment)
            # if base == 0:
            # chmod.
            cursor += size
            memos.append((key, start, size))
        return memos

    def flush(self):
        """Flush pending writes on this access object.

        For .knit files this is a no-op.
        """
        pass

    def get_raw_records(self, memos_for_retrieval):
        """Get the raw bytes for a records.

        :param memos_for_retrieval: An iterable containing the access memo
            for retrieving the bytes.
        :return: An iterator over the bytes of the records.
        """
        # First pass: group runs of memos sharing a key prefix so each
        # .knit file is fetched with a single readv call.
        grouped = []
        active_prefix = None
        for (key, offset, length) in memos_for_retrieval:
            if active_prefix == key[:-1]:
                pending.append((offset, length))
            else:
                if active_prefix is not None:
                    grouped.append((active_prefix, pending))
                active_prefix = key[:-1]
                pending = [(offset, length)]
        # Flush the final group.
        if active_prefix is not None:
            grouped.append((active_prefix, pending))
        for prefix, read_vector in grouped:
            path = self._mapper.map(prefix) + '.knit'
            for pos, data in self._transport.readv(path, read_vector):
                yield data
3281 |
class _DirectPackAccess(object): |
|
3282 |
"""Access to data in one or more packs with less translation."""
|
|
3283 |
||
def __init__(self, index_to_packs, reload_func=None, flush_func=None):
    """Create a _DirectPackAccess object.

    :param index_to_packs: A dict mapping index objects to the transport
        and file names for obtaining data.
    :param reload_func: A function to call if we determine that the pack
        files have moved and we need to reload our caches. See
        bzrlib.repo_fmt.pack_repo.AggregateIndex for more details.
    :param flush_func: Optional callable invoked by flush() to push any
        buffered pack writes to disk.
    """
    # No pack is being written when the access object is created; the
    # writer and its index start out unset.
    self._container_writer = None
    self._write_index = None
    self._indices = index_to_packs
    self._reload_func = reload_func
    self._flush_func = flush_func
def add_raw_records(self, key_sizes, raw_data):
    """Add raw knit bytes to a storage area.

    The data is spooled to the container writer in one bytes-record per
    raw data item.

    :param key_sizes: An iterable of tuples containing the key and size of
        each raw data segment.
    :param raw_data: A bytestring containing the data.
    :return: A list of memos to retrieve the record later. Each memo is an
        opaque index memo. For _DirectPackAccess the memo is (index, pos,
        length), where the index field is the write_index object supplied
        to the PackAccess object.
    """
    if type(raw_data) is not str:
        raise AssertionError(
            'data must be plain bytes was %s' % type(raw_data))
    memos = []
    cursor = 0
    for key, size in key_sizes:
        segment = raw_data[cursor:cursor + size]
        p_offset, p_length = self._container_writer.add_bytes_record(
            segment, [])
        cursor += size
        memos.append((self._write_index, p_offset, p_length))
    return memos
def flush(self):
    """Flush pending writes on this access object.

    This will flush any buffered writes to a NewPack.
    """
    flush_func = self._flush_func
    if flush_func is not None:
        flush_func()
2592.3.66
by Robert Collins
Allow adaption of KnitData to pack files. |
3333 |
    def get_raw_records(self, memos_for_retrieval):
        """Get the raw bytes for a records.

        :param memos_for_retrieval: An iterable containing the (index, pos,
            length) memo for retrieving the bytes. The Pack access method
            looks up the pack to use for a given record in its index_to_pack
            map.
        :return: An iterator over the bytes of the records.
        :raises errors.RetryWithNewPacks: if an index or pack file has gone
            missing and a _reload_func is available to recover with.
        """
        # first pass, group into same-index requests
        # NOTE: only *consecutive* memos that share an index are merged into a
        # single readv request; callers should sort memos by index for best
        # grouping.
        request_lists = []
        current_index = None
        for (index, offset, length) in memos_for_retrieval:
            if current_index == index:
                current_list.append((offset, length))
            else:
                if current_index is not None:
                    request_lists.append((current_index, current_list))
                current_index = index
                current_list = [(offset, length)]
        # handle the last entry
        if current_index is not None:
            request_lists.append((current_index, current_list))
        for index, offsets in request_lists:
            try:
                transport, path = self._indices[index]
            except KeyError:
                # A KeyError here indicates that someone has triggered an index
                # reload, and this index has gone missing, we need to start
                # over.
                if self._reload_func is None:
                    # If we don't have a _reload_func there is nothing that can
                    # be done
                    raise
                raise errors.RetryWithNewPacks(index,
                                               reload_occurred=True,
                                               exc_info=sys.exc_info())
            try:
                reader = pack.make_readv_reader(transport, path, offsets)
                for names, read_func in reader.iter_records():
                    yield read_func(None)
            except errors.NoSuchFile:
                # A NoSuchFile error indicates that a pack file has gone
                # missing on disk, we need to trigger a reload, and start over.
                if self._reload_func is None:
                    raise
                raise errors.RetryWithNewPacks(transport.abspath(path),
                                               reload_occurred=False,
                                               exc_info=sys.exc_info())
2592.3.66
by Robert Collins
Allow adaption of KnitData to pack files. |
3382 |
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
3383 |
def set_writer(self, writer, index, transport_packname): |
2592.3.70
by Robert Collins
Allow setting a writer after creating a knit._PackAccess object. |
3384 |
"""Set a writer to use for adding data."""
|
2592.3.208
by Robert Collins
Start refactoring the knit-pack thunking to be clearer. |
3385 |
if index is not None: |
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
3386 |
self._indices[index] = transport_packname |
3387 |
self._container_writer = writer |
|
3388 |
self._write_index = index |
|
1684.3.3
by Robert Collins
Add a special cased weaves to knit converter. |
3389 |
|
3789.2.5
by John Arbash Meinel
Change _DirectPackAccess to only raise Retry when _reload_func is defined. |
3390 |
    def reload_or_raise(self, retry_exc):
        """Try calling the reload function, or re-raise the original exception.

        This should be called after _DirectPackAccess raises a
        RetryWithNewPacks exception. This function will handle the common logic
        of determining when the error is fatal versus being temporary.
        It will also make sure that the original exception is raised, rather
        than the RetryWithNewPacks exception.

        If this function returns, then the calling function should retry
        whatever operation was being performed. Otherwise an exception will
        be raised.

        :param retry_exc: A RetryWithNewPacks exception.
        """
        is_error = False
        if self._reload_func is None:
            # No way to recover: re-raise the original error below.
            is_error = True
        elif not self._reload_func():
            # The reload claimed that nothing changed
            if not retry_exc.reload_occurred:
                # If there wasn't an earlier reload, then we really were
                # expecting to find changes. We didn't find them, so this is a
                # hard error
                is_error = True
        if is_error:
            # Re-raise the *original* exception with its original traceback
            # (three-argument raise; Python 2 syntax).
            exc_class, exc_value, exc_traceback = retry_exc.exc_info
            raise exc_class, exc_value, exc_traceback
|
3789.2.5
by John Arbash Meinel
Change _DirectPackAccess to only raise Retry when _reload_func is defined. |
3418 |
|
1684.3.3
by Robert Collins
Add a special cased weaves to knit converter. |
3419 |
|
2781.1.1
by Martin Pool
merge cpatiencediff from Lukas |
3420 |
# Deprecated alias kept for backwards compatibility; use
# patiencediff.PatienceSequenceMatcher directly instead.
KnitSequenceMatcher = patiencediff.PatienceSequenceMatcher
|
2484.1.1
by John Arbash Meinel
Add an initial function to read knit indexes in pyrex. |
3422 |
|
3423 |
||
2770.1.2
by Aaron Bentley
Convert to knit-only annotation |
3424 |
def annotate_knit(knit, revision_id):
    """Annotate a knit with no cached annotations.

    This implementation is for knits with no cached annotations.
    It will work for knits with cached annotations, but this is not
    recommended.

    :param knit: The versionedfile to annotate.
    :param revision_id: The revision to produce annotations for.
    :return: An iterator over the flat annotations for revision_id.
    """
    return iter(_KnitAnnotator(knit).annotate_flat(revision_id))
3224.1.7
by John Arbash Meinel
_StreamIndex also needs to return the proper values for get_build_details. |
3433 |
|
3434 |
||
4454.3.23
by John Arbash Meinel
Initial attempt at refactoring _KnitAnnotator to derive from Annotator. |
3435 |
class _KnitAnnotator(annotate.Annotator):
    """Build up the annotations for a text."""

    def __init__(self, vf):
        """Create an annotator for the given versionedfile.

        :param vf: The KnitVersionedFiles whose texts will be annotated.
        """
        annotate.Annotator.__init__(self, vf)

        # TODO: handle Nodes which cannot be extracted
        # self._ghosts = set()

        # Map from (key, parent_key) => matching_blocks, should be 'use once'
        self._matching_blocks = {}

        # KnitContent objects
        self._content_objects = {}
        # The number of children that depend on this fulltext content object
        self._num_compression_children = {}
        # Delta records that need their compression parent before they can be
        # expanded
        self._pending_deltas = {}
        # Fulltext records that are waiting for their parents fulltexts before
        # they can be yielded for annotation
        self._pending_annotation = {}

        # key => build details, filled in as get_build_details results arrive
        self._all_build_details = {}

    def _get_build_graph(self, key):
        """Get the graphs for building texts and annotations.

        The data you need for creating a full text may be different than the
        data you need to annotate that text. (At a minimum, you need both
        parents to create an annotation, but only need 1 parent to generate the
        fulltext.)

        :return: A tuple (records, ann_keys). records is a list of
            (key, index_memo) pairs, suitable for passing to
            read_records_iter to start reading in the raw data from the pack
            file, in reversed read order (parents before children). ann_keys
            is a set of keys whose text is already cached and only needs to
            be yielded for annotation.
        :raises errors.RevisionNotPresent: if a needed key has no build
            details and no cached text.
        """
        pending = set([key])
        records = []
        ann_keys = set()
        self._num_needed_children[key] = 1
        while pending:
            # get all pending nodes
            this_iteration = pending
            build_details = self._vf._index.get_build_details(this_iteration)
            self._all_build_details.update(build_details)
            # new_nodes = self._vf._index._get_entries(this_iteration)
            pending = set()
            for key, details in build_details.iteritems():
                (index_memo, compression_parent, parent_keys,
                 record_details) = details
                self._parent_map[key] = parent_keys
                # Any change to the parent map invalidates the cached heads
                # provider.
                self._heads_provider = None
                records.append((key, index_memo))
                # Do we actually need to check _annotated_lines?
                pending.update([p for p in parent_keys
                                   if p not in self._all_build_details])
                if parent_keys:
                    # Count how many children wait on each parent's fulltext.
                    for parent_key in parent_keys:
                        if parent_key in self._num_needed_children:
                            self._num_needed_children[parent_key] += 1
                        else:
                            self._num_needed_children[parent_key] = 1
                if compression_parent:
                    # Count how many deltas build on each compression parent.
                    if compression_parent in self._num_compression_children:
                        self._num_compression_children[compression_parent] += 1
                    else:
                        self._num_compression_children[compression_parent] = 1

            missing_versions = this_iteration.difference(build_details.keys())
            if missing_versions:
                for key in missing_versions:
                    if key in self._parent_map and key in self._text_cache:
                        # We already have this text ready, we just need to
                        # yield it later so we get it annotated
                        ann_keys.add(key)
                        parent_keys = self._parent_map[key]
                        for parent_key in parent_keys:
                            if parent_key in self._num_needed_children:
                                self._num_needed_children[parent_key] += 1
                            else:
                                self._num_needed_children[parent_key] = 1
                        pending.update([p for p in parent_keys
                                           if p not in self._all_build_details])
                    else:
                        raise errors.RevisionNotPresent(key, self._vf)
        # Generally we will want to read the records in reverse order, because
        # we find the parent nodes after the children
        records.reverse()
        return records, ann_keys

    def _get_needed_texts(self, key, pb=None):
        """Yield (sub_key, text, num_lines) for key and everything it needs.

        Falls back to the generic Annotator path when fallback vfs are
        configured; otherwise reads records directly from the pack data,
        retrying via reload_or_raise when packs are repacked concurrently.

        :param pb: Optional progress bar, updated per extracted text.
        """
        # if True or len(self._vf._fallback_vfs) > 0:
        if len(self._vf._fallback_vfs) > 0:
            # If we have fallbacks, go to the generic path
            for v in annotate.Annotator._get_needed_texts(self, key, pb=pb):
                yield v
            return
        while True:
            try:
                records, ann_keys = self._get_build_graph(key)
                for idx, (sub_key, text, num_lines) in enumerate(
                                                self._extract_texts(records)):
                    if pb is not None:
                        pb.update('annotating', idx, len(records))
                    yield sub_key, text, num_lines
                for sub_key in ann_keys:
                    text = self._text_cache[sub_key]
                    num_lines = len(text) # bad assumption
                    yield sub_key, text, num_lines
                return
            except errors.RetryWithNewPacks, e:
                self._vf._access.reload_or_raise(e)
                # The cached build_details are no longer valid
                self._all_build_details.clear()

    def _cache_delta_blocks(self, key, compression_parent, delta, lines):
        """Cache the matching blocks implied by a line delta.

        Stored under (key, compression_parent) in self._matching_blocks so
        _get_parent_annotations_and_matches can reuse them (once) instead of
        re-diffing.
        """
        parent_lines = self._text_cache[compression_parent]
        blocks = list(KnitContent.get_line_delta_blocks(delta, parent_lines, lines))
        self._matching_blocks[(key, compression_parent)] = blocks

    def _expand_record(self, key, parent_keys, compression_parent, record,
                       record_details):
        """Expand a raw record into its list of text lines.

        :return: The list of lines for key, or None when the record is a
            delta whose compression parent has not been expanded yet (in
            which case it is queued in self._pending_deltas).
        """
        delta = None
        if compression_parent:
            if compression_parent not in self._content_objects:
                # Waiting for the parent
                self._pending_deltas.setdefault(compression_parent, []).append(
                    (key, parent_keys, record, record_details))
                return None
            # We have the basis parent, so expand the delta
            num = self._num_compression_children[compression_parent]
            num -= 1
            if num == 0:
                # Last child: the parent content is no longer needed.
                base_content = self._content_objects.pop(compression_parent)
                self._num_compression_children.pop(compression_parent)
            else:
                self._num_compression_children[compression_parent] = num
                base_content = self._content_objects[compression_parent]
            # It is tempting to want to copy_base_content=False for the last
            # child object. However, whenever noeol=False,
            # self._text_cache[parent_key] is content._lines. So mutating it
            # gives very bad results.
            # The alternative is to copy the lines into text cache, but then we
            # are copying anyway, so just do it here.
            content, delta = self._vf._factory.parse_record(
                key, record, record_details, base_content,
                copy_base_content=True)
        else:
            # Fulltext record
            content, _ = self._vf._factory.parse_record(
                key, record, record_details, None)
        # Only keep the content object if some delta still builds on it.
        if self._num_compression_children.get(key, 0) > 0:
            self._content_objects[key] = content
        lines = content.text()
        self._text_cache[key] = lines
        if delta is not None:
            self._cache_delta_blocks(key, compression_parent, delta, lines)
        return lines

    def _get_parent_annotations_and_matches(self, key, text, parent_key):
        """Get the list of annotations for the parent, and the matching lines.

        :param text: The opaque value given by _get_needed_texts
        :param parent_key: The key for the parent text
        :return: (parent_annotations, matching_blocks)
            parent_annotations is a list as long as the number of lines in
                parent
            matching_blocks is a list of (parent_idx, text_idx, len) tuples
                indicating which lines match between the two texts
        """
        # Reuse blocks cached by _cache_delta_blocks if present ('use once':
        # popped here).
        block_key = (key, parent_key)
        if block_key in self._matching_blocks:
            blocks = self._matching_blocks.pop(block_key)
            parent_annotations = self._annotations_cache[parent_key]
            return parent_annotations, blocks
        return annotate.Annotator._get_parent_annotations_and_matches(self,
            key, text, parent_key)

    def _process_pending(self, key):
        """The content for 'key' was just processed.

        Determine if there is any more pending work to be processed.

        :return: A list of child keys that are now ready to be yielded for
            annotation.
        """
        to_return = []
        if key in self._pending_deltas:
            compression_parent = key
            children = self._pending_deltas.pop(key)
            for child_key, parent_keys, record, record_details in children:
                lines = self._expand_record(child_key, parent_keys,
                                            compression_parent,
                                            record, record_details)
                if self._check_ready_for_annotations(child_key, parent_keys):
                    to_return.append(child_key)
        # Also check any children that are waiting for this parent to be
        # annotation ready
        if key in self._pending_annotation:
            children = self._pending_annotation.pop(key)
            to_return.extend([c for c, p_keys in children
                              if self._check_ready_for_annotations(c, p_keys)])
        return to_return

    def _check_ready_for_annotations(self, key, parent_keys):
        """return true if this text is ready to be yielded.

        Otherwise, this will return False, and queue the text into
        self._pending_annotation
        """
        for parent_key in parent_keys:
            if parent_key not in self._annotations_cache:
                # still waiting on at least one parent text, so queue it up
                # Note that if there are multiple parents, we need to wait
                # for all of them.
                self._pending_annotation.setdefault(parent_key,
                    []).append((key, parent_keys))
                return False
        return True

    def _extract_texts(self, records):
        """Extract the various texts needed based on records"""
        # We iterate in the order read, rather than a strict order requested
        # However, process what we can, and put off to the side things that
        # still need parents, cleaning them up when those parents are
        # processed.
        # Basic data flow:
        # 1) As 'records' are read, see if we can expand these records into
        #    Content objects (and thus lines)
        # 2) If a given line-delta is waiting on its compression parent, it
        #    gets queued up into self._pending_deltas, otherwise we expand
        #    it, and put it into self._text_cache and self._content_objects
        # 3) If we expanded the text, we will then check to see if all
        #    parents have also been processed. If so, this text gets yielded,
        #    else this record gets set aside into pending_annotation
        # 4) Further, if we expanded the text in (2), we will then check to
        #    see if there are any children in self._pending_deltas waiting to
        #    also be processed. If so, we go back to (2) for those
        # 5) Further again, if we yielded the text, we can then check if that
        #    'unlocks' any of the texts in pending_annotations, which should
        #    then get yielded as well
        # Note that both steps 4 and 5 are 'recursive' in that unlocking one
        # compression child could unlock yet another, and yielding a fulltext
        # will also 'unlock' the children that are waiting on that annotation.
        # (Though also, unlocking 1 parent's fulltext, does not unlock a child
        # if other parents are also waiting.)
        # We want to yield content before expanding child content objects, so
        # that we know when we can re-use the content lines, and the annotation
        # code can know when it can stop caching fulltexts, as well.

        # Children that are missing their compression parent
        pending_deltas = {}
        for (key, record, digest) in self._vf._read_records_iter(records):
            # ghosts?
            details = self._all_build_details[key]
            (_, compression_parent, parent_keys, record_details) = details
            lines = self._expand_record(key, parent_keys, compression_parent,
                                        record, record_details)
            if lines is None:
                # Pending delta should be queued up
                continue
            # At this point, we may be able to yield this content, if all
            # parents are also finished
            yield_this_text = self._check_ready_for_annotations(key,
                                                                parent_keys)
            if yield_this_text:
                # All parents present
                yield key, lines, len(lines)
            # Drain everything this text unlocked, breadth-first.
            to_process = self._process_pending(key)
            while to_process:
                this_process = to_process
                to_process = []
                for key in this_process:
                    lines = self._text_cache[key]
                    yield key, lines, len(lines)
                    to_process.extend(self._process_pending(key))
|
3224.1.10
by John Arbash Meinel
Introduce the heads_provider for reannotate. |
3709 |
|
2484.1.1
by John Arbash Meinel
Add an initial function to read knit indexes in pyrex. |
3710 |
# Prefer the compiled (pyrex) knit-index parser; fall back to the pure-python
# implementation when the extension is unavailable, recording the failure.
try:
    from bzrlib._knit_load_data_pyx import _load_data_c as _load_data
except ImportError, e:
    osutils.failed_to_load_extension(e)
    from bzrlib._knit_load_data_py import _load_data_py as _load_data