    def _diff(self, text_diff, from_label, tree, to_label, to_entry, to_tree,
              output_to, reverse=False):
        """Perform a diff between two entries of the same kind."""

    def parent_candidates(self, previous_inventories):
        """Find possible per-file graph parents.

        This is currently defined by:
         - Select the last changed revision in the parent inventory.
         - To deal with a short-lived bug in bzr 0.8's development, two
           entries that have the same last changed revision but different
           'x' bit settings are changed in-place.
        """
        # revision:ie mapping for each ie found in previous_inventories.
        candidates = {}
        # identify candidate head revision ids.
        for inv in previous_inventories:
            if self.file_id in inv:
                ie = inv[self.file_id]
                assert ie.file_id == self.file_id
                if ie.kind != self.kind:
                    # Can't be a candidate if the kind has changed.
                    continue
                if ie.revision in candidates:
                    # same revision value in two different inventories:
                    # correct possible inconsistencies:
                    #     * there was a bug in revision updates with 'x' bit
                    #       support.
                    try:
                        if candidates[ie.revision].executable != ie.executable:
                            candidates[ie.revision].executable = False
                            ie.executable = False
                    except AttributeError:
                        pass
                else:
                    # add this revision as a candidate.
                    candidates[ie.revision] = ie
        return candidates
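
    # An illustrative sketch (not part of the original source) of how a
    # commit-time caller might use parent_candidates(); the names `file_id`
    # and `parent_invs` are hypothetical.
    #
    #   ie = parent_invs[0][file_id]
    #   candidates = ie.parent_candidates(parent_invs)
    #   # `candidates` maps a revision id to the InventoryEntry carrying it,
    #   # one entry for each distinct last-changed revision of this file
    #   # among the parent inventories.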

    @deprecated_method(zero_ninetyone)
    def find_previous_heads(self, previous_inventories,
                            versioned_file_store,
                            transaction,
                            entry_vf=None):
        """Return the revisions and entries that directly precede this.

        Returned as a map from revision to inventory entry.

        This is a map containing the file revisions in all parents
        for which the file exists, and its revision is not a parent of
        any other. If the file is new, the set will be empty.

        :param versioned_file_store: A store where ancestry data on this
                                     file id can be queried.
        :param transaction: The transaction under which queries to the
                            versioned file store should be completed.
        :param entry_vf: The entry versioned file, if it's already available.
        """
        candidates = self.parent_candidates(previous_inventories)

        # revision:ie mapping with one revision for each head.
        heads = {}
        # common case optimisation
        if len(candidates) == 1:
            # if there is only one candidate revision found
            # then we can avoid opening the versioned file to access ancestry:
            # there cannot be any ancestors to eliminate when there is
            # only one revision available.
            revision, ie = candidates.items()[0]
            heads[revision] = ie
            return heads

        # --- what follows is now encapsulated in repository.get_graph.heads(),
        #     but that is not accessible from here as we have no repository
        #     pointer. Note that the repository.get_graph.heads() call can return
        #     different results *at the moment* because of the kind-changing check
        #     we have in parent_candidates().
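        # A hedged sketch of the modern equivalent mentioned above, assuming
        # a `repository` object were available here (it is not):
        #   heads = repository.get_graph().heads(candidates.keys())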

        # eliminate ancestors amongst the available candidates:
        # heads are those that are not an ancestor of any other candidate
        # - this provides convergence at a per-file level.
        def get_ancestors(weave, entry):
            return set(weave.get_ancestry(entry.revision, topo_sorted=False))
        # revision: ancestor list for each head
        head_ancestors = {}
        for ie in candidates.values():
            # may be an ancestor of a known head:
            already_present = 0 != len(

    def snapshot(self, revision, path, previous_entries,
                 work_tree, commit_builder):
        """Make a snapshot of this entry which may or may not have changed.

        This means that all its fields are populated, that it has its
        text stored in the text store or weave.

        :return: True if anything was recorded
        """
        # cannot be unchanged unless there is only one parent file rev.
        self._read_tree_state(path, work_tree)
        if len(previous_entries) == 1:
            parent_ie = previous_entries.values()[0]
            if self._unchanged(parent_ie):
                self.revision = parent_ie.revision
                return
        self.revision = revision
        return self._snapshot_text(previous_entries, work_tree, commit_builder)
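
    # Illustrative sketch (not part of the original source): how a commit
    # builder might drive snapshot(); `builder`, `work_tree` and `rev_id`
    # are hypothetical names.
    #
    #   ie.snapshot(rev_id, 'dir/file.txt', previous_entries,
    #               work_tree, builder)
    #   # If the entry is unchanged relative to its single parent, the
    #   # parent's revision is reused and nothing new is recorded;
    #   # otherwise the entry is stamped with rev_id and its text is
    #   # recorded via _snapshot_text().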

    def _snapshot_text(self, file_parents, work_tree, commit_builder):
        """Record the 'text' of this entry, whatever form that takes.

        :return: True if anything was recorded
        """
        raise NotImplementedError(self._snapshot_text)

    def __eq__(self, other):
        if not isinstance(other, InventoryEntry):
            return NotImplemented

    def _forget_tree_state(self):
        self.text_sha1 = None

    def snapshot(self, revision, path, previous_entries,
                 work_tree, commit_builder):
        """See InventoryEntry.snapshot."""
        # Note: We use a custom implementation of this method for files
        # because it's a performance critical part of commit.

        # If this is the initial commit for this file, we know the sha is
        # coming later so skip calculating it now (in _read_tree_state())
        if len(previous_entries) == 0:
            self.executable = work_tree.is_executable(self.file_id, path=path)
        else:
            self._read_tree_state(path, work_tree)

        # If nothing is changed from the sole parent, there's nothing to do
        if len(previous_entries) == 1:
            parent_ie = previous_entries.values()[0]
            if self._unchanged(parent_ie):
                self.revision = parent_ie.revision
                return

        # Add the file to the repository
        self.revision = revision
        def get_content_byte_lines():
            return work_tree.get_file(self.file_id, path).readlines()
        self.text_sha1, self.text_size = commit_builder.modified_file_text(
            self.file_id, previous_entries, get_content_byte_lines,
            self.text_sha1, self.text_size)

    def _unchanged(self, previous_ie):
        """See InventoryEntry._unchanged."""
        compatible = super(InventoryFile, self)._unchanged(previous_ie)

        self.revision_id = revision_id

    def __repr__(self):
        return "<Inventory object at %x, contents=%r>" % (id(self), self._byid)

    def apply_delta(self, delta):
        """Apply a delta to this inventory.

        :param delta: A list of changes to apply. After all the changes are
            applied the final inventory must be internally consistent, but it
            is ok to supply changes which, if only half-applied, would have an
            invalid result - such as supplying two changes which rename two
            files, 'A' and 'B', with each other: [('A', 'B', 'A-id', a_entry),
            ('B', 'A', 'B-id', b_entry)].

            Each change is a tuple of the form (old_path, new_path, file_id,
            new_entry).

            When new_path is None, the change indicates the removal of an entry
            from the inventory and new_entry will be ignored (using None is
            appropriate). If new_path is not None, then new_entry must be an
            InventoryEntry instance, which will be incorporated into the
            inventory (and replace any existing entry with the same file id).

            When old_path is None, the change indicates the addition of
            a new entry to the inventory.

            When neither new_path nor old_path are None, the change is a
            modification to an entry, such as a rename, reparent, kind change,
            etc.

            The children attribute of new_entry is ignored. This is because
            this method preserves children automatically across alterations to
            the parent of the children, and cases where the parent id of a
            child is changing require the child to be passed in as a separate
            change regardless. E.g. in the recursive deletion of a directory -
            the directory's children must be included in the delta, or the
            final inventory will be invalid.
        """
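        # Illustrative delta (not from the original source): rename 'foo' to
        # 'bar' and remove 'old.txt' in a single application; `renamed_entry`
        # is a hypothetical InventoryEntry whose name is already 'bar'.
        #
        #   delta = [
        #       ('foo', 'bar', 'foo-id', renamed_entry),  # modification
        #       ('old.txt', None, 'old-id', None),        # removal
        #   ]
        #   inv.apply_delta(delta)
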
        children = {}
        # Remove all affected items which were in the original inventory,
        # starting with the longest paths, thus ensuring parents are examined
        # after their children, which means that everything we examine has no
        # modified children remaining by the time we examine it.
        for old_path, file_id in sorted(((op, f) for op, np, f, e in delta
                                         if op is not None), reverse=True):
            if file_id not in self:
                continue
            # Preserve unaltered children of file_id for later reinsertion.
            children[file_id] = getattr(self[file_id], 'children', {})
            # Remove file_id and the unaltered children. If file_id is not
            # being deleted it will be reinserted back later.
            self.remove_recursive_id(file_id)
        # Insert all affected entries which should be in the new inventory,
        # reattaching their children if they had any. This is done from
        # shortest path to longest, ensuring that items which were modified
        # and whose parents in the resulting inventory were also modified,
        # are inserted after their parents.
        for new_path, new_entry in sorted((np, e) for op, np, f, e in
                                          delta if np is not None):
            if new_entry.kind == 'directory':
                new_entry.children = children.get(new_entry.file_id, {})
            self.add(new_entry)

    def _set_root(self, ie):
        self.root = ie
        self._byid = {self.root.file_id: self.root}


def make_entry(kind, name, parent_id, file_id=None):
    if file_id is None:
        file_id = generate_ids.gen_file_id(name)
    else:
        file_id = osutils.safe_file_id(file_id)
    name = ensure_normalized_name(name)
    try:
        factory = entry_factory[kind]
    except KeyError:
        raise BzrError("unknown kind %r" % kind)
    return factory(file_id, name, parent_id)
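
# Illustrative usage (not from the original source) of make_entry(); `root_id`
# is a hypothetical parent directory id.
#
#   dir_ie = make_entry('directory', 'src', root_id)
#   file_ie = make_entry('file', 'hello.py', dir_ie.file_id)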


def ensure_normalized_name(name):
    """Normalize name.

    :raises InvalidNormalization: When name is not normalized, and cannot be
        accessed on this platform by the normalized path.
    :return: The NFC/NFKC normalised version of name.
    """
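    # Illustrative behaviour (not from the original source):
    #   ensure_normalized_name(u'caf\xe9')     # already NFC: returned as-is
    #   ensure_normalized_name(u'cafe\u0301')  # returns u'caf\xe9' when the
    #                                          # platform can access the
    #                                          # normalized path, otherwise
    #                                          # raises InvalidNormalization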
    #------- This has been copied to bzrlib.dirstate.DirState.add, please
    # keep them synchronised.
    # we don't import normalized_filename directly because we want to be