>>> i.add(InventoryDirectory('123', 'src', ROOT_ID))
InventoryDirectory('123', 'src', parent_id='TREE_ROOT', revision=None)
>>> i.add(InventoryFile('2323', 'hello.c', parent_id='123'))
InventoryFile('2323', 'hello.c', parent_id='123', sha1=None, len=None)
>>> shouldbe = {0: '', 1: 'src', 2: 'src/hello.c'}
>>> for ix, j in enumerate(i.iter_entries()):
...   print (j[0] == shouldbe[ix], j[1])
(True, InventoryDirectory('TREE_ROOT', u'', parent_id=None, revision=None))
(True, InventoryDirectory('123', 'src', parent_id='TREE_ROOT', revision=None))
(True, InventoryFile('2323', 'hello.c', parent_id='123', sha1=None, len=None))
>>> i.add(InventoryFile('2323', 'bye.c', '123'))
Traceback (most recent call last):
...
BzrError: inventory already contains entry with id {2323}
>>> i.add(InventoryFile('2324', 'bye.c', '123'))
InventoryFile('2324', 'bye.c', parent_id='123', sha1=None, len=None)
>>> i.add(InventoryDirectory('2325', 'wibble', '123'))
InventoryDirectory('2325', 'wibble', parent_id='123', revision=None)
>>> i.path2id('src/wibble')
'2325'
>>> i.add(InventoryFile('2326', 'wibble.c', '2325'))
InventoryFile('2326', 'wibble.c', parent_id='2325', sha1=None, len=None)
>>> i['2326']
InventoryFile('2326', 'wibble.c', parent_id='2325', sha1=None, len=None)
>>> for path, entry in i.iter_entries():
...     print path.replace('\\\\', '/') # for win32 os.sep
...     assert i.path2id(path)
src/wibble/wibble.c
>>> i.id2path('2326')
'src/wibble/wibble.c'
# Constants returned by describe_change()
#
# TODO: These should probably move to some kind of FileChangeDescription
# class; that's like what's inside a TreeDelta but we want to be able to
# generate them just for one file at a time.
RENAMED = 'renamed'
MODIFIED_AND_RENAMED = 'modified and renamed'
__slots__ = ['text_sha1', 'text_size', 'file_id', 'name', 'kind',
             'text_id', 'parent_id', 'children', 'executable',
             'revision']

def _add_text_to_weave(self, new_lines, parents, weave_store, transaction):
    weave_store.add_text(self.file_id, self.revision, new_lines, parents,
                         transaction)

def detect_changes(self, old_entry):
    """Return a (text_modified, meta_modified) from this to old_entry.
def _diff(self, text_diff, from_label, tree, to_label, to_entry, to_tree,
          output_to, reverse=False):
    """Perform a diff between two entries of the same kind."""

def parent_candidates(self, previous_inventories):
    """Find possible per-file graph parents.

    This is currently defined by:
     - Select the last changed revision in the parent inventory.
     - To deal with a short-lived bug in bzr 0.8's development, two entries
       that have the same last-changed revision but different 'x' bit
       settings are changed in-place.
    """
    # revision:ie mapping for each ie found in previous_inventories.
    candidates = {}
    # identify candidate head revision ids.
    for inv in previous_inventories:
        if self.file_id in inv:
            ie = inv[self.file_id]
            assert ie.file_id == self.file_id
            if ie.revision in candidates:
                # same revision value in two different inventories:
                # correct possible inconsistencies:
                # * there was a bug in revision updates with 'x' bit
                #   support.
                try:
                    if candidates[ie.revision].executable != ie.executable:
                        candidates[ie.revision].executable = False
                        ie.executable = False
                except AttributeError:
                    pass
                # must now be the same.
                assert candidates[ie.revision] == ie
            else:
                # add this revision as a candidate.
                candidates[ie.revision] = ie
    return candidates
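
# Illustrative sketch (not part of bzrlib): how parent_candidates() is
# typically fed.  It assumes only the classes defined in this module;
# 'f-id', 'rev-a' and 'rev-b' are made-up ids.  Two parent inventories carry
# the same file with different last-changed revisions, so both entries end
# up as candidates.
def _parent_candidates_example():
    inv_a = Inventory()
    ie_a = inv_a.add(InventoryFile('f-id', 'hello.c', ROOT_ID))
    ie_a.revision = 'rev-a'
    inv_b = Inventory()
    ie_b = inv_b.add(InventoryFile('f-id', 'hello.c', ROOT_ID))
    ie_b.revision = 'rev-b'
    candidates = ie_b.parent_candidates([inv_a, inv_b])
    # Maps each distinct last-changed revision to its entry; picking the
    # actual heads among these candidates is a separate, later step.
    assert sorted(candidates.keys()) == ['rev-a', 'rev-b']
    return candidates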
@deprecated_method(symbol_versioning.zero_ninetyone)
def find_previous_heads(self, previous_inventories,
                        versioned_file_store,
                        transaction,
                        entry_vf=None):
    """Return the revisions and entries that directly precede this.

    Returned as a map from revision to inventory entry.

    This is a map containing the file revisions in all parents
    for which the file exists, and its revision is not a parent of
    any other. If the file is new, the set will be empty.

    :param versioned_file_store: A store where ancestry data on this
        file id can be queried.
    :param transaction: The transaction that queries to the versioned
        file store should be completed under.
    :param entry_vf: The entry versioned file, if it's already available.
    """
    candidates = self.parent_candidates(previous_inventories)

    # revision:ie mapping with one revision for each head.
    heads = {}
    # common case optimisation
    if len(candidates) == 1:
        # if there is only one candidate revision found
        # then we can avoid opening the versioned file to access ancestry:
        # there cannot be any ancestors to eliminate when there is
        # only one revision available.
        return candidates

    # --- what follows is now encapsulated in repository.get_graph.heads(),
    #     but that is not accessible from here as we have no repository
    #     pointer. Note that the repository.get_graph.heads() call can return
    #     different results *at the moment* because of the kind-changing check
    #     we have in parent_candidates().

    # eliminate ancestors amongst the available candidates:
    # heads are those that are not an ancestor of any other candidate
    # - this provides convergence at a per-file level.
    def get_ancestors(weave, entry):
        return set(weave.get_ancestry(entry.revision, topo_sorted=False))
    # revision: ancestor list for each head
    head_ancestors = {}
    for ie in candidates.values():
        # may be an ancestor of a known head:
        already_present = 0 != len(
            [head for head in heads
             if ie.revision in head_ancestors[head]])
        if already_present:
            # an ancestor of an analyzed candidate.
            continue
        # not an ancestor of a known head:
        # load the versioned file for this file id if needed
        if entry_vf is None:
            entry_vf = versioned_file_store.get_weave_or_empty(
                self.file_id, transaction)
        ancestors = get_ancestors(entry_vf, ie)
        # may knock something else out:
        check_heads = list(heads.keys())
        for head in check_heads:
            if head in ancestors:
                # this previously discovered 'head' is not
                # really a head - it's an ancestor of the newly
                # found head.
                heads.pop(head)
        head_ancestors[ie.revision] = ancestors
        heads[ie.revision] = ie
    return heads
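
# Illustrative sketch (not part of bzrlib): the ancestor-elimination idea
# used above, shown on plain dicts and sets instead of a weave.  'ancestry'
# maps each candidate revision to the set of revisions it descends from; a
# candidate is a head only if no other candidate has it in its ancestry.
def _select_heads_example():
    ancestry = {
        'rev-a': set(['rev-a']),                    # original revision
        'rev-b': set(['rev-b', 'rev-a']),           # descends from rev-a
        'rev-c': set(['rev-c', 'rev-a']),           # also descends from rev-a
    }
    heads = {}
    for rev, ancestors in ancestry.items():
        if any(rev in ancestry[other] for other in heads):
            continue                                # ancestor of a known head
        # knock out previously found heads that this revision supersedes
        for other in list(heads):
            if other in ancestors:
                del heads[other]
        heads[rev] = ancestors
    return sorted(heads)                            # ['rev-b', 'rev-c']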
def get_tar_item(self, root, dp, now, tree):
    """Get a tarfile item and a file stream for its content."""
    item = tarfile.TarInfo(osutils.pathjoin(root, dp).encode('utf8'))
    # TODO: would be cool to actually set it to the timestamp of the
    # revision it was last changed
    This is a template method - implement _put_on_disk in subclasses.

    fullpath = osutils.pathjoin(dest, dp)
    self._put_on_disk(fullpath, tree)
    # mutter(" export {%s} kind %s to %s", self.file_id,
    #        self.kind, fullpath)
def _put_on_disk(self, fullpath, tree):
    """Put this entry onto disk at fullpath, from tree tree."""
    raise BzrError("don't know how to export {%s} of kind %r" %
                   (self.file_id, self.kind))
def sorted_children(self):
    return sorted(self.children.items())
@staticmethod
def versionable_kind(kind):
    return (kind in ('file', 'directory', 'symlink', 'tree-reference'))
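
# Illustrative sketch (not part of bzrlib): versionable_kind() simply filters
# the kinds that can be versioned; anything else (sockets, fifos, devices)
# is skipped rather than added to an inventory.  This assumes the method is
# exposed as a staticmethod on InventoryEntry, as in bzrlib.
def _versionable_kind_example():
    assert InventoryEntry.versionable_kind('file')
    assert InventoryEntry.versionable_kind('tree-reference')
    assert not InventoryEntry.versionable_kind('socket')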
def check(self, checker, rev_id, inv, tree):
    """Check this inventory entry is intact.

    This is a template method, override _check for kind specific
    tests.

    :param checker: Check object providing context for the checks;
        can be used to find out what parts of the repository have already
        been checked.
    :param rev_id: Revision id from which this InventoryEntry was loaded.
        Not necessarily the last-changed revision for this file.
    :param inv: Inventory from which the entry was loaded.
    :param tree: RevisionTree for this entry.
    """
    if self.parent_id is not None:
        if not inv.has_id(self.parent_id):
            raise BzrCheckError('missing parent {%s} in inventory for revision {%s}'
                                % (self.parent_id, rev_id))
    self._check(checker, rev_id, tree)

def _check(self, checker, rev_id, tree):
    """Check this inventory entry for kind specific errors."""
    raise BzrCheckError('unknown entry kind %r in revision {%s}' %
                        (self.kind, rev_id))
def copy(self):
    """Clone this inventory entry."""
    raise NotImplementedError

def _get_snapshot_change(self, previous_entries):
    if len(previous_entries) > 1:
        return 'merged'
    elif len(previous_entries) == 0:
        return 'added'
    else:
        return 'modified/renamed/reparented'

@staticmethod
def describe_change(old_entry, new_entry):
    """Describe the change between old_entry and this.

    This smells of being an InterInventoryEntry situation, but as it's
    the first one, we're making it a static method for now.

    An entry with a different parent, or different name is considered
    to be renamed. Reparenting is an internal detail.
    Note that renaming the parent does not trigger a rename for the
    child entry itself.
    """
    # TODO: Perhaps return an object rather than just a string
    if old_entry is new_entry:
        # also the case of both being None
        return 'unchanged'
    elif old_entry is None:
        return 'added'
    elif new_entry is None:
        return 'removed'
    if old_entry.kind != new_entry.kind:
        return 'modified'
    text_modified, meta_modified = new_entry.detect_changes(old_entry)
    if text_modified or meta_modified:
        modified = True
    else:
        modified = False
    # TODO 20060511 (mbp, rbc) factor out 'detect_rename' here.
    if old_entry.parent_id != new_entry.parent_id:
        renamed = True
    elif old_entry.name != new_entry.name:
        renamed = True
    else:
        renamed = False
    if renamed and not modified:
        return InventoryEntry.RENAMED
    if modified and not renamed:
        return 'modified'
    if modified and renamed:
        return InventoryEntry.MODIFIED_AND_RENAMED
    return 'unchanged'
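
# Illustrative sketch (not part of bzrlib): describe_change() compares two
# snapshots of the same file id.  It assumes describe_change is a staticmethod
# on InventoryEntry and that InventoryFile.detect_changes() compares the
# sha1/executable fields, as in bzrlib; 'f-id' and the names are made up.
def _describe_change_example():
    old = InventoryFile('f-id', 'hello.c', ROOT_ID)
    new = InventoryFile('f-id', 'hello.txt', ROOT_ID)
    # Same text and metadata, different name: a pure rename.
    old.text_sha1 = new.text_sha1 = 'x' * 40
    old.text_size = new.text_size = 12
    old.executable = new.executable = False
    assert InventoryEntry.describe_change(old, new) == InventoryEntry.RENAMED
    assert InventoryEntry.describe_change(None, new) == 'added'
    assert InventoryEntry.describe_change(old, None) == 'removed'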
def __repr__(self):
    return ("%s(%r, %r, parent_id=%r, revision=%r)"
            % (self.__class__.__name__,
               self.file_id,
               self.name,
               self.parent_id,
               self.revision))
def snapshot(self, revision, path, previous_entries,
             work_tree, weave_store, transaction):
    """Make a snapshot of this entry which may or may not have changed.

    This means that all its fields are populated, that it has its
    text stored in the text store or weave.
    """
    mutter('new parents of %s are %r', path, previous_entries)
    self._read_tree_state(path, work_tree)
    if len(previous_entries) == 1:
        # cannot be unchanged unless there is only one parent file rev.
        parent_ie = previous_entries.values()[0]
        if self._unchanged(parent_ie):
            mutter("found unchanged entry")
            self.revision = parent_ie.revision
            return "unchanged"
    return self.snapshot_revision(revision, previous_entries,
                                  work_tree, weave_store, transaction)

def snapshot_revision(self, revision, previous_entries, work_tree,
                      weave_store, transaction):
    """Record this revision unconditionally."""
    mutter('new revision for {%s}', self.file_id)
    self.revision = revision
    change = self._get_snapshot_change(previous_entries)
    self._snapshot_text(previous_entries, work_tree, weave_store,
                        transaction)
    return change

def _snapshot_text(self, file_parents, work_tree, weave_store, transaction):
    """Record the 'text' of this entry, whatever form that takes.

    This default implementation simply adds an empty text.
    """
    mutter('storing file {%s} in revision {%s}',
           self.file_id, self.revision)
    self._add_text_to_weave([], file_parents, weave_store, transaction)
def __eq__(self, other):
    if not isinstance(other, InventoryEntry):
        return NotImplemented


class InventoryFile(InventoryEntry):
    """A file in an inventory."""
    __slots__ = ['text_sha1', 'text_size', 'file_id', 'name', 'kind',
                 'text_id', 'parent_id', 'children', 'executable',
                 'revision', 'symlink_target', 'reference_revision']

    def _check(self, checker, tree_revision_id, tree):
        """See InventoryEntry._check"""
        t = (self.file_id, self.revision)
        if t in checker.checked_texts:
            prev_sha = checker.checked_texts[t]
            if prev_sha != self.text_sha1:
                raise BzrCheckError(
                    'mismatched sha1 on {%s} in {%s} (%s != %s) %r' %
                    (self.file_id, tree_revision_id, prev_sha, self.text_sha1,
                     t))
            else:
                checker.repeated_text_cnt += 1
                return

        if self.file_id not in checker.checked_weaves:
            mutter('check weave {%s}', self.file_id)
            w = tree._get_weave(self.file_id)
            # Not passing a progress bar, because it creates a new
            # progress, which overwrites the current progress,
            # and doesn't look nice
            w.check()
            checker.checked_weaves[self.file_id] = True
        else:
            w = tree._get_weave(self.file_id)

        mutter('check version {%s} of {%s}', tree_revision_id, self.file_id)
        checker.checked_text_cnt += 1
        # We can't check the length, because Weave doesn't store that
        # information, and the whole point of looking at the weave's
        # sha1sum is that we don't have to extract the text.
        if self.text_sha1 != w.get_sha1(self.revision):
            raise BzrCheckError('text {%s} version {%s} wrong sha1'
                                % (self.file_id, self.revision))
        checker.checked_texts[t] = self.text_sha1
    def _put_on_disk(self, fullpath, tree):
        """See InventoryEntry._put_on_disk."""
        osutils.pumpfile(tree.get_file(self.file_id), file(fullpath, 'wb'))
        if tree.is_executable(self.file_id):
            os.chmod(fullpath, 0755)
    def _read_tree_state(self, path, work_tree):
        """See InventoryEntry._read_tree_state."""
        self.text_sha1 = work_tree.get_file_sha1(self.file_id, path=path)
        # FIXME: 20050930 probe for the text size when getting sha1
        # in _read_tree_state
        self.executable = work_tree.is_executable(self.file_id, path=path)

    def _snapshot_text(self, file_parents, work_tree, weave_store, transaction):
        """See InventoryEntry._snapshot_text."""
        mutter('storing file {%s} in revision {%s}',
               self.file_id, self.revision)
        # special case to avoid diffing on renames or
        # reparenting
        if (len(file_parents) == 1
            and self.text_sha1 == file_parents.values()[0].text_sha1
            and self.text_size == file_parents.values()[0].text_size):
            previous_ie = file_parents.values()[0]
            weave_store.add_identical_text(
                self.file_id, previous_ie.revision,
                self.revision, file_parents, transaction)
        else:
            new_lines = work_tree.get_file(self.file_id).readlines()
            self._add_text_to_weave(new_lines, file_parents, weave_store,
                                    transaction)
            self.text_sha1 = sha_strings(new_lines)
            self.text_size = sum(map(len, new_lines))

    def __repr__(self):
        return ("%s(%r, %r, parent_id=%r, sha1=%r, len=%s)"
                % (self.__class__.__name__,
                   self.file_id,
                   self.name,
                   self.parent_id,
                   self.text_sha1,
                   self.text_size))

    def _forget_tree_state(self):
        self.text_sha1 = None

    def _unchanged(self, previous_ie):
        """See InventoryEntry._unchanged."""
    The inventory is created with a default root directory, with

    if root_id is not None:
        assert root_id.__class__ == str
        self._set_root(InventoryDirectory(root_id, u'', None))
    else:
        self.root = None
        self._byid = {}
    self.revision_id = revision_id

def __repr__(self):
    return "<Inventory object at %x, contents=%r>" % (id(self), self._byid)
def apply_delta(self, delta):
    """Apply a delta to this inventory.

    :param delta: A list of changes to apply. After all the changes are
        applied the final inventory must be internally consistent, but it
        is ok to supply changes which, if only half-applied would have an
        invalid result - such as supplying two changes which rename two
        files, 'A' and 'B' with each other : [('A', 'B', 'A-id', a_entry),
        ('B', 'A', 'B-id', b_entry)].

        Each change is a tuple, of the form (old_path, new_path, file_id,
        new_entry).

        When new_path is None, the change indicates the removal of an entry
        from the inventory and new_entry will be ignored (using None is
        appropriate). If new_path is not None, then new_entry must be an
        InventoryEntry instance, which will be incorporated into the
        inventory (and replace any existing entry with the same file id).

        When old_path is None, the change indicates the addition of
        a new entry to the inventory.

        When neither new_path nor old_path are None, the change is a
        modification to an entry, such as a rename, reparent, kind change
        or content change.

        The children attribute of new_entry is ignored. This is because
        this method preserves children automatically across alterations to
        the parent of the children, and cases where the parent id of a
        child is changing require the child to be passed in as a separate
        change regardless. E.g. in the recursive deletion of a directory -
        the directory's children must be included in the delta, or the
        final inventory will be invalid.
    """
    children = {}
    # Remove all affected items which were in the original inventory,
    # starting with the longest paths, thus ensuring parents are examined
    # after their children, which means that everything we examine has no
    # modified children remaining by the time we examine it.
    for old_path, file_id in sorted(((op, f) for op, np, f, e in delta
                                     if op is not None), reverse=True):
        if file_id not in self:
            continue
        # Preserve unaltered children of file_id for later reinsertion.
        children[file_id] = getattr(self[file_id], 'children', {})
        # Remove file_id and the unaltered children. If file_id is not
        # being deleted it will be reinserted back later.
        self.remove_recursive_id(file_id)
    # Insert all affected which should be in the new inventory, reattaching
    # their children if they had any. This is done from shortest path to
    # longest, ensuring that items which were modified and whose parents in
    # the resulting inventory were also modified, are inserted after their
    # parents.
    for new_path, new_entry in sorted((np, e) for op, np, f, e in
                                      delta if np is not None):
        if new_entry.kind == 'directory':
            new_entry.children = children.get(new_entry.file_id, {})
        self.add(new_entry)
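
# Illustrative sketch (not part of bzrlib): building a small delta by hand
# and applying it.  The ids and names are made up; each change tuple is
# (old_path, new_path, file_id, new_entry) as documented above.
def _apply_delta_example():
    inv = Inventory('TREE_ROOT')
    inv.add(InventoryDirectory('src-id', 'src', 'TREE_ROOT'))
    inv.add(InventoryFile('hello-id', 'hello.c', 'src-id'))
    delta = [
        # rename src/hello.c to src/main.c (same file id, new entry object)
        ('src/hello.c', 'src/main.c', 'hello-id',
         InventoryFile('hello-id', 'main.c', 'src-id')),
        # add a brand new file
        (None, 'src/util.c', 'util-id',
         InventoryFile('util-id', 'util.c', 'src-id')),
    ]
    inv.apply_delta(delta)
    assert inv.path2id('src/main.c') == 'hello-id'
    assert inv.path2id('src/util.c') == 'util-id'
    assert inv.path2id('src/hello.c') is None
    return inv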
def _set_root(self, ie):
    self.root = ie
    self._byid = {self.root.file_id: self.root}

def copy(self):
    # TODO: jam 20051218 Should copy also copy the revision_id?
    entries = self.iter_entries()
    if self.root is None:
        return Inventory(root_id=None)
    other = Inventory(entries.next()[1].file_id)
    # copy recursively so we know directories will be added before
    # their children. There are more efficient ways than this...
    for path, entry in entries:
        other.add(entry.copy())
    return other
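
# Illustrative sketch (not part of bzrlib): copy() gives an independent
# Inventory with copied entries, so mutating the copy leaves the original
# untouched.  Ids and names are made up.
def _copy_example():
    inv = Inventory('TREE_ROOT')
    inv.add(InventoryFile('f-id', 'hello.c', 'TREE_ROOT'))
    dup = inv.copy()
    del dup['f-id']
    assert 'f-id' not in dup
    assert 'f-id' in inv
    return dup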
def __iter__(self):
    return iter(self._byid)

def __len__(self):
    """Returns number of entries."""
    return len(self._byid)
def iter_entries(self, from_dir=None):
    """Return (path, entry) pairs, in order by name."""
    if from_dir is None:
        if self.root is None:
            return
        from_dir = self.root
        yield '', self.root
    elif isinstance(from_dir, basestring):
        from_dir = self._byid[from_dir]

    # unrolling the recursive calls changed the time from
    # 440ms/663ms (inline/total) to 116ms/116ms
    children = from_dir.children.items()
    children.sort()
    children = collections.deque(children)
    stack = [(u'', children)]
    while stack:
        from_dir_relpath, children = stack[-1]

        while children:
            name, ie = children.popleft()

            # we know that from_dir_relpath never ends in a slash
            # and 'f' doesn't begin with one, we can do a string op, rather
            # than the checks of pathjoin(), though this means that all paths
            # will have slashes in them
            path = from_dir_relpath + '/' + name

            yield path[1:], ie

            if ie.kind != 'directory':
                continue

            # But do this child first
            new_children = ie.children.items()
            new_children.sort()
            new_children = collections.deque(new_children)
            stack.append((path, new_children))
            # Break out of inner loop, so that we start outer loop with child
            break
        else:
            # if we finished all children, pop it off the stack
            stack.pop()
def iter_entries_by_dir(self, from_dir=None, specific_file_ids=None,
1009
yield_parents=False):
1010
"""Iterate over the entries in a directory first order.
1012
This returns all entries for a directory before returning
1013
the entries for children of a directory. This is not
1014
lexicographically sorted order, and is a hybrid between
1015
depth-first and breadth-first.
1017
:param yield_parents: If True, yield the parents from the root leading
1018
down to specific_file_ids that have been requested. This has no
1019
impact if specific_file_ids is None.
1020
:return: This yields (path, entry) pairs
1022
if specific_file_ids and not isinstance(specific_file_ids, set):
1023
specific_file_ids = set(specific_file_ids)
1024
# TODO? Perhaps this should return the from_dir so that the root is
1025
# yielded? or maybe an option?
1026
if from_dir is None:
1027
if self.root is None:
1029
# Optimize a common case
1030
if (not yield_parents and specific_file_ids is not None and
1031
len(specific_file_ids) == 1):
1032
file_id = list(specific_file_ids)[0]
1034
yield self.id2path(file_id), self[file_id]
1036
from_dir = self.root
1037
if (specific_file_ids is None or yield_parents or
1038
self.root.file_id in specific_file_ids):
1039
yield u'', self.root
1040
elif isinstance(from_dir, basestring):
1041
from_dir = self._byid[from_dir]
1043
if specific_file_ids is not None:
1044
# TODO: jam 20070302 This could really be done as a loop rather
1045
# than a bunch of recursive calls.
1048
def add_ancestors(file_id):
1049
if file_id not in byid:
1051
parent_id = byid[file_id].parent_id
1052
if parent_id is None:
1054
if parent_id not in parents:
1055
parents.add(parent_id)
1056
add_ancestors(parent_id)
1057
for file_id in specific_file_ids:
1058
add_ancestors(file_id)
1062
stack = [(u'', from_dir)]
1064
cur_relpath, cur_dir = stack.pop()
1067
for child_name, child_ie in sorted(cur_dir.children.iteritems()):
1069
child_relpath = cur_relpath + child_name
1071
if (specific_file_ids is None or
1072
child_ie.file_id in specific_file_ids or
1073
(yield_parents and child_ie.file_id in parents)):
1074
yield child_relpath, child_ie
1076
if child_ie.kind == 'directory':
1077
if parents is None or child_ie.file_id in parents:
1078
child_dirs.append((child_relpath+'/', child_ie))
1079
stack.extend(reversed(child_dirs))
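
# Illustrative sketch (not part of bzrlib): directory-first iteration versus
# the plain by-name iteration above, and restricting the walk to a single
# file id.  Ids and names are made up.
def _iter_entries_by_dir_example():
    inv = Inventory('TREE_ROOT')
    inv.add(InventoryDirectory('src-id', 'src', 'TREE_ROOT'))
    inv.add(InventoryFile('a-id', 'a.c', 'src-id'))
    inv.add(InventoryFile('readme-id', 'README', 'TREE_ROOT'))
    all_paths = [path for path, entry in inv.iter_entries_by_dir()]
    # every entry of a directory is yielded before recursing into it
    assert all_paths == ['', 'README', 'src', 'src/a.c']
    only = [path for path, entry
            in inv.iter_entries_by_dir(specific_file_ids=['a-id'])]
    assert only == ['src/a.c']
    return all_paths, only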
def make_entry(self, kind, name, parent_id, file_id=None):
    """Simple thunk to bzrlib.inventory.make_entry."""
    return make_entry(kind, name, parent_id, file_id)
def entries(self):
    """Return list of (path, ie) for all entries except the root.

        for name, child_ie in kids:
            child_path = osutils.pathjoin(parent_path, name)
            descend(child_ie, child_path)
    descend(self.root, u'')
def __contains__(self, file_id):
    """True if this entry contains a file with given id.

    >>> inv = Inventory()
    >>> inv.add(InventoryFile('123', 'foo.c', ROOT_ID))
    InventoryFile('123', 'foo.c', parent_id='TREE_ROOT', sha1=None, len=None)
    >>> '123' in inv
    True
    >>> '456' in inv
    False
    """
    return (file_id in self._byid)
def __getitem__(self, file_id):
    """Return the entry for given file_id.

    >>> inv = Inventory()
    >>> inv.add(InventoryFile('123123', 'hello.c', ROOT_ID))
    InventoryFile('123123', 'hello.c', parent_id='TREE_ROOT', sha1=None, len=None)
    >>> inv['123123'].name
    'hello.c'
    """
    try:
        return self._byid[file_id]
    except KeyError:
        # really we're passing an inventory, not a tree...
        raise errors.NoSuchId(self, file_id)

def get_file_kind(self, file_id):
    return self._byid[file_id].kind
def add(self, entry):
    """Add entry to inventory.

    Returns the new entry object.
    """
    if entry.file_id in self._byid:
        raise errors.DuplicateFileId(entry.file_id,
                                     self._byid[entry.file_id])

    if entry.parent_id is None:
        assert self.root is None and len(self._byid) == 0
        self.root = entry
    else:
        try:
            parent = self._byid[entry.parent_id]
        except KeyError:
            raise BzrError("parent_id {%s} not in inventory" %
                           entry.parent_id)

        if entry.name in parent.children:
            raise BzrError("%s is already versioned" %
                           osutils.pathjoin(self.id2path(parent.file_id),
                                            entry.name).encode('utf-8'))
        parent.children[entry.name] = entry
    return self._add_child(entry)
def add_path(self, relpath, kind, file_id=None, parent_id=None):
    """Add entry from a path.

    The immediate parent must already be versioned.

    Returns the new entry object."""

    parts = osutils.splitpath(relpath)

    if len(parts) == 0:
        if file_id is None:
            file_id = generate_ids.gen_root_id()
        self.root = InventoryDirectory(file_id, '', None)
        self._byid = {self.root.file_id: self.root}
        return self.root
    else:
        parent_path = parts[:-1]
        parent_id = self.path2id(parent_path)
        if parent_id is None:
            raise errors.NotVersionedError(path=parent_path)
        ie = make_entry(kind, parts[-1], parent_id, file_id)
    return self.add(ie)
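
# Illustrative sketch (not part of bzrlib): add_path() resolves the parent
# directory from the relative path and picks the entry class from the kind,
# so callers do not build InventoryEntry objects themselves.  The file ids
# are made up; omitting them would let make_entry() generate one.
def _add_path_example():
    inv = Inventory('TREE_ROOT')
    inv.add_path('src', 'directory', file_id='src-id')
    ie = inv.add_path('src/hello.c', 'file', file_id='hello-id')
    assert ie.parent_id == 'src-id'
    assert inv.path2id('src/hello.c') == 'hello-id'
    return inv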
def __delitem__(self, file_id):
    """Remove entry by id.

    >>> inv = Inventory()
    >>> inv.add(InventoryFile('123', 'foo.c', ROOT_ID))
    InventoryFile('123', 'foo.c', parent_id='TREE_ROOT', sha1=None, len=None)
    >>> '123' in inv
    True
    >>> del inv['123']
    >>> '123' in inv
    False
    """

        file_ie.name = new_name
        file_ie.parent_id = new_parent_id
def is_root(self, file_id):
    return self.root is not None and file_id == self.root.file_id


entry_factory = {
    'directory': InventoryDirectory,
    'file': InventoryFile,
    'symlink': InventoryLink,
    'tree-reference': TreeReference
}
def make_entry(kind, name, parent_id, file_id=None):
    """Create an inventory entry.

    :param kind: the type of inventory entry to create.
    :param name: the basename of the entry.
    :param parent_id: the parent_id of the entry.
    :param file_id: the file_id to use. If None, one will be created.
    """
    if file_id is None:
        file_id = generate_ids.gen_file_id(name)
    name = ensure_normalized_name(name)
    try:
        factory = entry_factory[kind]
    except KeyError:
        raise BzrError("unknown kind %r" % kind)
    return factory(file_id, name, parent_id)
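
# Illustrative sketch (not part of bzrlib): make_entry() dispatches on kind
# through entry_factory and generates a file id when none is given.  The ids
# and names are made up.
def _make_entry_example():
    d = make_entry('directory', 'src', ROOT_ID, file_id='src-id')
    assert isinstance(d, InventoryDirectory)
    f = make_entry('file', 'hello.c', 'src-id')      # file_id generated
    assert isinstance(f, InventoryFile)
    assert f.file_id is not None
    try:
        make_entry('socket', 'weird', ROOT_ID)
    except BzrError:
        pass                                         # unknown kind rejected
    return d, f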
def ensure_normalized_name(name):
    """Normalize name.

    :raises InvalidNormalization: When name is not normalized, and cannot be
        accessed on this platform by the normalized path.
    :return: The NFC normalised version of name.
    """
    #------- This has been copied to bzrlib.dirstate.DirState.add, please
    # keep them synchronised.
    # we don't import normalized_filename directly because we want to be
    # able to change the implementation at runtime for tests.
    norm_name, can_access = osutils.normalized_filename(name)
    if norm_name != name:
        if can_access:
            return norm_name
        else:
            # TODO: jam 20060701 This would probably be more useful
            # if the error was raised with the full path
            raise errors.InvalidNormalization(name)
    return name
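
# Illustrative sketch (not part of bzrlib): why normalization matters.  The
# same accented name can be spelled as a single precomposed code point (NFC)
# or as a letter plus combining accent (NFD); ensure_normalized_name()
# insists on the NFC spelling that is stored in the inventory.
def _normalization_example():
    import unicodedata
    nfc = u'caf\xe9'                         # 'cafe' with precomposed e-acute
    nfd = unicodedata.normalize('NFD', nfc)  # 'cafe' + combining accent
    assert nfc != nfd
    assert unicodedata.normalize('NFC', nfd) == nfc
    # On platforms whose filesystem can hand back the NFC path,
    # ensure_normalized_name(nfd) would return the NFC spelling; on ones
    # that cannot access it, it raises InvalidNormalization.
    return nfc, nfd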
_NAME_RE = None

def is_valid_name(name):
    global _NAME_RE
    if _NAME_RE is None:
        _NAME_RE = re.compile(r'^[^/\\]+$')

    return bool(_NAME_RE.match(name))
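
# Illustrative sketch (not part of bzrlib): a valid entry name is a single
# path component, so anything containing a slash or backslash is rejected.
def _is_valid_name_example():
    assert is_valid_name('hello.c')
    assert not is_valid_name('src/hello.c')
    assert not is_valid_name('src\\hello.c')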