    >>> i.add(InventoryDirectory('123', 'src', ROOT_ID))
    InventoryDirectory('123', 'src', parent_id='TREE_ROOT', revision=None)
    >>> i.add(InventoryFile('2323', 'hello.c', parent_id='123'))
    InventoryFile('2323', 'hello.c', parent_id='123', sha1=None, len=None)
    >>> shouldbe = {0: '', 1: 'src', 2: 'src/hello.c'}
    >>> for ix, j in enumerate(i.iter_entries()):
    ...   print (j[0] == shouldbe[ix], j[1])
    (True, InventoryDirectory('TREE_ROOT', u'', parent_id=None, revision=None))
    (True, InventoryDirectory('123', 'src', parent_id='TREE_ROOT', revision=None))
    (True, InventoryFile('2323', 'hello.c', parent_id='123', sha1=None, len=None))
    >>> i.add(InventoryFile('2324', 'bye.c', '123'))
    InventoryFile('2324', 'bye.c', parent_id='123', sha1=None, len=None)
    >>> i.add(InventoryDirectory('2325', 'wibble', '123'))
    InventoryDirectory('2325', 'wibble', parent_id='123', revision=None)
    >>> i.path2id('src/wibble')
    '2325'
    >>> i.add(InventoryFile('2326', 'wibble.c', '2325'))
    InventoryFile('2326', 'wibble.c', parent_id='2325', sha1=None, len=None)
    >>> i['2326']
    InventoryFile('2326', 'wibble.c', parent_id='2325', sha1=None, len=None)
    >>> for path, entry in i.iter_entries():
    ...     assert i.path2id(path)
        return False, False

    def diff(self, text_diff, from_label, tree, to_label, to_entry, to_tree,
             output_to, reverse=False):
        """Perform a diff from this to to_entry.

        text_diff will be used for textual difference calculation.
        This is a template method, override _diff in child classes.
        """
        self._read_tree_state(tree.id2path(self.file_id), tree)
        # cannot diff from one kind to another - you must do a removal
        # and an add if they do not match.
        assert self.kind == to_entry.kind
        to_entry._read_tree_state(to_tree.id2path(to_entry.file_id),
                                  to_tree)
        self._diff(text_diff, from_label, tree, to_label, to_entry, to_tree,
                   output_to, reverse)

    def _diff(self, text_diff, from_label, tree, to_label, to_entry, to_tree,
              output_to, reverse=False):
        """Perform a diff between two entries of the same kind."""
    def parent_candidates(self, previous_inventories):
        """Find possible per-file graph parents.

        This is currently defined by:
         - Select the last changed revision in the parent inventory.
         - To deal with a short-lived bug in bzr 0.8's development, two
           entries that have the same last changed revision but different
           'x' bit settings are changed in-place.
        """

    @deprecated_method(deprecated_in((1, 6, 0)))
    def find_previous_heads(self, previous_inventories,
                            versioned_file_store,
                            transaction, entry_vf=None):
        """Return the revisions and entries that directly precede this.

        Returned as a map from revision to inventory entry.

        This is a map containing the file revisions in all parents
        for which the file exists, and its revision is not a parent of
        any other. If the file is new, the set will be empty.

        :param versioned_file_store: A store where ancestry data on this
            file id can be queried.
        :param transaction: The transaction that queries to the versioned
            file store should be completed under.
        :param entry_vf: The entry versioned file, if it's already available.
        """
        def get_ancestors(weave, entry):
            return set(weave.get_ancestry(entry.revision))
        # revision:ie mapping for each ie found in previous_inventories.
        candidates = {}
        # revision:ie mapping with one revision for each head.
        heads = {}
        # revision: ancestor list for each head
        head_ancestors = {}
        # identify candidate head revision ids.
        for inv in previous_inventories:
            if self.file_id in inv:
                ie = inv[self.file_id]
                assert ie.file_id == self.file_id
                if ie.revision in candidates:
                    # same revision value in two different inventories:
                    # correct possible inconsistencies:
                    try:
                        ie.executable = False
                    except AttributeError:
                        pass
                    # must now be the same.
                    assert candidates[ie.revision] == ie
                else:
                    # add this revision as a candidate.
                    candidates[ie.revision] = ie

        # common case optimisation
        if len(candidates) == 1:
            # if there is only one candidate revision found
            # then we can avoid opening the versioned file to access ancestry:
            # there cannot be any ancestors to eliminate when there is
            # only one revision available.
            heads[ie.revision] = ie
            return heads

        # eliminate ancestors amongst the available candidates:
        # heads are those that are not an ancestor of any other candidate
        # - this provides convergence at a per-file level.
        for ie in candidates.values():
            # may be an ancestor of a known head:
            already_present = 0 != len(
                [head for head in heads
                 if ie.revision in head_ancestors[head]])
            if already_present:
                # an ancestor of an analyzed candidate.
                continue
            # not an ancestor of a known head:
            # load the versioned file for this file id if needed
            if entry_vf is None:
                entry_vf = versioned_file_store.get_weave_or_empty(
                    self.file_id, transaction)
            ancestors = get_ancestors(entry_vf, ie)
            # may knock something else out:
            check_heads = list(heads.keys())
            for head in check_heads:
                if head in ancestors:
                    # this previously discovered 'head' is not
                    # really a head - it's an ancestor of the newly
                    # discovered candidate, so remove it.
                    heads.pop(head)
            head_ancestors[ie.revision] = ancestors
            heads[ie.revision] = ie
        return heads
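        # Editor's note: a hedged usage sketch, not part of bzrlib itself.
        # Assuming `entry` is an InventoryEntry and `prev_invs` is a list of
        # parent Inventory objects, the candidate map is keyed by the
        # last-changed revision ids recorded in those inventories:
        #
        #     candidates = entry.parent_candidates(prev_invs)
        #     # e.g. {'rev-1-id': <InventoryFile ...>, 'rev-2-id': <InventoryFile ...>}
        #
        # find_previous_heads() (deprecated) additionally drops any candidate
        # whose revision is an ancestor of another candidate's revision.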
    def get_tar_item(self, root, dp, now, tree):
        """Get a tarfile item and a file stream for its content."""
        item = tarfile.TarInfo(osutils.pathjoin(root, dp).encode('utf8'))
        # TODO: would be cool to actually set it to the timestamp of the
        # revision it was last changed
        """Clone this inventory entry."""
        raise NotImplementedError

    @staticmethod
    def describe_change(old_entry, new_entry):
        """Describe the change between old_entry and this.

        This smells of being an InterInventoryEntry situation, but as it's
        the first one, we're making it a static method for now.

        An entry with a different parent, or different name is considered
        to be renamed. Reparenting is an internal detail.
        Note that renaming the parent does not trigger a rename for the
        child entry itself.
        """
        # TODO: Perhaps return an object rather than just a string
        if old_entry is new_entry:
            # also the case of both being None
            return 'unchanged'
        elif old_entry is None:
            return 'added'
        elif new_entry is None:
            return 'removed'
        if old_entry.kind != new_entry.kind:
            return 'modified'
        text_modified, meta_modified = new_entry.detect_changes(old_entry)
        if text_modified or meta_modified:
            modified = True
        else:
            modified = False
        # TODO 20060511 (mbp, rbc) factor out 'detect_rename' here.
        if old_entry.parent_id != new_entry.parent_id:
            renamed = True
        elif old_entry.name != new_entry.name:
            renamed = True
        else:
            renamed = False
        if renamed and not modified:
            return InventoryEntry.RENAMED
        if modified and not renamed:
            return 'modified'
        if modified and renamed:
            return InventoryEntry.MODIFIED_AND_RENAMED
        return 'unchanged'

    def _describe_snapshot_change(self, previous_entries):
        """Describe how this entry will have changed in a new commit.

        :param previous_entries: Dictionary from revision_id to inventory entry.

        :returns: One-word description: "merged", "added", "renamed", "modified".
        """
        # XXX: This assumes that the file *has* changed -- it should probably
        # be fused with whatever does that detection. Why not just a single
        # thing to compare the entries?
        #
        # TODO: Return some kind of object describing all the possible
        # dimensions that can change, not just a string. That can then give
        # both old and new names for renames, etc.
        if len(previous_entries) > 1:
            return 'merged'
        elif len(previous_entries) == 0:
            return 'added'
        else:
            the_parent, = previous_entries.values()
            if self.parent_id != the_parent.parent_id:
                # actually, moved to another directory
                return 'renamed'
            elif self.name != the_parent.name:
                return 'renamed'
        return 'modified'
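        # Editor's note: a hedged illustration, not from bzrlib itself.
        # Assuming `old_ie` and `new_ie` are InventoryEntry objects for the
        # same file_id, describe_change() maps the comparison onto the
        # one-word results used above:
        #
        #     InventoryEntry.describe_change(old_ie, old_ie)  # the 'unchanged' case
        #     InventoryEntry.describe_change(None, new_ie)    # the 'added' case
        #     InventoryEntry.describe_change(old_ie, None)    # the 'removed' case
        #     # a name or parent change alone -> InventoryEntry.RENAMED
        #     # a text/meta change plus a rename -> InventoryEntry.MODIFIED_AND_RENAMED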
    def __repr__(self):
        return ("%s(%r, %r, parent_id=%r, revision=%r)"
                % (self.__class__.__name__,
                   self.file_id,
                   self.name,
                   self.parent_id,
                   self.revision))
    def snapshot(self, revision, path, previous_entries,
                 work_tree, weave_store, transaction):
        """Make a snapshot of this entry which may or may not have changed.

        This means that all its fields are populated, that it has its
        text stored in the text store or weave.
        """
        mutter('new parents of %s are %r', path, previous_entries)
        self._read_tree_state(path, work_tree)
        if len(previous_entries) == 1:
            # cannot be unchanged unless there is only one parent file rev.
            parent_ie = previous_entries.values()[0]
            if self._unchanged(parent_ie):
                mutter("found unchanged entry")
                self.revision = parent_ie.revision
                return "unchanged"
        return self._snapshot_into_revision(revision, previous_entries,
                                            work_tree, weave_store, transaction)

    def _snapshot_into_revision(self, revision, previous_entries, work_tree,
                                weave_store, transaction):
        """Record this revision unconditionally into a store.

        The entry's last-changed revision property (`revision`) is updated to
        that of the new revision.

        :param revision: id of the new revision that is being recorded.

        :returns: String description of the commit (e.g. "merged", "modified", etc.).
        """
        mutter('new revision {%s} for {%s}', revision, self.file_id)
        self.revision = revision
        change = self._describe_snapshot_change(previous_entries)
        self._snapshot_text(previous_entries, work_tree, weave_store,
                            transaction)
        return change

    def _snapshot_text(self, file_parents, work_tree, weave_store, transaction):
        """Record the 'text' of this entry, whatever form that takes.

        This default implementation simply adds an empty text.
        """
        mutter('storing file {%s} in revision {%s}',
               self.file_id, self.revision)
        self._add_text_to_weave([], file_parents.keys(), weave_store, transaction)

    def __eq__(self, other):
        if not isinstance(other, InventoryEntry):
            return NotImplemented
class InventoryFile(InventoryEntry):
    """A file in an inventory."""

    __slots__ = ['text_sha1', 'text_size', 'file_id', 'name', 'kind',
                 'text_id', 'parent_id', 'children', 'executable',
                 'revision', 'symlink_target', 'reference_revision']

    def _check(self, checker, tree_revision_id, tree):
        """See InventoryEntry._check"""
        key = (self.file_id, self.revision)
        if key in checker.checked_texts:
            prev_sha = checker.checked_texts[key]
            if prev_sha != self.text_sha1:
                raise BzrCheckError(
                    'mismatched sha1 on {%s} in {%s} (%s != %s) %r' %
                    (self.file_id, tree_revision_id, prev_sha, self.text_sha1,
                     key))
            else:
                checker.repeated_text_cnt += 1
                return

        mutter('check version {%s} of {%s}', tree_revision_id, self.file_id)
        checker.checked_text_cnt += 1
        # We can't check the length, because Weave doesn't store that
        # information, and the whole point of looking at the weave's
        # sha1sum is that we don't have to extract the text.
        if (self.text_sha1 != tree._repository.texts.get_sha1s([key])[key]):
            raise BzrCheckError('text {%s} version {%s} wrong sha1' % key)
        checker.checked_texts[key] = self.text_sha1
        other = InventoryFile(self.file_id, self.name, self.parent_id)

    def _diff(self, text_diff, from_label, tree, to_label, to_entry, to_tree,
              output_to, reverse=False):
        """See InventoryEntry._diff."""
        from bzrlib.diff import DiffText
        from_file_id = self.file_id
        to_file_id = to_entry.file_id
        if reverse:
            to_file_id, from_file_id = from_file_id, to_file_id
            tree, to_tree = to_tree, tree
            from_label, to_label = to_label, from_label
        differ = DiffText(tree, to_tree, output_to, 'utf-8', '', '',
                          text_diff)
        return differ.diff_text(from_file_id, to_file_id, from_label, to_label)
    def has_text(self):
        """See InventoryEntry.has_text."""

    def _put_on_disk(self, fullpath, tree):
        """See InventoryEntry._put_on_disk."""
        osutils.pumpfile(tree.get_file(self.file_id), file(fullpath, 'wb'))
        if tree.is_executable(self.file_id):
            os.chmod(fullpath, 0755)
    def _read_tree_state(self, path, work_tree):
        """See InventoryEntry._read_tree_state."""
        self.text_sha1 = work_tree.get_file_sha1(self.file_id, path=path)
        # FIXME: 20050930 probe for the text size when getting sha1
        # in _read_tree_state
        self.executable = work_tree.is_executable(self.file_id, path=path)

    def __repr__(self):
        return ("%s(%r, %r, parent_id=%r, sha1=%r, len=%s)"
                % (self.__class__.__name__,
                   self.file_id,
                   self.name,
                   self.parent_id,
                   self.text_sha1,
                   self.text_size))

    def _forget_tree_state(self):
        self.text_sha1 = None
        self.executable = None
    def _snapshot_text(self, file_parents, work_tree, weave_store, transaction):
        """See InventoryEntry._snapshot_text."""
        mutter('storing file {%s} in revision {%s}',
               self.file_id, self.revision)
        # special case to avoid diffing on renames or
        # reparenting
        if (len(file_parents) == 1
            and self.text_sha1 == file_parents.values()[0].text_sha1
            and self.text_size == file_parents.values()[0].text_size):
            previous_ie = file_parents.values()[0]
            versionedfile = weave_store.get_weave(self.file_id, transaction)
            versionedfile.clone_text(self.revision, previous_ie.revision, file_parents.keys())
        else:
            new_lines = work_tree.get_file(self.file_id).readlines()
            self._add_text_to_weave(new_lines, file_parents.keys(), weave_store,
                                    transaction)
            self.text_sha1 = sha_strings(new_lines)
            self.text_size = sum(map(len, new_lines))

    def _unchanged(self, previous_ie):
        """See InventoryEntry._unchanged."""
        The inventory is created with a default root directory, with
        an id of None.
        """
        if root_id is not None:
            self._set_root(InventoryDirectory(root_id, u'', None))
        self.revision_id = revision_id

    def __repr__(self):
        return "<Inventory object at %x, contents=%r>" % (id(self), self._byid)
    def apply_delta(self, delta):
        """Apply a delta to this inventory.

        :param delta: A list of changes to apply. After all the changes are
            applied the final inventory must be internally consistent, but it
            is ok to supply changes which, if only half-applied would have an
            invalid result - such as supplying two changes which rename two
            files, 'A' and 'B' with each other : [('A', 'B', 'A-id', a_entry),
            ('B', 'A', 'B-id', b_entry)].

            Each change is a tuple, of the form (old_path, new_path, file_id,
            new_entry).

            When new_path is None, the change indicates the removal of an entry
            from the inventory and new_entry will be ignored (using None is
            appropriate). If new_path is not None, then new_entry must be an
            InventoryEntry instance, which will be incorporated into the
            inventory (and replace any existing entry with the same file id).

            When old_path is None, the change indicates the addition of
            a new entry to the inventory.

            When neither new_path nor old_path are None, the change is a
            modification to an entry, such as a rename, reparent, kind change
            or content change.

            The children attribute of new_entry is ignored. This is because
            this method preserves children automatically across alterations to
            the parent of the children, and cases where the parent id of a
            child is changing require the child to be passed in as a separate
            change regardless. E.g. in the recursive deletion of a directory -
            the directory's children must be included in the delta, or the
            final inventory will be invalid.
        """
        children = {}
        # Remove all affected items which were in the original inventory,
        # starting with the longest paths, thus ensuring parents are examined
        # after their children, which means that everything we examine has no
        # modified children remaining by the time we examine it.
        for old_path, file_id in sorted(((op, f) for op, np, f, e in delta
                                         if op is not None), reverse=True):
            if file_id not in self:
                continue
            # Preserve unaltered children of file_id for later reinsertion.
            file_id_children = getattr(self[file_id], 'children', {})
            if len(file_id_children):
                children[file_id] = file_id_children
            # Remove file_id and the unaltered children. If file_id is not
            # being deleted it will be reinserted back later.
            self.remove_recursive_id(file_id)
        # Insert all affected items which should be in the new inventory,
        # reattaching their children if they had any. This is done from
        # shortest path to longest, ensuring that items which were modified
        # and whose parents in the resulting inventory were also modified,
        # are inserted after their parents.
        for new_path, new_entry in sorted((np, e) for op, np, f, e in
                                          delta if np is not None):
            if new_entry.kind == 'directory':
                # Pop the child to allow detection of children whose
                # parents were deleted and which were not reattached to a new
                # parent.
                new_entry.children = children.pop(new_entry.file_id, {})
            self.add(new_entry)
        if len(children):
            # Get the parent id that was deleted
            parent_id, children = children.popitem()
            raise errors.InconsistentDelta("<deleted>", parent_id,
                "The file id was deleted but its children were not deleted.")
    def _set_root(self, ie):
        self.root = ie
        self._byid = {self.root.file_id: self.root}

    def copy(self):
        # TODO: jam 20051218 Should copy also copy the revision_id?
        entries = self.iter_entries()
        if self.root is None:
            return Inventory(root_id=None)
        other = Inventory(entries.next()[1].file_id)
        other.root.revision = self.root.revision
        # copy recursively so we know directories will be added before
        # their children. There are more efficient ways than this...
        for path, entry in entries:
            other.add(entry.copy())
        return other
    def __iter__(self):
        return iter(self._byid)

    def __len__(self):
        """Returns number of entries."""
        return len(self._byid)

    def iter_entries(self, from_dir=None):
        """Return (path, entry) pairs, in order by name."""
        if from_dir is None:
            if self.root is None:
                return
            from_dir = self.root
            yield '', self.root
        elif isinstance(from_dir, basestring):
            from_dir = self._byid[from_dir]

        # unrolling the recursive call changed the time from
        # 440ms/663ms (inline/total) to 116ms/116ms
        children = from_dir.children.items()
        children = collections.deque(children)
        stack = [(u'', children)]
        while stack:
            from_dir_relpath, children = stack[-1]
            while children:
                name, ie = children.popleft()
                # we know that from_dir_relpath never ends in a slash
                # and 'f' doesn't begin with one, so we can do a string op
                # rather than the checks of pathjoin(), though this means
                # that all paths start with a slash
                path = from_dir_relpath + '/' + name
                yield path[1:], ie
                if ie.kind != 'directory':
                    continue
                # But do this child first
                new_children = ie.children.items()
                new_children = collections.deque(new_children)
                stack.append((path, new_children))
                # Break out of inner loop, so that we start outer loop with child
                break
            else:
                # if we finished all children, pop it off the stack
                stack.pop()
    def iter_entries_by_dir(self, from_dir=None, specific_file_ids=None,
                            yield_parents=False):
        """Iterate over the entries in directory-first order.

        This returns all entries for a directory before returning
        the entries for children of a directory. This is not
        lexicographically sorted order, and is a hybrid between
        depth-first and breadth-first.

        :param yield_parents: If True, yield the parents from the root leading
            down to specific_file_ids that have been requested. This has no
            impact if specific_file_ids is None.
        :return: This yields (path, entry) pairs
        """
        if specific_file_ids and not isinstance(specific_file_ids, set):
            specific_file_ids = set(specific_file_ids)
        # TODO? Perhaps this should return the from_dir so that the root is
        # yielded? or maybe an option?
        if self.root is None:
            return
        # Optimize a common case
        if (not yield_parents and specific_file_ids is not None and
            len(specific_file_ids) == 1):
            file_id = list(specific_file_ids)[0]
            yield self.id2path(file_id), self[file_id]
            return
        if (specific_file_ids is None or yield_parents or
            self.root.file_id in specific_file_ids):
            yield u'', self.root
        elif isinstance(from_dir, basestring):
            from_dir = self._byid[from_dir]

        if specific_file_ids is not None:
            # TODO: jam 20070302 This could really be done as a loop rather
            # than a bunch of recursive calls.
            parents = set()
            byid = self._byid
            def add_ancestors(file_id):
                if file_id not in byid:
                    return
                parent_id = byid[file_id].parent_id
                if parent_id is None:
                    return
                if parent_id not in parents:
                    parents.add(parent_id)
                    add_ancestors(parent_id)
            for file_id in specific_file_ids:
                add_ancestors(file_id)
        else:
            parents = None

        stack = [(u'', from_dir)]
        while stack:
            cur_relpath, cur_dir = stack.pop()
            child_dirs = []
            for child_name, child_ie in sorted(cur_dir.children.iteritems()):
                child_relpath = cur_relpath + child_name
                if (specific_file_ids is None or
                    child_ie.file_id in specific_file_ids or
                    (yield_parents and child_ie.file_id in parents)):
                    yield child_relpath, child_ie
                if child_ie.kind == 'directory':
                    if parents is None or child_ie.file_id in parents:
                        child_dirs.append((child_relpath+'/', child_ie))
            stack.extend(reversed(child_dirs))
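        # Editor's note: a hedged illustration, not from bzrlib itself.
        # Assuming an inventory `inv` whose root contains a directory 'a'
        # (holding 'a/z.txt') and a file 'b.txt', the two iterators differ
        # roughly as follows:
        #
        #     [p for p, ie in inv.iter_entries()]          # ['', 'a', 'a/z.txt', 'b.txt']
        #     [p for p, ie in inv.iter_entries_by_dir()]   # ['', 'a', 'b.txt', 'a/z.txt']
        #
        # i.e. iter_entries_by_dir() finishes a directory's own entries before
        # descending into any of its subdirectories.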
    def make_entry(self, kind, name, parent_id, file_id=None):
        """Simple thunk to bzrlib.inventory.make_entry."""
        return make_entry(kind, name, parent_id, file_id)
    def entries(self):
        """Return list of (path, ie) for all entries except the root.
        Returns the new entry object.
        """
        if entry.file_id in self._byid:
            raise errors.DuplicateFileId(entry.file_id,
                                         self._byid[entry.file_id])
        if entry.parent_id is None:
            self.root = entry
        else:
            try:
                parent = self._byid[entry.parent_id]
            except KeyError:
                raise BzrError("parent_id {%s} not in inventory" %
                               entry.parent_id)
            if entry.name in parent.children:
                raise BzrError("%s is already versioned" %
                    osutils.pathjoin(self.id2path(parent.file_id),
                    entry.name).encode('utf-8'))
            parent.children[entry.name] = entry
        return self._add_child(entry)
    def add_path(self, relpath, kind, file_id=None, parent_id=None):
        """Add entry from a path.

        The immediate parent must already be versioned.

        Returns the new entry object."""
        parts = osutils.splitpath(relpath)

        if len(parts) == 0:
            if file_id is None:
                file_id = generate_ids.gen_root_id()
            self.root = InventoryDirectory(file_id, '', None)
            self._byid = {self.root.file_id: self.root}
            return self.root
        else:
            parent_path = parts[:-1]
            parent_id = self.path2id(parent_path)
            if parent_id is None:
                raise errors.NotVersionedError(path=parent_path)
        ie = make_entry(kind, parts[-1], parent_id, file_id)
        return self.add(ie)
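        # Editor's note: a hedged usage sketch, not part of bzrlib itself.
        # Assuming 'src' is already versioned in the inventory `inv`:
        #
        #     ie = inv.add_path('src/hello.c', 'file', file_id='hello-id')
        #     inv.path2id('src/hello.c')    # -> 'hello-id'
        #
        # If the immediate parent ('src' here) is not versioned,
        # NotVersionedError is raised instead.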
    def __delitem__(self, file_id):
        """Remove entry by id.

        >>> inv = Inventory()
        >>> inv.add(InventoryFile('123', 'foo.c', ROOT_ID))
        InventoryFile('123', 'foo.c', parent_id='TREE_ROOT', sha1=None, len=None)
        >>> '123' in inv
        True
        >>> del inv['123']
        file_ie.name = new_name
        file_ie.parent_id = new_parent_id

    def is_root(self, file_id):
        return self.root is not None and file_id == self.root.file_id
entry_factory = {
    'directory': InventoryDirectory,
    'file': InventoryFile,
    'symlink': InventoryLink,
    'tree-reference': TreeReference
}
def make_entry(kind, name, parent_id, file_id=None):
    """Create an inventory entry.

    :param kind: the type of inventory entry to create.
    :param name: the basename of the entry.
    :param parent_id: the parent_id of the entry.
    :param file_id: the file_id to use. If None, one will be created.
    """
    if file_id is None:
        file_id = generate_ids.gen_file_id(name)
    name = ensure_normalized_name(name)
    try:
        factory = entry_factory[kind]
    except KeyError:
        raise BzrError("unknown kind %r" % kind)
    return factory(file_id, name, parent_id)
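# Editor's note: a hedged usage sketch, not part of bzrlib itself. The
# entry_factory mapping ties each kind string to its class, so the two calls
# below build comparable entries, assuming a versioned parent directory with
# id 'src-id' (make_entry() additionally normalizes the name and can generate
# a file id when none is given):
#
#     ie1 = make_entry('file', 'hello.c', 'src-id', file_id='hello-id')
#     ie2 = entry_factory['file']('hello-id', 'hello.c', 'src-id')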
def ensure_normalized_name(name):
    """
    :raises InvalidNormalization: When name is not normalized, and cannot be
        accessed on this platform by the normalized path.
    :return: The NFC normalised version of name.
    """
    #------- This has been copied to bzrlib.dirstate.DirState.add, please
    # keep them synchronised.
    # we don't import normalized_filename directly because we want to be
    # able to change the implementation at runtime for tests.
    norm_name, can_access = osutils.normalized_filename(name)
    if norm_name != name:
        if can_access:
            return norm_name
        else:
            # TODO: jam 20060701 This would probably be more useful
            # if the error was raised with the full path
            raise errors.InvalidNormalization(name)
    return name
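# Editor's note: a hedged illustration, not part of bzrlib itself. With a
# decomposed (NFD) name such as u'a\u0300.txt' ('a' plus a combining grave
# accent), ensure_normalized_name() returns the NFC form u'\xe0.txt' when the
# platform can access the normalized path, and raises InvalidNormalization
# when it cannot.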
_NAME_RE = None

def is_valid_name(name):
    global _NAME_RE
    if _NAME_RE is None:
        _NAME_RE = re.compile(r'^[^/\\]+$')

    return bool(_NAME_RE.match(name))