    text_sha1
        sha-1 of the text of the file

    text_size
        size in bytes of the text of the file

    (reading a version 4 tree created a text_id field.)
    >>> i = Inventory()
    >>> i.add(InventoryDirectory('123', 'src', ROOT_ID))
    InventoryDirectory('123', 'src', parent_id='TREE_ROOT', revision=None)
    >>> i.add(InventoryFile('2323', 'hello.c', parent_id='123'))
    InventoryFile('2323', 'hello.c', parent_id='123', sha1=None, len=None, revision=None)
    >>> shouldbe = {0: '', 1: 'src', 2: 'src/hello.c'}
    >>> for ix, j in enumerate(i.iter_entries()):
    ...   print (j[0] == shouldbe[ix], j[1])
    ...
    (True, InventoryDirectory('TREE_ROOT', u'', parent_id=None, revision=None))
    (True, InventoryDirectory('123', 'src', parent_id='TREE_ROOT', revision=None))
    (True, InventoryFile('2323', 'hello.c', parent_id='123', sha1=None, len=None, revision=None))
    >>> i.add(InventoryFile('2323', 'bye.c', '123'))
    Traceback (most recent call last):
    ...
    BzrError: inventory already contains entry with id {2323}
    >>> i.add(InventoryFile('2324', 'bye.c', '123'))
    InventoryFile('2324', 'bye.c', parent_id='123', sha1=None, len=None, revision=None)
    >>> i.add(InventoryDirectory('2325', 'wibble', '123'))
    InventoryDirectory('2325', 'wibble', parent_id='123', revision=None)
    >>> i.path2id('src/wibble')
    '2325'
    >>> i.add(InventoryFile('2326', 'wibble.c', '2325'))
    InventoryFile('2326', 'wibble.c', parent_id='2325', sha1=None, len=None, revision=None)
    >>> i['2326']
    InventoryFile('2326', 'wibble.c', parent_id='2325', sha1=None, len=None, revision=None)
    >>> for path, entry in i.iter_entries():
    ...     print path.replace('\\\\', '/') # for win32 os.sep
    ...     assert i.path2id(path)
    ...
    src/wibble/wibble.c
    >>> i.id2path('2326').replace('\\\\', '/')
    'src/wibble/wibble.c'
    # Constants returned by describe_change()
    #
    # TODO: These should probably move to some kind of FileChangeDescription
    # class; that's like what's inside a TreeDelta but we want to be able to
    # generate them just for one file at a time.
    RENAMED = 'renamed'
    MODIFIED_AND_RENAMED = 'modified and renamed'
    __slots__ = ['text_sha1', 'text_size', 'file_id', 'name', 'kind',
                 'text_id', 'parent_id', 'children', 'executable',
                 'revision', 'symlink_target', 'reference_revision']

    def _add_text_to_weave(self, new_lines, parents, weave_store, transaction):
        weave_store.add_text(self.file_id, self.revision, new_lines, parents,
                             transaction)
    def detect_changes(self, old_entry):
        """Return a (text_modified, meta_modified) from this to old_entry.

        _read_tree_state must have been called on self and old_entry prior to
        calling detect_changes.
        """
        return False, False
    def diff(self, text_diff, from_label, tree, to_label, to_entry, to_tree,
             output_to, reverse=False):
        """Perform a diff from this to to_entry.

        text_diff will be used for textual difference calculation.
        This is a template method, override _diff in child classes.
        """
        self._read_tree_state(tree.id2path(self.file_id), tree)
        if to_entry:
            # cannot diff from one kind to another - you must do a removal
            # and an add if they do not match.
            assert self.kind == to_entry.kind
            to_entry._read_tree_state(to_tree.id2path(to_entry.file_id),
                                      to_tree)
        self._diff(text_diff, from_label, tree, to_label, to_entry, to_tree,
                   output_to, reverse)

    def _diff(self, text_diff, from_label, tree, to_label, to_entry, to_tree,
              output_to, reverse=False):
        """Perform a diff between two entries of the same kind."""
    def parent_candidates(self, previous_inventories):
        """Find possible per-file graph parents.

        This is currently defined by:
         - Select the last changed revision in the parent inventory.
         - To deal with a short-lived bug in bzr 0.8's development, two
           entries that have the same last-changed revision but different
           'x' bit settings are changed in-place.
        """
        # revision:ie mapping for each ie found in previous_inventories.
        candidates = {}
        # identify candidate head revision ids.
        for inv in previous_inventories:
            if self.file_id in inv:
                ie = inv[self.file_id]
                if ie.revision in candidates:
                    # same revision value in two different inventories:
                    # correct possible inconsistencies:
                    #    * there was a bug in revision updates with 'x' bit
                    #      support.
                    try:
                        if candidates[ie.revision].executable != ie.executable:
                            candidates[ie.revision].executable = False
                            ie.executable = False
                    except AttributeError:
                        pass
                else:
                    # add this revision as a candidate.
                    candidates[ie.revision] = ie
        return candidates

    def find_previous_heads(self, previous_inventories, entry_weave):
        """Return the revisions and entries that directly precede this.

        Returned as a map from revision to inventory entry.

        This is a map containing the file revisions in all parents
        for which the file exists, and its revision is not a parent of
        any other. If the file is new, the set will be empty.
        """
        def get_ancestors(weave, entry):
            return set(map(weave.idx_to_name,
                           weave.inclusions([weave.lookup(entry.revision)])))
        heads = {}
        head_ancestors = {}
        for inv in previous_inventories:
            if self.file_id in inv:
                ie = inv[self.file_id]
                assert ie.file_id == self.file_id
                if ie.revision in heads:
                    # fixup logic, there was a bug in revision updates.
                    # with x bit support.
                    try:
                        if heads[ie.revision].executable != ie.executable:
                            heads[ie.revision].executable = False
                            ie.executable = False
                    except AttributeError:
                        pass
                    assert heads[ie.revision] == ie
                else:
                    # may want to add it.
                    # may already be covered:
                    already_present = 0 != len(
                        [head for head in heads
                         if ie.revision in head_ancestors[head]])
                    if already_present:
                        # an ancestor of a known head.
                        continue
                    # definitely a head:
                    ancestors = get_ancestors(entry_weave, ie)
                    # may knock something else out:
                    check_heads = list(heads.keys())
                    for head in check_heads:
                        if head in ancestors:
                            # this head is not really a head
                            heads.pop(head)
                    head_ancestors[ie.revision] = ancestors
                    heads[ie.revision] = ie
        return heads
    @deprecated_method(deprecated_in((1, 6, 0)))
    def get_tar_item(self, root, dp, now, tree):
        """Get a tarfile item and a file stream for its content."""
        item = tarfile.TarInfo(osutils.pathjoin(root, dp).encode('utf8'))
        # TODO: would be cool to actually set it to the timestamp of the
        # revision it was last changed
        raise BzrError("don't know how to export {%s} of kind %r" %
                       (self.file_id, self.kind))
    @deprecated_method(deprecated_in((1, 6, 0)))
    def put_on_disk(self, dest, dp, tree):
        """Create a representation of self on disk in the prefix dest.

        This is a template method - implement _put_on_disk in subclasses.
        """
        fullpath = osutils.pathjoin(dest, dp)
        self._put_on_disk(fullpath, tree)
        # mutter(" export {%s} kind %s to %s", self.file_id,
        #         self.kind, fullpath)

    def _put_on_disk(self, fullpath, tree):
        """Put this entry onto disk at fullpath, from tree tree."""
        raise BzrError("don't know how to export {%s} of kind %r"
                       % (self.file_id, self.kind))

    def sorted_children(self):
        return sorted(self.children.items())
    def versionable_kind(kind):
        return (kind in ('file', 'directory', 'symlink', 'tree-reference'))
    def check(self, checker, rev_id, inv):
        """Check this inventory entry is intact.

        This is a template method, override _check for kind specific
        tests.

        :param checker: Check object providing context for the checks;
            can be used to find out what parts of the repository have already
            been checked.
        :param rev_id: Revision id from which this InventoryEntry was loaded.
            Not necessarily the last-changed revision for this file.
        :param inv: Inventory from which the entry was loaded.
        """
        if self.parent_id is not None:
            if not inv.has_id(self.parent_id):
                raise BzrCheckError('missing parent {%s} in inventory for revision {%s}'
                        % (self.parent_id, rev_id))
        checker._add_entry_to_text_key_references(inv, self)
        self._check(checker, rev_id)

    def _check(self, checker, rev_id):
        """Check this inventory entry for kind specific errors."""
        checker._report_items.append(
            'unknown entry kind %r in revision {%s}' % (self.kind, rev_id))

    def copy(self):
        """Clone this inventory entry."""
        raise NotImplementedError
    @staticmethod
    def describe_change(old_entry, new_entry):
        """Describe the change between old_entry and this.

        This smells of being an InterInventoryEntry situation, but as its
        the first one, we're making it a static method for now.

        An entry with a different parent, or different name is considered
        to be renamed. Reparenting is an internal detail.
        Note that renaming the parent does not trigger a rename for the
        child entry either.
        """
        # TODO: Perhaps return an object rather than just a string
        if old_entry is new_entry:
            # also the case of both being None
            return 'unchanged'
        elif old_entry is None:
            return 'added'
        elif new_entry is None:
            return 'removed'
        if old_entry.kind != new_entry.kind:
            return 'modified'
        text_modified, meta_modified = new_entry.detect_changes(old_entry)
        if text_modified or meta_modified:
            modified = True
        else:
            modified = False
        # TODO 20060511 (mbp, rbc) factor out 'detect_rename' here.
        if old_entry.parent_id != new_entry.parent_id:
            renamed = True
        elif old_entry.name != new_entry.name:
            renamed = True
        else:
            renamed = False
        if renamed and not modified:
            return InventoryEntry.RENAMED
        if modified and not renamed:
            return 'modified'
        if modified and renamed:
            return InventoryEntry.MODIFIED_AND_RENAMED
        return 'unchanged'

    def _get_snapshot_change(self, previous_entries):
        if len(previous_entries) > 1:
            return 'merged'
        elif len(previous_entries) == 0:
            return 'added'
        else:
            return 'modified/renamed/reparented'
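
    # Illustrative sketch (not part of the original source) of how a caller
    # might use describe_change(); the file ids and names below are
    # hypothetical.
    #
    #   old = InventoryFile('f-id', 'a.txt', 'TREE_ROOT')
    #   new = InventoryFile('f-id', 'b.txt', 'TREE_ROOT')
    #   InventoryEntry.describe_change(old, old)    # -> 'unchanged'
    #   InventoryEntry.describe_change(old, new)    # -> 'renamed'
    #   InventoryEntry.describe_change(None, new)   # -> 'added'
    #   InventoryEntry.describe_change(old, None)   # -> 'removed'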
    def __repr__(self):
        return ("%s(%r, %r, parent_id=%r, revision=%r)"
                % (self.__class__.__name__,
                   self.file_id,
                   self.name,
                   self.parent_id,
                   self.revision))
    def snapshot(self, revision, path, previous_entries,
                 work_tree, weave_store, transaction):
        """Make a snapshot of this entry which may or may not have changed.

        This means that all its fields are populated, that it has its
        text stored in the text store or weave.
        """
        mutter('new parents of %s are %r', path, previous_entries)
        self._read_tree_state(path, work_tree)
        if len(previous_entries) == 1:
            # cannot be unchanged unless there is only one parent file rev.
            parent_ie = previous_entries.values()[0]
            if self._unchanged(parent_ie):
                mutter("found unchanged entry")
                self.revision = parent_ie.revision
                return "unchanged"
        return self.snapshot_revision(revision, previous_entries,
                                      work_tree, weave_store, transaction)

    def snapshot_revision(self, revision, previous_entries, work_tree,
                          weave_store, transaction):
        """Record this revision unconditionally."""
        mutter('new revision for {%s}', self.file_id)
        self.revision = revision
        change = self._get_snapshot_change(previous_entries)
        self._snapshot_text(previous_entries, work_tree, weave_store,
                            transaction)
        return change

    def _snapshot_text(self, file_parents, work_tree, weave_store, transaction):
        """Record the 'text' of this entry, whatever form that takes.

        This default implementation simply adds an empty text.
        """
        mutter('storing file {%s} in revision {%s}',
               self.file_id, self.revision)
        self._add_text_to_weave([], file_parents, weave_store, transaction)
    def __eq__(self, other):
        if other is self:
            # For the case when objects are cached
            return True
        if not isinstance(other, InventoryEntry):
            return NotImplemented

        return compatible
class TreeReference(InventoryEntry):

    kind = 'tree-reference'

    def __init__(self, file_id, name, parent_id, revision=None,
                 reference_revision=None):
        InventoryEntry.__init__(self, file_id, name, parent_id)
        self.revision = revision
        self.reference_revision = reference_revision

    def copy(self):
        return TreeReference(self.file_id, self.name, self.parent_id,
                             self.revision, self.reference_revision)
    def _read_tree_state(self, path, work_tree):
        """Populate fields in the inventory entry from the given tree.
        """
        self.reference_revision = work_tree.get_reference_revision(
            self.file_id, path)

    def _forget_tree_state(self):
        self.reference_revision = None

    def _unchanged(self, previous_ie):
        """See InventoryEntry._unchanged."""
        compatible = super(TreeReference, self)._unchanged(previous_ie)
        if self.reference_revision != previous_ie.reference_revision:
            compatible = False
        return compatible
class CommonInventory(object):
    """Basic inventory logic, defined in terms of primitives like has_id.

    An inventory is the metadata about the contents of a tree.

    This is broadly a map from file_id to entries such as directories, files,
    symlinks and tree references. Each entry maintains its own metadata like
    SHA1 and length for files, or children for a directory.

    This describes which file_id is present at each point in the tree,
    and possibly the SHA-1 or other information about the file.

    Entries can be looked up either by path or by file_id.

    The inventory represents a typical unix file tree, with
    directories containing files and subdirectories. We never store
    the full path to a file, because renaming a directory implicitly
    moves all of its contents. This class internally maintains a
    lookup tree that allows the children under a directory to be
    returned quickly.

    InventoryEntry objects must not be modified after they are
    inserted, other than through the Inventory API.

    >>> inv = Inventory()
    >>> inv.add(InventoryFile('123-123', 'hello.c', ROOT_ID))
    InventoryFile('123-123', 'hello.c', parent_id='TREE_ROOT')
    >>> inv['123-123'].name
    'hello.c'

    May be treated as an iterator or set to look up file ids:

    >>> bool(inv.path2id('hello.c'))
    True

    May also look up by name:

    >>> [x[0] for x in inv.iter_entries()]
    ['hello.c']
    >>> inv = Inventory('TREE_ROOT-12345678-12345678')
    >>> inv.add(InventoryFile('123-123', 'hello.c', ROOT_ID))
    InventoryFile('123-123', 'hello.c', parent_id='TREE_ROOT-12345678-12345678')
    """
    def __contains__(self, file_id):
        """True if this entry contains a file with given id.

        >>> inv = Inventory()
        >>> inv.add(InventoryFile('123', 'foo.c', ROOT_ID))
        InventoryFile('123', 'foo.c', parent_id='TREE_ROOT', sha1=None, len=None, revision=None)
        >>> '123' in inv
        True
        >>> '456' in inv
        False

        Note that this method along with __iter__ are not encouraged for use as
        they are less clear than specific query methods - they may be removed
        in the future.
        """
        return self.has_id(file_id)

    def has_filename(self, filename):
        return bool(self.path2id(filename))
    def id2path(self, file_id):
        """Return as a string the path to file_id.

        >>> i = Inventory()
        >>> e = i.add(InventoryDirectory('src-id', 'src', ROOT_ID))
        >>> e = i.add(InventoryFile('foo-id', 'foo.c', parent_id='src-id'))
        >>> print i.id2path('foo-id')
        src/foo.c

        :raises NoSuchId: If file_id is not present in the inventory.
        """
        # get all names, skipping root
        return '/'.join(reversed(
            [parent.name for parent in
                self._iter_file_id_parents(file_id)][:-1]))
    def iter_entries(self, from_dir=None, recursive=True):
        """Return (path, entry) pairs, in order by name.

        :param from_dir: if None, start from the root,
          otherwise start from this directory (either file-id or entry)
        :param recursive: recurse into directories or not
        """
        if from_dir is None:
            if self.root is None:
                return
            from_dir = self.root
            yield '', self.root
        elif isinstance(from_dir, basestring):
            from_dir = self[from_dir]

        # unrolling the recursive calls changed the time from
        # 440ms/663ms (inline/total) to 116ms/116ms
        children = from_dir.children.items()
        children.sort()
        if not recursive:
            for name, ie in children:
                yield name, ie
            return
        children = collections.deque(children)
        stack = [(u'', children)]
        while stack:
            from_dir_relpath, children = stack[-1]

            while children:
                name, ie = children.popleft()

                # we know that from_dir_relpath never ends in a slash
                # and 'f' doesn't begin with one, we can do a string op, rather
                # than the checks of pathjoin(), though this means that all paths
                # start with a slash
                path = from_dir_relpath + '/' + name

                yield path[1:], ie

                if ie.kind != 'directory':
                    continue

                # But do this child first
                new_children = ie.children.items()
                new_children.sort()
                new_children = collections.deque(new_children)
                stack.append((path, new_children))
                # Break out of inner loop, so that we start outer loop with child
                break
            else:
                # if we finished all children, pop it off the stack
                stack.pop()
    def iter_entries_by_dir(self, from_dir=None, specific_file_ids=None,
        yield_parents=False):
        """Iterate over the entries in a directory first order.

        This returns all entries for a directory before returning
        the entries for children of a directory. This is not
        lexicographically sorted order, and is a hybrid between
        depth-first and breadth-first.

        :param yield_parents: If True, yield the parents from the root leading
            down to specific_file_ids that have been requested. This has no
            impact if specific_file_ids is None.
        :return: This yields (path, entry) pairs
        """
        if specific_file_ids and not isinstance(specific_file_ids, set):
            specific_file_ids = set(specific_file_ids)
        # TODO? Perhaps this should return the from_dir so that the root is
        # yielded? or maybe an option?
        if from_dir is None:
            if self.root is None:
                return
            # Optimize a common case
            if (not yield_parents and specific_file_ids is not None and
                len(specific_file_ids) == 1):
                file_id = list(specific_file_ids)[0]
                if file_id in self:
                    yield self.id2path(file_id), self[file_id]
                return
            from_dir = self.root
            if (specific_file_ids is None or yield_parents or
                self.root.file_id in specific_file_ids):
                yield u'', self.root
        elif isinstance(from_dir, basestring):
            from_dir = self[from_dir]

        if specific_file_ids is not None:
            # TODO: jam 20070302 This could really be done as a loop rather
            #       than a bunch of recursive calls.
            parents = set()
            byid = self
            def add_ancestors(file_id):
                if file_id not in byid:
                    return
                parent_id = byid[file_id].parent_id
                if parent_id is None:
                    return
                if parent_id not in parents:
                    parents.add(parent_id)
                    add_ancestors(parent_id)
            for file_id in specific_file_ids:
                add_ancestors(file_id)
        else:
            parents = None

        stack = [(u'', from_dir)]
        while stack:
            cur_relpath, cur_dir = stack.pop()

            child_dirs = []
            for child_name, child_ie in sorted(cur_dir.children.iteritems()):

                child_relpath = cur_relpath + child_name

                if (specific_file_ids is None or
                    child_ie.file_id in specific_file_ids or
                    (yield_parents and child_ie.file_id in parents)):
                    yield child_relpath, child_ie

                if child_ie.kind == 'directory':
                    if parents is None or child_ie.file_id in parents:
                        child_dirs.append((child_relpath+'/', child_ie))
            stack.extend(reversed(child_dirs))
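
    # A hedged sketch (not from the original source) of the ordering
    # difference between iter_entries() and iter_entries_by_dir(); the ids
    # below are hypothetical.
    #
    #   inv = Inventory('TREE_ROOT')
    #   inv.add(InventoryDirectory('a-id', 'a', 'TREE_ROOT'))
    #   inv.add(InventoryFile('b-id', 'b', 'TREE_ROOT'))
    #   inv.add(InventoryFile('c-id', 'c', 'a-id'))
    #   [p for p, e in inv.iter_entries()]         # depth-first: ['', 'a', 'a/c', 'b']
    #   [p for p, e in inv.iter_entries_by_dir()]  # by directory: ['', 'a', 'b', 'a/c']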
    def _make_delta(self, old):
        """Make an inventory delta from two inventories."""
        old_ids = set(old)
        new_ids = set(self)
        adds = new_ids - old_ids
        deletes = old_ids - new_ids
        common = old_ids.intersection(new_ids)
        delta = []
        for file_id in deletes:
            delta.append((old.id2path(file_id), None, file_id, None))
        for file_id in adds:
            delta.append((None, self.id2path(file_id), file_id, self[file_id]))
        for file_id in common:
            if old[file_id] != self[file_id]:
                delta.append((old.id2path(file_id), self.id2path(file_id),
                    file_id, self[file_id]))
        return delta
    def _get_mutable_inventory(self):
        """Returns a mutable copy of the object.

        Some inventories are immutable, yet working trees, for example, need
        to mutate existing inventories instead of creating a new one.
        """
        raise NotImplementedError(self._get_mutable_inventory)

    def make_entry(self, kind, name, parent_id, file_id=None):
        """Simple thunk to bzrlib.inventory.make_entry."""
        return make_entry(kind, name, parent_id, file_id)
    def __init__(self, root_id=ROOT_ID):
        """Create or read an inventory.

        If a working directory is specified, the inventory is read
        from there. If the file is specified, read from that. If not,
        the inventory is created empty.

        The inventory is created with a default root directory, with
        an id of None.
        """
        # We are letting Branch.initialize() create a unique inventory
        # root id. Rather than generating a random one here.
        # root_id = bzrlib.branch.gen_file_id('TREE_ROOT')
        self.root = RootEntry(root_id)
        self._byid = {self.root.file_id: self.root}

    def copy(self):
        other = Inventory(self.root.file_id)
        # copy recursively so we know directories will be added before
        # their children. There are more efficient ways than this...
        for path, entry in self.iter_entries():
            if entry == self.root:
                continue
            other.add(entry.copy())
        return other

    def __iter__(self):
        return iter(self._byid)

    def __len__(self):
        """Returns number of entries."""
        return len(self._byid)

    def iter_entries(self, from_dir=None):
        """Return (path, entry) pairs, in order by name."""
        if from_dir is None:
            if self.root is None:
                return
            from_dir = self.root
        elif isinstance(from_dir, basestring):
            from_dir = self._byid[from_dir]

        kids = from_dir.children.items()
        kids.sort()
        for name, ie in kids:
            yield name, ie
            if ie.kind == 'directory':
                for cn, cie in self.iter_entries(from_dir=ie.file_id):
                    yield os.path.join(name, cn), cie
    def entries(self):
        """Return list of (path, ie) for all entries except the root.
        """
        accum = []
        def descend(dir_ie, dir_path):
            kids = dir_ie.children.items()
            kids.sort()
            for name, ie in kids:
                child_path = osutils.pathjoin(dir_path, name)
                accum.append((child_path, ie))
                if ie.kind == 'directory':
                    descend(ie, child_path)

        descend(self.root, u'')
        return accum
    def directories(self):
        """Return (path, entry) pairs for all directories, including the root.
        """
        accum = []
        def descend(parent_ie, parent_path):
            accum.append((parent_path, parent_ie))

            kids = [(ie.name, ie) for ie in parent_ie.children.itervalues()
                    if ie.kind == 'directory']
            kids.sort()

            for name, child_ie in kids:
                child_path = osutils.pathjoin(parent_path, name)
                descend(child_ie, child_path)
        descend(self.root, u'')
        return accum
    def path2id(self, name):
        """Walk down through directories to return entry of last component.

        names may be either a list of path components, or a single
        string, in which case it is automatically split.

        This returns the entry of the last component in the path,
        which may be either a file or a directory.

        Returns None IFF the path is not found.
        """
        if isinstance(name, basestring):
            name = osutils.splitpath(name)

        # mutter("lookup path %r" % name)

        try:
            parent = self.root
        except errors.NoSuchId:
            # root doesn't exist yet so nothing else can
            return None
        if parent is None:
            return None
        for f in name:
            children = getattr(parent, 'children', None)
            if children is None:
                return None
            try:
                parent = children[f]
            except KeyError:
                return None

        return parent.file_id
    def filter(self, specific_fileids):
        """Get an inventory view filtered against a set of file-ids.

        Children of directories and parents are included.

        The result may or may not reference the underlying inventory
        so it should be treated as immutable.
        """
        interesting_parents = set()
        for fileid in specific_fileids:
            try:
                interesting_parents.update(self.get_idpath(fileid))
            except errors.NoSuchId:
                # This fileid is not in the inventory - that's ok
                pass
        entries = self.iter_entries()
        if self.root is None:
            return Inventory(root_id=None)
        other = Inventory(entries.next()[1].file_id)
        other.root.revision = self.root.revision
        other.revision_id = self.revision_id
        directories_to_expand = set()
        for path, entry in entries:
            file_id = entry.file_id
            if (file_id in specific_fileids
                or entry.parent_id in directories_to_expand):
                if entry.kind == 'directory':
                    directories_to_expand.add(file_id)
            elif file_id not in interesting_parents:
                continue
            other.add(entry.copy())
        return other
    def get_idpath(self, file_id):
        """Return a list of file_ids for the path to an entry.

        The list contains one element for each directory followed by
        the id of the file itself. So the length of the returned list
        is equal to the depth of the file in the tree, counting the
        root directory as depth 1.
        """
        p = []
        for parent in self._iter_file_id_parents(file_id):
            p.insert(0, parent.file_id)
        return p
class Inventory(CommonInventory):
    """Mutable dict based in-memory inventory.

    We never store the full path to a file, because renaming a directory
    implicitly moves all of its contents. This class internally maintains a
    lookup tree that allows the children under a directory to be
    returned quickly.

    >>> inv = Inventory()
    >>> inv.add(InventoryFile('123-123', 'hello.c', ROOT_ID))
    InventoryFile('123-123', 'hello.c', parent_id='TREE_ROOT', sha1=None, len=None, revision=None)
    >>> inv['123-123'].name
    'hello.c'

    Id's may be looked up from paths:

    >>> inv.path2id('hello.c')
    '123-123'
    >>> '123-123' in inv
    True

    There are iterators over the contents:

    >>> [entry[0] for entry in inv.iter_entries()]
    ['', u'hello.c']
    """
    def __init__(self, root_id=ROOT_ID, revision_id=None):
        """Create or read an inventory.

        If a working directory is specified, the inventory is read
        from there. If the file is specified, read from that. If not,
        the inventory is created empty.

        The inventory is created with a default root directory, with
        an id of None.
        """
        if root_id is not None:
            self._set_root(InventoryDirectory(root_id, u'', None))
        else:
            self.root = None
            self._byid = {}
        self.revision_id = revision_id

    def __repr__(self):
        # More than one page of output is not useful anymore to debug
        max_len = 2048
        closing = '...}'
        contents = repr(self._byid)
        if len(contents) > max_len:
            contents = contents[:(max_len-len(closing))] + closing
        return "<Inventory object at %x, contents=%r>" % (id(self), contents)
    def apply_delta(self, delta):
        """Apply a delta to this inventory.

        See the inventory developers documentation for the theory behind
        inventory deltas.

        If delta application fails the inventory is left in an indeterminate
        state and must not be used.

        :param delta: A list of changes to apply. After all the changes are
            applied the final inventory must be internally consistent, but it
            is ok to supply changes which, if only half-applied would have an
            invalid result - such as supplying two changes which rename two
            files, 'A' and 'B' with each other : [('A', 'B', 'A-id', a_entry),
            ('B', 'A', 'B-id', b_entry)].

            Each change is a tuple, of the form (old_path, new_path, file_id,
            new_entry).

            When new_path is None, the change indicates the removal of an entry
            from the inventory and new_entry will be ignored (using None is
            appropriate). If new_path is not None, then new_entry must be an
            InventoryEntry instance, which will be incorporated into the
            inventory (and replace any existing entry with the same file id).

            When old_path is None, the change indicates the addition of
            a new entry to the inventory.

            When neither new_path nor old_path are None, the change is a
            modification to an entry, such as a rename, reparent, kind change
            etc.

            The children attribute of new_entry is ignored. This is because
            this method preserves children automatically across alterations to
            the parent of the children, and cases where the parent id of a
            child is changing require the child to be passed in as a separate
            change regardless. E.g. in the recursive deletion of a directory -
            the directory's children must be included in the delta, or the
            final inventory will be invalid.

            Note that a file_id must only appear once within a given delta.
            An AssertionError is raised otherwise.
        """
        # Check that the delta is legal. It would be nice if this could be
        # done within the loops below but it's safer to validate the delta
        # before starting to mutate the inventory, as there isn't a rollback
        # facility.
        list(_check_delta_unique_ids(_check_delta_unique_new_paths(
            _check_delta_unique_old_paths(_check_delta_ids_match_entry(
            _check_delta_ids_are_valid(
            _check_delta_new_path_entry_both_or_None(
            delta)))))))

        children = {}
        # Remove all affected items which were in the original inventory,
        # starting with the longest paths, thus ensuring parents are examined
        # after their children, which means that everything we examine has no
        # modified children remaining by the time we examine it.
        for old_path, file_id in sorted(((op, f) for op, np, f, e in delta
                                        if op is not None), reverse=True):
            # Preserve unaltered children of file_id for later reinsertion.
            file_id_children = getattr(self[file_id], 'children', {})
            if len(file_id_children):
                children[file_id] = file_id_children
            if self.id2path(file_id) != old_path:
                raise errors.InconsistentDelta(old_path, file_id,
                    "Entry was at wrong other path %r." % self.id2path(file_id))
            # Remove file_id and the unaltered children. If file_id is not
            # being deleted it will be reinserted back later.
            self.remove_recursive_id(file_id)
        # Insert all affected which should be in the new inventory, reattaching
        # their children if they had any. This is done from shortest path to
        # longest, ensuring that items which were modified and whose parents in
        # the resulting inventory were also modified, are inserted after their
        # parents.
        for new_path, f, new_entry in sorted((np, f, e) for op, np, f, e in
                                             delta if np is not None):
            if new_entry.kind == 'directory':
                # Pop the child to allow detection of children whose
                # parents were deleted and which were not reattached to a new
                # parent.
                replacement = InventoryDirectory(new_entry.file_id,
                    new_entry.name, new_entry.parent_id)
                replacement.revision = new_entry.revision
                replacement.children = children.pop(replacement.file_id, {})
                new_entry = replacement
            try:
                self.add(new_entry)
            except errors.DuplicateFileId:
                raise errors.InconsistentDelta(new_path, new_entry.file_id,
                    "New id is already present in target.")
            except AttributeError:
                raise errors.InconsistentDelta(new_path, new_entry.file_id,
                    "Parent is not a directory.")
            if self.id2path(new_entry.file_id) != new_path:
                raise errors.InconsistentDelta(new_path, new_entry.file_id,
                    "New path is not consistent with parent path.")
        if len(children):
            # Get the parent id that was deleted
            parent_id, children = children.popitem()
            raise errors.InconsistentDelta("<deleted>", parent_id,
                "The file id was deleted but its children were not deleted.")
    def _set_root(self, ie):
        self.root = ie
        self._byid = {self.root.file_id: self.root}

    def copy(self):
        # TODO: jam 20051218 Should copy also copy the revision_id?
        entries = self.iter_entries()
        if self.root is None:
            return Inventory(root_id=None)
        other = Inventory(entries.next()[1].file_id)
        other.root.revision = self.root.revision
        # copy recursively so we know directories will be added before
        # their children. There are more efficient ways than this...
        for path, entry in entries:
            other.add(entry.copy())
        return other

    def _get_mutable_inventory(self):
        """See CommonInventory._get_mutable_inventory."""
        return copy.deepcopy(self)
    def __iter__(self):
        """Iterate over all file-ids."""
        return iter(self._byid)

    def iter_just_entries(self):
        """Iterate over all entries.

        Unlike iter_entries(), just the entries are returned (not (path, ie))
        and the order of entries is undefined.

        XXX: We may not want to merge this into bzr.dev.
        """
        if self.root is None:
            return
        for _, ie in self._byid.iteritems():
            yield ie

    def __len__(self):
        """Returns number of entries."""
        return len(self._byid)
    def __getitem__(self, file_id):
        """Return the entry for given file_id.

        >>> inv = Inventory()
        >>> inv.add(InventoryFile('123123', 'hello.c', ROOT_ID))
        InventoryFile('123123', 'hello.c', parent_id='TREE_ROOT', sha1=None, len=None, revision=None)
        >>> inv['123123'].name
        'hello.c'
        """
        try:
            return self._byid[file_id]
        except KeyError:
            # really we're passing an inventory, not a tree...
            raise errors.NoSuchId(self, file_id)

    def get_file_kind(self, file_id):
        return self._byid[file_id].kind
        del old_parent.children[file_ie.name]
        new_parent.children[new_name] = file_ie

        file_ie.name = new_name
        file_ie.parent_id = new_parent_id

    def is_root(self, file_id):
        return self.root is not None and file_id == self.root.file_id
class CHKInventory(CommonInventory):
    """An inventory persisted in a CHK store.

    By design, a CHKInventory is immutable so many of the methods
    supported by Inventory - add, rename, apply_delta, etc - are *not*
    supported. To create a new CHKInventory, use create_by_apply_delta()
    or from_inventory(), say.

    Internally, a CHKInventory has one or two CHKMaps:

    * id_to_entry - a map from (file_id,) => InventoryEntry as bytes
    * parent_id_basename_to_file_id - a map from (parent_id, basename_utf8)
      => file_id as bytes

    The second map is optional and not present in early CHKRepository's.

    No caching is performed: every method call or item access will perform
    requests to the storage layer. As such, keep references to objects you
    want to reuse.
    """

    def __init__(self, search_key_name):
        CommonInventory.__init__(self)
        self._fileid_to_entry_cache = {}
        self._path_to_fileid_cache = {}
        self._search_key_name = search_key_name
    def __eq__(self, other):
        """Compare two sets by comparing their contents."""
        if not isinstance(other, CHKInventory):
            return NotImplemented

        this_key = self.id_to_entry.key()
        other_key = other.id_to_entry.key()
        this_pid_key = self.parent_id_basename_to_file_id.key()
        other_pid_key = other.parent_id_basename_to_file_id.key()
        if None in (this_key, this_pid_key, other_key, other_pid_key):
            return False
        return this_key == other_key and this_pid_key == other_pid_key
    def _entry_to_bytes(self, entry):
        """Serialise entry as a single bytestring.

        :param Entry: An inventory entry.
        :return: A bytestring for the entry.

        ENTRY ::= FILE | DIR | SYMLINK | TREE
        FILE ::= "file: " COMMON SEP SHA SEP SIZE SEP EXECUTABLE
        DIR ::= "dir: " COMMON
        SYMLINK ::= "symlink: " COMMON SEP TARGET_UTF8
        TREE ::= "tree: " COMMON REFERENCE_REVISION
        COMMON ::= FILE_ID SEP PARENT_ID SEP NAME_UTF8 SEP REVISION
        """
        if entry.parent_id is not None:
            parent_str = entry.parent_id
        else:
            parent_str = ''
        name_str = entry.name.encode("utf8")
        if entry.kind == 'file':
            if entry.executable:
                exec_str = "Y"
            else:
                exec_str = "N"
            return "file: %s\n%s\n%s\n%s\n%s\n%d\n%s" % (
                entry.file_id, parent_str, name_str, entry.revision,
                entry.text_sha1, entry.text_size, exec_str)
        elif entry.kind == 'directory':
            return "dir: %s\n%s\n%s\n%s" % (
                entry.file_id, parent_str, name_str, entry.revision)
        elif entry.kind == 'symlink':
            return "symlink: %s\n%s\n%s\n%s\n%s" % (
                entry.file_id, parent_str, name_str, entry.revision,
                entry.symlink_target.encode("utf8"))
        elif entry.kind == 'tree-reference':
            return "tree: %s\n%s\n%s\n%s\n%s" % (
                entry.file_id, parent_str, name_str, entry.revision,
                entry.reference_revision)
        else:
            raise ValueError("unknown kind %r" % entry.kind)
    def _bytes_to_utf8name_key(bytes):
        """Get the file_id, revision_id key out of bytes."""
        # We don't normally care about name, except for times when we want
        # to filter out empty names because of non rich-root...
        sections = bytes.split('\n')
        kind, file_id = sections[0].split(': ')
        return (sections[2], file_id, sections[3])
    def _bytes_to_entry(self, bytes):
        """Deserialise a serialised entry."""
        sections = bytes.split('\n')
        if sections[0].startswith("file: "):
            result = InventoryFile(sections[0][6:],
                sections[2].decode('utf8'),
                sections[1])
            result.text_sha1 = sections[4]
            result.text_size = int(sections[5])
            result.executable = sections[6] == "Y"
        elif sections[0].startswith("dir: "):
            result = CHKInventoryDirectory(sections[0][5:],
                sections[2].decode('utf8'),
                sections[1], self)
        elif sections[0].startswith("symlink: "):
            result = InventoryLink(sections[0][9:],
                sections[2].decode('utf8'),
                sections[1])
            result.symlink_target = sections[4].decode('utf8')
        elif sections[0].startswith("tree: "):
            result = TreeReference(sections[0][6:],
                sections[2].decode('utf8'),
                sections[1])
            result.reference_revision = sections[4]
        else:
            raise ValueError("Not a serialised entry %r" % bytes)
        result.revision = sections[3]
        if result.parent_id == '':
            result.parent_id = None
        self._fileid_to_entry_cache[result.file_id] = result
        return result
    def _get_mutable_inventory(self):
        """See CommonInventory._get_mutable_inventory."""
        entries = self.iter_entries()
        inv = Inventory(None, self.revision_id)
        for path, inv_entry in entries:
            inv.add(inv_entry.copy())
        return inv
    def create_by_apply_delta(self, inventory_delta, new_revision_id,
        propagate_caches=False):
        """Create a new CHKInventory by applying inventory_delta to this one.

        See the inventory developers documentation for the theory behind
        inventory deltas.

        :param inventory_delta: The inventory delta to apply. See
            Inventory.apply_delta for details.
        :param new_revision_id: The revision id of the resulting CHKInventory.
        :param propagate_caches: If True, the caches for this inventory are
            copied to and updated for the result.
        :return: The new CHKInventory.
        """
        split = osutils.split
        result = CHKInventory(self._search_key_name)
        if propagate_caches:
            # Just propagate the path-to-fileid cache for now
            result._path_to_fileid_cache = dict(self._path_to_fileid_cache.iteritems())
        search_key_func = chk_map.search_key_registry.get(self._search_key_name)
        self.id_to_entry._ensure_root()
        maximum_size = self.id_to_entry._root_node.maximum_size
        result.revision_id = new_revision_id
        result.id_to_entry = chk_map.CHKMap(
            self.id_to_entry._store,
            self.id_to_entry.key(),
            search_key_func=search_key_func)
        result.id_to_entry._ensure_root()
        result.id_to_entry._root_node.set_maximum_size(maximum_size)
        # Change to apply to the parent_id_basename delta. The dict maps
        # (parent_id, basename) -> (old_key, new_value). We use a dict because
        # when a path has its id replaced (e.g. the root is changed, or someone
        # does bzr mv a b, bzr mv c a, we should output a single change to this
        # map rather than two.
        parent_id_basename_delta = {}
        if self.parent_id_basename_to_file_id is not None:
            result.parent_id_basename_to_file_id = chk_map.CHKMap(
                self.parent_id_basename_to_file_id._store,
                self.parent_id_basename_to_file_id.key(),
                search_key_func=search_key_func)
            result.parent_id_basename_to_file_id._ensure_root()
            self.parent_id_basename_to_file_id._ensure_root()
            result_p_id_root = result.parent_id_basename_to_file_id._root_node
            p_id_root = self.parent_id_basename_to_file_id._root_node
            result_p_id_root.set_maximum_size(p_id_root.maximum_size)
            result_p_id_root._key_width = p_id_root._key_width
        else:
            result.parent_id_basename_to_file_id = None
        result.root_id = self.root_id
        id_to_entry_delta = []
        # inventory_delta is only traversed once, so we just update the
        # variable.
        # Check for repeated file ids
        inventory_delta = _check_delta_unique_ids(inventory_delta)
        # Repeated old paths
        inventory_delta = _check_delta_unique_old_paths(inventory_delta)
        # Check for repeated new paths
        inventory_delta = _check_delta_unique_new_paths(inventory_delta)
        # Check for entries that don't match the fileid
        inventory_delta = _check_delta_ids_match_entry(inventory_delta)
        # Check for nonsense fileids
        inventory_delta = _check_delta_ids_are_valid(inventory_delta)
        # Check for new_path <-> entry consistency
        inventory_delta = _check_delta_new_path_entry_both_or_None(
            inventory_delta)
        # All changed entries need to have their parents be directories and be
        # at the right path. This set contains (path, id) tuples.
        parents = set()
        # When we delete an item, all the children of it must be either deleted
        # or altered in their own right. As we batch process the change via
        # CHKMap.apply_delta, we build a set of things to use to validate the
        # delta.
        deletes = set()
        altered = set()
        for old_path, new_path, file_id, entry in inventory_delta:
            # file id changes
            if new_path == '':
                result.root_id = file_id
            if new_path is None:
                # Make a delete:
                new_key = None
                new_value = None
                # Update caches
                if propagate_caches:
                    try:
                        del result._path_to_fileid_cache[old_path]
                    except KeyError:
                        pass
                deletes.add(file_id)
            else:
                new_key = (file_id,)
                new_value = result._entry_to_bytes(entry)
                # Update caches. It's worth doing this whether
                # we're propagating the old caches or not.
                result._path_to_fileid_cache[new_path] = file_id
                parents.add((split(new_path)[0], entry.parent_id))
            if old_path is None:
                old_key = None
            else:
                old_key = (file_id,)
                if self.id2path(file_id) != old_path:
                    raise errors.InconsistentDelta(old_path, file_id,
                        "Entry was at wrong other path %r." %
                        self.id2path(file_id))
                altered.add(file_id)
            id_to_entry_delta.append((old_key, new_key, new_value))
            if result.parent_id_basename_to_file_id is not None:
                # parent_id, basename changes
                if old_path is None:
                    old_key = None
                else:
                    old_entry = self[file_id]
                    old_key = self._parent_id_basename_key(old_entry)
                if new_path is None:
                    new_key = None
                    new_value = None
                else:
                    new_key = self._parent_id_basename_key(entry)
                    new_value = file_id
                # If the two keys are the same, the value will be unchanged
                # as its always the file id for this entry.
                if old_key != new_key:
                    # Transform a change into explicit delete/add preserving
                    # a possible match on the key from a different file id.
                    if old_key is not None:
                        parent_id_basename_delta.setdefault(
                            old_key, [None, None])[0] = old_key
                    if new_key is not None:
                        parent_id_basename_delta.setdefault(
                            new_key, [None, None])[1] = new_value
        # validate that deletes are complete.
        for file_id in deletes:
            entry = self[file_id]
            if entry.kind != 'directory':
                continue
            # This loop could potentially be better by using the id_basename
            # map to just get the child file ids.
            for child in entry.children.values():
                if child.file_id not in altered:
                    raise errors.InconsistentDelta(self.id2path(child.file_id),
                        child.file_id, "Child not deleted or reparented when "
                        "parent deleted.")
        result.id_to_entry.apply_delta(id_to_entry_delta)
        if parent_id_basename_delta:
            # Transform the parent_id_basename delta data into a linear delta
            # with only one record for a given key. Optimally this would allow
            # re-keying, but its simpler to just output that as a delete+add
            # to spend less time calculating the delta.
            delta_list = []
            for key, (old_key, value) in parent_id_basename_delta.iteritems():
                if value is not None:
                    delta_list.append((old_key, key, value))
                else:
                    delta_list.append((old_key, None, None))
            result.parent_id_basename_to_file_id.apply_delta(delta_list)
        parents.discard(('', None))
        for parent_path, parent in parents:
            try:
                if result[parent].kind != 'directory':
                    raise errors.InconsistentDelta(result.id2path(parent), parent,
                        'Not a directory, but given children')
            except errors.NoSuchId:
                raise errors.InconsistentDelta("<unknown>", parent,
                    "Parent is not present in resulting inventory.")
            if result.path2id(parent_path) != parent:
                raise errors.InconsistentDelta(parent_path, parent,
                    "Parent has wrong path %r." % result.path2id(parent_path))
        return result
    def deserialise(klass, chk_store, bytes, expected_revision_id):
        """Deserialise a CHKInventory.

        :param chk_store: A CHK capable VersionedFiles instance.
        :param bytes: The serialised bytes.
        :param expected_revision_id: The revision ID we think this inventory is
            for.
        :return: A CHKInventory
        """
        lines = bytes.split('\n')
        if lines[-1] != '':
            raise AssertionError('bytes to deserialize must end with an eol')
        lines.pop()
        if lines[0] != 'chkinventory:':
            raise ValueError("not a serialised CHKInventory: %r" % bytes)
        info = {}
        allowed_keys = frozenset(['root_id', 'revision_id', 'search_key_name',
                                  'parent_id_basename_to_file_id',
                                  'id_to_entry'])
        for line in lines[1:]:
            key, value = line.split(': ', 1)
            if key not in allowed_keys:
                raise errors.BzrError('Unknown key in inventory: %r\n%r'
                                      % (key, bytes))
            if key in info:
                raise errors.BzrError('Duplicate key in inventory: %r\n%r'
                                      % (key, bytes))
            info[key] = value
        revision_id = info['revision_id']
        root_id = info['root_id']
        search_key_name = info.get('search_key_name', 'plain')
        parent_id_basename_to_file_id = info.get(
            'parent_id_basename_to_file_id', None)
        id_to_entry = info['id_to_entry']

        result = CHKInventory(search_key_name)
        result.revision_id = revision_id
        result.root_id = root_id
        search_key_func = chk_map.search_key_registry.get(
            result._search_key_name)
        if parent_id_basename_to_file_id is not None:
            result.parent_id_basename_to_file_id = chk_map.CHKMap(
                chk_store, (parent_id_basename_to_file_id,),
                search_key_func=search_key_func)
        else:
            result.parent_id_basename_to_file_id = None

        result.id_to_entry = chk_map.CHKMap(chk_store, (id_to_entry,),
                                            search_key_func=search_key_func)
        if (result.revision_id,) != expected_revision_id:
            raise ValueError("Mismatched revision id and expected: %r, %r" %
                (result.revision_id, expected_revision_id))
        return result
    def from_inventory(klass, chk_store, inventory, maximum_size=0, search_key_name='plain'):
        """Create a CHKInventory from an existing inventory.

        The content of inventory is copied into the chk_store, and a
        CHKInventory referencing that is returned.

        :param chk_store: A CHK capable VersionedFiles instance.
        :param inventory: The inventory to copy.
        :param maximum_size: The CHKMap node size limit.
        :param search_key_name: The identifier for the search key function
        """
        result = klass(search_key_name)
        result.revision_id = inventory.revision_id
        result.root_id = inventory.root.file_id

        entry_to_bytes = result._entry_to_bytes
        parent_id_basename_key = result._parent_id_basename_key
        id_to_entry_dict = {}
        parent_id_basename_dict = {}
        for path, entry in inventory.iter_entries():
            id_to_entry_dict[(entry.file_id,)] = entry_to_bytes(entry)
            p_id_key = parent_id_basename_key(entry)
            parent_id_basename_dict[p_id_key] = entry.file_id

        result._populate_from_dicts(chk_store, id_to_entry_dict,
            parent_id_basename_dict, maximum_size=maximum_size)
        return result
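
    # Hedged usage sketch (not from the original source): given a CHK-capable
    # VersionedFiles store (the name `chk_store` below is hypothetical), an
    # ordinary Inventory can be converted and queried without mutating it:
    #
    #   chk_inv = CHKInventory.from_inventory(chk_store, inv, maximum_size=4096)
    #   chk_inv.path2id('hello.c') == inv.path2id('hello.c')   # True
    #   chk_inv[inv.path2id('hello.c')].kind                   # 'file'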
    def _populate_from_dicts(self, chk_store, id_to_entry_dict,
                             parent_id_basename_dict, maximum_size):
        search_key_func = chk_map.search_key_registry.get(self._search_key_name)
        root_key = chk_map.CHKMap.from_dict(chk_store, id_to_entry_dict,
                   maximum_size=maximum_size, key_width=1,
                   search_key_func=search_key_func)
        self.id_to_entry = chk_map.CHKMap(chk_store, root_key,
                                          search_key_func)
        root_key = chk_map.CHKMap.from_dict(chk_store,
                   parent_id_basename_dict,
                   maximum_size=maximum_size, key_width=2,
                   search_key_func=search_key_func)
        self.parent_id_basename_to_file_id = chk_map.CHKMap(chk_store,
                                                            root_key, search_key_func)
    def _parent_id_basename_key(self, entry):
        """Create a key for a entry in a parent_id_basename_to_file_id index."""
        if entry.parent_id is not None:
            parent_id = entry.parent_id
        else:
            parent_id = ''
        return parent_id, entry.name.encode('utf8')
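
    # Illustration (hypothetical entry): an entry named 'hello.c' whose parent
    # directory has file id 'src-id' is keyed as
    #
    #   self._parent_id_basename_key(InventoryFile('f-id', 'hello.c', 'src-id'))
    #   # -> ('src-id', 'hello.c')
    #
    # while the root entry (parent_id None, name '') falls back to ('', '').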
    def __getitem__(self, file_id):
        """map a single file_id -> InventoryEntry."""
        if file_id is None:
            raise errors.NoSuchId(self, file_id)
        result = self._fileid_to_entry_cache.get(file_id, None)
        if result is not None:
            return result
        try:
            return self._bytes_to_entry(
                self.id_to_entry.iteritems([(file_id,)]).next()[1])
        except StopIteration:
            # really we're passing an inventory, not a tree...
            raise errors.NoSuchId(self, file_id)
    def has_id(self, file_id):
        # Perhaps have an explicit 'contains' method on CHKMap ?
        if self._fileid_to_entry_cache.get(file_id, None) is not None:
            return True
        return len(list(self.id_to_entry.iteritems([(file_id,)]))) == 1

    def is_root(self, file_id):
        return file_id == self.root_id
    def _iter_file_id_parents(self, file_id):
        """Yield the parents of file_id up to the root."""
        while file_id is not None:
            try:
                ie = self[file_id]
            except KeyError:
                raise errors.NoSuchId(tree=self, file_id=file_id)
            yield ie
            file_id = ie.parent_id
    def __iter__(self):
        """Iterate over all file-ids."""
        for key, _ in self.id_to_entry.iteritems():
            yield key[-1]
    def iter_just_entries(self):
        """Iterate over all entries.

        Unlike iter_entries(), just the entries are returned (not (path, ie))
        and the order of entries is undefined.

        XXX: We may not want to merge this into bzr.dev.
        """
        for key, entry in self.id_to_entry.iteritems():
            file_id = key[0]
            ie = self._fileid_to_entry_cache.get(file_id, None)
            if ie is None:
                ie = self._bytes_to_entry(entry)
                self._fileid_to_entry_cache[file_id] = ie
            yield ie
    def iter_changes(self, basis):
        """Generate a Tree.iter_changes change list between this and basis.

        :param basis: Another CHKInventory.
        :return: An iterator over the changes between self and basis, as per
            tree.iter_changes().
        """
        # We want: (file_id, (path_in_source, path_in_target),
        #           changed_content, versioned, parent, name, kind,
        #           executable)
        for key, basis_value, self_value in \
            self.id_to_entry.iter_changes(basis.id_to_entry):
            file_id = key[0]
            if basis_value is not None:
                basis_entry = basis._bytes_to_entry(basis_value)
                path_in_source = basis.id2path(file_id)
                basis_parent = basis_entry.parent_id
                basis_name = basis_entry.name
                basis_executable = basis_entry.executable
            else:
                path_in_source = None
                basis_parent = None
                basis_name = None
                basis_executable = None
            if self_value is not None:
                self_entry = self._bytes_to_entry(self_value)
                path_in_target = self.id2path(file_id)
                self_parent = self_entry.parent_id
                self_name = self_entry.name
                self_executable = self_entry.executable
            else:
                path_in_target = None
                self_parent = None
                self_name = None
                self_executable = None
            if basis_value is None:
                # add
                kind = (None, self_entry.kind)
                versioned = (False, True)
            elif self_value is None:
                # delete
                kind = (basis_entry.kind, None)
                versioned = (True, False)
            else:
                kind = (basis_entry.kind, self_entry.kind)
                versioned = (True, True)
            changed_content = False
            if kind[0] != kind[1]:
                changed_content = True
            elif kind[0] == 'file':
                if (self_entry.text_size != basis_entry.text_size or
                    self_entry.text_sha1 != basis_entry.text_sha1):
                    changed_content = True
            elif kind[0] == 'symlink':
                if self_entry.symlink_target != basis_entry.symlink_target:
                    changed_content = True
            elif kind[0] == 'tree-reference':
                if (self_entry.reference_revision !=
                    basis_entry.reference_revision):
                    changed_content = True
            parent = (basis_parent, self_parent)
            name = (basis_name, self_name)
            executable = (basis_executable, self_executable)
            if (not changed_content
                and parent[0] == parent[1]
                and name[0] == name[1]
                and executable[0] == executable[1]):
                # Could happen when only the revision changed for a directory
                # for instance.
                continue
            yield (file_id, (path_in_source, path_in_target), changed_content,
                versioned, parent, name, kind, executable)
    def __len__(self):
        """Return the number of entries in the inventory."""
        return len(self.id_to_entry)
    def _make_delta(self, old):
        """Make an inventory delta from two inventories."""
        if type(old) != CHKInventory:
            return CommonInventory._make_delta(self, old)
        delta = []
        for key, old_value, self_value in \
            self.id_to_entry.iter_changes(old.id_to_entry):
            file_id = key[0]
            if old_value is not None:
                old_path = old.id2path(file_id)
            else:
                old_path = None
            if self_value is not None:
                entry = self._bytes_to_entry(self_value)
                self._fileid_to_entry_cache[file_id] = entry
                new_path = self.id2path(file_id)
            else:
                entry = None
                new_path = None
            delta.append((old_path, new_path, file_id, entry))
        return delta
    def path2id(self, name):
        """See CommonInventory.path2id()."""
        # TODO: perhaps support negative hits?
        result = self._path_to_fileid_cache.get(name, None)
        if result is not None:
            return result
        if isinstance(name, basestring):
            names = osutils.splitpath(name)
        else:
            names = name
        current_id = self.root_id
        if current_id is None:
            return None
        parent_id_index = self.parent_id_basename_to_file_id
        for basename in names:
            # TODO: Cache each path we figure out in this function.
            basename_utf8 = basename.encode('utf8')
            file_id = None
            key_filter = [(current_id, basename_utf8)]
            for (parent_id, name_utf8), file_id in parent_id_index.iteritems(
                key_filter=key_filter):
                if parent_id != current_id or name_utf8 != basename_utf8:
                    raise errors.BzrError("corrupt inventory lookup! "
                        "%r %r %r %r" % (parent_id, current_id, name_utf8,
                        basename_utf8))
            if file_id is None:
                return None
            current_id = file_id
        self._path_to_fileid_cache[name] = current_id
        return current_id
    def to_lines(self):
        """Serialise the inventory to lines."""
        lines = ["chkinventory:\n"]
        if self._search_key_name != 'plain':
            # custom ordering grouping things that don't change together
            lines.append('search_key_name: %s\n' % (self._search_key_name,))
            lines.append("root_id: %s\n" % self.root_id)
            lines.append('parent_id_basename_to_file_id: %s\n' %
                self.parent_id_basename_to_file_id.key())
            lines.append("revision_id: %s\n" % self.revision_id)
            lines.append("id_to_entry: %s\n" % self.id_to_entry.key())
        else:
            lines.append("revision_id: %s\n" % self.revision_id)
            lines.append("root_id: %s\n" % self.root_id)
            if self.parent_id_basename_to_file_id is not None:
                lines.append('parent_id_basename_to_file_id: %s\n' %
                    self.parent_id_basename_to_file_id.key())
            lines.append("id_to_entry: %s\n" % self.id_to_entry.key())
        return lines

    @property
    def root(self):
        """Get the root entry."""
        return self[self.root_id]
class CHKInventoryDirectory(InventoryDirectory):
    """A directory in an inventory."""

    __slots__ = ['text_sha1', 'text_size', 'file_id', 'name', 'kind',
                 'text_id', 'parent_id', '_children', 'executable',
                 'revision', 'symlink_target', 'reference_revision',
                 '_chk_inventory']

    def __init__(self, file_id, name, parent_id, chk_inventory):
        # Don't call InventoryDirectory.__init__ - it isn't right for this
        # class.
        InventoryEntry.__init__(self, file_id, name, parent_id)
        self._children = None
        self.kind = 'directory'
        self._chk_inventory = chk_inventory

    @property
    def children(self):
        """Access the list of children of this directory.

        With a parent_id_basename_to_file_id index, loads all the children;
        without one, it loads the entire index, which is bad. A more
        sophisticated proxy object might be nice, to allow partial loading of
        children as well when specific names are accessed. (So path traversal
        can be written in the obvious way but not examine siblings.).
        """
        if self._children is not None:
            return self._children
        # No longer supported
        if self._chk_inventory.parent_id_basename_to_file_id is None:
            raise AssertionError("Inventories without"
                " parent_id_basename_to_file_id are no longer supported")
        result = {}
        # XXX: Todo - use proxy objects for the children rather than loading
        # all when the attribute is referenced.
        parent_id_index = self._chk_inventory.parent_id_basename_to_file_id
        child_keys = set()
        for (parent_id, name_utf8), file_id in parent_id_index.iteritems(
            key_filter=[(self.file_id,)]):
            child_keys.add((file_id,))
        cached = set()
        for file_id_key in child_keys:
            entry = self._chk_inventory._fileid_to_entry_cache.get(
                file_id_key[0], None)
            if entry is not None:
                result[entry.name] = entry
                cached.add(file_id_key)
        child_keys.difference_update(cached)
        # populate; todo: do by name
        id_to_entry = self._chk_inventory.id_to_entry
        for file_id_key, bytes in id_to_entry.iteritems(child_keys):
            entry = self._chk_inventory._bytes_to_entry(bytes)
            result[entry.name] = entry
            self._chk_inventory._fileid_to_entry_cache[file_id_key[0]] = entry
        self._children = result
        return result
entry_factory = {
    'directory': InventoryDirectory,
    'file': InventoryFile,
    'symlink': InventoryLink,
    'tree-reference': TreeReference
}
def make_entry(kind, name, parent_id, file_id=None):
    """Create an inventory entry.

    :param kind: the type of inventory entry to create.
    :param name: the basename of the entry.
    :param parent_id: the parent_id of the entry.
    :param file_id: the file_id to use. if None, one will be created.
    """
    if file_id is None:
        file_id = generate_ids.gen_file_id(name)
    name = ensure_normalized_name(name)
    try:
        factory = entry_factory[kind]
    except KeyError:
        raise BzrError("unknown kind %r" % kind)
    return factory(file_id, name, parent_id)
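
# Hedged usage sketch (not part of the original source): make_entry picks the
# concrete class from entry_factory and generates a file id when none is
# given.  The ids below are hypothetical.
#
#   ie = make_entry('file', 'hello.c', 'src-id')
#   ie.kind                                         # -> 'file'
#   d = make_entry('directory', 'src', ROOT_ID, file_id='src-id')
#   isinstance(d, InventoryDirectory)               # -> True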
def ensure_normalized_name(name):
    """Normalize name.

    :raises InvalidNormalization: When name is not normalized, and cannot be
        accessed on this platform by the normalized path.
    :return: The NFC normalised version of name.
    """
    #------- This has been copied to bzrlib.dirstate.DirState.add, please
    # keep them synchronised.
    # we dont import normalized_filename directly because we want to be
    # able to change the implementation at runtime for tests.
    norm_name, can_access = osutils.normalized_filename(name)
    if norm_name != name:
        if can_access:
            return norm_name
        else:
            # TODO: jam 20060701 This would probably be more useful
            #       if the error was raised with the full path
            raise errors.InvalidNormalization(name)
    return name
_NAME_RE = None


def is_valid_name(name):
    global _NAME_RE
    if _NAME_RE is None:
        _NAME_RE = re.compile(r'^[^/\\]+$')

    return bool(_NAME_RE.match(name))
def _check_delta_unique_ids(delta):
    """Decorate a delta and check that the file ids in it are unique.

    :return: A generator over delta.
    """
    ids = set()
    for item in delta:
        length = len(ids) + 1
        ids.add(item[2])
        if len(ids) != length:
            raise errors.InconsistentDelta(item[0] or item[1], item[2],
                "repeated file_id")
        yield item
def _check_delta_unique_new_paths(delta):
    """Decorate a delta and check that the new paths in it are unique.

    :return: A generator over delta.
    """
    paths = set()
    for item in delta:
        length = len(paths) + 1
        path = item[1]
        if path is not None:
            paths.add(path)
            if len(paths) != length:
                raise errors.InconsistentDelta(path, item[2], "repeated path")
        yield item
def _check_delta_unique_old_paths(delta):
    """Decorate a delta and check that the old paths in it are unique.

    :return: A generator over delta.
    """
    paths = set()
    for item in delta:
        length = len(paths) + 1
        path = item[0]
        if path is not None:
            paths.add(path)
            if len(paths) != length:
                raise errors.InconsistentDelta(path, item[2], "repeated path")
        yield item
def _check_delta_ids_are_valid(delta):
    """Decorate a delta and check that the ids in it are valid.

    :return: A generator over delta.
    """
    for item in delta:
        entry = item[3]
        if item[2] is None:
            raise errors.InconsistentDelta(item[0] or item[1], item[2],
                "entry with file_id None %r" % entry)
        if type(item[2]) != str:
            raise errors.InconsistentDelta(item[0] or item[1], item[2],
                "entry with non bytes file_id %r" % entry)
        yield item
def _check_delta_ids_match_entry(delta):
    """Decorate a delta and check that the ids in it match the entry.file_id.

    :return: A generator over delta.
    """
    for item in delta:
        entry = item[3]
        if entry is not None:
            if entry.file_id != item[2]:
                raise errors.InconsistentDelta(item[0] or item[1], item[2],
                    "mismatched id with %r" % entry)
        yield item
def _check_delta_new_path_entry_both_or_None(delta):
    """Decorate a delta and check that the new_path and entry are paired.

    :return: A generator over delta.
    """
    for item in delta:
        new_path = item[1]
        entry = item[3]
        if new_path is None and entry is not None:
            raise errors.InconsistentDelta(item[0], item[1],
                "Entry with no new_path")
        if new_path is not None and entry is None:
            raise errors.InconsistentDelta(new_path, item[1],
                "new_path with no entry")
        yield item