687
794
return compatible
690
class TreeReference(InventoryEntry):
692
kind = 'tree-reference'
694
def __init__(self, file_id, name, parent_id, revision=None,
695
reference_revision=None):
696
InventoryEntry.__init__(self, file_id, name, parent_id)
697
self.revision = revision
698
self.reference_revision = reference_revision
701
return TreeReference(self.file_id, self.name, self.parent_id,
702
self.revision, self.reference_revision)
704
def _read_tree_state(self, path, work_tree):
705
"""Populate fields in the inventory entry from the given tree.
707
self.reference_revision = work_tree.get_reference_revision(
710
def _forget_tree_state(self):
    """Drop state previously read from a working tree.

    Clears the cached reference revision so a later read repopulates it.
    """
    self.reference_revision = None
713
def _unchanged(self, previous_ie):
714
"""See InventoryEntry._unchanged."""
715
compatible = super(TreeReference, self)._unchanged(previous_ie)
716
if self.reference_revision != previous_ie.reference_revision:
721
class CommonInventory(object):
722
"""Basic inventory logic, defined in terms of primitives like has_id.
724
An inventory is the metadata about the contents of a tree.
726
This is broadly a map from file_id to entries such as directories, files,
727
symlinks and tree references. Each entry maintains its own metadata like
728
SHA1 and length for files, or children for a directory.
797
class Inventory(object):
798
"""Inventory of versioned files in a tree.
800
This describes which file_id is present at each point in the tree,
801
and possibly the SHA-1 or other information about the file.
730
802
Entries can be looked up either by path or by file_id.
804
The inventory represents a typical unix file tree, with
805
directories containing files and subdirectories. We never store
806
the full path to a file, because renaming a directory implicitly
807
moves all of its contents. This class internally maintains a
808
lookup tree that allows the children under a directory to be
732
811
InventoryEntry objects must not be modified after they are
733
812
inserted, other than through the Inventory API.
736
def __contains__(self, file_id):
737
"""True if this entry contains a file with given id.
739
>>> inv = Inventory()
740
>>> inv.add(InventoryFile('123', 'foo.c', ROOT_ID))
741
InventoryFile('123', 'foo.c', parent_id='TREE_ROOT', sha1=None, len=None, revision=None)
747
Note that this method along with __iter__ are not encouraged for use as
748
they are less clear than specific query methods - they may be rmeoved
751
return self.has_id(file_id)
753
def has_filename(self, filename):
    """Return True if an entry exists at the given path."""
    found_id = self.path2id(filename)
    return bool(found_id)
756
def id2path(self, file_id):
757
"""Return as a string the path to file_id.
760
>>> e = i.add(InventoryDirectory('src-id', 'src', ROOT_ID))
761
>>> e = i.add(InventoryFile('foo-id', 'foo.c', parent_id='src-id'))
762
>>> print i.id2path('foo-id')
765
:raises NoSuchId: If file_id is not present in the inventory.
767
# get all names, skipping root
768
return '/'.join(reversed(
769
[parent.name for parent in
770
self._iter_file_id_parents(file_id)][:-1]))
772
def iter_entries(self, from_dir=None, recursive=True):
773
"""Return (path, entry) pairs, in order by name.
775
:param from_dir: if None, start from the root,
776
otherwise start from this directory (either file-id or entry)
777
:param recursive: recurse into directories or not
780
if self.root is None:
784
elif isinstance(from_dir, basestring):
785
from_dir = self[from_dir]
787
# unrolling the recursive called changed the time from
788
# 440ms/663ms (inline/total) to 116ms/116ms
789
children = from_dir.children.items()
792
for name, ie in children:
795
children = collections.deque(children)
796
stack = [(u'', children)]
798
from_dir_relpath, children = stack[-1]
801
name, ie = children.popleft()
803
# we know that from_dir_relpath never ends in a slash
804
# and 'f' doesn't begin with one, we can do a string op, rather
805
# than the checks of pathjoin(), though this means that all paths
807
path = from_dir_relpath + '/' + name
811
if ie.kind != 'directory':
814
# But do this child first
815
new_children = ie.children.items()
817
new_children = collections.deque(new_children)
818
stack.append((path, new_children))
819
# Break out of inner loop, so that we start outer loop with child
822
# if we finished all children, pop it off the stack
825
def iter_entries_by_dir(self, from_dir=None, specific_file_ids=None,
826
yield_parents=False):
827
"""Iterate over the entries in a directory first order.
829
This returns all entries for a directory before returning
830
the entries for children of a directory. This is not
831
lexicographically sorted order, and is a hybrid between
832
depth-first and breadth-first.
834
:param yield_parents: If True, yield the parents from the root leading
835
down to specific_file_ids that have been requested. This has no
836
impact if specific_file_ids is None.
837
:return: This yields (path, entry) pairs
839
if specific_file_ids and not isinstance(specific_file_ids, set):
840
specific_file_ids = set(specific_file_ids)
841
# TODO? Perhaps this should return the from_dir so that the root is
842
# yielded? or maybe an option?
844
if self.root is None:
846
# Optimize a common case
847
if (not yield_parents and specific_file_ids is not None and
848
len(specific_file_ids) == 1):
849
file_id = list(specific_file_ids)[0]
851
yield self.id2path(file_id), self[file_id]
854
if (specific_file_ids is None or yield_parents or
855
self.root.file_id in specific_file_ids):
857
elif isinstance(from_dir, basestring):
858
from_dir = self[from_dir]
860
if specific_file_ids is not None:
861
# TODO: jam 20070302 This could really be done as a loop rather
862
# than a bunch of recursive calls.
865
def add_ancestors(file_id):
866
if file_id not in byid:
868
parent_id = byid[file_id].parent_id
869
if parent_id is None:
871
if parent_id not in parents:
872
parents.add(parent_id)
873
add_ancestors(parent_id)
874
for file_id in specific_file_ids:
875
add_ancestors(file_id)
879
stack = [(u'', from_dir)]
881
cur_relpath, cur_dir = stack.pop()
884
for child_name, child_ie in sorted(cur_dir.children.iteritems()):
886
child_relpath = cur_relpath + child_name
888
if (specific_file_ids is None or
889
child_ie.file_id in specific_file_ids or
890
(yield_parents and child_ie.file_id in parents)):
891
yield child_relpath, child_ie
893
if child_ie.kind == 'directory':
894
if parents is None or child_ie.file_id in parents:
895
child_dirs.append((child_relpath+'/', child_ie))
896
stack.extend(reversed(child_dirs))
898
def _make_delta(self, old):
899
"""Make an inventory delta from two inventories."""
902
adds = new_ids - old_ids
903
deletes = old_ids - new_ids
904
common = old_ids.intersection(new_ids)
906
for file_id in deletes:
907
delta.append((old.id2path(file_id), None, file_id, None))
909
delta.append((None, self.id2path(file_id), file_id, self[file_id]))
910
for file_id in common:
911
if old[file_id] != self[file_id]:
912
delta.append((old.id2path(file_id), self.id2path(file_id),
913
file_id, self[file_id]))
916
def _get_mutable_inventory(self):
    """Return a mutable copy of the object.

    Some inventories are immutable, yet working trees, for example, need
    to mutate existing inventories instead of creating a new one.
    """
    raise NotImplementedError(self._get_mutable_inventory)
924
def make_entry(self, kind, name, parent_id, file_id=None):
    """Delegate entry creation to the module-level make_entry factory."""
    return make_entry(kind, name, parent_id, file_id)
929
"""Return list of (path, ie) for all entries except the root.
931
This may be faster than iter_entries.
934
def descend(dir_ie, dir_path):
935
kids = dir_ie.children.items()
937
for name, ie in kids:
938
child_path = osutils.pathjoin(dir_path, name)
939
accum.append((child_path, ie))
940
if ie.kind == 'directory':
941
descend(ie, child_path)
943
descend(self.root, u'')
946
def directories(self):
947
"""Return (path, entry) pairs for all directories, including the root.
950
def descend(parent_ie, parent_path):
951
accum.append((parent_path, parent_ie))
953
kids = [(ie.name, ie) for ie in parent_ie.children.itervalues() if ie.kind == 'directory']
956
for name, child_ie in kids:
957
child_path = osutils.pathjoin(parent_path, name)
958
descend(child_ie, child_path)
959
descend(self.root, u'')
962
def path2id(self, relpath):
963
"""Walk down through directories to return entry of last component.
965
:param relpath: may be either a list of path components, or a single
966
string, in which case it is automatically split.
968
This returns the entry of the last component in the path,
969
which may be either a file or a directory.
971
Returns None IFF the path is not found.
973
if isinstance(relpath, basestring):
974
names = osutils.splitpath(relpath)
980
except errors.NoSuchId:
981
# root doesn't exist yet so nothing else can
987
children = getattr(parent, 'children', None)
996
return parent.file_id
998
def filter(self, specific_fileids):
999
"""Get an inventory view filtered against a set of file-ids.
1001
Children of directories and parents are included.
1003
The result may or may not reference the underlying inventory
1004
so it should be treated as immutable.
1006
interesting_parents = set()
1007
for fileid in specific_fileids:
1009
interesting_parents.update(self.get_idpath(fileid))
1010
except errors.NoSuchId:
1011
# This fileid is not in the inventory - that's ok
1013
entries = self.iter_entries()
1014
if self.root is None:
1015
return Inventory(root_id=None)
1016
other = Inventory(entries.next()[1].file_id)
1017
other.root.revision = self.root.revision
1018
other.revision_id = self.revision_id
1019
directories_to_expand = set()
1020
for path, entry in entries:
1021
file_id = entry.file_id
1022
if (file_id in specific_fileids
1023
or entry.parent_id in directories_to_expand):
1024
if entry.kind == 'directory':
1025
directories_to_expand.add(file_id)
1026
elif file_id not in interesting_parents:
1028
other.add(entry.copy())
1031
def get_idpath(self, file_id):
1032
"""Return a list of file_ids for the path to an entry.
1034
The list contains one element for each directory followed by
1035
the id of the file itself. So the length of the returned list
1036
is equal to the depth of the file in the tree, counting the
1037
root directory as depth 1.
1040
for parent in self._iter_file_id_parents(file_id):
1041
p.insert(0, parent.file_id)
1045
class Inventory(CommonInventory):
1046
"""Mutable dict based in-memory inventory.
1048
We never store the full path to a file, because renaming a directory
1049
implicitly moves all of its contents. This class internally maintains a
1050
lookup tree that allows the children under a directory to be
1053
814
>>> inv = Inventory()
1054
815
>>> inv.add(InventoryFile('123-123', 'hello.c', ROOT_ID))
1055
InventoryFile('123-123', 'hello.c', parent_id='TREE_ROOT', sha1=None, len=None, revision=None)
816
InventoryFile('123-123', 'hello.c', parent_id='TREE_ROOT')
1056
817
>>> inv['123-123'].name
1059
Id's may be looked up from paths:
1061
>>> inv.path2id('hello.c')
820
May be treated as an iterator or set to look up file ids:
822
>>> bool(inv.path2id('hello.c'))
1063
824
>>> '123-123' in inv
1066
There are iterators over the contents:
827
May also look up by name:
1068
>>> [entry[0] for entry in inv.iter_entries()]
829
>>> [x[0] for x in inv.iter_entries()]
831
>>> inv = Inventory('TREE_ROOT-12345678-12345678')
832
>>> inv.add(InventoryFile('123-123', 'hello.c', ROOT_ID))
833
InventoryFile('123-123', 'hello.c', parent_id='TREE_ROOT-12345678-12345678')
1072
835
def __init__(self, root_id=ROOT_ID, revision_id=None):
1073
836
"""Create or read an inventory.
1079
842
The inventory is created with a default root directory, with
1082
if root_id is not None:
1083
self._set_root(InventoryDirectory(root_id, u'', None))
845
# We are letting Branch.create() create a unique inventory
846
# root id. Rather than generating a random one here.
848
# root_id = bzrlib.branch.gen_file_id('TREE_ROOT')
849
self.root = RootEntry(root_id)
1087
850
self.revision_id = revision_id
1090
# More than one page of ouput is not useful anymore to debug
1093
contents = repr(self._byid)
1094
if len(contents) > max_len:
1095
contents = contents[:(max_len-len(closing))] + closing
1096
return "<Inventory object at %x, contents=%r>" % (id(self), contents)
1098
def apply_delta(self, delta):
1099
"""Apply a delta to this inventory.
1101
See the inventory developers documentation for the theory behind
1104
If delta application fails the inventory is left in an indeterminate
1105
state and must not be used.
1107
:param delta: A list of changes to apply. After all the changes are
1108
applied the final inventory must be internally consistent, but it
1109
is ok to supply changes which, if only half-applied would have an
1110
invalid result - such as supplying two changes which rename two
1111
files, 'A' and 'B' with each other : [('A', 'B', 'A-id', a_entry),
1112
('B', 'A', 'B-id', b_entry)].
1114
Each change is a tuple, of the form (old_path, new_path, file_id,
1117
When new_path is None, the change indicates the removal of an entry
1118
from the inventory and new_entry will be ignored (using None is
1119
appropriate). If new_path is not None, then new_entry must be an
1120
InventoryEntry instance, which will be incorporated into the
1121
inventory (and replace any existing entry with the same file id).
1123
When old_path is None, the change indicates the addition of
1124
a new entry to the inventory.
1126
When neither new_path nor old_path are None, the change is a
1127
modification to an entry, such as a rename, reparent, kind change
1130
The children attribute of new_entry is ignored. This is because
1131
this method preserves children automatically across alterations to
1132
the parent of the children, and cases where the parent id of a
1133
child is changing require the child to be passed in as a separate
1134
change regardless. E.g. in the recursive deletion of a directory -
1135
the directory's children must be included in the delta, or the
1136
final inventory will be invalid.
1138
Note that a file_id must only appear once within a given delta.
1139
An AssertionError is raised otherwise.
1141
# Check that the delta is legal. It would be nice if this could be
1142
# done within the loops below but it's safer to validate the delta
1143
# before starting to mutate the inventory, as there isn't a rollback
1145
list(_check_delta_unique_ids(_check_delta_unique_new_paths(
1146
_check_delta_unique_old_paths(_check_delta_ids_match_entry(
1147
_check_delta_ids_are_valid(
1148
_check_delta_new_path_entry_both_or_None(
1152
# Remove all affected items which were in the original inventory,
1153
# starting with the longest paths, thus ensuring parents are examined
1154
# after their children, which means that everything we examine has no
1155
# modified children remaining by the time we examine it.
1156
for old_path, file_id in sorted(((op, f) for op, np, f, e in delta
1157
if op is not None), reverse=True):
1158
# Preserve unaltered children of file_id for later reinsertion.
1159
file_id_children = getattr(self[file_id], 'children', {})
1160
if len(file_id_children):
1161
children[file_id] = file_id_children
1162
if self.id2path(file_id) != old_path:
1163
raise errors.InconsistentDelta(old_path, file_id,
1164
"Entry was at wrong other path %r." % self.id2path(file_id))
1165
# Remove file_id and the unaltered children. If file_id is not
1166
# being deleted it will be reinserted back later.
1167
self.remove_recursive_id(file_id)
1168
# Insert all affected which should be in the new inventory, reattaching
1169
# their children if they had any. This is done from shortest path to
1170
# longest, ensuring that items which were modified and whose parents in
1171
# the resulting inventory were also modified, are inserted after their
1173
for new_path, f, new_entry in sorted((np, f, e) for op, np, f, e in
1174
delta if np is not None):
1175
if new_entry.kind == 'directory':
1176
# Pop the child which to allow detection of children whose
1177
# parents were deleted and which were not reattached to a new
1179
replacement = InventoryDirectory(new_entry.file_id,
1180
new_entry.name, new_entry.parent_id)
1181
replacement.revision = new_entry.revision
1182
replacement.children = children.pop(replacement.file_id, {})
1183
new_entry = replacement
1186
except errors.DuplicateFileId:
1187
raise errors.InconsistentDelta(new_path, new_entry.file_id,
1188
"New id is already present in target.")
1189
except AttributeError:
1190
raise errors.InconsistentDelta(new_path, new_entry.file_id,
1191
"Parent is not a directory.")
1192
if self.id2path(new_entry.file_id) != new_path:
1193
raise errors.InconsistentDelta(new_path, new_entry.file_id,
1194
"New path is not consistent with parent path.")
1196
# Get the parent id that was deleted
1197
parent_id, children = children.popitem()
1198
raise errors.InconsistentDelta("<deleted>", parent_id,
1199
"The file id was deleted but its children were not deleted.")
1201
def create_by_apply_delta(self, inventory_delta, new_revision_id,
                          propagate_caches=False):
    """See CHKInventory.create_by_apply_delta()

    Copy this inventory, apply the delta to the copy, stamp the copy with
    the new revision id, and return it.

    :param inventory_delta: A delta as accepted by apply_delta.
    :param new_revision_id: The revision id for the resulting inventory.
    :param propagate_caches: Accepted for API compatibility with
        CHKInventory; a plain Inventory has no caches to propagate.
    :return: The new Inventory.
    """
    new_inv = self.copy()
    new_inv.apply_delta(inventory_delta)
    new_inv.revision_id = new_revision_id
    # Without this return the method silently yields None, breaking the
    # documented CHKInventory-compatible contract.
    return new_inv
1209
def _set_root(self, ie):
1211
851
self._byid = {self.root.file_id: self.root}
1214
855
# TODO: jam 20051218 Should copy also copy the revision_id?
1215
entries = self.iter_entries()
1216
if self.root is None:
1217
return Inventory(root_id=None)
1218
other = Inventory(entries.next()[1].file_id)
1219
other.root.revision = self.root.revision
856
other = Inventory(self.root.file_id)
1220
857
# copy recursively so we know directories will be added before
1221
858
# their children. There are more efficient ways than this...
1222
for path, entry in entries:
859
for path, entry in self.iter_entries():
860
if entry == self.root:
1223
862
other.add(entry.copy())
1226
def _get_mutable_inventory(self):
    """See CommonInventory._get_mutable_inventory.

    A plain Inventory is already mutable, so hand back an independent
    deep copy that the caller may modify freely.
    """
    return copy.deepcopy(self)
1230
866
def __iter__(self):
    """Yield the file-id of every entry in this inventory."""
    for file_id in self._byid:
        yield file_id
1234
def iter_just_entries(self):
1235
"""Iterate over all entries.
1237
Unlike iter_entries(), just the entries are returned (not (path, ie))
1238
and the order of entries is undefined.
1240
XXX: We may not want to merge this into bzr.dev.
1242
if self.root is None:
1244
for _, ie in self._byid.iteritems():
1247
870
def __len__(self):
    """Return how many entries this inventory holds."""
    return len(self._byid)
875
def iter_entries(self, from_dir=None):
876
"""Return (path, entry) pairs, in order by name."""
880
elif isinstance(from_dir, basestring):
881
from_dir = self._byid[from_dir]
883
kids = from_dir.children.items()
885
for name, ie in kids:
887
if ie.kind == 'directory':
888
for cn, cie in self.iter_entries(from_dir=ie.file_id):
889
yield pathjoin(name, cn), cie
893
"""Return list of (path, ie) for all entries except the root.
895
This may be faster than iter_entries.
898
def descend(dir_ie, dir_path):
899
kids = dir_ie.children.items()
901
for name, ie in kids:
902
child_path = pathjoin(dir_path, name)
903
accum.append((child_path, ie))
904
if ie.kind == 'directory':
905
descend(ie, child_path)
907
descend(self.root, u'')
911
def directories(self):
912
"""Return (path, entry) pairs for all directories, including the root.
915
def descend(parent_ie, parent_path):
916
accum.append((parent_path, parent_ie))
918
kids = [(ie.name, ie) for ie in parent_ie.children.itervalues() if ie.kind == 'directory']
921
for name, child_ie in kids:
922
child_path = pathjoin(parent_path, name)
923
descend(child_ie, child_path)
924
descend(self.root, u'')
929
def __contains__(self, file_id):
930
"""True if this entry contains a file with given id.
932
>>> inv = Inventory()
933
>>> inv.add(InventoryFile('123', 'foo.c', ROOT_ID))
934
InventoryFile('123', 'foo.c', parent_id='TREE_ROOT')
940
return file_id in self._byid
1251
943
def __getitem__(self, file_id):
1252
944
"""Return the entry for given file_id.
1254
946
>>> inv = Inventory()
1255
947
>>> inv.add(InventoryFile('123123', 'hello.c', ROOT_ID))
1256
InventoryFile('123123', 'hello.c', parent_id='TREE_ROOT', sha1=None, len=None, revision=None)
948
InventoryFile('123123', 'hello.c', parent_id='TREE_ROOT')
1257
949
>>> inv['123123'].name
1261
953
return self._byid[file_id]
1262
954
except KeyError:
1263
# really we're passing an inventory, not a tree...
1264
raise errors.NoSuchId(self, file_id)
956
raise BzrError("can't look up file_id None")
958
raise BzrError("file_id {%s} not in inventory" % file_id)
1266
961
def get_file_kind(self, file_id):
    """Return the kind (e.g. 'file', 'directory') of the entry for file_id."""
    entry = self._byid[file_id]
    return entry.kind
1465
1182
del old_parent.children[file_ie.name]
1466
1183
new_parent.children[new_name] = file_ie
1468
1185
file_ie.name = new_name
1469
1186
file_ie.parent_id = new_parent_id
1471
def is_root(self, file_id):
    """True if file_id names this inventory's root entry (and a root exists)."""
    root = self.root
    if root is None:
        return False
    return file_id == root.file_id
1475
class CHKInventory(CommonInventory):
1476
"""An inventory persisted in a CHK store.
1478
By design, a CHKInventory is immutable so many of the methods
1479
supported by Inventory - add, rename, apply_delta, etc - are *not*
1480
supported. To create a new CHKInventory, use create_by_apply_delta()
1481
or from_inventory(), say.
1483
Internally, a CHKInventory has one or two CHKMaps:
1485
* id_to_entry - a map from (file_id,) => InventoryEntry as bytes
1486
* parent_id_basename_to_file_id - a map from (parent_id, basename_utf8)
1489
The second map is optional and not present in early CHkRepository's.
1491
No caching is performed: every method call or item access will perform
1492
requests to the storage layer. As such, keep references to objects you
1496
def __init__(self, search_key_name):
1497
CommonInventory.__init__(self)
1498
self._fileid_to_entry_cache = {}
1499
self._path_to_fileid_cache = {}
1500
self._search_key_name = search_key_name
1503
def __eq__(self, other):
1504
"""Compare two sets by comparing their contents."""
1505
if not isinstance(other, CHKInventory):
1506
return NotImplemented
1508
this_key = self.id_to_entry.key()
1509
other_key = other.id_to_entry.key()
1510
this_pid_key = self.parent_id_basename_to_file_id.key()
1511
other_pid_key = other.parent_id_basename_to_file_id.key()
1512
if None in (this_key, this_pid_key, other_key, other_pid_key):
1514
return this_key == other_key and this_pid_key == other_pid_key
1516
def _entry_to_bytes(self, entry):
1517
"""Serialise entry as a single bytestring.
1519
:param Entry: An inventory entry.
1520
:return: A bytestring for the entry.
1523
ENTRY ::= FILE | DIR | SYMLINK | TREE
1524
FILE ::= "file: " COMMON SEP SHA SEP SIZE SEP EXECUTABLE
1525
DIR ::= "dir: " COMMON
1526
SYMLINK ::= "symlink: " COMMON SEP TARGET_UTF8
1527
TREE ::= "tree: " COMMON REFERENCE_REVISION
1528
COMMON ::= FILE_ID SEP PARENT_ID SEP NAME_UTF8 SEP REVISION
1531
if entry.parent_id is not None:
1532
parent_str = entry.parent_id
1535
name_str = entry.name.encode("utf8")
1536
if entry.kind == 'file':
1537
if entry.executable:
1541
return "file: %s\n%s\n%s\n%s\n%s\n%d\n%s" % (
1542
entry.file_id, parent_str, name_str, entry.revision,
1543
entry.text_sha1, entry.text_size, exec_str)
1544
elif entry.kind == 'directory':
1545
return "dir: %s\n%s\n%s\n%s" % (
1546
entry.file_id, parent_str, name_str, entry.revision)
1547
elif entry.kind == 'symlink':
1548
return "symlink: %s\n%s\n%s\n%s\n%s" % (
1549
entry.file_id, parent_str, name_str, entry.revision,
1550
entry.symlink_target.encode("utf8"))
1551
elif entry.kind == 'tree-reference':
1552
return "tree: %s\n%s\n%s\n%s\n%s" % (
1553
entry.file_id, parent_str, name_str, entry.revision,
1554
entry.reference_revision)
1556
raise ValueError("unknown kind %r" % entry.kind)
1558
def _expand_fileids_to_parents_and_children(self, file_ids):
1559
"""Give a more wholistic view starting with the given file_ids.
1561
For any file_id which maps to a directory, we will include all children
1562
of that directory. We will also include all directories which are
1563
parents of the given file_ids, but we will not include their children.
1570
fringle # fringle-id
1574
if given [foo-id] we will include
1575
TREE_ROOT as interesting parents
1577
foo-id, baz-id, frob-id, fringle-id
1581
# TODO: Pre-pass over the list of fileids to see if anything is already
1582
# deserialized in self._fileid_to_entry_cache
1584
directories_to_expand = set()
1585
children_of_parent_id = {}
1586
# It is okay if some of the fileids are missing
1587
for entry in self._getitems(file_ids):
1588
if entry.kind == 'directory':
1589
directories_to_expand.add(entry.file_id)
1590
interesting.add(entry.parent_id)
1591
children_of_parent_id.setdefault(entry.parent_id, []
1592
).append(entry.file_id)
1594
# Now, interesting has all of the direct parents, but not the
1595
# parents of those parents. It also may have some duplicates with
1597
remaining_parents = interesting.difference(file_ids)
1598
# When we hit the TREE_ROOT, we'll get an interesting parent of None,
1599
# but we don't actually want to recurse into that
1600
interesting.add(None) # this will auto-filter it in the loop
1601
remaining_parents.discard(None)
1602
while remaining_parents:
1603
next_parents = set()
1604
for entry in self._getitems(remaining_parents):
1605
next_parents.add(entry.parent_id)
1606
children_of_parent_id.setdefault(entry.parent_id, []
1607
).append(entry.file_id)
1608
# Remove any search tips we've already processed
1609
remaining_parents = next_parents.difference(interesting)
1610
interesting.update(remaining_parents)
1611
# We should probably also .difference(directories_to_expand)
1612
interesting.update(file_ids)
1613
interesting.discard(None)
1614
while directories_to_expand:
1615
# Expand directories by looking in the
1616
# parent_id_basename_to_file_id map
1617
keys = [StaticTuple(f,).intern() for f in directories_to_expand]
1618
directories_to_expand = set()
1619
items = self.parent_id_basename_to_file_id.iteritems(keys)
1620
next_file_ids = set([item[1] for item in items])
1621
next_file_ids = next_file_ids.difference(interesting)
1622
interesting.update(next_file_ids)
1623
for entry in self._getitems(next_file_ids):
1624
if entry.kind == 'directory':
1625
directories_to_expand.add(entry.file_id)
1626
children_of_parent_id.setdefault(entry.parent_id, []
1627
).append(entry.file_id)
1628
return interesting, children_of_parent_id
1630
def filter(self, specific_fileids):
1631
"""Get an inventory view filtered against a set of file-ids.
1633
Children of directories and parents are included.
1635
The result may or may not reference the underlying inventory
1636
so it should be treated as immutable.
1639
parent_to_children) = self._expand_fileids_to_parents_and_children(
1641
# There is some overlap here, but we assume that all interesting items
1642
# are in the _fileid_to_entry_cache because we had to read them to
1643
# determine if they were a dir we wanted to recurse, or just a file
1644
# This should give us all the entries we'll want to add, so start
1646
other = Inventory(self.root_id)
1647
other.root.revision = self.root.revision
1648
other.revision_id = self.revision_id
1649
if not interesting or not parent_to_children:
1650
# empty filter, or filtering entrys that don't exist
1651
# (if even 1 existed, then we would have populated
1652
# parent_to_children with at least the tree root.)
1654
cache = self._fileid_to_entry_cache
1656
remaining_children = collections.deque(parent_to_children[self.root_id])
1658
import pdb; pdb.set_trace()
1660
while remaining_children:
1661
file_id = remaining_children.popleft()
1663
if ie.kind == 'directory':
1664
ie = ie.copy() # We create a copy to depopulate the .children attribute
1665
# TODO: depending on the uses of 'other' we should probably alwyas
1666
# '.copy()' to prevent someone from mutating other and
1667
# invaliding our internal cache
1669
if file_id in parent_to_children:
1670
remaining_children.extend(parent_to_children[file_id])
1674
def _bytes_to_utf8name_key(bytes):
    """Extract the (utf8_name, file_id, revision_id) key out of bytes.

    The name is normally irrelevant here, but callers filtering out
    non-rich-root inventories need it to drop empty-name entries.
    """
    header = bytes.split('\n')
    kind, file_id = header[0].split(': ')
    name_utf8 = header[2]
    return (name_utf8, intern(file_id), intern(header[3]))
1682
def _bytes_to_entry(self, bytes):
1683
"""Deserialise a serialised entry."""
1684
sections = bytes.split('\n')
1685
if sections[0].startswith("file: "):
1686
result = InventoryFile(sections[0][6:],
1687
sections[2].decode('utf8'),
1689
result.text_sha1 = sections[4]
1690
result.text_size = int(sections[5])
1691
result.executable = sections[6] == "Y"
1692
elif sections[0].startswith("dir: "):
1693
result = CHKInventoryDirectory(sections[0][5:],
1694
sections[2].decode('utf8'),
1696
elif sections[0].startswith("symlink: "):
1697
result = InventoryLink(sections[0][9:],
1698
sections[2].decode('utf8'),
1700
result.symlink_target = sections[4].decode('utf8')
1701
elif sections[0].startswith("tree: "):
1702
result = TreeReference(sections[0][6:],
1703
sections[2].decode('utf8'),
1705
result.reference_revision = sections[4]
1707
raise ValueError("Not a serialised entry %r" % bytes)
1708
result.file_id = intern(result.file_id)
1709
result.revision = intern(sections[3])
1710
if result.parent_id == '':
1711
result.parent_id = None
1712
self._fileid_to_entry_cache[result.file_id] = result
1715
def _get_mutable_inventory(self):
1716
"""See CommonInventory._get_mutable_inventory."""
1717
entries = self.iter_entries()
1718
inv = Inventory(None, self.revision_id)
1719
for path, inv_entry in entries:
1720
inv.add(inv_entry.copy())
1723
def create_by_apply_delta(self, inventory_delta, new_revision_id,
1724
propagate_caches=False):
1725
"""Create a new CHKInventory by applying inventory_delta to this one.
1727
See the inventory developers documentation for the theory behind
1730
:param inventory_delta: The inventory delta to apply. See
1731
Inventory.apply_delta for details.
1732
:param new_revision_id: The revision id of the resulting CHKInventory.
1733
:param propagate_caches: If True, the caches for this inventory are
1734
copied to and updated for the result.
1735
:return: The new CHKInventory.
1737
split = osutils.split
1738
result = CHKInventory(self._search_key_name)
1739
if propagate_caches:
1740
# Just propagate the path-to-fileid cache for now
1741
result._path_to_fileid_cache = dict(self._path_to_fileid_cache.iteritems())
1742
search_key_func = chk_map.search_key_registry.get(self._search_key_name)
1743
self.id_to_entry._ensure_root()
1744
maximum_size = self.id_to_entry._root_node.maximum_size
1745
result.revision_id = new_revision_id
1746
result.id_to_entry = chk_map.CHKMap(
1747
self.id_to_entry._store,
1748
self.id_to_entry.key(),
1749
search_key_func=search_key_func)
1750
result.id_to_entry._ensure_root()
1751
result.id_to_entry._root_node.set_maximum_size(maximum_size)
1752
# Change to apply to the parent_id_basename delta. The dict maps
1753
# (parent_id, basename) -> (old_key, new_value). We use a dict because
1754
# when a path has its id replaced (e.g. the root is changed, or someone
1755
# does bzr mv a b, bzr mv c a, we should output a single change to this
1756
# map rather than two.
1757
parent_id_basename_delta = {}
1758
if self.parent_id_basename_to_file_id is not None:
1759
result.parent_id_basename_to_file_id = chk_map.CHKMap(
1760
self.parent_id_basename_to_file_id._store,
1761
self.parent_id_basename_to_file_id.key(),
1762
search_key_func=search_key_func)
1763
result.parent_id_basename_to_file_id._ensure_root()
1764
self.parent_id_basename_to_file_id._ensure_root()
1765
result_p_id_root = result.parent_id_basename_to_file_id._root_node
1766
p_id_root = self.parent_id_basename_to_file_id._root_node
1767
result_p_id_root.set_maximum_size(p_id_root.maximum_size)
1768
result_p_id_root._key_width = p_id_root._key_width
1770
result.parent_id_basename_to_file_id = None
1771
result.root_id = self.root_id
1772
id_to_entry_delta = []
1773
# inventory_delta is only traversed once, so we just update the
1775
# Check for repeated file ids
1776
inventory_delta = _check_delta_unique_ids(inventory_delta)
1777
# Repeated old paths
1778
inventory_delta = _check_delta_unique_old_paths(inventory_delta)
1779
# Check for repeated new paths
1780
inventory_delta = _check_delta_unique_new_paths(inventory_delta)
1781
# Check for entries that don't match the fileid
1782
inventory_delta = _check_delta_ids_match_entry(inventory_delta)
1783
# Check for nonsense fileids
1784
inventory_delta = _check_delta_ids_are_valid(inventory_delta)
1785
# Check for new_path <-> entry consistency
1786
inventory_delta = _check_delta_new_path_entry_both_or_None(
1788
# All changed entries need to have their parents be directories and be
1789
# at the right path. This set contains (path, id) tuples.
1791
# When we delete an item, all the children of it must be either deleted
1792
# or altered in their own right. As we batch process the change via
1793
# CHKMap.apply_delta, we build a set of things to use to validate the
1797
for old_path, new_path, file_id, entry in inventory_delta:
1800
result.root_id = file_id
1801
if new_path is None:
1806
if propagate_caches:
1808
del result._path_to_fileid_cache[old_path]
1811
deletes.add(file_id)
1813
new_key = StaticTuple(file_id,)
1814
new_value = result._entry_to_bytes(entry)
1815
# Update caches. It's worth doing this whether
1816
# we're propagating the old caches or not.
1817
result._path_to_fileid_cache[new_path] = file_id
1818
parents.add((split(new_path)[0], entry.parent_id))
1819
if old_path is None:
1822
old_key = StaticTuple(file_id,)
1823
if self.id2path(file_id) != old_path:
1824
raise errors.InconsistentDelta(old_path, file_id,
1825
"Entry was at wrong other path %r." %
1826
self.id2path(file_id))
1827
altered.add(file_id)
1828
id_to_entry_delta.append(StaticTuple(old_key, new_key, new_value))
1829
if result.parent_id_basename_to_file_id is not None:
1830
# parent_id, basename changes
1831
if old_path is None:
1834
old_entry = self[file_id]
1835
old_key = self._parent_id_basename_key(old_entry)
1836
if new_path is None:
1840
new_key = self._parent_id_basename_key(entry)
1842
# If the two keys are the same, the value will be unchanged
1843
# as its always the file id for this entry.
1844
if old_key != new_key:
1845
# Transform a change into explicit delete/add preserving
1846
# a possible match on the key from a different file id.
1847
if old_key is not None:
1848
parent_id_basename_delta.setdefault(
1849
old_key, [None, None])[0] = old_key
1850
if new_key is not None:
1851
parent_id_basename_delta.setdefault(
1852
new_key, [None, None])[1] = new_value
1853
# validate that deletes are complete.
1854
for file_id in deletes:
1855
entry = self[file_id]
1856
if entry.kind != 'directory':
1858
# This loop could potentially be better by using the id_basename
1859
# map to just get the child file ids.
1860
for child in entry.children.values():
1861
if child.file_id not in altered:
1862
raise errors.InconsistentDelta(self.id2path(child.file_id),
1863
child.file_id, "Child not deleted or reparented when "
1865
result.id_to_entry.apply_delta(id_to_entry_delta)
1866
if parent_id_basename_delta:
1867
# Transform the parent_id_basename delta data into a linear delta
1868
# with only one record for a given key. Optimally this would allow
1869
# re-keying, but its simpler to just output that as a delete+add
1870
# to spend less time calculating the delta.
1872
for key, (old_key, value) in parent_id_basename_delta.iteritems():
1873
if value is not None:
1874
delta_list.append((old_key, key, value))
1876
delta_list.append((old_key, None, None))
1877
result.parent_id_basename_to_file_id.apply_delta(delta_list)
1878
parents.discard(('', None))
1879
for parent_path, parent in parents:
1881
if result[parent].kind != 'directory':
1882
raise errors.InconsistentDelta(result.id2path(parent), parent,
1883
'Not a directory, but given children')
1884
except errors.NoSuchId:
1885
raise errors.InconsistentDelta("<unknown>", parent,
1886
"Parent is not present in resulting inventory.")
1887
if result.path2id(parent_path) != parent:
1888
raise errors.InconsistentDelta(parent_path, parent,
1889
"Parent has wrong path %r." % result.path2id(parent_path))
1893
def deserialise(klass, chk_store, bytes, expected_revision_id):
1894
"""Deserialise a CHKInventory.
1896
:param chk_store: A CHK capable VersionedFiles instance.
1897
:param bytes: The serialised bytes.
1898
:param expected_revision_id: The revision ID we think this inventory is
1900
:return: A CHKInventory
1902
lines = bytes.split('\n')
1904
raise AssertionError('bytes to deserialize must end with an eol')
1906
if lines[0] != 'chkinventory:':
1907
raise ValueError("not a serialised CHKInventory: %r" % bytes)
1909
allowed_keys = frozenset(['root_id', 'revision_id', 'search_key_name',
1910
'parent_id_basename_to_file_id',
1912
for line in lines[1:]:
1913
key, value = line.split(': ', 1)
1914
if key not in allowed_keys:
1915
raise errors.BzrError('Unknown key in inventory: %r\n%r'
1918
raise errors.BzrError('Duplicate key in inventory: %r\n%r'
1921
revision_id = intern(info['revision_id'])
1922
root_id = intern(info['root_id'])
1923
search_key_name = intern(info.get('search_key_name', 'plain'))
1924
parent_id_basename_to_file_id = intern(info.get(
1925
'parent_id_basename_to_file_id', None))
1926
if not parent_id_basename_to_file_id.startswith('sha1:'):
1927
raise ValueError('parent_id_basename_to_file_id should be a sha1'
1928
' key not %r' % (parent_id_basename_to_file_id,))
1929
id_to_entry = info['id_to_entry']
1930
if not id_to_entry.startswith('sha1:'):
1931
raise ValueError('id_to_entry should be a sha1'
1932
' key not %r' % (id_to_entry,))
1934
result = CHKInventory(search_key_name)
1935
result.revision_id = revision_id
1936
result.root_id = root_id
1937
search_key_func = chk_map.search_key_registry.get(
1938
result._search_key_name)
1939
if parent_id_basename_to_file_id is not None:
1940
result.parent_id_basename_to_file_id = chk_map.CHKMap(
1941
chk_store, StaticTuple(parent_id_basename_to_file_id,),
1942
search_key_func=search_key_func)
1944
result.parent_id_basename_to_file_id = None
1946
result.id_to_entry = chk_map.CHKMap(chk_store,
1947
StaticTuple(id_to_entry,),
1948
search_key_func=search_key_func)
1949
if (result.revision_id,) != expected_revision_id:
1950
raise ValueError("Mismatched revision id and expected: %r, %r" %
1951
(result.revision_id, expected_revision_id))
1955
def from_inventory(klass, chk_store, inventory, maximum_size=0, search_key_name='plain'):
1956
"""Create a CHKInventory from an existing inventory.
1958
The content of inventory is copied into the chk_store, and a
1959
CHKInventory referencing that is returned.
1961
:param chk_store: A CHK capable VersionedFiles instance.
1962
:param inventory: The inventory to copy.
1963
:param maximum_size: The CHKMap node size limit.
1964
:param search_key_name: The identifier for the search key function
1966
result = klass(search_key_name)
1967
result.revision_id = inventory.revision_id
1968
result.root_id = inventory.root.file_id
1970
entry_to_bytes = result._entry_to_bytes
1971
parent_id_basename_key = result._parent_id_basename_key
1972
id_to_entry_dict = {}
1973
parent_id_basename_dict = {}
1974
for path, entry in inventory.iter_entries():
1975
key = StaticTuple(entry.file_id,).intern()
1976
id_to_entry_dict[key] = entry_to_bytes(entry)
1977
p_id_key = parent_id_basename_key(entry)
1978
parent_id_basename_dict[p_id_key] = entry.file_id
1980
result._populate_from_dicts(chk_store, id_to_entry_dict,
1981
parent_id_basename_dict, maximum_size=maximum_size)
1984
def _populate_from_dicts(self, chk_store, id_to_entry_dict,
1985
parent_id_basename_dict, maximum_size):
1986
search_key_func = chk_map.search_key_registry.get(self._search_key_name)
1987
root_key = chk_map.CHKMap.from_dict(chk_store, id_to_entry_dict,
1988
maximum_size=maximum_size, key_width=1,
1989
search_key_func=search_key_func)
1990
self.id_to_entry = chk_map.CHKMap(chk_store, root_key,
1992
root_key = chk_map.CHKMap.from_dict(chk_store,
1993
parent_id_basename_dict,
1994
maximum_size=maximum_size, key_width=2,
1995
search_key_func=search_key_func)
1996
self.parent_id_basename_to_file_id = chk_map.CHKMap(chk_store,
1997
root_key, search_key_func)
1999
def _parent_id_basename_key(self, entry):
2000
"""Create a key for a entry in a parent_id_basename_to_file_id index."""
2001
if entry.parent_id is not None:
2002
parent_id = entry.parent_id
2005
return StaticTuple(parent_id, entry.name.encode('utf8')).intern()
2007
def __getitem__(self, file_id):
2008
"""map a single file_id -> InventoryEntry."""
2010
raise errors.NoSuchId(self, file_id)
2011
result = self._fileid_to_entry_cache.get(file_id, None)
2012
if result is not None:
2015
return self._bytes_to_entry(
2016
self.id_to_entry.iteritems([StaticTuple(file_id,)]).next()[1])
2017
except StopIteration:
2018
# really we're passing an inventory, not a tree...
2019
raise errors.NoSuchId(self, file_id)
2021
def _getitems(self, file_ids):
2022
"""Similar to __getitem__, but lets you query for multiple.
2024
The returned order is undefined. And currently if an item doesn't
2025
exist, it isn't included in the output.
2029
for file_id in file_ids:
2030
entry = self._fileid_to_entry_cache.get(file_id, None)
2032
remaining.append(file_id)
2034
result.append(entry)
2035
file_keys = [StaticTuple(f,).intern() for f in remaining]
2036
for file_key, value in self.id_to_entry.iteritems(file_keys):
2037
entry = self._bytes_to_entry(value)
2038
result.append(entry)
2039
self._fileid_to_entry_cache[entry.file_id] = entry
2042
def has_id(self, file_id):
2043
# Perhaps have an explicit 'contains' method on CHKMap ?
2044
if self._fileid_to_entry_cache.get(file_id, None) is not None:
2047
self.id_to_entry.iteritems([StaticTuple(file_id,)]))) == 1
2049
def is_root(self, file_id):
2050
return file_id == self.root_id
2052
def _iter_file_id_parents(self, file_id):
2053
"""Yield the parents of file_id up to the root."""
2054
while file_id is not None:
2058
raise errors.NoSuchId(tree=self, file_id=file_id)
2060
file_id = ie.parent_id
2063
"""Iterate over all file-ids."""
2064
for key, _ in self.id_to_entry.iteritems():
2067
def iter_just_entries(self):
2068
"""Iterate over all entries.
2070
Unlike iter_entries(), just the entries are returned (not (path, ie))
2071
and the order of entries is undefined.
2073
XXX: We may not want to merge this into bzr.dev.
2075
for key, entry in self.id_to_entry.iteritems():
2077
ie = self._fileid_to_entry_cache.get(file_id, None)
2079
ie = self._bytes_to_entry(entry)
2080
self._fileid_to_entry_cache[file_id] = ie
2083
def iter_changes(self, basis):
2084
"""Generate a Tree.iter_changes change list between this and basis.
2086
:param basis: Another CHKInventory.
2087
:return: An iterator over the changes between self and basis, as per
2088
tree.iter_changes().
2090
# We want: (file_id, (path_in_source, path_in_target),
2091
# changed_content, versioned, parent, name, kind,
2093
for key, basis_value, self_value in \
2094
self.id_to_entry.iter_changes(basis.id_to_entry):
2096
if basis_value is not None:
2097
basis_entry = basis._bytes_to_entry(basis_value)
2098
path_in_source = basis.id2path(file_id)
2099
basis_parent = basis_entry.parent_id
2100
basis_name = basis_entry.name
2101
basis_executable = basis_entry.executable
2103
path_in_source = None
2106
basis_executable = None
2107
if self_value is not None:
2108
self_entry = self._bytes_to_entry(self_value)
2109
path_in_target = self.id2path(file_id)
2110
self_parent = self_entry.parent_id
2111
self_name = self_entry.name
2112
self_executable = self_entry.executable
2114
path_in_target = None
2117
self_executable = None
2118
if basis_value is None:
2120
kind = (None, self_entry.kind)
2121
versioned = (False, True)
2122
elif self_value is None:
2124
kind = (basis_entry.kind, None)
2125
versioned = (True, False)
2127
kind = (basis_entry.kind, self_entry.kind)
2128
versioned = (True, True)
2129
changed_content = False
2130
if kind[0] != kind[1]:
2131
changed_content = True
2132
elif kind[0] == 'file':
2133
if (self_entry.text_size != basis_entry.text_size or
2134
self_entry.text_sha1 != basis_entry.text_sha1):
2135
changed_content = True
2136
elif kind[0] == 'symlink':
2137
if self_entry.symlink_target != basis_entry.symlink_target:
2138
changed_content = True
2139
elif kind[0] == 'tree-reference':
2140
if (self_entry.reference_revision !=
2141
basis_entry.reference_revision):
2142
changed_content = True
2143
parent = (basis_parent, self_parent)
2144
name = (basis_name, self_name)
2145
executable = (basis_executable, self_executable)
2146
if (not changed_content
2147
and parent[0] == parent[1]
2148
and name[0] == name[1]
2149
and executable[0] == executable[1]):
2150
# Could happen when only the revision changed for a directory
2153
yield (file_id, (path_in_source, path_in_target), changed_content,
2154
versioned, parent, name, kind, executable)
2157
"""Return the number of entries in the inventory."""
2158
return len(self.id_to_entry)
2160
def _make_delta(self, old):
2161
"""Make an inventory delta from two inventories."""
2162
if type(old) != CHKInventory:
2163
return CommonInventory._make_delta(self, old)
2165
for key, old_value, self_value in \
2166
self.id_to_entry.iter_changes(old.id_to_entry):
2168
if old_value is not None:
2169
old_path = old.id2path(file_id)
2172
if self_value is not None:
2173
entry = self._bytes_to_entry(self_value)
2174
self._fileid_to_entry_cache[file_id] = entry
2175
new_path = self.id2path(file_id)
2179
delta.append((old_path, new_path, file_id, entry))
2182
def path2id(self, relpath):
2183
"""See CommonInventory.path2id()."""
2184
# TODO: perhaps support negative hits?
2185
result = self._path_to_fileid_cache.get(relpath, None)
2186
if result is not None:
2188
if isinstance(relpath, basestring):
2189
names = osutils.splitpath(relpath)
2192
current_id = self.root_id
2193
if current_id is None:
2195
parent_id_index = self.parent_id_basename_to_file_id
2197
for basename in names:
2198
if cur_path is None:
2201
cur_path = cur_path + '/' + basename
2202
basename_utf8 = basename.encode('utf8')
2203
file_id = self._path_to_fileid_cache.get(cur_path, None)
2205
key_filter = [StaticTuple(current_id, basename_utf8)]
2206
items = parent_id_index.iteritems(key_filter)
2207
for (parent_id, name_utf8), file_id in items:
2208
if parent_id != current_id or name_utf8 != basename_utf8:
2209
raise errors.BzrError("corrupt inventory lookup! "
2210
"%r %r %r %r" % (parent_id, current_id, name_utf8,
2215
self._path_to_fileid_cache[cur_path] = file_id
2216
current_id = file_id
2220
"""Serialise the inventory to lines."""
2221
lines = ["chkinventory:\n"]
2222
if self._search_key_name != 'plain':
2223
# custom ordering grouping things that don't change together
2224
lines.append('search_key_name: %s\n' % (self._search_key_name,))
2225
lines.append("root_id: %s\n" % self.root_id)
2226
lines.append('parent_id_basename_to_file_id: %s\n' %
2227
(self.parent_id_basename_to_file_id.key()[0],))
2228
lines.append("revision_id: %s\n" % self.revision_id)
2229
lines.append("id_to_entry: %s\n" % (self.id_to_entry.key()[0],))
2231
lines.append("revision_id: %s\n" % self.revision_id)
2232
lines.append("root_id: %s\n" % self.root_id)
2233
if self.parent_id_basename_to_file_id is not None:
2234
lines.append('parent_id_basename_to_file_id: %s\n' %
2235
(self.parent_id_basename_to_file_id.key()[0],))
2236
lines.append("id_to_entry: %s\n" % (self.id_to_entry.key()[0],))
2241
"""Get the root entry."""
2242
return self[self.root_id]
2245
class CHKInventoryDirectory(InventoryDirectory):
2246
"""A directory in an inventory."""
2248
__slots__ = ['text_sha1', 'text_size', 'file_id', 'name', 'kind',
2249
'text_id', 'parent_id', '_children', 'executable',
2250
'revision', 'symlink_target', 'reference_revision',
2253
def __init__(self, file_id, name, parent_id, chk_inventory):
2254
# Don't call InventoryDirectory.__init__ - it isn't right for this
2256
InventoryEntry.__init__(self, file_id, name, parent_id)
2257
self._children = None
2258
self.kind = 'directory'
2259
self._chk_inventory = chk_inventory
2263
"""Access the list of children of this directory.
2265
With a parent_id_basename_to_file_id index, loads all the children,
2266
without loads the entire index. Without is bad. A more sophisticated
2267
proxy object might be nice, to allow partial loading of children as
2268
well when specific names are accessed. (So path traversal can be
2269
written in the obvious way but not examine siblings.).
2271
if self._children is not None:
2272
return self._children
2273
# No longer supported
2274
if self._chk_inventory.parent_id_basename_to_file_id is None:
2275
raise AssertionError("Inventories without"
2276
" parent_id_basename_to_file_id are no longer supported")
2278
# XXX: Todo - use proxy objects for the children rather than loading
2279
# all when the attribute is referenced.
2280
parent_id_index = self._chk_inventory.parent_id_basename_to_file_id
2282
for (parent_id, name_utf8), file_id in parent_id_index.iteritems(
2283
key_filter=[StaticTuple(self.file_id,)]):
2284
child_keys.add(StaticTuple(file_id,))
2286
for file_id_key in child_keys:
2287
entry = self._chk_inventory._fileid_to_entry_cache.get(
2288
file_id_key[0], None)
2289
if entry is not None:
2290
result[entry.name] = entry
2291
cached.add(file_id_key)
2292
child_keys.difference_update(cached)
2293
# populate; todo: do by name
2294
id_to_entry = self._chk_inventory.id_to_entry
2295
for file_id_key, bytes in id_to_entry.iteritems(child_keys):
2296
entry = self._chk_inventory._bytes_to_entry(bytes)
2297
result[entry.name] = entry
2298
self._chk_inventory._fileid_to_entry_cache[file_id_key[0]] = entry
2299
self._children = result
2303
'directory': InventoryDirectory,
2304
'file': InventoryFile,
2305
'symlink': InventoryLink,
2306
'tree-reference': TreeReference
2309
def make_entry(kind, name, parent_id, file_id=None):
2310
"""Create an inventory entry.
2312
:param kind: the type of inventory entry to create.
2313
:param name: the basename of the entry.
2314
:param parent_id: the parent_id of the entry.
2315
:param file_id: the file_id to use. if None, one will be created.
2318
file_id = generate_ids.gen_file_id(name)
2319
name = ensure_normalized_name(name)
2321
factory = entry_factory[kind]
2323
raise errors.BadFileKindError(name, kind)
2324
return factory(file_id, name, parent_id)
2327
def ensure_normalized_name(name):
2330
:raises InvalidNormalization: When name is not normalized, and cannot be
2331
accessed on this platform by the normalized path.
2332
:return: The NFC normalised version of name.
2334
#------- This has been copied to bzrlib.dirstate.DirState.add, please
2335
# keep them synchronised.
2336
# we dont import normalized_filename directly because we want to be
2337
# able to change the implementation at runtime for tests.
2338
norm_name, can_access = osutils.normalized_filename(name)
2339
if norm_name != name:
2343
# TODO: jam 20060701 This would probably be more useful
2344
# if the error was raised with the full path
2345
raise errors.InvalidNormalization(name)
2349
1191
_NAME_RE = None
2351
1193
def is_valid_name(name):
2352
1194
global _NAME_RE
2353
if _NAME_RE is None:
1195
if _NAME_RE == None:
2354
1196
_NAME_RE = re.compile(r'^[^/\\]+$')
2356
1198
return bool(_NAME_RE.match(name))
2359
def _check_delta_unique_ids(delta):
2360
"""Decorate a delta and check that the file ids in it are unique.
2362
:return: A generator over delta.
2366
length = len(ids) + 1
2368
if len(ids) != length:
2369
raise errors.InconsistentDelta(item[0] or item[1], item[2],
2374
def _check_delta_unique_new_paths(delta):
2375
"""Decorate a delta and check that the new paths in it are unique.
2377
:return: A generator over delta.
2381
length = len(paths) + 1
2383
if path is not None:
2385
if len(paths) != length:
2386
raise errors.InconsistentDelta(path, item[2], "repeated path")
2390
def _check_delta_unique_old_paths(delta):
2391
"""Decorate a delta and check that the old paths in it are unique.
2393
:return: A generator over delta.
2397
length = len(paths) + 1
2399
if path is not None:
2401
if len(paths) != length:
2402
raise errors.InconsistentDelta(path, item[2], "repeated path")
2406
def _check_delta_ids_are_valid(delta):
2407
"""Decorate a delta and check that the ids in it are valid.
2409
:return: A generator over delta.
2414
raise errors.InconsistentDelta(item[0] or item[1], item[2],
2415
"entry with file_id None %r" % entry)
2416
if type(item[2]) != str:
2417
raise errors.InconsistentDelta(item[0] or item[1], item[2],
2418
"entry with non bytes file_id %r" % entry)
2422
def _check_delta_ids_match_entry(delta):
2423
"""Decorate a delta and check that the ids in it match the entry.file_id.
2425
:return: A generator over delta.
2429
if entry is not None:
2430
if entry.file_id != item[2]:
2431
raise errors.InconsistentDelta(item[0] or item[1], item[2],
2432
"mismatched id with %r" % entry)
2436
def _check_delta_new_path_entry_both_or_None(delta):
2437
"""Decorate a delta and check that the new_path and entry are paired.
2439
:return: A generator over delta.
2444
if new_path is None and entry is not None:
2445
raise errors.InconsistentDelta(item[0], item[1],
2446
"Entry with no new_path")
2447
if new_path is not None and entry is None:
2448
raise errors.InconsistentDelta(new_path, item[1],
2449
"new_path with no entry")