        return compatible


class CommonInventory(object):
    """Basic inventory logic, defined in terms of primitives like has_id.

    An inventory is the metadata about the contents of a tree.

    This is broadly a map from file_id to entries such as directories, files,
    symlinks and tree references. Each entry maintains its own metadata like
    SHA1 and length for files, or children for a directory.


class Inventory(object):
    """Inventory of versioned files in a tree.

    This describes which file_id is present at each point in the tree,
    and possibly the SHA-1 or other information about the file.

    Entries can be looked up either by path or by file_id.

    The inventory represents a typical unix file tree, with
    directories containing files and subdirectories. We never store
    the full path to a file, because renaming a directory implicitly
    moves all of its contents. This class internally maintains a
    lookup tree that allows the children under a directory to be

    InventoryEntry objects must not be modified after they are
    inserted, other than through the Inventory API.

    >>> inv = Inventory()
    >>> inv.add(InventoryFile('123-123', 'hello.c', ROOT_ID))
    InventoryFile('123-123', 'hello.c', parent_id='TREE_ROOT', sha1=None, len=None)
    >>> inv['123-123'].name

    May be treated as an iterator or set to look up file ids:

    >>> bool(inv.path2id('hello.c'))

    May also look up by name:

    >>> [x[0] for x in inv.iter_entries()]

    >>> inv = Inventory('TREE_ROOT-12345678-12345678')
    >>> inv.add(InventoryFile('123-123', 'hello.c', ROOT_ID))
    Traceback (most recent call last):
    BzrError: parent_id {TREE_ROOT} not in inventory
    >>> inv.add(InventoryFile('123-123', 'hello.c', 'TREE_ROOT-12345678-12345678'))
    InventoryFile('123-123', 'hello.c', parent_id='TREE_ROOT-12345678-12345678', sha1=None, len=None)

    def __contains__(self, file_id):
        """True if this entry contains a file with given id.

        >>> inv = Inventory()
        >>> inv.add(InventoryFile('123', 'foo.c', ROOT_ID))
        InventoryFile('123', 'foo.c', parent_id='TREE_ROOT', sha1=None, len=None, revision=None)

        Note that this method along with __iter__ are not encouraged for use as
        they are less clear than specific query methods - they may be removed
        return self.has_id(file_id)

    def has_filename(self, filename):
        return bool(self.path2id(filename))

    def id2path(self, file_id):
        """Return as a string the path to file_id.

        >>> e = i.add(InventoryDirectory('src-id', 'src', ROOT_ID))
        >>> e = i.add(InventoryFile('foo-id', 'foo.c', parent_id='src-id'))
        >>> print i.id2path('foo-id')

        :raises NoSuchId: If file_id is not present in the inventory.
        # get all names, skipping root
        return '/'.join(reversed(
            [parent.name for parent in
             self._iter_file_id_parents(file_id)][:-1]))

    def iter_entries(self, from_dir=None, recursive=True):
        """Return (path, entry) pairs, in order by name.

        :param from_dir: if None, start from the root,
            otherwise start from this directory (either file-id or entry)
        :param recursive: recurse into directories or not

    def __init__(self, root_id=ROOT_ID, revision_id=None):
        """Create or read an inventory.

        If a working directory is specified, the inventory is read
        from there. If the file is specified, read from that. If not,
        the inventory is created empty.

        The inventory is created with a default root directory, with
        if root_id is not None:
            self._set_root(InventoryDirectory(root_id, u'', None))
        self.revision_id = revision_id
        return "<Inventory object at %x, contents=%r>" % (id(self), self._byid)

    def apply_delta(self, delta):
        """Apply a delta to this inventory.

        :param delta: A list of changes to apply. After all the changes are
            applied the final inventory must be internally consistent, but it
            is ok to supply changes which, if only half-applied, would have an
            invalid result - such as supplying two changes which rename two
            files, 'A' and 'B' with each other: [('A', 'B', 'A-id', a_entry),
            ('B', 'A', 'B-id', b_entry)].

        Each change is a tuple, of the form (old_path, new_path, file_id,

        When new_path is None, the change indicates the removal of an entry
        from the inventory and new_entry will be ignored (using None is
        appropriate). If new_path is not None, then new_entry must be an
        InventoryEntry instance, which will be incorporated into the
        inventory (and replace any existing entry with the same file id).

        When old_path is None, the change indicates the addition of
        a new entry to the inventory.

        When neither new_path nor old_path are None, the change is a
        modification to an entry, such as a rename, reparent, kind change

        The children attribute of new_entry is ignored. This is because
        this method preserves children automatically across alterations to
        the parent of the children, and cases where the parent id of a
        child is changing require the child to be passed in as a separate
        change regardless. E.g. in the recursive deletion of a directory -
        the directory's children must be included in the delta, or the
        final inventory will be invalid.
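
        For example (an illustrative sketch only, using the made-up id
        '123-123'), renaming hello.c to goodbye.c under the same parent could
        be expressed as::

            inv.apply_delta(
                [('hello.c', 'goodbye.c', '123-123',
                  InventoryFile('123-123', 'goodbye.c', inv.root.file_id))])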
        # Remove all affected items which were in the original inventory,
        # starting with the longest paths, thus ensuring parents are examined
        # after their children, which means that everything we examine has no
        # modified children remaining by the time we examine it.
        for old_path, file_id in sorted(((op, f) for op, np, f, e in delta
                                         if op is not None), reverse=True):
            if file_id not in self:
            # Preserve unaltered children of file_id for later reinsertion.
            file_id_children = getattr(self[file_id], 'children', {})
            if len(file_id_children):
                children[file_id] = file_id_children
            # Remove file_id and the unaltered children. If file_id is not
            # being deleted it will be reinserted back later.
            self.remove_recursive_id(file_id)
        # Insert all affected which should be in the new inventory, reattaching
        # their children if they had any. This is done from shortest path to
        # longest, ensuring that items which were modified and whose parents in
        # the resulting inventory were also modified, are inserted after their
        for new_path, new_entry in sorted((np, e) for op, np, f, e in
                                          delta if np is not None):
            if new_entry.kind == 'directory':
                # Pop the children to allow detection of children whose
                # parents were deleted and which were not reattached to a new
                new_entry.children = children.pop(new_entry.file_id, {})
        # Get the parent id that was deleted
        parent_id, children = children.popitem()
        raise errors.InconsistentDelta("<deleted>", parent_id,
            "The file id was deleted but its children were not deleted.")

    def _set_root(self, ie):
        self._byid = {self.root.file_id: self.root}
        # TODO: jam 20051218 Should copy also copy the revision_id?
        entries = self.iter_entries()
        if self.root is None:
            return Inventory(root_id=None)
        other = Inventory(entries.next()[1].file_id)
        other.root.revision = self.root.revision
        # copy recursively so we know directories will be added before
        # their children. There are more efficient ways than this...
        for path, entry in entries:
            other.add(entry.copy())
        return iter(self._byid)
        """Returns number of entries."""
        return len(self._byid)

    def iter_entries(self, from_dir=None):
        """Return (path, entry) pairs, in order by name."""
        if from_dir is None:
            if self.root is None:
            from_dir = self.root
            yield '', self.root
        elif isinstance(from_dir, basestring):
            from_dir = self[from_dir]
            from_dir = self._byid[from_dir]
        # unrolling the recursive calls changed the time from
        # 440ms/663ms (inline/total) to 116ms/116ms
        children = from_dir.children.items()
        for name, ie in children:
        children = collections.deque(children)
        stack = [(u'', children)]
            descend(child_ie, child_path)
        descend(self.root, u'')

    def path2id(self, relpath):
        """Walk down through directories to return entry of last component.

        :param relpath: may be either a list of path components, or a single
            string, in which case it is automatically split.

        This returns the entry of the last component in the path,
        which may be either a file or a directory.

        Returns None IFF the path is not found.
        if isinstance(relpath, basestring):
            names = osutils.splitpath(relpath)
        except errors.NoSuchId:
            # root doesn't exist yet so nothing else can
        children = getattr(parent, 'children', None)
        return parent.file_id

    def filter(self, specific_fileids):
        """Get an inventory view filtered against a set of file-ids.

        Children of directories and parents are included.

        The result may or may not reference the underlying inventory
        so it should be treated as immutable.
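
        For example (an illustrative sketch, ids made up)::

            sub_inv = inv.filter(['src-id'])
            # sub_inv contains the root, 'src-id' and everything under src/,
            # plus any parent directories needed to reach the requested ids.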
        interesting_parents = set()
        for fileid in specific_fileids:
            interesting_parents.update(self.get_idpath(fileid))
            except errors.NoSuchId:
                # This fileid is not in the inventory - that's ok
        entries = self.iter_entries()
        if self.root is None:
            return Inventory(root_id=None)
        other = Inventory(entries.next()[1].file_id)
        other.root.revision = self.root.revision
        other.revision_id = self.revision_id
        directories_to_expand = set()
        for path, entry in entries:
            file_id = entry.file_id
            if (file_id in specific_fileids
                or entry.parent_id in directories_to_expand):
                if entry.kind == 'directory':
                    directories_to_expand.add(file_id)
            elif file_id not in interesting_parents:
            other.add(entry.copy())

    def get_idpath(self, file_id):
        """Return a list of file_ids for the path to an entry.

        The list contains one element for each directory followed by
        the id of the file itself. So the length of the returned list
        is equal to the depth of the file in the tree, counting the
        root directory as depth 1.
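
        For example (illustrative, ids made up), a file versioned as 'foo-id'
        that lives at src/foo.c would give::

            inv.get_idpath('foo-id')  # -> [ROOT_ID, 'src-id', 'foo-id']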
        for parent in self._iter_file_id_parents(file_id):
            p.insert(0, parent.file_id)


class Inventory(CommonInventory):
    """Mutable dict based in-memory inventory.

    We never store the full path to a file, because renaming a directory
    implicitly moves all of its contents. This class internally maintains a
    lookup tree that allows the children under a directory to be

    >>> inv = Inventory()
    >>> inv.add(InventoryFile('123-123', 'hello.c', ROOT_ID))
    InventoryFile('123-123', 'hello.c', parent_id='TREE_ROOT', sha1=None, len=None, revision=None)
    >>> inv['123-123'].name

    Id's may be looked up from paths:

    >>> inv.path2id('hello.c')
    >>> '123-123' in inv

    There are iterators over the contents:

    >>> [entry[0] for entry in inv.iter_entries()]

    def __init__(self, root_id=ROOT_ID, revision_id=None):
        """Create or read an inventory.

        If a working directory is specified, the inventory is read
        from there. If the file is specified, read from that. If not,
        the inventory is created empty.

        The inventory is created with a default root directory, with
        if root_id is not None:
            self._set_root(InventoryDirectory(root_id, u'', None))
        self.revision_id = revision_id
        # More than one page of output is not useful any more for debugging
        contents = repr(self._byid)
        if len(contents) > max_len:
            contents = contents[:(max_len-len(closing))] + closing
        return "<Inventory object at %x, contents=%r>" % (id(self), contents)

    def apply_delta(self, delta):
        """Apply a delta to this inventory.

        See the inventory developers documentation for the theory behind
        If delta application fails the inventory is left in an indeterminate
        state and must not be used.

        :param delta: A list of changes to apply. After all the changes are
            applied the final inventory must be internally consistent, but it
            is ok to supply changes which, if only half-applied, would have an
            invalid result - such as supplying two changes which rename two
            files, 'A' and 'B' with each other: [('A', 'B', 'A-id', a_entry),
            ('B', 'A', 'B-id', b_entry)].

        Each change is a tuple, of the form (old_path, new_path, file_id,

        When new_path is None, the change indicates the removal of an entry
        from the inventory and new_entry will be ignored (using None is
        appropriate). If new_path is not None, then new_entry must be an
        InventoryEntry instance, which will be incorporated into the
        inventory (and replace any existing entry with the same file id).

        When old_path is None, the change indicates the addition of
        a new entry to the inventory.

        When neither new_path nor old_path are None, the change is a
        modification to an entry, such as a rename, reparent, kind change

        The children attribute of new_entry is ignored. This is because
        this method preserves children automatically across alterations to
        the parent of the children, and cases where the parent id of a
        child is changing require the child to be passed in as a separate
        change regardless. E.g. in the recursive deletion of a directory -
        the directory's children must be included in the delta, or the
        final inventory will be invalid.

        Note that a file_id must only appear once within a given delta.
        An AssertionError is raised otherwise.
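
        For example (an illustrative sketch with made-up ids), recursively
        deleting a directory src/ that contains src/foo.c requires both
        entries in the delta::

            inv.apply_delta([('src/foo.c', None, 'foo-id', None),
                             ('src', None, 'src-id', None)])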
        # Check that the delta is legal. It would be nice if this could be
        # done within the loops below but it's safer to validate the delta
        # before starting to mutate the inventory, as there isn't a rollback
        list(_check_delta_unique_ids(_check_delta_unique_new_paths(
            _check_delta_unique_old_paths(_check_delta_ids_match_entry(
            _check_delta_ids_are_valid(
            _check_delta_new_path_entry_both_or_None(
        # Remove all affected items which were in the original inventory,
        # starting with the longest paths, thus ensuring parents are examined
        # after their children, which means that everything we examine has no
        # modified children remaining by the time we examine it.
        for old_path, file_id in sorted(((op, f) for op, np, f, e in delta
                                         if op is not None), reverse=True):
            # Preserve unaltered children of file_id for later reinsertion.
            file_id_children = getattr(self[file_id], 'children', {})
            if len(file_id_children):
                children[file_id] = file_id_children
            if self.id2path(file_id) != old_path:
                raise errors.InconsistentDelta(old_path, file_id,
                    "Entry was at wrong other path %r." % self.id2path(file_id))
            # Remove file_id and the unaltered children. If file_id is not
            # being deleted it will be reinserted back later.
            self.remove_recursive_id(file_id)
        # Insert all affected which should be in the new inventory, reattaching
        # their children if they had any. This is done from shortest path to
        # longest, ensuring that items which were modified and whose parents in
        # the resulting inventory were also modified, are inserted after their
        for new_path, f, new_entry in sorted((np, f, e) for op, np, f, e in
                                             delta if np is not None):
            if new_entry.kind == 'directory':
                # Pop the children to allow detection of children whose
                # parents were deleted and which were not reattached to a new
                replacement = InventoryDirectory(new_entry.file_id,
                    new_entry.name, new_entry.parent_id)
                replacement.revision = new_entry.revision
                replacement.children = children.pop(replacement.file_id, {})
                new_entry = replacement
            except errors.DuplicateFileId:
                raise errors.InconsistentDelta(new_path, new_entry.file_id,
                    "New id is already present in target.")
            except AttributeError:
                raise errors.InconsistentDelta(new_path, new_entry.file_id,
                    "Parent is not a directory.")
            if self.id2path(new_entry.file_id) != new_path:
                raise errors.InconsistentDelta(new_path, new_entry.file_id,
                    "New path is not consistent with parent path.")
        # Get the parent id that was deleted
        parent_id, children = children.popitem()
        raise errors.InconsistentDelta("<deleted>", parent_id,
            "The file id was deleted but its children were not deleted.")

    def create_by_apply_delta(self, inventory_delta, new_revision_id,
                              propagate_caches=False):
        """See CHKInventory.create_by_apply_delta()"""
        new_inv = self.copy()
        new_inv.apply_delta(inventory_delta)
        new_inv.revision_id = new_revision_id

    def _set_root(self, ie):
        self._byid = {self.root.file_id: self.root}
        # TODO: jam 20051218 Should copy also copy the revision_id?
        entries = self.iter_entries()
        if self.root is None:
            return Inventory(root_id=None)
        other = Inventory(entries.next()[1].file_id)
        other.root.revision = self.root.revision
        # copy recursively so we know directories will be added before
        # their children. There are more efficient ways than this...
        for path, entry in entries:
            other.add(entry.copy())

    def _get_mutable_inventory(self):
        """See CommonInventory._get_mutable_inventory."""
        return copy.deepcopy(self)
        """Iterate over all file-ids."""
        return iter(self._byid)

    def iter_just_entries(self):
        """Iterate over all entries.

        Unlike iter_entries(), just the entries are returned (not (path, ie))
        and the order of entries is undefined.

    def __contains__(self, file_id):
        """True if this entry contains a file with given id.
        XXX: We may not want to merge this into bzr.dev.

        >>> inv = Inventory()
        >>> inv.add(InventoryFile('123', 'foo.c', ROOT_ID))
        InventoryFile('123', 'foo.c', parent_id='TREE_ROOT', sha1=None, len=None)
        if self.root is None:
        for _, ie in self._byid.iteritems():
        """Returns number of entries."""
        return len(self._byid)
        return (file_id in self._byid)

    def __getitem__(self, file_id):
        """Return the entry for given file_id.

        >>> inv = Inventory()
        >>> inv.add(InventoryFile('123123', 'hello.c', ROOT_ID))
        InventoryFile('123123', 'hello.c', parent_id='TREE_ROOT', sha1=None, len=None, revision=None)
        InventoryFile('123123', 'hello.c', parent_id='TREE_ROOT', sha1=None, len=None)
        >>> inv['123123'].name
        return self.root is not None and file_id == self.root.file_id


class CHKInventory(CommonInventory):
    """An inventory persisted in a CHK store.

    By design, a CHKInventory is immutable so many of the methods
    supported by Inventory - add, rename, apply_delta, etc - are *not*
    supported. To create a new CHKInventory, use create_by_apply_delta()
    or from_inventory(), say.

    Internally, a CHKInventory has one or two CHKMaps:

    * id_to_entry - a map from (file_id,) => InventoryEntry as bytes
    * parent_id_basename_to_file_id - a map from (parent_id, basename_utf8)

    The second map is optional and not present in early CHKRepositories.

    No caching is performed: every method call or item access will perform
    requests to the storage layer. As such, keep references to objects you
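
    A typical way to obtain one (an illustrative sketch, not a complete
    recipe) is from an existing inventory, or by applying a delta to an
    existing CHKInventory::

        chk_inv = CHKInventory.from_inventory(chk_store, inv)
        new_inv = chk_inv.create_by_apply_delta(delta, new_revision_id)

    where chk_store is a CHK capable VersionedFiles instance.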

    def __init__(self, search_key_name):
        CommonInventory.__init__(self)
        self._fileid_to_entry_cache = {}
        self._path_to_fileid_cache = {}
        self._search_key_name = search_key_name

    def __eq__(self, other):
        """Compare two inventories by comparing their contents."""
        if not isinstance(other, CHKInventory):
            return NotImplemented
        this_key = self.id_to_entry.key()
        other_key = other.id_to_entry.key()
        this_pid_key = self.parent_id_basename_to_file_id.key()
        other_pid_key = other.parent_id_basename_to_file_id.key()
        if None in (this_key, this_pid_key, other_key, other_pid_key):
        return this_key == other_key and this_pid_key == other_pid_key

    def _entry_to_bytes(self, entry):
        """Serialise entry as a single bytestring.

        :param entry: An inventory entry.
        :return: A bytestring for the entry.

        ENTRY ::= FILE | DIR | SYMLINK | TREE
        FILE ::= "file: " COMMON SEP SHA SEP SIZE SEP EXECUTABLE
        DIR ::= "dir: " COMMON
        SYMLINK ::= "symlink: " COMMON SEP TARGET_UTF8
        TREE ::= "tree: " COMMON REFERENCE_REVISION
        COMMON ::= FILE_ID SEP PARENT_ID SEP NAME_UTF8 SEP REVISION
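
        For example (illustrative), a file entry is serialised roughly as::

            file: <file_id>\n<parent_id>\n<name_utf8>\n<revision>\n<sha1>\n<size>\n<Y or N>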
        if entry.parent_id is not None:
            parent_str = entry.parent_id
        name_str = entry.name.encode("utf8")
        if entry.kind == 'file':
            if entry.executable:
            return "file: %s\n%s\n%s\n%s\n%s\n%d\n%s" % (
                entry.file_id, parent_str, name_str, entry.revision,
                entry.text_sha1, entry.text_size, exec_str)
        elif entry.kind == 'directory':
            return "dir: %s\n%s\n%s\n%s" % (
                entry.file_id, parent_str, name_str, entry.revision)
        elif entry.kind == 'symlink':
            return "symlink: %s\n%s\n%s\n%s\n%s" % (
                entry.file_id, parent_str, name_str, entry.revision,
                entry.symlink_target.encode("utf8"))
        elif entry.kind == 'tree-reference':
            return "tree: %s\n%s\n%s\n%s\n%s" % (
                entry.file_id, parent_str, name_str, entry.revision,
                entry.reference_revision)
        raise ValueError("unknown kind %r" % entry.kind)

    def _expand_fileids_to_parents_and_children(self, file_ids):
        """Give a more holistic view starting with the given file_ids.

        For any file_id which maps to a directory, we will include all children
        of that directory. We will also include all directories which are
        parents of the given file_ids, but we will not include their children.

            fringle # fringle-id

        if given [foo-id] we will include
        TREE_ROOT as interesting parents
        foo-id, baz-id, frob-id, fringle-id
        # TODO: Pre-pass over the list of fileids to see if anything is already
        #       deserialized in self._fileid_to_entry_cache
        directories_to_expand = set()
        children_of_parent_id = {}
        # It is okay if some of the fileids are missing
        for entry in self._getitems(file_ids):
            if entry.kind == 'directory':
                directories_to_expand.add(entry.file_id)
            interesting.add(entry.parent_id)
            children_of_parent_id.setdefault(entry.parent_id, []
                                             ).append(entry.file_id)
        # Now, interesting has all of the direct parents, but not the
        # parents of those parents. It also may have some duplicates with
        remaining_parents = interesting.difference(file_ids)
        # When we hit the TREE_ROOT, we'll get an interesting parent of None,
        # but we don't actually want to recurse into that
        interesting.add(None) # this will auto-filter it in the loop
        remaining_parents.discard(None)
        while remaining_parents:
            next_parents = set()
            for entry in self._getitems(remaining_parents):
                next_parents.add(entry.parent_id)
                children_of_parent_id.setdefault(entry.parent_id, []
                                                 ).append(entry.file_id)
            # Remove any search tips we've already processed
            remaining_parents = next_parents.difference(interesting)
            interesting.update(remaining_parents)
            # We should probably also .difference(directories_to_expand)
        interesting.update(file_ids)
        interesting.discard(None)
        while directories_to_expand:
            # Expand directories by looking in the
            # parent_id_basename_to_file_id map
            keys = [StaticTuple(f,).intern() for f in directories_to_expand]
            directories_to_expand = set()
            items = self.parent_id_basename_to_file_id.iteritems(keys)
            next_file_ids = set([item[1] for item in items])
            next_file_ids = next_file_ids.difference(interesting)
            interesting.update(next_file_ids)
            for entry in self._getitems(next_file_ids):
                if entry.kind == 'directory':
                    directories_to_expand.add(entry.file_id)
                children_of_parent_id.setdefault(entry.parent_id, []
                                                 ).append(entry.file_id)
        return interesting, children_of_parent_id

    def filter(self, specific_fileids):
        """Get an inventory view filtered against a set of file-ids.

        Children of directories and parents are included.

        The result may or may not reference the underlying inventory
        so it should be treated as immutable.
        parent_to_children) = self._expand_fileids_to_parents_and_children(
        # There is some overlap here, but we assume that all interesting items
        # are in the _fileid_to_entry_cache because we had to read them to
        # determine if they were a dir we wanted to recurse, or just a file
        # This should give us all the entries we'll want to add, so start
        other = Inventory(self.root_id)
        other.root.revision = self.root.revision
        other.revision_id = self.revision_id
        if not interesting or not parent_to_children:
            # empty filter, or filtering entries that don't exist
            # (if even 1 existed, then we would have populated
            # parent_to_children with at least the tree root.)
        cache = self._fileid_to_entry_cache
        remaining_children = collections.deque(parent_to_children[self.root_id])
        while remaining_children:
            file_id = remaining_children.popleft()
            if ie.kind == 'directory':
                ie = ie.copy() # We create a copy to depopulate the .children attribute
            # TODO: depending on the uses of 'other' we should probably always
            #       '.copy()' to prevent someone from mutating other and
            #       invalidating our internal cache
            if file_id in parent_to_children:
                remaining_children.extend(parent_to_children[file_id])

    def _bytes_to_utf8name_key(bytes):
        """Get the file_id, revision_id key out of bytes."""
        # We don't normally care about name, except for times when we want
        # to filter out empty names because of non rich-root...
        sections = bytes.split('\n')
        kind, file_id = sections[0].split(': ')
        return (sections[2], intern(file_id), intern(sections[3]))

    def _bytes_to_entry(self, bytes):
        """Deserialise a serialised entry."""
        sections = bytes.split('\n')
        if sections[0].startswith("file: "):
            result = InventoryFile(sections[0][6:],
                sections[2].decode('utf8'),
            result.text_sha1 = sections[4]
            result.text_size = int(sections[5])
            result.executable = sections[6] == "Y"
        elif sections[0].startswith("dir: "):
            result = CHKInventoryDirectory(sections[0][5:],
                sections[2].decode('utf8'),
        elif sections[0].startswith("symlink: "):
            result = InventoryLink(sections[0][9:],
                sections[2].decode('utf8'),
            result.symlink_target = sections[4].decode('utf8')
        elif sections[0].startswith("tree: "):
            result = TreeReference(sections[0][6:],
                sections[2].decode('utf8'),
            result.reference_revision = sections[4]
            raise ValueError("Not a serialised entry %r" % bytes)
        result.file_id = intern(result.file_id)
        result.revision = intern(sections[3])
        if result.parent_id == '':
            result.parent_id = None
        self._fileid_to_entry_cache[result.file_id] = result

    def _get_mutable_inventory(self):
        """See CommonInventory._get_mutable_inventory."""
        entries = self.iter_entries()
        inv = Inventory(None, self.revision_id)
        for path, inv_entry in entries:
            inv.add(inv_entry.copy())

    def create_by_apply_delta(self, inventory_delta, new_revision_id,
                              propagate_caches=False):
        """Create a new CHKInventory by applying inventory_delta to this one.

        See the inventory developers documentation for the theory behind
        :param inventory_delta: The inventory delta to apply. See
            Inventory.apply_delta for details.
        :param new_revision_id: The revision id of the resulting CHKInventory.
        :param propagate_caches: If True, the caches for this inventory are
            copied to and updated for the result.
        :return: The new CHKInventory.
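
        For example (an illustrative sketch, ids made up), removing a single
        file would look like::

            new_inv = chk_inv.create_by_apply_delta(
                [('hello.c', None, '123-123', None)], 'new-revision-id')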
        split = osutils.split
        result = CHKInventory(self._search_key_name)
        if propagate_caches:
            # Just propagate the path-to-fileid cache for now
            result._path_to_fileid_cache = dict(self._path_to_fileid_cache.iteritems())
        search_key_func = chk_map.search_key_registry.get(self._search_key_name)
        self.id_to_entry._ensure_root()
        maximum_size = self.id_to_entry._root_node.maximum_size
        result.revision_id = new_revision_id
        result.id_to_entry = chk_map.CHKMap(
            self.id_to_entry._store,
            self.id_to_entry.key(),
            search_key_func=search_key_func)
        result.id_to_entry._ensure_root()
        result.id_to_entry._root_node.set_maximum_size(maximum_size)
        # Change to apply to the parent_id_basename delta. The dict maps
        # (parent_id, basename) -> (old_key, new_value). We use a dict because
        # when a path has its id replaced (e.g. the root is changed, or someone
        # does bzr mv a b, bzr mv c a), we should output a single change to this
        # map rather than two.
        parent_id_basename_delta = {}
        if self.parent_id_basename_to_file_id is not None:
            result.parent_id_basename_to_file_id = chk_map.CHKMap(
                self.parent_id_basename_to_file_id._store,
                self.parent_id_basename_to_file_id.key(),
                search_key_func=search_key_func)
            result.parent_id_basename_to_file_id._ensure_root()
            self.parent_id_basename_to_file_id._ensure_root()
            result_p_id_root = result.parent_id_basename_to_file_id._root_node
            p_id_root = self.parent_id_basename_to_file_id._root_node
            result_p_id_root.set_maximum_size(p_id_root.maximum_size)
            result_p_id_root._key_width = p_id_root._key_width
        result.parent_id_basename_to_file_id = None
        result.root_id = self.root_id
        id_to_entry_delta = []
        # inventory_delta is only traversed once, so we just update the
        # Check for repeated file ids
        inventory_delta = _check_delta_unique_ids(inventory_delta)
        # Repeated old paths
        inventory_delta = _check_delta_unique_old_paths(inventory_delta)
        # Check for repeated new paths
        inventory_delta = _check_delta_unique_new_paths(inventory_delta)
        # Check for entries that don't match the fileid
        inventory_delta = _check_delta_ids_match_entry(inventory_delta)
        # Check for nonsense fileids
        inventory_delta = _check_delta_ids_are_valid(inventory_delta)
        # Check for new_path <-> entry consistency
        inventory_delta = _check_delta_new_path_entry_both_or_None(
        # All changed entries need to have their parents be directories and be
        # at the right path. This set contains (path, id) tuples.
        # When we delete an item, all the children of it must be either deleted
        # or altered in their own right. As we batch process the change via
        # CHKMap.apply_delta, we build a set of things to use to validate the
        for old_path, new_path, file_id, entry in inventory_delta:
                result.root_id = file_id
            if new_path is None:
                if propagate_caches:
                    del result._path_to_fileid_cache[old_path]
                deletes.add(file_id)
                new_key = StaticTuple(file_id,)
                new_value = result._entry_to_bytes(entry)
                # Update caches. It's worth doing this whether
                # we're propagating the old caches or not.
                result._path_to_fileid_cache[new_path] = file_id
                parents.add((split(new_path)[0], entry.parent_id))
            if old_path is None:
                old_key = StaticTuple(file_id,)
                if self.id2path(file_id) != old_path:
                    raise errors.InconsistentDelta(old_path, file_id,
                        "Entry was at wrong other path %r." %
                        self.id2path(file_id))
                altered.add(file_id)
            id_to_entry_delta.append(StaticTuple(old_key, new_key, new_value))
            if result.parent_id_basename_to_file_id is not None:
                # parent_id, basename changes
                if old_path is None:
                    old_entry = self[file_id]
                    old_key = self._parent_id_basename_key(old_entry)
                if new_path is None:
                    new_key = self._parent_id_basename_key(entry)
                # If the two keys are the same, the value will be unchanged
                # as it's always the file id for this entry.
                if old_key != new_key:
                    # Transform a change into explicit delete/add preserving
                    # a possible match on the key from a different file id.
                    if old_key is not None:
                        parent_id_basename_delta.setdefault(
                            old_key, [None, None])[0] = old_key
                    if new_key is not None:
                        parent_id_basename_delta.setdefault(
                            new_key, [None, None])[1] = new_value
        # validate that deletes are complete.
        for file_id in deletes:
            entry = self[file_id]
            if entry.kind != 'directory':
            # This loop could potentially be better by using the id_basename
            # map to just get the child file ids.
            for child in entry.children.values():
                if child.file_id not in altered:
                    raise errors.InconsistentDelta(self.id2path(child.file_id),
                        child.file_id, "Child not deleted or reparented when "
        result.id_to_entry.apply_delta(id_to_entry_delta)
        if parent_id_basename_delta:
            # Transform the parent_id_basename delta data into a linear delta
            # with only one record for a given key. Optimally this would allow
            # re-keying, but it's simpler to just output that as a delete+add
            # to spend less time calculating the delta.
            for key, (old_key, value) in parent_id_basename_delta.iteritems():
                if value is not None:
                    delta_list.append((old_key, key, value))
                    delta_list.append((old_key, None, None))
            result.parent_id_basename_to_file_id.apply_delta(delta_list)
        parents.discard(('', None))
        for parent_path, parent in parents:
                if result[parent].kind != 'directory':
                    raise errors.InconsistentDelta(result.id2path(parent), parent,
                        'Not a directory, but given children')
            except errors.NoSuchId:
                raise errors.InconsistentDelta("<unknown>", parent,
                    "Parent is not present in resulting inventory.")
            if result.path2id(parent_path) != parent:
                raise errors.InconsistentDelta(parent_path, parent,
                    "Parent has wrong path %r." % result.path2id(parent_path))

    def deserialise(klass, chk_store, bytes, expected_revision_id):
        """Deserialise a CHKInventory.

        :param chk_store: A CHK capable VersionedFiles instance.
        :param bytes: The serialised bytes.
        :param expected_revision_id: The revision ID we think this inventory is
        :return: A CHKInventory
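
        The serialised form (illustrative; see to_lines()) is a 'chkinventory:'
        header followed by key/value lines, for example::

            chkinventory:
            revision_id: <revision-id>
            root_id: <root-id>
            parent_id_basename_to_file_id: sha1:...
            id_to_entry: sha1:...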
        lines = bytes.split('\n')
            raise AssertionError('bytes to deserialize must end with an eol')
        if lines[0] != 'chkinventory:':
            raise ValueError("not a serialised CHKInventory: %r" % bytes)
        allowed_keys = frozenset(['root_id', 'revision_id', 'search_key_name',
                                  'parent_id_basename_to_file_id',
        for line in lines[1:]:
            key, value = line.split(': ', 1)
            if key not in allowed_keys:
                raise errors.BzrError('Unknown key in inventory: %r\n%r'
                raise errors.BzrError('Duplicate key in inventory: %r\n%r'
        revision_id = intern(info['revision_id'])
        root_id = intern(info['root_id'])
        search_key_name = intern(info.get('search_key_name', 'plain'))
        parent_id_basename_to_file_id = intern(info.get(
            'parent_id_basename_to_file_id', None))
        if not parent_id_basename_to_file_id.startswith('sha1:'):
            raise ValueError('parent_id_basename_to_file_id should be a sha1'
                             ' key not %r' % (parent_id_basename_to_file_id,))
        id_to_entry = info['id_to_entry']
        if not id_to_entry.startswith('sha1:'):
            raise ValueError('id_to_entry should be a sha1'
                             ' key not %r' % (id_to_entry,))
        result = CHKInventory(search_key_name)
        result.revision_id = revision_id
        result.root_id = root_id
        search_key_func = chk_map.search_key_registry.get(
            result._search_key_name)
        if parent_id_basename_to_file_id is not None:
            result.parent_id_basename_to_file_id = chk_map.CHKMap(
                chk_store, StaticTuple(parent_id_basename_to_file_id,),
                search_key_func=search_key_func)
        result.parent_id_basename_to_file_id = None
        result.id_to_entry = chk_map.CHKMap(chk_store,
                                            StaticTuple(id_to_entry,),
                                            search_key_func=search_key_func)
        if (result.revision_id,) != expected_revision_id:
            raise ValueError("Mismatched revision id and expected: %r, %r" %
                             (result.revision_id, expected_revision_id))

    def from_inventory(klass, chk_store, inventory, maximum_size=0,
                       search_key_name='plain'):
        """Create a CHKInventory from an existing inventory.

        The content of inventory is copied into the chk_store, and a
        CHKInventory referencing that is returned.

        :param chk_store: A CHK capable VersionedFiles instance.
        :param inventory: The inventory to copy.
        :param maximum_size: The CHKMap node size limit.
        :param search_key_name: The identifier for the search key function
        result = klass(search_key_name)
        result.revision_id = inventory.revision_id
        result.root_id = inventory.root.file_id
        entry_to_bytes = result._entry_to_bytes
        parent_id_basename_key = result._parent_id_basename_key
        id_to_entry_dict = {}
        parent_id_basename_dict = {}
        for path, entry in inventory.iter_entries():
            key = StaticTuple(entry.file_id,).intern()
            id_to_entry_dict[key] = entry_to_bytes(entry)
            p_id_key = parent_id_basename_key(entry)
            parent_id_basename_dict[p_id_key] = entry.file_id
        result._populate_from_dicts(chk_store, id_to_entry_dict,
            parent_id_basename_dict, maximum_size=maximum_size)

    def _populate_from_dicts(self, chk_store, id_to_entry_dict,
                             parent_id_basename_dict, maximum_size):
        search_key_func = chk_map.search_key_registry.get(self._search_key_name)
        root_key = chk_map.CHKMap.from_dict(chk_store, id_to_entry_dict,
            maximum_size=maximum_size, key_width=1,
            search_key_func=search_key_func)
        self.id_to_entry = chk_map.CHKMap(chk_store, root_key,
        root_key = chk_map.CHKMap.from_dict(chk_store,
            parent_id_basename_dict,
            maximum_size=maximum_size, key_width=2,
            search_key_func=search_key_func)
        self.parent_id_basename_to_file_id = chk_map.CHKMap(chk_store,
            root_key, search_key_func)

    def _parent_id_basename_key(self, entry):
        """Create a key for an entry in a parent_id_basename_to_file_id index."""
        if entry.parent_id is not None:
            parent_id = entry.parent_id
        return StaticTuple(parent_id, entry.name.encode('utf8')).intern()

    def __getitem__(self, file_id):
        """map a single file_id -> InventoryEntry."""
            raise errors.NoSuchId(self, file_id)
        result = self._fileid_to_entry_cache.get(file_id, None)
        if result is not None:
            return self._bytes_to_entry(
                self.id_to_entry.iteritems([StaticTuple(file_id,)]).next()[1])
        except StopIteration:
            # really we're passing an inventory, not a tree...
            raise errors.NoSuchId(self, file_id)

    def _getitems(self, file_ids):
        """Similar to __getitem__, but lets you query for multiple.

        The returned order is undefined; currently, if an item doesn't
        exist, it isn't included in the output.
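
        For example (an illustrative sketch; ids that are not present are
        silently skipped)::

            entries = chk_inv._getitems(['foo-id', 'bar-id'])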
        for file_id in file_ids:
            entry = self._fileid_to_entry_cache.get(file_id, None)
                remaining.append(file_id)
                result.append(entry)
        file_keys = [StaticTuple(f,).intern() for f in remaining]
        for file_key, value in self.id_to_entry.iteritems(file_keys):
            entry = self._bytes_to_entry(value)
            result.append(entry)
            self._fileid_to_entry_cache[entry.file_id] = entry

    def has_id(self, file_id):
        # Perhaps have an explicit 'contains' method on CHKMap ?
        if self._fileid_to_entry_cache.get(file_id, None) is not None:
            self.id_to_entry.iteritems([StaticTuple(file_id,)]))) == 1

    def is_root(self, file_id):
        return file_id == self.root_id

    def _iter_file_id_parents(self, file_id):
        """Yield the parents of file_id up to the root."""
        while file_id is not None:
                raise errors.NoSuchId(tree=self, file_id=file_id)
            file_id = ie.parent_id
        """Iterate over all file-ids."""
        for key, _ in self.id_to_entry.iteritems():

    def iter_just_entries(self):
        """Iterate over all entries.

        Unlike iter_entries(), just the entries are returned (not (path, ie))
        and the order of entries is undefined.
        XXX: We may not want to merge this into bzr.dev.
        for key, entry in self.id_to_entry.iteritems():
            ie = self._fileid_to_entry_cache.get(file_id, None)
                ie = self._bytes_to_entry(entry)
                self._fileid_to_entry_cache[file_id] = ie

    def iter_changes(self, basis):
        """Generate a Tree.iter_changes change list between this and basis.

        :param basis: Another CHKInventory.
        :return: An iterator over the changes between self and basis, as per
            tree.iter_changes().
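
        Each yielded change has the shape (matching the yield below)::

            (file_id, (path_in_source, path_in_target), changed_content,
             versioned, parent, name, kind, executable)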
        # We want: (file_id, (path_in_source, path_in_target),
        #           changed_content, versioned, parent, name, kind,
        for key, basis_value, self_value in \
            self.id_to_entry.iter_changes(basis.id_to_entry):
            if basis_value is not None:
                basis_entry = basis._bytes_to_entry(basis_value)
                path_in_source = basis.id2path(file_id)
                basis_parent = basis_entry.parent_id
                basis_name = basis_entry.name
                basis_executable = basis_entry.executable
                path_in_source = None
                basis_executable = None
            if self_value is not None:
                self_entry = self._bytes_to_entry(self_value)
                path_in_target = self.id2path(file_id)
                self_parent = self_entry.parent_id
                self_name = self_entry.name
                self_executable = self_entry.executable
                path_in_target = None
                self_executable = None
            if basis_value is None:
                kind = (None, self_entry.kind)
                versioned = (False, True)
            elif self_value is None:
                kind = (basis_entry.kind, None)
                versioned = (True, False)
                kind = (basis_entry.kind, self_entry.kind)
                versioned = (True, True)
            changed_content = False
            if kind[0] != kind[1]:
                changed_content = True
            elif kind[0] == 'file':
                if (self_entry.text_size != basis_entry.text_size or
                    self_entry.text_sha1 != basis_entry.text_sha1):
                    changed_content = True
            elif kind[0] == 'symlink':
                if self_entry.symlink_target != basis_entry.symlink_target:
                    changed_content = True
            elif kind[0] == 'tree-reference':
                if (self_entry.reference_revision !=
                    basis_entry.reference_revision):
                    changed_content = True
            parent = (basis_parent, self_parent)
            name = (basis_name, self_name)
            executable = (basis_executable, self_executable)
            if (not changed_content
                and parent[0] == parent[1]
                and name[0] == name[1]
                and executable[0] == executable[1]):
                # Could happen when only the revision changed for a directory
            yield (file_id, (path_in_source, path_in_target), changed_content,
                versioned, parent, name, kind, executable)
        """Return the number of entries in the inventory."""
        return len(self.id_to_entry)

    def _make_delta(self, old):
        """Make an inventory delta from two inventories."""
        if type(old) != CHKInventory:
            return CommonInventory._make_delta(self, old)
        for key, old_value, self_value in \
            self.id_to_entry.iter_changes(old.id_to_entry):
            if old_value is not None:
                old_path = old.id2path(file_id)
            if self_value is not None:
                entry = self._bytes_to_entry(self_value)
                self._fileid_to_entry_cache[file_id] = entry
                new_path = self.id2path(file_id)
            delta.append((old_path, new_path, file_id, entry))

    def path2id(self, relpath):
        """See CommonInventory.path2id()."""
        # TODO: perhaps support negative hits?
        result = self._path_to_fileid_cache.get(relpath, None)
        if result is not None:
        if isinstance(relpath, basestring):
            names = osutils.splitpath(relpath)
        current_id = self.root_id
        if current_id is None:
        parent_id_index = self.parent_id_basename_to_file_id
        for basename in names:
            if cur_path is None:
                cur_path = cur_path + '/' + basename
            basename_utf8 = basename.encode('utf8')
            file_id = self._path_to_fileid_cache.get(cur_path, None)
            key_filter = [StaticTuple(current_id, basename_utf8)]
            items = parent_id_index.iteritems(key_filter)
            for (parent_id, name_utf8), file_id in items:
                if parent_id != current_id or name_utf8 != basename_utf8:
                    raise errors.BzrError("corrupt inventory lookup! "
                        "%r %r %r %r" % (parent_id, current_id, name_utf8,
            self._path_to_fileid_cache[cur_path] = file_id
            current_id = file_id
        """Serialise the inventory to lines."""
        lines = ["chkinventory:\n"]
        if self._search_key_name != 'plain':
            # custom ordering grouping things that don't change together
            lines.append('search_key_name: %s\n' % (self._search_key_name,))
            lines.append("root_id: %s\n" % self.root_id)
            lines.append('parent_id_basename_to_file_id: %s\n' %
                (self.parent_id_basename_to_file_id.key()[0],))
            lines.append("revision_id: %s\n" % self.revision_id)
            lines.append("id_to_entry: %s\n" % (self.id_to_entry.key()[0],))
            lines.append("revision_id: %s\n" % self.revision_id)
            lines.append("root_id: %s\n" % self.root_id)
            if self.parent_id_basename_to_file_id is not None:
                lines.append('parent_id_basename_to_file_id: %s\n' %
                    (self.parent_id_basename_to_file_id.key()[0],))
            lines.append("id_to_entry: %s\n" % (self.id_to_entry.key()[0],))
        """Get the root entry."""
        return self[self.root_id]


class CHKInventoryDirectory(InventoryDirectory):
    """A directory in an inventory."""

    __slots__ = ['text_sha1', 'text_size', 'file_id', 'name', 'kind',
                 'text_id', 'parent_id', '_children', 'executable',
                 'revision', 'symlink_target', 'reference_revision',

    def __init__(self, file_id, name, parent_id, chk_inventory):
        # Don't call InventoryDirectory.__init__ - it isn't right for this
        InventoryEntry.__init__(self, file_id, name, parent_id)
        self._children = None
        self.kind = 'directory'
        self._chk_inventory = chk_inventory

        """Access the list of children of this directory.

        With a parent_id_basename_to_file_id index, this loads all the
        children; without one it would load the entire index, which is bad.
        A more sophisticated proxy object might be nice, to allow partial
        loading of children as well when specific names are accessed. (So
        path traversal can be written in the obvious way but not examine
        siblings.)
        if self._children is not None:
            return self._children
        # No longer supported
        if self._chk_inventory.parent_id_basename_to_file_id is None:
            raise AssertionError("Inventories without"
                " parent_id_basename_to_file_id are no longer supported")
        # XXX: Todo - use proxy objects for the children rather than loading
        # all when the attribute is referenced.
        parent_id_index = self._chk_inventory.parent_id_basename_to_file_id
        for (parent_id, name_utf8), file_id in parent_id_index.iteritems(
            key_filter=[StaticTuple(self.file_id,)]):
            child_keys.add(StaticTuple(file_id,))
        for file_id_key in child_keys:
            entry = self._chk_inventory._fileid_to_entry_cache.get(
                file_id_key[0], None)
            if entry is not None:
                result[entry.name] = entry
                cached.add(file_id_key)
        child_keys.difference_update(cached)
        # populate; todo: do by name
        id_to_entry = self._chk_inventory.id_to_entry
        for file_id_key, bytes in id_to_entry.iteritems(child_keys):
            entry = self._chk_inventory._bytes_to_entry(bytes)
            result[entry.name] = entry
            self._chk_inventory._fileid_to_entry_cache[file_id_key[0]] = entry
        self._children = result


entry_factory = {
    'directory': InventoryDirectory,
    'file': InventoryFile,