        compatible = False
        return compatible

    def _snapshot_text(self, file_parents, work_tree, commit_builder):
        """See InventoryEntry._snapshot_text."""
        commit_builder.modified_link(
            self.file_id, file_parents, self.symlink_target)


class Inventory(object):
    """Inventory of versioned files in a tree.

    This describes which file_id is present at each point in the tree,
    and possibly the SHA-1 or other information about the file.
    """


class TreeReference(InventoryEntry):

    kind = 'tree-reference'

    def __init__(self, file_id, name, parent_id, revision=None,
                 reference_revision=None):
        InventoryEntry.__init__(self, file_id, name, parent_id)
        self.revision = revision
        self.reference_revision = reference_revision

    def copy(self):
        return TreeReference(self.file_id, self.name, self.parent_id,
                             self.revision, self.reference_revision)

    def _read_tree_state(self, path, work_tree):
        """Populate fields in the inventory entry from the given tree."""
        self.reference_revision = work_tree.get_reference_revision(
            self.file_id, path)

    def _forget_tree_state(self):
        self.reference_revision = None

    def _unchanged(self, previous_ie):
        """See InventoryEntry._unchanged."""
        compatible = super(TreeReference, self)._unchanged(previous_ie)
        if self.reference_revision != previous_ie.reference_revision:
            compatible = False
        return compatible


class CommonInventory(object):
    """Basic inventory logic, defined in terms of primitives like has_id.

    An inventory is the metadata about the contents of a tree.

    This is broadly a map from file_id to entries such as directories, files,
    symlinks and tree references. Each entry maintains its own metadata like
    SHA1 and length for files, or children for a directory.

    Entries can be looked up either by path or by file_id.

    The inventory represents a typical unix file tree, with
    directories containing files and subdirectories. We never store
    the full path to a file, because renaming a directory implicitly
    moves all of its contents. This class internally maintains a
    lookup tree that allows the children under a directory to be
    returned quickly.

    InventoryEntry objects must not be modified after they are
    inserted, other than through the Inventory API.

    >>> inv = Inventory()
    >>> inv.add(InventoryFile('123-123', 'hello.c', ROOT_ID))
    InventoryFile('123-123', 'hello.c', parent_id='TREE_ROOT', sha1=None, len=None)
    >>> inv['123-123'].name
    'hello.c'

    May be treated as an iterator or set to look up file ids:

    >>> bool(inv.path2id('hello.c'))
    True

    May also look up by name:

    >>> [x[0] for x in inv.iter_entries()]
    ['', u'hello.c']
    >>> inv = Inventory('TREE_ROOT-12345678-12345678')
    >>> inv.add(InventoryFile('123-123', 'hello.c', ROOT_ID))
    InventoryFile('123-123', 'hello.c', parent_id='TREE_ROOT-12345678-12345678', sha1=None, len=None)
    """

    def __init__(self, root_id=ROOT_ID, revision_id=None):
        """Create or read an inventory.

        If a working directory is specified, the inventory is read
        from there. If the file is specified, read from that. If not,
        the inventory is created empty.

        The inventory is created with a default root directory, with
        an id of None.
        """
        # We are letting Branch.create() create a unique inventory
        # root id. Rather than generating a random one here.
        # root_id = bzrlib.branch.gen_file_id('TREE_ROOT')
        self.root = InventoryDirectory(root_id, '', None)
        # FIXME: this isn't ever used, changing it to self.revision may break
        # things. TODO make everything use self.revision_id
        self.revision_id = revision_id
        self._byid = {self.root.file_id: self.root}

    def copy(self):
        # TODO: jam 20051218 Should copy also copy the revision_id?
        entries = self.iter_entries()
        other = Inventory(entries.next()[1].file_id)
        # copy recursively so we know directories will be added before
        # their children. There are more efficient ways than this...
        for path, entry in entries:
            other.add(entry.copy())
        return other

    def __iter__(self):
        return iter(self._byid)

    def __len__(self):
        """Returns number of entries."""
        return len(self._byid)

    def iter_entries(self, from_dir=None):
        """Return (path, entry) pairs, in order by name."""

    def __contains__(self, file_id):
        """True if this entry contains a file with given id.

        >>> inv = Inventory()
        >>> inv.add(InventoryFile('123', 'foo.c', ROOT_ID))
        InventoryFile('123', 'foo.c', parent_id='TREE_ROOT', sha1=None, len=None, revision=None)

        Note that this method along with __iter__ are not encouraged for use as
        they are less clear than specific query methods - they may be removed
        in the future.
        """
        return self.has_id(file_id)

    def has_filename(self, filename):
        return bool(self.path2id(filename))

    def id2path(self, file_id):
        """Return as a string the path to file_id.

        >>> i = Inventory()
        >>> e = i.add(InventoryDirectory('src-id', 'src', ROOT_ID))
        >>> e = i.add(InventoryFile('foo-id', 'foo.c', parent_id='src-id'))
        >>> print i.id2path('foo-id')
        src/foo.c

        :raises NoSuchId: If file_id is not present in the inventory.
        """
        # get all names, skipping root
        return '/'.join(reversed(
            [parent.name for parent in
             self._iter_file_id_parents(file_id)][:-1]))

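    # A quick sketch (not from the original source) of how the id and path
    # lookups complement each other; the file ids below are invented:
    #
    #   >>> inv = Inventory('TREE_ROOT')
    #   >>> e = inv.add(InventoryDirectory('src-id', 'src', 'TREE_ROOT'))
    #   >>> e = inv.add(InventoryFile('foo-id', 'foo.c', parent_id='src-id'))
    #   >>> inv.id2path('foo-id')
    #   'src/foo.c'
    #   >>> inv.path2id('src/foo.c')
    #   'foo-id'
    #
    # id2path walks parent links upwards via _iter_file_id_parents, while
    # path2id walks the children mappings downwards, so the two are inverse
    # operations for any entry present in the inventory.
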
    def iter_entries(self, from_dir=None, recursive=True):
        """Return (path, entry) pairs, in order by name.

        :param from_dir: if None, start from the root,
          otherwise start from this directory (either file-id or entry)
        :param recursive: recurse into directories or not
        """
        if from_dir is None:
            if self.root is None:
                return
            from_dir = self.root
            yield '', self.root
        elif isinstance(from_dir, basestring):
            from_dir = self[from_dir]

        # unrolling the recursive calls changed the time from
        # 440ms/663ms (inline/total) to 116ms/116ms
        children = from_dir.children.items()
        children.sort()
        if not recursive:
            for name, ie in children:
                yield name, ie
            return
        children = collections.deque(children)
        stack = [(u'', children)]

        def descend(parent_ie, parent_path):
            accum.append((parent_path, parent_ie))

            kids = [(ie.name, ie) for ie in parent_ie.children.itervalues()
                    if ie.kind == 'directory']

            for name, child_ie in kids:
                child_path = osutils.pathjoin(parent_path, name)
                descend(child_ie, child_path)
        descend(self.root, u'')

    def path2id(self, name):
        """Walk down through directories to return entry of last component.

        names may be either a list of path components, or a single
        string, in which case it is automatically split.

        This returns the entry of the last component in the path,
        which may be either a file or a directory.

        Returns None IFF the path is not found.
        """
        if isinstance(name, basestring):
            name = osutils.splitpath(name)

        # mutter("lookup path %r" % name)

        try:
            parent = self.root
        except errors.NoSuchId:
            # root doesn't exist yet so nothing else can
            return None
        for f in name:
            try:
                children = getattr(parent, 'children', None)
                if children is None:
                    return None
                parent = children[f]
            except KeyError:
                return None
        return parent.file_id

    def filter(self, specific_fileids):
        """Get an inventory view filtered against a set of file-ids.

        Children of directories and parents are included.

        The result may or may not reference the underlying inventory
        so it should be treated as immutable.
        """
        interesting_parents = set()
        for fileid in specific_fileids:
            try:
                interesting_parents.update(self.get_idpath(fileid))
            except errors.NoSuchId:
                # This fileid is not in the inventory - that's ok
                pass
        entries = self.iter_entries()
        if self.root is None:
            return Inventory(root_id=None)
        other = Inventory(entries.next()[1].file_id)
        other.root.revision = self.root.revision
        other.revision_id = self.revision_id
        directories_to_expand = set()
        for path, entry in entries:
            file_id = entry.file_id
            if (file_id in specific_fileids
                or entry.parent_id in directories_to_expand):
                if entry.kind == 'directory':
                    directories_to_expand.add(file_id)
            elif file_id not in interesting_parents:
                continue
            other.add(entry.copy())
        return other

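    # Illustrative sketch (not part of the original source): filtering on a
    # single file id keeps that entry plus the directories leading to it,
    # while unrelated siblings are dropped.  The ids below are invented:
    #
    #   >>> inv = Inventory('TREE_ROOT')
    #   >>> e = inv.add(InventoryDirectory('src-id', 'src', 'TREE_ROOT'))
    #   >>> e = inv.add(InventoryFile('foo-id', 'foo.c', parent_id='src-id'))
    #   >>> e = inv.add(InventoryFile('readme-id', 'README', 'TREE_ROOT'))
    #   >>> sorted(x[0] for x in inv.filter(['foo-id']).iter_entries())
    #   ['', u'src', u'src/foo.c']
    #
    # 'README' is omitted because it is neither a requested id, a parent of
    # one, nor a child of a requested directory.
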
    def get_idpath(self, file_id):
        """Return a list of file_ids for the path to an entry.

        The list contains one element for each directory followed by
        the id of the file itself. So the length of the returned list
        is equal to the depth of the file in the tree, counting the
        root directory as depth 1.
        """
        p = []
        for parent in self._iter_file_id_parents(file_id):
            p.insert(0, parent.file_id)
        return p

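    # A sketch (not from the original source), reusing the invented ids from
    # the filter() example above:
    #
    #   >>> inv.get_idpath('foo-id')
    #   ['TREE_ROOT', 'src-id', 'foo-id']
    #   >>> len(inv.get_idpath('TREE_ROOT'))
    #   1
    #
    # The list has one id per directory on the way down plus the entry
    # itself, matching the "depth" description in the docstring.
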

class Inventory(CommonInventory):
    """Mutable dict based in-memory inventory.

    We never store the full path to a file, because renaming a directory
    implicitly moves all of its contents. This class internally maintains a
    lookup tree that allows the children under a directory to be
    returned quickly.

    >>> inv = Inventory()
    >>> inv.add(InventoryFile('123-123', 'hello.c', ROOT_ID))
    InventoryFile('123-123', 'hello.c', parent_id='TREE_ROOT', sha1=None, len=None, revision=None)
    >>> inv['123-123'].name
    'hello.c'

    Id's may be looked up from paths:

    >>> inv.path2id('hello.c')
    '123-123'
    >>> '123-123' in inv
    True

    There are iterators over the contents:

    >>> [entry[0] for entry in inv.iter_entries()]
    ['', u'hello.c']
    """

    def __init__(self, root_id=ROOT_ID, revision_id=None):
        """Create or read an inventory.

        If a working directory is specified, the inventory is read
        from there. If the file is specified, read from that. If not,
        the inventory is created empty.

        The inventory is created with a default root directory, with
        an id of None.
        """
        if root_id is not None:
            self._set_root(InventoryDirectory(root_id, u'', None))
        else:
            self.root = None
            self._byid = {}
        self.revision_id = revision_id

    def __repr__(self):
        # More than one page of output is not useful anymore to debug
        max_len = 2048
        closing = '...}'
        contents = repr(self._byid)
        if len(contents) > max_len:
            contents = contents[:(max_len-len(closing))] + closing
        return "<Inventory object at %x, contents=%r>" % (id(self), contents)

    def apply_delta(self, delta):
        """Apply a delta to this inventory.

        See the inventory developers documentation for the theory behind
        inventory deltas.

        If delta application fails the inventory is left in an indeterminate
        state and must not be used.

        :param delta: A list of changes to apply. After all the changes are
            applied the final inventory must be internally consistent, but it
            is ok to supply changes which, if only half-applied, would have an
            invalid result - such as supplying two changes which rename two
            files, 'A' and 'B' with each other : [('A', 'B', 'A-id', a_entry),
            ('B', 'A', 'B-id', b_entry)].

        Each change is a tuple, of the form (old_path, new_path, file_id,
        new_entry).

        When new_path is None, the change indicates the removal of an entry
        from the inventory and new_entry will be ignored (using None is
        appropriate). If new_path is not None, then new_entry must be an
        InventoryEntry instance, which will be incorporated into the
        inventory (and replace any existing entry with the same file id).

        When old_path is None, the change indicates the addition of
        a new entry to the inventory.

        When neither new_path nor old_path are None, the change is a
        modification to an entry, such as a rename, reparent, kind change
        etc.

        The children attribute of new_entry is ignored. This is because
        this method preserves children automatically across alterations to
        the parent of the children, and cases where the parent id of a
        child is changing require the child to be passed in as a separate
        change regardless. E.g. in the recursive deletion of a directory -
        the directory's children must be included in the delta, or the
        final inventory will be invalid.

        Note that a file_id must only appear once within a given delta.
        An AssertionError is raised otherwise.
        """
        # Check that the delta is legal. It would be nice if this could be
        # done within the loops below but it's safer to validate the delta
        # before starting to mutate the inventory, as there isn't a rollback
        # facility.
        list(_check_delta_unique_ids(_check_delta_unique_new_paths(
            _check_delta_unique_old_paths(_check_delta_ids_match_entry(
            _check_delta_ids_are_valid(
            _check_delta_new_path_entry_both_or_None(
            delta)))))))

        children = {}
        # Remove all affected items which were in the original inventory,
        # starting with the longest paths, thus ensuring parents are examined
        # after their children, which means that everything we examine has no
        # modified children remaining by the time we examine it.
        for old_path, file_id in sorted(((op, f) for op, np, f, e in delta
                                         if op is not None), reverse=True):
            # Preserve unaltered children of file_id for later reinsertion.
            file_id_children = getattr(self[file_id], 'children', {})
            if len(file_id_children):
                children[file_id] = file_id_children
            if self.id2path(file_id) != old_path:
                raise errors.InconsistentDelta(old_path, file_id,
                    "Entry was at wrong other path %r." % self.id2path(file_id))
            # Remove file_id and the unaltered children. If file_id is not
            # being deleted it will be reinserted back later.
            self.remove_recursive_id(file_id)
        # Insert all affected which should be in the new inventory, reattaching
        # their children if they had any. This is done from shortest path to
        # longest, ensuring that items which were modified and whose parents in
        # the resulting inventory were also modified, are inserted after their
        # parents.
        for new_path, f, new_entry in sorted((np, f, e) for op, np, f, e in
                                             delta if np is not None):
            if new_entry.kind == 'directory':
                # Pop the child, to allow detection of children whose
                # parents were deleted and which were not reattached to a new
                # parent.
                replacement = InventoryDirectory(new_entry.file_id,
                    new_entry.name, new_entry.parent_id)
                replacement.revision = new_entry.revision
                replacement.children = children.pop(replacement.file_id, {})
                new_entry = replacement
            try:
                self.add(new_entry)
            except errors.DuplicateFileId:
                raise errors.InconsistentDelta(new_path, new_entry.file_id,
                    "New id is already present in target.")
            except AttributeError:
                raise errors.InconsistentDelta(new_path, new_entry.file_id,
                    "Parent is not a directory.")
            if self.id2path(new_entry.file_id) != new_path:
                raise errors.InconsistentDelta(new_path, new_entry.file_id,
                    "New path is not consistent with parent path.")
        if len(children):
            # Get the parent id that was deleted
            parent_id, children = children.popitem()
            raise errors.InconsistentDelta("<deleted>", parent_id,
                "The file id was deleted but its children were not deleted.")

    def create_by_apply_delta(self, inventory_delta, new_revision_id,
                              propagate_caches=False):
        """See CHKInventory.create_by_apply_delta()"""
        new_inv = self.copy()
        new_inv.apply_delta(inventory_delta)
        new_inv.revision_id = new_revision_id
        return new_inv

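    # A sketch of a minimal delta, to make the tuple format described in
    # apply_delta() concrete.  This is not from the original source and the
    # ids are invented:
    #
    #   >>> inv = Inventory('TREE_ROOT')
    #   >>> new_file = InventoryFile('hello-id', 'hello.c', 'TREE_ROOT')
    #   >>> delta = [(None, u'hello.c', 'hello-id', new_file)]   # an add
    #   >>> inv.apply_delta(delta)
    #   >>> inv.path2id('hello.c')
    #   'hello-id'
    #
    # A removal would be (u'hello.c', None, 'hello-id', None), and a rename
    # supplies both paths, e.g. (u'hello.c', u'howdy.c', 'hello-id', entry).
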
    def _set_root(self, ie):
        self.root = ie
        self._byid = {self.root.file_id: self.root}

    def copy(self):
        # TODO: jam 20051218 Should copy also copy the revision_id?
        entries = self.iter_entries()
        if self.root is None:
            return Inventory(root_id=None)
        other = Inventory(entries.next()[1].file_id)
        other.root.revision = self.root.revision
        # copy recursively so we know directories will be added before
        # their children. There are more efficient ways than this...
        for path, entry in entries:
            other.add(entry.copy())
        return other

    def _get_mutable_inventory(self):
        """See CommonInventory._get_mutable_inventory."""
        return copy.deepcopy(self)

    def __iter__(self):
        """Iterate over all file-ids."""
        return iter(self._byid)

    def __contains__(self, file_id):
        """True if this entry contains a file with given id.

        >>> inv = Inventory()
        >>> inv.add(InventoryFile('123', 'foo.c', ROOT_ID))
        InventoryFile('123', 'foo.c', parent_id='TREE_ROOT', sha1=None, len=None)
        """
        return file_id in self._byid

    def iter_just_entries(self):
        """Iterate over all entries.

        Unlike iter_entries(), just the entries are returned (not (path, ie))
        and the order of entries is undefined.

        XXX: We may not want to merge this into bzr.dev.
        """
        if self.root is None:
            return
        for _, ie in self._byid.iteritems():
            yield ie

    def __len__(self):
        """Returns number of entries."""
        return len(self._byid)

    def __getitem__(self, file_id):
        """Return the entry for given file_id.

        >>> inv = Inventory()
        >>> inv.add(InventoryFile('123123', 'hello.c', ROOT_ID))
        InventoryFile('123123', 'hello.c', parent_id='TREE_ROOT', sha1=None, len=None, revision=None)
        >>> inv['123123'].name
        'hello.c'
        """
        try:
            return self._byid[file_id]
        except KeyError:
            # really we're passing an inventory, not a tree...
            raise errors.NoSuchId(self, file_id)

    def get_file_kind(self, file_id):
        return self._byid[file_id].kind

        del old_parent.children[file_ie.name]
        new_parent.children[new_name] = file_ie

        file_ie.name = new_name
        file_ie.parent_id = new_parent_id

    def is_root(self, file_id):
        return self.root is not None and file_id == self.root.file_id


class CHKInventory(CommonInventory):
    """An inventory persisted in a CHK store.

    By design, a CHKInventory is immutable so many of the methods
    supported by Inventory - add, rename, apply_delta, etc - are *not*
    supported. To create a new CHKInventory, use create_by_apply_delta()
    or from_inventory(), say.

    Internally, a CHKInventory has one or two CHKMaps:

    * id_to_entry - a map from (file_id,) => InventoryEntry as bytes
    * parent_id_basename_to_file_id - a map from (parent_id, basename_utf8)
        => file_id as bytes

    The second map is optional and not present in early CHK repositories.

    No caching is performed: every method call or item access will perform
    requests to the storage layer. As such, keep references to objects you
    want to reuse.
    """

    def __init__(self, search_key_name):
        CommonInventory.__init__(self)
        self._fileid_to_entry_cache = {}
        self._path_to_fileid_cache = {}
        self._search_key_name = search_key_name
        self.root_id = None

    def __eq__(self, other):
        """Compare two inventories by comparing their contents."""
        if not isinstance(other, CHKInventory):
            return NotImplemented
        this_key = self.id_to_entry.key()
        other_key = other.id_to_entry.key()
        this_pid_key = self.parent_id_basename_to_file_id.key()
        other_pid_key = other.parent_id_basename_to_file_id.key()
        if None in (this_key, this_pid_key, other_key, other_pid_key):
            return False
        return this_key == other_key and this_pid_key == other_pid_key

    def _entry_to_bytes(self, entry):
        """Serialise entry as a single bytestring.

        :param entry: An inventory entry.
        :return: A bytestring for the entry.

        The BNF:
        ENTRY ::= FILE | DIR | SYMLINK | TREE
        FILE ::= "file: " COMMON SEP SHA SEP SIZE SEP EXECUTABLE
        DIR ::= "dir: " COMMON
        SYMLINK ::= "symlink: " COMMON SEP TARGET_UTF8
        TREE ::= "tree: " COMMON REFERENCE_REVISION
        COMMON ::= FILE_ID SEP PARENT_ID SEP NAME_UTF8 SEP REVISION
        SEP ::= "\n"
        """
        if entry.parent_id is not None:
            parent_str = entry.parent_id
        else:
            parent_str = ''
        name_str = entry.name.encode("utf8")
        if entry.kind == 'file':
            if entry.executable:
                exec_str = "Y"
            else:
                exec_str = "N"
            return "file: %s\n%s\n%s\n%s\n%s\n%d\n%s" % (
                entry.file_id, parent_str, name_str, entry.revision,
                entry.text_sha1, entry.text_size, exec_str)
        elif entry.kind == 'directory':
            return "dir: %s\n%s\n%s\n%s" % (
                entry.file_id, parent_str, name_str, entry.revision)
        elif entry.kind == 'symlink':
            return "symlink: %s\n%s\n%s\n%s\n%s" % (
                entry.file_id, parent_str, name_str, entry.revision,
                entry.symlink_target.encode("utf8"))
        elif entry.kind == 'tree-reference':
            return "tree: %s\n%s\n%s\n%s\n%s" % (
                entry.file_id, parent_str, name_str, entry.revision,
                entry.reference_revision)
        else:
            raise ValueError("unknown kind %r" % entry.kind)

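    # To make the grammar above concrete, a sketch of what a serialised
    # directory entry looks like (not taken from the original source; the
    # ids and revision are invented).  Fields are separated by newlines in
    # the order FILE_ID, PARENT_ID, NAME_UTF8, REVISION:
    #
    #   >>> d = InventoryDirectory('src-id', 'src', 'TREE_ROOT')
    #   >>> d.revision = 'rev-1'
    #   >>> CHKInventory('plain')._entry_to_bytes(d)
    #   'dir: src-id\nTREE_ROOT\nsrc\nrev-1'
    #
    # File entries additionally carry the SHA1, the size and an executable
    # flag, giving seven newline-separated fields after the "file: " prefix.
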
    def _expand_fileids_to_parents_and_children(self, file_ids):
        """Give a more holistic view starting with the given file_ids.

        For any file_id which maps to a directory, we will include all children
        of that directory. We will also include all directories which are
        parents of the given file_ids, but we will not include their children.

              fringle # fringle-id

        if given [foo-id] we will include
            TREE_ROOT as interesting parents
        and
            foo-id, baz-id, frob-id, fringle-id
        as interesting ids.
        """
        interesting = set()
        # TODO: Pre-pass over the list of fileids to see if anything is already
        #       deserialized in self._fileid_to_entry_cache
        directories_to_expand = set()
        children_of_parent_id = {}
        # It is okay if some of the fileids are missing
        for entry in self._getitems(file_ids):
            if entry.kind == 'directory':
                directories_to_expand.add(entry.file_id)
            interesting.add(entry.parent_id)
            children_of_parent_id.setdefault(entry.parent_id, []
                                             ).append(entry.file_id)
        # Now, interesting has all of the direct parents, but not the
        # parents of those parents. It also may have some duplicates with
        # the original file_ids.
        remaining_parents = interesting.difference(file_ids)
        # When we hit the TREE_ROOT, we'll get an interesting parent of None,
        # but we don't actually want to recurse into that
        interesting.add(None) # this will auto-filter it in the loop
        remaining_parents.discard(None)
        while remaining_parents:
            if None in remaining_parents:
                import pdb; pdb.set_trace()
            next_parents = set()
            for entry in self._getitems(remaining_parents):
                next_parents.add(entry.parent_id)
                children_of_parent_id.setdefault(entry.parent_id, []
                                                 ).append(entry.file_id)
            # Remove any search tips we've already processed
            remaining_parents = next_parents.difference(interesting)
            interesting.update(remaining_parents)
            # We should probably also .difference(directories_to_expand)
        interesting.update(file_ids)
        interesting.discard(None)
        while directories_to_expand:
            # Expand directories by looking in the
            # parent_id_basename_to_file_id map
            keys = [(f,) for f in directories_to_expand]
            directories_to_expand = set()
            items = self.parent_id_basename_to_file_id.iteritems(keys)
            next_file_ids = set([item[1] for item in items])
            next_file_ids = next_file_ids.difference(interesting)
            interesting.update(next_file_ids)
            for entry in self._getitems(next_file_ids):
                if entry.kind == 'directory':
                    directories_to_expand.add(entry.file_id)
                children_of_parent_id.setdefault(entry.parent_id, []
                                                 ).append(entry.file_id)
        return interesting, children_of_parent_id

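    # A sketch of the return shape (not from the original source), using the
    # invented layout from the docstring above, where foo-id is a directory
    # containing baz-id and frob-id:
    #
    #   interesting, children_of_parent_id = \
    #       inv._expand_fileids_to_parents_and_children(['foo-id'])
    #   # interesting ~= set(['TREE_ROOT', 'foo-id', 'baz-id', 'frob-id',
    #   #                     'fringle-id'])
    #   # children_of_parent_id ~= {'TREE_ROOT': ['foo-id'],
    #   #                           'foo-id': ['baz-id', 'frob-id'], ...}
    #
    # i.e. the first element is a flat set of ids to keep, and the second
    # maps each parent id to the ids of its children that were pulled in.
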
    def filter(self, specific_fileids):
        """Get an inventory view filtered against a set of file-ids.

        Children of directories and parents are included.

        The result may or may not reference the underlying inventory
        so it should be treated as immutable.
        """
        (interesting,
         parent_to_children) = self._expand_fileids_to_parents_and_children(
                                    specific_fileids)
        # There is some overlap here, but we assume that all interesting items
        # are in the _fileid_to_entry_cache because we had to read them to
        # determine if they were a dir we wanted to recurse, or just a file.
        # This should give us all the entries we'll want to add, so start
        # adding.
        other = Inventory(self.root_id)
        other.root.revision = self.root.revision
        other.revision_id = self.revision_id
        if not interesting or not parent_to_children:
            # empty filter, or filtering entries that don't exist
            # (if even 1 existed, then we would have populated
            # parent_to_children with at least the tree root.)
            return other
        cache = self._fileid_to_entry_cache
        try:
            remaining_children = collections.deque(parent_to_children[self.root_id])
        except:
            import pdb; pdb.set_trace()
        while remaining_children:
            file_id = remaining_children.popleft()
            ie = cache[file_id]
            if ie.kind == 'directory':
                ie = ie.copy()  # We create a copy to depopulate the .children attribute
            # TODO: depending on the uses of 'other' we should probably always
            #       '.copy()' to prevent someone from mutating other and
            #       invalidating our internal cache
            other.add(ie)
            if file_id in parent_to_children:
                remaining_children.extend(parent_to_children[file_id])
        return other

    @staticmethod
    def _bytes_to_utf8name_key(bytes):
        """Get the (utf8 name, file_id, revision_id) key out of bytes."""
        # We don't normally care about name, except for times when we want
        # to filter out empty names because of non rich-root...
        sections = bytes.split('\n')
        kind, file_id = sections[0].split(': ')
        return (sections[2], file_id, sections[3])

    def _bytes_to_entry(self, bytes):
        """Deserialise a serialised entry."""
        sections = bytes.split('\n')
        if sections[0].startswith("file: "):
            result = InventoryFile(sections[0][6:],
                sections[2].decode('utf8'),
                sections[1])
            result.text_sha1 = sections[4]
            result.text_size = int(sections[5])
            result.executable = sections[6] == "Y"
        elif sections[0].startswith("dir: "):
            result = CHKInventoryDirectory(sections[0][5:],
                sections[2].decode('utf8'),
                sections[1], self)
        elif sections[0].startswith("symlink: "):
            result = InventoryLink(sections[0][9:],
                sections[2].decode('utf8'),
                sections[1])
            result.symlink_target = sections[4].decode('utf8')
        elif sections[0].startswith("tree: "):
            result = TreeReference(sections[0][6:],
                sections[2].decode('utf8'),
                sections[1])
            result.reference_revision = sections[4]
        else:
            raise ValueError("Not a serialised entry %r" % bytes)
        result.revision = sections[3]
        if result.parent_id == '':
            result.parent_id = None
        self._fileid_to_entry_cache[result.file_id] = result
        return result

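    # A sketch of the round trip between the two helpers above (not from the
    # original source; the entry is the invented one from the earlier
    # _entry_to_bytes example):
    #
    #   >>> chk_inv = CHKInventory('plain')
    #   >>> d = InventoryDirectory('src-id', 'src', 'TREE_ROOT')
    #   >>> d.revision = 'rev-1'
    #   >>> roundtrip = chk_inv._bytes_to_entry(chk_inv._entry_to_bytes(d))
    #   >>> (roundtrip.file_id, roundtrip.name, roundtrip.parent_id, roundtrip.revision)
    #   ('src-id', u'src', 'TREE_ROOT', 'rev-1')
    #
    # Note that the deserialised directory comes back as a
    # CHKInventoryDirectory bound to chk_inv, and its name is unicode because
    # it is decoded from UTF-8.
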
    def _get_mutable_inventory(self):
        """See CommonInventory._get_mutable_inventory."""
        entries = self.iter_entries()
        inv = Inventory(None, self.revision_id)
        for path, inv_entry in entries:
            inv.add(inv_entry.copy())
        return inv

    def create_by_apply_delta(self, inventory_delta, new_revision_id,
                              propagate_caches=False):
        """Create a new CHKInventory by applying inventory_delta to this one.

        See the inventory developers documentation for the theory behind
        inventory deltas.

        :param inventory_delta: The inventory delta to apply. See
            Inventory.apply_delta for details.
        :param new_revision_id: The revision id of the resulting CHKInventory.
        :param propagate_caches: If True, the caches for this inventory are
            copied to and updated for the result.
        :return: The new CHKInventory.
        """
        split = osutils.split
        result = CHKInventory(self._search_key_name)
        if propagate_caches:
            # Just propagate the path-to-fileid cache for now
            result._path_to_fileid_cache = dict(self._path_to_fileid_cache.iteritems())
        search_key_func = chk_map.search_key_registry.get(self._search_key_name)
        self.id_to_entry._ensure_root()
        maximum_size = self.id_to_entry._root_node.maximum_size
        result.revision_id = new_revision_id
        result.id_to_entry = chk_map.CHKMap(
            self.id_to_entry._store,
            self.id_to_entry.key(),
            search_key_func=search_key_func)
        result.id_to_entry._ensure_root()
        result.id_to_entry._root_node.set_maximum_size(maximum_size)
        # Change to apply to the parent_id_basename delta. The dict maps
        # (parent_id, basename) -> (old_key, new_value). We use a dict because
        # when a path has its id replaced (e.g. the root is changed, or someone
        # does bzr mv a b, bzr mv c a), we should output a single change to this
        # map rather than two.
        parent_id_basename_delta = {}
        if self.parent_id_basename_to_file_id is not None:
            result.parent_id_basename_to_file_id = chk_map.CHKMap(
                self.parent_id_basename_to_file_id._store,
                self.parent_id_basename_to_file_id.key(),
                search_key_func=search_key_func)
            result.parent_id_basename_to_file_id._ensure_root()
            self.parent_id_basename_to_file_id._ensure_root()
            result_p_id_root = result.parent_id_basename_to_file_id._root_node
            p_id_root = self.parent_id_basename_to_file_id._root_node
            result_p_id_root.set_maximum_size(p_id_root.maximum_size)
            result_p_id_root._key_width = p_id_root._key_width
        else:
            result.parent_id_basename_to_file_id = None
        result.root_id = self.root_id
        id_to_entry_delta = []
        # inventory_delta is only traversed once, so we just update the
        # variable as we go, chaining the validation generators below.
        # Check for repeated file ids
        inventory_delta = _check_delta_unique_ids(inventory_delta)
        # Repeated old paths
        inventory_delta = _check_delta_unique_old_paths(inventory_delta)
        # Check for repeated new paths
        inventory_delta = _check_delta_unique_new_paths(inventory_delta)
        # Check for entries that don't match the fileid
        inventory_delta = _check_delta_ids_match_entry(inventory_delta)
        # Check for nonsense fileids
        inventory_delta = _check_delta_ids_are_valid(inventory_delta)
        # Check for new_path <-> entry consistency
        inventory_delta = _check_delta_new_path_entry_both_or_None(
            inventory_delta)
        # All changed entries need to have their parents be directories and be
        # at the right path. This set contains (path, id) tuples.
        parents = set()
        # When we delete an item, all the children of it must be either deleted
        # or altered in their own right. As we batch process the change via
        # CHKMap.apply_delta, we build a set of things to use to validate the
        # delta.
        deletes = set()
        altered = set()
        for old_path, new_path, file_id, entry in inventory_delta:
            if new_path == '':
                result.root_id = file_id
            if new_path is None:
                new_key = None
                new_value = None
                if propagate_caches:
                    try:
                        del result._path_to_fileid_cache[old_path]
                    except KeyError:
                        pass
                deletes.add(file_id)
            else:
                new_key = (file_id,)
                new_value = result._entry_to_bytes(entry)
                # Update caches. It's worth doing this whether
                # we're propagating the old caches or not.
                result._path_to_fileid_cache[new_path] = file_id
                parents.add((split(new_path)[0], entry.parent_id))
            if old_path is None:
                old_key = None
            else:
                old_key = (file_id,)
                if self.id2path(file_id) != old_path:
                    raise errors.InconsistentDelta(old_path, file_id,
                        "Entry was at wrong other path %r." %
                        self.id2path(file_id))
                altered.add(file_id)
            id_to_entry_delta.append((old_key, new_key, new_value))
            if result.parent_id_basename_to_file_id is not None:
                # parent_id, basename changes
                if old_path is None:
                    old_key = None
                else:
                    old_entry = self[file_id]
                    old_key = self._parent_id_basename_key(old_entry)
                if new_path is None:
                    new_key = None
                    new_value = None
                else:
                    new_key = self._parent_id_basename_key(entry)
                    new_value = file_id
                # If the two keys are the same, the value will be unchanged
                # as it's always the file id for this entry.
                if old_key != new_key:
                    # Transform a change into explicit delete/add preserving
                    # a possible match on the key from a different file id.
                    if old_key is not None:
                        parent_id_basename_delta.setdefault(
                            old_key, [None, None])[0] = old_key
                    if new_key is not None:
                        parent_id_basename_delta.setdefault(
                            new_key, [None, None])[1] = new_value
        # validate that deletes are complete.
        for file_id in deletes:
            entry = self[file_id]
            if entry.kind != 'directory':
                continue
            # This loop could potentially be better by using the id_basename
            # map to just get the child file ids.
            for child in entry.children.values():
                if child.file_id not in altered:
                    raise errors.InconsistentDelta(self.id2path(child.file_id),
                        child.file_id, "Child not deleted or reparented when "
                        "parent deleted.")
        result.id_to_entry.apply_delta(id_to_entry_delta)
        if parent_id_basename_delta:
            # Transform the parent_id_basename delta data into a linear delta
            # with only one record for a given key. Optimally this would allow
            # re-keying, but it's simpler to just output that as a delete+add
            # to spend less time calculating the delta.
            delta_list = []
            for key, (old_key, value) in parent_id_basename_delta.iteritems():
                if value is not None:
                    delta_list.append((old_key, key, value))
                else:
                    delta_list.append((old_key, None, None))
            result.parent_id_basename_to_file_id.apply_delta(delta_list)
        parents.discard(('', None))
        for parent_path, parent in parents:
            try:
                if result[parent].kind != 'directory':
                    raise errors.InconsistentDelta(result.id2path(parent), parent,
                        'Not a directory, but given children')
            except errors.NoSuchId:
                raise errors.InconsistentDelta("<unknown>", parent,
                    "Parent is not present in resulting inventory.")
            if result.path2id(parent_path) != parent:
                raise errors.InconsistentDelta(parent_path, parent,
                    "Parent has wrong path %r." % result.path2id(parent_path))
        return result

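    # Sketch (not from the original source) of how a rename flows through the
    # loop above.  For a delta entry such as
    #
    #     (u'hello.c', u'howdy.c', 'hello-id', new_entry)
    #
    # id_to_entry receives a single ((file_id,), (file_id,), new_bytes)
    # record, while parent_id_basename_to_file_id sees the old
    # ('TREE_ROOT', 'hello.c') key deleted and a new ('TREE_ROOT', 'howdy.c')
    # key added, because the two keys differ even though the value (the file
    # id) stays the same.
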
    @classmethod
    def deserialise(klass, chk_store, bytes, expected_revision_id):
        """Deserialise a CHKInventory.

        :param chk_store: A CHK capable VersionedFiles instance.
        :param bytes: The serialised bytes.
        :param expected_revision_id: The revision ID we think this inventory is
            for.
        :return: A CHKInventory
        """
        lines = bytes.split('\n')
        if lines[-1] != '':
            raise AssertionError('bytes to deserialize must end with an eol')
        lines.pop()
        if lines[0] != 'chkinventory:':
            raise ValueError("not a serialised CHKInventory: %r" % bytes)
        info = {}
        allowed_keys = frozenset(['root_id', 'revision_id', 'search_key_name',
                                  'parent_id_basename_to_file_id',
                                  'id_to_entry'])
        for line in lines[1:]:
            key, value = line.split(': ', 1)
            if key not in allowed_keys:
                raise errors.BzrError('Unknown key in inventory: %r\n%r'
                                      % (key, bytes))
            if key in info:
                raise errors.BzrError('Duplicate key in inventory: %r\n%r'
                                      % (key, bytes))
            info[key] = value
        revision_id = info['revision_id']
        root_id = info['root_id']
        search_key_name = info.get('search_key_name', 'plain')
        parent_id_basename_to_file_id = info.get(
            'parent_id_basename_to_file_id', None)
        id_to_entry = info['id_to_entry']

        result = CHKInventory(search_key_name)
        result.revision_id = revision_id
        result.root_id = root_id
        search_key_func = chk_map.search_key_registry.get(
            result._search_key_name)
        if parent_id_basename_to_file_id is not None:
            result.parent_id_basename_to_file_id = chk_map.CHKMap(
                chk_store, (parent_id_basename_to_file_id,),
                search_key_func=search_key_func)
        else:
            result.parent_id_basename_to_file_id = None
        result.id_to_entry = chk_map.CHKMap(chk_store, (id_to_entry,),
                                            search_key_func=search_key_func)
        if (result.revision_id,) != expected_revision_id:
            raise ValueError("Mismatched revision id and expected: %r, %r" %
                             (result.revision_id, expected_revision_id))
        return result

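    # The serialised form parsed by deserialise() is the one produced by
    # to_lines() further down: a 'chkinventory:' header followed by
    # 'key: value' lines.  A sketch (the keys are real, the values are
    # invented placeholders):
    #
    #   chkinventory:
    #   revision_id: rev-1
    #   root_id: TREE_ROOT
    #   parent_id_basename_to_file_id: <CHKMap root key>
    #   id_to_entry: <CHKMap root key>
    #
    # The two trailing values are the CHKMap root keys from which the maps
    # are reopened against chk_store.
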
    @classmethod
    def from_inventory(klass, chk_store, inventory, maximum_size=0,
                       search_key_name='plain'):
        """Create a CHKInventory from an existing inventory.

        The content of inventory is copied into the chk_store, and a
        CHKInventory referencing that is returned.

        :param chk_store: A CHK capable VersionedFiles instance.
        :param inventory: The inventory to copy.
        :param maximum_size: The CHKMap node size limit.
        :param search_key_name: The identifier for the search key function
        """
        result = klass(search_key_name)
        result.revision_id = inventory.revision_id
        result.root_id = inventory.root.file_id

        entry_to_bytes = result._entry_to_bytes
        parent_id_basename_key = result._parent_id_basename_key
        id_to_entry_dict = {}
        parent_id_basename_dict = {}
        for path, entry in inventory.iter_entries():
            id_to_entry_dict[(entry.file_id,)] = entry_to_bytes(entry)
            p_id_key = parent_id_basename_key(entry)
            parent_id_basename_dict[p_id_key] = entry.file_id

        result._populate_from_dicts(chk_store, id_to_entry_dict,
            parent_id_basename_dict, maximum_size=maximum_size)
        return result

    def _populate_from_dicts(self, chk_store, id_to_entry_dict,
                             parent_id_basename_dict, maximum_size):
        search_key_func = chk_map.search_key_registry.get(self._search_key_name)
        root_key = chk_map.CHKMap.from_dict(chk_store, id_to_entry_dict,
                   maximum_size=maximum_size, key_width=1,
                   search_key_func=search_key_func)
        self.id_to_entry = chk_map.CHKMap(chk_store, root_key,
                                          search_key_func)
        root_key = chk_map.CHKMap.from_dict(chk_store,
                   parent_id_basename_dict,
                   maximum_size=maximum_size, key_width=2,
                   search_key_func=search_key_func)
        self.parent_id_basename_to_file_id = chk_map.CHKMap(chk_store,
                                                            root_key, search_key_func)

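    # Design note (not from the original source): the two maps use different
    # key widths.  id_to_entry is keyed by 1-tuples such as ('hello-id',)
    # (key_width=1), while parent_id_basename_to_file_id is keyed by 2-tuples
    # such as ('TREE_ROOT', 'hello.c') (key_width=2), which is what lets
    # path lookups filter on a (parent_id, basename) prefix.
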
    def _parent_id_basename_key(self, entry):
        """Create a key for an entry in a parent_id_basename_to_file_id index."""
        if entry.parent_id is not None:
            parent_id = entry.parent_id
        else:
            parent_id = ''
        return parent_id, entry.name.encode('utf8')

    def __getitem__(self, file_id):
        """map a single file_id -> InventoryEntry."""
        if file_id is None:
            raise errors.NoSuchId(self, file_id)
        result = self._fileid_to_entry_cache.get(file_id, None)
        if result is not None:
            return result
        try:
            return self._bytes_to_entry(
                self.id_to_entry.iteritems([(file_id,)]).next()[1])
        except StopIteration:
            # really we're passing an inventory, not a tree...
            raise errors.NoSuchId(self, file_id)

    def _getitems(self, file_ids):
        """Similar to __getitem__, but lets you query for multiple.

        The returned order is undefined, and if an item doesn't exist it is
        not included in the output.
        """
        result = []
        remaining = []
        for file_id in file_ids:
            entry = self._fileid_to_entry_cache.get(file_id, None)
            if entry is None:
                remaining.append(file_id)
            else:
                result.append(entry)
        file_keys = [(f,) for f in remaining]
        for file_key, value in self.id_to_entry.iteritems(file_keys):
            entry = self._bytes_to_entry(value)
            result.append(entry)
            self._fileid_to_entry_cache[entry.file_id] = entry
        return result

    def has_id(self, file_id):
        # Perhaps have an explicit 'contains' method on CHKMap ?
        if self._fileid_to_entry_cache.get(file_id, None) is not None:
            return True
        return len(list(self.id_to_entry.iteritems([(file_id,)]))) == 1

    def is_root(self, file_id):
        return file_id == self.root_id

    def _iter_file_id_parents(self, file_id):
        """Yield the parents of file_id up to the root."""
        while file_id is not None:
            try:
                ie = self[file_id]
            except KeyError:
                raise errors.NoSuchId(tree=self, file_id=file_id)
            yield ie
            file_id = ie.parent_id

    def __iter__(self):
        """Iterate over all file-ids."""
        for key, _ in self.id_to_entry.iteritems():
            yield key[-1]

    def iter_just_entries(self):
        """Iterate over all entries.

        Unlike iter_entries(), just the entries are returned (not (path, ie))
        and the order of entries is undefined.

        XXX: We may not want to merge this into bzr.dev.
        """
        for key, entry in self.id_to_entry.iteritems():
            file_id = key[0]
            ie = self._fileid_to_entry_cache.get(file_id, None)
            if ie is None:
                ie = self._bytes_to_entry(entry)
                self._fileid_to_entry_cache[file_id] = ie
            yield ie

    def iter_changes(self, basis):
        """Generate a Tree.iter_changes change list between this and basis.

        :param basis: Another CHKInventory.
        :return: An iterator over the changes between self and basis, as per
            tree.iter_changes().
        """
        # We want: (file_id, (path_in_source, path_in_target),
        #          changed_content, versioned, parent, name, kind,
        #          executable)
        for key, basis_value, self_value in \
                self.id_to_entry.iter_changes(basis.id_to_entry):
            file_id = key[0]
            if basis_value is not None:
                basis_entry = basis._bytes_to_entry(basis_value)
                path_in_source = basis.id2path(file_id)
                basis_parent = basis_entry.parent_id
                basis_name = basis_entry.name
                basis_executable = basis_entry.executable
            else:
                path_in_source = None
                basis_parent = None
                basis_name = None
                basis_executable = None
            if self_value is not None:
                self_entry = self._bytes_to_entry(self_value)
                path_in_target = self.id2path(file_id)
                self_parent = self_entry.parent_id
                self_name = self_entry.name
                self_executable = self_entry.executable
            else:
                path_in_target = None
                self_parent = None
                self_name = None
                self_executable = None
            if basis_value is None:
                # added
                kind = (None, self_entry.kind)
                versioned = (False, True)
            elif self_value is None:
                # deleted
                kind = (basis_entry.kind, None)
                versioned = (True, False)
            else:
                kind = (basis_entry.kind, self_entry.kind)
                versioned = (True, True)
            changed_content = False
            if kind[0] != kind[1]:
                changed_content = True
            elif kind[0] == 'file':
                if (self_entry.text_size != basis_entry.text_size or
                        self_entry.text_sha1 != basis_entry.text_sha1):
                    changed_content = True
            elif kind[0] == 'symlink':
                if self_entry.symlink_target != basis_entry.symlink_target:
                    changed_content = True
            elif kind[0] == 'tree-reference':
                if (self_entry.reference_revision !=
                        basis_entry.reference_revision):
                    changed_content = True
            parent = (basis_parent, self_parent)
            name = (basis_name, self_name)
            executable = (basis_executable, self_executable)
            if (not changed_content
                and parent[0] == parent[1]
                and name[0] == name[1]
                and executable[0] == executable[1]):
                # Could happen when only the revision changed for a directory
                # for instance.
                continue
            yield (file_id, (path_in_source, path_in_target), changed_content,
                versioned, parent, name, kind, executable)

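    # The tuples yielded above follow the Tree.iter_changes() convention.  A
    # sketch (not from the original source) for a file whose content changed
    # but which kept its name and parent:
    #
    #   ('hello-id',                  # file_id
    #    (u'hello.c', u'hello.c'),    # (path_in_source, path_in_target)
    #    True,                        # changed_content
    #    (True, True),                # versioned in (basis, self)
    #    ('TREE_ROOT', 'TREE_ROOT'),  # parent ids
    #    (u'hello.c', u'hello.c'),    # names
    #    ('file', 'file'),            # kinds
    #    (False, False))              # executable flags
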
    def __len__(self):
        """Return the number of entries in the inventory."""
        return len(self.id_to_entry)

    def _make_delta(self, old):
        """Make an inventory delta from two inventories."""
        if type(old) != CHKInventory:
            return CommonInventory._make_delta(self, old)
        delta = []
        for key, old_value, self_value in \
                self.id_to_entry.iter_changes(old.id_to_entry):
            file_id = key[0]
            if old_value is not None:
                old_path = old.id2path(file_id)
            else:
                old_path = None
            if self_value is not None:
                entry = self._bytes_to_entry(self_value)
                self._fileid_to_entry_cache[file_id] = entry
                new_path = self.id2path(file_id)
            else:
                entry = None
                new_path = None
            delta.append((old_path, new_path, file_id, entry))
        return delta

    def path2id(self, name):
        """See CommonInventory.path2id()."""
        # TODO: perhaps support negative hits?
        result = self._path_to_fileid_cache.get(name, None)
        if result is not None:
            return result
        if isinstance(name, basestring):
            names = osutils.splitpath(name)
        else:
            names = name
        current_id = self.root_id
        if current_id is None:
            return None
        parent_id_index = self.parent_id_basename_to_file_id
        for basename in names:
            # TODO: Cache each path we figure out in this function.
            basename_utf8 = basename.encode('utf8')
            key_filter = [(current_id, basename_utf8)]
            file_id = None
            for (parent_id, name_utf8), file_id in parent_id_index.iteritems(
                    key_filter=key_filter):
                if parent_id != current_id or name_utf8 != basename_utf8:
                    raise errors.BzrError("corrupt inventory lookup! "
                        "%r %r %r %r" % (parent_id, current_id, name_utf8,
                        basename_utf8))
            if file_id is None:
                return None
            current_id = file_id
        self._path_to_fileid_cache[name] = current_id
        return current_id

    def to_lines(self):
        """Serialise the inventory to lines."""
        lines = ["chkinventory:\n"]
        if self._search_key_name != 'plain':
            # custom ordering grouping things that don't change together
            lines.append('search_key_name: %s\n' % (self._search_key_name,))
            lines.append("root_id: %s\n" % self.root_id)
            lines.append('parent_id_basename_to_file_id: %s\n' %
                self.parent_id_basename_to_file_id.key())
            lines.append("revision_id: %s\n" % self.revision_id)
            lines.append("id_to_entry: %s\n" % self.id_to_entry.key())
        else:
            lines.append("revision_id: %s\n" % self.revision_id)
            lines.append("root_id: %s\n" % self.root_id)
            if self.parent_id_basename_to_file_id is not None:
                lines.append('parent_id_basename_to_file_id: %s\n' %
                    self.parent_id_basename_to_file_id.key())
            lines.append("id_to_entry: %s\n" % self.id_to_entry.key())
        return lines

    @property
    def root(self):
        """Get the root entry."""
        return self[self.root_id]


class CHKInventoryDirectory(InventoryDirectory):
    """A directory in an inventory."""

    __slots__ = ['text_sha1', 'text_size', 'file_id', 'name', 'kind',
                 'text_id', 'parent_id', '_children', 'executable',
                 'revision', 'symlink_target', 'reference_revision',
                 '_chk_inventory']

    def __init__(self, file_id, name, parent_id, chk_inventory):
        # Don't call InventoryDirectory.__init__ - it isn't right for this
        # class.
        InventoryEntry.__init__(self, file_id, name, parent_id)
        self._children = None
        self.kind = 'directory'
        self._chk_inventory = chk_inventory

    @property
    def children(self):
        """Access the list of children of this directory.

        With a parent_id_basename_to_file_id index, this loads all the
        children; without one it would have to load the entire index, which
        is bad. A more sophisticated proxy object might be nice, to allow
        partial loading of children as well when specific names are accessed.
        (So path traversal can be written in the obvious way but not examine
        siblings.)
        """
        if self._children is not None:
            return self._children
        # No longer supported
        if self._chk_inventory.parent_id_basename_to_file_id is None:
            raise AssertionError("Inventories without"
                " parent_id_basename_to_file_id are no longer supported")
        result = {}
        # XXX: Todo - use proxy objects for the children rather than loading
        # all when the attribute is referenced.
        parent_id_index = self._chk_inventory.parent_id_basename_to_file_id
        child_keys = set()
        for (parent_id, name_utf8), file_id in parent_id_index.iteritems(
                key_filter=[(self.file_id,)]):
            child_keys.add((file_id,))
        cached = set()
        for file_id_key in child_keys:
            entry = self._chk_inventory._fileid_to_entry_cache.get(
                file_id_key[0], None)
            if entry is not None:
                result[entry.name] = entry
                cached.add(file_id_key)
        child_keys.difference_update(cached)
        # populate; todo: do by name
        id_to_entry = self._chk_inventory.id_to_entry
        for file_id_key, bytes in id_to_entry.iteritems(child_keys):
            entry = self._chk_inventory._bytes_to_entry(bytes)
            result[entry.name] = entry
            self._chk_inventory._fileid_to_entry_cache[file_id_key[0]] = entry
        self._children = result
        return result


entry_factory = {
    'directory': InventoryDirectory,
    'file': InventoryFile,
    'symlink': InventoryLink,
    'tree-reference': TreeReference
}


def make_entry(kind, name, parent_id, file_id=None):
    """Create an inventory entry.