lines by NL. The field delimiters are omitted in the grammar, line delimiters
are not - this is done for clarity of reading. All string data is in utf8.

::

    MINIKIND = "f" | "d" | "l" | "a" | "r" | "t";
    WHOLE_NUMBER = {digit}, digit;
    REVISION_ID = a non-empty utf8 string;

    dirstate format = header line, full checksum, row count, parent details,
     ghost_details, entries;
    header line = "#bazaar dirstate flat format 3", NL;
    full checksum = "crc32: ", ["-"], WHOLE_NUMBER, NL;
    row count = "num_entries: ", WHOLE_NUMBER, NL;
    parent_details = WHOLE_NUMBER, {REVISION_ID}*, NL;
    ghost_details = WHOLE_NUMBER, {REVISION_ID}*, NL;

    entry = entry_key, current_entry_details, {parent_entry_details};
    entry_key = dirname, basename, fileid;
    current_entry_details = common_entry_details, working_entry_details;
    parent_entry_details = common_entry_details, history_entry_details;
    common_entry_details = MINIKIND, fingerprint, size, executable;
    working_entry_details = packed_stat;
    history_entry_details = REVISION_ID;

    fingerprint = a nonempty utf8 sequence with meaning defined by minikind.

Given this definition, the following is useful to know::

    entry (aka row) - all the data for a given key.
    entry[0]: The key (dirname, basename, fileid)
    entry[1]: The tree(s) data for this path and id combination.
    entry[1][0]: The current tree
    entry[1][1]: The second tree

For an entry for a tree, we have (using tree 0 - current tree) to demonstrate::

    entry[1][0][0]: minikind
    entry[1][0][1]: fingerprint
    entry[1][0][2]: size
    entry[1][0][3]: executable
    entry[1][0][4]: packed_stat

OR (for non tree-0)::

    entry[1][1][4]: revision_id
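
For instance, a versioned file at the tree root with one parent tree might be
held as the following entry (a sketch; the field values here are invented for
illustration)::

    ('', 'README', 'readme-file-id'),                 # entry[0], the key
    [('f', '<sha1>', 120, False, '<packed-stat>'),    # entry[1][0], tree 0
     ('f', '<sha1>', 120, False, '<revision-id>')]    # entry[1][1], tree 1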

There may be multiple rows at the root, one per id present in the root, so the
in memory root row is now::

    self._dirblocks[0] -> ('', [entry ...]),

and the entries in there are::

    entries[0][0]: ''
    entries[0][1]: ''
    entries[0][2]: file_id
    entries[1][0]: The tree data for the current tree for this fileid at /

The minikinds are interpreted as follows:

'r' is a relocated entry: This path is not present in this tree with this
    id, but the id can be found at another location. The fingerprint is
    used to point to the target location.
'a' is an absent entry: In that tree the id is not present at this path.
'd' is a directory entry: This path in this tree is a directory with the
    current file id. There is no fingerprint for directories.
'f' is a file entry: As for directory, but it's a file. The fingerprint is
    the sha1 value of the file's canonical form, i.e. after any read
    filters have been applied to the convenience form stored in the working
    tree.
'l' is a symlink entry: As for directory, but a symlink. The fingerprint is
    the link target.
't' is a reference to a nested subtree; the fingerprint is the referenced
    revision.
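
For example, after a file has moved from 'old-dir' to 'new-dir' relative to
the basis, the basis column of the row at the new path holds a relocation
pointer back to the old path (a sketch; values invented for illustration)::

    ('new-dir', 'file.txt', 'file-id-1'),
    [('f', '<sha1>', 10, False, '<packed-stat>'),  # tree 0: present here
     ('r', 'old-dir/file.txt', 0, False, '')]      # tree 1: data lives at the old path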

The entries on disk and in memory are ordered according to the following keys::

    directory, as a list of components
    filename
    file-id

--- Format 1 had the following different definition: ---

::

    rows = dirname, NULL, basename, NULL, MINIKIND, NULL, fileid_utf8, NULL,
        WHOLE_NUMBER (* size *), NULL, packed stat, NULL, sha1|symlink target,
        {PARENT ROW}
    PARENT ROW = NULL, revision_utf8, NULL, MINIKIND, NULL, dirname, NULL,
        basename, NULL, WHOLE_NUMBER (* size *), NULL, "y" | "n", NULL,
        SHA1

PARENT ROW's are emitted for every parent that is not in the ghosts details
line. That is, if the parents are foo, bar, baz, and the ghosts are bar, then
each row will have a PARENT ROW for foo and baz, but not for bar.
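
In Python terms, and ignoring the compiled helpers the module actually uses
for speed, that ordering is equivalent to sorting entry keys with the
following sketch::

    def entry_sort_key(entry_key):
        dirname, basename, file_id = entry_key
        return (dirname.split('/'), basename, file_id)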

ERROR_DIRECTORY = 267


if not getattr(struct, '_compile', None):
    # Cannot pre-compile the dirstate pack_stat
    def pack_stat(st, _encode=binascii.b2a_base64, _pack=struct.pack):
        """Convert stat values into a packed representation."""
        return _encode(_pack('>LLLLLL', st.st_size, int(st.st_mtime),
            int(st.st_ctime), st.st_dev, st.st_ino & 0xFFFFFFFF,
            st.st_mode))[:-1]
else:
    # compile the struct compiler we need, so as to only do it once
    from _struct import Struct
    _compiled_pack = Struct('>LLLLLL').pack
    def pack_stat(st, _encode=binascii.b2a_base64, _pack=_compiled_pack):
        """Convert stat values into a packed representation."""
        # jam 20060614 it isn't really worth removing more entries if we
        # are going to leave it in packed form.
        # With only st_mtime and st_mode filesize is 5.5M and read time is 275ms
        # With all entries, filesize is 5.9M and read time is maybe 280ms
        # well within the noise margin

        # base64 encoding always adds a final newline, so strip it off
        # The current version
        return _encode(_pack(st.st_size, int(st.st_mtime), int(st.st_ctime),
            st.st_dev, st.st_ino & 0xFFFFFFFF, st.st_mode))[:-1]
        # This is 0.060s / 1.520s faster by not encoding as much information
        # return _encode(_pack('>LL', int(st.st_mtime), st.st_mode))[:-1]
        # This is not strictly faster than _encode(_pack())[:-1]
        # return '%X.%X.%X.%X.%X.%X' % (
        #      st.st_size, int(st.st_mtime), int(st.st_ctime),
        #      st.st_dev, st.st_ino, st.st_mode)
        # Similar to the _encode(_pack('>LL'))
        # return '%X.%X' % (int(st.st_mtime), st.st_mode)
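
# Usage sketch (not part of bzrlib): pack_stat condenses six fields of an
# os.lstat() result into a 32-character base64 token with the trailing
# newline stripped; two stats pack equal iff those six fields match.
#
#   >>> import os
#   >>> st = os.lstat('README')       # any existing path
#   >>> token = pack_stat(st)
#   >>> len(token)
#   32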


class SHA1Provider(object):
    """An interface for getting sha1s of a file."""

        self._last_block_index = None
        self._last_entry_index = None
        # The set of known hash changes
        self._known_hash_changes = set()
        # How many hash changed entries can we have without saving
        self._worth_saving_limit = worth_saving_limit
        self._config_stack = config.LocationStack(urlutils.local_path_to_url(
            path))

    def __repr__(self):
        return "%s(%r)" % \
            (self.__class__.__name__, self._filename)

    def _mark_modified(self, hash_changed_entries=None, header_modified=False):
        """Mark this dirstate as modified.

        :param hash_changed_entries: if non-None, mark just these entries as
            having their hash modified.
        :param header_modified: mark the header modified as well, not just the
            dirblocks.
        """
        #trace.mutter_callsite(3, "modified hash entries: %s", hash_changed_entries)
        if hash_changed_entries:
            self._known_hash_changes.update([e[0] for e in hash_changed_entries])
            if self._dirblock_state in (DirState.NOT_IN_MEMORY,
                                        DirState.IN_MEMORY_UNMODIFIED):
                # If the dirstate is already marked as IN_MEMORY_MODIFIED, then
                # that takes precedence.
                self._dirblock_state = DirState.IN_MEMORY_HASH_MODIFIED
        else:
            # TODO: Since we now have a IN_MEMORY_HASH_MODIFIED state, we
            #       should fail noisily if someone tries to set
            #       IN_MEMORY_MODIFIED but we don't have a write-lock!
            # We don't know exactly what changed so disable smart saving
            self._dirblock_state = DirState.IN_MEMORY_MODIFIED
        if header_modified:
            self._header_state = DirState.IN_MEMORY_MODIFIED

    def _mark_unmodified(self):
        """Mark this dirstate as unmodified."""
        self._header_state = DirState.IN_MEMORY_UNMODIFIED
        self._dirblock_state = DirState.IN_MEMORY_UNMODIFIED
        self._known_hash_changes = set()
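
    # State transitions performed by the two methods above, as a sketch:
    #
    #   hash-only change:  NOT_IN_MEMORY / IN_MEMORY_UNMODIFIED
    #                          -> IN_MEMORY_HASH_MODIFIED
    #                      (IN_MEMORY_MODIFIED is sticky and takes precedence)
    #   structural change: any state -> IN_MEMORY_MODIFIED
    #   _mark_unmodified:  header and dirblock states
    #                          -> IN_MEMORY_UNMODIFIED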

    def add(self, path, file_id, kind, stat, fingerprint):
        """Add a path to be tracked.

    def _check_delta_is_valid(self, delta):
        return list(inventory._check_delta_unique_ids(
                    inventory._check_delta_unique_old_paths(
                    inventory._check_delta_unique_new_paths(
                    inventory._check_delta_ids_match_entry(
                    inventory._check_delta_ids_are_valid(
                    inventory._check_delta_new_path_entry_both_or_None(delta)))))))
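
    # The delta validated above is a plain list of
    # (old_path, new_path, file_id, inventory_entry) tuples; for example
    # (paths and ids invented for illustration):
    #
    #   delta = [
    #       (None, 'doc/new.txt', 'id-new', <InventoryFile for doc/new.txt>),  # add
    #       ('stale.txt', None, 'id-stale', None),                             # delete
    #       ('a.txt', 'b.txt', 'id-a', <InventoryFile for b.txt>),             # rename
    #   ]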

    def update_by_delta(self, delta):
        """Apply an inventory delta to the dirstate for tree 0

        This is the workhorse for apply_inventory_delta in dirstate based
        trees.

        :param delta: An inventory delta. See Inventory.apply_delta for
            details.
        """
        self._read_dirblocks_if_needed()
        encode = cache_utf8.encode
        insertions = {}
        removals = {}
        # Accumulate parent references (path_utf8, id), to check for parentless
        # items or items placed under files/links/tree-references. We get
        # references from every item in the delta that is not a deletion and
        # is not itself the root.
        parents = set()
        # Added ids must not be in the dirstate already. This set holds those
        # ids.
        new_ids = set()
        # This loop transforms the delta to single atomic operations that can
        # be executed and validated.
        delta = sorted(self._check_delta_is_valid(delta), reverse=True)
        for old_path, new_path, file_id, inv_entry in delta:
            if (file_id in insertions) or (file_id in removals):
                self._raise_invalid(old_path or new_path, file_id,
                    "repeated file_id")
            if old_path is not None:
                old_path = old_path.encode('utf-8')
                removals[file_id] = old_path
            else:
                new_ids.add(file_id)
            if new_path is not None:
                if inv_entry is None:
                    self._raise_invalid(new_path, file_id,
                        "new_path with no entry")
                new_path = new_path.encode('utf-8')
                dirname_utf8, basename = osutils.split(new_path)
                if basename:
                    parents.add((dirname_utf8, inv_entry.parent_id))
                key = (dirname_utf8, basename, file_id)
                minikind = DirState._kind_to_minikind[inv_entry.kind]
                if minikind == 't':
                    fingerprint = inv_entry.reference_revision or ''
                else:
                    fingerprint = ''
                insertions[file_id] = (key, minikind, inv_entry.executable,
                                       fingerprint, new_path)
            # Transform moves into delete+add pairs
            if None not in (old_path, new_path):
                for child in self._iter_child_entries(0, old_path):
                    if child[0][2] in insertions or child[0][2] in removals:
                        continue
                    child_dirname = child[0][0]
                    child_basename = child[0][1]
                    minikind = child[1][0][0]
                    fingerprint = child[1][0][4]
                    executable = child[1][0][3]
                    old_child_path = osutils.pathjoin(child_dirname,
                                                      child_basename)
                    removals[child[0][2]] = old_child_path
                    child_suffix = child_dirname[len(old_path):]
                    new_child_dirname = (new_path + child_suffix)
                    key = (new_child_dirname, child_basename, child[0][2])
                    new_child_path = osutils.pathjoin(new_child_dirname,
                                                      child_basename)
                    insertions[child[0][2]] = (key, minikind, executable,
                                               fingerprint, new_child_path)
        self._check_delta_ids_absent(new_ids, delta, 0)
        try:
            self._apply_removals(removals.iteritems())
            self._apply_insertions(insertions.values())
            # Validate parents
            self._after_delta_check_parents(parents, 0)
        except errors.BzrError, e:
            self._changes_aborted = True
            if 'integrity error' not in str(e):
                raise
            # _get_entry raises BzrError when a request is inconsistent; we
            # want such errors to be shown as InconsistentDelta - and that
            # fits the behaviour we trigger.
            raise errors.InconsistentDeltaDelta(delta,
                "error from _get_entry. %s" % (e,))

    def _apply_removals(self, removals):
        for file_id, path in sorted(removals, reverse=True,
            key=operator.itemgetter(1)):
            dirname, basename = osutils.split(path)
            block_i, entry_i, d_present, f_present = \
                self._get_block_entry_index(dirname, basename, 0)
            try:
                entry = self._dirblocks[block_i][1][entry_i]
            except IndexError:
                self._raise_invalid(path, file_id,
                    "Wrong path for old path.")
            if not f_present or entry[1][0][0] in 'ar':
                self._raise_invalid(path, file_id,
                    "Wrong path for old path.")
            if file_id != entry[0][2]:
                self._raise_invalid(path, file_id,
                    "Attempt to remove path has wrong id - found %r."
                    % entry[0][2])
            self._make_absent(entry)
            # See if we have a malformed delta: deleting a directory must not
            # leave crud behind. This increases the number of bisects needed

    def update_basis_by_delta(self, delta, new_revid):
        # ...
        # At the same time, to reduce interface friction we convert the input
        # inventory entries to dirstate.
        root_only = ('', '')
        # Accumulate parent references (path_utf8, id), to check for parentless
        # items or items placed under files/links/tree-references. We get
        # references from every item in the delta that is not a deletion and
        # is not itself the root.
        parents = set()
        # Added ids must not be in the dirstate already. This set holds those
        # ids.
        new_ids = set()
        for old_path, new_path, file_id, inv_entry in delta:
            if inv_entry is not None and file_id != inv_entry.file_id:
                self._raise_invalid(new_path, file_id,
                    "mismatched entry file_id %r" % inv_entry)
            if new_path is None:
                new_path_utf8 = None
            else:
                if inv_entry is None:
                    self._raise_invalid(new_path, file_id,
                        "new_path with no entry")
                new_path_utf8 = encode(new_path)
                # note the parent for validation
                dirname_utf8, basename_utf8 = osutils.split(new_path_utf8)
                if basename_utf8:
                    parents.add((dirname_utf8, inv_entry.parent_id))
            if old_path is None:
                old_path_utf8 = None
            else:
                old_path_utf8 = encode(old_path)
            if old_path is None:
                adds.append((None, new_path_utf8, file_id,
                    inv_to_entry(inv_entry), True))
                new_ids.add(file_id)
            elif new_path is None:
                deletes.append((old_path_utf8, None, file_id, None, True))
            elif (old_path, new_path) == root_only:
                # change things in-place
                # Note: the case of a parent directory changing its file_id
                #       tends to break optimizations here, because officially
                #       the file has actually been moved, it just happens to
                #       end up at the same path. If we can figure out how to
                #       handle that case, we can avoid a lot of add+delete
                #       pairs for objects that stay put.
                # elif old_path == new_path:
                changes.append((old_path_utf8, new_path_utf8, file_id,
                    inv_to_entry(inv_entry)))
            else:
                # Renames:
                # Because renames must preserve their children we must have
                # processed all relocations and removes before hand. The sort
                # ...
                # for 'r' items on every pass.
                self._update_basis_apply_deletes(deletes)
                deletes = []
                # Split into an add/delete pair recursively.
                adds.append((old_path_utf8, new_path_utf8, file_id,
                    inv_to_entry(inv_entry), False))
                # Expunge deletes that we've seen so that deleted/renamed
                # children of a rename directory are handled correctly.
                new_deletes = reversed(list(
                    self._iter_child_entries(1, old_path_utf8)))
                # Remove the current contents of the tree at orig_path, and
                # reinsert at the correct new path.
                for entry in new_deletes:
                    child_dirname, child_basename, child_file_id = entry[0]
                    if child_dirname:
                        source_path = child_dirname + '/' + child_basename
                    else:
                        source_path = child_basename
                    if new_path_utf8:
                        target_path = \
                            new_path_utf8 + source_path[len(old_path_utf8):]
                    else:
                        if old_path_utf8 == '':
                            raise AssertionError("cannot rename directory to"
                                                 " itself")
                        target_path = source_path[len(old_path_utf8) + 1:]
                    adds.append((None, target_path, entry[0][2], entry[1][1], False))
                    deletes.append(
                        (source_path, target_path, entry[0][2], None, False))
                deletes.append(
                    (old_path_utf8, new_path_utf8, file_id, None, False))

        self._check_delta_ids_absent(new_ids, delta, 1)
        try:
            # Finish expunging deletes/first half of renames.
            self._update_basis_apply_deletes(deletes)
            # Reinstate second half of renames and new paths.
            self._update_basis_apply_adds(adds)
            # Apply in-situ changes.
            self._update_basis_apply_changes(changes)
            # Validate parents
            self._after_delta_check_parents(parents, 1)
        except errors.BzrError, e:
            self._changes_aborted = True
            if 'integrity error' not in str(e):
                raise
            # _get_entry raises BzrError when a request is inconsistent; we
            # want such errors to be shown as InconsistentDelta - and that
            # fits the behaviour we trigger.
            raise errors.InconsistentDeltaDelta(delta,
                "error from _get_entry. %s" % (e,))

        self._mark_modified(header_modified=True)
        self._id_index = None

    def _check_delta_ids_absent(self, new_ids, delta, tree_index):
        """Check that none of the file_ids in new_ids are present in a tree."""
        if not new_ids:
            return
        id_index = self._get_id_index()
        for file_id in new_ids:
            for key in id_index.get(file_id, ()):
                block_i, entry_i, d_present, f_present = \
                    self._get_block_entry_index(key[0], key[1], tree_index)
                if not f_present:
                    # In a different tree
                    continue
                entry = self._dirblocks[block_i][1][entry_i]
                if entry[0][2] != file_id:
                    # Different file_id, so not what we want.
                    continue
                self._raise_invalid(("%s/%s" % key[0:2]).decode('utf8'), file_id,
                    "This file_id is new in the delta but already present in "
                    "one of the trees")

    def _raise_invalid(self, path, file_id, reason):
        self._changes_aborted = True
        raise errors.InconsistentDelta(path, file_id, reason)

    def _update_basis_apply_adds(self, adds):
        """Apply a sequence of adds to tree 1 during update_basis_by_delta.
        """
        # Adds are accumulated partly from renames, so can be in any input
        # order - sort it.
        # TODO: we may want to sort in dirblocks order. That way each entry
        #       will end up in the same directory, allowing the _get_entry
        #       fast-path for looking up 2 items in the same dir work.
        adds.sort(key=lambda x: x[1])
        # adds is now in lexicographic order, which places all parents before
        # their children, so we can process it linearly.
        st = static_tuple.StaticTuple
        for old_path, new_path, file_id, new_details, real_add in adds:
            dirname, basename = osutils.split(new_path)
            entry_key = st(dirname, basename, file_id)
            block_index, present = self._find_block_index_from_key(entry_key)
            if not present:
                self._raise_invalid(new_path, file_id,
                    "Unable to find block for this record."
                    " Was the parent added?")
            block = self._dirblocks[block_index][1]
            entry_index, present = self._find_entry_index(entry_key, block)
            if real_add:
                if old_path is not None:
                    self._raise_invalid(new_path, file_id,
                        'considered a real add but still had old_path at %s'
                        % (old_path,))
            if present:
                entry = block[entry_index]
                basis_kind = entry[1][1][0]
                if basis_kind == 'a':
                    entry[1][1] = new_details
                elif basis_kind == 'r':
                    raise NotImplementedError()
                else:
                    self._raise_invalid(new_path, file_id,
                        "An entry was marked as a new add"
                        " but the basis target already existed")
            else:
                # The exact key was not found in the block. However, we need to
                # check if there is a key next to us that would have matched.
                # We only need to check 2 locations, because there are only 2
                # trees.
                for maybe_index in range(entry_index-1, entry_index+1):
                    if maybe_index < 0 or maybe_index >= len(block):
                        continue
                    maybe_entry = block[maybe_index]
                    if maybe_entry[0][:2] != (dirname, basename):
                        # Just a random neighbor
                        continue
                    if maybe_entry[0][2] == file_id:
                        raise AssertionError(
                            '_find_entry_index didnt find a key match'
                            ' but walking the data did, for %s'
                            % (entry_key,))
                    basis_kind = maybe_entry[1][1][0]
                    if basis_kind not in 'ar':
                        self._raise_invalid(new_path, file_id,
                            "we have an add record for path, but the path"
                            " is already present with another file_id %s"
                            % (maybe_entry[0][2],))
                entry = (entry_key, [DirState.NULL_PARENT_DETAILS,
                                     new_details])
                block.insert(entry_index, entry)
            active_kind = entry[1][0][0]
            if active_kind == 'a':
                # The active record shows up as absent, this could be genuine,
                # or it could be present at some other location. We need to
                # verify.
                id_index = self._get_id_index()
                # The id_index may not be perfectly accurate for tree1, because
                # we haven't been keeping it updated. However, it should be
                # fine for tree0, and that gives us enough info for what we
                # need.
                keys = id_index.get(file_id, ())
                for key in keys:
                    block_i, entry_i, d_present, f_present = \
                        self._get_block_entry_index(key[0], key[1], 0)
                    if not f_present:
                        continue
                    active_entry = self._dirblocks[block_i][1][entry_i]
                    if (active_entry[0][2] != file_id):
                        # Some other file is at this path, we don't need to
                        # link it.
                        continue
                    real_active_kind = active_entry[1][0][0]
                    if real_active_kind in 'ar':
                        # We found a record, which was not *this* record,
                        # which matches the file_id, but is not actually
                        # present. Something seems *really* wrong.
                        self._raise_invalid(new_path, file_id,
                            "We found a tree0 entry that doesnt make sense")
                    # Now, we've found a tree0 entry which matches the file_id
                    # but is at a different location. So update them to be
                    # rename records.
                    active_dir, active_name = active_entry[0][:2]
                    if active_dir:
                        active_path = active_dir + '/' + active_name
                    else:
                        active_path = active_name
                    active_entry[1][1] = st('r', new_path, 0, False, '')
                    entry[1][0] = st('r', active_path, 0, False, '')
            elif active_kind == 'r':
                raise NotImplementedError()
            new_kind = new_details[0]
            if new_kind == 'd':
                self._ensure_block(block_index, entry_index, new_path)
1745
def _update_basis_apply_changes(self, changes):
1503
1746
"""Apply a sequence of changes to tree 1 during update_basis_by_delta.
1535
1772
null = DirState.NULL_PARENT_DETAILS
1536
1773
for old_path, new_path, file_id, _, real_delete in deletes:
1537
1774
if real_delete != (new_path is None):
1538
raise AssertionError("bad delete delta")
1775
self._raise_invalid(old_path, file_id, "bad delete delta")
1539
1776
# the entry for this file_id must be in tree 1.
1540
1777
dirname, basename = osutils.split(old_path)
1541
1778
block_index, entry_index, dir_present, file_present = \
1542
1779
self._get_block_entry_index(dirname, basename, 1)
1543
1780
if not file_present:
1544
self._changes_aborted = True
1545
raise errors.InconsistentDelta(old_path, file_id,
1781
self._raise_invalid(old_path, file_id,
1546
1782
'basis tree does not contain removed entry')
1547
1783
entry = self._dirblocks[block_index][1][entry_index]
1784
# The state of the entry in the 'active' WT
1785
active_kind = entry[1][0][0]
1548
1786
if entry[0][2] != file_id:
1549
self._changes_aborted = True
1550
raise errors.InconsistentDelta(old_path, file_id,
1787
self._raise_invalid(old_path, file_id,
1551
1788
'mismatched file_id in tree 1')
1553
if entry[1][0][0] != 'a':
1554
self._changes_aborted = True
1555
raise errors.InconsistentDelta(old_path, file_id,
1556
'This was marked as a real delete, but the WT state'
1557
' claims that it still exists and is versioned.')
1790
old_kind = entry[1][1][0]
1791
if active_kind in 'ar':
1792
# The active tree doesn't have this file_id.
1793
# The basis tree is changing this record. If this is a
1794
# rename, then we don't want the record here at all
1795
# anymore. If it is just an in-place change, we want the
1796
# record here, but we'll add it if we need to. So we just
1798
if active_kind == 'r':
1799
active_path = entry[1][0][1]
1800
active_entry = self._get_entry(0, file_id, active_path)
1801
if active_entry[1][1][0] != 'r':
1802
self._raise_invalid(old_path, file_id,
1803
"Dirstate did not have matching rename entries")
1804
elif active_entry[1][0][0] in 'ar':
1805
self._raise_invalid(old_path, file_id,
1806
"Dirstate had a rename pointing at an inactive"
1808
active_entry[1][1] = null
1558
1809
del self._dirblocks[block_index][1][entry_index]
1811
# This was a directory, and the active tree says it
1812
# doesn't exist, and now the basis tree says it doesn't
1813
# exist. Remove its dirblock if present
1815
present) = self._find_block_index_from_key(
1818
dir_block = self._dirblocks[dir_block_index][1]
1820
# This entry is empty, go ahead and just remove it
1821
del self._dirblocks[dir_block_index]
1560
if entry[1][0][0] == 'a':
1561
self._changes_aborted = True
1562
raise errors.InconsistentDelta(old_path, file_id,
1563
'The entry was considered a rename, but the source path'
1564
' is marked as absent.')
1565
# For whatever reason, we were asked to rename an entry
1566
# that was originally marked as deleted. This could be
1567
# because we are renaming the parent directory, and the WT
1568
# current state has the file marked as deleted.
1569
elif entry[1][0][0] == 'r':
1570
# implement the rename
1571
del self._dirblocks[block_index][1][entry_index]
1573
# it is being resurrected here, so blank it out temporarily.
1574
self._dirblocks[block_index][1][entry_index][1][1] = null
1823
# There is still an active record, so just mark this
1826
block_i, entry_i, d_present, f_present = \
1827
self._get_block_entry_index(old_path, '', 1)
1829
dir_block = self._dirblocks[block_i][1]
1830
for child_entry in dir_block:
1831
child_basis_kind = child_entry[1][1][0]
1832
if child_basis_kind not in 'ar':
1833
self._raise_invalid(old_path, file_id,
1834
"The file id was deleted but its children were "

    def _after_delta_check_parents(self, parents, index):
        """Check that parents required by the delta are all intact.

        :param parents: An iterable of (path_utf8, file_id) tuples which are
            required to be present in tree 'index' at path_utf8 with id file_id
            and be a directory.
        :param index: The column in the dirstate to check for parents in.
        """
        for dirname_utf8, file_id in parents:
            # Get the entry - this ensures that file_id, dirname_utf8 exists
            # and has the right file id.
            entry = self._get_entry(index, file_id, dirname_utf8)
            if entry[1] is None:
                self._raise_invalid(dirname_utf8.decode('utf8'),
                    file_id, "This parent is not present.")
            # Parents of things must be directories
            if entry[1][index][0] != 'd':
                self._raise_invalid(dirname_utf8.decode('utf8'),
                    file_id, "This parent is not a directory.")

    def _observed_sha1(self, entry, sha1, stat_value,
        _stat_to_minikind=_stat_to_minikind):
        """Note the sha1 of a file.

        :param entry: The entry the sha1 is for.

    def _get_id_index(self):
        """Get an id index of self._dirblocks.

        This maps from file_id => [(directory, name, file_id)] entries where
        that file_id appears in one of the trees.
        """
        if self._id_index is None:
            id_index = {}
            for key, tree_details in self._iter_entries():
                self._add_to_id_index(id_index, key)
            self._id_index = id_index
        return self._id_index

    def _add_to_id_index(self, id_index, entry_key):
        """Add this entry to the _id_index mapping."""
        # This code used to use a set for every entry in the id_index. However,
        # it is *rare* to have more than one entry. So a set is a large
        # overkill. And even when we do, we won't ever have more than the
        # number of parent trees. Which is still a small number (rarely >2). As
        # such, we use a simple tuple, and do our own uniqueness checks. While
        # the 'in' check is O(N) since N is nicely bounded it shouldn't ever
        # cause quadratic failure.
        file_id = entry_key[2]
        entry_key = static_tuple.StaticTuple.from_sequence(entry_key)
        if file_id not in id_index:
            id_index[file_id] = static_tuple.StaticTuple(entry_key,)
        else:
            entry_keys = id_index[file_id]
            if entry_key not in entry_keys:
                id_index[file_id] = entry_keys + (entry_key,)

    def _remove_from_id_index(self, id_index, entry_key):
        """Remove this entry from the _id_index mapping.

        It is a programming error to call this when the entry_key is not
        already present.
        """
        file_id = entry_key[2]
        entry_keys = list(id_index[file_id])
        entry_keys.remove(entry_key)
        id_index[file_id] = static_tuple.StaticTuple.from_sequence(entry_keys)
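
    # Illustrative sketch of the tuple-based index above (keys invented):
    #
    #   id_index = {}
    #   self._add_to_id_index(id_index, ('dir', 'a.txt', 'file-id'))
    #   self._add_to_id_index(id_index, ('dir2', 'a.txt', 'file-id'))
    #   # id_index['file-id'] now holds both entry keys; re-adding either
    #   # key is a no-op thanks to the 'in' check, and _remove_from_id_index
    #   # drops a key that is known to be present.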

    def _get_output_lines(self, lines):
        """Format lines for final output.

        if self._changes_aborted:
            trace.mutter('Not saving DirState because '
                    '_changes_aborted is set.')
            return
        # TODO: Since we now distinguish IN_MEMORY_MODIFIED from
        #       IN_MEMORY_HASH_MODIFIED, we should only fail quietly if we fail
        #       to save an IN_MEMORY_HASH_MODIFIED, and fail *noisily* if we
        #       fail to save IN_MEMORY_MODIFIED
        if not self._worth_saving():
            return

        grabbed_write_lock = False
        if self._lock_state != 'w':
            grabbed_write_lock, new_lock = self._lock_token.temporary_write_lock()
            # Switch over to the new lock, as the old one may be closed.
            # TODO: jam 20070315 We should validate the disk file has
            #       not changed contents, since temporary_write_lock may
            #       not be an atomic operation.
            self._lock_token = new_lock
            self._state_file = new_lock.f
            if not grabbed_write_lock:
                # We couldn't grab a write lock, so we switch back to a read one
                return
        try:
            lines = self.get_lines()
            self._state_file.seek(0)
            self._state_file.writelines(lines)
            self._state_file.truncate()
            self._state_file.flush()
            self._maybe_fdatasync()
            self._mark_unmodified()
        finally:
            if grabbed_write_lock:
                self._lock_token = self._lock_token.restore_read_lock()
                self._state_file = self._lock_token.f
                # TODO: jam 20070315 We should validate the disk file has
                #       not changed contents. Since restore_read_lock may
                #       not be an atomic operation.

    def _maybe_fdatasync(self):
        """Flush to disk if possible and if not configured off."""
        if self._config_stack.get('dirstate.fdatasync'):
            osutils.fdatasync(self._state_file.fileno())
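
    # The option consulted above can be turned off per location, e.g. in
    # locations.conf (a sketch - the section path is invented):
    #
    #   [/fast/scratch/checkout]
    #   dirstate.fdatasync = False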

    def _worth_saving(self):
        """Is it worth saving the dirstate or not?"""
        if (self._header_state == DirState.IN_MEMORY_MODIFIED
            or self._dirblock_state == DirState.IN_MEMORY_MODIFIED):
            return True
        if self._dirblock_state == DirState.IN_MEMORY_HASH_MODIFIED:
            if self._worth_saving_limit == -1:
                # We never save hash changes when the limit is -1
                return False
            # If we're using smart saving and only a small number of
            # entries have changed their hash, don't bother saving. John has
            # suggested using a heuristic here based on the size of the
            # changed files and/or tree. For now, we go with a configurable
            # number of changes, keeping the calculation time
            # as low overhead as possible. (This also keeps all existing
            # tests passing as the default is 0, i.e. always save.)
            if len(self._known_hash_changes) >= self._worth_saving_limit:
                return True
        return False
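
    # Decision sketch for the heuristic above: with the default limit of 0
    # every hash-only change is saved immediately; -1 never saves hash-only
    # changes; a positive limit batches them, e.g.
    #
    #   state._worth_saving_limit = 10
    #   # 9 entries re-hashed so far -> _worth_saving() is False (skip write)
    #   # 10th entry re-hashed       -> True (rewrite the dirstate once)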

    def _set_data(self, parent_ids, dirblocks):
        """Set the full dirstate data in memory.

                    new_details = []
                    for lookup_index in xrange(tree_index):
                        # boundary case: this is the first occurrence of file_id
                        # so there are no id_indexes, possibly take this out of
                        # the loop?
                        if not len(entry_keys):
                            new_details.append(DirState.NULL_PARENT_DETAILS)
                        else:
                            # grab any one entry, use it to find the right path.
                            a_key = iter(entry_keys).next()
                            if by_path[a_key][lookup_index][0] in ('r', 'a'):
                                # it's a pointer or missing statement, use it as
                                # is.
                                new_details.append(by_path[a_key][lookup_index])
                            else:
                                # we have the right key, make a pointer to it.
                                real_path = ('/'.join(a_key[0:2])).strip('/')
                                new_details.append(st('r', real_path, 0, False,
                                                      ''))
                    new_details.append(self._inv_entry_to_details(entry))
                    new_details.extend(new_location_suffix)
                    by_path[new_entry_key] = new_details
                    self._add_to_id_index(id_index, new_entry_key)
        # --- end generation of full tree mappings

        # sort and output all the entries

                  and new_entry_key[1:] < current_old[0][1:])):
                # new comes before:
                # add an entry for this and advance new
                if tracing:
                    trace.mutter("Inserting from new '%s'.",
                        new_path_utf8.decode('utf8'))
                self.update_minimal(new_entry_key, current_new_minikind,
                    executable=current_new[1].executable,
                    path_utf8=new_path_utf8, fingerprint=fingerprint,
                    fullscan=True)
                current_new = advance(new_iterator)
            else:
                # we've advanced past the place where the old key would be,
                # without seeing it in the new list. so it must be gone.
                if tracing:
                    trace.mutter("Deleting from old '%s/%s'.",
                        current_old[0][0].decode('utf8'),
                        current_old[0][1].decode('utf8'))
                self._make_absent(current_old)
                current_old = advance(old_iterator)
        self._mark_modified()
        self._id_index = None
        self._packed_stat_index = None
        if tracing:
            trace.mutter("set_state_from_inventory complete.")

    def set_state_from_scratch(self, working_inv, parent_trees, parent_ghosts):
        """Wipe the currently stored state and set it to something new.

        This is a hard-reset for the data we are working with.
        """
        # Technically, we really want a write lock, but until we write, we
        # don't really need it.
        self._requires_lock()
        # root dir and root dir contents with no children. We have to have a
        # root for set_state_from_inventory to work correctly.
        empty_root = (('', '', inventory.ROOT_ID),
                      [('d', '', 0, False, DirState.NULLSTAT)])
        empty_tree_dirblocks = [('', [empty_root]), ('', [])]
        self._set_data([], empty_tree_dirblocks)
        self.set_state_from_inventory(working_inv)
        self.set_parent_trees(parent_trees, parent_ghosts)
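
    # Usage sketch (names invented): rebuild the dirstate from a working
    # inventory plus one real parent and no ghosts:
    #
    #   state.set_state_from_scratch(wt.inventory,
    #       [(rev_id, rev_tree)], [])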

    def _make_absent(self, current_old):
        """Mark current_old - an entry - as absent for tree 0.

                # grab one of them and use it to generate parent
                # relocation/absent entries.
                new_entry = key, [new_details]
                # existing_keys can be changed as we iterate.
                for other_key in tuple(existing_keys):
                    # change the record at other to be a pointer to this new
                    # record. The loop looks similar to the change to
                    # relocations when updating an existing record but it's not:
                    # the test for existing kinds is different: this can be
                    # factored out to a helper though.
                    other_block_index, present = self._find_block_index_from_key(
                        other_key)
                    if not present:
                        raise AssertionError('could not find block for %s' % (
                            other_key,))
                    other_block = self._dirblocks[other_block_index][1]
                    other_entry_index, present = self._find_entry_index(
                        other_key, other_block)
                    if not present:
                        raise AssertionError(
                            'update_minimal: could not find other entry for %s'
                            % (other_key,))
                    if path_utf8 is None:
                        raise AssertionError('no path')
                    # Turn this other location into a reference to the new
                    # location. This also updates the aliased iterator
                    # (current_old in set_state_from_inventory) so that the old
                    # entry, if not already examined, is skipped over by that
                    # loop.
                    other_entry = other_block[other_entry_index]
                    other_entry[1][0] = ('r', path_utf8, 0, False, '')
                    if self._maybe_remove_row(other_block, other_entry_index,
                                              id_index):
                        # If the row holding this was removed, we need to
                        # recompute where this entry goes
                        entry_index, _ = self._find_entry_index(key, block)

                # This loop:
                # adds a tuple to the new details for each column
                #  - either by copying an existing relocation pointer inside that column
                #  - or by creating a new pointer to the right row inside that column
                num_present_parents = self._num_present_parents()
                if num_present_parents:
                    # TODO: This re-evaluates the existing_keys set, do we need
                    #       to do that ourselves?
                    other_key = list(existing_keys)[0]
                for lookup_index in xrange(1, num_present_parents + 1):
                    # grab any one entry, use it to find the right path.
                    # TODO: optimise this to reduce memory use in highly

            entry[1][0] = ('l', '', stat_value.st_size,
                           False, DirState.NULLSTAT)
    if worth_saving:
        state._mark_modified([entry])
    return link_or_sha1


class ProcessEntryPython(object):

    __slots__ = ["old_dirname_to_file_id", "new_dirname_to_file_id",
        "last_source_parent", "last_target_parent", "include_unchanged",
        "partial", "use_filesystem_for_exec", "utf8_decode",
        "searched_specific_files", "search_specific_files",
        "searched_exact_paths", "search_specific_file_parents", "seen_ids",
        "state", "source_index", "target_index", "want_unversioned", "tree"]

    def __init__(self, include_unchanged, use_filesystem_for_exec,
        search_specific_files, state, source_index, target_index,
        want_unversioned, tree):
        self.old_dirname_to_file_id = {}
        self.new_dirname_to_file_id = {}
        # Are we doing a partial iter_changes?
        self.partial = search_specific_files != set([''])
        # Using a list so that we can access the values and change them in
        # nested scope. Each one is [path, file_id, entry]
        self.last_source_parent = [None, None]

                current_dir_info = dir_iterator.next()
            except StopIteration:
                current_dir_info = None
        for result in self._iter_specific_file_parents():
            yield result

    def _iter_specific_file_parents(self):
        """Iter over the specific file parents."""
        while self.search_specific_file_parents:
            # Process the parent directories for the paths we were iterating.
            # Even in extremely large trees this should be modest, so currently
            # no attempt is made to optimise.
            path_utf8 = self.search_specific_file_parents.pop()
            if osutils.is_inside_any(self.searched_specific_files, path_utf8):
                # We've examined this path.
                continue
            if path_utf8 in self.searched_exact_paths:
                # We've examined this path.
                continue
            path_entries = self.state._entries_for_path(path_utf8)
            # We need either one or two entries. If the path in
            # self.target_index has moved (so the entry in source_index is in
            # 'ar') then we need to also look for the entry for this path in
            # self.source_index, to output the appropriate delete-or-rename.
            selected_entries = []
            found_item = False
            for candidate_entry in path_entries:
                # Find entries present in target at this path:
                if candidate_entry[1][self.target_index][0] not in 'ar':
                    found_item = True
                    selected_entries.append(candidate_entry)
                # Find entries present in source at this path:
                elif (self.source_index is not None and
                    candidate_entry[1][self.source_index][0] not in 'ar'):
                    found_item = True
                    if candidate_entry[1][self.target_index][0] == 'a':
                        # Deleted, emit it here.
                        selected_entries.append(candidate_entry)
                    else:
                        # renamed, emit it when we process the directory it
                        # ended up at.
                        self.search_specific_file_parents.add(
                            candidate_entry[1][self.target_index][1])
            if not found_item:
                raise AssertionError(
                    "Missing entry for specific path parent %r, %r" % (
                    path_utf8, path_entries))
            path_info = self._path_info(path_utf8, path_utf8.decode('utf8'))
            for entry in selected_entries:
                if entry[0][2] in self.seen_ids:
                    continue
                result, changed = self._process_entry(entry, path_info)
                if changed is None:
                    raise AssertionError(
                        "Got entry<->path mismatch for specific path "
                        "%r entry %r path_info %r " % (
                        path_utf8, entry, path_info))
                # Only include changes - we're outside the users requested
                # expansion.
                if changed:
                    self._gather_result_for_consistency(result)
                    if (result[6][0] == 'directory' and
                        result[6][1] != 'directory'):
                        # This stopped being a directory, the old children have
                        # to be included.
                        if entry[1][self.source_index][0] == 'r':
                            # renamed, take the source path
                            entry_path_utf8 = entry[1][self.source_index][1]
                        else:
                            entry_path_utf8 = path_utf8
                        initial_key = (entry_path_utf8, '', '')
                        block_index, _ = self.state._find_block_index_from_key(
                            initial_key)
                        if block_index == 0:
                            # The children of the root are in block index 1.
                            block_index += 1
                        current_block = None
                        if block_index < len(self.state._dirblocks):
                            current_block = self.state._dirblocks[block_index]
                            if not osutils.is_inside(
                                entry_path_utf8, current_block[0]):
                                # No entries for this directory at all.
                                current_block = None
                        if current_block is not None:
                            for entry in current_block[1]:
                                if entry[1][self.source_index][0] in 'ar':
                                    # Not in the source tree, so doesn't have to be
                                    # included.
                                    continue
                                # Path of the entry itself.
                                self.search_specific_file_parents.add(
                                    osutils.pathjoin(*entry[0][:2]))
                if changed or self.include_unchanged:
                    yield result
            self.searched_exact_paths.add(path_utf8)

    def _path_info(self, utf8_path, unicode_path):
        """Generate path_info for unicode_path.

        :return: None if unicode_path does not exist, or a path_info tuple.
        """
        abspath = self.tree.abspath(unicode_path)
        try:
            stat = os.lstat(abspath)
        except OSError, e:
            if e.errno == errno.ENOENT:
                # the path does not exist.
                return None
            else:
                raise
        utf8_basename = utf8_path.rsplit('/', 1)[-1]
        dir_info = (utf8_path, utf8_basename,
            osutils.file_kind_from_stat_mode(stat.st_mode), stat,
            abspath)
        if dir_info[2] == 'directory':
            if self.tree._directory_is_tree_reference(
                unicode_path):
                self.root_dir_info = self.root_dir_info[:2] + \
                    ('tree-reference',) + self.root_dir_info[3:]
        return dir_info


# Try to load the compiled form if possible
try:
    from bzrlib._dirstate_helpers_pyx import (
        _read_dirblocks,
        bisect_dirblock,
        _bisect_path_left,
        _bisect_path_right,
        cmp_by_dirs,
        ProcessEntryC as _process_entry,
        update_entry as update_entry,
        )
except ImportError, e:
    osutils.failed_to_load_extension(e)
    from bzrlib._dirstate_helpers_py import (
        _read_dirblocks,
        bisect_dirblock,
        _bisect_path_left,
        _bisect_path_right,
        cmp_by_dirs,
        )
    # FIXME: It would be nice to be able to track moved lines so that the
    # corresponding python code can be moved to the _dirstate_helpers_py
    # module. I don't want to break the history for this important piece of
    # code so I left the code here -- vila 20090622
    update_entry = py_update_entry
    _process_entry = ProcessEntryPython