         self._new_parent = {}
         # mapping of trans_id with new contents -> new file_kind
         self._new_contents = {}
+        # mapping of trans_id => (sha1 of content, stat_value)
+        self._observed_sha1s = {}
         # Set of trans_ids whose contents will be removed
         self._removed_contents = set()
         # Mapping of trans_id -> new execute-bit value
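
The new _observed_sha1s mapping caches, per trans_id, the content sha1 together
with the stat of the file as it was written, so the working tree can later seed
its hash cache instead of re-reading every freshly created file. A minimal
sketch of the idea, using only hashlib/os; the names record_observation and
limbo_path are hypothetical, not bzrlib API:

    import hashlib
    import os

    # trans_id -> (sha1 of content, stat_value), mirroring the new mapping
    observed_sha1s = {}

    def record_observation(trans_id, limbo_path, sha1=None):
        if sha1 is None:
            # hash only when the caller does not already know the sha1
            with open(limbo_path, 'rb') as f:
                sha1 = hashlib.sha1(f.read()).hexdigest()
        # lstat after writing, so the cached stat matches what a later
        # status-style scan will see for this file
        observed_sha1s[trans_id] = (sha1, os.lstat(limbo_path))
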
             descendants.update(self._limbo_descendants(descendant))
         return descendants
 
-    def create_file(self, contents, trans_id, mode_id=None):
+    def create_file(self, contents, trans_id, mode_id=None, sha1=None):
         """Schedule creation of a new file.
 
-        Contents is an iterator of strings, all of which will be written
-        to the target destination.
-
-        New file takes the permissions of any existing file with that id,
-        unless mode_id is specified.
+        :param contents: an iterator of strings, all of which will be written
+            to the target destination.
+        :param trans_id: TreeTransform handle
+        :param mode_id: If not None, force the mode of the target file to match
+            the mode of the object referenced by mode_id.
+            Otherwise, we will try to preserve mode bits of an existing file.
+        :param sha1: If the sha1 of this content is already known, pass it in.
+            We can use it to prevent future sha1 computations.
         """
         name = self._limbo_name(trans_id)
         f = open(name, 'wb')
@@ ... @@
                 os.unlink(name)
@@ ... @@
             f.writelines(contents)
@@ ... @@
         self._set_mtime(name)
         self._set_mode(trans_id, mode_id, S_ISREG)
+        # It is unfortunate we have to use lstat instead of fstat, but we just
+        # used utime and chmod on the file, so we need the accurate final
+        if sha1 is not None:
+            self._observed_sha1s[trans_id] = (sha1, osutils.lstat(name))
 
     def _read_file_chunks(self, trans_id):
         cur_file = open(self._limbo_name(trans_id), 'rb')
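
Callers that already know the sha1 of the content (for example because it came
from a repository text record) can now pass it in, and create_file pairs it
with an lstat taken after the utime/chmod so the recorded stat describes the
final on-disk state. A hedged usage sketch; tt, known_sha1 and other_trans_id
are assumed to come from surrounding code and are not defined in this hunk:

    # trans_id for a new file under the tree root
    trans_id = tt.create_path('hello.txt', tt.root)
    # hash already known: record it so it never has to be recomputed
    tt.create_file(['hello world\n'], trans_id, sha1=known_sha1)
    # omitting sha1 keeps the old behaviour: nothing is cached and the
    # working tree will hash the file the first time it needs to
    tt.create_file(['no cached hash\n'], other_trans_id)
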
     def cancel_creation(self, trans_id):
         """Cancel the creation of new file contents."""
         del self._new_contents[trans_id]
+        if trans_id in self._observed_sha1s:
+            del self._observed_sha1s[trans_id]
         children = self._limbo_children.get(trans_id)
         # if this is a limbo directory with children, move them before removing
         # the directory
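
Cancelling pending contents also has to forget any cached sha1; otherwise a
later create_file for the same trans_id could leave a stale (sha1, stat) pair
behind and the wrong hash would be handed to the tree at apply time. A
dict-level illustration of the rule (standalone sketch, not bzrlib code):

    def cancel_creation(new_contents, observed_sha1s, trans_id):
        del new_contents[trans_id]
        # pop() tolerates entries that were created without a known sha1,
        # the same effect as the 'if trans_id in ...' guard above
        observed_sha1s.pop(trans_id, None)
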
                         self.rename_count += 1
+                    # TODO: if trans_id in self._observed_sha1s, we should
+                    #       re-stat the final target, since ctime will be
+                    #       updated by the change.
                 if (trans_id in self._new_contents or
                     self.path_changed(trans_id)):
                     if trans_id in self._new_contents:
                         modified_paths.append(full_path)
                 if trans_id in self._new_executability:
                     self._set_executability(path, trans_id)
+                if trans_id in self._observed_sha1s:
+                    o_sha1, o_st_val = self._observed_sha1s[trans_id]
+                    st = osutils.lstat(full_path)
+                    self._observed_sha1s[trans_id] = (o_sha1, st)
         finally:
             child_pb.finished()
         self._new_contents.clear()
         return modified_paths
 
+    def _apply_observed_sha1s(self):
+        """After we have finished renaming everything, update observed sha1s
+
+        This has to be done after self._tree.apply_inventory_delta, otherwise
+        it doesn't know anything about the files we are updating. Also, we want
+        to do this as late as possible, so that most entries end up cached.
+        """
+        # TODO: this doesn't update the stat information for directories. So
+        #       the first 'bzr status' will still need to rewrite
+        #       .bzr/checkout/dirstate. However, we at least don't need to
+        #       re-read all of the files.
+        # TODO: If the operation took a while, we could do a time.sleep(3) here
+        #       to allow the clock to tick over and ensure we won't have any
+        #       problems. (we could observe start time, and finish time, and if
+        #       it is less than eg 10% overhead, add a sleep call.)
+        paths = FinalPaths(self)
+        for trans_id, observed in self._observed_sha1s.iteritems():
+            path = paths.get_path(trans_id)
+            # We could get the file_id, but dirstate prefers to use the path
+            # anyway, and it is 'cheaper' to determine.
+            # file_id = self._new_id[trans_id]
+            self._tree._observed_sha1(None, path, observed)
+
 
 class TransformPreview(DiskTreeTransform):
     """A TreeTransform for generating preview trees.
                     executable = tree.is_executable(file_id, tree_path)
                     if executable:
                         tt.set_executability(executable, trans_id)
-                    trans_data = (trans_id, tree_path)
+                    trans_data = (trans_id, tree_path, entry.text_sha1)
                     deferred_contents.append((file_id, trans_data))
                 else:
                     file_trans_id[file_id] = new_by_entry(tt, entry, parent_id,
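
Because build_tree reads entries straight from the source tree's inventory,
entry.text_sha1 is already known before any file content is fetched; carrying
it inside trans_data is what later lets _create_files call create_file with
sha1=... instead of leaving the hash to be recomputed. Illustrative shape of
the deferred work items (names and values invented for the example):

    import hashlib

    # old: (file_id, (trans_id, tree_path))
    # new: (file_id, (trans_id, tree_path, text_sha1))
    text_sha1 = hashlib.sha1(b'hello\n').hexdigest()
    deferred_contents = [('file-id-1', ('trans-1', 'hello.txt', text_sha1))]
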
         unchanged = dict(unchanged)
         new_desired_files = []
         count = 0
-        for file_id, (trans_id, tree_path) in desired_files:
+        for file_id, (trans_id, tree_path, text_sha1) in desired_files:
             accelerator_path = unchanged.get(file_id)
             if accelerator_path is None:
-                new_desired_files.append((file_id, (trans_id, tree_path)))
+                new_desired_files.append((file_id,
+                    (trans_id, tree_path, text_sha1)))
                 continue
             pb.update('Adding file contents', count + offset, total)
@@ ... @@
                     contents = filtered_output_bytes(contents, filters,
                         ContentFilterContext(tree_path, tree))
@@ ... @@
-                    tt.create_file(contents, trans_id)
+                    tt.create_file(contents, trans_id, sha1=text_sha1)
@@ ... @@
                         contents.close()
@@ ... @@
         offset += count
-    for count, ((trans_id, tree_path), contents) in enumerate(
+    for count, ((trans_id, tree_path, text_sha1), contents) in enumerate(
             tree.iter_files_bytes(new_desired_files)):
         if wt.supports_content_filtering():
             filters = wt._content_filter_stack(tree_path)
             contents = filtered_output_bytes(contents, filters,
                 ContentFilterContext(tree_path, tree))
-        tt.create_file(contents, trans_id)
+        tt.create_file(contents, trans_id, sha1=text_sha1)
         pb.update('Adding file contents', count + offset, total)
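
Taken together with the build_tree change above, both content-creation paths in
_create_files now pass the hash they already hold straight through to
create_file, so files materialised from a repository no longer need to be
re-read just to learn their sha1. A miniature, self-contained sketch of that
pattern using plain file APIs rather than TreeTransform:

    import hashlib
    import os

    def create_file_with_known_sha1(path, chunks, sha1, observed_sha1s):
        with open(path, 'wb') as f:
            f.writelines(chunks)
        # record the known hash with a post-write lstat instead of re-hashing
        observed_sha1s[path] = (sha1, os.lstat(path))

    observed = {}
    text = b'hello world\n'
    create_file_with_known_sha1('hello.txt', [text],
                                hashlib.sha1(text).hexdigest(), observed)
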