@@ -385 +394 @@
         return sorted(FinalPaths(self).get_paths(new_ids))
 
     def _inventory_altered(self):
-        """Get the trans_ids and paths of files needing new inv entries."""
-        for id_set in [self._new_name, self._new_parent, self._new_id,
+        """Determine which trans_ids need new Inventory entries.
+
+        A new entry is needed when anything that would be reflected by an
+        inventory entry changes, including file name, file_id, parent file_id,
+        file kind, and the execute bit.
+
+        Some care is taken to return entries with real changes, not cases
+        where the value is deleted and then restored to its original value,
+        but some actually unchanged values may be returned.
+
+        :returns: A list of (path, trans_id) for all items requiring an
+            inventory change. Ordered by path.
+        """
+        changed_ids = set()
+        # Find entries whose file_ids are new (or changed).
+        new_file_id = set(t for t in self._new_id
+                          if self._new_id[t] != self.tree_file_id(t))
+        for id_set in [self._new_name, self._new_parent, new_file_id,
                        self._new_executability]:
-            new_ids.update(id_set)
+            changed_ids.update(id_set)
+        # removing implies a kind change
         changed_kind = set(self._removed_contents)
         changed_kind.intersection_update(self._new_contents)
-        changed_kind.difference_update(new_ids)
+        # Ignore entries that are already known to have changed.
+        changed_kind.difference_update(changed_ids)
+        # to keep only the truly changed ones
         changed_kind = (t for t in changed_kind
                         if self.tree_kind(t) != self.final_kind(t))
-        new_ids.update(changed_kind)
-        return sorted(FinalPaths(self).get_paths(new_ids))
+        # all kind changes will alter the inventory
+        changed_ids.update(changed_kind)
+        # To find entries with changed parent_ids, find parents which existed,
+        # but changed file_id.
+        changed_file_id = set(t for t in new_file_id if t in self._removed_id)
+        # Now add all their children to the set.
+        for parent_trans_id in changed_file_id:
+            changed_ids.update(self.iter_tree_children(parent_trans_id))
+        return sorted(FinalPaths(self).get_paths(changed_ids))
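A quick illustration of the filtering above, with made-up trans_ids (a sketch, not part of the patch): removed-and-recreated contents only count as a possible kind change when the entry is not already flagged by a name, parent, file_id or executability change.

    removed_contents = set(['trans-1', 'trans-2', 'trans-3'])
    new_contents = set(['trans-2', 'trans-3'])
    changed_ids = set(['trans-3'])      # already flagged, e.g. renamed
    # removing *and* re-adding content may mean a kind change...
    changed_kind = removed_contents & new_contents
    # ...unless the entry already needs an inventory update anyway
    changed_kind -= changed_ids
    # changed_kind is now set(['trans-2'])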
 
     def final_kind(self, trans_id):
         """Determine the final file kind, after any changes applied.
@@ -1249 +1295 @@
             descendants.update(self._limbo_descendants(descendant))
         return descendants
 
-    def create_file(self, contents, trans_id, mode_id=None):
+    def create_file(self, contents, trans_id, mode_id=None, sha1=None):
         """Schedule creation of a new file.
 
-        Contents is an iterator of strings, all of which will be written
-        to the target destination.
-
-        New file takes the permissions of any existing file with that id,
-        unless mode_id is specified.
+        :param contents: an iterator of strings, all of which will be written
+            to the target destination.
+        :param trans_id: TreeTransform handle
+        :param mode_id: If not None, force the mode of the target file to match
+            the mode of the object referenced by mode_id.
+            Otherwise, we will try to preserve mode bits of an existing file.
+        :param sha1: If the sha1 of this content is already known, pass it in.
+            We can use it to prevent future sha1 computations.
         """
         name = self._limbo_name(trans_id)
         f = open(name, 'wb')
-                unique_add(self._new_contents, trans_id, 'file')
-                # Clean up the file, it never got registered so
-                # TreeTransform.finalize() won't clean it up.
+            unique_add(self._new_contents, trans_id, 'file')
             f.writelines(contents)
         self._set_mtime(name)
         self._set_mode(trans_id, mode_id, S_ISREG)
+        # It is unfortunate we have to use lstat instead of fstat, but we just
+        # used utime and chmod on the file, so we need the accurate final
+        # details
+        if sha1 is not None:
+            self._observed_sha1s[trans_id] = (sha1, osutils.lstat(name))
 
     def _read_file_chunks(self, trans_id):
         cur_file = open(self._limbo_name(trans_id), 'rb')
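A hedged usage sketch for the new sha1 parameter (not from the patch; it assumes an existing TreeTransform `tt` and a file `trans_id`): a caller that has already hashed the content can pass the digest in, and create_file records it with the limbo file's stat so it does not have to be recomputed later.

    from bzrlib import osutils

    chunks = ['hello ', 'world\n']
    text_sha1 = osutils.sha_strings(chunks)   # hex digest of the joined chunks
    # tt and trans_id assumed to exist, e.g. from TreeTransform(wt) / tt.create_path()
    tt.create_file(chunks, trans_id, sha1=text_sha1)
    # tt._observed_sha1s[trans_id] is now (text_sha1, <lstat of the limbo file>)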
@@ -1829 +1882 @@
                         self.rename_count += 1
+                    # TODO: if trans_id in self._observed_sha1s, we should
+                    #       re-stat the final target, since ctime will be
+                    #       updated by the change.
                 if (trans_id in self._new_contents or
                     self.path_changed(trans_id)):
                     if trans_id in self._new_contents:
                         modified_paths.append(full_path)
                 if trans_id in self._new_executability:
                     self._set_executability(path, trans_id)
+                if trans_id in self._observed_sha1s:
+                    o_sha1, o_st_val = self._observed_sha1s[trans_id]
+                    st = osutils.lstat(full_path)
+                    self._observed_sha1s[trans_id] = (o_sha1, st)
             child_pb.finished()
+        for path, trans_id in new_paths:
+            # new_paths includes stuff like workingtree conflicts. Only the
+            # stuff in new_contents actually comes from limbo.
+            if trans_id in self._limbo_files:
+                del self._limbo_files[trans_id]
         self._new_contents.clear()
         return modified_paths
 
+    def _apply_observed_sha1s(self):
+        """After we have finished renaming everything, update observed sha1s
+
+        This has to be done after self._tree.apply_inventory_delta, otherwise
+        it doesn't know anything about the files we are updating. Also, we want
+        to do this as late as possible, so that most entries end up cached.
+        """
+        # TODO: this doesn't update the stat information for directories. So
+        #       the first 'bzr status' will still need to rewrite
+        #       .bzr/checkout/dirstate. However, we at least don't need to
+        #       re-read all of the files.
+        # TODO: If the operation took a while, we could do a time.sleep(3) here
+        #       to allow the clock to tick over and ensure we won't have any
+        #       problems. (we could observe start time, and finish time, and if
+        #       it is less than eg 10% overhead, add a sleep call.)
+        paths = FinalPaths(self)
+        for trans_id, observed in self._observed_sha1s.iteritems():
+            path = paths.get_path(trans_id)
+            # We could get the file_id, but dirstate prefers to use the path
+            # anyway, and it is 'cheaper' to determine.
+            # file_id = self._new_id[trans_id]
+            self._tree._observed_sha1(None, path, observed)
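An aside on why the observed (sha1, stat) pairs are re-stat()ed after the final rename and pushed into the tree as late as possible; the sketch below is illustrative only, not bzrlib code: a cached sha1 is only trustworthy while the file's current stat still matches the stat recorded when the sha1 was observed, so a stale stat would just force the next status to re-hash.

    import os

    def cached_sha1_still_valid(path, cache):
        # cache maps path -> (sha1, stat recorded when the sha1 was observed)
        sha1, old_st = cache[path]
        st = os.lstat(path)
        # roughly what a dirstate-style cache checks before trusting the sha1
        return (st.st_size == old_st.st_size and
                st.st_mtime == old_st.st_mtime and
                st.st_ctime == old_st.st_ctime)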
 
 
 class TransformPreview(DiskTreeTransform):
     """A TreeTransform for generating preview trees.
@@ -2161 +2249 @@
     def get_file_size(self, file_id):
         """See Tree.get_file_size"""
+        trans_id = self._transform.trans_id_file_id(file_id)
+        kind = self._transform.final_kind(trans_id)
+        if kind != 'file':
+            return None
+        if trans_id in self._transform._new_contents:
+            return self._stat_limbo_file(trans_id=trans_id).st_size
         if self.kind(file_id) == 'file':
             return self._transform._tree.get_file_size(file_id)
 
+    def get_file_verifier(self, file_id, path=None, stat_value=None):
+        trans_id = self._transform.trans_id_file_id(file_id)
+        kind = self._transform._new_contents.get(trans_id)
+        if kind is None:
+            return self._transform._tree.get_file_verifier(file_id)
+        if kind == 'file':
+            fileobj = self.get_file(file_id)
+            return ("SHA1", sha_file(fileobj))
 
     def get_file_sha1(self, file_id, path=None, stat_value=None):
         trans_id = self._transform.trans_id_file_id(file_id)
         kind = self._transform._new_contents.get(trans_id)
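The verifier returned above is a (kind, value) pair such as ("SHA1", hexdigest), and two verifiers are only meaningfully comparable when their kinds match; a small illustrative helper (a sketch, not bzrlib API) could look like:

    def file_contents_match(tree_a, tree_b, file_id):
        kind_a, value_a = tree_a.get_file_verifier(file_id)
        kind_b, value_b = tree_b.get_file_verifier(file_id)
        if kind_a != kind_b:
            # different verifier kinds: fall back to comparing actual SHA1s
            return tree_a.get_file_sha1(file_id) == tree_b.get_file_sha1(file_id)
        return value_a == value_b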
@@ -2553 +2668 @@
         unchanged = dict(unchanged)
         new_desired_files = []
-        for file_id, (trans_id, tree_path) in desired_files:
+        for file_id, (trans_id, tree_path, text_sha1) in desired_files:
             accelerator_path = unchanged.get(file_id)
             if accelerator_path is None:
-                new_desired_files.append((file_id, (trans_id, tree_path)))
+                new_desired_files.append((file_id,
+                    (trans_id, tree_path, text_sha1)))
-            pb.update('Adding file contents', count + offset, total)
+            pb.update(gettext('Adding file contents'), count + offset, total)
                 tt.create_hardlink(accelerator_tree.abspath(accelerator_path),
@@ -2580 +2696 @@
         offset += count
-    for count, ((trans_id, tree_path), contents) in enumerate(
+    for count, ((trans_id, tree_path, text_sha1), contents) in enumerate(
             tree.iter_files_bytes(new_desired_files)):
         if wt.supports_content_filtering():
             filters = wt._content_filter_stack(tree_path)
             contents = filtered_output_bytes(contents, filters,
                 ContentFilterContext(tree_path, tree))
-        tt.create_file(contents, trans_id)
-        pb.update('Adding file contents', count + offset, total)
+        tt.create_file(contents, trans_id, sha1=text_sha1)
+        pb.update(gettext('Adding file contents'), count + offset, total)
 
 
 def _reparent_children(tt, old_parent, new_parent):
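For context, an illustration with made-up values (not part of the patch): each desired_files entry now carries the text sha1 already known for the file, so build_tree can hand it straight to create_file instead of leaving the working tree to re-hash the file on its next status.

    # made-up values, shaped like the entries iterated above
    desired_files = [
        ('file-id-1', ('trans-7', 'doc/README.txt',
                       'da39a3ee5e6b4b0d3255bfef95601890afd80709')),
    ]
    for file_id, (trans_id, tree_path, text_sha1) in desired_files:
        # each entry now goes straight to
        # tt.create_file(contents, trans_id, sha1=text_sha1)
        assert len(text_sha1) == 40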
@@ -2812 +2932 @@
     deferred_files = []
     for id_num, (file_id, path, changed_content, versioned, parent, name,
                  kind, executable) in enumerate(change_list):
-        if skip_root and file_id[0] is not None and parent[0] is None:
+        target_path, wt_path = path
+        target_versioned, wt_versioned = versioned
+        target_parent, wt_parent = parent
+        target_name, wt_name = name
+        target_kind, wt_kind = kind
+        target_executable, wt_executable = executable
+        if skip_root and wt_parent is None:
         trans_id = tt.trans_id_file_id(file_id)
         if changed_content:
             keep_content = False
-            if kind[0] == 'file' and (backups or kind[1] is None):
+            if wt_kind == 'file' and (backups or target_kind is None):
                 wt_sha1 = working_tree.get_file_sha1(file_id)
                 if merge_modified.get(file_id) != wt_sha1:
                     # acquire the basis tree lazily to prevent the
                     if basis_tree is None:
                         basis_tree = working_tree.basis_tree()
                         basis_tree.lock_read()
-                    if file_id in basis_tree:
+                    if basis_tree.has_id(file_id):
                         if wt_sha1 != basis_tree.get_file_sha1(file_id):
                             keep_content = True
-                elif kind[1] is None and not versioned[1]:
+                elif target_kind is None and not target_versioned:
                     keep_content = True
-            if kind[0] is not None:
+            if wt_kind is not None:
                 if not keep_content:
                     tt.delete_contents(trans_id)
-                elif kind[1] is not None:
-                    parent_trans_id = tt.trans_id_file_id(parent[0])
+                elif target_kind is not None:
+                    parent_trans_id = tt.trans_id_file_id(wt_parent)
                     backup_name = tt._available_backup_name(
-                        name[0], parent_trans_id)
+                        wt_name, parent_trans_id)
                     tt.adjust_path(backup_name, parent_trans_id, trans_id)
-                    new_trans_id = tt.create_path(name[0], parent_trans_id)
-                    if versioned == (True, True):
+                    new_trans_id = tt.create_path(wt_name, parent_trans_id)
+                    if wt_versioned and target_versioned:
                         tt.unversion_file(trans_id)
                         tt.version_file(file_id, new_trans_id)
                     # New contents should have the same unix perms as old
                     mode_id = trans_id
                     trans_id = new_trans_id
-            if kind[1] in ('directory', 'tree-reference'):
+            if target_kind in ('directory', 'tree-reference'):
                 tt.create_directory(trans_id)
-                if kind[1] == 'tree-reference':
+                if target_kind == 'tree-reference':
                     revision = target_tree.get_reference_revision(file_id,
                     tt.set_tree_reference(revision, trans_id)
-            elif kind[1] == 'symlink':
+            elif target_kind == 'symlink':
                 tt.create_symlink(target_tree.get_symlink_target(file_id),
-            elif kind[1] == 'file':
+            elif target_kind == 'file':
                 deferred_files.append((file_id, (trans_id, mode_id)))
                 if basis_tree is None:
                     basis_tree = working_tree.basis_tree()
                     basis_tree.lock_read()
                 new_sha1 = target_tree.get_file_sha1(file_id)
-                if (file_id in basis_tree and new_sha1 ==
-                    basis_tree.get_file_sha1(file_id)):
+                if (basis_tree.has_id(file_id) and
+                    new_sha1 == basis_tree.get_file_sha1(file_id)):
                     if file_id in merge_modified:
                         del merge_modified[file_id]
                     merge_modified[file_id] = new_sha1
 
                 # preserve the execute bit when backing up
-                if keep_content and executable[0] == executable[1]:
-                    tt.set_executability(executable[1], trans_id)
-            elif kind[1] is not None:
-                raise AssertionError(kind[1])
-        if versioned == (False, True):
+                if keep_content and wt_executable == target_executable:
+                    tt.set_executability(target_executable, trans_id)
+            elif target_kind is not None:
+                raise AssertionError(target_kind)
+        if not wt_versioned and target_versioned:
             tt.version_file(file_id, trans_id)
-        if versioned == (True, False):
+        if wt_versioned and not target_versioned:
             tt.unversion_file(trans_id)
-        if (name[1] is not None and
-            (name[0] != name[1] or parent[0] != parent[1])):
-            if name[1] == '' and parent[1] is None:
+        if (target_name is not None and
+            (wt_name != target_name or wt_parent != target_parent)):
+            if target_name == '' and target_parent is None:
                 parent_trans = ROOT_PARENT
-                parent_trans = tt.trans_id_file_id(parent[1])
-            if parent[0] is None and versioned[0]:
-                tt.adjust_root_path(name[1], parent_trans)
+                parent_trans = tt.trans_id_file_id(target_parent)
+            if wt_parent is None and wt_versioned:
+                tt.adjust_root_path(target_name, parent_trans)
-                tt.adjust_path(name[1], parent_trans, trans_id)
-        if executable[0] != executable[1] and kind[1] == "file":
-            tt.set_executability(executable[1], trans_id)
+                tt.adjust_path(target_name, parent_trans, trans_id)
+        if wt_executable != target_executable and target_kind == "file":
+            tt.set_executability(target_executable, trans_id)
     if working_tree.supports_content_filtering():
         for index, ((trans_id, mode_id), bytes) in enumerate(
             target_tree.iter_files_bytes(deferred_files)):
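A small illustration of the renaming above, on a made-up change tuple (illustration only): in this function each attribute is a (target, working tree) pair, and the new code unpacks each pair once into named locals instead of indexing kind[0]/kind[1] throughout.

    change = ('file-id-1',            # file_id
              ('a.txt', 'a.txt'),     # path
              True,                   # changed_content
              (True, True),           # versioned
              ('root-id', 'root-id'), # parent
              ('a.txt', 'a.txt'),     # name
              ('file', 'file'),       # kind
              (True, False))          # executable
    (file_id, path, changed_content, versioned, parent, name, kind,
     executable) = change
    target_kind, wt_kind = kind
    target_executable, wt_executable = executable
    # this is the case where tt.set_executability() would be called above
    assert wt_executable != target_executable and target_kind == "file"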
@@ -3058 +3183 @@
         modified_path = fp.get_path(conflict[2])
         modified_id = tt.final_file_id(conflict[2])
         if len(conflict) == 3:
-            yield Conflict.factory(c_type, action=action, path=modified_path,
-                                   file_id=modified_id)
+            yield conflicts.Conflict.factory(
+                c_type, action=action, path=modified_path, file_id=modified_id)
             conflicting_path = fp.get_path(conflict[3])
             conflicting_id = tt.final_file_id(conflict[3])
-            yield Conflict.factory(c_type, action=action, path=modified_path,
-                                   file_id=modified_id,
-                                   conflict_path=conflicting_path,
-                                   conflict_file_id=conflicting_id)
+            yield conflicts.Conflict.factory(
+                c_type, action=action, path=modified_path,
+                file_id=modified_id,
+                conflict_path=conflicting_path,
+                conflict_file_id=conflicting_id)
 
 
 class _FileMover(object):