        return sorted(FinalPaths(self).get_paths(new_ids))

    def _inventory_altered(self):
        """Determine which trans_ids need new Inventory entries.

        A new entry is needed when anything that would be reflected by an
        inventory entry changes, including file name, file_id, parent file_id,
        file kind, and the execute bit.

        Some care is taken to return entries with real changes, not cases
        where the value is deleted and then restored to its original value,
        but some actually unchanged values may be returned.

        :returns: A list of (path, trans_id) for all items requiring an
            inventory change. Ordered by path.
        """
        changed_ids = set()
        # Find entries whose file_ids are new (or changed).
        new_file_id = set(t for t in self._new_id
                          if self._new_id[t] != self.tree_file_id(t))
        for id_set in [self._new_name, self._new_parent, new_file_id,
                       self._new_executability]:
            changed_ids.update(id_set)
        # removing implies a kind change
        changed_kind = set(self._removed_contents)
        changed_kind.intersection_update(self._new_contents)
        # Ignore entries that are already known to have changed.
        changed_kind.difference_update(changed_ids)
        # to keep only the truly changed ones
        changed_kind = (t for t in changed_kind
                        if self.tree_kind(t) != self.final_kind(t))
        # all kind changes will alter the inventory
        changed_ids.update(changed_kind)
        # To find entries with changed parent_ids, find parents which existed,
        # but changed file_id.
        changed_file_id = set(t for t in new_file_id if t in self._removed_id)
        # Now add all their children to the set.
        for parent_trans_id in changed_file_id:
            changed_ids.update(self.iter_tree_children(parent_trans_id))
        return sorted(FinalPaths(self).get_paths(changed_ids))
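        # Illustrative sketch (assumption, not part of the original source):
        # the names below are hypothetical, but for a transform that only
        # renames one file the result would be a single path-sorted pair:
        #
        #   tt = TreeTransform(working_tree)
        #   tid = tt.trans_id_tree_path('foo')
        #   tt.adjust_path('bar', tt.root, tid)
        #   tt._inventory_altered()   # -> [('bar', tid)]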

    def final_kind(self, trans_id):
        """Determine the final file kind, after any changes applied.
        # ...
            descendants.update(self._limbo_descendants(descendant))
        return descendants

    def create_file(self, contents, trans_id, mode_id=None, sha1=None):
        """Schedule creation of a new file.

        :param contents: an iterator of strings, all of which will be written
            to the target destination.
        :param trans_id: TreeTransform handle
        :param mode_id: If not None, force the mode of the target file to match
            the mode of the object referenced by mode_id.
            Otherwise, we will try to preserve mode bits of an existing file.
        :param sha1: If the sha1 of this content is already known, pass it in.
            We can use it to prevent future sha1 computations.
        """
        name = self._limbo_name(trans_id)
        f = open(name, 'wb')
        try:
            unique_add(self._new_contents, trans_id, 'file')
            f.writelines(contents)
        finally:
            f.close()
        self._set_mtime(name)
        self._set_mode(trans_id, mode_id, S_ISREG)
        # It is unfortunate we have to use lstat instead of fstat, but we just
        # used utime and chmod on the file, so we need the accurate final
        # details.
        if sha1 is not None:
            self._observed_sha1s[trans_id] = (sha1, osutils.lstat(name))
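        # Illustrative sketch (assumption, not part of the original source):
        # a caller that already knows the content's sha1, as the build_tree
        # code further down does when it passes sha1=text_sha1, can supply it
        # here so apply() never has to re-read the new file:
        #
        #   chunks = ['hello\n']
        #   tt.create_file(chunks, trans_id, sha1=osutils.sha_strings(chunks))
        #
        # Passing sha1=None keeps the previous behaviour of hashing later.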

    def _read_file_chunks(self, trans_id):
        cur_file = open(self._limbo_name(trans_id), 'rb')
                        # ...
                        self.rename_count += 1
                    # TODO: if trans_id in self._observed_sha1s, we should
                    #       re-stat the final target, since ctime will be
                    #       updated by the change.
                if (trans_id in self._new_contents or
                    self.path_changed(trans_id)):
                    if trans_id in self._new_contents:
                        modified_paths.append(full_path)
                if trans_id in self._new_executability:
                    self._set_executability(path, trans_id)
                if trans_id in self._observed_sha1s:
                    o_sha1, o_st_val = self._observed_sha1s[trans_id]
                    st = osutils.lstat(full_path)
                    self._observed_sha1s[trans_id] = (o_sha1, st)
        finally:
            child_pb.finished()
        for path, trans_id in new_paths:
            # new_paths includes stuff like workingtree conflicts. Only the
            # stuff in new_contents actually comes from limbo.
            if trans_id in self._limbo_files:
                del self._limbo_files[trans_id]
        self._new_contents.clear()
        return modified_paths
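        # Illustrative note (assumption, not original code): at this point each
        # remaining entry in self._observed_sha1s pairs a content sha1 with the
        # stat taken after the rename, roughly
        #   self._observed_sha1s[trans_id] == (sha1_hex, osutils.lstat(full_path))
        # which is what _apply_observed_sha1s later hands to the working tree.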

    def _apply_observed_sha1s(self):
        """After we have finished renaming everything, update observed sha1s

        This has to be done after self._tree.apply_inventory_delta, otherwise
        it doesn't know anything about the files we are updating. Also, we want
        to do this as late as possible, so that most entries end up cached.
        """
        # TODO: this doesn't update the stat information for directories. So
        #       the first 'bzr status' will still need to rewrite
        #       .bzr/checkout/dirstate. However, we at least don't need to
        #       re-read all of the files.
        # TODO: If the operation took a while, we could do a time.sleep(3) here
        #       to allow the clock to tick over and ensure we won't have any
        #       problems. (we could observe start time, and finish time, and if
        #       it is less than eg 10% overhead, add a sleep call.)
        paths = FinalPaths(self)
        for trans_id, observed in self._observed_sha1s.iteritems():
            path = paths.get_path(trans_id)
            # We could get the file_id, but dirstate prefers to use the path
            # anyway, and it is 'cheaper' to determine.
            # file_id = self._new_id[trans_id]
            self._tree._observed_sha1(None, path, observed)
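            # Illustrative note (assumption): `observed` is the (sha1, stat)
            # pair recorded in create_file()/apply(), so for an empty file the
            # working tree would cache something like
            #   ('da39a3ee5e6b4b0d3255bfef95601890afd80709', <lstat result>)
            # against `path` and the next status can skip re-hashing it.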


class TransformPreview(DiskTreeTransform):
    """A TreeTransform for generating preview trees.
    # ...

    def get_file_size(self, file_id):
        """See Tree.get_file_size"""
        trans_id = self._transform.trans_id_file_id(file_id)
        kind = self._transform.final_kind(trans_id)
        if kind != 'file':
            return None
        if trans_id in self._transform._new_contents:
            return self._stat_limbo_file(trans_id=trans_id).st_size
        if self.kind(file_id) == 'file':
            return self._transform._tree.get_file_size(file_id)
        else:
            return None

    def get_file_verifier(self, file_id, path=None, stat_value=None):
        trans_id = self._transform.trans_id_file_id(file_id)
        kind = self._transform._new_contents.get(trans_id)
        if kind is None:
            return self._transform._tree.get_file_verifier(file_id)
        if kind == 'file':
            fileobj = self.get_file(file_id)
            try:
                return ("SHA1", sha_file(fileobj))
            finally:
                fileobj.close()
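    # Illustrative note (assumption): both branches are expected to return a
    # ("SHA1", hexdigest) style verifier, e.g.
    #   ("SHA1", "da39a3ee5e6b4b0d3255bfef95601890afd80709")
    # for an empty file, whether the content lives in limbo or in the
    # underlying tree.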

    def get_file_sha1(self, file_id, path=None, stat_value=None):
        trans_id = self._transform.trans_id_file_id(file_id)
        kind = self._transform._new_contents.get(trans_id)
        # ...
        unchanged = dict(unchanged)
        new_desired_files = []
        count = 0
        for file_id, (trans_id, tree_path, text_sha1) in desired_files:
            accelerator_path = unchanged.get(file_id)
            if accelerator_path is None:
                new_desired_files.append((file_id,
                    (trans_id, tree_path, text_sha1)))
                continue
            pb.update(gettext('Adding file contents'), count + offset, total)
            if hardlink:
                tt.create_hardlink(accelerator_tree.abspath(accelerator_path),
                                   trans_id)
            # ...
        offset += count
    for count, ((trans_id, tree_path, text_sha1), contents) in enumerate(
            tree.iter_files_bytes(new_desired_files)):
        if wt.supports_content_filtering():
            filters = wt._content_filter_stack(tree_path)
            contents = filtered_output_bytes(contents, filters,
                ContentFilterContext(tree_path, tree))
        tt.create_file(contents, trans_id, sha1=text_sha1)
        pb.update(gettext('Adding file contents'), count + offset, total)
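    # Illustrative note (assumption, not original code): each entry handed to
    # this loop is expected to look like
    #   (file_id, (trans_id, tree_path, text_sha1))
    # so the sha1 recorded when desired_files was built is threaded through to
    # tt.create_file(contents, trans_id, sha1=text_sha1) instead of being
    # recomputed at apply time.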


def _reparent_children(tt, old_parent, new_parent):
        # ...
        deferred_files = []
        for id_num, (file_id, path, changed_content, versioned, parent, name,
                kind, executable) in enumerate(change_list):
            target_path, wt_path = path
            target_versioned, wt_versioned = versioned
            target_parent, wt_parent = parent
            target_name, wt_name = name
            target_kind, wt_kind = kind
            target_executable, wt_executable = executable
            if skip_root and wt_parent is None:
                continue
            trans_id = tt.trans_id_file_id(file_id)
            mode_id = None
            if changed_content:
                keep_content = False
                if wt_kind == 'file' and (backups or target_kind is None):
                    wt_sha1 = working_tree.get_file_sha1(file_id)
                    if merge_modified.get(file_id) != wt_sha1:
                        # acquire the basis tree lazily to prevent the
                        # expense of accessing it when it's not needed ?
                        # (Guessing, RBC, 200702)
                        if basis_tree is None:
                            basis_tree = working_tree.basis_tree()
                            basis_tree.lock_read()
                        if basis_tree.has_id(file_id):
                            if wt_sha1 != basis_tree.get_file_sha1(file_id):
                                keep_content = True
                        elif target_kind is None and not target_versioned:
                            keep_content = True
                if wt_kind is not None:
                    if not keep_content:
                        tt.delete_contents(trans_id)
                    elif target_kind is not None:
                        parent_trans_id = tt.trans_id_file_id(wt_parent)
                        backup_name = tt._available_backup_name(
                            wt_name, parent_trans_id)
                        tt.adjust_path(backup_name, parent_trans_id, trans_id)
                        new_trans_id = tt.create_path(wt_name, parent_trans_id)
                        if wt_versioned and target_versioned:
                            tt.unversion_file(trans_id)
                            tt.version_file(file_id, new_trans_id)
                        # New contents should have the same unix perms as old
                        # contents
                        mode_id = trans_id
                        trans_id = new_trans_id
                if target_kind in ('directory', 'tree-reference'):
                    tt.create_directory(trans_id)
                    if target_kind == 'tree-reference':
                        revision = target_tree.get_reference_revision(file_id,
                            target_path)
                        tt.set_tree_reference(revision, trans_id)
                elif target_kind == 'symlink':
                    tt.create_symlink(target_tree.get_symlink_target(file_id),
                                      trans_id)
                elif target_kind == 'file':
                    deferred_files.append((file_id, (trans_id, mode_id)))
                    if basis_tree is None:
                        basis_tree = working_tree.basis_tree()
                        basis_tree.lock_read()
                    new_sha1 = target_tree.get_file_sha1(file_id)
                    if (basis_tree.has_id(file_id) and
                        new_sha1 == basis_tree.get_file_sha1(file_id)):
                        if file_id in merge_modified:
                            del merge_modified[file_id]
                    else:
                        merge_modified[file_id] = new_sha1

                    # preserve the execute bit when backing up
                    if keep_content and wt_executable == target_executable:
                        tt.set_executability(target_executable, trans_id)
                elif target_kind is not None:
                    raise AssertionError(target_kind)
            if not wt_versioned and target_versioned:
                tt.version_file(file_id, trans_id)
            if wt_versioned and not target_versioned:
                tt.unversion_file(trans_id)
            if (target_name is not None and
                (wt_name != target_name or wt_parent != target_parent)):
                if target_name == '' and target_parent is None:
                    parent_trans = ROOT_PARENT
                else:
                    parent_trans = tt.trans_id_file_id(target_parent)
                if wt_parent is None and wt_versioned:
                    tt.adjust_root_path(target_name, parent_trans)
                else:
                    tt.adjust_path(target_name, parent_trans, trans_id)
            if wt_executable != target_executable and target_kind == "file":
                tt.set_executability(target_executable, trans_id)
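        # Illustrative note (assumption): the target_*/wt_* locals unpack the
        # iter_changes pairs, e.g. reverting a file renamed in the working tree
        # gives roughly
        #   name == ('old-name', 'new-name')   # (target_name, wt_name)
        # so "wt" is the current working-tree value and "target" is the state
        # being reverted to.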
        if working_tree.supports_content_filtering():
            for index, ((trans_id, mode_id), bytes) in enumerate(
                target_tree.iter_files_bytes(deferred_files)):
        # ...
        modified_path = fp.get_path(conflict[2])
        modified_id = tt.final_file_id(conflict[2])
        if len(conflict) == 3:
            yield conflicts.Conflict.factory(
                c_type, action=action, path=modified_path, file_id=modified_id)
        else:
            conflicting_path = fp.get_path(conflict[3])
            conflicting_id = tt.final_file_id(conflict[3])
            yield conflicts.Conflict.factory(
                c_type, action=action, path=modified_path,
                file_id=modified_id,
                conflict_path=conflicting_path,
                conflict_file_id=conflicting_id)


class _FileMover(object):