    Trees can be compared, etc., regardless of whether they are working
    trees or versioned trees.
    """

    def changes_from(self, other, want_unchanged=False, specific_files=None,
        extra_trees=None, require_versioned=False, include_root=False,
        want_unversioned=False):
        """Return a TreeDelta of the changes from other to this tree.

        :param other: A tree to compare with.
        :param specific_files: An optional list of file paths to restrict the
            comparison to. When mapping filenames to ids, all matches in all
            trees (including optional extra_trees) are used, and all children of
            matched directories are included.
        :param want_unchanged: An optional boolean requesting the inclusion of
            unchanged entries in the result.
        :param extra_trees: An optional list of additional trees to use when
            mapping the contents of specific_files (paths) to file_ids.
        :param require_versioned: An optional boolean (defaults to False). When
            supplied and True all the 'specific_files' must be versioned, or
            a PathsNotVersionedError will be thrown.
        :param want_unversioned: Scan for unversioned paths.

        The comparison will be performed by an InterTree object looked up on
        self and other.
        """
        # Martin observes that Tree.changes_from returns a TreeDelta and this
        # may confuse people, because the class name of the returned object is
        # a synonym of the object referenced in the method name.
        return InterTree.get(other, self).compare(
            want_unchanged=want_unchanged,
            specific_files=specific_files,
            extra_trees=extra_trees,
            require_versioned=require_versioned,
            include_root=include_root,
            want_unversioned=want_unversioned,
            )

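    # Illustrative usage sketch (not part of this module's API): comparing a
    # working tree against its basis tree. The variable names are
    # hypothetical, and the TreeDelta tuple shapes are shown as this editor
    # understands bzrlib.delta, so treat them as an assumption.
    #
    #   basis = work_tree.basis_tree()
    #   tree_delta = work_tree.changes_from(basis)
    #   for path, file_id, kind in tree_delta.added:
    #       print 'added %s (%s)' % (path, kind)
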
    def iter_changes(self, from_tree, include_unchanged=False,
                     specific_files=None, pb=None, extra_trees=None,
                     require_versioned=True, want_unversioned=False):
        intertree = InterTree.get(from_tree, self)
        return intertree.iter_changes(include_unchanged, specific_files, pb,
            extra_trees, require_versioned, want_unversioned=want_unversioned)

    def conflicts(self):
        """Get a list of the conflicts in the tree.

        Each conflict is an instance of bzrlib.conflicts.Conflict.
        """
        return _mod_conflicts.ConflictList()

    def extras(self):
        """For trees that can have unversioned files, return all such paths."""
        return []

    def get_parent_ids(self):
        """Get the parent ids for this tree.

        :return: a list of parent ids. [] is returned to indicate
            a tree with no parents.
        :raises: BzrError if the parents are not known.
        """
        raise NotImplementedError(self.get_parent_ids)

    def has_filename(self, filename):
        """True if the tree has given filename."""
        raise NotImplementedError(self.has_filename)

    def has_id(self, file_id):
        return self.inventory.has_id(file_id)

    def __contains__(self, file_id):
        return self.has_id(file_id)

    def has_or_had_id(self, file_id):
        if file_id == self.inventory.root.file_id:
            return True
        return self.inventory.has_id(file_id)

    def is_ignored(self, filename):
        """Check whether the filename is ignored by this tree.

        :param filename: The relative filename within the tree.
        :return: True if the filename is ignored.
        """
        return False

    def __iter__(self):
        return iter(self.inventory)

    def all_file_ids(self):
        """Iterate through all file ids, including ids for missing files."""
        return set(self.inventory)

    def id2path(self, file_id):
        """Return the path for a file id."""
        return self.inventory.id2path(file_id)

    def is_control_filename(self, filename):
        """True if filename is the name of a control file in this tree.

        :param filename: A filename within the tree. This is a relative path
            from the root of this tree.

        This is true IF and ONLY IF the filename is part of the meta data
        that bzr controls in this tree. I.E. a random .bzr directory placed
        on disk will not be a control file for this tree.
        """
        return self.bzrdir.is_control_filename(filename)

    def iter_entries_by_dir(self, specific_file_ids=None):
        """Walk the tree in 'by_dir' order.

        This will yield each entry in the tree as a (path, entry) tuple.
        The order that they are yielded is:

        Directories are walked in a depth-first lexicographical order,
        however, whenever a directory is reached, all of its direct child
        nodes are yielded in lexicographical order before yielding the
        subdirectories.

        For example, in the tree::

           a/
             b/
               c
             d/
               e
           f/
             g

        The yield order (ignoring root) would be::
          a, f, a/b, a/d, a/b/c, a/d/e, f/g
        """
        return self.inventory.iter_entries_by_dir(
            specific_file_ids=specific_file_ids)

    def iter_references(self):
        if self.supports_tree_reference():
            for path, entry in self.iter_entries_by_dir():
                if entry.kind == 'tree-reference':
                    yield path, entry.file_id

    def kind(self, file_id):
        raise NotImplementedError("Tree subclass %s must implement kind"
            % self.__class__.__name__)

    def stored_kind(self, file_id):
        """File kind stored for this file_id.

        May not match kind on disk for working trees. Always available
        for versioned files, even when the file itself is missing.
        """
        return self.kind(file_id)

    def path_content_summary(self, path):
        """Get a summary of the information about path.

        :param path: A relative path within the tree.
        :return: A tuple containing kind, size, exec, sha1-or-link.
            Kind is always present (see tree.kind()).
            size is present if kind is file, None otherwise.
            exec is None unless kind is file and the platform supports the 'x'
            bit.
            sha1-or-link is the link target if kind is symlink, or the sha1 if
            it can be obtained without reading the file.
        """
        raise NotImplementedError(self.path_content_summary)

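    # A sketch of the summaries a concrete implementation might return; the
    # values below are illustrative only, not taken from a real tree:
    #
    #   ('file', 3543, True, 'b83b5ee...')      # regular, executable file
    #   ('directory', None, None, None)         # directory
    #   ('symlink', None, None, 'target/path')  # symlink: last slot holds
    #                                           # the link target
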
    def get_reference_revision(self, file_id, path=None):
        raise NotImplementedError("Tree subclass %s must implement "
                                  "get_reference_revision"
            % self.__class__.__name__)

    def _comparison_data(self, entry, path):
        """Return a tuple of kind, executable, stat_value for a file.

        entry may be None if there is no inventory entry for the file, but
        path must always be supplied.

        kind is None if there is no file present (even if an inventory id is
        present). executable is False for non-file entries.
        """
        raise NotImplementedError(self._comparison_data)

    def _file_size(self, entry, stat_value):
        raise NotImplementedError(self._file_size)

    def _get_inventory(self):
        return self._inventory

    def get_file(self, file_id, path=None):
        """Return a file object for the file file_id in the tree.

        If both file_id and path are defined, it is implementation defined as
        to which one is used.
        """
        raise NotImplementedError(self.get_file)

    def get_file_with_stat(self, file_id, path=None):
        """Get a file handle and stat object for file_id.

        The default implementation returns (self.get_file, None) for backwards
        compatibility.

        :param file_id: The file id to read.
        :param path: The path of the file, if it is known.
        :return: A tuple (file_handle, stat_value_or_None). If the tree has
            no stat facility, or need for a stat cache feedback during commit,
            it may return None for the second element of the tuple.
        """
        return (self.get_file(file_id, path), None)

    def get_file_text(self, file_id, path=None):
        """Return the byte content of a file.

        :param file_id: The file_id of the file.
        :param path: The path of the file.
        If both file_id and path are supplied, an implementation may use
        either one.
        """
        my_file = self.get_file(file_id, path)
        try:
            return my_file.read()
        finally:
            my_file.close()

    def get_file_lines(self, file_id, path=None):
        """Return the content of a file, as lines.

        :param file_id: The file_id of the file.
        :param path: The path of the file.
        If both file_id and path are supplied, an implementation may use
        either one.
        """
        return osutils.split_lines(self.get_file_text(file_id, path))

    def get_file_mtime(self, file_id, path=None):
        """Return the modification time for a file.

        :param file_id: The handle for this file.
        :param path: The path that this file can be found at.
            These must point to the same object.
        """
        raise NotImplementedError(self.get_file_mtime)

    def get_file_size(self, file_id):
        """Return the size of a file in bytes.

        This applies only to regular files. If invoked on directories or
        symlinks, it will return None.
        :param file_id: The file-id of the file
        """
        raise NotImplementedError(self.get_file_size)

    def get_file_by_path(self, path):
        return self.get_file(self._inventory.path2id(path), path)

    def iter_files_bytes(self, desired_files):
        """Iterate through file contents.

        Files will not necessarily be returned in the order they occur in
        desired_files. No specific order is guaranteed.

        Yields pairs of identifier, bytes_iterator. identifier is an opaque
        value supplied by the caller as part of desired_files. It should
        uniquely identify the file version in the caller's context. (Examples:
        an index number or a TreeTransform trans_id.)

        bytes_iterator is an iterable of bytestrings for the file. The
        kind of iterable and length of the bytestrings are unspecified, but for
        this implementation, it is a tuple containing a single bytestring with
        the complete text of the file.

        :param desired_files: a list of (file_id, identifier) pairs
        """
        for file_id, identifier in desired_files:
            # We wrap the string in a tuple so that we can return an iterable
            # of bytestrings. (Technically, a bytestring is also an iterable
            # of bytestrings, but iterating through each character is not
            # performant.)
            cur_file = (self.get_file_text(file_id),)
            yield identifier, cur_file

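    # Hedged usage sketch: stream several file texts without assuming any
    # ordering. The file ids and identifiers below are arbitrary placeholder
    # values chosen by the caller, not real ids.
    #
    #   wanted = [('file-id-1', 'a'), ('file-id-2', 'b')]
    #   texts = {}
    #   for identifier, byte_chunks in tree.iter_files_bytes(wanted):
    #       texts[identifier] = ''.join(byte_chunks)
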
    def get_symlink_target(self, file_id):
        """Get the target for a given file_id.

        It is assumed that the caller already knows that file_id is referencing
        a symlink.
        :param file_id: Handle for the symlink entry.
        :return: The path the symlink points to.
        """
        raise NotImplementedError(self.get_symlink_target)

    def get_canonical_inventory_paths(self, paths):
        """Like get_canonical_inventory_path() but works on multiple items.

        :param paths: A sequence of paths relative to the root of the tree.
        :return: A list of paths, with each item the corresponding input path
            adjusted to account for existing elements that match case
            insensitively.
        """
        return list(self._yield_canonical_inventory_paths(paths))

    def get_canonical_inventory_path(self, path):
        """Returns the first inventory item that case-insensitively matches path.

        If a path matches exactly, it is returned. If no path matches exactly
        but more than one path matches case-insensitively, it is implementation
        defined which is returned.

        If no path matches case-insensitively, the input path is returned, but
        with as many path entries that do exist changed to their canonical
        form.

        If you need to resolve many names from the same tree, you should
        use get_canonical_inventory_paths() to avoid O(N) behaviour.

        :param path: A path relative to the root of the tree.
        :return: The input path adjusted to account for existing elements
            that match case insensitively.
        """
        return self._yield_canonical_inventory_paths([path]).next()

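    # Illustrative sketch: on a tree that versions 'README' and
    # 'doc/index.txt', a case-insensitive lookup might behave like this
    # (the paths are hypothetical, not from a real tree):
    #
    #   tree.get_canonical_inventory_path('readme')         # -> 'README'
    #   tree.get_canonical_inventory_path('DOC/index.txt')  # -> 'doc/index.txt'
    #   tree.get_canonical_inventory_path('no/such.txt')    # -> 'no/such.txt'
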
    def _yield_canonical_inventory_paths(self, paths):
        for path in paths:
            # First, if the path as specified exists exactly, just use it.
            if self.path2id(path) is not None:
                yield path
                continue
            # go walkin...
            cur_id = self.get_root_id()
            cur_path = ''
            bit_iter = iter(path.split("/"))
            for elt in bit_iter:
                lelt = elt.lower()
                for child in self.iter_children(cur_id):
                    try:
                        child_base = os.path.basename(self.id2path(child))
                        if child_base.lower() == lelt:
                            cur_id = child
                            cur_path = osutils.pathjoin(cur_path, child_base)
                            break
                    except errors.NoSuchId:
                        # before a change is committed we can see this error...
                        continue
                else:
                    # got to the end of this directory and no entries matched.
                    # Return what matched so far, plus the rest as specified.
                    cur_path = osutils.pathjoin(cur_path, elt, *list(bit_iter))
                    break
            yield cur_path

    def get_root_id(self):
        """Return the file_id for the root of this tree."""
        raise NotImplementedError(self.get_root_id)

    def annotate_iter(self, file_id,
                      default_revision=_mod_revision.CURRENT_REVISION):
        """Return an iterator of revision_id, line tuples.

        For working trees (and mutable trees in general), the special
        revision_id 'current:' will be used for lines that are new in this
        tree, e.g. uncommitted changes.
        :param file_id: The file to produce an annotated version from
        :param default_revision: For lines that don't match a basis, mark them
            with this revision id. Not all implementations will make use of
            this value.
        """
        raise NotImplementedError(self.annotate_iter)

    def _get_plan_merge_data(self, file_id, other, base):
        from bzrlib import merge, versionedfile
        vf = versionedfile._PlanMergeVersionedFile(file_id)
        last_revision_a = self._get_file_revision(file_id, vf, 'this:')
        last_revision_b = other._get_file_revision(file_id, vf, 'other:')
        if base is None:
            last_revision_base = None
        else:
            last_revision_base = base._get_file_revision(file_id, vf, 'base:')
        return vf, last_revision_a, last_revision_b, last_revision_base

    def plan_file_merge(self, file_id, other, base=None):
        """Generate a merge plan based on annotations.

        If the file contains uncommitted changes in this tree, they will be
        attributed to the 'current:' pseudo-revision. If the file contains
        uncommitted changes in the other tree, they will be assigned to the
        'other:' pseudo-revision.
        """
        data = self._get_plan_merge_data(file_id, other, base)
        vf, last_revision_a, last_revision_b, last_revision_base = data
        return vf.plan_merge(last_revision_a, last_revision_b,
                             base=last_revision_base)

    def plan_file_lca_merge(self, file_id, other, base=None):
        """Generate a merge plan based on lca-newness.

        If the file contains uncommitted changes in this tree, they will be
        attributed to the 'current:' pseudo-revision. If the file contains
        uncommitted changes in the other tree, they will be assigned to the
        'other:' pseudo-revision.
        """
        data = self._get_plan_merge_data(file_id, other, base)
        vf, last_revision_a, last_revision_b, last_revision_base = data
        return vf.plan_lca_merge(last_revision_a, last_revision_b,
                                 base=last_revision_base)

    def _iter_parent_trees(self):
        """Iterate through parent trees, defaulting to Tree.revision_tree."""
        for revision_id in self.get_parent_ids():
            try:
                yield self.revision_tree(revision_id)
            except errors.NoSuchRevisionInTree:
                yield self.repository.revision_tree(revision_id)

    @staticmethod
    def _file_revision(revision_tree, file_id):
        """Determine the revision associated with a file in a given tree."""
        revision_tree.lock_read()
        try:
            return revision_tree.inventory[file_id].revision
        finally:
            revision_tree.unlock()

    def _get_file_revision(self, file_id, vf, tree_revision):
        """Ensure that file_id, tree_revision is in vf to plan the merge."""
        if getattr(self, '_repository', None) is None:
            last_revision = tree_revision
            parent_keys = [(file_id, self._file_revision(t, file_id)) for t in
                self._iter_parent_trees()]
            vf.add_lines((file_id, last_revision), parent_keys,
                         self.get_file(file_id).readlines())
            repo = self.branch.repository
            base_vf = repo.texts
        else:
            last_revision = self._file_revision(self, file_id)
            base_vf = self._repository.texts
        if base_vf not in vf.fallback_versionedfiles:
            vf.fallback_versionedfiles.append(base_vf)
        return last_revision

    inventory = property(_get_inventory,
                         doc="Inventory of this Tree")

    def _check_retrieved(self, ie, f):
        fp = fingerprint_file(f)

        if ie.text_size is not None:
            if ie.text_size != fp['size']:
                raise BzrError("mismatched size for file %r in %r" % (ie.file_id, self._store),
                        ["inventory expects %d bytes" % ie.text_size,
                         "file is actually %d bytes" % fp['size'],
                         "store is probably damaged/corrupt"])

        if ie.text_sha1 != fp['sha1']:
            raise BzrError("wrong SHA-1 for file %r in %r" % (ie.file_id, self._store),
                    ["inventory expects %s" % ie.text_sha1,
                     "file is actually %s" % fp['sha1'],
                     "store is probably damaged/corrupt"])

    def print_file(self, file_id):
        """Print file with id `file_id` to stdout."""
        import sys
        sys.stdout.write(self.get_file_text(file_id))

    def export(self, dest, format='dir', root=None):
        """Export this tree."""
        try:
            exporter = exporters[format]
        except KeyError:
            from bzrlib.errors import BzrCommandError
            raise BzrCommandError("export format %r not supported" % format)
        exporter(self, dest, root)


class RevisionTree(Tree):
    """Tree viewing a previous revision.

    File text can be retrieved from the text store.

    TODO: Some kind of `__repr__` method, but a good one
          probably means knowing the branch and revision number,
          or at least passing a description to the constructor.
    """

    def __init__(self, weave_store, inv, revision_id):
        self._weave_store = weave_store
        self._inventory = inv
        self._revision_id = revision_id

    def get_weave(self, file_id):
        # FIXME: RevisionTree should be given a branch
        # not a store, or the store should know the branch.
        import bzrlib.transactions as transactions
        return self._weave_store.get_weave(file_id,
            transactions.PassThroughTransaction())

    def get_file_lines(self, file_id):
        ie = self._inventory[file_id]
        weave = self.get_weave(file_id)
        return weave.get(ie.revision)

    def get_file_text(self, file_id):
        return ''.join(self.get_file_lines(file_id))

    def get_file(self, file_id):
        return StringIO(self.get_file_text(file_id))

    def get_file_size(self, file_id):
        return self._inventory[file_id].text_size

    def get_file_sha1(self, file_id):
        ie = self._inventory[file_id]
        if ie.kind == "file":
            return ie.text_sha1

    def is_executable(self, file_id):
        ie = self._inventory[file_id]
        if ie.kind != "file":
            return None
        return self._inventory[file_id].executable

    def has_filename(self, filename):
        return bool(self.inventory.path2id(filename))

    def list_files(self):
        # The only files returned by this are those from the version
        for path, entry in self.inventory.iter_entries():
            yield path, 'V', entry.kind, entry.file_id, entry

    def get_symlink_target(self, file_id):
        ie = self._inventory[file_id]
        return ie.symlink_target

    def kind(self, file_id):
        return self._inventory[file_id].kind


class EmptyTree(Tree):

    def __init__(self):
        self._inventory = Inventory()

    def get_symlink_target(self, file_id):
        return None

    def has_filename(self, filename):
        return False

    def kind(self, file_id):
        assert self._inventory[file_id].kind == "root_directory"
        return "root_directory"

    def list_files(self):
        return iter([])

    def path2id(self, path):
        """Return the id for path in this tree."""
        return self._inventory.path2id(path)

    def paths2ids(self, paths, trees=[], require_versioned=True):
        """Return all the ids that can be reached by walking from paths.

        Each path is looked up in this tree and any extras provided in
        trees, and this is repeated recursively: the children in an extra tree
        of a directory that has been renamed under a provided path in this tree
        are all returned, even if none exist under a provided path in this
        tree, and vice versa.

        :param paths: An iterable of paths to start converting to ids from.
            Alternatively, if paths is None, no ids should be calculated and None
            will be returned. This is offered to make calling the api unconditional
            for code that *might* take a list of files.
        :param trees: Additional trees to consider.
        :param require_versioned: If False, do not raise NotVersionedError if
            an element of paths is not versioned in this tree and all of trees.
        """
        return find_ids_across_trees(paths, [self] + list(trees), require_versioned)

    def iter_children(self, file_id):
        entry = self.iter_entries_by_dir([file_id]).next()[1]
        for child in getattr(entry, 'children', {}).itervalues():
            yield child.file_id

    def revision_tree(self, revision_id):
        """Obtain a revision tree for the revision revision_id.

        The intention of this method is to allow access to possibly cached
        tree data. Implementors of this method should raise NoSuchRevision if
        the tree is not locally available, even if they could obtain the
        tree via a repository or some other means. Callers are responsible
        for finding the ultimate source for a revision tree.

        :param revision_id: The revision_id of the requested tree.
        :raises: NoSuchRevision if the tree cannot be obtained.
        """
        raise errors.NoSuchRevisionInTree(self, revision_id)

    def unknowns(self):
        """What files are present in this tree and unknown.

        :return: an iterator over the unknown files.
        """
        return iter([])

    def __contains__(self, file_id):
        return file_id in self._inventory

    def get_file_sha1(self, file_id):
        assert self._inventory[file_id].kind == "root_directory"

    def filter_unversioned_files(self, paths):
        """Filter out paths that are versioned.

        :return: set of paths.
        """
        # NB: we specifically *don't* call self.has_filename, because for
        # WorkingTrees that can indicate files that exist on disk but that
        # are not versioned.
        pred = self.inventory.has_filename
        return set((p for p in paths if not pred(p)))

    def walkdirs(self, prefix=""):
        """Walk the contents of this tree from path down.

        This yields all the data about the contents of a directory at a time.
        After each directory has been yielded, if the caller has mutated the
        list to exclude some directories, they are then not descended into.

        The data yielded is of the form:
        ((directory-relpath, directory-path-from-root, directory-fileid),
        [(relpath, basename, kind, lstat, path_from_tree_root, file_id,
          versioned_kind), ...]),
         - directory-relpath is the containing dirs relpath from prefix
         - directory-path-from-root is the containing dirs path from /
         - directory-fileid is the id of the directory if it is versioned.
         - relpath is the relative path within the subtree being walked.
         - basename is the basename
         - kind is the kind of the file now. If unknown then the file is not
           present within the tree - but it may be recorded as versioned. See
           versioned_kind.
         - lstat is the stat data *if* the file was statted.
         - path_from_tree_root is the path from the root of the tree.
         - file_id is the file_id if the entry is versioned.
         - versioned_kind is the kind of the file as last recorded in the
           versioning system. If 'unknown' the file is not versioned.
        One of 'kind' and 'versioned_kind' must not be 'unknown'.

        :param prefix: Start walking from prefix within the tree rather than
            at the root. This allows one to walk a subtree but get paths that are
            relative to a tree rooted higher up.
        :return: an iterator over the directory data.
        """
        raise NotImplementedError(self.walkdirs)

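    # Hedged sketch of consuming walkdirs() on a concrete tree. Pruning relies
    # on mutating the yielded entry list in place, as documented above; the
    # basename filter shown here is purely illustrative.
    #
    #   for (dir_relpath, dir_path, dir_file_id), entries in tree.walkdirs():
    #       entries[:] = [e for e in entries if e[1] != 'build']
    #       for (relpath, basename, kind, lstat, path_from_root,
    #            file_id, versioned_kind) in entries:
    #           pass
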
    def supports_content_filtering(self):
        return False

    def _content_filter_stack(self, path=None, file_id=None):
        """The stack of content filters for a path if filtering is supported.

        Readers will be applied in first-to-last order.
        Writers will be applied in last-to-first order.
        Either the path or the file-id needs to be provided.

        :param path: path relative to the root of the tree
            or None if unknown
        :param file_id: file_id or None if unknown
        :return: the list of filters - [] if there are none
        """
        filter_pref_names = filters._get_registered_names()
        if len(filter_pref_names) == 0:
            return []
        if path is None:
            path = self.id2path(file_id)
        prefs = self.iter_search_rules([path], filter_pref_names).next()
        stk = filters._get_filter_stack_for(prefs)
        if 'filters' in debug.debug_flags:
            note("*** %s content-filter: %s => %r" % (path, prefs, stk))
        return stk

    def _content_filter_stack_provider(self):
        """A function that returns a stack of ContentFilters.

        The function takes a path (relative to the top of the tree) and a
        file-id as parameters.

        :return: None if content filtering is not supported by this tree.
        """
        if self.supports_content_filtering():
            return lambda path, file_id: \
                self._content_filter_stack(path, file_id)
        else:
            return None

    def iter_search_rules(self, path_names, pref_names=None,
                          _default_searcher=None):
        """Find the preferences for filenames in a tree.

        :param path_names: an iterable of paths to find attributes for.
          Paths are given relative to the root of the tree.
        :param pref_names: the list of preferences to lookup - None for all
        :param _default_searcher: private parameter to assist testing - don't use
        :return: an iterator of tuple sequences, one per path-name.
          See _RulesSearcher.get_items for details on the tuple sequence.
        """
        if _default_searcher is None:
            _default_searcher = rules._per_user_searcher
        searcher = self._get_rules_searcher(_default_searcher)
        if searcher is not None:
            if pref_names is not None:
                for path in path_names:
                    yield searcher.get_selected_items(path, pref_names)
            else:
                for path in path_names:
                    yield searcher.get_items(path)

    def _get_rules_searcher(self, default_searcher):
        """Get the RulesSearcher for this tree given the default one."""
        searcher = default_searcher
        return searcher


######################################################################

def find_renames(old_inv, new_inv):
    for file_id in old_inv:
        if file_id not in new_inv:
            continue
        old_name = old_inv.id2path(file_id)
        new_name = new_inv.id2path(file_id)
        if old_name != new_name:
            yield (old_name, new_name)


######################################################################
# export

def dir_exporter(tree, dest, root):
    """Export this tree to a new directory.

    `dest` should not exist, and will be created holding the
    contents of this tree.

    TODO: To handle subdirectories we need to create the
           directories first.

    :note: If the export fails, the destination directory will be
           left in a half-assed state.
    """
    import os
    os.mkdir(dest)
    mutter('export version %r' % tree)
    inv = tree.inventory
    for dp, ie in inv.iter_entries():
        ie.put_on_disk(dest, dp, tree)

exporters['dir'] = dir_exporter

def get_root_name(dest):
    """Get just the root name for a tarball.

    >>> get_root_name('mytar.tar')
    'mytar'
    >>> get_root_name('mytar.tar.bz2')
    'mytar'
    >>> get_root_name('tar.tar.tar.tgz')
    'tar.tar.tar'
    >>> get_root_name('bzr-0.0.5.tar.gz')
    'bzr-0.0.5'
    >>> get_root_name('a/long/path/mytar.tgz')
    'mytar'
    >>> get_root_name('../parent/../dir/other.tbz2')
    'other'
    """
    endings = ['.tar', '.tar.gz', '.tgz', '.tar.bz2', '.tbz2']
    dest = os.path.basename(dest)
    for end in endings:
        if dest.endswith(end):
            return dest[:-len(end)]

def tar_exporter(tree, dest, root, compression=None):
    """Export this tree to a new tar file.

    `dest` will be created holding the contents of this tree; if it
    already exists, it will be clobbered, like with "tar -c".
    """
    from time import time
    now = time()
    compression = str(compression or '')
    if root is None:
        root = get_root_name(dest)
    try:
        ball = tarfile.open(dest, 'w:' + compression)
    except tarfile.CompressionError, e:
        raise BzrError(str(e))
    mutter('export version %r' % tree)
    inv = tree.inventory
    for dp, ie in inv.iter_entries():
        mutter(" export {%s} kind %s to %s" % (ie.file_id, ie.kind, dest))
        item, fileobj = ie.get_tar_item(root, dp, now, tree)
        ball.addfile(item, fileobj)
    ball.close()

exporters['tar'] = tar_exporter

def tgz_exporter(tree, dest, root):
    tar_exporter(tree, dest, root, compression='gz')
exporters['tgz'] = tgz_exporter

def tbz_exporter(tree, dest, root):
    tar_exporter(tree, dest, root, compression='bz2')
exporters['tbz2'] = tbz_exporter

def find_ids_across_trees(filenames, trees, require_versioned=True):
    """Find the ids corresponding to specified filenames.

    All matches in all trees will be used, and all children of matched
    directories will be used.

    :param filenames: The filenames to find file_ids for (if None, returns
        None)
    :param trees: The trees to find file_ids within
    :param require_versioned: if true, all specified filenames must occur in
        at least one tree.
    :return: a set of file ids for the specified filenames and their children.
    """
    if not filenames:
        return None
    specified_path_ids = _find_ids_across_trees(filenames, trees,
        require_versioned)
    return _find_children_across_trees(specified_path_ids, trees)

def _find_ids_across_trees(filenames, trees, require_versioned):
    """Find the ids corresponding to specified filenames.

    All matches in all trees will be used, but subdirectories are not scanned.

    :param filenames: The filenames to find file_ids for
    :param trees: The trees to find file_ids within
    :param require_versioned: if true, all specified filenames must occur in
        at least one tree.
    :return: a set of file ids for the specified filenames
    """
    not_versioned = []
    interesting_ids = set()
    for tree_path in filenames:
        not_found = True
        for tree in trees:
            file_id = tree.path2id(tree_path)
            if file_id is not None:
                interesting_ids.add(file_id)
                not_found = False
        if not_found:
            not_versioned.append(tree_path)
    if len(not_versioned) > 0 and require_versioned:
        raise errors.PathsNotVersionedError(not_versioned)
    return interesting_ids

def _find_children_across_trees(specified_ids, trees):
    """Return a set including specified ids and their children.

    All matches in all trees will be used.

    :param trees: The trees to find file_ids within
    :return: a set containing all specified ids and their children
    """
    interesting_ids = set(specified_ids)
    pending = interesting_ids
    # now handle children of interesting ids
    # we loop so that we handle all children of each id in both trees
    while len(pending) > 0:
        new_pending = set()
        for file_id in pending:
            for tree in trees:
                if not tree.has_id(file_id):
                    continue
                for child_id in tree.iter_children(file_id):
                    if child_id not in interesting_ids:
                        new_pending.add(child_id)
        interesting_ids.update(new_pending)
        pending = new_pending
    return interesting_ids

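# Illustrative sketch (the tree variables and paths are hypothetical): gather
# the file ids for two paths, plus everything beneath them, across a working
# tree and its basis tree.
#
#   ids = find_ids_across_trees(['doc', 'README'], [work_tree, basis_tree])
#   # 'ids' is a set of file ids, or None when the path list is empty/None.
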

class InterTree(InterObject):
    """This class represents operations taking place between two Trees.

    Its instances have methods like 'compare' and contain references to the
    source and target trees these operations are to be carried out on.

    Clients of bzrlib should not need to use InterTree directly, rather they
    should use the convenience methods on Tree such as 'Tree.compare()' which
    will pass through to InterTree as appropriate.
    """

    def compare(self, want_unchanged=False, specific_files=None,
        extra_trees=None, require_versioned=False, include_root=False,
        want_unversioned=False):
        """Return the changes from source to target.

        :return: A TreeDelta.
        :param specific_files: An optional list of file paths to restrict the
            comparison to. When mapping filenames to ids, all matches in all
            trees (including optional extra_trees) are used, and all children of
            matched directories are included.
        :param want_unchanged: An optional boolean requesting the inclusion of
            unchanged entries in the result.
        :param extra_trees: An optional list of additional trees to use when
            mapping the contents of specific_files (paths) to file_ids.
        :param require_versioned: An optional boolean (defaults to False). When
            supplied and True all the 'specific_files' must be versioned, or
            a PathsNotVersionedError will be thrown.
        :param want_unversioned: Scan for unversioned paths.
        """
        # NB: show_status depends on being able to pass in non-versioned files
        # and report them as unknown
        trees = (self.source,)
        if extra_trees is not None:
            trees = trees + tuple(extra_trees)
        # target is usually the newer tree:
        specific_file_ids = self.target.paths2ids(specific_files, trees,
            require_versioned=require_versioned)
        if specific_files and not specific_file_ids:
            # All files are unversioned, so just return an empty delta
            # _compare_trees would think we want a complete delta
            result = delta.TreeDelta()
            fake_entry = InventoryFile('unused', 'unused', 'unused')
            result.unversioned = [(path, None,
                self.target._comparison_data(fake_entry, path)[0]) for path in
                specific_files]
            return result
        return delta._compare_trees(self.source, self.target, want_unchanged,
            specific_files, include_root, extra_trees=extra_trees,
            require_versioned=require_versioned,
            want_unversioned=want_unversioned)

    def iter_changes(self, include_unchanged=False,
                      specific_files=None, pb=None, extra_trees=[],
                      require_versioned=True, want_unversioned=False):
        """Generate an iterator of changes between trees.

        A tuple is returned:
        (file_id, (path_in_source, path_in_target),
         changed_content, versioned, parent, name, kind,
         executable)

        Changed_content is True if the file's content has changed. This
        includes changes to its kind, and to a symlink's target.

        versioned, parent, name, kind, executable are tuples of (from, to).
        If a file is missing in a tree, its kind is None.

        Iteration is done in parent-to-child order, relative to the target
        tree.

        There is no guarantee that all paths are in sorted order: the
        requirement to expand the search due to renames may result in children
        that should be found early being found late in the search, after
        lexically later results have been returned.
        :param require_versioned: Raise errors.PathsNotVersionedError if a
            path in the specific_files list is not versioned in one of
            source, target or extra_trees.
        :param want_unversioned: Should unversioned files be returned in the
            output. An unversioned file is defined as one with (False, False)
            for the versioned pair.
        """
        lookup_trees = [self.source]
        if extra_trees:
            lookup_trees.extend(extra_trees)
        if specific_files == []:
            specific_file_ids = []
        else:
            specific_file_ids = self.target.paths2ids(specific_files,
                lookup_trees, require_versioned=require_versioned)
        if want_unversioned:
            all_unversioned = sorted([(p.split('/'), p) for p in
                                     self.target.extras()
                if specific_files is None or
                    osutils.is_inside_any(specific_files, p)])
            all_unversioned = deque(all_unversioned)
        else:
            all_unversioned = deque()
        to_paths = {}
        from_entries_by_dir = list(self.source.iter_entries_by_dir(
            specific_file_ids=specific_file_ids))
        from_data = dict((e.file_id, (p, e)) for p, e in from_entries_by_dir)
        to_entries_by_dir = list(self.target.iter_entries_by_dir(
            specific_file_ids=specific_file_ids))
        num_entries = len(from_entries_by_dir) + len(to_entries_by_dir)
        entry_count = 0
        # the unversioned path lookup only occurs on real trees - where there
        # can be extras. So the fake_entry is solely used to look up
        # executable bit values when execute is not supported.
        fake_entry = InventoryFile('unused', 'unused', 'unused')
        for to_path, to_entry in to_entries_by_dir:
            while all_unversioned and all_unversioned[0][0] < to_path.split('/'):
                unversioned_path = all_unversioned.popleft()
                to_kind, to_executable, to_stat = \
                    self.target._comparison_data(fake_entry, unversioned_path[1])
                yield (None, (None, unversioned_path[1]), True, (False, False),
                    (None, None),
                    (None, unversioned_path[0][-1]),
                    (None, to_kind),
                    (None, to_executable))
file_id = to_entry.file_id
964
to_paths[file_id] = to_path
966
changed_content = False
967
from_path, from_entry = from_data.get(file_id, (None, None))
968
from_versioned = (from_entry is not None)
969
if from_entry is not None:
970
from_versioned = True
971
from_name = from_entry.name
972
from_parent = from_entry.parent_id
973
from_kind, from_executable, from_stat = \
974
self.source._comparison_data(from_entry, from_path)
977
from_versioned = False
981
from_executable = None
982
versioned = (from_versioned, True)
983
to_kind, to_executable, to_stat = \
984
self.target._comparison_data(to_entry, to_path)
985
kind = (from_kind, to_kind)
986
if kind[0] != kind[1]:
987
changed_content = True
            elif from_kind == 'file':
                if (self.source.get_file_sha1(file_id, from_path, from_stat) !=
                    self.target.get_file_sha1(file_id, to_path, to_stat)):
                    changed_content = True
            elif from_kind == 'symlink':
                if (self.source.get_symlink_target(file_id) !=
                    self.target.get_symlink_target(file_id)):
                    changed_content = True
                # XXX: Yes, the indentation below is wrong. But fixing it broke
                # test_merge.TestMergerEntriesLCAOnDisk.
                # test_nested_tree_subtree_renamed_and_modified. We'll wait for
                # the fix from bzr.dev -- vila 2009026
                elif from_kind == 'tree-reference':
                    if (self.source.get_reference_revision(file_id, from_path)
                        != self.target.get_reference_revision(file_id, to_path)):
                        changed_content = True
            parent = (from_parent, to_entry.parent_id)
            name = (from_name, to_entry.name)
            executable = (from_executable, to_executable)
            if pb is not None:
                pb.update('comparing files', entry_count, num_entries)
            if (changed_content is not False or versioned[0] != versioned[1]
                or parent[0] != parent[1] or name[0] != name[1] or
                executable[0] != executable[1] or include_unchanged):
                yield (file_id, (from_path, to_path), changed_content,
                    versioned, parent, name, kind, executable)
        while all_unversioned:
            # yield any trailing unversioned paths
            unversioned_path = all_unversioned.popleft()
            to_kind, to_executable, to_stat = \
                self.target._comparison_data(fake_entry, unversioned_path[1])
            yield (None, (None, unversioned_path[1]), True, (False, False),
                (None, None),
                (None, unversioned_path[0][-1]),
                (None, to_kind),
                (None, to_executable))

        def get_to_path(to_entry):
            if to_entry.parent_id is None:
                to_path = '' # the root
            else:
                if to_entry.parent_id not in to_paths:
                    # recurse up
                    return get_to_path(self.target.inventory[to_entry.parent_id])
                to_path = osutils.pathjoin(to_paths[to_entry.parent_id],
                                           to_entry.name)
            to_paths[to_entry.file_id] = to_path
            return to_path

        for path, from_entry in from_entries_by_dir:
            file_id = from_entry.file_id
            if file_id in to_paths:
                # already returned
                continue
            if not file_id in self.target.all_file_ids():
                # common case - paths we have not emitted are not present in
                # the target.
                to_path = None
            else:
                to_path = get_to_path(self.target.inventory[file_id])
            entry_count += 1
            if pb is not None:
                pb.update('comparing files', entry_count, num_entries)
            versioned = (True, False)
            parent = (from_entry.parent_id, None)
            name = (from_entry.name, None)
            from_kind, from_executable, stat_value = \
                self.source._comparison_data(from_entry, path)
            kind = (from_kind, None)
            executable = (from_executable, None)
            changed_content = from_kind is not None
            # the parent's path is necessarily known at this point.
            yield(file_id, (path, to_path), changed_content, versioned, parent,
                  name, kind, executable)

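    # Hedged sketch of consuming iter_changes(); the unpacking follows the
    # tuple layout documented above, and the variable names are hypothetical.
    #
    #   for (file_id, (from_path, to_path), changed_content, versioned,
    #        parent, name, kind, executable) in inter_tree.iter_changes():
    #       if changed_content:
    #           print '%s -> %s: content changed' % (from_path, to_path)
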

class MultiWalker(object):
    """Walk multiple trees simultaneously, getting combined results."""

    # Note: This could be written to not assume you can do out-of-order
    #       lookups. Instead any nodes that don't match in all trees could be
    #       marked as 'deferred', and then returned in the final cleanup loop.
    #       For now, I think it is "nicer" to return things as close to the
    #       "master_tree" order as we can.

    def __init__(self, master_tree, other_trees):
        """Create a new MultiWalker.

        All trees being walked must implement "iter_entries_by_dir()", such
        that they yield (path, object) tuples, where that object will have a
        '.file_id' member, that can be used to check equality.

        :param master_tree: All trees will be 'slaved' to the master_tree such
            that nodes in master_tree will be used as 'first-pass' sync points.
            Any nodes that aren't in master_tree will be merged in a second
            pass.
        :param other_trees: A list of other trees to walk simultaneously.
        """
        self._master_tree = master_tree
        self._other_trees = other_trees

        # Keep track of any nodes that were properly processed just out of
        # order, that way we don't return them at the end, we don't have to
        # track *all* processed file_ids, just the out-of-order ones
        self._out_of_order_processed = set()

    @staticmethod
    def _step_one(iterator):
        """Step an iter_entries_by_dir iterator.

        :return: (has_more, path, ie)
            If has_more is False, path and ie will be None.
        """
        try:
            path, ie = iterator.next()
        except StopIteration:
            return False, None, None
        else:
            return True, path, ie

    @staticmethod
    def _cmp_path_by_dirblock(path1, path2):
        """Compare two paths based on what directory they are in.

        This generates a sort order, such that all children of a directory are
        sorted together, and grandchildren are in the same order as the
        children appear. But all grandchildren come after all children.

        :param path1: first path
        :param path2: the second path
        :return: negative number if ``path1`` comes first,
            0 if paths are equal
            and a positive number if ``path2`` sorts first
        """
        # Shortcut this special case
        if path1 == path2:
            return 0
        # This is stolen from _dirstate_helpers_py.py, only switching it to
        # Unicode objects. Consider using encode_utf8() and then using the
        # optimized versions, or maybe writing optimized unicode versions.
        if not isinstance(path1, unicode):
            raise TypeError("'path1' must be a unicode string, not %s: %r"
                            % (type(path1), path1))
        if not isinstance(path2, unicode):
            raise TypeError("'path2' must be a unicode string, not %s: %r"
                            % (type(path2), path2))
        return cmp(MultiWalker._path_to_key(path1),
                   MultiWalker._path_to_key(path2))

    @staticmethod
    def _path_to_key(path):
        dirname, basename = osutils.split(path)
        return (dirname.split(u'/'), basename)

    def _lookup_by_file_id(self, extra_entries, other_tree, file_id):
        """Lookup an inventory entry by file_id.

        This is called when an entry is missing in the normal order.
        Generally this is because a file was either renamed, or it was
        deleted/added. If the entry was found in the inventory and not in
        extra_entries, it will be added to self._out_of_order_processed

        :param extra_entries: A dictionary of {file_id: (path, ie)}. This
            should be filled with entries that were found before they were
            used. If file_id is present, it will be removed from the
            dictionary.
        :param other_tree: The Tree to search, in case we didn't find the entry
            yet.
        :param file_id: The file_id to look for
        :return: (path, ie) if found or (None, None) if not present.
        """
        if file_id in extra_entries:
            return extra_entries.pop(file_id)
        # TODO: Is id2path better as the first call, or is
        #       inventory[file_id] better as a first check?
        try:
            cur_path = other_tree.id2path(file_id)
        except errors.NoSuchId:
            cur_path = None
        if cur_path is None:
            return (None, None)
        else:
            self._out_of_order_processed.add(file_id)
            cur_ie = other_tree.inventory[file_id]
            return (cur_path, cur_ie)

    def iter_all(self):
        """Match up the values in the different trees."""
        for result in self._walk_master_tree():
            yield result
        self._finish_others()
        for result in self._walk_others():
            yield result

def _walk_master_tree(self):
1184
"""First pass, walk all trees in lock-step.
1186
When we are done, all nodes in the master_tree will have been
1187
processed. _other_walkers, _other_entries, and _others_extra will be
1188
set on 'self' for future processing.
1190
# This iterator has the most "inlining" done, because it tends to touch
1191
# every file in the tree, while the others only hit nodes that don't
1193
master_iterator = self._master_tree.iter_entries_by_dir()
1195
other_walkers = [other.iter_entries_by_dir()
1196
for other in self._other_trees]
1197
other_entries = [self._step_one(walker) for walker in other_walkers]
1198
# Track extra nodes in the other trees
1199
others_extra = [{} for i in xrange(len(self._other_trees))]

        master_has_more = True
        step_one = self._step_one
        lookup_by_file_id = self._lookup_by_file_id
        out_of_order_processed = self._out_of_order_processed

        while master_has_more:
            (master_has_more, path, master_ie) = step_one(master_iterator)
            if not master_has_more:
                break

            file_id = master_ie.file_id
            other_values = []
            other_values_append = other_values.append
            next_other_entries = []
            next_other_entries_append = next_other_entries.append
            for idx, (other_has_more, other_path, other_ie) in enumerate(other_entries):
                if not other_has_more:
                    other_values_append(lookup_by_file_id(
                        others_extra[idx], self._other_trees[idx], file_id))
                    next_other_entries_append((False, None, None))
                elif file_id == other_ie.file_id:
                    # This is the critical code path, as most of the entries
                    # should match between most trees.
                    other_values_append((other_path, other_ie))
                    next_other_entries_append(step_one(other_walkers[idx]))
                else:
                    # This walker did not match, step it until it either
                    # matches, or we know we are past the current walker.
                    other_walker = other_walkers[idx]
                    other_extra = others_extra[idx]
                    while (other_has_more and
                           self._cmp_path_by_dirblock(other_path, path) < 0):
                        other_file_id = other_ie.file_id
                        if other_file_id not in out_of_order_processed:
                            other_extra[other_file_id] = (other_path, other_ie)
                        other_has_more, other_path, other_ie = \
                            step_one(other_walker)
                    if other_has_more and other_ie.file_id == file_id:
                        # We ended up walking to this point, match and step
                        # again
                        other_values_append((other_path, other_ie))
                        other_has_more, other_path, other_ie = \
                            step_one(other_walker)
                    else:
                        # This record isn't in the normal order, see if it
                        # exists at all.
                        other_values_append(lookup_by_file_id(
                            other_extra, self._other_trees[idx], file_id))
                    next_other_entries_append((other_has_more, other_path,
                                               other_ie))
            other_entries = next_other_entries

            # We've matched all the walkers, yield this datapoint
            yield path, file_id, master_ie, other_values
        self._other_walkers = other_walkers
        self._other_entries = other_entries
        self._others_extra = others_extra

    def _finish_others(self):
        """Finish walking the other iterators, so we get all entries."""
        for idx, info in enumerate(self._other_entries):
            other_extra = self._others_extra[idx]
            (other_has_more, other_path, other_ie) = info
            while other_has_more:
                other_file_id = other_ie.file_id
                if other_file_id not in self._out_of_order_processed:
                    other_extra[other_file_id] = (other_path, other_ie)
                other_has_more, other_path, other_ie = \
                    self._step_one(self._other_walkers[idx])
        del self._other_entries

    def _walk_others(self):
        """Finish up by walking all the 'deferred' nodes."""
        # TODO: One alternative would be to grab all possible unprocessed
        #       file_ids, and then sort by path, and then yield them. That
        #       might ensure better ordering, in case a caller strictly
        #       requires parents before children.
        for idx, other_extra in enumerate(self._others_extra):
            others = sorted(other_extra.itervalues(),
                            key=lambda x: self._path_to_key(x[0]))
            for other_path, other_ie in others:
                file_id = other_ie.file_id
                # We don't need to check out_of_order_processed here, because
                # the lookup_by_file_id will be removing anything processed
                # from the extras cache
                other_extra.pop(file_id)
                other_values = [(None, None) for i in xrange(idx)]
                other_values.append((other_path, other_ie))
                for alt_idx, alt_extra in enumerate(self._others_extra[idx+1:]):
                    alt_idx = alt_idx + idx + 1
                    alt_extra = self._others_extra[alt_idx]
                    alt_tree = self._other_trees[alt_idx]
                    other_values.append(self._lookup_by_file_id(
                        alt_extra, alt_tree, file_id))
                yield other_path, file_id, None, other_values
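
    # Hedged usage sketch, assuming the iter_all() entry point reconstructed
    # above: walk one tree against two others in step. The tree variables are
    # hypothetical. Each yielded row pairs the master entry with the matching
    # (path, entry) from every other tree, or (None, None) where a tree has no
    # entry for that file_id.
    #
    #   walker = MultiWalker(master_tree, [other_tree_1, other_tree_2])
    #   for path, file_id, master_ie, other_values in walker.iter_all():
    #       (path_1, ie_1), (path_2, ie_2) = other_values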