1
# Copyright (C) 2007-2010 Canonical Ltd
3
# This program is free software; you can redistribute it and/or modify
4
# it under the terms of the GNU General Public License as published by
5
# the Free Software Foundation; either version 2 of the License, or
6
# (at your option) any later version.
8
# This program is distributed in the hope that it will be useful,
9
# but WITHOUT ANY WARRANTY; without even the implied warranty of
10
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11
# GNU General Public License for more details.
13
# You should have received a copy of the GNU General Public License
14
# along with this program; if not, write to the Free Software
15
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17
"""Helper functions for DirState.
19
This is the python implementation for DirState functions.
29
from bzrlib import cache_utf8, errors, osutils
30
from bzrlib.dirstate import DirState
31
from bzrlib.osutils import parent_directories, pathjoin, splitpath
34
# This is the Windows equivalent of ENOTDIR
35
# It is defined in pywin32.winerror, but we don't want a strong dependency for
37
# XXX: Perhaps we could get it from a windows header ?
38
cdef int ERROR_PATH_NOT_FOUND
39
ERROR_PATH_NOT_FOUND = 3
40
cdef int ERROR_DIRECTORY
43
#python2.4 support, and other platform-dependent includes
44
cdef extern from "python-compat.h":
45
unsigned long htonl(unsigned long)
47
# Give Pyrex some function definitions for it to understand.
48
# All of these are just hints to Pyrex, so that it can try to convert python
49
# objects into similar C objects. (such as PyInt => int).
50
# In anything defined 'cdef extern from XXX' the real C header will be
51
# imported, and the real definition will be used from there. So these are just
52
# hints, and do not need to match exactly to the C definitions.
55
ctypedef unsigned long size_t
57
cdef extern from "_dirstate_helpers_pyx.h":
62
cdef extern from "stdlib.h":
63
unsigned long int strtoul(char *nptr, char **endptr, int base)
66
cdef extern from 'sys/stat.h':
69
# On win32, this actually comes from "python-compat.h"
73
# These functions allow us access to a bit of the 'bare metal' of python
74
# objects, rather than going through the object abstraction. (For example,
75
# PyList_Append, rather than getting the 'append' attribute of the object, and
76
# creating a tuple, and then using PyCallObject).
77
# Functions that return (or take) a void* are meant to grab a C PyObject*. This
78
# differs from the Pyrex 'object'. If you declare a variable as 'object' Pyrex
79
# will automatically Py_INCREF and Py_DECREF when appropriate. But for some
80
# inner loops, we don't need to do that at all, as the reference only lasts for
82
# Note that the C API GetItem calls borrow references, so pyrex does the wrong
83
# thing if you declare e.g. object PyList_GetItem(object lst, int index) - you
84
# need to manually Py_INCREF yourself.
85
cdef extern from "Python.h":
86
ctypedef int Py_ssize_t
87
ctypedef struct PyObject:
89
int PyList_Append(object lst, object item) except -1
90
void *PyList_GetItem_object_void "PyList_GET_ITEM" (object lst, int index)
91
void *PyList_GetItem_void_void "PyList_GET_ITEM" (void * lst, int index)
92
object PyList_GET_ITEM(object lst, Py_ssize_t index)
93
int PyList_CheckExact(object)
94
Py_ssize_t PyList_GET_SIZE (object p)
96
void *PyTuple_GetItem_void_void "PyTuple_GET_ITEM" (void* tpl, int index)
97
object PyTuple_GetItem_void_object "PyTuple_GET_ITEM" (void* tpl, int index)
98
object PyTuple_GET_ITEM(object tpl, Py_ssize_t index)
101
char *PyString_AsString(object p)
102
char *PyString_AsString_obj "PyString_AsString" (PyObject *string)
103
char *PyString_AS_STRING_void "PyString_AS_STRING" (void *p)
104
int PyString_AsStringAndSize(object str, char **buffer, Py_ssize_t *length) except -1
105
object PyString_FromString(char *)
106
object PyString_FromStringAndSize(char *, Py_ssize_t)
107
int PyString_Size(object p)
108
int PyString_GET_SIZE_void "PyString_GET_SIZE" (void *p)
109
int PyString_CheckExact(object p)
110
void Py_INCREF(object o)
111
void Py_DECREF(object o)
114
cdef extern from "string.h":
115
int strncmp(char *s1, char *s2, int len)
116
void *memchr(void *s, int c, size_t len)
117
int memcmp(void *b1, void *b2, size_t len)
118
# ??? memrchr is a GNU extension :(
119
# void *memrchr(void *s, int c, size_t len)
121
# cimport all of the definitions we will need to access
122
from _static_tuple_c cimport import_static_tuple_c, StaticTuple, \
123
StaticTuple_New, StaticTuple_SET_ITEM
125
import_static_tuple_c()
127
cdef void* _my_memrchr(void *s, int c, size_t n): # cannot_raise
128
# memrchr seems to be a GNU extension, so we have to implement it ourselves
141
def _py_memrchr(s, c):
142
"""Just to expose _my_memrchr for testing.
144
:param s: The Python string to search
145
:param c: The character to search for
146
:return: The offset to the last instance of 'c' in s
153
_s = PyString_AsString(s)
154
length = PyString_Size(s)
156
_c = PyString_AsString(c)
157
assert PyString_Size(c) == 1,\
158
'Must be a single character string, not %s' % (c,)
159
found = _my_memrchr(_s, _c[0], length)
162
return <char*>found - <char*>_s
165
cdef object safe_string_from_size(char *s, Py_ssize_t size):
167
raise AssertionError(
168
'tried to create a string with an invalid size: %d'
170
return PyString_FromStringAndSize(s, size)
173
cdef int _is_aligned(void *ptr): # cannot_raise
174
"""Is this pointer aligned to an integer size offset?
176
:return: 1 if this pointer is aligned, 0 otherwise.
178
return ((<intptr_t>ptr) & ((sizeof(int))-1)) == 0
181
cdef int _cmp_by_dirs(char *path1, int size1, char *path2, int size2): # cannot_raise
182
cdef unsigned char *cur1
183
cdef unsigned char *cur2
184
cdef unsigned char *end1
185
cdef unsigned char *end2
191
if path1 == path2 and size1 == size2:
194
end1 = <unsigned char*>path1+size1
195
end2 = <unsigned char*>path2+size2
197
# Use 32-bit comparisons for the matching portion of the string.
198
# Almost all CPU's are faster at loading and comparing 32-bit integers,
199
# than they are at 8-bit integers.
200
# 99% of the time, these will be aligned, but in case they aren't just skip
202
if _is_aligned(path1) and _is_aligned(path2):
203
cur_int1 = <int*>path1
204
cur_int2 = <int*>path2
205
end_int1 = <int*>(path1 + size1 - (size1 % sizeof(int)))
206
end_int2 = <int*>(path2 + size2 - (size2 % sizeof(int)))
208
while cur_int1 < end_int1 and cur_int2 < end_int2:
209
if cur_int1[0] != cur_int2[0]:
211
cur_int1 = cur_int1 + 1
212
cur_int2 = cur_int2 + 1
214
cur1 = <unsigned char*>cur_int1
215
cur2 = <unsigned char*>cur_int2
217
cur1 = <unsigned char*>path1
218
cur2 = <unsigned char*>path2
220
while cur1 < end1 and cur2 < end2:
221
if cur1[0] == cur2[0]:
222
# This character matches, just go to the next one
226
# The current characters do not match
228
return -1 # Reached the end of path1 segment first
229
elif cur2[0] == c'/':
230
return 1 # Reached the end of path2 segment first
231
elif cur1[0] < cur2[0]:
236
# We reached the end of at least one of the strings
238
return 1 # Not at the end of cur1, must be at the end of cur2
240
return -1 # At the end of cur1, but not at cur2
241
# We reached the end of both strings
245
def cmp_by_dirs(path1, path2):
246
"""Compare two paths directory by directory.
248
This is equivalent to doing::
250
cmp(path1.split('/'), path2.split('/'))
252
The idea is that you should compare path components separately. This
253
differs from plain ``cmp(path1, path2)`` for paths like ``'a-b'`` and
254
``a/b``. "a-b" comes after "a" but would come before "a/b" lexically.
256
:param path1: first path
257
:param path2: second path
258
:return: negative number if ``path1`` comes first,
259
0 if paths are equal,
260
and positive number if ``path2`` sorts first
262
if not PyString_CheckExact(path1):
263
raise TypeError("'path1' must be a plain string, not %s: %r"
264
% (type(path1), path1))
265
if not PyString_CheckExact(path2):
266
raise TypeError("'path2' must be a plain string, not %s: %r"
267
% (type(path2), path2))
268
return _cmp_by_dirs(PyString_AsString(path1),
269
PyString_Size(path1),
270
PyString_AsString(path2),
271
PyString_Size(path2))
274
def _cmp_path_by_dirblock(path1, path2):
275
"""Compare two paths based on what directory they are in.
277
This generates a sort order, such that all children of a directory are
278
sorted together, and grandchildren are in the same order as the
279
children appear. But all grandchildren come after all children.
281
In other words, all entries in a directory are sorted together, and
282
directorys are sorted in cmp_by_dirs order.
284
:param path1: first path
285
:param path2: the second path
286
:return: negative number if ``path1`` comes first,
288
and a positive number if ``path2`` sorts first
290
if not PyString_CheckExact(path1):
291
raise TypeError("'path1' must be a plain string, not %s: %r"
292
% (type(path1), path1))
293
if not PyString_CheckExact(path2):
294
raise TypeError("'path2' must be a plain string, not %s: %r"
295
% (type(path2), path2))
296
return _cmp_path_by_dirblock_intern(PyString_AsString(path1),
297
PyString_Size(path1),
298
PyString_AsString(path2),
299
PyString_Size(path2))
302
cdef int _cmp_path_by_dirblock_intern(char *path1, int path1_len,
303
char *path2, int path2_len): # cannot_raise
304
"""Compare two paths by what directory they are in.
306
see ``_cmp_path_by_dirblock`` for details.
309
cdef int dirname1_len
311
cdef int dirname2_len
313
cdef int basename1_len
315
cdef int basename2_len
319
if path1_len == 0 and path2_len == 0:
322
if path1 == path2 and path1_len == path2_len:
331
basename1 = <char*>_my_memrchr(path1, c'/', path1_len)
333
if basename1 == NULL:
335
basename1_len = path1_len
340
dirname1_len = basename1 - path1
341
basename1 = basename1 + 1
342
basename1_len = path1_len - dirname1_len - 1
344
basename2 = <char*>_my_memrchr(path2, c'/', path2_len)
346
if basename2 == NULL:
348
basename2_len = path2_len
353
dirname2_len = basename2 - path2
354
basename2 = basename2 + 1
355
basename2_len = path2_len - dirname2_len - 1
357
cmp_val = _cmp_by_dirs(dirname1, dirname1_len,
358
dirname2, dirname2_len)
362
cur_len = basename1_len
363
if basename2_len < basename1_len:
364
cur_len = basename2_len
366
cmp_val = memcmp(basename1, basename2, cur_len)
369
if basename1_len == basename2_len:
371
if basename1_len < basename2_len:
376
def _bisect_path_left(paths, path):
377
"""Return the index where to insert path into paths.
379
This uses a path-wise comparison so we get::
389
:param paths: A list of paths to search through
390
:param path: A single path to insert
391
:return: An offset where 'path' can be inserted.
392
:seealso: bisect.bisect_left
403
if not PyList_CheckExact(paths):
404
raise TypeError("you must pass a python list for 'paths' not: %s %r"
405
% (type(paths), paths))
406
if not PyString_CheckExact(path):
407
raise TypeError("you must pass a string for 'path' not: %s %r"
408
% (type(path), path))
413
path_cstr = PyString_AsString(path)
414
path_size = PyString_Size(path)
417
_mid = (_lo + _hi) / 2
418
cur = PyList_GetItem_object_void(paths, _mid)
419
cur_cstr = PyString_AS_STRING_void(cur)
420
cur_size = PyString_GET_SIZE_void(cur)
421
if _cmp_path_by_dirblock_intern(cur_cstr, cur_size,
422
path_cstr, path_size) < 0:
429
def _bisect_path_right(paths, path):
430
"""Return the index where to insert path into paths.
432
This uses a path-wise comparison so we get::
442
:param paths: A list of paths to search through
443
:param path: A single path to insert
444
:return: An offset where 'path' can be inserted.
445
:seealso: bisect.bisect_right
456
if not PyList_CheckExact(paths):
457
raise TypeError("you must pass a python list for 'paths' not: %s %r"
458
% (type(paths), paths))
459
if not PyString_CheckExact(path):
460
raise TypeError("you must pass a string for 'path' not: %s %r"
461
% (type(path), path))
466
path_cstr = PyString_AsString(path)
467
path_size = PyString_Size(path)
470
_mid = (_lo + _hi) / 2
471
cur = PyList_GetItem_object_void(paths, _mid)
472
cur_cstr = PyString_AS_STRING_void(cur)
473
cur_size = PyString_GET_SIZE_void(cur)
474
if _cmp_path_by_dirblock_intern(path_cstr, path_size,
475
cur_cstr, cur_size) < 0:
482
def bisect_dirblock(dirblocks, dirname, lo=0, hi=None, cache=None):
483
"""Return the index where to insert dirname into the dirblocks.
485
The return value idx is such that all directories blocks in dirblock[:idx]
486
have names < dirname, and all blocks in dirblock[idx:] have names >=
489
Optional args lo (default 0) and hi (default len(dirblocks)) bound the
490
slice of a to be searched.
495
cdef char *dirname_cstr
496
cdef int dirname_size
501
if not PyList_CheckExact(dirblocks):
502
raise TypeError("you must pass a python list for 'dirblocks' not: %s %r"
503
% (type(dirblocks), dirblocks))
504
if not PyString_CheckExact(dirname):
505
raise TypeError("you must pass a string for dirname not: %s %r"
506
% (type(dirname), dirname))
513
dirname_cstr = PyString_AsString(dirname)
514
dirname_size = PyString_Size(dirname)
517
_mid = (_lo + _hi) / 2
518
# Grab the dirname for the current dirblock
519
# cur = dirblocks[_mid][0]
520
cur = PyTuple_GetItem_void_void(
521
PyList_GetItem_object_void(dirblocks, _mid), 0)
522
cur_cstr = PyString_AS_STRING_void(cur)
523
cur_size = PyString_GET_SIZE_void(cur)
524
if _cmp_by_dirs(cur_cstr, cur_size, dirname_cstr, dirname_size) < 0:
532
"""Maintain the current location, and return fields as you parse them."""
534
cdef object state # The DirState object
535
cdef object text # The overall string object
536
cdef char *text_cstr # Pointer to the beginning of text
537
cdef int text_size # Length of text
539
cdef char *end_cstr # End of text
540
cdef char *cur_cstr # Pointer to the current record
541
cdef char *next # Pointer to the end of this record
543
def __init__(self, text, state):
546
self.text_cstr = PyString_AsString(text)
547
self.text_size = PyString_Size(text)
548
self.end_cstr = self.text_cstr + self.text_size
549
self.cur_cstr = self.text_cstr
551
cdef char *get_next(self, int *size) except NULL:
552
"""Return a pointer to the start of the next field."""
554
cdef Py_ssize_t extra_len
556
if self.cur_cstr == NULL:
557
raise AssertionError('get_next() called when cur_str is NULL')
558
elif self.cur_cstr >= self.end_cstr:
559
raise AssertionError('get_next() called when there are no chars'
562
self.cur_cstr = <char*>memchr(next, c'\0', self.end_cstr - next)
563
if self.cur_cstr == NULL:
564
extra_len = self.end_cstr - next
565
raise errors.DirstateCorrupt(self.state,
566
'failed to find trailing NULL (\\0).'
567
' Trailing garbage: %r'
568
% safe_string_from_size(next, extra_len))
569
size[0] = self.cur_cstr - next
570
self.cur_cstr = self.cur_cstr + 1
573
cdef object get_next_str(self):
574
"""Get the next field as a Python string."""
577
next = self.get_next(&size)
578
return safe_string_from_size(next, size)
580
cdef int _init(self) except -1:
581
"""Get the pointer ready.
583
This assumes that the dirstate header has already been read, and we
584
already have the dirblock string loaded into memory.
585
This just initializes our memory pointers, etc for parsing of the
590
# The first field should be an empty string left over from the Header
591
first = self.get_next(&size)
592
if first[0] != c'\0' and size == 0:
593
raise AssertionError('First character should be null not: %s'
597
cdef object _get_entry(self, int num_trees, void **p_current_dirname,
599
"""Extract the next entry.
601
This parses the next entry based on the current location in
603
Each entry can be considered a "row" in the total table. And each row
604
has a fixed number of columns. It is generally broken up into "key"
605
columns, then "current" columns, and then "parent" columns.
607
:param num_trees: How many parent trees need to be parsed
608
:param p_current_dirname: A pointer to the current PyString
609
representing the directory name.
610
We pass this in as a void * so that pyrex doesn't have to
611
increment/decrement the PyObject reference counter for each
613
We use a pointer so that _get_entry can update it with the new
615
:param new_block: This is to let the caller know that it needs to
616
create a new directory block to store the next entry.
618
cdef StaticTuple path_name_file_id_key
620
cdef char *entry_size_cstr
621
cdef unsigned long int entry_size
622
cdef char* executable_cstr
623
cdef int is_executable
624
cdef char* dirname_cstr
629
cdef object fingerprint
632
# Read the 'key' information (dirname, name, file_id)
633
dirname_cstr = self.get_next(&cur_size)
634
# Check to see if we have started a new directory block.
635
# If so, then we need to create a new dirname PyString, so that it can
636
# be used in all of the tuples. This saves time and memory, by re-using
637
# the same object repeatedly.
639
# Do the cheap 'length of string' check first. If the string is a
640
# different length, then we *have* to be a different directory.
641
if (cur_size != PyString_GET_SIZE_void(p_current_dirname[0])
642
or strncmp(dirname_cstr,
643
# Extract the char* from our current dirname string. We
644
# know it is a PyString, so we can use
645
# PyString_AS_STRING, we use the _void version because
646
# we are tricking Pyrex by using a void* rather than an
648
PyString_AS_STRING_void(p_current_dirname[0]),
650
dirname = safe_string_from_size(dirname_cstr, cur_size)
651
p_current_dirname[0] = <void*>dirname
656
# Build up the key that will be used.
657
# By using <object>(void *) Pyrex will automatically handle the
658
# Py_INCREF that we need.
659
cur_dirname = <object>p_current_dirname[0]
660
# Use StaticTuple_New to pre-allocate, rather than creating a regular
661
# tuple and passing it to the StaticTuple constructor.
662
# path_name_file_id_key = StaticTuple(<object>p_current_dirname[0],
663
# self.get_next_str(),
664
# self.get_next_str(),
666
tmp = StaticTuple_New(3)
667
Py_INCREF(cur_dirname); StaticTuple_SET_ITEM(tmp, 0, cur_dirname)
668
cur_basename = self.get_next_str()
669
cur_file_id = self.get_next_str()
670
Py_INCREF(cur_basename); StaticTuple_SET_ITEM(tmp, 1, cur_basename)
671
Py_INCREF(cur_file_id); StaticTuple_SET_ITEM(tmp, 2, cur_file_id)
672
path_name_file_id_key = tmp
674
# Parse all of the per-tree information. current has the information in
675
# the same location as parent trees. The only difference is that 'info'
676
# is a 'packed_stat' for current, while it is a 'revision_id' for
678
# minikind, fingerprint, and info will be returned as regular python
680
# entry_size and is_executable will be parsed into a python Long and
681
# python Boolean, respectively.
682
# TODO: jam 20070718 Consider changin the entry_size conversion to
683
# prefer python Int when possible. They are generally faster to
684
# work with, and it will be rare that we have a file >2GB.
685
# Especially since this code is pretty much fixed at a max of
688
for i from 0 <= i < num_trees:
689
minikind = self.get_next_str()
690
fingerprint = self.get_next_str()
691
entry_size_cstr = self.get_next(&cur_size)
692
entry_size = strtoul(entry_size_cstr, NULL, 10)
693
executable_cstr = self.get_next(&cur_size)
694
is_executable = (executable_cstr[0] == c'y')
695
info = self.get_next_str()
696
# TODO: If we want to use StaticTuple_New here we need to be pretty
697
# careful. We are relying on a bit of Pyrex
698
# automatic-conversion from 'int' to PyInt, and that doesn't
699
# play well with the StaticTuple_SET_ITEM macro.
700
# Timing doesn't (yet) show a worthwile improvement in speed
701
# versus complexity and maintainability.
702
# tmp = StaticTuple_New(5)
703
# Py_INCREF(minikind); StaticTuple_SET_ITEM(tmp, 0, minikind)
704
# Py_INCREF(fingerprint); StaticTuple_SET_ITEM(tmp, 1, fingerprint)
705
# Py_INCREF(entry_size); StaticTuple_SET_ITEM(tmp, 2, entry_size)
706
# Py_INCREF(is_executable); StaticTuple_SET_ITEM(tmp, 3, is_executable)
707
# Py_INCREF(info); StaticTuple_SET_ITEM(tmp, 4, info)
708
# PyList_Append(trees, tmp)
709
PyList_Append(trees, StaticTuple(
711
fingerprint, # fingerprint
713
is_executable,# executable
714
info, # packed_stat or revision_id
717
# The returned tuple is (key, [trees])
718
ret = (path_name_file_id_key, trees)
719
# Ignore the trailing newline, but assert that it does exist, this
720
# ensures that we always finish parsing a line on an end-of-entry
722
trailing = self.get_next(&cur_size)
723
if cur_size != 1 or trailing[0] != c'\n':
724
raise errors.DirstateCorrupt(self.state,
725
'Bad parse, we expected to end on \\n, not: %d %s: %s'
726
% (cur_size, safe_string_from_size(trailing, cur_size),
730
def _parse_dirblocks(self):
731
"""Parse all dirblocks in the state file."""
733
cdef object current_block
735
cdef void * current_dirname
737
cdef int expected_entry_count
740
num_trees = self.state._num_present_parents() + 1
741
expected_entry_count = self.state._num_entries
743
# Ignore the first record
747
dirblocks = [('', current_block), ('', [])]
748
self.state._dirblocks = dirblocks
750
current_dirname = <void*>obj
754
# TODO: jam 2007-05-07 Consider pre-allocating some space for the
755
# members, and then growing and shrinking from there. If most
756
# directories have close to 10 entries in them, it would save a
757
# few mallocs if we default our list size to something
758
# reasonable. Or we could malloc it to something large (100 or
759
# so), and then truncate. That would give us a malloc + realloc,
760
# rather than lots of reallocs.
761
while self.cur_cstr < self.end_cstr:
762
entry = self._get_entry(num_trees, ¤t_dirname, &new_block)
764
# new block - different dirname
766
PyList_Append(dirblocks,
767
(<object>current_dirname, current_block))
768
PyList_Append(current_block, entry)
769
entry_count = entry_count + 1
770
if entry_count != expected_entry_count:
771
raise errors.DirstateCorrupt(self.state,
772
'We read the wrong number of entries.'
773
' We expected to read %s, but read %s'
774
% (expected_entry_count, entry_count))
775
self.state._split_root_dirblock_into_contents()
778
def _read_dirblocks(state):
779
"""Read in the dirblocks for the given DirState object.
781
This is tightly bound to the DirState internal representation. It should be
782
thought of as a member function, which is only separated out so that we can
783
re-write it in pyrex.
785
:param state: A DirState object.
787
:postcondition: The dirblocks will be loaded into the appropriate fields in
790
state._state_file.seek(state._end_of_header)
791
text = state._state_file.read()
792
# TODO: check the crc checksums. crc_measured = zlib.crc32(text)
794
reader = Reader(text, state)
796
reader._parse_dirblocks()
797
state._dirblock_state = DirState.IN_MEMORY_UNMODIFIED
800
cdef int minikind_from_mode(int mode): # cannot_raise
801
# in order of frequency:
811
_encode = binascii.b2a_base64
814
from struct import pack
815
cdef _pack_stat(stat_value):
816
"""return a string representing the stat value's key fields.
818
:param stat_value: A stat oject with st_size, st_mtime, st_ctime, st_dev,
819
st_ino and st_mode fields.
821
cdef char result[6*4] # 6 long ints
823
aliased = <int *>result
824
aliased[0] = htonl(stat_value.st_size)
825
aliased[1] = htonl(int(stat_value.st_mtime))
826
aliased[2] = htonl(int(stat_value.st_ctime))
827
aliased[3] = htonl(stat_value.st_dev)
828
aliased[4] = htonl(stat_value.st_ino & 0xFFFFFFFF)
829
aliased[5] = htonl(stat_value.st_mode)
830
packed = PyString_FromStringAndSize(result, 6*4)
831
return _encode(packed)[:-1]
834
def update_entry(self, entry, abspath, stat_value):
835
"""Update the entry based on what is actually on disk.
837
This function only calculates the sha if it needs to - if the entry is
838
uncachable, or clearly different to the first parent's entry, no sha
839
is calculated, and None is returned.
841
:param entry: This is the dirblock entry for the file in question.
842
:param abspath: The path on disk for this file.
843
:param stat_value: (optional) if we already have done a stat on the
845
:return: None, or The sha1 hexdigest of the file (40 bytes) or link
848
return _update_entry(self, entry, abspath, stat_value)
851
cdef _update_entry(self, entry, abspath, stat_value):
852
"""Update the entry based on what is actually on disk.
854
This function only calculates the sha if it needs to - if the entry is
855
uncachable, or clearly different to the first parent's entry, no sha
856
is calculated, and None is returned.
858
:param self: The dirstate object this is operating on.
859
:param entry: This is the dirblock entry for the file in question.
860
:param abspath: The path on disk for this file.
861
:param stat_value: The stat value done on the path.
862
:return: None, or The sha1 hexdigest of the file (40 bytes) or link
865
# TODO - require pyrex 0.9.8, then use a pyd file to define access to the
866
# _st mode of the compiled stat objects.
867
cdef int minikind, saved_minikind
869
cdef int worth_saving
870
minikind = minikind_from_mode(stat_value.st_mode)
873
packed_stat = _pack_stat(stat_value)
874
details = PyList_GetItem_void_void(PyTuple_GetItem_void_void(<void *>entry, 1), 0)
875
saved_minikind = PyString_AsString_obj(<PyObject *>PyTuple_GetItem_void_void(details, 0))[0]
876
if minikind == c'd' and saved_minikind == c't':
878
saved_link_or_sha1 = PyTuple_GetItem_void_object(details, 1)
879
saved_file_size = PyTuple_GetItem_void_object(details, 2)
880
saved_executable = PyTuple_GetItem_void_object(details, 3)
881
saved_packed_stat = PyTuple_GetItem_void_object(details, 4)
882
# Deal with pyrex decrefing the objects
883
Py_INCREF(saved_link_or_sha1)
884
Py_INCREF(saved_file_size)
885
Py_INCREF(saved_executable)
886
Py_INCREF(saved_packed_stat)
887
#(saved_minikind, saved_link_or_sha1, saved_file_size,
888
# saved_executable, saved_packed_stat) = entry[1][0]
890
if (minikind == saved_minikind
891
and packed_stat == saved_packed_stat):
892
# The stat hasn't changed since we saved, so we can re-use the
897
# size should also be in packed_stat
898
if saved_file_size == stat_value.st_size:
899
return saved_link_or_sha1
901
# If we have gotten this far, that means that we need to actually
902
# process this entry.
906
executable = self._is_executable(stat_value.st_mode,
908
if self._cutoff_time is None:
909
self._sha_cutoff_time()
910
if (stat_value.st_mtime < self._cutoff_time
911
and stat_value.st_ctime < self._cutoff_time
912
and len(entry[1]) > 1
913
and entry[1][1][0] != 'a'):
914
# Could check for size changes for further optimised
915
# avoidance of sha1's. However the most prominent case of
916
# over-shaing is during initial add, which this catches.
917
link_or_sha1 = self._sha1_file(abspath)
918
entry[1][0] = ('f', link_or_sha1, stat_value.st_size,
919
executable, packed_stat)
921
# This file is not worth caching the sha1. Either it is too new, or
922
# it is newly added. Regardless, the only things we are changing
923
# are derived from the stat, and so are not worth caching. So we do
924
# *not* set the IN_MEMORY_MODIFIED flag. (But we'll save the
925
# updated values if there is *other* data worth saving.)
926
entry[1][0] = ('f', '', stat_value.st_size, executable,
929
elif minikind == c'd':
930
entry[1][0] = ('d', '', 0, False, packed_stat)
931
if saved_minikind != c'd':
932
# This changed from something into a directory. Make sure we
933
# have a directory block for it. This doesn't happen very
934
# often, so this doesn't have to be super fast.
935
block_index, entry_index, dir_present, file_present = \
936
self._get_block_entry_index(entry[0][0], entry[0][1], 0)
937
self._ensure_block(block_index, entry_index,
938
pathjoin(entry[0][0], entry[0][1]))
940
# Any changes are derived trivially from the stat object, not worth
941
# re-writing a dirstate for just this
943
elif minikind == c'l':
944
if saved_minikind == c'l':
945
# If the object hasn't changed kind, it isn't worth saving the
946
# dirstate just for a symlink. The default is 'fast symlinks' which
947
# save the target in the inode entry, rather than separately. So to
948
# stat, we've already read everything off disk.
950
link_or_sha1 = self._read_link(abspath, saved_link_or_sha1)
951
if self._cutoff_time is None:
952
self._sha_cutoff_time()
953
if (stat_value.st_mtime < self._cutoff_time
954
and stat_value.st_ctime < self._cutoff_time):
955
entry[1][0] = ('l', link_or_sha1, stat_value.st_size,
958
entry[1][0] = ('l', '', stat_value.st_size,
959
False, DirState.NULLSTAT)
961
# Note, even though _mark_modified will only set
962
# IN_MEMORY_HASH_MODIFIED, it still isn't worth
963
self._mark_modified([entry])
967
# TODO: Do we want to worry about exceptions here?
968
cdef char _minikind_from_string(object string) except? -1:
969
"""Convert a python string to a char."""
970
return PyString_AsString(string)[0]
973
cdef object _kind_absent
974
cdef object _kind_file
975
cdef object _kind_directory
976
cdef object _kind_symlink
977
cdef object _kind_relocated
978
cdef object _kind_tree_reference
979
_kind_absent = "absent"
981
_kind_directory = "directory"
982
_kind_symlink = "symlink"
983
_kind_relocated = "relocated"
984
_kind_tree_reference = "tree-reference"
987
cdef object _minikind_to_kind(char minikind):
988
"""Create a string kind for minikind."""
989
cdef char _minikind[1]
992
elif minikind == c'd':
993
return _kind_directory
994
elif minikind == c'a':
996
elif minikind == c'r':
997
return _kind_relocated
998
elif minikind == c'l':
1000
elif minikind == c't':
1001
return _kind_tree_reference
1002
_minikind[0] = minikind
1003
raise KeyError(PyString_FromStringAndSize(_minikind, 1))
1006
cdef int _versioned_minikind(char minikind): # cannot_raise
1007
"""Return non-zero if minikind is in fltd"""
1008
return (minikind == c'f' or
1014
cdef class ProcessEntryC:
1016
cdef int doing_consistency_expansion
1017
cdef object old_dirname_to_file_id # dict
1018
cdef object new_dirname_to_file_id # dict
1019
cdef object last_source_parent
1020
cdef object last_target_parent
1021
cdef int include_unchanged
1023
cdef object use_filesystem_for_exec
1024
cdef object utf8_decode
1025
cdef readonly object searched_specific_files
1026
cdef readonly object searched_exact_paths
1027
cdef object search_specific_files
1028
# The parents up to the root of the paths we are searching.
1029
# After all normal paths are returned, these specific items are returned.
1030
cdef object search_specific_file_parents
1032
# Current iteration variables:
1033
cdef object current_root
1034
cdef object current_root_unicode
1035
cdef object root_entries
1036
cdef int root_entries_pos, root_entries_len
1037
cdef object root_abspath
1038
cdef int source_index, target_index
1039
cdef int want_unversioned
1041
cdef object dir_iterator
1042
cdef int block_index
1043
cdef object current_block
1044
cdef int current_block_pos
1045
cdef object current_block_list
1046
cdef object current_dir_info
1047
cdef object current_dir_list
1048
cdef object _pending_consistent_entries # list
1050
cdef object root_dir_info
1051
cdef object bisect_left
1052
cdef object pathjoin
1054
# A set of the ids we've output when doing partial output.
1055
cdef object seen_ids
1056
cdef object sha_file
1058
def __init__(self, include_unchanged, use_filesystem_for_exec,
1059
search_specific_files, state, source_index, target_index,
1060
want_unversioned, tree):
1061
self.doing_consistency_expansion = 0
1062
self.old_dirname_to_file_id = {}
1063
self.new_dirname_to_file_id = {}
1064
# Are we doing a partial iter_changes?
1065
self.partial = set(['']).__ne__(search_specific_files)
1066
# Using a list so that we can access the values and change them in
1067
# nested scope. Each one is [path, file_id, entry]
1068
self.last_source_parent = [None, None]
1069
self.last_target_parent = [None, None]
1070
if include_unchanged is None:
1071
self.include_unchanged = False
1073
self.include_unchanged = int(include_unchanged)
1074
self.use_filesystem_for_exec = use_filesystem_for_exec
1075
self.utf8_decode = cache_utf8._utf8_decode
1076
# for all search_indexs in each path at or under each element of
1077
# search_specific_files, if the detail is relocated: add the id, and
1078
# add the relocated path as one to search if its not searched already.
1079
# If the detail is not relocated, add the id.
1080
self.searched_specific_files = set()
1081
# When we search exact paths without expanding downwards, we record
1083
self.searched_exact_paths = set()
1084
self.search_specific_files = search_specific_files
1085
# The parents up to the root of the paths we are searching.
1086
# After all normal paths are returned, these specific items are returned.
1087
self.search_specific_file_parents = set()
1088
# The ids we've sent out in the delta.
1089
self.seen_ids = set()
1091
self.current_root = None
1092
self.current_root_unicode = None
1093
self.root_entries = None
1094
self.root_entries_pos = 0
1095
self.root_entries_len = 0
1096
self.root_abspath = None
1097
if source_index is None:
1098
self.source_index = -1
1100
self.source_index = source_index
1101
self.target_index = target_index
1102
self.want_unversioned = want_unversioned
1104
self.dir_iterator = None
1105
self.block_index = -1
1106
self.current_block = None
1107
self.current_block_list = None
1108
self.current_block_pos = -1
1109
self.current_dir_info = None
1110
self.current_dir_list = None
1111
self._pending_consistent_entries = []
1113
self.root_dir_info = None
1114
self.bisect_left = bisect.bisect_left
1115
self.pathjoin = osutils.pathjoin
1116
self.fstat = os.fstat
1117
self.sha_file = osutils.sha_file
1118
if target_index != 0:
1119
# A lot of code in here depends on target_index == 0
1120
raise errors.BzrError('unsupported target index')
1122
cdef _process_entry(self, entry, path_info):
1123
"""Compare an entry and real disk to generate delta information.
1125
:param path_info: top_relpath, basename, kind, lstat, abspath for
1126
the path of entry. If None, then the path is considered absent in
1127
the target (Perhaps we should pass in a concrete entry for this ?)
1128
Basename is returned as a utf8 string because we expect this
1129
tuple will be ignored, and don't want to take the time to
1131
:return: (iter_changes_result, changed). If the entry has not been
1132
handled then changed is None. Otherwise it is False if no content
1133
or metadata changes have occured, and True if any content or
1134
metadata change has occurred. If self.include_unchanged is True then
1135
if changed is not None, iter_changes_result will always be a result
1136
tuple. Otherwise, iter_changes_result is None unless changed is
1139
cdef char target_minikind
1140
cdef char source_minikind
1142
cdef int content_change
1143
cdef object details_list
1145
details_list = entry[1]
1146
if -1 == self.source_index:
1147
source_details = DirState.NULL_PARENT_DETAILS
1149
source_details = details_list[self.source_index]
1150
target_details = details_list[self.target_index]
1151
target_minikind = _minikind_from_string(target_details[0])
1152
if path_info is not None and _versioned_minikind(target_minikind):
1153
if self.target_index != 0:
1154
raise AssertionError("Unsupported target index %d" %
1156
link_or_sha1 = _update_entry(self.state, entry, path_info[4], path_info[3])
1157
# The entry may have been modified by update_entry
1158
target_details = details_list[self.target_index]
1159
target_minikind = _minikind_from_string(target_details[0])
1162
# the rest of this function is 0.3 seconds on 50K paths, or
1163
# 0.000006 seconds per call.
1164
source_minikind = _minikind_from_string(source_details[0])
1165
if ((_versioned_minikind(source_minikind) or source_minikind == c'r')
1166
and _versioned_minikind(target_minikind)):
1167
# claimed content in both: diff
1168
# r | fdlt | | add source to search, add id path move and perform
1169
# | | | diff check on source-target
1170
# r | fdlt | a | dangling file that was present in the basis.
1172
if source_minikind != c'r':
1173
old_dirname = entry[0][0]
1174
old_basename = entry[0][1]
1175
old_path = path = None
1177
# add the source to the search path to find any children it
1178
# has. TODO ? : only add if it is a container ?
1179
if (not self.doing_consistency_expansion and
1180
not osutils.is_inside_any(self.searched_specific_files,
1181
source_details[1])):
1182
self.search_specific_files.add(source_details[1])
1183
# expanding from a user requested path, parent expansion
1184
# for delta consistency happens later.
1185
# generate the old path; this is needed for stating later
1187
old_path = source_details[1]
1188
old_dirname, old_basename = os.path.split(old_path)
1189
path = self.pathjoin(entry[0][0], entry[0][1])
1190
old_entry = self.state._get_entry(self.source_index,
1192
# update the source details variable to be the real
1194
if old_entry == (None, None):
1195
raise errors.CorruptDirstate(self.state._filename,
1196
"entry '%s/%s' is considered renamed from %r"
1197
" but source does not exist\n"
1198
"entry: %s" % (entry[0][0], entry[0][1], old_path, entry))
1199
source_details = old_entry[1][self.source_index]
1200
source_minikind = _minikind_from_string(source_details[0])
1201
if path_info is None:
1202
# the file is missing on disk, show as removed.
1207
# source and target are both versioned and disk file is present.
1208
target_kind = path_info[2]
1209
if target_kind == 'directory':
1211
old_path = path = self.pathjoin(old_dirname, old_basename)
1212
file_id = entry[0][2]
1213
self.new_dirname_to_file_id[path] = file_id
1214
if source_minikind != c'd':
1217
# directories have no fingerprint
1220
elif target_kind == 'file':
1221
if source_minikind != c'f':
1224
# Check the sha. We can't just rely on the size as
1225
# content filtering may mean differ sizes actually
1226
# map to the same content
1227
if link_or_sha1 is None:
1229
statvalue, link_or_sha1 = \
1230
self.state._sha1_provider.stat_and_sha1(
1232
self.state._observed_sha1(entry, link_or_sha1,
1234
content_change = (link_or_sha1 != source_details[1])
1235
# Target details is updated at update_entry time
1236
if self.use_filesystem_for_exec:
1237
# We don't need S_ISREG here, because we are sure
1238
# we are dealing with a file.
1239
target_exec = bool(S_IXUSR & path_info[3].st_mode)
1241
target_exec = target_details[3]
1242
elif target_kind == 'symlink':
1243
if source_minikind != c'l':
1246
content_change = (link_or_sha1 != source_details[1])
1248
elif target_kind == 'tree-reference':
1249
if source_minikind != c't':
1256
path = self.pathjoin(old_dirname, old_basename)
1257
raise errors.BadFileKindError(path, path_info[2])
1258
if source_minikind == c'd':
1260
old_path = path = self.pathjoin(old_dirname, old_basename)
1262
file_id = entry[0][2]
1263
self.old_dirname_to_file_id[old_path] = file_id
1264
# parent id is the entry for the path in the target tree
1265
if old_basename and old_dirname == self.last_source_parent[0]:
1266
# use a cached hit for non-root source entries.
1267
source_parent_id = self.last_source_parent[1]
1270
source_parent_id = self.old_dirname_to_file_id[old_dirname]
1272
source_parent_entry = self.state._get_entry(self.source_index,
1273
path_utf8=old_dirname)
1274
source_parent_id = source_parent_entry[0][2]
1275
if source_parent_id == entry[0][2]:
1276
# This is the root, so the parent is None
1277
source_parent_id = None
1279
self.last_source_parent[0] = old_dirname
1280
self.last_source_parent[1] = source_parent_id
1281
new_dirname = entry[0][0]
1282
if entry[0][1] and new_dirname == self.last_target_parent[0]:
1283
# use a cached hit for non-root target entries.
1284
target_parent_id = self.last_target_parent[1]
1287
target_parent_id = self.new_dirname_to_file_id[new_dirname]
1289
# TODO: We don't always need to do the lookup, because the
1290
# parent entry will be the same as the source entry.
1291
target_parent_entry = self.state._get_entry(self.target_index,
1292
path_utf8=new_dirname)
1293
if target_parent_entry == (None, None):
1294
raise AssertionError(
1295
"Could not find target parent in wt: %s\nparent of: %s"
1296
% (new_dirname, entry))
1297
target_parent_id = target_parent_entry[0][2]
1298
if target_parent_id == entry[0][2]:
1299
# This is the root, so the parent is None
1300
target_parent_id = None
1302
self.last_target_parent[0] = new_dirname
1303
self.last_target_parent[1] = target_parent_id
1305
source_exec = source_details[3]
1306
changed = (content_change
1307
or source_parent_id != target_parent_id
1308
or old_basename != entry[0][1]
1309
or source_exec != target_exec
1311
if not changed and not self.include_unchanged:
1314
if old_path is None:
1315
path = self.pathjoin(old_dirname, old_basename)
1317
old_path_u = self.utf8_decode(old_path)[0]
1320
old_path_u = self.utf8_decode(old_path)[0]
1321
if old_path == path:
1324
path_u = self.utf8_decode(path)[0]
1325
source_kind = _minikind_to_kind(source_minikind)
1326
return (entry[0][2],
1327
(old_path_u, path_u),
1330
(source_parent_id, target_parent_id),
1331
(self.utf8_decode(old_basename)[0], self.utf8_decode(entry[0][1])[0]),
1332
(source_kind, target_kind),
1333
(source_exec, target_exec)), changed
1334
elif source_minikind == c'a' and _versioned_minikind(target_minikind):
1335
# looks like a new file
1336
path = self.pathjoin(entry[0][0], entry[0][1])
1337
# parent id is the entry for the path in the target tree
1338
# TODO: these are the same for an entire directory: cache em.
1339
parent_entry = self.state._get_entry(self.target_index,
1340
path_utf8=entry[0][0])
1341
if parent_entry is None:
1342
raise errors.DirstateCorrupt(self.state,
1343
"We could not find the parent entry in index %d"
1344
" for the entry: %s"
1345
% (self.target_index, entry[0]))
1346
parent_id = parent_entry[0][2]
1347
if parent_id == entry[0][2]:
1349
if path_info is not None:
1351
if self.use_filesystem_for_exec:
1352
# We need S_ISREG here, because we aren't sure if this
1355
S_ISREG(path_info[3].st_mode)
1356
and S_IXUSR & path_info[3].st_mode)
1358
target_exec = target_details[3]
1359
return (entry[0][2],
1360
(None, self.utf8_decode(path)[0]),
1364
(None, self.utf8_decode(entry[0][1])[0]),
1365
(None, path_info[2]),
1366
(None, target_exec)), True
1368
# Its a missing file, report it as such.
1369
return (entry[0][2],
1370
(None, self.utf8_decode(path)[0]),
1374
(None, self.utf8_decode(entry[0][1])[0]),
1376
(None, False)), True
1377
elif _versioned_minikind(source_minikind) and target_minikind == c'a':
1378
# unversioned, possibly, or possibly not deleted: we dont care.
1379
# if its still on disk, *and* theres no other entry at this
1380
# path [we dont know this in this routine at the moment -
1381
# perhaps we should change this - then it would be an unknown.
1382
old_path = self.pathjoin(entry[0][0], entry[0][1])
1383
# parent id is the entry for the path in the target tree
1384
parent_id = self.state._get_entry(self.source_index, path_utf8=entry[0][0])[0][2]
1385
if parent_id == entry[0][2]:
1387
return (entry[0][2],
1388
(self.utf8_decode(old_path)[0], None),
1392
(self.utf8_decode(entry[0][1])[0], None),
1393
(_minikind_to_kind(source_minikind), None),
1394
(source_details[3], None)), True
1395
elif _versioned_minikind(source_minikind) and target_minikind == c'r':
1396
# a rename; could be a true rename, or a rename inherited from
1397
# a renamed parent. TODO: handle this efficiently. Its not
1398
# common case to rename dirs though, so a correct but slow
1399
# implementation will do.
1400
if (not self.doing_consistency_expansion and
1401
not osutils.is_inside_any(self.searched_specific_files,
1402
target_details[1])):
1403
self.search_specific_files.add(target_details[1])
1404
# We don't expand the specific files parents list here as
1405
# the path is absent in target and won't create a delta with
1407
elif ((source_minikind == c'r' or source_minikind == c'a') and
1408
(target_minikind == c'r' or target_minikind == c'a')):
1409
# neither of the selected trees contain this path,
1410
# so skip over it. This is not currently directly tested, but
1411
# is indirectly via test_too_much.TestCommands.test_conflicts.
1414
raise AssertionError("don't know how to compare "
1415
"source_minikind=%r, target_minikind=%r"
1416
% (source_minikind, target_minikind))
1417
## import pdb;pdb.set_trace()
1423
def iter_changes(self):
1426
cdef int _gather_result_for_consistency(self, result) except -1:
1427
"""Check a result we will yield to make sure we are consistent later.
1429
This gathers result's parents into a set to output later.
1431
:param result: A result tuple.
1433
if not self.partial or not result[0]:
1435
self.seen_ids.add(result[0])
1436
new_path = result[1][1]
1438
# Not the root and not a delete: queue up the parents of the path.
1439
self.search_specific_file_parents.update(
1440
osutils.parent_directories(new_path.encode('utf8')))
1441
# Add the root directory which parent_directories does not
1443
self.search_specific_file_parents.add('')
1446
cdef int _update_current_block(self) except -1:
1447
if (self.block_index < len(self.state._dirblocks) and
1448
osutils.is_inside(self.current_root, self.state._dirblocks[self.block_index][0])):
1449
self.current_block = self.state._dirblocks[self.block_index]
1450
self.current_block_list = self.current_block[1]
1451
self.current_block_pos = 0
1453
self.current_block = None
1454
self.current_block_list = None
1458
# Simple thunk to allow tail recursion without pyrex confusion
1459
return self._iter_next()
1461
cdef _iter_next(self):
1462
"""Iterate over the changes."""
1463
# This function single steps through an iterator. As such while loops
1464
# are often exited by 'return' - the code is structured so that the
1465
# next call into the function will return to the same while loop. Note
1466
# that all flow control needed to re-reach that step is reexecuted,
1467
# which can be a performance problem. It has not yet been tuned to
1468
# minimise this; a state machine is probably the simplest restructuring
1469
# to both minimise this overhead and make the code considerably more
1473
# compare source_index and target_index at or under each element of search_specific_files.
1474
# follow the following comparison table. Note that we only want to do diff operations when
1475
# the target is fdl because thats when the walkdirs logic will have exposed the pathinfo
1479
# Source | Target | disk | action
1480
# r | fdlt | | add source to search, add id path move and perform
1481
# | | | diff check on source-target
1482
# r | fdlt | a | dangling file that was present in the basis.
1484
# r | a | | add source to search
1486
# r | r | | this path is present in a non-examined tree, skip.
1487
# r | r | a | this path is present in a non-examined tree, skip.
1488
# a | fdlt | | add new id
1489
# a | fdlt | a | dangling locally added file, skip
1490
# a | a | | not present in either tree, skip
1491
# a | a | a | not present in any tree, skip
1492
# a | r | | not present in either tree at this path, skip as it
1493
# | | | may not be selected by the users list of paths.
1494
# a | r | a | not present in either tree at this path, skip as it
1495
# | | | may not be selected by the users list of paths.
1496
# fdlt | fdlt | | content in both: diff them
1497
# fdlt | fdlt | a | deleted locally, but not unversioned - show as deleted ?
1498
# fdlt | a | | unversioned: output deleted id for now
1499
# fdlt | a | a | unversioned and deleted: output deleted id
1500
# fdlt | r | | relocated in this tree, so add target to search.
1501
# | | | Dont diff, we will see an r,fd; pair when we reach
1502
# | | | this id at the other path.
1503
# fdlt | r | a | relocated in this tree, so add target to search.
1504
# | | | Dont diff, we will see an r,fd; pair when we reach
1505
# | | | this id at the other path.
1507
# TODO: jam 20070516 - Avoid the _get_entry lookup overhead by
1508
# keeping a cache of directories that we have seen.
1509
cdef object current_dirname, current_blockname
1510
cdef char * current_dirname_c, * current_blockname_c
1511
cdef int advance_entry, advance_path
1512
cdef int path_handled
1513
searched_specific_files = self.searched_specific_files
1514
# Are we walking a root?
1515
while self.root_entries_pos < self.root_entries_len:
1516
entry = self.root_entries[self.root_entries_pos]
1517
self.root_entries_pos = self.root_entries_pos + 1
1518
result, changed = self._process_entry(entry, self.root_dir_info)
1519
if changed is not None:
1521
self._gather_result_for_consistency(result)
1522
if changed or self.include_unchanged:
1524
# Have we finished the prior root, or never started one ?
1525
if self.current_root is None:
1526
# TODO: the pending list should be lexically sorted? the
1527
# interface doesn't require it.
1529
self.current_root = self.search_specific_files.pop()
1531
raise StopIteration()
1532
self.searched_specific_files.add(self.current_root)
1533
# process the entries for this containing directory: the rest will be
1534
# found by their parents recursively.
1535
self.root_entries = self.state._entries_for_path(self.current_root)
1536
self.root_entries_len = len(self.root_entries)
1537
self.current_root_unicode = self.current_root.decode('utf8')
1538
self.root_abspath = self.tree.abspath(self.current_root_unicode)
1540
root_stat = os.lstat(self.root_abspath)
1542
if e.errno == errno.ENOENT:
1543
# the path does not exist: let _process_entry know that.
1544
self.root_dir_info = None
1546
# some other random error: hand it up.
1549
self.root_dir_info = ('', self.current_root,
1550
osutils.file_kind_from_stat_mode(root_stat.st_mode), root_stat,
1552
if self.root_dir_info[2] == 'directory':
1553
if self.tree._directory_is_tree_reference(
1554
self.current_root_unicode):
1555
self.root_dir_info = self.root_dir_info[:2] + \
1556
('tree-reference',) + self.root_dir_info[3:]
1557
if not self.root_entries and not self.root_dir_info:
1558
# this specified path is not present at all, skip it.
1559
# (tail recursion, can do a loop once the full structure is
1561
return self._iter_next()
1563
self.root_entries_pos = 0
1564
# XXX Clarity: This loop is duplicated a out the self.current_root
1565
# is None guard above: if we return from it, it completes there
1566
# (and the following if block cannot trigger because
1567
# path_handled must be true, so the if block is not # duplicated.
1568
while self.root_entries_pos < self.root_entries_len:
1569
entry = self.root_entries[self.root_entries_pos]
1570
self.root_entries_pos = self.root_entries_pos + 1
1571
result, changed = self._process_entry(entry, self.root_dir_info)
1572
if changed is not None:
1575
self._gather_result_for_consistency(result)
1576
if changed or self.include_unchanged:
1578
# handle unversioned specified paths:
1579
if self.want_unversioned and not path_handled and self.root_dir_info:
1580
new_executable = bool(
1581
stat.S_ISREG(self.root_dir_info[3].st_mode)
1582
and stat.S_IEXEC & self.root_dir_info[3].st_mode)
1584
(None, self.current_root_unicode),
1588
(None, splitpath(self.current_root_unicode)[-1]),
1589
(None, self.root_dir_info[2]),
1590
(None, new_executable)
1592
# If we reach here, the outer flow continues, which enters into the
1593
# per-root setup logic.
1594
if (self.current_dir_info is None and self.current_block is None and not
1595
self.doing_consistency_expansion):
1596
# setup iteration of this root:
1597
self.current_dir_list = None
1598
if self.root_dir_info and self.root_dir_info[2] == 'tree-reference':
1599
self.current_dir_info = None
1601
self.dir_iterator = osutils._walkdirs_utf8(self.root_abspath,
1602
prefix=self.current_root)
1605
self.current_dir_info = self.dir_iterator.next()
1606
self.current_dir_list = self.current_dir_info[1]
1608
# there may be directories in the inventory even though
1609
# this path is not a file on disk: so mark it as end of
1611
if e.errno in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
1612
self.current_dir_info = None
1613
elif sys.platform == 'win32':
1614
# on win32, python2.4 has e.errno == ERROR_DIRECTORY, but
1615
# python 2.5 has e.errno == EINVAL,
1616
# and e.winerror == ERROR_DIRECTORY
1618
e_winerror = e.winerror
1619
except AttributeError, _:
1621
win_errors = (ERROR_DIRECTORY, ERROR_PATH_NOT_FOUND)
1622
if (e.errno in win_errors or e_winerror in win_errors):
1623
self.current_dir_info = None
1625
# Will this really raise the right exception ?
1630
if self.current_dir_info[0][0] == '':
1631
# remove .bzr from iteration
1632
bzr_index = self.bisect_left(self.current_dir_list, ('.bzr',))
1633
if self.current_dir_list[bzr_index][0] != '.bzr':
1634
raise AssertionError()
1635
del self.current_dir_list[bzr_index]
1636
initial_key = (self.current_root, '', '')
1637
self.block_index, _ = self.state._find_block_index_from_key(initial_key)
1638
if self.block_index == 0:
1639
# we have processed the total root already, but because the
1640
# initial key matched it we should skip it here.
1641
self.block_index = self.block_index + 1
1642
self._update_current_block()
1643
# walk until both the directory listing and the versioned metadata
1645
while (self.current_dir_info is not None
1646
or self.current_block is not None):
1647
# Uncommon case - a missing directory or an unversioned directory:
1648
if (self.current_dir_info and self.current_block
1649
and self.current_dir_info[0][0] != self.current_block[0]):
1650
# Work around pyrex broken heuristic - current_dirname has
1651
# the same scope as current_dirname_c
1652
current_dirname = self.current_dir_info[0][0]
1653
current_dirname_c = PyString_AS_STRING_void(
1654
<void *>current_dirname)
1655
current_blockname = self.current_block[0]
1656
current_blockname_c = PyString_AS_STRING_void(
1657
<void *>current_blockname)
1658
# In the python generator we evaluate this if block once per
1659
# dir+block; because we reenter in the pyrex version its being
1660
# evaluated once per path: we could cache the result before
1661
# doing the while loop and probably save time.
1662
if _cmp_by_dirs(current_dirname_c,
1663
PyString_Size(current_dirname),
1664
current_blockname_c,
1665
PyString_Size(current_blockname)) < 0:
1666
# filesystem data refers to paths not covered by the
1667
# dirblock. this has two possibilities:
1668
# A) it is versioned but empty, so there is no block for it
1669
# B) it is not versioned.
1671
# if (A) then we need to recurse into it to check for
1672
# new unknown files or directories.
1673
# if (B) then we should ignore it, because we don't
1674
# recurse into unknown directories.
1675
# We are doing a loop
1676
while self.path_index < len(self.current_dir_list):
1677
current_path_info = self.current_dir_list[self.path_index]
1678
# dont descend into this unversioned path if it is
1680
if current_path_info[2] in ('directory',
1682
del self.current_dir_list[self.path_index]
1683
self.path_index = self.path_index - 1
1684
self.path_index = self.path_index + 1
1685
if self.want_unversioned:
1686
if current_path_info[2] == 'directory':
1687
if self.tree._directory_is_tree_reference(
1688
self.utf8_decode(current_path_info[0])[0]):
1689
current_path_info = current_path_info[:2] + \
1690
('tree-reference',) + current_path_info[3:]
1691
new_executable = bool(
1692
stat.S_ISREG(current_path_info[3].st_mode)
1693
and stat.S_IEXEC & current_path_info[3].st_mode)
1695
(None, self.utf8_decode(current_path_info[0])[0]),
1699
(None, self.utf8_decode(current_path_info[1])[0]),
1700
(None, current_path_info[2]),
1701
(None, new_executable))
1702
# This dir info has been handled, go to the next
1704
self.current_dir_list = None
1706
self.current_dir_info = self.dir_iterator.next()
1707
self.current_dir_list = self.current_dir_info[1]
1708
except StopIteration, _:
1709
self.current_dir_info = None
1711
# We have a dirblock entry for this location, but there
1712
# is no filesystem path for this. This is most likely
1713
# because a directory was removed from the disk.
1714
# We don't have to report the missing directory,
1715
# because that should have already been handled, but we
1716
# need to handle all of the files that are contained
1718
while self.current_block_pos < len(self.current_block_list):
1719
current_entry = self.current_block_list[self.current_block_pos]
1720
self.current_block_pos = self.current_block_pos + 1
1721
# entry referring to file not present on disk.
1722
# advance the entry only, after processing.
1723
result, changed = self._process_entry(current_entry, None)
1724
if changed is not None:
1726
self._gather_result_for_consistency(result)
1727
if changed or self.include_unchanged:
1729
self.block_index = self.block_index + 1
1730
self._update_current_block()
1731
continue # next loop-on-block/dir
1732
            result = self._loop_one_block()
            if result is not None:
                return result
        if len(self.search_specific_files):
            # More supplied paths to process
            self.current_root = None
            return self._iter_next()
        # Start expanding more conservatively, adding paths the user may not
        # have intended but required for consistent deltas.
        self.doing_consistency_expansion = 1
        if not self._pending_consistent_entries:
            self._pending_consistent_entries = self._next_consistent_entries()
        while self._pending_consistent_entries:
            result, changed = self._pending_consistent_entries.pop()
            if changed is not None:
                return result
        raise StopIteration()

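    # Return current_path_info with its kind rewritten to 'tree-reference'
    # when the tree reports the directory as a tree reference; otherwise
    # return it unchanged.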
    cdef object _maybe_tree_ref(self, current_path_info):
        if self.tree._directory_is_tree_reference(
            self.utf8_decode(current_path_info[0])[0]):
            return current_path_info[:2] + \
                ('tree-reference',) + current_path_info[3:]
        else:
            return current_path_info

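    # Process the directory where the dirblock and the on-disk listing agree.
    # Returns one result tuple to report to the caller, or None once this
    # block/directory pair has been exhausted.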
    cdef object _loop_one_block(self):
        # current_dir_info and current_block refer to the same directory -
        # this is the common case code.
        # Assign local variables for current path and entry:
        cdef object current_entry
        cdef object current_path_info
        cdef int path_handled
        cdef char minikind
        cdef int cmp_result
        # cdef char * temp_str
        # cdef Py_ssize_t temp_str_length
        # PyString_AsStringAndSize(disk_kind, &temp_str, &temp_str_length)
        # if not strncmp(temp_str, "directory", temp_str_length):
        if (self.current_block is not None and
            self.current_block_pos < PyList_GET_SIZE(self.current_block_list)):
            current_entry = PyList_GET_ITEM(self.current_block_list,
                self.current_block_pos)
            Py_INCREF(current_entry)
        else:
            current_entry = None
        if (self.current_dir_info is not None and
            self.path_index < PyList_GET_SIZE(self.current_dir_list)):
            current_path_info = PyList_GET_ITEM(self.current_dir_list,
                self.path_index)
            Py_INCREF(current_path_info)
            disk_kind = PyTuple_GET_ITEM(current_path_info, 2)
            Py_INCREF(disk_kind)
            if disk_kind == "directory":
                current_path_info = self._maybe_tree_ref(current_path_info)
        else:
            current_path_info = None
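        # Walk the dirblock entries and the disk entries for this directory
        # in parallel, comparing basenames to decide which side to advance.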
        while (current_entry is not None or current_path_info is not None):
            advance_entry = True
            advance_path = True
            result = None
            changed = None
            path_handled = 0
            if current_entry is None:
                # unversioned - the check for path_handled when the path
                # is advanced will yield this path if needed.
                pass
            elif current_path_info is None:
                # no path is fine: the per entry code will handle it.
                result, changed = self._process_entry(current_entry,
                    current_path_info)
            else:
                minikind = _minikind_from_string(
                    current_entry[1][self.target_index][0])
                cmp_result = cmp(current_path_info[1], current_entry[0][1])
                if (cmp_result or minikind == c'a' or minikind == c'r'):
                    # The current path on disk doesn't match the dirblock
                    # record. Either the dirblock record is marked as
                    # absent/renamed, or the file on disk is not present at all
                    # in the dirblock. Either way, report about the dirblock
                    # entry, and let other code handle the filesystem one.

                    # Compare the basename for these files to determine
                    # which comes first
                    if cmp_result < 0:
                        # extra file on disk: pass for now, but only
                        # increment the path, not the entry
                        advance_entry = False
                    else:
                        # entry referring to file not present on disk.
                        # advance the entry only, after processing.
                        result, changed = self._process_entry(current_entry,
                            None)
                        advance_path = False
                else:
                    # paths are the same, and the dirstate entry is not
                    # absent or renamed.
                    result, changed = self._process_entry(current_entry,
                        current_path_info)
                    if changed is not None:
                        path_handled = -1
                        if not changed and not self.include_unchanged:
                            changed = None
            # >- loop control starts here:
            # >- entry
            if advance_entry and current_entry is not None:
                self.current_block_pos = self.current_block_pos + 1
                if self.current_block_pos < PyList_GET_SIZE(self.current_block_list):
                    current_entry = self.current_block_list[self.current_block_pos]
                else:
                    current_entry = None
            # >- path
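            # Advance the disk side. A path that never matched a dirblock
            # entry is unversioned; report it here when want_unversioned is
            # set, and prune unversioned directories, and any tree reference,
            # so the disk walker does not descend into them.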
            if advance_path and current_path_info is not None:
                if not path_handled:
                    # unversioned in all regards
                    if self.want_unversioned:
                        new_executable = bool(
                            stat.S_ISREG(current_path_info[3].st_mode)
                            and stat.S_IEXEC & current_path_info[3].st_mode)
                        try:
                            relpath_unicode = self.utf8_decode(current_path_info[0])[0]
                        except UnicodeDecodeError, _:
                            raise errors.BadFilenameEncoding(
                                current_path_info[0], osutils._fs_enc)
                        if changed is not None:
                            raise AssertionError(
                                "result is not None: %r" % result)
                        result = (None,
                            (None, relpath_unicode),
                            True,
                            (False, False),
                            (None, None),
                            (None, self.utf8_decode(current_path_info[1])[0]),
                            (None, current_path_info[2]),
                            (None, new_executable))
                        changed = True
                    # don't descend into this unversioned path if it is
                    # a dir
                    if current_path_info[2] in ('directory'):
                        del self.current_dir_list[self.path_index]
                        self.path_index = self.path_index - 1
                # don't descend the disk iterator into any tree
                # reference paths.
                if current_path_info[2] == 'tree-reference':
                    del self.current_dir_list[self.path_index]
                    self.path_index = self.path_index - 1
                self.path_index = self.path_index + 1
                if self.path_index < len(self.current_dir_list):
                    current_path_info = self.current_dir_list[self.path_index]
                    if current_path_info[2] == 'directory':
                        current_path_info = self._maybe_tree_ref(
                            current_path_info)
                else:
                    current_path_info = None
            if changed is not None:
                # Found a result on this pass, yield it
                if changed:
                    self._gather_result_for_consistency(result)
                if changed or self.include_unchanged:
                    return result
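        # Both sides are exhausted for this directory: advance to the next
        # dirblock and the next directory from the disk walker, then return
        # None so _iter_next moves on.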
        if self.current_block is not None:
            self.block_index = self.block_index + 1
            self._update_current_block()
        if self.current_dir_info is not None:
            self.path_index = 0
            self.current_dir_list = None
            try:
                self.current_dir_info = self.dir_iterator.next()
                self.current_dir_list = self.current_dir_info[1]
            except StopIteration, _:
                self.current_dir_info = None

    cdef object _next_consistent_entries(self):
        """Grabs the next specific file parent case to consider.

        :return: A list of the results, each of which is as for _process_entry.
        """
        results = []
        while self.search_specific_file_parents:
            # Process the parent directories for the paths we were iterating.
            # Even in extremely large trees this should be modest, so currently
            # no attempt is made to optimise.
            path_utf8 = self.search_specific_file_parents.pop()
            if path_utf8 in self.searched_exact_paths:
                # We've examined this path.
                continue
            if osutils.is_inside_any(self.searched_specific_files, path_utf8):
                # We've examined this path.
                continue
            path_entries = self.state._entries_for_path(path_utf8)
            # We need either one or two entries. If the path in
            # self.target_index has moved (so the entry in source_index is in
            # 'ar') then we need to also look for the entry for this path in
            # self.source_index, to output the appropriate delete-or-rename.
            selected_entries = []
            found_item = False
            for candidate_entry in path_entries:
                # Find entries present in target at this path:
                if candidate_entry[1][self.target_index][0] not in 'ar':
                    found_item = True
                    selected_entries.append(candidate_entry)
                # Find entries present in source at this path:
                elif (self.source_index is not None and
                    candidate_entry[1][self.source_index][0] not in 'ar'):
                    found_item = True
                    if candidate_entry[1][self.target_index][0] == 'a':
                        # Deleted, emit it here.
                        selected_entries.append(candidate_entry)
                    else:
                        # renamed, emit it when we process the directory it
                        # ended up at.
                        self.search_specific_file_parents.add(
                            candidate_entry[1][self.target_index][1])
            if not found_item:
                raise AssertionError(
                    "Missing entry for specific path parent %r, %r" % (
                    path_utf8, path_entries))
            path_info = self._path_info(path_utf8, path_utf8.decode('utf8'))
            for entry in selected_entries:
                if entry[0][2] in self.seen_ids:
                    continue
                result, changed = self._process_entry(entry, path_info)
                if changed is None:
                    raise AssertionError(
                        "Got entry<->path mismatch for specific path "
                        "%r entry %r path_info %r " % (
                        path_utf8, entry, path_info))
                # Only include changes - we're outside the user's requested
                # expansion.
                if changed:
                    self._gather_result_for_consistency(result)
                    if (result[6][0] == 'directory' and
                        result[6][1] != 'directory'):
                        # This stopped being a directory, the old children have
                        # to be included.
                        if entry[1][self.source_index][0] == 'r':
                            # renamed, take the source path
                            entry_path_utf8 = entry[1][self.source_index][1]
                        else:
                            entry_path_utf8 = path_utf8
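                        # DirState entry keys are (dirname, basename, file_id);
                        # an empty basename and id sorts first within a
                        # directory, so this key locates the block that holds
                        # the children of entry_path_utf8.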
                        initial_key = (entry_path_utf8, '', '')
                        block_index, _ = self.state._find_block_index_from_key(
                            initial_key)
                        if block_index == 0:
                            # The children of the root are in block index 1.
                            block_index = block_index + 1
                        current_block = None
                        if block_index < len(self.state._dirblocks):
                            current_block = self.state._dirblocks[block_index]
                            if not osutils.is_inside(
                                entry_path_utf8, current_block[0]):
                                # No entries for this directory at all.
                                current_block = None
                        if current_block is not None:
                            for entry in current_block[1]:
                                if entry[1][self.source_index][0] in 'ar':
                                    # Not in the source tree, so doesn't have to be
                                    # included.
                                    continue
                                # Path of the entry itself.
                                self.search_specific_file_parents.add(
                                    self.pathjoin(*entry[0][:2]))
                if changed or self.include_unchanged:
                    results.append((result, changed))
            self.searched_exact_paths.add(path_utf8)
        return results

    cdef object _path_info(self, utf8_path, unicode_path):
        """Generate path_info for unicode_path.

        :return: None if unicode_path does not exist, or a path_info tuple.
        """
        abspath = self.tree.abspath(unicode_path)
        try:
            stat = os.lstat(abspath)
        except OSError, e:
            if e.errno == errno.ENOENT:
                # the path does not exist.
                return None
            else:
                raise
        utf8_basename = utf8_path.rsplit('/', 1)[-1]
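        # The tuple below matches what the utf8 directory walker yields for
        # each entry: (utf8_relpath, utf8_basename, kind, lstat_result,
        # abspath).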
        dir_info = (utf8_path, utf8_basename,
            osutils.file_kind_from_stat_mode(stat.st_mode), stat,
            abspath)
        if dir_info[2] == 'directory':
            if self.tree._directory_is_tree_reference(
                unicode_path):
                self.root_dir_info = self.root_dir_info[:2] + \
                    ('tree-reference',) + self.root_dir_info[3:]