from bzrlib import (
from bzrlib.symbol_versioning import (

# sha and md5 modules are deprecated in python2.6 but hashlib is available as
# of python2.5
if sys.version_info < (2, 5):
    import md5 as _mod_md5
    import sha as _mod_sha

from bzrlib import symbol_versioning
from bzrlib.trace import mutter


# Cross platform wall-clock time functionality with decent resolution.
# On Linux ``time.clock`` returns only CPU time. On Windows, ``time.time()``
# only has a resolution of ~15ms. Note that ``time.clock()`` is not
# synchronized with ``time.time()``; this is only meant to be used to find
# delta times by subtracting from another call to this function.
timer_func = time.time
if sys.platform == 'win32':
    timer_func = time.clock
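
# Usage sketch (illustrative, not part of the original module): timer_func is
# only meaningful for computing deltas, never absolute timestamps, e.g.:
#
#   start = timer_func()
#   do_something()            # hypothetical workload
#   elapsed = timer_func() - start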

# On win32, O_BINARY is used to indicate the file should
# be opened in binary mode, rather than text mode.
# On other platforms, O_BINARY doesn't exist, because
# they always open in binary mode, so it is okay to
# OR with 0 on those platforms.
# O_NOINHERIT and O_TEXT exist only on win32 too.
O_BINARY = getattr(os, 'O_BINARY', 0)
O_TEXT = getattr(os, 'O_TEXT', 0)
O_NOINHERIT = getattr(os, 'O_NOINHERIT', 0)
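
# Usage sketch (illustrative, not part of the original module): because the
# missing constants default to 0, they can be OR'ed into os.open flags
# unconditionally on any platform, e.g.:
#
#   fd = os.open('some-file', os.O_RDONLY | O_BINARY | O_NOINHERIT)
#   f = os.fdopen(fd, 'rb')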


def get_unicode_argv():
    try:
        user_encoding = get_user_encoding()
        return [a.decode(user_encoding) for a in sys.argv[1:]]
    except UnicodeDecodeError:
        raise errors.BzrError(("Parameter '%r' is unsupported by the current "


def make_readonly(filename):


def fancy_rename(old, new, rename_func, unlink_func):
    """A fancy rename, when you don't have atomic rename.

    :param old: The old path, to rename from
    :param new: The new path, to rename to
    :param rename_func: The potentially non-atomic rename function
    :param unlink_func: A way to delete the target file if the full rename
        succeeds
    """
    # sftp rename doesn't allow overwriting, so play tricks:
    base = os.path.basename(new)
    dirname = os.path.dirname(new)
    # callers use different encodings for the paths so the following MUST
    # respect that. We rely on python upcasting to unicode if new is unicode
    # and keeping a str if not.
    tmp_name = 'tmp.%s.%.9f.%d.%s' % (base, time.time(),
                                      os.getpid(), rand_chars(10))
    tmp_name = pathjoin(dirname, tmp_name)

    # Rename the file out of the way, but keep track if it didn't exist


def pumpfile(from_file, to_file, read_length=-1, buff_size=32768,
             report_activity=None, direction='read'):
    """Copy contents of one file to another.

    The read_length can either be -1 to read to end-of-file (EOF) or
    it can specify the maximum number of bytes to read.

    The buff_size represents the maximum size for each read operation
    performed on from_file.

    :param report_activity: Call this as bytes are read, see
        Transport._report_activity
    :param direction: Will be passed to report_activity

    :return: The number of bytes copied.
    """
    length = 0
    if read_length >= 0:
        # read specified number of bytes
        while read_length > 0:
            num_bytes_to_read = min(read_length, buff_size)
            block = from_file.read(num_bytes_to_read)
            if not block:
                # EOF reached
                break
            if report_activity is not None:
                report_activity(len(block), direction)
            to_file.write(block)

            actual_bytes_read = len(block)
            read_length -= actual_bytes_read
            length += actual_bytes_read
    else:
        # read to EOF
        while True:
            block = from_file.read(buff_size)
            if not block:
                # EOF reached
                break
            if report_activity is not None:
                report_activity(len(block), direction)
            to_file.write(block)
            length += len(block)
    return length
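
# Usage sketch (illustrative, not part of the original module): copy a file in
# bounded-size chunks, optionally reporting progress:
#
#   from_file = open('source.bin', 'rb')        # hypothetical paths
#   to_file = open('destination.bin', 'wb')
#   try:
#       copied = pumpfile(from_file, to_file, buff_size=65536)
#   finally:
#       from_file.close()
#       to_file.close()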


def pump_string_file(bytes, file_handle, segment_size=None):
    """Write bytes to file_handle in many smaller writes.

    :param bytes: The string to write.
    :param file_handle: The file to write to.
    """
    # Write data in chunks rather than all at once, because very large
    # writes fail on some platforms (e.g. Windows with SMB mounted
    # drives).
    if not segment_size:
        segment_size = 5242880 # 5MB
    segments = range(len(bytes) / segment_size + 1)
    write = file_handle.write
    for segment_index in segments:
        segment = buffer(bytes, segment_index * segment_size, segment_size)
        write(segment)
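
# Usage sketch (illustrative, not part of the original module): writing a
# large string through pump_string_file keeps each underlying write below
# segment_size bytes:
#
#   f = open('out.bin', 'wb')                   # hypothetical path
#   pump_string_file('x' * (20 * 1024 * 1024), f, segment_size=1024 * 1024)
#   f.close()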


def file_iterator(input_file, readsize=32768):
    while True:
        b = input_file.read(readsize)
        if not b:
            break
        yield b


    :param timezone: How to display the time: 'utc', 'original' for the
        timezone specified by offset, or 'local' for the process's current
        timezone.
    :param date_fmt: strftime format.
    :param show_offset: Whether to append the timezone.
    """
    (date_fmt, tt, offset_str) = \
        _format_date(t, offset, timezone, date_fmt, show_offset)
    date_fmt = date_fmt.replace('%a', weekdays[tt[6]])
    date_str = time.strftime(date_fmt, tt)
    return date_str + offset_str


# Cache of formatted offset strings
_offset_cache = {}


def format_date_with_offset_in_original_timezone(t, offset=0,
    _cache=_offset_cache):
    """Return a formatted date string in the original timezone.

    This routine may be faster than format_date.

    :param t: Seconds since the epoch.
    :param offset: Timezone offset in seconds east of utc.
    """
    tt = time.gmtime(t + offset)
    date_fmt = _default_format_by_weekday_num[tt[6]]
    date_str = time.strftime(date_fmt, tt)
    offset_str = _cache.get(offset, None)
    if offset_str is None:
        offset_str = ' %+03d%02d' % (offset / 3600, (offset / 60) % 60)
        _cache[offset] = offset_str
    return date_str + offset_str
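
# Usage sketch (illustrative, not part of the original module): format a
# commit timestamp in the timezone it was recorded in, e.g. UTC+2:
#
#   s = format_date_with_offset_in_original_timezone(1262304000, 7200)
#   # -> something like 'Fri 2010-01-01 02:00:00 +0200'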


def format_local_date(t, offset=0, timezone='original', date_fmt=None,
                      show_offset=True):
    """Return a unicode date string formatted according to the current locale.

    :param t: Seconds since the epoch.
    :param offset: Timezone offset in seconds east of utc.
    :param timezone: How to display the time: 'utc', 'original' for the
        timezone specified by offset, or 'local' for the process's current
        timezone.
    :param date_fmt: strftime format.
    :param show_offset: Whether to append the timezone.
    """
    (date_fmt, tt, offset_str) = \
        _format_date(t, offset, timezone, date_fmt, show_offset)
    date_str = time.strftime(date_fmt, tt)
    if not isinstance(date_str, unicode):
        date_str = date_str.decode(get_user_encoding(), 'replace')
    return date_str + offset_str


def _format_date(t, offset, timezone, date_fmt, show_offset):
    if timezone == 'utc':
        tt = time.gmtime(t)


    return pathjoin(*p)


def parent_directories(filename):
    """Return the list of parent directories, deepest first.

    For example, parent_directories("a/b/c") -> ["a/b", "a"].
    """
    parents = []
    parts = splitpath(dirname(filename))
    while parts:
        parents.append(joinpath(parts))
        parts.pop()
    return parents


_extension_load_failures = []


def failed_to_load_extension(exception):
    """Handle failing to load a binary extension.

    This should be called from the ImportError block guarding the attempt to
    import the native extension. If this function returns, the pure-Python
    implementation should be loaded instead::

    >>> try:
    >>>     import bzrlib._fictional_extension_pyx
    >>> except ImportError, e:
    >>>     bzrlib.osutils.failed_to_load_extension(e)
    >>>     import bzrlib._fictional_extension_py
    """
    # NB: This docstring is just an example, not a doctest, because doctest
    # currently can't cope with the use of lazy imports in this namespace --
    # they seem to suppress the import error.
    #
    # This currently doesn't report the failure at the time it occurs, because
    # they tend to happen very early in startup when we can't check config
    # files etc, and also we want to report all failures but not spam the user
    # with 10 warnings.
    from bzrlib import trace
    exception_str = str(exception)
    if exception_str not in _extension_load_failures:
        trace.mutter("failed to load compiled extension: %s" % exception_str)
        _extension_load_failures.append(exception_str)


def report_extension_load_failures():
    if not _extension_load_failures:
        return
    from bzrlib.config import GlobalConfig
    if GlobalConfig().get_user_option_as_bool('ignore_missing_extensions'):
        return
    # the warnings framework should by default show this only once
    from bzrlib.trace import warning
    warning(
        "bzr: warning: some compiled extensions could not be loaded; "
        "see <https://answers.launchpad.net/bzr/+faq/703>")
    # we no longer show the specific missing extensions here, because it makes
    # the message too long and scary - see
    # https://bugs.launchpad.net/bzr/+bug/430529


try:
    from bzrlib._chunks_to_lines_pyx import chunks_to_lines
except ImportError, e:
    failed_to_load_extension(e)
    from bzrlib._chunks_to_lines_py import chunks_to_lines


def split_lines(s):
    """Split s into lines, but without removing the newline characters."""
    # Trivially convert a fulltext into a 'chunked' representation, and let
    # chunks_to_lines do the heavy lifting.
    if isinstance(s, str):
        # chunks_to_lines only supports 8-bit strings
        return chunks_to_lines([s])
    else:
        return _split_lines(s)


def _split_lines(s):
    """Split s into lines, but without removing the newline characters.

    This supports Unicode or plain string objects.
    """
    lines = s.split('\n')
    result = [line + '\n' for line in lines[:-1]]
    if lines[-1]:
        result.append(lines[-1])
    return result
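
# Usage sketch (illustrative, not part of the original module): newlines are
# preserved on every line except a possibly unterminated last one:
#
#   split_lines('a\nb\nc')   # -> ['a\n', 'b\n', 'c']
#   split_lines(u'a\nb\n')   # -> [u'a\n', u'b\n']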

    shutil.copyfile(src, dest)


def delete_any(path):
    """Delete a file, symlink or directory.

    Will delete even if readonly.
    """
    try:
        _delete_file_or_dir(path)
    except (OSError, IOError), e:
        if e.errno in (errno.EPERM, errno.EACCES):
            # make writable and try again
            try:
                make_writable(path)
            except (OSError, IOError):
                pass
            _delete_file_or_dir(path)
        else:
            raise


def _delete_file_or_dir(path):
    # Look Before You Leap (LBYL) is appropriate here instead of Easier to Ask for
    # Forgiveness than Permission (EAFP) because:
    # - root can damage a solaris file system by using unlink,
    # - unlink raises different exceptions on different OSes (linux: EISDIR, win32:
    # EACCES, OSX: EPERM) when invoked on a directory.
    if isdir(path): # Takes care of symlinks

    while True:
        if len(head) <= len(base) and head != base:
            raise errors.PathNotChild(rp, base)
        if head == base:
            break
        head, tail = split(head)
        if tail:
            s.append(tail)

    return pathjoin(*reversed(s))


def _cicp_canonical_relpath(base, path):
    """Return the canonical path relative to base.

    Like relpath, but on case-insensitive-case-preserving file-systems, this
    will return the relpath as stored on the file-system rather than in the
    case specified in the input string, for all existing portions of the path.

    This will cause O(N) behaviour if called for every path in a tree; if you
    have a number of paths to convert, you should use canonical_relpaths().
    """
    # TODO: it should be possible to optimize this for Windows by using the
    # win32 API FindFiles function to look for the specified name - but using
    # os.listdir() still gives us the correct, platform agnostic semantics in
    # the common case.
    rel = relpath(base, path)
    # '.' will have been turned into ''
    if not rel:
        return rel

    abs_base = abspath(base)
    current = abs_base
    _listdir = os.listdir

    # use an explicit iterator so we can easily consume the rest on early exit.
    bit_iter = iter(rel.split('/'))
    for bit in bit_iter:
        lbit = bit.lower()
        try:
            next_entries = _listdir(current)
        except OSError: # enoent, eperm, etc
            # We can't find this in the filesystem, so just append the
            # remainder unchanged.
            current = pathjoin(current, bit, *list(bit_iter))
            break
        for look in next_entries:
            if lbit == look.lower():
                current = pathjoin(current, look)
                break
        else:
            # got to the end, nothing matched, so we just return the
            # non-existing bits as they were specified (the filename may be
            # the target of a move, for example).
            current = pathjoin(current, bit, *list(bit_iter))
            break
    return current[len(abs_base):].lstrip('/')

# XXX - TODO - we need better detection/integration of case-insensitive
# file-systems; Linux often sees FAT32 devices (or NFS-mounted OSX
# filesystems), for example, so could probably benefit from the same basic
# support there. For now though, only Windows and OSX get that support, and
# they get it for *all* file-systems!
if sys.platform in ('win32', 'darwin'):
    canonical_relpath = _cicp_canonical_relpath
else:
    canonical_relpath = relpath


def canonical_relpaths(base, paths):
    """Create an iterable to canonicalize a sequence of relative paths.

    The intent is for this implementation to use a cache, vastly speeding
    up multiple transformations in the same directory.
    """
    # but for now, we haven't optimized...
    return [canonical_relpath(base, p) for p in paths]
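
# Usage sketch (illustrative, not part of the original module): on a
# case-insensitive but case-preserving filesystem (Windows, OSX), the casing
# stored on disk wins; assume a file 'README.txt' exists under base:
#
#   canonical_relpath(base, 'readme.TXT')      # -> 'README.txt'
#   canonical_relpaths(base, ['readme.TXT'])   # -> ['README.txt']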


def safe_unicode(unicode_or_utf8_string):
    """Coerce unicode_or_utf8_string into unicode.

    If it is unicode, it is returned.
    Otherwise it is decoded from utf-8. If decoding fails, the exception is
    wrapped in a BzrBadParameterNotUnicode exception.
    """
    if isinstance(unicode_or_utf8_string, unicode):
        return unicode_or_utf8_string


normalized_filename = _inaccessible_normalized_filename


def set_signal_handler(signum, handler, restart_syscall=True):
    """A wrapper for signal.signal that also calls siginterrupt(signum, False)
    on platforms that support that.

    :param restart_syscall: if set, allow syscalls interrupted by a signal to
        automatically restart (by calling `signal.siginterrupt(signum,
        False)`). May be ignored if the feature is not available on this
        platform or Python version.
    """
    old_handler = signal.signal(signum, handler)
    if restart_syscall:
        try:
            siginterrupt = signal.siginterrupt
        except AttributeError:
            # siginterrupt doesn't exist on this platform, or for this version
            # of Python.
            pass
        else:
            siginterrupt(signum, False)
    return old_handler


default_terminal_width = 80
"""The default terminal width for ttys.

This is defined so that higher levels can share a common fallback value when
terminal_width() returns None.
"""
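
# Usage sketch (illustrative, not part of the original module): callers that
# need a concrete number can fall back to the shared default:
#
#   width = terminal_width()
#   if width is None:
#       width = default_terminal_width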


def terminal_width():
    """Return terminal width.

    None is returned if the width can't be established precisely.

    - if BZR_COLUMNS is set, returns its value
    - if there is no controlling terminal, returns None
    - if COLUMNS is set, returns its value,

    From there, we need to query the OS to get the size of the controlling
    terminal.

    On Unix:
    - get termios.TIOCGWINSZ
    - if an error occurs or a negative value is obtained, returns None

    On Windows:
    - win32utils.get_console_size() decides,
    - returns None on error (provided default value)
    """
    # If BZR_COLUMNS is set, take it, user is always right
    try:
        return int(os.environ['BZR_COLUMNS'])
    except (KeyError, ValueError):
        pass

    isatty = getattr(sys.stdout, 'isatty', None)
    if isatty is None or not isatty():
        # Don't guess, setting BZR_COLUMNS is the recommended way to override.
        return None

    # If COLUMNS is set, take it, the terminal knows better (even inside a
    # given terminal, the application can decide to set COLUMNS to a lower
    # value (split screen) or a bigger value (scroll bars))
    try:
        return int(os.environ['COLUMNS'])
    except (KeyError, ValueError):
        pass

    width, height = _terminal_size(None, None)
    if width <= 0:
        # Consider invalid values as meaning no width
        return None

    return width


def _win32_terminal_size(width, height):
    width, height = win32utils.get_console_size(defaultx=width, defaulty=height)
    return width, height


def _ioctl_terminal_size(width, height):
    try:
        import struct, fcntl, termios
        s = struct.pack('HHHH', 0, 0, 0, 0)
        x = fcntl.ioctl(1, termios.TIOCGWINSZ, s)
        height, width = struct.unpack('HHHH', x)[0:2]
    except (IOError, AttributeError):
        pass
    return width, height


_terminal_size = None
"""Returns the terminal size as (width, height).

:param width: Default value for width.
:param height: Default value for height.

This is defined specifically for each OS and queries the size of the
controlling terminal. If any error occurs, the provided default values should
be returned.
"""
if sys.platform == 'win32':
    _terminal_size = _win32_terminal_size
else:
    _terminal_size = _ioctl_terminal_size


def _terminal_size_changed(signum, frame):
    """Set COLUMNS upon receiving a SIGnal for WINdow size CHange."""
    width, height = _terminal_size(None, None)
    if width is not None:
        os.environ['COLUMNS'] = str(width)


_registered_sigwinch = False


def watch_sigwinch():
    """Register for SIGWINCH, once and only once."""
    global _registered_sigwinch
    if not _registered_sigwinch:
        if sys.platform == 'win32':
            # Martin (gz) mentioned WINDOW_BUFFER_SIZE_RECORD from
            # ReadConsoleInput but I've no idea how to plug that in
            # the current design -- vila 20091216
            pass
        else:
            set_signal_handler(signal.SIGWINCH, _terminal_size_changed)
        _registered_sigwinch = True
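
# Usage sketch (illustrative, not part of the original module): a long-running
# interactive command would typically register the handler once at startup so
# later terminal_width() calls pick up resizes:
#
#   watch_sigwinch()
#   width = terminal_width() or default_terminal_width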


def supports_executable():

    raise errors.IllegalPath(path)


_WIN32_ERROR_DIRECTORY = 267 # Similar to errno.ENOTDIR


def _is_error_enotdir(e):
    """Check if this exception represents ENOTDIR.

    Unfortunately, python is very inconsistent about the exception
    here. The cases are:
      1) Linux, Mac OSX all versions seem to set errno == ENOTDIR
      2) Windows, Python2.4, uses errno == ERROR_DIRECTORY (267)
         which is the windows error code.
      3) Windows, Python2.5 uses errno == EINVAL and
         winerror == ERROR_DIRECTORY

    :param e: An Exception object (expected to be OSError with an errno
        attribute, but we should be able to cope with anything)
    :return: True if this represents an ENOTDIR error. False otherwise.
    """
    en = getattr(e, 'errno', None)
    if (en == errno.ENOTDIR
        or (sys.platform == 'win32'
            and (en == _WIN32_ERROR_DIRECTORY
                 or (en == errno.EINVAL
                     and getattr(e, 'winerror', None) == _WIN32_ERROR_DIRECTORY)
        ))):
        return True
    return False


def walkdirs(top, prefix=""):
    """Yield data about all the directories in a tree.

    This yields all the data about the contents of a directory at a time.
    After each directory has been yielded, if the caller has mutated the list
    to exclude some directories, they are then not descended into.

    The data yielded is of the form:
    ((directory-relpath, directory-path-from-top),
    [(relpath, basename, kind, lstat, path-from-top), ...]),
     - directory-relpath is the relative path of the directory being returned
       with respect to top. prefix is prepended to this.
     - directory-path-from-root is the path including top for this directory.
       It is suitable for use with os functions.
     - relpath is the relative path within the subtree being walked.
     - basename is the basename of the path
     - kind is the kind of the file now. If unknown then the file is not
       present within the tree - but it may be recorded as versioned. See
       versioned_kind.
     - lstat is the stat data *if* the file was statted.
     - planned, not implemented:
       path_from_tree_root is the path from the root of the tree.

    :param prefix: Prefix the relpaths that are yielded with 'prefix'. This
        allows one to walk a subtree but get paths that are relative to a tree
        rooted higher up.
    :return: an iterator over the dirs.
    """
    # TODO there is a bit of a smell where the results of the directory-
    # summary in this, and the path from the root, may not agree
    # depending on top and prefix - i.e. ./foo and foo as a pair leads to
    # potentially confusing output. We should make this more robust - but
    # not at a speed cost. RBC 20060731
    _lstat = os.lstat
    _directory = _directory_kind
    _listdir = os.listdir
    _kind_from_mode = file_kind_from_stat_mode
    pending = [(safe_unicode(prefix), "", _directory, None, safe_unicode(top))]
    while pending:
        # 0 - relpath, 1- basename, 2- kind, 3- stat, 4-toppath
        relroot, _, _, _, top = pending.pop()
        if relroot:
            relprefix = relroot + u'/'
        else:
            relprefix = ''
        top_slash = top + u'/'

        dirblock = []
        append = dirblock.append
        try:
            names = sorted(_listdir(top))
        except OSError, e:
            if not _is_error_enotdir(e):
                raise
        else:
            for name in names:
                abspath = top_slash + name
                statvalue = _lstat(abspath)
                kind = _kind_from_mode(statvalue.st_mode)
                append((relprefix + name, name, kind, statvalue, abspath))
        yield (relroot, top), dirblock

        # push the user specified dirs from dirblock
        pending.extend(d for d in reversed(dirblock) if d[2] == _directory)
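
# Usage sketch (illustrative, not part of the original module): pruning a
# directory from the walk by mutating the yielded block in place:
#
#   for (dir_relpath, dir_path), entries in walkdirs(u'.'):
#       # drop '.bzr' control directories before they are descended into
#       entries[:] = [e for e in entries
#                     if not (e[2] == 'directory' and e[1] == '.bzr')]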


class DirReader(object):
    """An interface for reading directories."""

    def top_prefix_to_starting_dir(self, top, prefix=""):
        """Converts top and prefix to a starting dir entry.

        :param top: A utf8 path
        :param prefix: An optional utf8 path to prefix output relative paths
            with.
        :return: A tuple starting with prefix, and ending with the native
            encoding of top.
        """
        raise NotImplementedError(self.top_prefix_to_starting_dir)

    def read_dir(self, prefix, top):
        """Read a specific dir.

        :param prefix: A utf8 prefix to be prepended to the path basenames.
        :param top: A natively encoded path to read.
        :return: A list of the directory's contents. Each item contains:
            (utf8_relpath, utf8_name, kind, lstatvalue, native_abspath)
        """
        raise NotImplementedError(self.read_dir)


_selected_dir_reader = None


def _walkdirs_utf8(top, prefix=""):
    """Yield data about all the directories in a tree.

    path-from-top might be unicode or utf8, but it is the correct path to
    pass to os functions to affect the file in question. (such as os.lstat)
    """
    global _selected_dir_reader
    if _selected_dir_reader is None:
        fs_encoding = _fs_enc.upper()
        if sys.platform == "win32" and win32utils.winver == 'Windows NT':
            # Win98 doesn't have unicode apis like FindFirstFileW
            # TODO: We possibly could support Win98 by falling back to the
            # original FindFirstFile, and using TCHAR instead of WCHAR,
            # but that gets a bit tricky, and requires custom compiling
            # for win98 anyway.
            from bzrlib._walkdirs_win32 import Win32ReadDir
            _selected_dir_reader = Win32ReadDir()
        elif fs_encoding in ('UTF-8', 'US-ASCII', 'ANSI_X3.4-1968'):
            # ANSI_X3.4-1968 is a form of ASCII
            try:
                from bzrlib._readdir_pyx import UTF8DirReader
                _selected_dir_reader = UTF8DirReader()
            except ImportError, e:
                failed_to_load_extension(e)

    if _selected_dir_reader is None:
        # Fallback to the python version
        _selected_dir_reader = UnicodeDirReader()

    # 0 - relpath, 1- basename, 2- kind, 3- stat, 4-toppath
    # But we don't actually use 1-3 in pending, so set them to None
    pending = [[_selected_dir_reader.top_prefix_to_starting_dir(top, prefix)]]
    read_dir = _selected_dir_reader.read_dir
    _directory = _directory_kind
    while pending:
        relroot, _, _, _, top = pending[-1].pop()
        if not pending[-1]:
            pending.pop()
        dirblock = sorted(read_dir(relroot, top))
        yield (relroot, top), dirblock
        # push the user specified dirs from dirblock
        next = [d for d in reversed(dirblock) if d[2] == _directory]
        if next:
            pending.append(next)


class UnicodeDirReader(DirReader):
    """A dir reader for non-utf8 file systems, which transcodes."""

    __slots__ = ['_utf8_encode']

    def __init__(self):
        self._utf8_encode = codecs.getencoder('utf8')

    def top_prefix_to_starting_dir(self, top, prefix=""):
        """See DirReader.top_prefix_to_starting_dir."""
        return (safe_utf8(prefix), None, None, None, safe_unicode(top))

    def read_dir(self, prefix, top):
        """Read a single directory from a non-utf8 file system.

        top, and the abspath element in the output are unicode, all other paths
        are utf8. Local disk IO is done via unicode calls to listdir etc.

        This is currently the fallback code path when the filesystem encoding is
        not UTF-8. It may be better to implement an alternative so that we can
        safely handle paths that are not properly decodable in the current
        encoding.

        See DirReader.read_dir for details.
        """
        _utf8_encode = self._utf8_encode
        _lstat = os.lstat
        _listdir = os.listdir
        _kind_from_mode = file_kind_from_stat_mode

        if prefix:
            relprefix = prefix + '/'
        else:
            relprefix = ''
        top_slash = top + u'/'

        dirblock = []
        append = dirblock.append
        for name in sorted(_listdir(top)):
            try:
                name_utf8 = _utf8_encode(name)[0]
            except UnicodeDecodeError:
                raise errors.BadFilenameEncoding(
                    _utf8_encode(relprefix)[0] + name, _fs_enc)
            abspath = top_slash + name
            statvalue = _lstat(abspath)
            kind = _kind_from_mode(statvalue.st_mode)
            append((relprefix + name_utf8, name_utf8, kind, statvalue, abspath))
        return dirblock


def copy_tree(from_path, to_path, handlers={}):
    """Copy all of the entries in from_path into to_path.

    :param from_path: The base directory to copy.
    :param to_path: The target directory. If it does not exist, it will
        be created.
    :param handlers: A dictionary of functions, which takes a source and
        destinations for files, directories, etc.
    """

            real_handlers[kind](abspath, relpath)
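
# Usage sketch (illustrative, not part of the original module), assuming
# handlers are keyed by entry kind such as 'symlink': override how one kind of
# entry is copied while keeping the defaults for the rest:
#
#   def skip_symlink(source, dest):
#       pass  # hypothetical handler: ignore symlinks entirely
#
#   copy_tree('src-dir', 'dest-dir', handlers={'symlink': skip_symlink})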


def copy_ownership(dst, src=None):
    """Copy usr/grp ownership from src file/dir to dst file/dir.

    If src is None, the containing directory is used as source. If chown
    fails, the error is ignored and a warning is printed.
    """
    chown = getattr(os, 'chown', None)
    if chown is None:
        return

    if src is None:
        src = os.path.dirname(dst)

    try:
        s = os.stat(src)
        chown(dst, s.st_uid, s.st_gid)
    except OSError, e:
        trace.warning("Unable to copy ownership from '%s' to '%s': IOError: %s."
                      % (src, dst, e))


def mkdir_with_ownership(path, ownership_src=None):
    """Create the directory 'path' with specified ownership.

    If ownership_src is given, copies (chown) usr/grp ownership
    from 'ownership_src' to 'path'. If ownership_src is None, use the
    containing dir ownership.
    """
    os.mkdir(path)
    copy_ownership(path, ownership_src)


def open_with_ownership(filename, mode='r', bufsize=-1, ownership_src=None):
    """Open the file 'filename' with the specified ownership.

    If ownership_src is specified, copy usr/grp ownership from ownership_src
    to filename. If ownership_src is None, copy ownership from containing
    directory.

    Returns the opened file object.
    """
    f = open(filename, mode, bufsize)
    copy_ownership(filename, ownership_src)
    return f


def path_prefix_key(path):
    """Generate a prefix-order path key for path.


    return user_encoding


def get_host_name():
    """Return the current unicode host name.

    This is meant to be used in place of socket.gethostname() because that
    behaves inconsistently on different platforms.
    """
    if sys.platform == "win32":
        return win32utils.get_host_name()
    else:
        return socket.gethostname().decode(get_user_encoding())


# We must not read/write any more than 64k at a time from/to a socket so we
# don't risk "no buffer space available" errors on some platforms. Windows in
# particular is likely to throw WSAECONNABORTED or WSAENOBUFS if given too much
# data at once.
MAX_SOCKET_CHUNK = 64 * 1024


def read_bytes_from_socket(sock, report_activity=None,
        max_read_size=MAX_SOCKET_CHUNK):
    """Read up to max_read_size of bytes from sock and notify of progress.

    Translates "Connection reset by peer" into file-like EOF (return an
    empty string rather than raise an error), and repeats the recv if
    interrupted by a signal.
    """
    while 1:
        try:
            bytes = sock.recv(max_read_size)
        except socket.error, e:
            eno = e.args[0]
            if eno == getattr(errno, "WSAECONNRESET", errno.ECONNRESET):
                # The connection was closed by the other side. Callers expect
                # an empty string to signal end-of-stream.
                return ""
            elif eno == errno.EINTR:
                # Retry the interrupted recv.
                continue
            raise
        else:
            if report_activity is not None:
                report_activity(len(bytes), 'read')
            return bytes


def recv_all(socket, count):
    """Receive an exact number of bytes.

    Regular Socket.recv() may return less than the requested number of bytes,
    depending on what's in the OS buffer. MSG_WAITALL is not available
    on all platforms, but this should work everywhere. This will return
    less than the requested amount if the remote end closes.

    This isn't optimized and is intended mostly for use in testing.
    """
    b = ''
    while len(b) < count:
        new = read_bytes_from_socket(socket, None, count - len(b))
        if new == '':
            break # eof
        b += new
    return b
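
# Usage sketch (illustrative, not part of the original module): mostly useful
# in tests that expect a fixed-size reply; a short result means the peer
# closed early:
#
#   data = recv_all(client_sock, 4)   # client_sock is a hypothetical socket
#   if len(data) < 4:
#       raise AssertionError('peer closed the connection early')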


def send_all(sock, bytes, report_activity=None):
    """Send all bytes on a socket.

    Breaks large blocks in smaller chunks to avoid buffering limitations on
    some platforms, and catches EINTR which may be thrown if the send is
    interrupted by a signal.

    This is preferred to socket.sendall(), because it avoids portability bugs
    and provides activity reporting.

    :param report_activity: Call this as bytes are read, see
        Transport._report_activity
    """
    sent_total = 0
    byte_count = len(bytes)
    while sent_total < byte_count:
        try:
            sent = sock.send(buffer(bytes, sent_total, MAX_SOCKET_CHUNK))
        except socket.error, e:
            if e.args[0] != errno.EINTR:
                raise
        else:
            sent_total += sent
            if report_activity is not None:
                report_activity(sent, 'write')


def dereference_path(path):


    base = abspath(pathjoin(base, '..', '..'))
    filename = pathjoin(base, resource_relpath)
    return open(filename, 'rU').read()


def file_kind_from_stat_mode_thunk(mode):
    global file_kind_from_stat_mode
    if file_kind_from_stat_mode is file_kind_from_stat_mode_thunk:
        try:
            from bzrlib._readdir_pyx import UTF8DirReader
            file_kind_from_stat_mode = UTF8DirReader().kind_from_mode
        except ImportError, e:
            # This is one time where we won't warn that an extension failed to
            # load. The extension is never available on Windows anyway.
            from bzrlib._readdir_py import (
                _kind_from_mode as file_kind_from_stat_mode
                )
    return file_kind_from_stat_mode(mode)
file_kind_from_stat_mode = file_kind_from_stat_mode_thunk


def file_kind(f, _lstat=os.lstat):
    try:
        return file_kind_from_stat_mode(_lstat(f).st_mode)
    except OSError, e:
        if getattr(e, 'errno', None) in (errno.ENOENT, errno.ENOTDIR):
            raise errors.NoSuchFile(f)
        raise


def until_no_eintr(f, *a, **kw):
    """Run f(*a, **kw), retrying if an EINTR error occurs.

    WARNING: you must be certain that it is safe to retry the call repeatedly
    if EINTR does occur. This is typically only true for low-level operations
    like os.read. If in any doubt, don't use this.

    Keep in mind that this is not a complete solution to EINTR. There is
    probably code in the Python standard library and other dependencies that
    may encounter EINTR if a signal arrives (and there is a signal handler for
    that signal). So this function can reduce the impact for IO that bzrlib
    directly controls, but it is not a complete solution.
    """
    # Borrowed from Twisted's twisted.python.util.untilConcludes function.
    while True:
        try:
            return f(*a, **kw)
        except (IOError, OSError), e:
            if e.errno == errno.EINTR:
                continue
            raise


def re_compile_checked(re_string, flags=0, where=""):
    """Return a compiled re, or raise a sensible error.

    This should only be used when compiling user-supplied REs.

    :param re_string: Text form of regular expression.
    :param flags: eg re.IGNORECASE
    :param where: Message explaining to the user the context where
        it occurred, eg 'log search filter'.
    """
    # from https://bugs.launchpad.net/bzr/+bug/251352
    try:
        re_obj = re.compile(re_string, flags)
        re_obj.search("")
        return re_obj
    except re.error, e:
        if where:
            where = ' in ' + where
        # despite the name 'error' is a type
        raise errors.BzrCommandError('Invalid regular expression%s: %r: %s'
            % (where, re_string, e))
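
# Usage sketch (illustrative, not part of the original module): user-supplied
# patterns get a BzrCommandError naming the context instead of a bare
# re.error traceback:
#
#   matcher = re_compile_checked(r'^bug-\d+', re.IGNORECASE,
#                                where='log search filter')
#   matcher.search('BUG-42')   # -> a match object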


if sys.platform == "win32":
    import msvcrt
    def getchar():
        return msvcrt.getch()
else:
    import tty
    import termios
    def getchar():
        fd = sys.stdin.fileno()
        settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, settings)
        return ch


if sys.platform == 'linux2':
    def _local_concurrency():
        concurrency = None
        prefix = 'processor'
        for line in file('/proc/cpuinfo', 'rb'):
            if line.startswith(prefix):
                concurrency = int(line[line.find(':')+1:]) + 1
        return concurrency
elif sys.platform == 'darwin':
    def _local_concurrency():
        return subprocess.Popen(['sysctl', '-n', 'hw.availcpu'],
                                stdout=subprocess.PIPE).communicate()[0]
elif sys.platform[0:7] == 'freebsd':
    def _local_concurrency():
        return subprocess.Popen(['sysctl', '-n', 'hw.ncpu'],
                                stdout=subprocess.PIPE).communicate()[0]
elif sys.platform == 'sunos5':
    def _local_concurrency():
        return subprocess.Popen(['psrinfo', '-p',],
                                stdout=subprocess.PIPE).communicate()[0]
elif sys.platform == "win32":
    def _local_concurrency():
        # This appears to return the number of cores.
        return os.environ.get('NUMBER_OF_PROCESSORS')
else:
    def _local_concurrency():
        # Who knows?
        return None


_cached_local_concurrency = None


def local_concurrency(use_cache=True):
    """Return how many processes can be run concurrently.

    Rely on platform specific implementations and default to 1 (one) if
    anything goes wrong.
    """
    global _cached_local_concurrency

    if _cached_local_concurrency is not None and use_cache:
        return _cached_local_concurrency

    concurrency = os.environ.get('BZR_CONCURRENCY', None)
    if concurrency is None:
        try:
            concurrency = _local_concurrency()
        except (OSError, IOError):
            pass
    try:
        concurrency = int(concurrency)
    except (TypeError, ValueError):
        concurrency = 1
    if use_cache:
        _cached_local_concurrency = concurrency
    return concurrency


class UnicodeOrBytesToBytesWriter(codecs.StreamWriter):
    """A stream writer that doesn't decode str arguments."""

    def __init__(self, encode, stream, errors='strict'):
        codecs.StreamWriter.__init__(self, stream, errors)
        self.encode = encode

    def write(self, object):
        if type(object) is str:
            self.stream.write(object)
        else:
            data, _ = self.encode(object, self.errors)
            self.stream.write(data)
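
# Usage sketch (illustrative, not part of the original module): wrapping a
# byte stream so unicode is encoded while already-encoded str chunks pass
# through untouched:
#
#   encoder = codecs.getencoder('utf-8')
#   out = UnicodeOrBytesToBytesWriter(encoder, sys.stdout)
#   out.write(u'caf\xe9\n')    # encoded to UTF-8
#   out.write('raw bytes\n')   # written as-is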


if sys.platform == 'win32':
    def open_file(filename, mode='r', bufsize=-1):
        """This function is used to override the ``open`` builtin.

        But it uses O_NOINHERIT flag so the file handle is not inherited by
        child processes. Deleting or renaming a closed file opened with this
        function does not block child processes.
        """
        writing = 'w' in mode
        appending = 'a' in mode
        updating = '+' in mode
        binary = 'b' in mode

        flags = O_NOINHERIT
        # see http://msdn.microsoft.com/en-us/library/yeby3zcb%28VS.71%29.aspx
        # for the flags for each mode.
        if binary:
            flags |= O_BINARY
        else:
            flags |= O_TEXT

        if writing:
            if updating:
                flags |= os.O_RDWR
            else:
                flags |= os.O_WRONLY
            flags |= os.O_CREAT | os.O_TRUNC
        elif appending:
            if updating:
                flags |= os.O_RDWR
            else:
                flags |= os.O_WRONLY
            flags |= os.O_CREAT | os.O_APPEND
        else: # reading
            if updating:
                flags |= os.O_RDWR
            else:
                flags |= os.O_RDONLY

        return os.fdopen(os.open(filename, flags), mode, bufsize)