    for dirname in dir_list:
        if is_inside(dirname, fname) or is_inside(fname, dirname):

def pumpfile(from_file, to_file, read_length=-1, buff_size=32768,
             report_activity=None, direction='read'):
    """Copy contents of one file to another.

    The read_length can either be -1 to read to end-of-file (EOF) or
    it can specify the maximum number of bytes to read.

    The buff_size represents the maximum size for each read operation
    performed on from_file.

    :param report_activity: Call this as bytes are read, see
        Transport._report_activity
    :param direction: Will be passed to report_activity

    :return: The number of bytes copied.
    """
    # read specified number of bytes
    while read_length > 0:
        num_bytes_to_read = min(read_length, buff_size)
        block = from_file.read(num_bytes_to_read)
        if report_activity is not None:
            report_activity(len(block), direction)
        actual_bytes_read = len(block)
        read_length -= actual_bytes_read
        length += actual_bytes_read
        block = from_file.read(buff_size)
        if report_activity is not None:
            report_activity(len(block), direction)
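
# Illustrative usage sketch (not part of the original module): pumpfile() as
# defined above copies between two file objects and returns the number of
# bytes copied; report_activity and direction are optional.  The StringIO
# objects are only assumptions for this example.
#
#   >>> from cStringIO import StringIO
#   >>> src, dst = StringIO('some bytes'), StringIO()
#   >>> copied = pumpfile(src, dst, read_length=4, buff_size=2,
#   ...                   report_activity=None, direction='read')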

def pump_string_file(bytes, file_handle, segment_size=None):
    """Write bytes to file_handle in many smaller writes.

    :param bytes: The string to write.
    :param file_handle: The file to write to.
    """
    # Write data in chunks rather than all at once, because very large
    # writes fail on some platforms (e.g. Windows with SMB mounted
    # drives).
    if segment_size is None:
        segment_size = 5242880 # 5MB
    segments = range(len(bytes) / segment_size + 1)
    write = file_handle.write
    for segment_index in segments:
        segment = buffer(bytes, segment_index * segment_size, segment_size)
        write(segment)

def file_iterator(input_file, readsize=32768):

def local_time_offset(t=None):
    """Return offset of local zone from GMT, either at present or at time t."""
    offset = datetime.fromtimestamp(t) - datetime.utcfromtimestamp(t)
    return offset.days * 86400 + offset.seconds

weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']

def format_date(t, offset=0, timezone='original', date_fmt=None,
                show_offset=True):
    """Return a formatted date string.

    :param t: Seconds since the epoch.
    :param offset: Timezone offset in seconds east of utc.
    :param timezone: How to display the time: 'utc', 'original' for the
        timezone specified by offset, or 'local' for the process's current
        timezone.
    :param date_fmt: strftime format.
    :param show_offset: Whether to append the timezone.
    """
    (date_fmt, tt, offset_str) = \
        _format_date(t, offset, timezone, date_fmt, show_offset)
    date_fmt = date_fmt.replace('%a', weekdays[tt[6]])
    date_str = time.strftime(date_fmt, tt)
    return date_str + offset_str
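
# Illustrative usage sketch (not part of the original module): format_date()
# renders a timestamp according to the offset/timezone rules documented above;
# the exact output depends on the environment, so none is shown here.
#
#   >>> s = format_date(time.time(), offset=3600, timezone='original')
#   >>> s = format_date(0.0, timezone='utc', show_offset=False)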

def format_local_date(t, offset=0, timezone='original', date_fmt=None,
                      show_offset=True):
    """Return a unicode date string formatted according to the current locale.

    :param t: Seconds since the epoch.
    :param offset: Timezone offset in seconds east of utc.
    :param timezone: How to display the time: 'utc', 'original' for the
        timezone specified by offset, or 'local' for the process's current
        timezone.
    :param date_fmt: strftime format.
    :param show_offset: Whether to append the timezone.
    """
    (date_fmt, tt, offset_str) = \
        _format_date(t, offset, timezone, date_fmt, show_offset)
    date_str = time.strftime(date_fmt, tt)
    if not isinstance(date_str, unicode):
        date_str = date_str.decode(get_user_encoding(), 'replace')
    return date_str + offset_str

def _format_date(t, offset, timezone, date_fmt, show_offset):
    if timezone == 'utc':
        tt = time.gmtime(t)
    elif timezone == 'original':
        tt = time.gmtime(t + offset)
    elif timezone == 'local':
        tt = time.localtime(t)
        offset = local_time_offset(t)
    else:
        raise errors.UnsupportedTimezoneFormat(timezone)
    if date_fmt is None:
        date_fmt = "%a %Y-%m-%d %H:%M:%S"
    offset_str = ' %+03d%02d' % (offset / 3600, (offset / 60) % 60)
    return (date_fmt, tt, offset_str)

def compact_date(when):
    return time.strftime('%Y%m%d%H%M%S', time.gmtime(when))

def format_delta(delta):
    """Get a nice looking string for a time delta.

    :param delta: The time difference in seconds, can be positive or negative.
        positive indicates time in the past, negative indicates time in the
        future. (usually time.time() - stored_time)
    :return: String formatted to show approximate resolution
    """
    direction = 'in the future'

    if seconds < 90: # print seconds up to 90 seconds
        if seconds == 1:
            return '%d second %s' % (seconds, direction,)
        else:
            return '%d seconds %s' % (seconds, direction)

    minutes = int(seconds / 60)
    seconds -= 60 * minutes
    if minutes < 90: # print minutes, seconds up to 90 minutes
        if minutes == 1:
            return '%d minute, %d second%s %s' % (
                minutes, seconds, plural_seconds, direction)
        else:
            return '%d minutes, %d second%s %s' % (
                minutes, seconds, plural_seconds, direction)

    hours = int(minutes / 60)
    minutes -= 60 * hours
    if hours == 1:
        return '%d hour, %d minute%s %s' % (hours, minutes,
                                            plural_minutes, direction)
    return '%d hours, %d minute%s %s' % (hours, minutes,
                                         plural_minutes, direction)
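
# Illustrative usage sketch (not part of the original module): format_delta()
# takes the difference in seconds (positive meaning "ago", as in
# time.time() - stored_time) and returns a rough human-readable description.
#
#   >>> age = format_delta(time.time() - stored_time)   # 'stored_time' is hypothetical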

def filesize(f):
    """Return size of given open file."""
            raise errors.BzrError("sorry, %r not allowed in path" % f)
        elif (f == '.') or (f == ''):

        if (f == '..') or (f is None) or (f == ''):
            raise errors.BzrError("sorry, %r not allowed in path" % f)
    return pathjoin(*p)

@deprecated_function(zero_nine)
def appendpath(p1, p2):
    return pathjoin(p1, p2)

def parent_directories(filename):
    """Return the list of parent directories, deepest first.

    For example, parent_directories("a/b/c") -> ["a/b", "a"].
    """
    parents = []
    parts = splitpath(dirname(filename))
    while parts:
        parents.append(joinpath(parts))
        parts.pop()
    return parents

_extension_load_failures = []


def failed_to_load_extension(exception):
    """Handle failing to load a binary extension.

    This should be called from the ImportError block guarding the attempt to
    import the native extension. If this function returns, the pure-Python
    implementation should be loaded instead::

    >>> try:
    >>>     import bzrlib._fictional_extension_pyx
    >>> except ImportError, e:
    >>>     bzrlib.osutils.failed_to_load_extension(e)
    >>>     import bzrlib._fictional_extension_py
    """
    # NB: This docstring is just an example, not a doctest, because doctest
    # currently can't cope with the use of lazy imports in this namespace --
    # This currently doesn't report the failure at the time it occurs, because
    # they tend to happen very early in startup when we can't check config
    # files etc, and also we want to report all failures but not spam the user
    from bzrlib import trace
    exception_str = str(exception)
    if exception_str not in _extension_load_failures:
        trace.mutter("failed to load compiled extension: %s" % exception_str)
        _extension_load_failures.append(exception_str)

def report_extension_load_failures():
    if not _extension_load_failures:
        return
    from bzrlib.config import GlobalConfig
    if GlobalConfig().get_user_option_as_bool('ignore_missing_extensions'):
        return
    # the warnings framework should by default show this only once
        "bzr: warning: Failed to load compiled extensions:\n"
        " Bazaar can run, but performance may be reduced.\n"
        " Check Bazaar is correctly installed or set ignore_missing_extensions"
        % '\n '.join(_extension_load_failures,))

try:
    from bzrlib._chunks_to_lines_pyx import chunks_to_lines
except ImportError, e:
    failed_to_load_extension(e)
    from bzrlib._chunks_to_lines_py import chunks_to_lines

def split_lines(s):
    """Split s into lines, but without removing the newline characters."""
    # Trivially convert a fulltext into a 'chunked' representation, and let
    # chunks_to_lines do the heavy lifting.
    if isinstance(s, str):
        # chunks_to_lines only supports 8-bit strings
        return chunks_to_lines([s])
    return _split_lines(s)


def _split_lines(s):
    """Split s into lines, but without removing the newline characters.

    This supports Unicode or plain string objects.
    """
    lines = s.split('\n')
    result = [line + '\n' for line in lines[:-1]]

def link_or_copy(src, dest):
    """Hardlink a file, or copy it if it can't be hardlinked."""
    if not hardlinks_good():
        shutil.copyfile(src, dest)
        return
    try:
        os.link(src, dest)
    except (OSError, IOError), e:
        if e.errno != errno.EXDEV:
            raise
        shutil.copyfile(src, dest)

def delete_any(path):
    """Delete a file, symlink or directory.

    Will delete even if readonly.
    """
    try:
        _delete_file_or_dir(path)
    except (OSError, IOError), e:
        if e.errno in (errno.EPERM, errno.EACCES):
            # make writable and try again
            try:
                make_writable(path)
            except (OSError, IOError):
                pass
            _delete_file_or_dir(path)

def _delete_file_or_dir(path):
    # Look Before You Leap (LBYL) is appropriate here instead of Easier to Ask for
    # Forgiveness than Permission (EAFP) because:
    # - root can damage a solaris file system by using unlink,
    # - unlink raises different exceptions on different OSes (linux: EISDIR, win32:
    #   EACCES, OSX: EPERM) when invoked on a directory.
    if isdir(path): # Takes care of symlinks
        os.rmdir(path)
    else:
        os.unlink(path)
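
# Illustrative usage sketch (not part of the original module): delete_any()
# removes a file, symlink or directory even when it is readonly, making the
# path writable and retrying on EPERM/EACCES.  The paths below are
# hypothetical.
#
#   >>> delete_any('scratch-file')
#   >>> delete_any('scratch-directory')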

def has_symlinks():
    if getattr(os, 'symlink', None) is not None:
        return True
    else:
        return False


def has_hardlinks():
    if getattr(os, 'link', None) is not None:
        return True
    else:
        return False

def host_os_dereferences_symlinks():
    return (has_symlinks()
            and sys.platform not in ('cygwin', 'win32'))

def readlink(abspath):
    """Return a string representing the path to which the symbolic link points.

    :param abspath: The link absolute unicode path.

    This is guaranteed to return the symbolic link in unicode in all python
    """
    link = abspath.encode(_fs_enc)
    target = os.readlink(link)
    target = target.decode(_fs_enc)
    return target

def contains_whitespace(s):
    """True if there are any whitespace characters in s."""
    # string.whitespace can include '\xa0' in certain locales, because it is
    # considered "non-breaking-space" as part of ISO-8859-1. But it
    # 1) Isn't a breaking whitespace
    # 2) Isn't one of ' \t\r\n' which are characters we sometimes use as
    #    "whitespace"
    # 3) '\xa0' isn't unicode safe since it is >128.

    # This should *not* be a unicode set of characters in case the source
    # string is not a Unicode string. We can auto-up-cast the characters since
    # they are ascii, but we don't want to auto-up-cast the string in case it
    # is utf-8
    for ch in ' \t\n\r\v\f':
        if ch in s:
            return True
    return False
    avoids that problem.
    """
    if len(base) < MIN_ABS_PATHLENGTH:
        # must have space for e.g. a drive letter
        raise ValueError('%r is too short to calculate a relative path'
                         % (base,))

    rp = abspath(path)

    while True:
        if len(head) <= len(base) and head != base:
            raise errors.PathNotChild(rp, base)
        if head == base:
            break
        head, tail = split(head)

    return pathjoin(*reversed(s))
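
# Illustrative usage sketch (not part of the original module): relpath()
# returns the portion of 'path' below 'base', and raises PathNotChild when
# the path is not inside base.  The POSIX paths are hypothetical.
#
#   >>> relpath('/srv/work', '/srv/work/dir/file')
#   'dir/file'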

def _cicp_canonical_relpath(base, path):
    """Return the canonical path relative to base.

    Like relpath, but on case-insensitive-case-preserving file-systems, this
    will return the relpath as stored on the file-system rather than in the
    case specified in the input string, for all existing portions of the path.

    This will cause O(N) behaviour if called for every path in a tree; if you
    have a number of paths to convert, you should use canonical_relpaths().
    """
    # TODO: it should be possible to optimize this for Windows by using the
    # win32 API FindFiles function to look for the specified name - but using
    # os.listdir() still gives us the correct, platform agnostic semantics in
    rel = relpath(base, path)
    # '.' will have been turned into ''
    abs_base = abspath(base)
    _listdir = os.listdir
    # use an explicit iterator so we can easily consume the rest on early exit.
    bit_iter = iter(rel.split('/'))
    for bit in bit_iter:
        lbit = bit.lower()
        for look in _listdir(current):
            if lbit == look.lower():
                current = pathjoin(current, look)
                break
        else:
            # got to the end, nothing matched, so we just return the
            # non-existing bits as they were specified (the filename may be
            # the target of a move, for example).
            current = pathjoin(current, bit, *list(bit_iter))
            break
    return current[len(abs_base)+1:]

# XXX - TODO - we need better detection/integration of case-insensitive
# file-systems; Linux often sees FAT32 devices (or NFS-mounted OSX
# filesystems), for example, so could probably benefit from the same basic
# support there. For now though, only Windows and OSX get that support, and
# they get it for *all* file-systems!
if sys.platform in ('win32', 'darwin'):
    canonical_relpath = _cicp_canonical_relpath
else:
    canonical_relpath = relpath

def canonical_relpaths(base, paths):
    """Create an iterable to canonicalize a sequence of relative paths.

    The intent is for this implementation to use a cache, vastly speeding
    up multiple transformations in the same directory.
    """
    # but for now, we haven't optimized...
    return [canonical_relpath(base, p) for p in paths]

def safe_unicode(unicode_or_utf8_string):
    """Coerce unicode_or_utf8_string into unicode.

    If it is unicode, it is returned.
    Otherwise it is decoded from utf-8. If decoding fails, the exception is
    wrapped in a BzrBadParameterNotUnicode exception.
    """
    if isinstance(unicode_or_utf8_string, unicode):
        return unicode_or_utf8_string
    try:
        return unicode_or_utf8_string.decode('utf8')
    except UnicodeDecodeError:
        raise errors.BzrBadParameterNotUnicode(unicode_or_utf8_string)

def safe_utf8(unicode_or_utf8_string):
    """Coerce unicode_or_utf8_string to a utf8 string.

    If it is a str, it is returned.
    If it is Unicode, it is encoded into a utf-8 string.
    """
    if isinstance(unicode_or_utf8_string, str):
        # TODO: jam 20070209 This is overkill, and probably has an impact on
        #       performance if we are dealing with lots of apis that want a
        try:
            # Make sure it is a valid utf-8 string
            unicode_or_utf8_string.decode('utf-8')
        except UnicodeDecodeError:
            raise errors.BzrBadParameterNotUnicode(unicode_or_utf8_string)
        return unicode_or_utf8_string
    return unicode_or_utf8_string.encode('utf-8')
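
# Illustrative usage sketch (not part of the original module): safe_unicode()
# and safe_utf8() accept either unicode or utf-8 encoded str and normalise to
# one type, raising BzrBadParameterNotUnicode for undecodable input.
#
#   >>> safe_unicode('caf\xc3\xa9') == u'caf\xe9'
#   True
#   >>> safe_utf8(u'caf\xe9')
#   'caf\xc3\xa9'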

_revision_id_warning = ('Unicode revision ids were deprecated in bzr 0.15.'
                        ' Revision id generators should be creating utf8'
                        ' revision ids.')


def safe_revision_id(unicode_or_utf8_string, warn=True):
    """Revision ids should now be utf8, but at one point they were unicode.

    :param unicode_or_utf8_string: A possibly Unicode revision_id. (can also be
        utf8 or None).
    :param warn: Functions that are sanitizing user data can set warn=False
    :return: None or a utf8 revision id.
    """
    if (unicode_or_utf8_string is None
        or unicode_or_utf8_string.__class__ == str):
        return unicode_or_utf8_string
    if warn:
        symbol_versioning.warn(_revision_id_warning, DeprecationWarning,
                               stacklevel=2)
    return cache_utf8.encode(unicode_or_utf8_string)

_file_id_warning = ('Unicode file ids were deprecated in bzr 0.15. File id'
                    ' generators should be creating utf8 file ids.')


def safe_file_id(unicode_or_utf8_string, warn=True):
    """File ids should now be utf8, but at one point they were unicode.

    This is the same as safe_utf8, except it uses the cached encode functions
    to save a little bit of performance.

    :param unicode_or_utf8_string: A possibly Unicode file_id. (can also be
        utf8 or None).
    :param warn: Functions that are sanitizing user data can set warn=False
    :return: None or a utf8 file id.
    """
    if (unicode_or_utf8_string is None
        or unicode_or_utf8_string.__class__ == str):
        return unicode_or_utf8_string
    if warn:
        symbol_versioning.warn(_file_id_warning, DeprecationWarning,
                               stacklevel=2)
    return cache_utf8.encode(unicode_or_utf8_string)

_platform_normalizes_filenames = False

def supports_executable():
    return sys.platform != "win32"

def supports_posix_readonly():
    """Return True if 'readonly' has POSIX semantics, False otherwise.

    Notably, a win32 readonly file cannot be deleted, unlike POSIX where the
    directory controls creation/deletion, etc.

    And under win32, readonly means that the directory itself cannot be
    deleted. The contents of a readonly directory can be changed, unlike POSIX
    where files in readonly directories cannot be added, deleted or renamed.
    """
    return sys.platform != "win32"

def set_or_unset_env(env_variable, value):
    """Modify the environment, setting or removing the env_variable.

    :param env_variable: The environment variable in question
    :param value: The value to set the environment to. If None, then
        the variable will be removed.
    :return: The original value of the environment variable.
    """
    orig_val = os.environ.get(env_variable)
    if value is None:
        if orig_val is not None:
            del os.environ[env_variable]
    else:
        if isinstance(value, unicode):
            value = value.encode(get_user_encoding())
        os.environ[env_variable] = value
    return orig_val
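
# Illustrative usage sketch (not part of the original module): the return
# value is the previous setting, so callers (typically tests) can restore it.
# 'BZR_EXAMPLE_VAR' is a hypothetical variable name.
#
#   >>> old = set_or_unset_env('BZR_EXAMPLE_VAR', 'new value')
#   >>> _ = set_or_unset_env('BZR_EXAMPLE_VAR', old)   # restore the old state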

_validWin32PathRE = re.compile(r'^([A-Za-z]:[/\\])?[^:<>*"?\|]*$')

def check_legal_path(path):
    """Check whether the supplied path is legal.
    This is only required on Windows, so we don't test on other platforms
    right now.
    """
    if sys.platform != "win32":
        return
    if _validWin32PathRE.match(path) is None:
        raise errors.IllegalPath(path)

_WIN32_ERROR_DIRECTORY = 267 # Similar to errno.ENOTDIR


def _is_error_enotdir(e):
    """Check if this exception represents ENOTDIR.

    Unfortunately, python is very inconsistent about the exception
    here. The cases are:
      1) Linux, Mac OSX all versions seem to set errno == ENOTDIR
      2) Windows, Python2.4, uses errno == ERROR_DIRECTORY (267)
         which is the windows error code.
      3) Windows, Python2.5 uses errno == EINVAL and
         winerror == ERROR_DIRECTORY

    :param e: An Exception object (expected to be OSError with an errno
        attribute, but we should be able to cope with anything)
    :return: True if this represents an ENOTDIR error. False otherwise.
    """
    en = getattr(e, 'errno', None)
    if (en == errno.ENOTDIR
        or (sys.platform == 'win32'
            and (en == _WIN32_ERROR_DIRECTORY
                 or (en == errno.EINVAL
                     and getattr(e, 'winerror', None) == _WIN32_ERROR_DIRECTORY)
        ))):
        return True
    return False

def walkdirs(top, prefix=""):
    """Yield data about all the directories in a tree.

    This yields all the data about the contents of a directory at a time.
    After each directory has been yielded, if the caller has mutated the list
    to exclude some directories, they are then not descended into.

    The data yielded is of the form:
    ((directory-relpath, directory-path-from-top),
    [(relpath, basename, kind, lstat, path-from-top), ...]),
     - directory-relpath is the relative path of the directory being returned
       with respect to top. prefix is prepended to this.
     - directory-path-from-root is the path including top for this directory.
       It is suitable for use with os functions.
     - relpath is the relative path within the subtree being walked.
     - basename is the basename of the path
     - kind is the kind of the file now. If unknown then the file is not
       present within the tree - but it may be recorded as versioned. See
     - lstat is the stat data *if* the file was statted.
     - planned, not implemented:
       path_from_tree_root is the path from the root of the tree.

    :param prefix: Prefix the relpaths that are yielded with 'prefix'. This
        allows one to walk a subtree but get paths that are relative to a tree
        rooted higher up.
    :return: an iterator over the dirs.
    """
    #TODO there is a bit of a smell where the results of the directory-
    # summary in this, and the path from the root, may not agree
    # depending on top and prefix - i.e. ./foo and foo as a pair leads to
    # potentially confusing output. We should make this more robust - but
    # not at a speed cost. RBC 20060731
    _directory = _directory_kind
    _listdir = os.listdir
    _kind_from_mode = file_kind_from_stat_mode
    pending = [(safe_unicode(prefix), "", _directory, None, safe_unicode(top))]
    while pending:
        # 0 - relpath, 1- basename, 2- kind, 3- stat, 4-toppath
        relroot, _, _, _, top = pending.pop()
        relprefix = relroot + u'/'
        top_slash = top + u'/'

        append = dirblock.append
        try:
            names = sorted(_listdir(top))
        except OSError, e:
            if not _is_error_enotdir(e):
                raise
        for name in names:
            abspath = top_slash + name
            statvalue = _lstat(abspath)
            kind = _kind_from_mode(statvalue.st_mode)
            append((relprefix + name, name, kind, statvalue, abspath))
        yield (relroot, top), dirblock

        # push the user specified dirs from dirblock
        pending.extend(d for d in reversed(dirblock) if d[2] == _directory)
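
# Illustrative usage sketch (not part of the original module): callers may
# prune the walk by mutating the yielded dirblock in place before the next
# iteration, as the docstring above describes.  Skipping '.bzr' is just an
# example.
#
#   >>> for (dirpath, dirabspath), dirblock in walkdirs('.'):
#   ...     dirblock[:] = [entry for entry in dirblock if entry[1] != '.bzr']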

class DirReader(object):
    """An interface for reading directories."""

    def top_prefix_to_starting_dir(self, top, prefix=""):
        """Converts top and prefix to a starting dir entry

        :param top: A utf8 path
        :param prefix: An optional utf8 path to prefix output relative paths
            with.
        :return: A tuple starting with prefix, and ending with the native
            encoding of top.
        """
        raise NotImplementedError(self.top_prefix_to_starting_dir)

    def read_dir(self, prefix, top):
        """Read a specific dir.

        :param prefix: A utf8 prefix to be prepended to the path basenames.
        :param top: A natively encoded path to read.
        :return: A list of the directories contents. Each item contains:
            (utf8_relpath, utf8_name, kind, lstatvalue, native_abspath)
        """
        raise NotImplementedError(self.read_dir)

_selected_dir_reader = None

def _walkdirs_utf8(top, prefix=""):
    """Yield data about all the directories in a tree.

    This yields the same information as walkdirs() only each entry is yielded
    in utf-8. On platforms which have a filesystem encoding of utf8 the paths
    are returned as exact byte-strings.

    :return: yields a tuple of (dir_info, [file_info])
        dir_info is (utf8_relpath, path-from-top)
        file_info is (utf8_relpath, utf8_name, kind, lstat, path-from-top)
        if top is an absolute path, path-from-top is also an absolute path.
        path-from-top might be unicode or utf8, but it is the correct path to
        pass to os functions to affect the file in question. (such as os.lstat)
    """
    global _selected_dir_reader
    if _selected_dir_reader is None:
        fs_encoding = _fs_enc.upper()
        if sys.platform == "win32" and win32utils.winver == 'Windows NT':
            # Win98 doesn't have unicode apis like FindFirstFileW
            # TODO: We possibly could support Win98 by falling back to the
            #       original FindFirstFile, and using TCHAR instead of WCHAR,
            #       but that gets a bit tricky, and requires custom compiling
            from bzrlib._walkdirs_win32 import Win32ReadDir
            _selected_dir_reader = Win32ReadDir()
        elif fs_encoding in ('UTF-8', 'US-ASCII', 'ANSI_X3.4-1968'):
            # ANSI_X3.4-1968 is a form of ASCII
            try:
                from bzrlib._readdir_pyx import UTF8DirReader
                _selected_dir_reader = UTF8DirReader()
            except ImportError, e:
                failed_to_load_extension(e)

    if _selected_dir_reader is None:
        # Fallback to the python version
        _selected_dir_reader = UnicodeDirReader()

    # 0 - relpath, 1- basename, 2- kind, 3- stat, 4-toppath
    # But we don't actually use 1-3 in pending, so set them to None
    pending = [[_selected_dir_reader.top_prefix_to_starting_dir(top, prefix)]]
    read_dir = _selected_dir_reader.read_dir
    _directory = _directory_kind
    while pending:
        relroot, _, _, _, top = pending[-1].pop()
        dirblock = sorted(read_dir(relroot, top))
        yield (relroot, top), dirblock
        # push the user specified dirs from dirblock
        next = [d for d in reversed(dirblock) if d[2] == _directory]
        pending.append(next)

class UnicodeDirReader(DirReader):
    """A dir reader for non-utf8 file systems, which transcodes."""

    __slots__ = ['_utf8_encode']

    def __init__(self):
        self._utf8_encode = codecs.getencoder('utf8')

    def top_prefix_to_starting_dir(self, top, prefix=""):
        """See DirReader.top_prefix_to_starting_dir."""
        return (safe_utf8(prefix), None, None, None, safe_unicode(top))

    def read_dir(self, prefix, top):
        """Read a single directory from a non-utf8 file system.

        top, and the abspath element in the output are unicode, all other paths
        are utf8. Local disk IO is done via unicode calls to listdir etc.

        This is currently the fallback code path when the filesystem encoding is
        not UTF-8. It may be better to implement an alternative so that we can
        safely handle paths that are not properly decodable in the current
        encoding.

        See DirReader.read_dir for details.
        """
        _utf8_encode = self._utf8_encode
        _listdir = os.listdir
        _kind_from_mode = file_kind_from_stat_mode

        relprefix = prefix + '/'
        top_slash = top + u'/'

        append = dirblock.append
        for name in sorted(_listdir(top)):
            try:
                name_utf8 = _utf8_encode(name)[0]
            except UnicodeDecodeError:
                raise errors.BadFilenameEncoding(
                    _utf8_encode(relprefix)[0] + name, _fs_enc)
            abspath = top_slash + name
            statvalue = _lstat(abspath)
            kind = _kind_from_mode(statvalue.st_mode)
            append((relprefix + name_utf8, name_utf8, kind, statvalue, abspath))

def copy_tree(from_path, to_path, handlers={}):
    """Copy all of the entries in from_path into to_path.

    :param from_path: The base directory to copy.
    :param to_path: The target directory. If it does not exist, it will
        be created.
    :param handlers: A dictionary of functions, which takes a source and
        destinations for files, directories, etc.
        It is keyed on the file kind, such as 'directory', 'symlink', or 'file'
        'file', 'directory', and 'symlink' should always exist.
        If they are missing, they will be replaced with 'os.mkdir()',
        'os.readlink() + os.symlink()', and 'shutil.copy2()', respectively.
    """
    # Now, just copy the existing cached tree to the new location
    # We use a cheap trick here.
    # Absolute paths are prefixed with the first parameter
    # relative paths are prefixed with the second.
    # So we can get both the source and target returned
    # without any extra work.

    def copy_dir(source, dest):
        os.mkdir(dest)

    def copy_link(source, dest):
        """Copy the contents of a symlink"""
        link_to = os.readlink(source)
        os.symlink(link_to, dest)

    real_handlers = {'file':shutil.copy2,
                     'symlink':copy_link,
                     'directory':copy_dir,
                    }
    real_handlers.update(handlers)

    if not os.path.exists(to_path):
        real_handlers['directory'](from_path, to_path)

    for dir_info, entries in walkdirs(from_path, prefix=to_path):
        for relpath, name, kind, st, abspath in entries:
            real_handlers[kind](abspath, relpath)
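
# Illustrative usage sketch (not part of the original module): the handlers
# mapping lets a caller override how individual kinds are copied; unspecified
# kinds keep the defaults listed in the docstring.  The paths and the no-op
# symlink handler are hypothetical.
#
#   >>> def ignore_symlink(source, dest):
#   ...     pass
#   >>> copy_tree('src-tree', 'dest-tree', handlers={'symlink': ignore_symlink})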

def path_prefix_key(path):


def compare_paths_prefix_order(path_a, path_b):
    key_a = path_prefix_key(path_a)
    key_b = path_prefix_key(path_b)
    return cmp(key_a, key_b)

_cached_user_encoding = None

def get_user_encoding(use_cache=True):
    """Find out what the preferred user encoding is.

    This is generally the encoding that is used for command line parameters
    and file contents. This may be different from the terminal encoding
    or the filesystem encoding.

    :param use_cache: Enable cache for detected encoding.
        (This parameter is turned on by default,
        and required only for selftesting)

    :return: A string defining the preferred user encoding
    """
    global _cached_user_encoding
    if _cached_user_encoding is not None and use_cache:
        return _cached_user_encoding

    if sys.platform == 'darwin':
        # python locale.getpreferredencoding() always return
        # 'mac-roman' on darwin. That's a lie.
        sys.platform = 'posix'
        if os.environ.get('LANG', None) is None:
            # If LANG is not set, we end up with 'ascii', which is bad
            # ('mac-roman' is more than ascii), so we set a default which
            # will give us UTF-8 (which appears to work in all cases on
            # OSX). Users are still free to override LANG of course, as
            # long as it give us something meaningful. This work-around
            # *may* not be needed with python 3k and/or OSX 10.5, but will
            # work with them too -- vila 20080908
            os.environ['LANG'] = 'en_US.UTF-8'
        sys.platform = 'darwin'

    try:
        user_encoding = locale.getpreferredencoding()
    except locale.Error, e:
        sys.stderr.write('bzr: warning: %s\n'
                         ' Could not determine what text encoding to use.\n'
                         ' This error usually means your Python interpreter\n'
                         ' doesn\'t support the locale set by $LANG (%s)\n'
                         " Continuing with ascii encoding.\n"
                         % (e, os.environ.get('LANG')))
        user_encoding = 'ascii'

    # Windows returns 'cp0' to indicate there is no code page. So we'll just
    # treat that as ASCII, and not support printing unicode characters to the
    # console.
    #
    # For python scripts run under vim, we get '', so also treat that as ASCII
    if user_encoding in (None, 'cp0', ''):
        user_encoding = 'ascii'
    else:
        try:
            codecs.lookup(user_encoding)
        except LookupError:
            sys.stderr.write('bzr: warning:'
                             ' unknown encoding %s.'
                             ' Continuing with ascii encoding.\n'
                             % user_encoding)
            user_encoding = 'ascii'

    if use_cache:
        _cached_user_encoding = user_encoding

    return user_encoding

def get_host_name():
    """Return the current unicode host name.

    This is meant to be used in place of socket.gethostname() because that
    behaves inconsistently on different platforms.
    """
    if sys.platform == "win32":
        return win32utils.get_host_name()
    return socket.gethostname().decode(get_user_encoding())

def recv_all(socket, bytes):
    """Receive an exact number of bytes.

    Regular Socket.recv() may return less than the requested number of bytes,
    depending on what's in the OS buffer. MSG_WAITALL is not available
    on all platforms, but this should work everywhere. This will return
    less than the requested amount if the remote end closes.

    This isn't optimized and is intended mostly for use in testing.
    """
    b = ''
    while len(b) < bytes:
        new = until_no_eintr(socket.recv, bytes - len(b))
        if new == '':
            break # eof
        b += new
    return b

def send_all(socket, bytes, report_activity=None):
    """Send all bytes on a socket.

    Regular socket.sendall() can give socket error 10053 on Windows. This
    implementation sends no more than 64k at a time, which avoids this problem.

    :param report_activity: Call this as bytes are read, see
        Transport._report_activity
    """
    chunk_size = 2**16
    for pos in xrange(0, len(bytes), chunk_size):
        block = bytes[pos:pos+chunk_size]
        if report_activity is not None:
            report_activity(len(block), 'write')
        until_no_eintr(socket.sendall, block)
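
# Illustrative usage sketch (not part of the original module): a loopback
# round trip through send_all() and recv_all(), in the spirit of their
# intended use in testing.  socketpair() is POSIX-only.
#
#   >>> import socket as _socket
#   >>> a, b = _socket.socketpair()
#   >>> send_all(a, 'payload')
#   >>> recv_all(b, len('payload'))
#   'payload'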

def dereference_path(path):
    """Determine the real path to a file.

    All parent elements are dereferenced. But the file itself is not
    dereferenced.

    :param path: The original path. May be absolute or relative.
    :return: the real path *to* the file
    """
    parent, base = os.path.split(path)
    # The pathjoin for '.' is a workaround for Python bug #1213894.
    # (initial path components aren't dereferenced)
    return pathjoin(realpath(pathjoin('.', parent)), base)

def supports_mapi():
    """Return True if we can use MAPI to launch a mail client."""
    return sys.platform == "win32"

def resource_string(package, resource_name):
    """Load a resource from a package and return it as a string.

    Note: Only packages that start with bzrlib are currently supported.

    This is designed to be a lightweight implementation of resource
    loading in a way which is API compatible with the same API from
    http://peak.telecommunity.com/DevCenter/PkgResources#basic-resource-access.
    If and when pkg_resources becomes a standard library, this routine
    can be removed.
    """
    # Check package name is within bzrlib
    if package == "bzrlib":
        resource_relpath = resource_name
    elif package.startswith("bzrlib."):
        package = package[len("bzrlib."):].replace('.', os.sep)
        resource_relpath = pathjoin(package, resource_name)
    else:
        raise errors.BzrError('resource package %s not in bzrlib' % package)

    # Map the resource to a file and read its contents
    base = dirname(bzrlib.__file__)
    if getattr(sys, 'frozen', None): # bzr.exe
        base = abspath(pathjoin(base, '..', '..'))
    filename = pathjoin(base, resource_relpath)
    return open(filename, 'rU').read()

def file_kind_from_stat_mode_thunk(mode):
    global file_kind_from_stat_mode
    if file_kind_from_stat_mode is file_kind_from_stat_mode_thunk:
        try:
            from bzrlib._readdir_pyx import UTF8DirReader
            file_kind_from_stat_mode = UTF8DirReader().kind_from_mode
        except ImportError, e:
            failed_to_load_extension(e)
            from bzrlib._readdir_py import (
                _kind_from_mode as file_kind_from_stat_mode
                )
    return file_kind_from_stat_mode(mode)
file_kind_from_stat_mode = file_kind_from_stat_mode_thunk
1839
def file_kind(f, _lstat=os.lstat):
1841
return file_kind_from_stat_mode(_lstat(f).st_mode)
1843
if getattr(e, 'errno', None) in (errno.ENOENT, errno.ENOTDIR):
1844
raise errors.NoSuchFile(f)

def until_no_eintr(f, *a, **kw):
    """Run f(*a, **kw), retrying if an EINTR error occurs."""
    # Borrowed from Twisted's twisted.python.util.untilConcludes function.
    while True:
        try:
            return f(*a, **kw)
        except (IOError, OSError), e:
            if e.errno == errno.EINTR:
                continue
            raise
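
# Illustrative usage sketch (not part of the original module): wrap any call
# that may be interrupted by a signal so that EINTR is retried transparently.
# 'some_file' is a hypothetical open file object.
#
#   >>> data = until_no_eintr(some_file.read, 4096)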

def re_compile_checked(re_string, flags=0, where=""):
    """Return a compiled re, or raise a sensible error.

    This should only be used when compiling user-supplied REs.

    :param re_string: Text form of regular expression.
    :param flags: eg re.IGNORECASE
    :param where: Message explaining to the user the context where
        it occurred, eg 'log search filter'.
    """
    # from https://bugs.launchpad.net/bzr/+bug/251352
    try:
        re_obj = re.compile(re_string, flags)
        return re_obj
    except re.error, e:
        if where:
            where = ' in ' + where
        # despite the name 'error' is a type
        raise errors.BzrCommandError('Invalid regular expression%s: %r: %s'
            % (where, re_string, e))
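
# Illustrative usage sketch (not part of the original module): compiling a
# user-supplied pattern with a context string for the error message, as
# described in the docstring above.
#
#   >>> matcher = re_compile_checked(r'fix(es)? bug', re.IGNORECASE,
#   ...                              where='log search filter')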

if sys.platform == "win32":
        return msvcrt.getch()
else:
        fd = sys.stdin.fileno()
        settings = termios.tcgetattr(fd)
        ch = sys.stdin.read(1)
        termios.tcsetattr(fd, termios.TCSADRAIN, settings)

if sys.platform == 'linux2':
    def _local_concurrency():
        concurrency = None
        prefix = 'processor'
        for line in file('/proc/cpuinfo', 'rb'):
            if line.startswith(prefix):
                concurrency = int(line[line.find(':')+1:]) + 1
        return concurrency
elif sys.platform == 'darwin':
    def _local_concurrency():
        return subprocess.Popen(['sysctl', '-n', 'hw.availcpu'],
                                stdout=subprocess.PIPE).communicate()[0]
elif sys.platform[0:7] == 'freebsd':
    def _local_concurrency():
        return subprocess.Popen(['sysctl', '-n', 'hw.ncpu'],
                                stdout=subprocess.PIPE).communicate()[0]
elif sys.platform == 'sunos5':
    def _local_concurrency():
        return subprocess.Popen(['psrinfo', '-p',],
                                stdout=subprocess.PIPE).communicate()[0]
elif sys.platform == "win32":
    def _local_concurrency():
        # This appears to return the number of cores.
        return os.environ.get('NUMBER_OF_PROCESSORS')
else:
    def _local_concurrency():
        return None

_cached_local_concurrency = None


def local_concurrency(use_cache=True):
    """Return how many processes can be run concurrently.

    Rely on platform specific implementations and default to 1 (one) if
    anything goes wrong.
    """
    global _cached_local_concurrency
    if _cached_local_concurrency is not None and use_cache:
        return _cached_local_concurrency

    try:
        concurrency = _local_concurrency()
    except (OSError, IOError):
        concurrency = None
    try:
        concurrency = int(concurrency)
    except (TypeError, ValueError):
        concurrency = 1
    if use_cache:
        _cached_local_concurrency = concurrency
    return concurrency
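
# Illustrative usage sketch (not part of the original module): callers treat
# the result as a best-effort hint, falling back to 1 when detection fails.
#
#   >>> workers = local_concurrency()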