2052.3.1 by John Arbash Meinel: Add tests to cleanup the copyright of all source files
# Copyright (C) 2006 Canonical Ltd

1911.2.3 by John Arbash Meinel: Moving everything into a new location so that we can cache more than just revision ids
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

3943.8.1 by Marius Kruger: remove all trailing whitespace from bzr source
# TODO: Some kind of command-line display of revision properties:

1911.2.3 by John Arbash Meinel: Moving everything into a new location so that we can cache more than just revision ids
# perhaps show them in log -v and allow them as options to the commit command.

"""Some functions to enable caching the conversion between unicode and utf8"""

2155.1.1 by John Arbash Meinel: (Dmitry Vasiliev) pre-lookup encoders to improve performance
import codecs


_utf8_encode = codecs.getencoder("utf-8")
_utf8_decode = codecs.getdecoder("utf-8")
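A side note for readers: the callables returned by codecs.getencoder() and codecs.getdecoder() return a (result, length consumed) tuple rather than a bare string, which is why every call below indexes the result with [0]. A minimal standalone sketch of that behaviour (Python 2 literals, to match the module):

import codecs

_enc = codecs.getencoder("utf-8")
_dec = codecs.getdecoder("utf-8")

encoded, consumed = _enc(u'ol\xe9')       # ('ol\xc3\xa9', 3): utf-8 bytes, characters consumed
decoded, consumed = _dec('ol\xc3\xa9')    # (u'ol\xe9', 4): unicode string, bytes consumed
assert encoded == 'ol\xc3\xa9' and decoded == u'ol\xe9'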

2255.7.95 by Robert Collins: Add convenience utf8 decode routine for handling strings that might be None
def _utf8_decode_with_None(bytestring, _utf8_decode=_utf8_decode):

2360.1.6 by John Arbash Meinel: Change utf8_decode_with_None to return what we care about.
    """Wrap _utf8_decode to support None->None for optional strings.

    Also, only return the Unicode portion, since we don't care about the second
    return value.
    """

2255.7.95 by Robert Collins: Add convenience utf8 decode routine for handling strings that might be None
    if bytestring is None:

2360.1.6 by John Arbash Meinel: Change utf8_decode_with_None to return what we care about.
        return None

2255.7.95 by Robert Collins: Add convenience utf8 decode routine for handling strings that might be None
    else:

2360.1.6 by John Arbash Meinel: Change utf8_decode_with_None to return what we care about.
        return _utf8_decode(bytestring)[0]
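A brief usage sketch of the helper above (illustrative only; Python 2, and it assumes the file is importable, here under the hypothetical name cache_utf8):

import cache_utf8  # hypothetical module name for the file shown here

# None passes straight through; a real utf-8 bytestring is decoded, and only
# the unicode value (not the (value, length) tuple) is returned.
assert cache_utf8._utf8_decode_with_None(None) is None
assert cache_utf8._utf8_decode_with_None('caf\xc3\xa9') == u'caf\xe9'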

1911.2.3 by John Arbash Meinel: Moving everything into a new location so that we can cache more than just revision ids

# Map revisions from and to utf8 encoding
# Whenever we do an encode/decode operation, we save the result, so that
# we don't have to do it again.
_unicode_to_utf8_map = {}
_utf8_to_unicode_map = {}


def encode(unicode_str,
           _uni_to_utf8=_unicode_to_utf8_map,

2155.1.1 by John Arbash Meinel: (Dmitry Vasiliev) pre-lookup encoders to improve performance
           _utf8_to_uni=_utf8_to_unicode_map,
           _utf8_encode=_utf8_encode):

1911.2.3 by John Arbash Meinel: Moving everything into a new location so that we can cache more than just revision ids
    """Take this unicode revision id, and get a utf8 version"""

1934.1.11 by John Arbash Meinel: Document why we use try/except rather than if None
    # If the key is in the cache try/KeyError is 50% faster than
    # val = dict.get(key), if val is None:

3943.8.1 by Marius Kruger: remove all trailing whitespace from bzr source
    # On jam's machine the difference is
    # try/KeyError: 900ms
    # if None: 1250ms

1934.1.11 by John Arbash Meinel: Document why we use try/except rather than if None
    # Since these are primarily used when iterating over a knit entry
    # *most* of the time the key will already be in the cache, so use the
    # fast path

1911.2.3 by John Arbash Meinel: Moving everything into a new location so that we can cache more than just revision ids
    try:
        return _uni_to_utf8[unicode_str]
    except KeyError:

2155.1.1 by John Arbash Meinel: (Dmitry Vasiliev) pre-lookup encoders to improve performance
        _uni_to_utf8[unicode_str] = utf8_str = _utf8_encode(unicode_str)[0]

1911.2.3 by John Arbash Meinel: Moving everything into a new location so that we can cache more than just revision ids
        _utf8_to_uni[utf8_str] = unicode_str
        return utf8_str
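The 900ms/1250ms figures in the comments are from the original author's machine; if you want to reproduce the comparison, a rough timeit sketch along these lines would do it (numbers will vary by machine and interpreter):

import timeit

setup = "d = {'some-rev-id': 'some-rev-id'}"
try_version = ("try:\n"
               "    v = d['some-rev-id']\n"
               "except KeyError:\n"
               "    v = None")
get_version = ("v = d.get('some-rev-id')\n"
               "if v is None:\n"
               "    v = None")

# Both statements exercise the cache-hit path; on CPython the try/except
# form usually wins because d[key] avoids the bound-method call of d.get().
t_try = timeit.timeit(try_version, setup, number=1000000)
t_get = timeit.timeit(get_version, setup, number=1000000)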


def decode(utf8_str,
           _uni_to_utf8=_unicode_to_utf8_map,

2155.1.1 by John Arbash Meinel: (Dmitry Vasiliev) pre-lookup encoders to improve performance
           _utf8_to_uni=_utf8_to_unicode_map,
           _utf8_decode=_utf8_decode):

1911.2.3 by John Arbash Meinel: Moving everything into a new location so that we can cache more than just revision ids
    """Take a utf8 revision id, and decode it, but cache the result"""
    try:
        return _utf8_to_uni[utf8_str]
    except KeyError:

2249.5.12 by John Arbash Meinel: Change the APIs for VersionedFile, Store, and some of Repository into utf-8
        unicode_str = _utf8_decode(utf8_str)[0]
        _utf8_to_uni[utf8_str] = unicode_str

1911.2.3 by John Arbash Meinel: Moving everything into a new location so that we can cache more than just revision ids
        _uni_to_utf8[unicode_str] = utf8_str
        return unicode_str
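Taken together, encode() and decode() seed both maps, so later conversions of an equal revision id come back as the already-cached objects. A small illustration (Python 2; cache_utf8 is an assumed module name for the file shown here):

import cache_utf8  # hypothetical module name

rev_id = u''.join([u'r\xe9vision-', u'1'])   # built at runtime so it is a fresh object
utf8_id = cache_utf8.encode(rev_id)          # -> 'r\xc3\xa9vision-1'

# Second and later calls are pure dict hits returning the cached objects.
assert cache_utf8.encode(rev_id) is utf8_id
assert cache_utf8.decode(utf8_id) is rev_id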


1911.2.5 by John Arbash Meinel: Update cache tests, add a function to do something like intern() only for unicode objects
def get_cached_unicode(unicode_str):
    """Return a cached version of the unicode string.

    This has a similar idea to that of intern() in that it tries
    to return a singleton string, except that it works for unicode strings.
    """
    # This might return the same object, or it might return the cached one.
    # The decode() should just be a hash lookup, because the encode() side
    # should add the entry to the maps
    return decode(encode(unicode_str))
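Because of that encode()/decode() round trip, two equal but distinct unicode objects collapse to one cached instance, which is the intern()-like behaviour the docstring describes. A sketch (Python 2; module name assumed as before):

import cache_utf8  # hypothetical module name

a = u''.join([u'file-id-', u'42'])
b = u''.join([u'file-id', u'-42'])
assert a == b and a is not b                  # equal values, different objects

assert cache_utf8.get_cached_unicode(a) is cache_utf8.get_cached_unicode(b)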


2249.5.2 by John Arbash Meinel: Add a get_cached_utf8, which will ensure it is really utf8, and cache the strings
def get_cached_utf8(utf8_str):
    """Return a cached version of the utf-8 string.

    Get a cached version of this string (similar to intern()).
    At present, this will be decoded to ensure it is a utf-8 string. In the
    future this might change to simply caching the string.
    """
    return encode(decode(utf8_str))


2249.5.3 by John Arbash Meinel: Add get_cached_ascii for dealing with how cElementTree handles ascii strings
def get_cached_ascii(ascii_str,
                     _uni_to_utf8=_unicode_to_utf8_map,
                     _utf8_to_uni=_utf8_to_unicode_map):
    """Cache an ascii string, which is identical in utf-8 and unicode."""
    # We don't need to do any encoding, but we want _utf8_to_uni to return a
    # real Unicode string. Unicode and plain strings of this type will have the
    # same hash, so we can just use it as the key in _uni_to_utf8, but we need
    # the return value to be different in _utf8_to_uni
    ascii_str = _uni_to_utf8.setdefault(ascii_str, ascii_str)
    _utf8_to_uni.setdefault(ascii_str, unicode(ascii_str))
    return ascii_str
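A sketch of the ascii fast path (Python 2 only, since it leans on the unicode() builtin; module name assumed as before):

import cache_utf8  # hypothetical module name

file_id = 'some-ascii-id'                     # plain ascii bytestring
cached = cache_utf8.get_cached_ascii(file_id)
assert cached == file_id and isinstance(cached, str)

# get_cached_ascii() seeded _utf8_to_uni, so decode() now hands back a real
# unicode object instead of the bytestring itself.
assert cache_utf8.decode(cached) == u'some-ascii-id'
assert isinstance(cache_utf8.decode(cached), unicode)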


1911.2.3 by John Arbash Meinel: Moving everything into a new location so that we can cache more than just revision ids
def clear_encoding_cache():
    """Clear the encoding and decoding caches"""
    _unicode_to_utf8_map.clear()
    _utf8_to_unicode_map.clear()
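Both maps grow without bound, so a long-running process that converts many distinct ids may want to drop them between batches; a hedged sketch of that pattern (module name assumed as before):

import cache_utf8  # hypothetical module name

for i in range(1000):
    cache_utf8.encode(u'revision-%d' % i)

# Each module-level map now holds one entry per distinct id; release them
# all once the batch of conversions is done.
cache_utf8.clear_encoding_cache()
assert not cache_utf8._unicode_to_utf8_map and not cache_utf8._utf8_to_unicode_map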