~bzr-pqm/bzr/bzr.dev

Viewing changes to bzrlib/tests/test_diff.py

  • Committer: Patch Queue Manager
  • Date: 2016-04-21 04:10:52 UTC
  • mfrom: (6616.1.1 fix-en-user-guide)
  • Revision ID: pqm@pqm.ubuntu.com-20160421041052-clcye7ns1qcl2n7w
(richard-wilbur) Ensure build of English user guide always uses English text even when user's locale specifies a different language. (Jelmer Vernooij)

--- bzrlib/tests/test_diff.py
+++ bzrlib/tests/test_diff.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2005-2011 Canonical Ltd
+# Copyright (C) 2005-2012, 2014, 2016 Canonical Ltd
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -17,7 +17,6 @@
 import os
 from cStringIO import StringIO
 import subprocess
-import sys
 import tempfile
 
 from bzrlib import (
@@ -32,12 +31,15 @@
     tests,
     transform,
     )
-from bzrlib.symbol_versioning import deprecated_in
-from bzrlib.tests import features, EncodingAdapter
-from bzrlib.tests.blackbox.test_diff import subst_dates
 from bzrlib.tests import (
     features,
-    )
+    EncodingAdapter,
+)
+from bzrlib.tests.blackbox.test_diff import subst_dates
+from bzrlib.tests.scenarios import load_tests_apply_scenarios
+
+
+load_tests = load_tests_apply_scenarios
 
 
 def udiff_lines(old, new, allow_binary=False):
@@ -63,13 +65,36 @@
     return lines
 
 
+class TestDiffOptions(tests.TestCase):
+
+    def test_unified_added(self):
+        """Check for default style '-u' only if no other style specified
+        in 'diff-options'.
+        """
+        # Verify that style defaults to unified, id est '-u' appended
+        # to option list, in the absence of an alternative style.
+        self.assertEqual(['-a', '-u'], diff.default_style_unified(['-a']))
+
+
+class TestDiffOptionsScenarios(tests.TestCase):
+
+    scenarios = [(s, dict(style=s)) for s in diff.style_option_list]
+    style = None # Set by load_tests_apply_scenarios from scenarios
+
+    def test_unified_not_added(self):
+        # Verify that for all valid style options, '-u' is not
+        # appended to option list.
+        ret_opts = diff.default_style_unified(diff_opts=["%s" % (self.style,)])
+        self.assertEqual(["%s" % (self.style,)], ret_opts)
+
+
 class TestDiff(tests.TestCase):
 
     def test_add_nl(self):
         """diff generates a valid diff for patches that add a newline"""
         lines = udiff_lines(['boo'], ['boo\n'])
         self.check_patch(lines)
-        self.assertEquals(lines[4], '\\ No newline at end of file\n')
+        self.assertEqual(lines[4], '\\ No newline at end of file\n')
             ## "expected no-nl, got %r" % lines[4]
 
     def test_add_nl_2(self):
@@ -78,7 +103,7 @@
         """
         lines = udiff_lines(['boo'], ['goo\n'])
         self.check_patch(lines)
-        self.assertEquals(lines[4], '\\ No newline at end of file\n')
+        self.assertEqual(lines[4], '\\ No newline at end of file\n')
             ## "expected no-nl, got %r" % lines[4]
 
     def test_remove_nl(self):
@@ -87,21 +112,21 @@
         """
         lines = udiff_lines(['boo\n'], ['boo'])
         self.check_patch(lines)
-        self.assertEquals(lines[5], '\\ No newline at end of file\n')
+        self.assertEqual(lines[5], '\\ No newline at end of file\n')
             ## "expected no-nl, got %r" % lines[5]
 
     def check_patch(self, lines):
-        self.assert_(len(lines) > 1)
+        self.assertTrue(len(lines) > 1)
             ## "Not enough lines for a file header for patch:\n%s" % "".join(lines)
-        self.assert_(lines[0].startswith ('---'))
+        self.assertTrue(lines[0].startswith ('---'))
             ## 'No orig line for patch:\n%s' % "".join(lines)
-        self.assert_(lines[1].startswith ('+++'))
+        self.assertTrue(lines[1].startswith ('+++'))
            ## 'No mod line for patch:\n%s' % "".join(lines)
-        self.assert_(len(lines) > 2)
+        self.assertTrue(len(lines) > 2)
             ## "No hunks for patch:\n%s" % "".join(lines)
-        self.assert_(lines[2].startswith('@@'))
+        self.assertTrue(lines[2].startswith('@@'))
             ## "No hunk header for patch:\n%s" % "".join(lines)
-        self.assert_('@@' in lines[2][2:])
+        self.assertTrue('@@' in lines[2][2:])
             ## "Unterminated hunk header for patch:\n%s" % "".join(lines)
 
     def test_binary_lines(self):
@@ -132,7 +157,7 @@
         # Older versions of diffutils say "Binary files", newer
         # versions just say "Files".
         self.assertContainsRe(lines[0], '(Binary f|F)iles old and new differ\n')
-        self.assertEquals(lines[1:], ['\n'])
+        self.assertEqual(lines[1:], ['\n'])
 
     def test_no_external_diff(self):
         """Check that NoDiff is raised when diff is not available"""
@@ -150,7 +175,7 @@
                            u'new_\xe5', ['new_text\n'], output)
         lines = output.getvalue().splitlines(True)
         self.check_patch(lines)
-        self.assertEquals(['--- old_\xc2\xb5\n',
+        self.assertEqual(['--- old_\xc2\xb5\n',
                            '+++ new_\xc3\xa5\n',
                            '@@ -1,1 +1,1 @@\n',
                            '-old_text\n',
@@ -166,7 +191,7 @@
                            path_encoding='utf8')
         lines = output.getvalue().splitlines(True)
         self.check_patch(lines)
-        self.assertEquals(['--- old_\xc2\xb5\n',
+        self.assertEqual(['--- old_\xc2\xb5\n',
                            '+++ new_\xc3\xa5\n',
                            '@@ -1,1 +1,1 @@\n',
                            '-old_text\n',
@@ -182,7 +207,7 @@
                            path_encoding='iso-8859-1')
         lines = output.getvalue().splitlines(True)
         self.check_patch(lines)
-        self.assertEquals(['--- old_\xb5\n',
+        self.assertEqual(['--- old_\xb5\n',
                            '+++ new_\xe5\n',
                            '@@ -1,1 +1,1 @@\n',
                            '-old_text\n',
@@ -219,7 +244,7 @@
                            'same_text\n','same_text\n','new_text\n'], output)
         lines = output.getvalue().splitlines(True)
         self.check_patch(lines)
-        self.assertEquals(['--- old\n',
+        self.assertEqual(['--- old\n',
                            '+++ new\n',
                            '@@ -3,4 +3,4 @@\n',
                            ' same_text\n',
@@ -240,7 +265,7 @@
                            context_lines=0)
         lines = output.getvalue().splitlines(True)
         self.check_patch(lines)
-        self.assertEquals(['--- old\n',
+        self.assertEqual(['--- old\n',
                            '+++ new\n',
                            '@@ -6,1 +6,1 @@\n',
                            '-old_text\n',
@@ -258,7 +283,7 @@
                            context_lines=4)
         lines = output.getvalue().splitlines(True)
         self.check_patch(lines)
-        self.assertEquals(['--- old\n',
+        self.assertEqual(['--- old\n',
                            '+++ new\n',
                            '@@ -2,5 +2,5 @@\n',
                            ' same_text\n',
@@ -839,28 +864,28 @@
         b = ''.join([unichr(i) for i in range(4300, 4800, 2)])
         sm = self._PatienceSequenceMatcher(None, a, b)
         mb = sm.get_matching_blocks()
-        self.assertEquals(35, len(mb))
+        self.assertEqual(35, len(mb))
 
     def test_unique_lcs(self):
         unique_lcs = self._unique_lcs
-        self.assertEquals(unique_lcs('', ''), [])
-        self.assertEquals(unique_lcs('', 'a'), [])
-        self.assertEquals(unique_lcs('a', ''), [])
-        self.assertEquals(unique_lcs('a', 'a'), [(0,0)])
-        self.assertEquals(unique_lcs('a', 'b'), [])
-        self.assertEquals(unique_lcs('ab', 'ab'), [(0,0), (1,1)])
-        self.assertEquals(unique_lcs('abcde', 'cdeab'), [(2,0), (3,1), (4,2)])
-        self.assertEquals(unique_lcs('cdeab', 'abcde'), [(0,2), (1,3), (2,4)])
-        self.assertEquals(unique_lcs('abXde', 'abYde'), [(0,0), (1,1),
+        self.assertEqual(unique_lcs('', ''), [])
+        self.assertEqual(unique_lcs('', 'a'), [])
+        self.assertEqual(unique_lcs('a', ''), [])
+        self.assertEqual(unique_lcs('a', 'a'), [(0,0)])
+        self.assertEqual(unique_lcs('a', 'b'), [])
+        self.assertEqual(unique_lcs('ab', 'ab'), [(0,0), (1,1)])
+        self.assertEqual(unique_lcs('abcde', 'cdeab'), [(2,0), (3,1), (4,2)])
+        self.assertEqual(unique_lcs('cdeab', 'abcde'), [(0,2), (1,3), (2,4)])
+        self.assertEqual(unique_lcs('abXde', 'abYde'), [(0,0), (1,1),
                                                          (3,3), (4,4)])
-        self.assertEquals(unique_lcs('acbac', 'abc'), [(2,1)])
+        self.assertEqual(unique_lcs('acbac', 'abc'), [(2,1)])
 
     def test_recurse_matches(self):
         def test_one(a, b, matches):
             test_matches = []
             self._recurse_matches(
                 a, b, 0, 0, len(a), len(b), test_matches, 10)
-            self.assertEquals(test_matches, matches)
+            self.assertEqual(test_matches, matches)
 
         test_one(['a', '', 'b', '', 'c'], ['a', 'a', 'b', 'c', 'c'],
                  [(0, 0), (2, 2), (4, 4)])
@@ -971,7 +996,7 @@
     def test_opcodes(self):
         def chk_ops(a, b, expected_codes):
             s = self._PatienceSequenceMatcher(None, a, b)
-            self.assertEquals(expected_codes, s.get_opcodes())
+            self.assertEqual(expected_codes, s.get_opcodes())
 
         chk_ops('', '', [])
         chk_ops([], [], [])
@@ -1047,7 +1072,7 @@
     def test_grouped_opcodes(self):
         def chk_ops(a, b, expected_codes, n=3):
             s = self._PatienceSequenceMatcher(None, a, b)
-            self.assertEquals(expected_codes, list(s.get_grouped_opcodes(n)))
+            self.assertEqual(expected_codes, list(s.get_grouped_opcodes(n)))
 
         chk_ops('', '', [])
         chk_ops([], [], [])
@@ -1147,7 +1172,7 @@
                  'how are you today?\n']
         unified_diff = patiencediff.unified_diff
         psm = self._PatienceSequenceMatcher
-        self.assertEquals(['--- \n',
+        self.assertEqual(['--- \n',
                            '+++ \n',
                            '@@ -1,3 +1,2 @@\n',
                            ' hello there\n',
@@ -1159,7 +1184,7 @@
         txt_a = map(lambda x: x+'\n', 'abcdefghijklmnop')
         txt_b = map(lambda x: x+'\n', 'abcdefxydefghijklmnop')
         # This is the result with LongestCommonSubstring matching
-        self.assertEquals(['--- \n',
+        self.assertEqual(['--- \n',
                            '+++ \n',
                            '@@ -1,6 +1,11 @@\n',
                            ' a\n',
@@ -1175,7 +1200,7 @@
                            ' f\n']
                          , list(unified_diff(txt_a, txt_b)))
         # And the patience diff
-        self.assertEquals(['--- \n',
+        self.assertEqual(['--- \n',
                            '+++ \n',
                            '@@ -4,6 +4,11 @@\n',
                            ' d\n',
@@ -1201,7 +1226,7 @@
                  'how are you today?\n']
         unified_diff = patiencediff.unified_diff
         psm = self._PatienceSequenceMatcher
-        self.assertEquals(['--- a\t2008-08-08\n',
+        self.assertEqual(['--- a\t2008-08-08\n',
                            '+++ b\t2008-09-09\n',
                            '@@ -1,3 +1,2 @@\n',
                            ' hello there\n',
@@ -1259,7 +1284,7 @@
 
         unified_diff_files = patiencediff.unified_diff_files
         psm = self._PatienceSequenceMatcher
-        self.assertEquals(['--- a1\n',
+        self.assertEqual(['--- a1\n',
                            '+++ b1\n',
                            '@@ -1,3 +1,2 @@\n',
                            ' hello there\n',
@@ -1275,7 +1300,7 @@
         with open('b2', 'wb') as f: f.writelines(txt_b)
 
         # This is the result with LongestCommonSubstring matching
-        self.assertEquals(['--- a2\n',
+        self.assertEqual(['--- a2\n',
                            '+++ b2\n',
                            '@@ -1,6 +1,11 @@\n',
                            ' a\n',
@@ -1292,23 +1317,22 @@
                          , list(unified_diff_files('a2', 'b2')))
 
         # And the patience diff
-        self.assertEquals(['--- a2\n',
-                           '+++ b2\n',
-                           '@@ -4,6 +4,11 @@\n',
-                           ' d\n',
-                           ' e\n',
-                           ' f\n',
-                           '+x\n',
-                           '+y\n',
-                           '+d\n',
-                           '+e\n',
-                           '+f\n',
-                           ' g\n',
-                           ' h\n',
-                           ' i\n',
-                          ]
-                          , list(unified_diff_files('a2', 'b2',
-                                 sequencematcher=psm)))
+        self.assertEqual(['--- a2\n',
+                          '+++ b2\n',
+                          '@@ -4,6 +4,11 @@\n',
+                          ' d\n',
+                          ' e\n',
+                          ' f\n',
+                          '+x\n',
+                          '+y\n',
+                          '+d\n',
+                          '+e\n',
+                          '+f\n',
+                          ' g\n',
+                          ' h\n',
+                          ' i\n'],
+                         list(unified_diff_files('a2', 'b2',
+                                                 sequencematcher=psm)))
 
 
 class TestPatienceDiffLibFiles_c(TestPatienceDiffLibFiles):
@@ -1391,7 +1415,7 @@
         diff_obj._execute('old', 'new')
         self.assertEqual(output.getvalue().rstrip(), 'old new')
 
-    def test_excute_missing(self):
+    def test_execute_missing(self):
         diff_obj = diff.DiffFromTool(['a-tool-which-is-unlikely-to-exist'],
                                      None, None, None)
         self.addCleanup(diff_obj.finish)
@@ -1471,30 +1495,26 @@
     def test_encodable_filename(self):
         # Just checks file path for external diff tool.
        # We cannot change CPython's internal encoding used by os.exec*.
-        import sys
         diffobj = diff.DiffFromTool(['dummy', '@old_path', '@new_path'],
                                     None, None, None)
         for _, scenario in EncodingAdapter.encoding_scenarios:
             encoding = scenario['encoding']
-            dirname  = scenario['info']['directory']
+            dirname = scenario['info']['directory']
             filename = scenario['info']['filename']
 
             self.overrideAttr(diffobj, '_fenc', lambda: encoding)
             relpath = dirname + u'/' + filename
             fullpath = diffobj._safe_filename('safe', relpath)
-            self.assertEqual(
-                    fullpath,
-                    fullpath.encode(encoding).decode(encoding)
-                    )
-            self.assert_(fullpath.startswith(diffobj._root + '/safe'))
+            self.assertEqual(fullpath,
+                             fullpath.encode(encoding).decode(encoding))
+            self.assertTrue(fullpath.startswith(diffobj._root + '/safe'))
 
     def test_unencodable_filename(self):
-        import sys
         diffobj = diff.DiffFromTool(['dummy', '@old_path', '@new_path'],
                                     None, None, None)
         for _, scenario in EncodingAdapter.encoding_scenarios:
             encoding = scenario['encoding']
-            dirname  = scenario['info']['directory']
+            dirname = scenario['info']['directory']
             filename = scenario['info']['filename']
 
             if encoding == 'iso-8859-1':
@@ -1505,11 +1525,9 @@
             self.overrideAttr(diffobj, '_fenc', lambda: encoding)
             relpath = dirname + u'/' + filename
             fullpath = diffobj._safe_filename('safe', relpath)
-            self.assertEqual(
-                    fullpath,
-                    fullpath.encode(encoding).decode(encoding)
-                    )
-            self.assert_(fullpath.startswith(diffobj._root + '/safe'))
+            self.assertEqual(fullpath,
+                             fullpath.encode(encoding).decode(encoding))
+            self.assertTrue(fullpath.startswith(diffobj._root + '/safe'))
 
 
 class TestGetTreesAndBranchesToDiffLocked(tests.TestCaseWithTransport):
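
The new TestDiffOptionsScenarios class above relies on bzrlib's load_tests_apply_scenarios to multiply its single test method across every entry in diff.style_option_list. The sketch below is an illustration only, not bzrlib code: DEMO_STYLES, DemoScenarios, and the load_tests hook are hypothetical stand-ins showing how the stdlib unittest load_tests protocol can achieve a similar per-scenario multiplication.

# Illustrative sketch only -- not bzrlib code.  DEMO_STYLES and DemoScenarios
# are hypothetical stand-ins for diff.style_option_list and the scenario
# class added in the diff above.
import unittest

DEMO_STYLES = ['-c', '-e', '-n', '-u']  # assumed example style options


class DemoScenarios(unittest.TestCase):
    scenarios = [(s, dict(style=s)) for s in DEMO_STYLES]
    style = None  # filled in per generated variant, as in the diff above

    def test_style_is_set(self):
        self.assertIsNotNone(self.style)


def load_tests(loader, standard_tests, pattern):
    # Emulate the effect of load_tests_apply_scenarios: build one subclass
    # per scenario, each carrying its own 'style' attribute, and return the
    # generated variants as a suite (the base class itself is not run).
    suite = unittest.TestSuite()
    for name, params in DemoScenarios.scenarios:
        variant = type('DemoScenarios(%s)' % name, (DemoScenarios,), dict(params))
        suite.addTests(loader.loadTestsFromTestCase(variant))
    return suite


if __name__ == '__main__':
    unittest.main()

Running this module reports one generated test case per entry in DEMO_STYLES, mirroring how each scenario in the diff gets its own variant of test_unified_not_added with self.style bound to that scenario's value.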