1 | n/a | """Regression tests for what was in Python 2's "urllib" module""" |
---|
2 | n/a | |
---|
3 | n/a | import urllib.parse |
---|
4 | n/a | import urllib.request |
---|
5 | n/a | import urllib.error |
---|
6 | n/a | import http.client |
---|
7 | n/a | import email.message |
---|
8 | n/a | import io |
---|
9 | n/a | import unittest |
---|
10 | n/a | from unittest.mock import patch |
---|
11 | n/a | from test import support |
---|
12 | n/a | import os |
---|
13 | n/a | try: |
---|
14 | n/a | import ssl |
---|
15 | n/a | except ImportError: |
---|
16 | n/a | ssl = None |
---|
17 | n/a | import sys |
---|
18 | n/a | import tempfile |
---|
19 | n/a | from nturl2path import url2pathname, pathname2url |
---|
20 | n/a | |
---|
21 | n/a | from base64 import b64encode |
---|
22 | n/a | import collections |
---|
23 | n/a | |
---|
24 | n/a | |
---|
def hexescape(char):
    """Escape char as RFC 2396 specifies"""
    # format() with "02X" gives an uppercase hex string padded to at least
    # two digits, matching the manual hex()/zero-pad dance it replaces.
    return "%" + format(ord(char), "02X")
---|
31 | n/a | |
---|
# Shortcut for testing FancyURLopener
# Module-level cache: urlopen() below lazily creates a single FancyURLopener
# and reuses it across calls when no explicit proxies are requested.
_urlopener = None
---|
34 | n/a | |
---|
35 | n/a | |
---|
def urlopen(url, data=None, proxies=None):
    """urlopen(url [, data]) -> open file-like object

    Thin stand-in for the Python 2 urllib.urlopen() API used throughout
    these tests: routes the call through a FancyURLopener, caching one
    instance at module level when no explicit proxies are given.
    """
    global _urlopener
    if proxies is not None:
        # An explicit proxy mapping gets a dedicated, uncached opener.
        opener = urllib.request.FancyURLopener(proxies=proxies)
    else:
        # Create the shared opener on first use, then keep reusing it.
        if not _urlopener:
            _urlopener = FancyURLopener()
        opener = _urlopener
    if data is None:
        return opener.open(url)
    return opener.open(url, data)
---|
50 | n/a | |
---|
51 | n/a | |
---|
def FancyURLopener():
    """Construct a FancyURLopener while silencing its DeprecationWarning."""
    expected = ('FancyURLopener style of invoking requests is deprecated.',
                DeprecationWarning)
    with support.check_warnings(expected):
        return urllib.request.FancyURLopener()
---|
57 | n/a | |
---|
58 | n/a | |
---|
def fakehttp(fakedata):
    """Return an HTTPConnection subclass whose socket replays *fakedata*.

    The returned class never touches the network: connect() installs a
    BytesIO-backed fake socket preloaded with *fakedata*, and whatever the
    connection sends is captured on the class attribute ``buf``.
    """

    class FakeSocket(io.BytesIO):
        # Simple reference count so handles returned by makefile() share a
        # single close(): the underlying BytesIO only really closes when the
        # last reference is closed.
        io_refs = 1

        def sendall(self, data):
            # Capture outgoing request bytes for inspection by tests.
            FakeHTTPConnection.buf = data

        def makefile(self, *args, **kwds):
            self.io_refs += 1
            return self

        def read(self, amt=None):
            if self.closed:
                return b""
            return super().read(amt)

        def readline(self, length=None):
            if self.closed:
                return b""
            return super().readline(length)

        def close(self):
            self.io_refs -= 1
            if self.io_refs == 0:
                super().close()

    class FakeHTTPConnection(http.client.HTTPConnection):

        # buffer to store data for verification in urlopen tests.
        buf = None

        def connect(self):
            # Replace the real socket with the canned-response fake and keep
            # a class-level handle to it for test assertions.
            self.sock = FakeSocket(self.fakedata)
            type(self).fakesock = self.sock
    FakeHTTPConnection.fakedata = fakedata

    return FakeHTTPConnection
---|
96 | n/a | |
---|
97 | n/a | |
---|
class FakeHTTPMixin(object):
    """Mixin that swaps http.client.HTTPConnection for a canned fake."""

    def fakehttp(self, fakedata):
        # Remember the real connection class so unfakehttp() can restore it,
        # then install the replay class built from *fakedata*.
        self._connection_class = http.client.HTTPConnection
        http.client.HTTPConnection = fakehttp(fakedata)

    def unfakehttp(self):
        """Undo fakehttp(): put the real HTTPConnection class back."""
        http.client.HTTPConnection = self._connection_class
---|
105 | n/a | |
---|
106 | n/a | |
---|
class FakeFTPMixin(object):
    """Mixin that swaps urllib.request.ftpwrapper for an inert stand-in."""

    def fakeftp(self):
        class FakeFtpWrapper(object):
            def __init__(self, user, passwd, host, port, dirs, timeout=None,
                         persistent=True):
                pass

            def retrfile(self, file, type):
                # Empty payload, zero length -- enough for cache-level tests.
                return io.BytesIO(), 0

            def close(self):
                pass

        # Remember the real wrapper so unfakeftp() can restore it.
        self._ftpwrapper_class = urllib.request.ftpwrapper
        urllib.request.ftpwrapper = FakeFtpWrapper

    def unfakeftp(self):
        """Undo fakeftp(): put the real ftpwrapper class back."""
        urllib.request.ftpwrapper = self._ftpwrapper_class
---|
125 | n/a | |
---|
126 | n/a | |
---|
class urlopen_FileTests(unittest.TestCase):
    """Test urlopen() opening a temporary file.

    Try to test as much functionality as possible so as to cut down on reliance
    on connecting to the Net for testing.

    """

    def setUp(self):
        # Create a temp file to use for testing
        # The file holds exactly one line, so readline()/iteration tests
        # below can compare against self.text directly.
        self.text = bytes("test_urllib: %s\n" % self.__class__.__name__,
                          "ascii")
        f = open(support.TESTFN, 'wb')
        try:
            f.write(self.text)
        finally:
            f.close()
        self.pathname = support.TESTFN
        # Open the just-written file through the file: scheme; every test
        # method exercises this response object.
        self.returned_obj = urlopen("file:%s" % self.pathname)

    def tearDown(self):
        """Shut down the open object"""
        self.returned_obj.close()
        os.remove(support.TESTFN)

    def test_interface(self):
        # Make sure object returned by urlopen() has the specified methods
        for attr in ("read", "readline", "readlines", "fileno",
                     "close", "info", "geturl", "getcode", "__iter__"):
            self.assertTrue(hasattr(self.returned_obj, attr),
                            "object returned by urlopen() lacks %s attribute" %
                            attr)

    def test_read(self):
        self.assertEqual(self.text, self.returned_obj.read())

    def test_readline(self):
        self.assertEqual(self.text, self.returned_obj.readline())
        # A second readline() past EOF must return an empty bytes object.
        self.assertEqual(b'', self.returned_obj.readline(),
                         "calling readline() after exhausting the file did not"
                         " return an empty string")

    def test_readlines(self):
        lines_list = self.returned_obj.readlines()
        self.assertEqual(len(lines_list), 1,
                         "readlines() returned the wrong number of lines")
        self.assertEqual(lines_list[0], self.text,
                         "readlines() returned improper text")

    def test_fileno(self):
        file_num = self.returned_obj.fileno()
        self.assertIsInstance(file_num, int, "fileno() did not return an int")
        # Reading straight from the descriptor also checks the fd really is
        # the open temp file.
        self.assertEqual(os.read(file_num, len(self.text)), self.text,
                         "Reading on the file descriptor returned by fileno() "
                         "did not return the expected text")

    def test_close(self):
        # Test close() by calling it here and then having it be called again
        # by the tearDown() method for the test
        self.returned_obj.close()

    def test_info(self):
        self.assertIsInstance(self.returned_obj.info(), email.message.Message)

    def test_geturl(self):
        self.assertEqual(self.returned_obj.geturl(), self.pathname)

    def test_getcode(self):
        # file: responses carry no HTTP status code, so getcode() is None.
        self.assertIsNone(self.returned_obj.getcode())

    def test_iter(self):
        # Test iterator
        # Don't need to count number of iterations since test would fail the
        # instant it returned anything beyond the first line from the
        # comparison.
        # Use the iterator in the usual implicit way to test for ticket #4608.
        for line in self.returned_obj:
            self.assertEqual(line, self.text)

    def test_relativelocalfile(self):
        # A scheme-less relative path must be rejected with ValueError.
        self.assertRaises(ValueError,urllib.request.urlopen,'./' + self.pathname)
---|
208 | n/a | |
---|
class ProxyTests(unittest.TestCase):
    """Tests for proxy settings read from *_proxy environment variables."""

    def setUp(self):
        # Records changes to env vars
        self.env = support.EnvironmentVarGuard()
        # Delete all proxy related env vars
        for k in list(os.environ):
            if 'proxy' in k.lower():
                self.env.unset(k)

    def tearDown(self):
        # Restore all proxy related env vars
        self.env.__exit__()
        del self.env

    def test_getproxies_environment_keep_no_proxies(self):
        self.env.set('NO_PROXY', 'localhost')
        proxies = urllib.request.getproxies_environment()
        # getproxies_environment use lowered case truncated (no '_proxy') keys
        self.assertEqual('localhost', proxies['no'])
        # List of no_proxies with space.
        self.env.set('NO_PROXY', 'localhost, anotherdomain.com, newdomain.com:1234')
        self.assertTrue(urllib.request.proxy_bypass_environment('anotherdomain.com'))
        self.assertTrue(urllib.request.proxy_bypass_environment('anotherdomain.com:8888'))
        self.assertTrue(urllib.request.proxy_bypass_environment('newdomain.com:1234'))

    def test_proxy_cgi_ignore(self):
        try:
            self.env.set('HTTP_PROXY', 'http://somewhere:3128')
            proxies = urllib.request.getproxies_environment()
            self.assertEqual('http://somewhere:3128', proxies['http'])
            # Once REQUEST_METHOD is set (i.e. a CGI context), HTTP_PROXY
            # must no longer be honoured (cf. the "httpoxy" attack class).
            self.env.set('REQUEST_METHOD', 'GET')
            proxies = urllib.request.getproxies_environment()
            self.assertNotIn('http', proxies)
        finally:
            self.env.unset('REQUEST_METHOD')
            self.env.unset('HTTP_PROXY')

    def test_proxy_bypass_environment_host_match(self):
        bypass = urllib.request.proxy_bypass_environment
        self.env.set('NO_PROXY',
                     'localhost, anotherdomain.com, newdomain.com:1234, .d.o.t')
        self.assertTrue(bypass('localhost'))
        self.assertTrue(bypass('LocalHost'))  # MixedCase
        self.assertTrue(bypass('LOCALHOST'))  # UPPERCASE
        self.assertTrue(bypass('newdomain.com:1234'))
        self.assertTrue(bypass('foo.d.o.t'))  # issue 29142
        self.assertTrue(bypass('anotherdomain.com:8888'))
        self.assertTrue(bypass('www.newdomain.com:1234'))
        self.assertFalse(bypass('prelocalhost'))
        self.assertFalse(bypass('newdomain.com'))  # no port
        self.assertFalse(bypass('newdomain.com:1235'))  # wrong port
---|
261 | n/a | |
---|
class ProxyTests_withOrderedEnv(unittest.TestCase):
    """Proxy-environment tests where the *order* of variables matters."""

    def setUp(self):
        # We need to test conditions, where variable order _is_ significant,
        # so swap in a fake, initially empty, ordered environment; the real
        # one is restored in tearDown().
        self._saved_env = os.environ
        os.environ = collections.OrderedDict()

    def tearDown(self):
        os.environ = self._saved_env

    def test_getproxies_environment_prefer_lowercase(self):
        getproxies = urllib.request.getproxies_environment
        bypass = urllib.request.proxy_bypass_environment
        # Test lowercase preference with removal
        os.environ['no_proxy'] = ''
        os.environ['No_Proxy'] = 'localhost'
        self.assertFalse(bypass('localhost'))
        self.assertFalse(bypass('arbitrary'))
        os.environ['http_proxy'] = ''
        os.environ['HTTP_PROXY'] = 'http://somewhere:3128'
        self.assertEqual({}, getproxies())
        # Test lowercase preference of proxy bypass and correct matching
        # including ports
        os.environ['no_proxy'] = 'localhost, noproxy.com, my.proxy:1234'
        os.environ['No_Proxy'] = 'xyz.com'
        self.assertTrue(bypass('localhost'))
        self.assertTrue(bypass('noproxy.com:5678'))
        self.assertTrue(bypass('my.proxy:1234'))
        self.assertFalse(bypass('my.proxy'))
        self.assertFalse(bypass('arbitrary'))
        # Test lowercase preference with replacement
        os.environ['http_proxy'] = 'http://somewhere:3128'
        os.environ['Http_Proxy'] = 'http://somewhereelse:3128'
        self.assertEqual('http://somewhere:3128', getproxies()['http'])
---|
296 | n/a | |
---|
class urlopen_HttpTests(unittest.TestCase, FakeHTTPMixin, FakeFTPMixin):
    """Test urlopen() opening a fake http connection."""

    def check_read(self, ver):
        # Serve a canned response for HTTP version *ver* and verify the
        # basic response-object API (readline/geturl/getcode).
        self.fakehttp(b"HTTP/" + ver + b" 200 OK\r\n\r\nHello!")
        try:
            fp = urlopen("http://python.org/")
            self.assertEqual(fp.readline(), b"Hello!")
            self.assertEqual(fp.readline(), b"")
            self.assertEqual(fp.geturl(), 'http://python.org/')
            self.assertEqual(fp.getcode(), 200)
        finally:
            # Always restore the real HTTPConnection class.
            self.unfakehttp()

    def test_url_fragment(self):
        # Issue #11703: geturl() omits fragments in the original URL.
        url = 'http://docs.python.org/library/urllib.html#OK'
        self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello!")
        try:
            fp = urllib.request.urlopen(url)
            self.assertEqual(fp.geturl(), url)
        finally:
            self.unfakehttp()

    def test_willclose(self):
        self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello!")
        try:
            resp = urlopen("http://www.python.org")
            # The underlying HTTPResponse is flagged to close the connection.
            self.assertTrue(resp.fp.will_close)
        finally:
            self.unfakehttp()

    def test_read_0_9(self):
        # "0.9" response accepted (but not "simple responses" without
        # a status line)
        self.check_read(b"0.9")

    def test_read_1_0(self):
        self.check_read(b"1.0")

    def test_read_1_1(self):
        self.check_read(b"1.1")

    def test_read_bogus(self):
        # urlopen() should raise OSError for many error codes.
        self.fakehttp(b'''HTTP/1.1 401 Authentication Required
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Type: text/html; charset=iso-8859-1
''')
        try:
            self.assertRaises(OSError, urlopen, "http://python.org/")
        finally:
            self.unfakehttp()

    def test_invalid_redirect(self):
        # urlopen() should raise OSError for many error codes.
        self.fakehttp(b'''HTTP/1.1 302 Found
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Location: file://guidocomputer.athome.com:/python/license
Connection: close
Content-Type: text/html; charset=iso-8859-1
''')
        try:
            # A redirect to a file: URL must be refused with HTTPError.
            msg = "Redirection to url 'file:"
            with self.assertRaisesRegex(urllib.error.HTTPError, msg):
                urlopen("http://python.org/")
        finally:
            self.unfakehttp()

    def test_redirect_limit_independent(self):
        # Ticket #12923: make sure independent requests each use their
        # own retry limit.
        for i in range(FancyURLopener().maxtries):
            self.fakehttp(b'''HTTP/1.1 302 Found
Location: file://guidocomputer.athome.com:/python/license
Connection: close
''')
            try:
                self.assertRaises(urllib.error.HTTPError, urlopen,
                                  "http://something")
            finally:
                self.unfakehttp()

    def test_empty_socket(self):
        # urlopen() raises OSError if the underlying socket does not send any
        # data. (#1680230)
        self.fakehttp(b'')
        try:
            self.assertRaises(OSError, urlopen, "http://something")
        finally:
            self.unfakehttp()

    def test_missing_localfile(self):
        # Test for #10836
        with self.assertRaises(urllib.error.URLError) as e:
            urlopen('file://localhost/a/file/which/doesnot/exists.py')
        # The error must carry both the offending filename and a reason.
        self.assertTrue(e.exception.filename)
        self.assertTrue(e.exception.reason)

    def test_file_notexists(self):
        fd, tmp_file = tempfile.mkstemp()
        tmp_fileurl = 'file://localhost/' + tmp_file.replace(os.path.sep, '/')
        try:
            self.assertTrue(os.path.exists(tmp_file))
            with urlopen(tmp_fileurl) as fobj:
                self.assertTrue(fobj)
        finally:
            os.close(fd)
            os.unlink(tmp_file)
        # Once the file is gone, the same URL must fail with URLError.
        self.assertFalse(os.path.exists(tmp_file))
        with self.assertRaises(urllib.error.URLError):
            urlopen(tmp_fileurl)

    def test_ftp_nohost(self):
        test_ftp_url = 'ftp:///path'
        with self.assertRaises(urllib.error.URLError) as e:
            urlopen(test_ftp_url)
        self.assertFalse(e.exception.filename)
        self.assertTrue(e.exception.reason)

    def test_ftp_nonexisting(self):
        with self.assertRaises(urllib.error.URLError) as e:
            urlopen('ftp://localhost/a/file/which/doesnot/exists.py')
        self.assertFalse(e.exception.filename)
        self.assertTrue(e.exception.reason)

    @patch.object(urllib.request, 'MAXFTPCACHE', 0)
    def test_ftp_cache_pruning(self):
        # With MAXFTPCACHE patched to 0, opening an ftp URL should prune the
        # pre-seeded cache entry rather than let the cache grow.
        self.fakeftp()
        try:
            urllib.request.ftpcache['test'] = urllib.request.ftpwrapper('user', 'pass', 'localhost', 21, [])
            urlopen('ftp://localhost')
        finally:
            self.unfakeftp()


    def test_userpass_inurl(self):
        self.fakehttp(b"HTTP/1.0 200 OK\r\n\r\nHello!")
        try:
            # Credentials embedded in the URL are preserved by geturl().
            fp = urlopen("http://user:pass@python.org/")
            self.assertEqual(fp.readline(), b"Hello!")
            self.assertEqual(fp.readline(), b"")
            self.assertEqual(fp.geturl(), 'http://user:pass@python.org/')
            self.assertEqual(fp.getcode(), 200)
        finally:
            self.unfakehttp()

    def test_userpass_inurl_w_spaces(self):
        self.fakehttp(b"HTTP/1.0 200 OK\r\n\r\nHello!")
        try:
            userpass = "a b:c d"
            url = "http://{}@python.org/".format(userpass)
            fakehttp_wrapper = http.client.HTTPConnection
            authorization = ("Authorization: Basic %s\r\n" %
                             b64encode(userpass.encode("ASCII")).decode("ASCII"))
            fp = urlopen(url)
            # The authorization header must be in place
            self.assertIn(authorization, fakehttp_wrapper.buf.decode("UTF-8"))
            self.assertEqual(fp.readline(), b"Hello!")
            self.assertEqual(fp.readline(), b"")
            # the spaces are quoted in URL so no match
            self.assertNotEqual(fp.geturl(), url)
            self.assertEqual(fp.getcode(), 200)
        finally:
            self.unfakehttp()

    def test_URLopener_deprecation(self):
        # Instantiating URLopener directly emits a DeprecationWarning
        # (any message text is accepted).
        with support.check_warnings(('',DeprecationWarning)):
            urllib.request.URLopener()

    @unittest.skipUnless(ssl, "ssl module required")
    def test_cafile_and_context(self):
        context = ssl.create_default_context()
        with support.check_warnings(('', DeprecationWarning)):
            # Passing both cafile and context must raise ValueError.
            with self.assertRaises(ValueError):
                urllib.request.urlopen(
                    "https://localhost", cafile="/nonexistent/path", context=context
                )
---|
478 | n/a | |
---|
class urlopen_DataTests(unittest.TestCase):
    """Test urlopen() opening a data URL."""

    def setUp(self):
        # text containing URL special- and unicode-characters
        self.text = "test data URLs :;,%=& \u00f6 \u00c4 "
        # 2x1 pixel RGB PNG image with one black and one white pixel
        self.image = (
            b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x02\x00\x00\x00'
            b'\x01\x08\x02\x00\x00\x00{@\xe8\xdd\x00\x00\x00\x01sRGB\x00\xae'
            b'\xce\x1c\xe9\x00\x00\x00\x0fIDAT\x08\xd7c```\xf8\xff\xff?\x00'
            b'\x06\x01\x02\xfe\no/\x1e\x00\x00\x00\x00IEND\xaeB`\x82')

        # Percent-encoded form of self.text.
        self.text_url = (
            "data:text/plain;charset=UTF-8,test%20data%20URLs%20%3A%3B%2C%25%3"
            "D%26%20%C3%B6%20%C3%84%20")
        # Same text, base64-encoded, with a Latin-1 charset parameter.
        self.text_url_base64 = (
            "data:text/plain;charset=ISO-8859-1;base64,dGVzdCBkYXRhIFVSTHMgOjs"
            "sJT0mIPYgxCA%3D")
        # base64 encoded data URL that contains ignorable spaces,
        # such as "\n", " ", "%0A", and "%20".
        self.image_url = (
            "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAIAAAABCAIAAAB7\n"
            "QOjdAAAAAXNSR0IArs4c6QAAAA9JREFUCNdj%0AYGBg%2BP//PwAGAQL%2BCm8 "
            "vHgAAAABJRU5ErkJggg%3D%3D%0A%20")

        # Responses exercised by the test methods below.
        self.text_url_resp = urllib.request.urlopen(self.text_url)
        self.text_url_base64_resp = urllib.request.urlopen(
            self.text_url_base64)
        self.image_url_resp = urllib.request.urlopen(self.image_url)

    def test_interface(self):
        # Make sure object returned by urlopen() has the specified methods
        for attr in ("read", "readline", "readlines",
                     "close", "info", "geturl", "getcode", "__iter__"):
            self.assertTrue(hasattr(self.text_url_resp, attr),
                            "object returned by urlopen() lacks %s attribute" %
                            attr)

    def test_info(self):
        self.assertIsInstance(self.text_url_resp.info(), email.message.Message)
        self.assertEqual(self.text_url_base64_resp.info().get_params(),
            [('text/plain', ''), ('charset', 'ISO-8859-1')])
        # Content-Length matches the decoded payload, not the URL text.
        self.assertEqual(self.image_url_resp.info()['content-length'],
            str(len(self.image)))
        # A minimal data URL defaults to text/plain;charset=US-ASCII.
        self.assertEqual(urllib.request.urlopen("data:,").info().get_params(),
            [('text/plain', ''), ('charset', 'US-ASCII')])

    def test_geturl(self):
        self.assertEqual(self.text_url_resp.geturl(), self.text_url)
        self.assertEqual(self.text_url_base64_resp.geturl(),
            self.text_url_base64)
        self.assertEqual(self.image_url_resp.geturl(), self.image_url)

    def test_read_text(self):
        # Decode using the charset declared by the URL itself.
        self.assertEqual(self.text_url_resp.read().decode(
            dict(self.text_url_resp.info().get_params())['charset']), self.text)

    def test_read_text_base64(self):
        self.assertEqual(self.text_url_base64_resp.read().decode(
            dict(self.text_url_base64_resp.info().get_params())['charset']),
            self.text)

    def test_read_image(self):
        self.assertEqual(self.image_url_resp.read(), self.image)

    def test_missing_comma(self):
        # A data URL without the mandatory comma separator is malformed.
        self.assertRaises(ValueError,urllib.request.urlopen,'data:text/plain')

    def test_invalid_base64_data(self):
        # missing padding character
        self.assertRaises(ValueError,urllib.request.urlopen,'data:;base64,Cg=')
---|
552 | n/a | class urlretrieve_FileTests(unittest.TestCase): |
---|
553 | n/a | """Test urllib.urlretrieve() on local files""" |
---|
554 | n/a | |
---|
555 | n/a | def setUp(self): |
---|
556 | n/a | # Create a list of temporary files. Each item in the list is a file |
---|
557 | n/a | # name (absolute path or relative to the current working directory). |
---|
558 | n/a | # All files in this list will be deleted in the tearDown method. Note, |
---|
559 | n/a | # this only helps to makes sure temporary files get deleted, but it |
---|
560 | n/a | # does nothing about trying to close files that may still be open. It |
---|
561 | n/a | # is the responsibility of the developer to properly close files even |
---|
562 | n/a | # when exceptional conditions occur. |
---|
563 | n/a | self.tempFiles = [] |
---|
564 | n/a | |
---|
565 | n/a | # Create a temporary file. |
---|
566 | n/a | self.registerFileForCleanUp(support.TESTFN) |
---|
567 | n/a | self.text = b'testing urllib.urlretrieve' |
---|
568 | n/a | try: |
---|
569 | n/a | FILE = open(support.TESTFN, 'wb') |
---|
570 | n/a | FILE.write(self.text) |
---|
571 | n/a | FILE.close() |
---|
572 | n/a | finally: |
---|
573 | n/a | try: FILE.close() |
---|
574 | n/a | except: pass |
---|
575 | n/a | |
---|
576 | n/a | def tearDown(self): |
---|
577 | n/a | # Delete the temporary files. |
---|
578 | n/a | for each in self.tempFiles: |
---|
579 | n/a | try: os.remove(each) |
---|
580 | n/a | except: pass |
---|
581 | n/a | |
---|
582 | n/a | def constructLocalFileUrl(self, filePath): |
---|
583 | n/a | filePath = os.path.abspath(filePath) |
---|
584 | n/a | try: |
---|
585 | n/a | filePath.encode("utf-8") |
---|
586 | n/a | except UnicodeEncodeError: |
---|
587 | n/a | raise unittest.SkipTest("filePath is not encodable to utf8") |
---|
588 | n/a | return "file://%s" % urllib.request.pathname2url(filePath) |
---|
589 | n/a | |
---|
590 | n/a | def createNewTempFile(self, data=b""): |
---|
591 | n/a | """Creates a new temporary file containing the specified data, |
---|
592 | n/a | registers the file for deletion during the test fixture tear down, and |
---|
593 | n/a | returns the absolute path of the file.""" |
---|
594 | n/a | |
---|
595 | n/a | newFd, newFilePath = tempfile.mkstemp() |
---|
596 | n/a | try: |
---|
597 | n/a | self.registerFileForCleanUp(newFilePath) |
---|
598 | n/a | newFile = os.fdopen(newFd, "wb") |
---|
599 | n/a | newFile.write(data) |
---|
600 | n/a | newFile.close() |
---|
601 | n/a | finally: |
---|
602 | n/a | try: newFile.close() |
---|
603 | n/a | except: pass |
---|
604 | n/a | return newFilePath |
---|
605 | n/a | |
---|
606 | n/a | def registerFileForCleanUp(self, fileName): |
---|
607 | n/a | self.tempFiles.append(fileName) |
---|
608 | n/a | |
---|
609 | n/a | def test_basic(self): |
---|
610 | n/a | # Make sure that a local file just gets its own location returned and |
---|
611 | n/a | # a headers value is returned. |
---|
612 | n/a | result = urllib.request.urlretrieve("file:%s" % support.TESTFN) |
---|
613 | n/a | self.assertEqual(result[0], support.TESTFN) |
---|
614 | n/a | self.assertIsInstance(result[1], email.message.Message, |
---|
615 | n/a | "did not get an email.message.Message instance " |
---|
616 | n/a | "as second returned value") |
---|
617 | n/a | |
---|
618 | n/a | def test_copy(self): |
---|
619 | n/a | # Test that setting the filename argument works. |
---|
620 | n/a | second_temp = "%s.2" % support.TESTFN |
---|
621 | n/a | self.registerFileForCleanUp(second_temp) |
---|
622 | n/a | result = urllib.request.urlretrieve(self.constructLocalFileUrl( |
---|
623 | n/a | support.TESTFN), second_temp) |
---|
624 | n/a | self.assertEqual(second_temp, result[0]) |
---|
625 | n/a | self.assertTrue(os.path.exists(second_temp), "copy of the file was not " |
---|
626 | n/a | "made") |
---|
627 | n/a | FILE = open(second_temp, 'rb') |
---|
628 | n/a | try: |
---|
629 | n/a | text = FILE.read() |
---|
630 | n/a | FILE.close() |
---|
631 | n/a | finally: |
---|
632 | n/a | try: FILE.close() |
---|
633 | n/a | except: pass |
---|
634 | n/a | self.assertEqual(self.text, text) |
---|
635 | n/a | |
---|
636 | n/a | def test_reporthook(self): |
---|
637 | n/a | # Make sure that the reporthook works. |
---|
638 | n/a | def hooktester(block_count, block_read_size, file_size, count_holder=[0]): |
---|
639 | n/a | self.assertIsInstance(block_count, int) |
---|
640 | n/a | self.assertIsInstance(block_read_size, int) |
---|
641 | n/a | self.assertIsInstance(file_size, int) |
---|
642 | n/a | self.assertEqual(block_count, count_holder[0]) |
---|
643 | n/a | count_holder[0] = count_holder[0] + 1 |
---|
644 | n/a | second_temp = "%s.2" % support.TESTFN |
---|
645 | n/a | self.registerFileForCleanUp(second_temp) |
---|
646 | n/a | urllib.request.urlretrieve( |
---|
647 | n/a | self.constructLocalFileUrl(support.TESTFN), |
---|
648 | n/a | second_temp, hooktester) |
---|
649 | n/a | |
---|
650 | n/a | def test_reporthook_0_bytes(self): |
---|
651 | n/a | # Test on zero length file. Should call reporthook only 1 time. |
---|
652 | n/a | report = [] |
---|
653 | n/a | def hooktester(block_count, block_read_size, file_size, _report=report): |
---|
654 | n/a | _report.append((block_count, block_read_size, file_size)) |
---|
655 | n/a | srcFileName = self.createNewTempFile() |
---|
656 | n/a | urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName), |
---|
657 | n/a | support.TESTFN, hooktester) |
---|
658 | n/a | self.assertEqual(len(report), 1) |
---|
659 | n/a | self.assertEqual(report[0][2], 0) |
---|
660 | n/a | |
---|
661 | n/a | def test_reporthook_5_bytes(self): |
---|
662 | n/a | # Test on 5 byte file. Should call reporthook only 2 times (once when |
---|
663 | n/a | # the "network connection" is established and once when the block is |
---|
664 | n/a | # read). |
---|
665 | n/a | report = [] |
---|
666 | n/a | def hooktester(block_count, block_read_size, file_size, _report=report): |
---|
667 | n/a | _report.append((block_count, block_read_size, file_size)) |
---|
668 | n/a | srcFileName = self.createNewTempFile(b"x" * 5) |
---|
669 | n/a | urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName), |
---|
670 | n/a | support.TESTFN, hooktester) |
---|
671 | n/a | self.assertEqual(len(report), 2) |
---|
672 | n/a | self.assertEqual(report[0][2], 5) |
---|
673 | n/a | self.assertEqual(report[1][2], 5) |
---|
674 | n/a | |
---|
675 | n/a | def test_reporthook_8193_bytes(self): |
---|
676 | n/a | # Test on 8193 byte file. Should call reporthook only 3 times (once |
---|
677 | n/a | # when the "network connection" is established, once for the next 8192 |
---|
678 | n/a | # bytes, and once for the last byte). |
---|
679 | n/a | report = [] |
---|
680 | n/a | def hooktester(block_count, block_read_size, file_size, _report=report): |
---|
681 | n/a | _report.append((block_count, block_read_size, file_size)) |
---|
682 | n/a | srcFileName = self.createNewTempFile(b"x" * 8193) |
---|
683 | n/a | urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName), |
---|
684 | n/a | support.TESTFN, hooktester) |
---|
685 | n/a | self.assertEqual(len(report), 3) |
---|
686 | n/a | self.assertEqual(report[0][2], 8193) |
---|
687 | n/a | self.assertEqual(report[0][1], 8192) |
---|
688 | n/a | self.assertEqual(report[1][1], 8192) |
---|
689 | n/a | self.assertEqual(report[2][1], 8192) |
---|
690 | n/a | |
---|
691 | n/a | |
---|
class urlretrieve_HttpTests(unittest.TestCase, FakeHTTPMixin):
    """Test urllib.urlretrieve() using fake http connections"""

    # Shared fake response whose Content-Length (100) exceeds the actual
    # body, so urlretrieve() must raise ContentTooShortError.  Factored
    # out because both tests previously duplicated it verbatim.
    _SHORT_RESPONSE = b'''HTTP/1.1 200 OK
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Length: 100
Content-Type: text/html; charset=iso-8859-1

FF
'''

    def test_short_content_raises_ContentTooShortError(self):
        self.fakehttp(self._SHORT_RESPONSE)

        def _reporthook(par1, par2, par3):
            pass

        with self.assertRaises(urllib.error.ContentTooShortError):
            try:
                urllib.request.urlretrieve('http://example.com/',
                                           reporthook=_reporthook)
            finally:
                self.unfakehttp()

    def test_short_content_raises_ContentTooShortError_without_reporthook(self):
        self.fakehttp(self._SHORT_RESPONSE)
        with self.assertRaises(urllib.error.ContentTooShortError):
            try:
                urllib.request.urlretrieve('http://example.com/')
            finally:
                self.unfakehttp()
---|
731 | n/a | |
---|
732 | n/a | |
---|
class QuotingTests(unittest.TestCase):
    r"""Tests for urllib.quote() and urllib.quote_plus()

    According to RFC 2396 (Uniform Resource Identifiers), to escape a
    character you write it as '%' + <2 character US-ASCII hex value>.
    The Python code of ``'%' + hex(ord(<character>))[2:]`` escapes a
    character properly. Case does not matter on the hex letters.

    The various character sets specified are:

    Reserved characters : ";/?:@&=+$,"
        Have special meaning in URIs and must be escaped if not being used for
        their special meaning
    Data characters : letters, digits, and "-_.!~*'()"
        Unreserved and do not need to be escaped; can be, though, if desired
    Control characters : 0x00 - 0x1F, 0x7F
        Have no use in URIs so must be escaped
    space : 0x20
        Must be escaped
    Delimiters : '<>#%"'
        Must be escaped
    Unwise : "{}|\^[]`"
        Must be escaped

    """

    def test_never_quote(self):
        # Make sure quote() does not quote letters, digits, and "_,.-"
        do_not_quote = ''.join(["ABCDEFGHIJKLMNOPQRSTUVWXYZ",
                                "abcdefghijklmnopqrstuvwxyz",
                                "0123456789",
                                "_.-"])
        result = urllib.parse.quote(do_not_quote)
        self.assertEqual(do_not_quote, result,
                         "using quote(): %r != %r" % (do_not_quote, result))
        result = urllib.parse.quote_plus(do_not_quote)
        self.assertEqual(do_not_quote, result,
                         "using quote_plus(): %r != %r" % (do_not_quote, result))

    def test_default_safe(self):
        # Test '/' is default value for 'safe' parameter
        self.assertEqual(urllib.parse.quote.__defaults__[0], '/')

    def test_safe(self):
        # Test setting 'safe' parameter does what it should do
        quote_by_default = "<>"
        result = urllib.parse.quote(quote_by_default, safe=quote_by_default)
        self.assertEqual(quote_by_default, result,
                         "using quote(): %r != %r" % (quote_by_default, result))
        result = urllib.parse.quote_plus(quote_by_default,
                                         safe=quote_by_default)
        self.assertEqual(quote_by_default, result,
                         "using quote_plus(): %r != %r" %
                         (quote_by_default, result))
        # Safe expressed as bytes rather than str
        result = urllib.parse.quote(quote_by_default, safe=b"<>")
        self.assertEqual(quote_by_default, result,
                         "using quote(): %r != %r" % (quote_by_default, result))
        # "Safe" non-ASCII characters should have no effect
        # (Since URIs are not allowed to have non-ASCII characters)
        result = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="\xfc")
        expect = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="")
        self.assertEqual(expect, result,
                         "using quote(): %r != %r" %
                         (expect, result))
        # Same as above, but using a bytes rather than str
        result = urllib.parse.quote("a\xfcb", encoding="latin-1", safe=b"\xfc")
        expect = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="")
        self.assertEqual(expect, result,
                         "using quote(): %r != %r" %
                         (expect, result))

    def test_default_quoting(self):
        # Make sure all characters that should be quoted are by default sans
        # space (separate test for that).
        should_quote = [chr(num) for num in range(32)]  # For 0x00 - 0x1F
        should_quote.append(r'<>#%"{}|\^[]`')
        should_quote.append(chr(127))  # For 0x7F
        should_quote = ''.join(should_quote)
        for char in should_quote:
            result = urllib.parse.quote(char)
            self.assertEqual(hexescape(char), result,
                             "using quote(): "
                             "%s should be escaped to %s, not %s" %
                             (char, hexescape(char), result))
            result = urllib.parse.quote_plus(char)
            # Message fixed: previously said "escapes" instead of "escaped".
            self.assertEqual(hexescape(char), result,
                             "using quote_plus(): "
                             "%s should be escaped to %s, not %s" %
                             (char, hexescape(char), result))
        del should_quote
        partial_quote = "ab[]cd"
        expected = "ab%5B%5Dcd"
        result = urllib.parse.quote(partial_quote)
        self.assertEqual(expected, result,
                         "using quote(): %r != %r" % (expected, result))
        result = urllib.parse.quote_plus(partial_quote)
        self.assertEqual(expected, result,
                         "using quote_plus(): %r != %r" % (expected, result))

    def test_quoting_space(self):
        # Make sure quote() and quote_plus() handle spaces as specified in
        # their unique way
        result = urllib.parse.quote(' ')
        self.assertEqual(result, hexescape(' '),
                         "using quote(): %r != %r" % (result, hexescape(' ')))
        result = urllib.parse.quote_plus(' ')
        self.assertEqual(result, '+',
                         "using quote_plus(): %r != +" % result)
        given = "a b cd e f"
        expect = given.replace(' ', hexescape(' '))
        result = urllib.parse.quote(given)
        self.assertEqual(expect, result,
                         "using quote(): %r != %r" % (expect, result))
        expect = given.replace(' ', '+')
        result = urllib.parse.quote_plus(given)
        self.assertEqual(expect, result,
                         "using quote_plus(): %r != %r" % (expect, result))

    def test_quoting_plus(self):
        self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma'),
                         'alpha%2Bbeta+gamma')
        self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma', '+'),
                         'alpha+beta+gamma')
        # Test with bytes
        self.assertEqual(urllib.parse.quote_plus(b'alpha+beta gamma'),
                         'alpha%2Bbeta+gamma')
        # Test with safe bytes
        self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma', b'+'),
                         'alpha+beta+gamma')

    def test_quote_bytes(self):
        # Bytes should quote directly to percent-encoded values
        given = b"\xa2\xd8ab\xff"
        expect = "%A2%D8ab%FF"
        result = urllib.parse.quote(given)
        self.assertEqual(expect, result,
                         "using quote(): %r != %r" % (expect, result))
        # Encoding argument should raise type error on bytes input
        self.assertRaises(TypeError, urllib.parse.quote, given,
                          encoding="latin-1")
        # quote_from_bytes should work the same
        result = urllib.parse.quote_from_bytes(given)
        self.assertEqual(expect, result,
                         "using quote_from_bytes(): %r != %r"
                         % (expect, result))

    def test_quote_with_unicode(self):
        # Characters in Latin-1 range, encoded by default in UTF-8
        given = "\xa2\xd8ab\xff"
        expect = "%C2%A2%C3%98ab%C3%BF"
        result = urllib.parse.quote(given)
        self.assertEqual(expect, result,
                         "using quote(): %r != %r" % (expect, result))
        # Characters in Latin-1 range, encoded by with None (default)
        result = urllib.parse.quote(given, encoding=None, errors=None)
        self.assertEqual(expect, result,
                         "using quote(): %r != %r" % (expect, result))
        # Characters in Latin-1 range, encoded with Latin-1
        given = "\xa2\xd8ab\xff"
        expect = "%A2%D8ab%FF"
        result = urllib.parse.quote(given, encoding="latin-1")
        self.assertEqual(expect, result,
                         "using quote(): %r != %r" % (expect, result))
        # Characters in BMP, encoded by default in UTF-8
        given = "\u6f22\u5b57"              # "Kanji"
        expect = "%E6%BC%A2%E5%AD%97"
        result = urllib.parse.quote(given)
        self.assertEqual(expect, result,
                         "using quote(): %r != %r" % (expect, result))
        # Characters in BMP, encoded with Latin-1
        given = "\u6f22\u5b57"
        self.assertRaises(UnicodeEncodeError, urllib.parse.quote, given,
                          encoding="latin-1")
        # Characters in BMP, encoded with Latin-1, with replace error handling
        given = "\u6f22\u5b57"
        expect = "%3F%3F"                   # "??"
        result = urllib.parse.quote(given, encoding="latin-1",
                                    errors="replace")
        self.assertEqual(expect, result,
                         "using quote(): %r != %r" % (expect, result))
        # Characters in BMP, Latin-1, with xmlcharref error handling
        given = "\u6f22\u5b57"
        expect = "%26%2328450%3B%26%2323383%3B"     # "&#28450;&#23383;"
        result = urllib.parse.quote(given, encoding="latin-1",
                                    errors="xmlcharrefreplace")
        self.assertEqual(expect, result,
                         "using quote(): %r != %r" % (expect, result))

    def test_quote_plus_with_unicode(self):
        # Encoding (latin-1) test for quote_plus
        given = "\xa2\xd8 \xff"
        expect = "%A2%D8+%FF"
        result = urllib.parse.quote_plus(given, encoding="latin-1")
        self.assertEqual(expect, result,
                         "using quote_plus(): %r != %r" % (expect, result))
        # Errors test for quote_plus
        given = "ab\u6f22\u5b57 cd"
        expect = "ab%3F%3F+cd"
        result = urllib.parse.quote_plus(given, encoding="latin-1",
                                         errors="replace")
        self.assertEqual(expect, result,
                         "using quote_plus(): %r != %r" % (expect, result))
---|
936 | n/a | |
---|
937 | n/a | |
---|
class UnquotingTests(unittest.TestCase):
    """Tests for unquote() and unquote_plus()

    See the doc string for QuotingTests for details on quoting and such.

    """

    def test_unquoting(self):
        # Make sure unquoting of all ASCII values works
        escape_list = []
        for num in range(128):
            given = hexescape(chr(num))
            expect = chr(num)
            result = urllib.parse.unquote(given)
            self.assertEqual(expect, result,
                             "using unquote(): %r != %r" % (expect, result))
            result = urllib.parse.unquote_plus(given)
            self.assertEqual(expect, result,
                             "using unquote_plus(): %r != %r" %
                             (expect, result))
            escape_list.append(given)
        escape_string = ''.join(escape_list)
        del escape_list
        result = urllib.parse.unquote(escape_string)
        # Exactly one '%' must survive: the one decoded from "%25".
        self.assertEqual(result.count('%'), 1,
                         "using unquote(): not all characters escaped: "
                         "%s" % result)
        self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, None)
        self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, ())
        with support.check_warnings(('', BytesWarning), quiet=True):
            self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, b'')

    def test_unquoting_badpercent(self):
        # Test unquoting on bad percent-escapes; malformed escapes must be
        # passed through untouched rather than raising.
        given = '%xab'
        expect = given
        result = urllib.parse.unquote(given)
        self.assertEqual(expect, result, "using unquote(): %r != %r"
                         % (expect, result))
        given = '%x'
        expect = given
        result = urllib.parse.unquote(given)
        self.assertEqual(expect, result, "using unquote(): %r != %r"
                         % (expect, result))
        given = '%'
        expect = given
        result = urllib.parse.unquote(given)
        self.assertEqual(expect, result, "using unquote(): %r != %r"
                         % (expect, result))
        # unquote_to_bytes
        given = '%xab'
        expect = bytes(given, 'ascii')
        result = urllib.parse.unquote_to_bytes(given)
        self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r"
                         % (expect, result))
        given = '%x'
        expect = bytes(given, 'ascii')
        result = urllib.parse.unquote_to_bytes(given)
        self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r"
                         % (expect, result))
        given = '%'
        expect = bytes(given, 'ascii')
        result = urllib.parse.unquote_to_bytes(given)
        self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r"
                         % (expect, result))
        self.assertRaises((TypeError, AttributeError), urllib.parse.unquote_to_bytes, None)
        self.assertRaises((TypeError, AttributeError), urllib.parse.unquote_to_bytes, ())

    def test_unquoting_mixed_case(self):
        # Test unquoting on mixed-case hex digits in the percent-escapes
        given = '%Ab%eA'
        expect = b'\xab\xea'
        result = urllib.parse.unquote_to_bytes(given)
        self.assertEqual(expect, result,
                         "using unquote_to_bytes(): %r != %r"
                         % (expect, result))

    def test_unquoting_parts(self):
        # Make sure unquoting works when have non-quoted characters
        # interspersed
        given = 'ab%sd' % hexescape('c')
        expect = "abcd"
        result = urllib.parse.unquote(given)
        # Message fixed: previously said "using quote():" in this unquote test.
        self.assertEqual(expect, result,
                         "using unquote(): %r != %r" % (expect, result))
        result = urllib.parse.unquote_plus(given)
        self.assertEqual(expect, result,
                         "using unquote_plus(): %r != %r" % (expect, result))

    def test_unquoting_plus(self):
        # Test difference between unquote() and unquote_plus()
        given = "are+there+spaces..."
        expect = given
        result = urllib.parse.unquote(given)
        self.assertEqual(expect, result,
                         "using unquote(): %r != %r" % (expect, result))
        expect = given.replace('+', ' ')
        result = urllib.parse.unquote_plus(given)
        self.assertEqual(expect, result,
                         "using unquote_plus(): %r != %r" % (expect, result))

    def test_unquote_to_bytes(self):
        given = 'br%C3%BCckner_sapporo_20050930.doc'
        expect = b'br\xc3\xbcckner_sapporo_20050930.doc'
        result = urllib.parse.unquote_to_bytes(given)
        self.assertEqual(expect, result,
                         "using unquote_to_bytes(): %r != %r"
                         % (expect, result))
        # Test on a string with unescaped non-ASCII characters
        # (Technically an invalid URI; expect those characters to be UTF-8
        # encoded).
        result = urllib.parse.unquote_to_bytes("\u6f22%C3%BC")
        expect = b'\xe6\xbc\xa2\xc3\xbc'    # UTF-8 for "\u6f22\u00fc"
        self.assertEqual(expect, result,
                         "using unquote_to_bytes(): %r != %r"
                         % (expect, result))
        # Test with a bytes as input
        given = b'%A2%D8ab%FF'
        expect = b'\xa2\xd8ab\xff'
        result = urllib.parse.unquote_to_bytes(given)
        self.assertEqual(expect, result,
                         "using unquote_to_bytes(): %r != %r"
                         % (expect, result))
        # Test with a bytes as input, with unescaped non-ASCII bytes
        # (Technically an invalid URI; expect those bytes to be preserved)
        given = b'%A2\xd8ab%FF'
        expect = b'\xa2\xd8ab\xff'
        result = urllib.parse.unquote_to_bytes(given)
        self.assertEqual(expect, result,
                         "using unquote_to_bytes(): %r != %r"
                         % (expect, result))

    def test_unquote_with_unicode(self):
        # Characters in the Latin-1 range, encoded with UTF-8
        given = 'br%C3%BCckner_sapporo_20050930.doc'
        expect = 'br\u00fcckner_sapporo_20050930.doc'
        result = urllib.parse.unquote(given)
        self.assertEqual(expect, result,
                         "using unquote(): %r != %r" % (expect, result))
        # Characters in the Latin-1 range, encoded with None (default)
        result = urllib.parse.unquote(given, encoding=None, errors=None)
        self.assertEqual(expect, result,
                         "using unquote(): %r != %r" % (expect, result))

        # Characters in the Latin-1 range, encoded with Latin-1
        result = urllib.parse.unquote('br%FCckner_sapporo_20050930.doc',
                                      encoding="latin-1")
        expect = 'br\u00fcckner_sapporo_20050930.doc'
        self.assertEqual(expect, result,
                         "using unquote(): %r != %r" % (expect, result))

        # Characters in BMP, encoded with UTF-8
        given = "%E6%BC%A2%E5%AD%97"
        expect = "\u6f22\u5b57"             # "Kanji"
        result = urllib.parse.unquote(given)
        self.assertEqual(expect, result,
                         "using unquote(): %r != %r" % (expect, result))

        # Decode with UTF-8, invalid sequence
        given = "%F3%B1"
        expect = "\ufffd"                   # Replacement character
        result = urllib.parse.unquote(given)
        self.assertEqual(expect, result,
                         "using unquote(): %r != %r" % (expect, result))

        # Decode with UTF-8, invalid sequence, replace errors
        result = urllib.parse.unquote(given, errors="replace")
        self.assertEqual(expect, result,
                         "using unquote(): %r != %r" % (expect, result))

        # Decode with UTF-8, invalid sequence, ignoring errors
        given = "%F3%B1"
        expect = ""
        result = urllib.parse.unquote(given, errors="ignore")
        self.assertEqual(expect, result,
                         "using unquote(): %r != %r" % (expect, result))

        # A mix of non-ASCII and percent-encoded characters, UTF-8
        result = urllib.parse.unquote("\u6f22%C3%BC")
        expect = '\u6f22\u00fc'
        self.assertEqual(expect, result,
                         "using unquote(): %r != %r" % (expect, result))

        # A mix of non-ASCII and percent-encoded characters, Latin-1
        # (Note, the string contains non-Latin-1-representable characters)
        result = urllib.parse.unquote("\u6f22%FC", encoding="latin-1")
        expect = '\u6f22\u00fc'
        self.assertEqual(expect, result,
                         "using unquote(): %r != %r" % (expect, result))
---|
1127 | n/a | |
---|
1128 | n/a | class urlencode_Tests(unittest.TestCase): |
---|
1129 | n/a | """Tests for urlencode()""" |
---|
1130 | n/a | |
---|
1131 | n/a | def help_inputtype(self, given, test_type): |
---|
1132 | n/a | """Helper method for testing different input types. |
---|
1133 | n/a | |
---|
1134 | n/a | 'given' must lead to only the pairs: |
---|
1135 | n/a | * 1st, 1 |
---|
1136 | n/a | * 2nd, 2 |
---|
1137 | n/a | * 3rd, 3 |
---|
1138 | n/a | |
---|
1139 | n/a | Test cannot assume anything about order. Docs make no guarantee and |
---|
1140 | n/a | have possible dictionary input. |
---|
1141 | n/a | |
---|
1142 | n/a | """ |
---|
1143 | n/a | expect_somewhere = ["1st=1", "2nd=2", "3rd=3"] |
---|
1144 | n/a | result = urllib.parse.urlencode(given) |
---|
1145 | n/a | for expected in expect_somewhere: |
---|
1146 | n/a | self.assertIn(expected, result, |
---|
1147 | n/a | "testing %s: %s not found in %s" % |
---|
1148 | n/a | (test_type, expected, result)) |
---|
1149 | n/a | self.assertEqual(result.count('&'), 2, |
---|
1150 | n/a | "testing %s: expected 2 '&'s; got %s" % |
---|
1151 | n/a | (test_type, result.count('&'))) |
---|
1152 | n/a | amp_location = result.index('&') |
---|
1153 | n/a | on_amp_left = result[amp_location - 1] |
---|
1154 | n/a | on_amp_right = result[amp_location + 1] |
---|
1155 | n/a | self.assertTrue(on_amp_left.isdigit() and on_amp_right.isdigit(), |
---|
1156 | n/a | "testing %s: '&' not located in proper place in %s" % |
---|
1157 | n/a | (test_type, result)) |
---|
1158 | n/a | self.assertEqual(len(result), (5 * 3) + 2, #5 chars per thing and amps |
---|
1159 | n/a | "testing %s: " |
---|
1160 | n/a | "unexpected number of characters: %s != %s" % |
---|
1161 | n/a | (test_type, len(result), (5 * 3) + 2)) |
---|
1162 | n/a | |
---|
1163 | n/a | def test_using_mapping(self): |
---|
1164 | n/a | # Test passing in a mapping object as an argument. |
---|
1165 | n/a | self.help_inputtype({"1st":'1', "2nd":'2', "3rd":'3'}, |
---|
1166 | n/a | "using dict as input type") |
---|
1167 | n/a | |
---|
1168 | n/a | def test_using_sequence(self): |
---|
1169 | n/a | # Test passing in a sequence of two-item sequences as an argument. |
---|
1170 | n/a | self.help_inputtype([('1st', '1'), ('2nd', '2'), ('3rd', '3')], |
---|
1171 | n/a | "using sequence of two-item tuples as input") |
---|
1172 | n/a | |
---|
1173 | n/a | def test_quoting(self): |
---|
1174 | n/a | # Make sure keys and values are quoted using quote_plus() |
---|
1175 | n/a | given = {"&":"="} |
---|
1176 | n/a | expect = "%s=%s" % (hexescape('&'), hexescape('=')) |
---|
1177 | n/a | result = urllib.parse.urlencode(given) |
---|
1178 | n/a | self.assertEqual(expect, result) |
---|
1179 | n/a | given = {"key name":"A bunch of pluses"} |
---|
1180 | n/a | expect = "key+name=A+bunch+of+pluses" |
---|
1181 | n/a | result = urllib.parse.urlencode(given) |
---|
1182 | n/a | self.assertEqual(expect, result) |
---|
1183 | n/a | |
---|
1184 | n/a | def test_doseq(self): |
---|
1185 | n/a | # Test that passing True for 'doseq' parameter works correctly |
---|
1186 | n/a | given = {'sequence':['1', '2', '3']} |
---|
1187 | n/a | expect = "sequence=%s" % urllib.parse.quote_plus(str(['1', '2', '3'])) |
---|
1188 | n/a | result = urllib.parse.urlencode(given) |
---|
1189 | n/a | self.assertEqual(expect, result) |
---|
1190 | n/a | result = urllib.parse.urlencode(given, True) |
---|
1191 | n/a | for value in given["sequence"]: |
---|
1192 | n/a | expect = "sequence=%s" % value |
---|
1193 | n/a | self.assertIn(expect, result) |
---|
1194 | n/a | self.assertEqual(result.count('&'), 2, |
---|
1195 | n/a | "Expected 2 '&'s, got %s" % result.count('&')) |
---|
1196 | n/a | |
---|
1197 | n/a | def test_empty_sequence(self): |
---|
1198 | n/a | self.assertEqual("", urllib.parse.urlencode({})) |
---|
1199 | n/a | self.assertEqual("", urllib.parse.urlencode([])) |
---|
1200 | n/a | |
---|
1201 | n/a | def test_nonstring_values(self): |
---|
1202 | n/a | self.assertEqual("a=1", urllib.parse.urlencode({"a": 1})) |
---|
1203 | n/a | self.assertEqual("a=None", urllib.parse.urlencode({"a": None})) |
---|
1204 | n/a | |
---|
1205 | n/a | def test_nonstring_seq_values(self): |
---|
1206 | n/a | self.assertEqual("a=1&a=2", urllib.parse.urlencode({"a": [1, 2]}, True)) |
---|
1207 | n/a | self.assertEqual("a=None&a=a", |
---|
1208 | n/a | urllib.parse.urlencode({"a": [None, "a"]}, True)) |
---|
1209 | n/a | data = collections.OrderedDict([("a", 1), ("b", 1)]) |
---|
1210 | n/a | self.assertEqual("a=a&a=b", |
---|
1211 | n/a | urllib.parse.urlencode({"a": data}, True)) |
---|
1212 | n/a | |
---|
1213 | n/a | def test_urlencode_encoding(self): |
---|
1214 | n/a | # ASCII encoding. Expect %3F with errors="replace' |
---|
1215 | n/a | given = (('\u00a0', '\u00c1'),) |
---|
1216 | n/a | expect = '%3F=%3F' |
---|
1217 | n/a | result = urllib.parse.urlencode(given, encoding="ASCII", errors="replace") |
---|
1218 | n/a | self.assertEqual(expect, result) |
---|
1219 | n/a | |
---|
1220 | n/a | # Default is UTF-8 encoding. |
---|
1221 | n/a | given = (('\u00a0', '\u00c1'),) |
---|
1222 | n/a | expect = '%C2%A0=%C3%81' |
---|
1223 | n/a | result = urllib.parse.urlencode(given) |
---|
1224 | n/a | self.assertEqual(expect, result) |
---|
1225 | n/a | |
---|
1226 | n/a | # Latin-1 encoding. |
---|
1227 | n/a | given = (('\u00a0', '\u00c1'),) |
---|
1228 | n/a | expect = '%A0=%C1' |
---|
1229 | n/a | result = urllib.parse.urlencode(given, encoding="latin-1") |
---|
1230 | n/a | self.assertEqual(expect, result) |
---|
1231 | n/a | |
---|
1232 | n/a | def test_urlencode_encoding_doseq(self): |
---|
1233 | n/a | # ASCII Encoding. Expect %3F with errors="replace' |
---|
1234 | n/a | given = (('\u00a0', '\u00c1'),) |
---|
1235 | n/a | expect = '%3F=%3F' |
---|
1236 | n/a | result = urllib.parse.urlencode(given, doseq=True, |
---|
1237 | n/a | encoding="ASCII", errors="replace") |
---|
1238 | n/a | self.assertEqual(expect, result) |
---|
1239 | n/a | |
---|
1240 | n/a | # ASCII Encoding. On a sequence of values. |
---|
1241 | n/a | given = (("\u00a0", (1, "\u00c1")),) |
---|
1242 | n/a | expect = '%3F=1&%3F=%3F' |
---|
1243 | n/a | result = urllib.parse.urlencode(given, True, |
---|
1244 | n/a | encoding="ASCII", errors="replace") |
---|
1245 | n/a | self.assertEqual(expect, result) |
---|
1246 | n/a | |
---|
1247 | n/a | # Utf-8 |
---|
1248 | n/a | given = (("\u00a0", "\u00c1"),) |
---|
1249 | n/a | expect = '%C2%A0=%C3%81' |
---|
1250 | n/a | result = urllib.parse.urlencode(given, True) |
---|
1251 | n/a | self.assertEqual(expect, result) |
---|
1252 | n/a | |
---|
1253 | n/a | given = (("\u00a0", (42, "\u00c1")),) |
---|
1254 | n/a | expect = '%C2%A0=42&%C2%A0=%C3%81' |
---|
1255 | n/a | result = urllib.parse.urlencode(given, True) |
---|
1256 | n/a | self.assertEqual(expect, result) |
---|
1257 | n/a | |
---|
1258 | n/a | # latin-1 |
---|
1259 | n/a | given = (("\u00a0", "\u00c1"),) |
---|
1260 | n/a | expect = '%A0=%C1' |
---|
1261 | n/a | result = urllib.parse.urlencode(given, True, encoding="latin-1") |
---|
1262 | n/a | self.assertEqual(expect, result) |
---|
1263 | n/a | |
---|
1264 | n/a | given = (("\u00a0", (42, "\u00c1")),) |
---|
1265 | n/a | expect = '%A0=42&%A0=%C1' |
---|
1266 | n/a | result = urllib.parse.urlencode(given, True, encoding="latin-1") |
---|
1267 | n/a | self.assertEqual(expect, result) |
---|
1268 | n/a | |
---|
1269 | n/a | def test_urlencode_bytes(self): |
---|
1270 | n/a | given = ((b'\xa0\x24', b'\xc1\x24'),) |
---|
1271 | n/a | expect = '%A0%24=%C1%24' |
---|
1272 | n/a | result = urllib.parse.urlencode(given) |
---|
1273 | n/a | self.assertEqual(expect, result) |
---|
1274 | n/a | result = urllib.parse.urlencode(given, True) |
---|
1275 | n/a | self.assertEqual(expect, result) |
---|
1276 | n/a | |
---|
1277 | n/a | # Sequence of values |
---|
1278 | n/a | given = ((b'\xa0\x24', (42, b'\xc1\x24')),) |
---|
1279 | n/a | expect = '%A0%24=42&%A0%24=%C1%24' |
---|
1280 | n/a | result = urllib.parse.urlencode(given, True) |
---|
1281 | n/a | self.assertEqual(expect, result) |
---|
1282 | n/a | |
---|
1283 | n/a | def test_urlencode_encoding_safe_parameter(self): |
---|
1284 | n/a | |
---|
1285 | n/a | # Send '$' (\x24) as safe character |
---|
1286 | n/a | # Default utf-8 encoding |
---|
1287 | n/a | |
---|
1288 | n/a | given = ((b'\xa0\x24', b'\xc1\x24'),) |
---|
1289 | n/a | result = urllib.parse.urlencode(given, safe=":$") |
---|
1290 | n/a | expect = '%A0$=%C1$' |
---|
1291 | n/a | self.assertEqual(expect, result) |
---|
1292 | n/a | |
---|
1293 | n/a | given = ((b'\xa0\x24', b'\xc1\x24'),) |
---|
1294 | n/a | result = urllib.parse.urlencode(given, doseq=True, safe=":$") |
---|
1295 | n/a | expect = '%A0$=%C1$' |
---|
1296 | n/a | self.assertEqual(expect, result) |
---|
1297 | n/a | |
---|
1298 | n/a | # Safe parameter in sequence |
---|
1299 | n/a | given = ((b'\xa0\x24', (b'\xc1\x24', 0xd, 42)),) |
---|
1300 | n/a | expect = '%A0$=%C1$&%A0$=13&%A0$=42' |
---|
1301 | n/a | result = urllib.parse.urlencode(given, True, safe=":$") |
---|
1302 | n/a | self.assertEqual(expect, result) |
---|
1303 | n/a | |
---|
1304 | n/a | # Test all above in latin-1 encoding |
---|
1305 | n/a | |
---|
1306 | n/a | given = ((b'\xa0\x24', b'\xc1\x24'),) |
---|
1307 | n/a | result = urllib.parse.urlencode(given, safe=":$", |
---|
1308 | n/a | encoding="latin-1") |
---|
1309 | n/a | expect = '%A0$=%C1$' |
---|
1310 | n/a | self.assertEqual(expect, result) |
---|
1311 | n/a | |
---|
1312 | n/a | given = ((b'\xa0\x24', b'\xc1\x24'),) |
---|
1313 | n/a | expect = '%A0$=%C1$' |
---|
1314 | n/a | result = urllib.parse.urlencode(given, doseq=True, safe=":$", |
---|
1315 | n/a | encoding="latin-1") |
---|
1316 | n/a | |
---|
1317 | n/a | given = ((b'\xa0\x24', (b'\xc1\x24', 0xd, 42)),) |
---|
1318 | n/a | expect = '%A0$=%C1$&%A0$=13&%A0$=42' |
---|
1319 | n/a | result = urllib.parse.urlencode(given, True, safe=":$", |
---|
1320 | n/a | encoding="latin-1") |
---|
1321 | n/a | self.assertEqual(expect, result) |
---|
1322 | n/a | |
---|
class Pathname_Tests(unittest.TestCase):
    """Test pathname2url() and url2pathname()"""

    def test_basic(self):
        # Make sure simple tests pass
        expected_path = os.path.join("parts", "of", "a", "path")
        expected_url = "parts/of/a/path"
        result = urllib.request.pathname2url(expected_path)
        self.assertEqual(expected_url, result,
                         "pathname2url() failed; %s != %s" %
                         (result, expected_url))
        result = urllib.request.url2pathname(expected_url)
        self.assertEqual(expected_path, result,
                         # Bug fix: message previously read "url2pathame()".
                         "url2pathname() failed; %s != %s" %
                         (result, expected_path))

    def test_quoting(self):
        # Test automatic quoting and unquoting works for pathname2url() and
        # url2pathname() respectively
        given = os.path.join("needs", "quot=ing", "here")
        expect = "needs/%s/here" % urllib.parse.quote("quot=ing")
        result = urllib.request.pathname2url(given)
        self.assertEqual(expect, result,
                         "pathname2url() failed; %s != %s" %
                         (expect, result))
        expect = given
        result = urllib.request.url2pathname(result)
        self.assertEqual(expect, result,
                         "url2pathname() failed; %s != %s" %
                         (expect, result))
        given = os.path.join("make sure", "using_quote")
        expect = "%s/using_quote" % urllib.parse.quote("make sure")
        result = urllib.request.pathname2url(given)
        self.assertEqual(expect, result,
                         "pathname2url() failed; %s != %s" %
                         (expect, result))
        given = "make+sure/using_unquote"
        expect = os.path.join("make+sure", "using_unquote")
        result = urllib.request.url2pathname(given)
        self.assertEqual(expect, result,
                         "url2pathname() failed; %s != %s" %
                         (expect, result))

    @unittest.skipUnless(sys.platform == 'win32',
                         'test specific to the urllib.url2path function.')
    def test_ntpath(self):
        given = ('/C:/', '///C:/', '/C|//')
        expect = 'C:\\'
        for url in given:
            result = urllib.request.url2pathname(url)
            self.assertEqual(expect, result,
                             # Bug fix: message had a doubled dot
                             # ("urllib.request..url2pathname").
                             'urllib.request.url2pathname() failed; %s != %s' %
                             (expect, result))
        given = '///C|/path'
        expect = 'C:\\path'
        result = urllib.request.url2pathname(given)
        self.assertEqual(expect, result,
                         'urllib.request.url2pathname() failed; %s != %s' %
                         (expect, result))
1382 | n/a | |
---|
class Utility_Tests(unittest.TestCase):
    """Tests for miscellaneous utility functions in urllib.request."""

    def test_thishost(self):
        """thishost() must return a tuple (host name/alias/address info)."""
        # Only the return type is checked; the contents depend on the
        # local host configuration.
        host_info = urllib.request.thishost()
        self.assertIsInstance(host_info, tuple)
1389 | n/a | |
---|
1390 | n/a | |
---|
class URLopener_Tests(unittest.TestCase):
    """Testcase to test the open method of URLopener class."""

    def test_quoted_open(self):
        class DummyURLopener(urllib.request.URLopener):
            # Handler for the made-up "spam" scheme: echo the URL remainder.
            def open_spam(self, url):
                return url

        with support.check_warnings(
                ('DummyURLopener style of invoking requests is deprecated.',
                 DeprecationWarning)):
            # Spaces in the path are percent-quoted before dispatch.
            self.assertEqual(
                DummyURLopener().open('spam://example/ /'),
                '//example/%20/')

            # test the safe characters are not quoted by urlopen
            self.assertEqual(
                DummyURLopener().open(
                    "spam://c:|windows%/:=&?~#+!$,;'@()*[]|/path/"),
                "//c:|windows%/:=&?~#+!$,;'@()*[]|/path/")
1408 | n/a | |
---|
1409 | n/a | # Just commented them out. |
---|
# Can't really tell why they keep failing on Windows and SPARC.
---|
1411 | n/a | # Everywhere else they work ok, but on those machines, sometimes |
---|
1412 | n/a | # fail in one of the tests, sometimes in other. I have a linux, and |
---|
1413 | n/a | # the tests go ok. |
---|
1414 | n/a | # If anybody has one of the problematic environments, please help! |
---|
1415 | n/a | # . Facundo |
---|
1416 | n/a | # |
---|
1417 | n/a | # def server(evt): |
---|
1418 | n/a | # import socket, time |
---|
1419 | n/a | # serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM) |
---|
1420 | n/a | # serv.settimeout(3) |
---|
1421 | n/a | # serv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) |
---|
1422 | n/a | # serv.bind(("", 9093)) |
---|
1423 | n/a | # serv.listen() |
---|
1424 | n/a | # try: |
---|
1425 | n/a | # conn, addr = serv.accept() |
---|
1426 | n/a | # conn.send("1 Hola mundo\n") |
---|
1427 | n/a | # cantdata = 0 |
---|
1428 | n/a | # while cantdata < 13: |
---|
1429 | n/a | # data = conn.recv(13-cantdata) |
---|
1430 | n/a | # cantdata += len(data) |
---|
1431 | n/a | # time.sleep(.3) |
---|
1432 | n/a | # conn.send("2 No more lines\n") |
---|
1433 | n/a | # conn.close() |
---|
1434 | n/a | # except socket.timeout: |
---|
1435 | n/a | # pass |
---|
1436 | n/a | # finally: |
---|
1437 | n/a | # serv.close() |
---|
1438 | n/a | # evt.set() |
---|
1439 | n/a | # |
---|
1440 | n/a | # class FTPWrapperTests(unittest.TestCase): |
---|
1441 | n/a | # |
---|
1442 | n/a | # def setUp(self): |
---|
1443 | n/a | # import ftplib, time, threading |
---|
1444 | n/a | # ftplib.FTP.port = 9093 |
---|
1445 | n/a | # self.evt = threading.Event() |
---|
1446 | n/a | # threading.Thread(target=server, args=(self.evt,)).start() |
---|
1447 | n/a | # time.sleep(.1) |
---|
1448 | n/a | # |
---|
1449 | n/a | # def tearDown(self): |
---|
1450 | n/a | # self.evt.wait() |
---|
1451 | n/a | # |
---|
1452 | n/a | # def testBasic(self): |
---|
1453 | n/a | # # connects |
---|
1454 | n/a | # ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, []) |
---|
1455 | n/a | # ftp.close() |
---|
1456 | n/a | # |
---|
1457 | n/a | # def testTimeoutNone(self): |
---|
1458 | n/a | # # global default timeout is ignored |
---|
1459 | n/a | # import socket |
---|
1460 | n/a | # self.assertIsNone(socket.getdefaulttimeout()) |
---|
1461 | n/a | # socket.setdefaulttimeout(30) |
---|
1462 | n/a | # try: |
---|
1463 | n/a | # ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, []) |
---|
1464 | n/a | # finally: |
---|
1465 | n/a | # socket.setdefaulttimeout(None) |
---|
1466 | n/a | # self.assertEqual(ftp.ftp.sock.gettimeout(), 30) |
---|
1467 | n/a | # ftp.close() |
---|
1468 | n/a | # |
---|
1469 | n/a | # def testTimeoutDefault(self): |
---|
1470 | n/a | # # global default timeout is used |
---|
1471 | n/a | # import socket |
---|
1472 | n/a | # self.assertIsNone(socket.getdefaulttimeout()) |
---|
1473 | n/a | # socket.setdefaulttimeout(30) |
---|
1474 | n/a | # try: |
---|
1475 | n/a | # ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, []) |
---|
1476 | n/a | # finally: |
---|
1477 | n/a | # socket.setdefaulttimeout(None) |
---|
1478 | n/a | # self.assertEqual(ftp.ftp.sock.gettimeout(), 30) |
---|
1479 | n/a | # ftp.close() |
---|
1480 | n/a | # |
---|
1481 | n/a | # def testTimeoutValue(self): |
---|
1482 | n/a | # ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [], |
---|
1483 | n/a | # timeout=30) |
---|
1484 | n/a | # self.assertEqual(ftp.ftp.sock.gettimeout(), 30) |
---|
1485 | n/a | # ftp.close() |
---|
1486 | n/a | |
---|
1487 | n/a | |
---|
class RequestTests(unittest.TestCase):
    """Unit tests for urllib.request.Request."""

    def test_default_values(self):
        # Without data the method defaults to GET; with data, to POST.
        request = urllib.request.Request("http://www.python.org")
        self.assertEqual('GET', request.get_method())
        request = urllib.request.Request("http://www.python.org", {})
        self.assertEqual('POST', request.get_method())

    def test_with_method_arg(self):
        # An explicit method= argument overrides the data-based default...
        request = urllib.request.Request("http://www.python.org",
                                         method='HEAD')
        self.assertEqual('HEAD', request.method)
        self.assertEqual('HEAD', request.get_method())
        request = urllib.request.Request("http://www.python.org", {},
                                         method='HEAD')
        self.assertEqual('HEAD', request.method)
        self.assertEqual('HEAD', request.get_method())
        # ...and assigning .method after construction changes get_method().
        request = urllib.request.Request("http://www.python.org",
                                         method='GET')
        self.assertEqual('GET', request.get_method())
        request.method = 'HEAD'
        self.assertEqual('HEAD', request.get_method())
1510 | n/a | |
---|
1511 | n/a | |
---|
class URL2PathNameTests(unittest.TestCase):
    """Tests for nturl2path.url2pathname() (Windows URL-to-path rules)."""

    def test_converting_drive_letter(self):
        # Both 'C|' and 'C:' spellings denote the drive letter.
        for url, expected in (("///C|", 'C:'),
                              ("///C:", 'C:'),
                              ("///C|/", 'C:\\')):
            self.assertEqual(url2pathname(url), expected)

    def test_converting_when_no_drive_letter(self):
        # cannot end a raw string in \
        self.assertEqual(url2pathname("///C/test/"), r'\\\C\test' '\\')
        self.assertEqual(url2pathname("////C/test/"), r'\\C\test' '\\')

    def test_simple_compare(self):
        self.assertEqual(url2pathname("///C|/foo/bar/spam.foo"),
                         r'C:\foo\bar\spam.foo')

    def test_non_ascii_drive_letter(self):
        # Only single ASCII letters are accepted as drive letters.
        self.assertRaises(IOError, url2pathname, "///\u00e8|/")

    def test_roundtrip_url2pathname(self):
        paths_to_check = ['C:',
                          r'\\\C\test\\',
                          r'C:\foo\bar\spam.foo']
        for path in paths_to_check:
            self.assertEqual(url2pathname(pathname2url(path)), path)
1538 | n/a | |
---|
class PathName2URLTests(unittest.TestCase):
    """Tests for nturl2path.pathname2url() (Windows path-to-URL rules)."""

    def test_converting_drive_letter(self):
        # A bare drive (with or without trailing backslash) maps to '///C:'.
        self.assertEqual(pathname2url("C:"), '///C:')
        self.assertEqual(pathname2url("C:\\"), '///C:')

    def test_converting_when_no_drive_letter(self):
        cases = ((r"\\\folder\test" "\\", '/////folder/test/'),
                 (r"\\folder\test" "\\", '////folder/test/'),
                 (r"\folder\test" "\\", '/folder/test/'))
        for path, expected in cases:
            self.assertEqual(pathname2url(path), expected)

    def test_simple_compare(self):
        self.assertEqual(pathname2url(r'C:\foo\bar\spam.foo'),
                         "///C:/foo/bar/spam.foo")

    def test_long_drive_letter(self):
        # Multi-character "drives" are rejected.
        self.assertRaises(IOError, pathname2url, "XX:\\")

    def test_roundtrip_pathname2url(self):
        urls_to_check = ['///C:',
                         '/////folder/test/',
                         '///C:/foo/bar/spam.foo']
        for url in urls_to_check:
            self.assertEqual(pathname2url(url2pathname(url)), url)
1566 | n/a | |
---|
# Allow running this regression-test module directly from the command line.
if __name__ == '__main__':
    unittest.main()