# -*- coding: utf-8 -*-
from __future__ import unicode_literals

import json
import warnings
from unittest import skipUnless

from django.test import SimpleTestCase, ignore_warnings
from django.test.utils import reset_warning_registry
from django.utils import six, text
from django.utils.deprecation import RemovedInDjango19Warning
from django.utils.encoding import force_text
from django.utils.functional import lazy
from django.utils.translation import override

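# lazystr() builds a lazily evaluated text object; test_smart_split() below
# feeds it to smart_split() to check that lazy strings are handled like plain
# strings (see #20231).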
lazystr = lazy(force_text, six.text_type)
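# True when this Python build stores non-BMP code points such as U+1F4A9 as a
# single character (a wide build, or Python 3.3+); the wide-build-only test
# below is skipped otherwise.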
IS_WIDE_BUILD = (len('\U0001F4A9') == 1)


class TestUtilsText(SimpleTestCase):

    def test_get_text_list(self):
        self.assertEqual(text.get_text_list(['a', 'b', 'c', 'd']), 'a, b, c or d')
        self.assertEqual(text.get_text_list(['a', 'b', 'c'], 'and'), 'a, b and c')
        self.assertEqual(text.get_text_list(['a', 'b'], 'and'), 'a and b')
        self.assertEqual(text.get_text_list(['a']), 'a')
        self.assertEqual(text.get_text_list([]), '')
        with override('ar'):
            self.assertEqual(text.get_text_list(['a', 'b', 'c']), "a، b أو c")

    def test_smart_split(self):
        testdata = [
            ('This is "a person" test.',
             ['This', 'is', '"a person"', 'test.']),
            ('This is "a person\'s" test.',
             ['This', 'is', '"a person\'s"', 'test.']),
            ('This is "a person\\"s" test.',
             ['This', 'is', '"a person\\"s"', 'test.']),
            ('"a \'one',
             ['"a', "'one"]),
            ('all friends\' tests',
             ['all', 'friends\'', 'tests']),
            ('url search_page words="something else"',
             ['url', 'search_page', 'words="something else"']),
            ("url search_page words='something else'",
             ['url', 'search_page', "words='something else'"]),
            ('url search_page words "something else"',
             ['url', 'search_page', 'words', '"something else"']),
            ('url search_page words-"something else"',
             ['url', 'search_page', 'words-"something else"']),
            ('url search_page words=hello',
             ['url', 'search_page', 'words=hello']),
            ('url search_page words="something else',
             ['url', 'search_page', 'words="something', 'else']),
            ("cut:','|cut:' '",
             ["cut:','|cut:' '"]),
            (lazystr("a b c d"),  # Test for #20231
             ['a', 'b', 'c', 'd']),
        ]
        for test, expected in testdata:
            self.assertEqual(list(text.smart_split(test)), expected)
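
    # Truncator.chars(num) counts the truncation suffix towards num (note the
    # 23-character results below), while Truncator.words(num) keeps num words
    # and appends the suffix afterwards.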

    def test_truncate_chars(self):
        truncator = text.Truncator(
            'The quick brown fox jumped over the lazy dog.'
        )
        self.assertEqual('The quick brown fox jumped over the lazy dog.',
                         truncator.chars(100))
        self.assertEqual('The quick brown fox ...', truncator.chars(23))
        self.assertEqual('The quick brown fo.....', truncator.chars(23, '.....'))

        # Ensure that we normalize our unicode data first
        nfc = text.Truncator('o\xfco\xfco\xfco\xfc')
        nfd = text.Truncator('ou\u0308ou\u0308ou\u0308ou\u0308')
        self.assertEqual('oüoüoüoü', nfc.chars(8))
        self.assertEqual('oüoüoüoü', nfd.chars(8))
        self.assertEqual('oü...', nfc.chars(5))
        self.assertEqual('oü...', nfd.chars(5))

        # Ensure the final length is calculated correctly when there are
        # combining characters with no precomposed form, and that combining
        # characters are not split up.
        truncator = text.Truncator('-B\u030AB\u030A----8')
        self.assertEqual('-B\u030A...', truncator.chars(5))
        self.assertEqual('-B\u030AB\u030A-...', truncator.chars(7))
        self.assertEqual('-B\u030AB\u030A----8', truncator.chars(8))

        # Ensure the length of the end text is correctly calculated when it
        # contains combining characters with no precomposed form.
        truncator = text.Truncator('-----')
        self.assertEqual('---B\u030A', truncator.chars(4, 'B\u030A'))
        self.assertEqual('-----', truncator.chars(5, 'B\u030A'))

        # Make a best effort to shorten to the desired length, but requesting
        # a length shorter than the ellipsis shouldn't break
        self.assertEqual('...', text.Truncator('asdf').chars(1))

    def test_truncate_words(self):
        truncator = text.Truncator('The quick brown fox jumped over the lazy '
                                   'dog.')
        self.assertEqual('The quick brown fox jumped over the lazy dog.',
                         truncator.words(10))
        self.assertEqual('The quick brown fox...', truncator.words(4))
        self.assertEqual('The quick brown fox[snip]',
                         truncator.words(4, '[snip]'))

    def test_truncate_html_words(self):
        truncator = text.Truncator('<p id="par"><strong><em>The quick brown fox'
                                   ' jumped over the lazy dog.</em></strong></p>')
        self.assertEqual('<p id="par"><strong><em>The quick brown fox jumped over'
                         ' the lazy dog.</em></strong></p>', truncator.words(10, html=True))
        self.assertEqual('<p id="par"><strong><em>The quick brown fox...</em>'
                         '</strong></p>', truncator.words(4, html=True))
        self.assertEqual('<p id="par"><strong><em>The quick brown fox....</em>'
                         '</strong></p>', truncator.words(4, '....', html=True))
        self.assertEqual('<p id="par"><strong><em>The quick brown fox</em>'
                         '</strong></p>', truncator.words(4, '', html=True))

        # Test with new line inside tag
        truncator = text.Truncator('<p>The quick <a href="xyz.html"\n'
                                   'id="mylink">brown fox</a> jumped over the lazy dog.</p>')
        self.assertEqual('<p>The quick <a href="xyz.html"\n'
                         'id="mylink">brown...</a></p>', truncator.words(3, '...', html=True))

        # Test self-closing tags
        truncator = text.Truncator('<br/>The <hr />quick brown fox jumped over'
                                   ' the lazy dog.')
        self.assertEqual('<br/>The <hr />quick brown...',
                         truncator.words(3, '...', html=True))
        truncator = text.Truncator('<br>The <hr/>quick <em>brown fox</em> '
                                   'jumped over the lazy dog.')
        self.assertEqual('<br>The <hr/>quick <em>brown...</em>',
                         truncator.words(3, '...', html=True))

        # Test html entities
        truncator = text.Truncator('<i>Buenos d&iacute;as!'
                                   ' &#x00bf;C&oacute;mo est&aacute;?</i>')
        self.assertEqual('<i>Buenos d&iacute;as! &#x00bf;C&oacute;mo...</i>',
                         truncator.words(3, '...', html=True))
        truncator = text.Truncator('<p>I &lt;3 python, what about you?</p>')
        self.assertEqual('<p>I &lt;3 python...</p>',
                         truncator.words(3, '...', html=True))
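
    # text.wrap() breaks lines at the given width but never splits a single
    # long word, as the long_word assertions below show.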

    def test_wrap(self):
        digits = '1234 67 9'
        self.assertEqual(text.wrap(digits, 100), '1234 67 9')
        self.assertEqual(text.wrap(digits, 9), '1234 67 9')
        self.assertEqual(text.wrap(digits, 8), '1234 67\n9')

        self.assertEqual(text.wrap('short\na long line', 7),
                         'short\na long\nline')

        self.assertEqual(text.wrap('do-not-break-long-words please? ok', 8),
                         'do-not-break-long-words\nplease?\nok')

        long_word = 'l%sng' % ('o' * 20)
        self.assertEqual(text.wrap(long_word, 20), long_word)
        self.assertEqual(text.wrap('a %s word' % long_word, 10),
                         'a\n%s\nword' % long_word)

    def test_normalize_newlines(self):
        self.assertEqual(text.normalize_newlines("abc\ndef\rghi\r\n"),
                         "abc\ndef\nghi\n")
        self.assertEqual(text.normalize_newlines("\n\r\r\n\r"), "\n\n\n\n")
        self.assertEqual(text.normalize_newlines("abcdefghi"), "abcdefghi")
        self.assertEqual(text.normalize_newlines(""), "")

    def test_normalize_newlines_bytes(self):
        """normalize_newlines should be able to handle bytes too"""
        normalized = text.normalize_newlines(b"abc\ndef\rghi\r\n")
        self.assertEqual(normalized, "abc\ndef\nghi\n")
        self.assertIsInstance(normalized, six.text_type)

    def test_slugify(self):
        items = (
            ('Hello, World!', 'hello-world'),
            ('spam & eggs', 'spam-eggs'),
        )
        for value, output in items:
            self.assertEqual(text.slugify(value), output)

    def test_unescape_entities(self):
        items = [
            ('', ''),
            ('foo', 'foo'),
            ('&amp;', '&'),
            ('&#38;', '&'),
            ('&#x26;', '&'),
            ('foo &amp; bar', 'foo & bar'),
            ('foo & bar', 'foo & bar'),
        ]
        for value, output in items:
            self.assertEqual(text.unescape_entities(value), output)
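
    # get_valid_filename() replaces spaces with underscores and drops every
    # character that is not an alphanumeric, dash, underscore, or dot, as the
    # assertion below shows.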

    def test_get_valid_filename(self):
        filename = "^&'@{}[],$=!-#()%+~_123.txt"
        self.assertEqual(text.get_valid_filename(filename), "-_123.txt")
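
    # compress_sequence() gzip-compresses an iterable of bytestrings chunk by
    # chunk (it backs streamed gzipped responses); the test only checks that
    # the compressed output is shorter than the concatenated input.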

    def test_compress_sequence(self):
        data = [{'key': i} for i in range(10)]
        seq = list(json.JSONEncoder().iterencode(data))
        seq = [s.encode('utf-8') for s in seq]
        actual_length = len(b''.join(seq))
        out = text.compress_sequence(seq)
        compressed_length = len(b''.join(out))
        self.assertLess(compressed_length, actual_length)
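
    # javascript_quote() is deprecated (RemovedInDjango19Warning) in favour of
    # escapejs(); the tests below silence that warning explicitly, and
    # test_deprecation() checks that the warning message points to escapejs().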

    @ignore_warnings(category=RemovedInDjango19Warning)
    def test_javascript_quote(self):
        input = "<script>alert('Hello \\xff.\n Welcome\there\r');</script>"
        output = r"<script>alert(\'Hello \\xff.\n Welcome\there\r\');<\/script>"
        self.assertEqual(text.javascript_quote(input), output)

        # Exercising quote_double_quotes keyword argument
        input = '"Text"'
        self.assertEqual(text.javascript_quote(input), '"Text"')
        self.assertEqual(text.javascript_quote(input, quote_double_quotes=True),
                         '&quot;Text&quot;')

    @ignore_warnings(category=RemovedInDjango19Warning)
    @skipUnless(IS_WIDE_BUILD, 'Not running in a wide build of Python')
    def test_javascript_quote_unicode(self):
        input = "<script>alert('Hello \\xff.\n Wel𝕃come\there\r');</script>"
        output = r"<script>alert(\'Hello \\xff.\n Wel𝕃come\there\r\');<\/script>"
        self.assertEqual(text.javascript_quote(input), output)

    def test_deprecation(self):
        reset_warning_registry()
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            text.javascript_quote('thingy')
            self.assertEqual(len(w), 1)
            self.assertIn('escapejs()', repr(w[0].message))