Dropped safeguards against very old versions of gettext.

gettext 0.19 was released in 2014.
Claude Paroz 2024-08-27 21:31:07 +02:00
parent 2ff00251f9
commit 2c1f27d0d0
8 changed files with 12 additions and 705 deletions
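For context, the removed safeguards compared the makemessages command's gettext_version tuple (parsed from the version reported by the installed xgettext) against thresholds such as (0, 18, 3) and (0, 19). A minimal standalone sketch of that kind of probe, using a hypothetical helper rather than Django's exact code:

    import re
    import subprocess

    def get_gettext_version():
        """Return the installed xgettext version as a tuple, e.g. (0, 22, 5)."""
        out = subprocess.run(
            ["xgettext", "--version"], capture_output=True, text=True, check=True
        ).stdout
        match = re.search(r"(\d+)\.(\d+)\.?(\d+)?", out)
        if match is None:
            raise RuntimeError("Could not parse xgettext version output.")
        return tuple(int(part) for part in match.groups() if part is not None)

    # With 0.19 as the new baseline, per-feature guards such as
    # `if get_gettext_version() < (0, 18, 3): ...` are no longer needed.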

django/core/management/commands/compilemessages.py

@@ -79,8 +79,8 @@ class Command(BaseCommand):
if find_command(self.program) is None:
raise CommandError(
"Can't find %s. Make sure you have GNU gettext "
"tools 0.15 or newer installed." % self.program
f"Can't find {self.program}. Make sure you have GNU gettext "
"tools 0.19 or newer installed."
)
basedirs = [os.path.join("conf", "locale"), "locale"]

django/core/management/commands/makemessages.py

@@ -19,7 +19,6 @@ from django.core.management.utils import (
)
from django.utils.encoding import DEFAULT_LOCALE_ENCODING
from django.utils.functional import cached_property
- from django.utils.jslex import prepare_js_for_gettext
from django.utils.regex_helper import _lazy_re_compile
from django.utils.text import get_text_list
from django.utils.translation import templatize
@@ -35,8 +34,8 @@ def check_programs(*programs):
for program in programs:
if find_command(program) is None:
raise CommandError(
"Can't find %s. Make sure you have GNU gettext tools 0.15 or "
"newer installed." % program
f"Can't find {program}. Make sure you have GNU gettext tools "
"0.19 or newer installed."
)
@@ -80,9 +79,7 @@ class BuildFile:
@cached_property
def is_templatized(self):
if self.domain == "djangojs":
return self.command.gettext_version < (0, 18, 3)
elif self.domain == "django":
if self.domain == "django":
file_ext = os.path.splitext(self.translatable.file)[1]
return file_ext != ".py"
return False
@@ -99,11 +96,7 @@ class BuildFile:
"""
if not self.is_templatized:
return self.path
- extension = {
- "djangojs": "c",
- "django": "py",
- }.get(self.domain)
- filename = "%s.%s" % (self.translatable.file, extension)
+ filename = f"{self.translatable.file}.py"
return os.path.join(self.translatable.dirpath, filename)
def preprocess(self):
@@ -117,9 +110,7 @@ class BuildFile:
with open(self.path, encoding="utf-8") as fp:
src_data = fp.read()
if self.domain == "djangojs":
content = prepare_js_for_gettext(src_data)
elif self.domain == "django":
if self.domain == "django":
content = templatize(src_data, origin=self.path[2:])
with open(self.work_path, "w", encoding="utf-8") as fp:
@@ -349,11 +340,6 @@ class Command(BaseCommand):
self.msgattrib_options = self.msgattrib_options[:] + ["--no-location"]
self.xgettext_options = self.xgettext_options[:] + ["--no-location"]
if options["add_location"]:
- if self.gettext_version < (0, 19):
- raise CommandError(
- "The --add-location option requires gettext 0.19 or later. "
- "You have %s." % ".".join(str(x) for x in self.gettext_version)
- )
arg_add_location = "--add-location=%s" % options["add_location"]
self.msgmerge_options = self.msgmerge_options[:] + [arg_add_location]
self.msguniq_options = self.msguniq_options[:] + [arg_add_location]
@@ -636,12 +622,11 @@ class Command(BaseCommand):
build_files.append(build_file)
if self.domain == "djangojs":
- is_templatized = build_file.is_templatized
args = [
"xgettext",
"-d",
self.domain,
"--language=%s" % ("C" if is_templatized else "JavaScript",),
"--language=JavaScript",
"--keyword=gettext_noop",
"--keyword=gettext_lazy",
"--keyword=ngettext_lazy:1,2",

django/utils/jslex.py

@@ -1,250 +0,0 @@
"""JsLex: a lexer for JavaScript"""
# Originally from https://bitbucket.org/ned/jslex
import re
class Tok:
"""
A specification for a token class.
"""
num = 0
def __init__(self, name, regex, next=None):
self.id = Tok.num
Tok.num += 1
self.name = name
self.regex = regex
self.next = next
def literals(choices, prefix="", suffix=""):
"""
Create a regex from a space-separated list of literal `choices`.
If provided, `prefix` and `suffix` will be attached to each choice
individually.
"""
return "|".join(prefix + re.escape(c) + suffix for c in choices.split())
class Lexer:
"""
A generic multi-state regex-based lexer.
"""
def __init__(self, states, first):
self.regexes = {}
self.toks = {}
for state, rules in states.items():
parts = []
for tok in rules:
groupid = "t%d" % tok.id
self.toks[groupid] = tok
parts.append("(?P<%s>%s)" % (groupid, tok.regex))
self.regexes[state] = re.compile("|".join(parts), re.MULTILINE | re.VERBOSE)
self.state = first
def lex(self, text):
"""
Lexically analyze `text`.
Yield pairs (`name`, `tokentext`).
"""
end = len(text)
state = self.state
regexes = self.regexes
toks = self.toks
start = 0
while start < end:
for match in regexes[state].finditer(text, start):
name = match.lastgroup
tok = toks[name]
toktext = match[name]
start += len(toktext)
yield (tok.name, toktext)
if tok.next:
state = tok.next
break
self.state = state
class JsLexer(Lexer):
"""
A JavaScript lexer
>>> lexer = JsLexer()
>>> list(lexer.lex("a = 1"))
[('id', 'a'), ('ws', ' '), ('punct', '='), ('ws', ' '), ('dnum', '1')]
This doesn't properly handle non-ASCII characters in the JavaScript source.
"""
# Because these tokens are matched as alternatives in a regex, longer
# possibilities must appear in the list before shorter ones, for example,
# '>>' before '>'.
#
# Note that we don't have to detect malformed JavaScript, only properly
# lex correct JavaScript, so much of this is simplified.
# Details of JavaScript lexical structure are taken from
# https://www.ecma-international.org/publications-and-standards/standards/ecma-262/
# A useful explanation of automatic semicolon insertion is at
# http://inimino.org/~inimino/blog/javascript_semicolons
both_before = [
Tok("comment", r"/\*(.|\n)*?\*/"),
Tok("linecomment", r"//.*?$"),
Tok("ws", r"\s+"),
Tok(
"keyword",
literals(
"""
break case catch class const continue debugger
default delete do else enum export extends
finally for function if import in instanceof
new return super switch this throw try typeof
var void while with
""",
suffix=r"\b",
),
next="reg",
),
Tok("reserved", literals("null true false", suffix=r"\b"), next="div"),
Tok(
"id",
r"""
([a-zA-Z_$ ]|\\u[0-9a-fA-Z]{4}) # first char
([a-zA-Z_$0-9]|\\u[0-9a-fA-F]{4})* # rest chars
""",
next="div",
),
Tok("hnum", r"0[xX][0-9a-fA-F]+", next="div"),
Tok("onum", r"0[0-7]+"),
Tok(
"dnum",
r"""
( (0|[1-9][0-9]*) # DecimalIntegerLiteral
\. # dot
[0-9]* # DecimalDigits-opt
([eE][-+]?[0-9]+)? # ExponentPart-opt
|
\. # dot
[0-9]+ # DecimalDigits
([eE][-+]?[0-9]+)? # ExponentPart-opt
|
(0|[1-9][0-9]*) # DecimalIntegerLiteral
([eE][-+]?[0-9]+)? # ExponentPart-opt
)
""",
next="div",
),
Tok(
"punct",
literals(
"""
>>>= === !== >>> <<= >>= <= >= == != << >> &&
|| += -= *= %= &= |= ^=
"""
),
next="reg",
),
Tok("punct", literals("++ -- ) ]"), next="div"),
Tok("punct", literals("{ } ( [ . ; , < > + - * % & | ^ ! ~ ? : ="), next="reg"),
Tok("string", r'"([^"\\]|(\\(.|\n)))*?"', next="div"),
Tok("string", r"'([^'\\]|(\\(.|\n)))*?'", next="div"),
]
both_after = [
Tok("other", r"."),
]
states = {
# slash will mean division
"div": both_before
+ [
Tok("punct", literals("/= /"), next="reg"),
]
+ both_after,
# slash will mean regex
"reg": both_before
+ [
Tok(
"regex",
r"""
/ # opening slash
# First character is..
( [^*\\/[] # anything but * \ / or [
| \\. # or an escape sequence
| \[ # or a class, which has
( [^\]\\] # anything but \ or ]
| \\. # or an escape sequence
)* # many times
\]
)
# Following characters are same, except for excluding a star
( [^\\/[] # anything but \ / or [
| \\. # or an escape sequence
| \[ # or a class, which has
( [^\]\\] # anything but \ or ]
| \\. # or an escape sequence
)* # many times
\]
)* # many times
/ # closing slash
[a-zA-Z0-9]* # trailing flags
""",
next="div",
),
]
+ both_after,
}
def __init__(self):
super().__init__(self.states, "reg")
def prepare_js_for_gettext(js):
"""
Convert the JavaScript source `js` into something resembling C for
xgettext.
What actually happens is that all the regex literals are replaced with
"REGEX".
"""
def escape_quotes(m):
"""Used in a regex to properly escape double quotes."""
s = m[0]
if s == '"':
return r"\""
else:
return s
lexer = JsLexer()
c = []
for name, tok in lexer.lex(js):
if name == "regex":
# C doesn't grok regexes, and they aren't needed for gettext,
# so just output a string instead.
tok = '"REGEX"'
elif name == "string":
# C doesn't have single-quoted strings, so make all strings
# double-quoted.
if tok.startswith("'"):
guts = re.sub(r"\\.|.", escape_quotes, tok[1:-1])
tok = '"' + guts + '"'
elif name == "id":
# C can't deal with Unicode escapes in identifiers. We don't
# need them for gettext anyway, so replace them with something
# innocuous
tok = tok.replace("\\", "U")
c.append(tok)
return "".join(c)

docs/releases/5.2.txt

@@ -306,6 +306,8 @@ Miscellaneous
* Adding :attr:`.EmailMultiAlternatives.alternatives` is now only supported via
the :meth:`~.EmailMultiAlternatives.attach_alternative` method.
+ * The minimum supported version of ``gettext`` is increased from 0.15 to 0.19.
.. _deprecated-features-5.2:
Features deprecated in 5.2

docs/topics/i18n/translation.txt

@@ -1551,7 +1551,7 @@ Django comes with a tool, :djadmin:`django-admin makemessages
commands from the GNU gettext toolset: ``xgettext``, ``msgfmt``,
``msgmerge`` and ``msguniq``.
- The minimum version of the ``gettext`` utilities supported is 0.15.
+ The minimum version of the ``gettext`` utilities supported is 0.19.
To create or update a message file, run this command:

tests/i18n/test_compilation.py

@@ -8,7 +8,6 @@ from subprocess import run
from unittest import mock
from django.core.management import CommandError, call_command, execute_from_command_line
- from django.core.management.commands.makemessages import Command as MakeMessagesCommand
from django.core.management.utils import find_command
from django.test import SimpleTestCase, override_settings
from django.test.utils import captured_stderr, captured_stdout
@@ -269,9 +268,6 @@ class CompilationErrorHandling(MessageCompilationTests):
"django.core.management.utils.run",
lambda *args, **kwargs: run(*args, env=env, **kwargs),
):
- cmd = MakeMessagesCommand()
- if cmd.gettext_version < (0, 18, 3):
- self.skipTest("python-brace-format is a recent gettext addition.")
stderr = StringIO()
with self.assertRaisesMessage(
CommandError, "compilemessages generated one or more errors"

tests/i18n/test_extraction.py

@@ -6,7 +6,7 @@ import time
import warnings
from io import StringIO
from pathlib import Path
- from unittest import mock, skipIf, skipUnless
+ from unittest import mock, skipUnless
from admin_scripts.tests import AdminScriptTestCase
@@ -25,10 +25,6 @@ from .utils import POFileAssertionMixin, RunInTmpDirMixin, copytree
LOCALE = "de"
has_xgettext = find_command("xgettext")
- gettext_version = MakeMessagesCommand().gettext_version if has_xgettext else None
- requires_gettext_019 = skipIf(
- has_xgettext and gettext_version < (0, 19), "gettext 0.19 required"
- )
@skipUnless(has_xgettext, "xgettext is mandatory for extraction tests")
@@ -836,7 +832,6 @@ class LocationCommentsTests(ExtractorTests):
self.assertLocationCommentNotPresent(self.PO_FILE, None, ".html.py")
self.assertLocationCommentPresent(self.PO_FILE, 5, "templates", "test.html")
- @requires_gettext_019
def test_add_location_full(self):
"""makemessages --add-location=full"""
management.call_command(
@@ -848,7 +843,6 @@ class LocationCommentsTests(ExtractorTests):
self.PO_FILE, "Translatable literal #6b", "templates", "test.html"
)
- @requires_gettext_019
def test_add_location_file(self):
"""makemessages --add-location=file"""
management.call_command(
@@ -862,7 +856,6 @@ class LocationCommentsTests(ExtractorTests):
self.PO_FILE, "Translatable literal #6b", "templates", "test.html"
)
- @requires_gettext_019
def test_add_location_never(self):
"""makemessages --add-location=never"""
management.call_command(
@@ -871,24 +864,6 @@ class LocationCommentsTests(ExtractorTests):
self.assertTrue(os.path.exists(self.PO_FILE))
self.assertLocationCommentNotPresent(self.PO_FILE, None, "test.html")
- @mock.patch(
- "django.core.management.commands.makemessages.Command.gettext_version",
- new=(0, 18, 99),
- )
- def test_add_location_gettext_version_check(self):
- """
- CommandError is raised when using makemessages --add-location with
- gettext < 0.19.
- """
- msg = (
- "The --add-location option requires gettext 0.19 or later. You have "
- "0.18.99."
- )
- with self.assertRaisesMessage(CommandError, msg):
- management.call_command(
- "makemessages", locale=[LOCALE], verbosity=0, add_location="full"
- )
class NoObsoleteExtractorTests(ExtractorTests):
work_subdir = "obsolete_translations"

tests/i18n/test_jslex.py

@@ -1,401 +0,0 @@
"""Tests for jslex."""
# originally from https://bitbucket.org/ned/jslex
from django.test import SimpleTestCase
from django.utils.jslex import JsLexer, prepare_js_for_gettext
class JsTokensTest(SimpleTestCase):
LEX_CASES = [
# ids
("a ABC $ _ a123", ["id a", "id ABC", "id $", "id _", "id a123"]),
(
"\\u1234 abc\\u0020 \\u0065_\\u0067",
["id \\u1234", "id abc\\u0020", "id \\u0065_\\u0067"],
),
# numbers
(
"123 1.234 0.123e-3 0 1E+40 1e1 .123",
[
"dnum 123",
"dnum 1.234",
"dnum 0.123e-3",
"dnum 0",
"dnum 1E+40",
"dnum 1e1",
"dnum .123",
],
),
("0x1 0xabCD 0XABcd", ["hnum 0x1", "hnum 0xabCD", "hnum 0XABcd"]),
("010 0377 090", ["onum 010", "onum 0377", "dnum 0", "dnum 90"]),
("0xa123ghi", ["hnum 0xa123", "id ghi"]),
# keywords
(
"function Function FUNCTION",
["keyword function", "id Function", "id FUNCTION"],
),
(
"const constructor in inherits",
["keyword const", "id constructor", "keyword in", "id inherits"],
),
("true true_enough", ["reserved true", "id true_enough"]),
# strings
(""" 'hello' "hello" """, ["string 'hello'", 'string "hello"']),
(
r""" 'don\'t' "don\"t" '"' "'" '\'' "\"" """,
[
r"""string 'don\'t'""",
r'''string "don\"t"''',
r"""string '"'""",
r'''string "'"''',
r"""string '\''""",
r'''string "\""''',
],
),
(r'"ƃuıxǝ⅂ ʇdıɹɔsɐʌɐſ\""', [r'string "ƃuıxǝ⅂ ʇdıɹɔsɐʌɐſ\""']),
# comments
("a//b", ["id a", "linecomment //b"]),
(
"/****/a/=2//hello",
["comment /****/", "id a", "punct /=", "dnum 2", "linecomment //hello"],
),
(
"/*\n * Header\n */\na=1;",
["comment /*\n * Header\n */", "id a", "punct =", "dnum 1", "punct ;"],
),
# punctuation
("a+++b", ["id a", "punct ++", "punct +", "id b"]),
# regex
(r"a=/a*/,1", ["id a", "punct =", "regex /a*/", "punct ,", "dnum 1"]),
(r"a=/a*[^/]+/,1", ["id a", "punct =", "regex /a*[^/]+/", "punct ,", "dnum 1"]),
(r"a=/a*\[^/,1", ["id a", "punct =", r"regex /a*\[^/", "punct ,", "dnum 1"]),
(r"a=/\//,1", ["id a", "punct =", r"regex /\//", "punct ,", "dnum 1"]),
# next two are from https://www-archive.mozilla.org/js/language/js20-2002-04/rationale/syntax.html#regular-expressions # NOQA
(
'for (var x = a in foo && "</x>" || mot ? z:/x:3;x<5;y</g/i) {xyz(x++);}',
[
"keyword for",
"punct (",
"keyword var",
"id x",
"punct =",
"id a",
"keyword in",
"id foo",
"punct &&",
'string "</x>"',
"punct ||",
"id mot",
"punct ?",
"id z",
"punct :",
"regex /x:3;x<5;y</g",
"punct /",
"id i",
"punct )",
"punct {",
"id xyz",
"punct (",
"id x",
"punct ++",
"punct )",
"punct ;",
"punct }",
],
),
(
'for (var x = a in foo && "</x>" || mot ? z/x:3;x<5;y</g/i) {xyz(x++);}',
[
"keyword for",
"punct (",
"keyword var",
"id x",
"punct =",
"id a",
"keyword in",
"id foo",
"punct &&",
'string "</x>"',
"punct ||",
"id mot",
"punct ?",
"id z",
"punct /",
"id x",
"punct :",
"dnum 3",
"punct ;",
"id x",
"punct <",
"dnum 5",
"punct ;",
"id y",
"punct <",
"regex /g/i",
"punct )",
"punct {",
"id xyz",
"punct (",
"id x",
"punct ++",
"punct )",
"punct ;",
"punct }",
],
),
# Various "illegal" regexes that are valid according to the std.
(
r"""/????/, /++++/, /[----]/ """,
["regex /????/", "punct ,", "regex /++++/", "punct ,", "regex /[----]/"],
),
# Stress cases from https://stackoverflow.com/questions/5533925/what-javascript-constructs-does-jslex-incorrectly-lex/5573409#5573409 # NOQA
(r"""/\[/""", [r"""regex /\[/"""]),
(r"""/[i]/""", [r"""regex /[i]/"""]),
(r"""/[\]]/""", [r"""regex /[\]]/"""]),
(r"""/a[\]]/""", [r"""regex /a[\]]/"""]),
(r"""/a[\]]b/""", [r"""regex /a[\]]b/"""]),
(r"""/[\]/]/gi""", [r"""regex /[\]/]/gi"""]),
(r"""/\[[^\]]+\]/gi""", [r"""regex /\[[^\]]+\]/gi"""]),
(
r"""
rexl.re = {
NAME: /^(?![0-9])(?:\w)+|^"(?:[^"]|"")+"/,
UNQUOTED_LITERAL: /^@(?:(?![0-9])(?:\w|\:)+|^"(?:[^"]|"")+")\[[^\]]+\]/,
QUOTED_LITERAL: /^'(?:[^']|'')*'/,
NUMERIC_LITERAL: /^[0-9]+(?:\.[0-9]*(?:[eE][-+][0-9]+)?)?/,
SYMBOL: /^(?:==|=|<>|<=|<|>=|>|!~~|!~|~~|~|!==|!=|!~=|!~|!|&|\||\.|\:|,|\(|\)|\[|\]|\{|\}|\?|\:|;|@|\^|\/\+|\/|\*|\+|-)/
};
""", # NOQA
[
"id rexl",
"punct .",
"id re",
"punct =",
"punct {",
"id NAME",
"punct :",
r"""regex /^(?![0-9])(?:\w)+|^"(?:[^"]|"")+"/""",
"punct ,",
"id UNQUOTED_LITERAL",
"punct :",
r"""regex /^@(?:(?![0-9])(?:\w|\:)+|^"(?:[^"]|"")+")\[[^\]]+\]/""",
"punct ,",
"id QUOTED_LITERAL",
"punct :",
r"""regex /^'(?:[^']|'')*'/""",
"punct ,",
"id NUMERIC_LITERAL",
"punct :",
r"""regex /^[0-9]+(?:\.[0-9]*(?:[eE][-+][0-9]+)?)?/""",
"punct ,",
"id SYMBOL",
"punct :",
r"""regex /^(?:==|=|<>|<=|<|>=|>|!~~|!~|~~|~|!==|!=|!~=|!~|!|&|\||\.|\:|,|\(|\)|\[|\]|\{|\}|\?|\:|;|@|\^|\/\+|\/|\*|\+|-)/""", # NOQA
"punct }",
"punct ;",
],
),
(
r"""
rexl.re = {
NAME: /^(?![0-9])(?:\w)+|^"(?:[^"]|"")+"/,
UNQUOTED_LITERAL: /^@(?:(?![0-9])(?:\w|\:)+|^"(?:[^"]|"")+")\[[^\]]+\]/,
QUOTED_LITERAL: /^'(?:[^']|'')*'/,
NUMERIC_LITERAL: /^[0-9]+(?:\.[0-9]*(?:[eE][-+][0-9]+)?)?/,
SYMBOL: /^(?:==|=|<>|<=|<|>=|>|!~~|!~|~~|~|!==|!=|!~=|!~|!|&|\||\.|\:|,|\(|\)|\[|\]|\{|\}|\?|\:|;|@|\^|\/\+|\/|\*|\+|-)/
};
str = '"';
""", # NOQA
[
"id rexl",
"punct .",
"id re",
"punct =",
"punct {",
"id NAME",
"punct :",
r"""regex /^(?![0-9])(?:\w)+|^"(?:[^"]|"")+"/""",
"punct ,",
"id UNQUOTED_LITERAL",
"punct :",
r"""regex /^@(?:(?![0-9])(?:\w|\:)+|^"(?:[^"]|"")+")\[[^\]]+\]/""",
"punct ,",
"id QUOTED_LITERAL",
"punct :",
r"""regex /^'(?:[^']|'')*'/""",
"punct ,",
"id NUMERIC_LITERAL",
"punct :",
r"""regex /^[0-9]+(?:\.[0-9]*(?:[eE][-+][0-9]+)?)?/""",
"punct ,",
"id SYMBOL",
"punct :",
r"""regex /^(?:==|=|<>|<=|<|>=|>|!~~|!~|~~|~|!==|!=|!~=|!~|!|&|\||\.|\:|,|\(|\)|\[|\]|\{|\}|\?|\:|;|@|\^|\/\+|\/|\*|\+|-)/""", # NOQA
"punct }",
"punct ;",
"id str",
"punct =",
"""string '"'""",
"punct ;",
],
),
(
r' this._js = "e.str(\"" + this.value.replace(/\\/g, "\\\\")'
r'.replace(/"/g, "\\\"") + "\")"; ',
[
"keyword this",
"punct .",
"id _js",
"punct =",
r'''string "e.str(\""''',
"punct +",
"keyword this",
"punct .",
"id value",
"punct .",
"id replace",
"punct (",
r"regex /\\/g",
"punct ,",
r'string "\\\\"',
"punct )",
"punct .",
"id replace",
"punct (",
r'regex /"/g',
"punct ,",
r'string "\\\""',
"punct )",
"punct +",
r'string "\")"',
"punct ;",
],
),
]
def make_function(input, toks):
def test_func(self):
lexer = JsLexer()
result = [
"%s %s" % (name, tok) for name, tok in lexer.lex(input) if name != "ws"
]
self.assertEqual(result, toks)
return test_func
for i, (input, toks) in enumerate(JsTokensTest.LEX_CASES):
setattr(JsTokensTest, "test_case_%d" % i, make_function(input, toks))
GETTEXT_CASES = (
(
r"""
a = 1; /* /[0-9]+/ */
b = 0x2a0b / 1; // /[0-9]+/
c = 3;
""",
r"""
a = 1; /* /[0-9]+/ */
b = 0x2a0b / 1; // /[0-9]+/
c = 3;
""",
),
(
r"""
a = 1.234e-5;
/*
* /[0-9+/
*/
b = .0123;
""",
r"""
a = 1.234e-5;
/*
* /[0-9+/
*/
b = .0123;
""",
),
(
r"""
x = y / z;
alert(gettext("hello"));
x /= 3;
""",
r"""
x = y / z;
alert(gettext("hello"));
x /= 3;
""",
),
(
r"""
s = "Hello \"th/foo/ere\"";
s = 'He\x23llo \'th/foo/ere\'';
s = 'slash quote \", just quote "';
""",
r"""
s = "Hello \"th/foo/ere\"";
s = "He\x23llo \'th/foo/ere\'";
s = "slash quote \", just quote \"";
""",
),
(
r"""
s = "Line continuation\
continued /hello/ still the string";/hello/;
""",
r"""
s = "Line continuation\
continued /hello/ still the string";"REGEX";
""",
),
(
r"""
var regex = /pattern/;
var regex2 = /matter/gm;
var regex3 = /[*/]+/gm.foo("hey");
""",
r"""
var regex = "REGEX";
var regex2 = "REGEX";
var regex3 = "REGEX".foo("hey");
""",
),
(
r"""
for (var x = a in foo && "</x>" || mot ? z:/x:3;x<5;y</g/i) {xyz(x++);}
for (var x = a in foo && "</x>" || mot ? z/x:3;x<5;y</g/i) {xyz(x++);}
""",
r"""
for (var x = a in foo && "</x>" || mot ? z:"REGEX"/i) {xyz(x++);}
for (var x = a in foo && "</x>" || mot ? z/x:3;x<5;y<"REGEX") {xyz(x++);}
""",
),
(
"""
\\u1234xyz = gettext('Hello there');
""",
r"""
Uu1234xyz = gettext("Hello there");
""",
),
)
class JsToCForGettextTest(SimpleTestCase):
pass
def make_function(js, c):
def test_func(self):
self.assertEqual(prepare_js_for_gettext(js), c)
return test_func
for i, pair in enumerate(GETTEXT_CASES):
setattr(JsToCForGettextTest, "test_case_%d" % i, make_function(*pair))