Mirror of https://github.com/django/django.git
Replaced TOKEN_* constants by TokenType enums.
Thanks Tim Graham for the review.
Commit 9c4ea63e87 (parent 1e20fedb35)
--- a/django/template/base.py
+++ b/django/template/base.py
@@ -3,9 +3,10 @@ This is the Django template system.
 
 How it works:
 
-The Lexer.tokenize() function converts a template string (i.e., a string containing
-markup with custom template tags) to tokens, which can be either plain text
-(TOKEN_TEXT), variables (TOKEN_VAR) or block statements (TOKEN_BLOCK).
+The Lexer.tokenize() method converts a template string (i.e., a string
+containing markup with custom template tags) to tokens, which can be either
+plain text (TokenType.TEXT), variables (TokenType.VAR), or block statements
+(TokenType.BLOCK).
 
 The Parser() class takes a list of tokens in its constructor, and its parse()
 method returns a compiled template -- which is, under the hood, a list of
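
For orientation, a minimal sketch of what the tokenizer described above produces after this change (the template string is made up; Lexer and TokenType are the names from this patch):

    from django.template.base import Lexer, TokenType

    for token in Lexer('Hi {{ name }}!{% if x %}...{% endif %}').tokenize():
        print(token.token_type, repr(token.contents))
    # TokenType.TEXT 'Hi '
    # TokenType.VAR 'name'
    # TokenType.TEXT '!'
    # TokenType.BLOCK 'if x'
    # TokenType.TEXT '...'
    # TokenType.BLOCK 'endif'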
@@ -51,6 +52,7 @@ times with multiple contexts)
 import logging
 import re
+from enum import Enum
 from inspect import getcallargs, getfullargspec
 
 from django.template.context import (  # NOQA: imported for backwards compatibility
@@ -67,17 +69,6 @@ from django.utils.translation import gettext_lazy, pgettext_lazy
 from .exceptions import TemplateSyntaxError
 
-TOKEN_TEXT = 0
-TOKEN_VAR = 1
-TOKEN_BLOCK = 2
-TOKEN_COMMENT = 3
-TOKEN_MAPPING = {
-    TOKEN_TEXT: 'Text',
-    TOKEN_VAR: 'Var',
-    TOKEN_BLOCK: 'Block',
-    TOKEN_COMMENT: 'Comment',
-}
-
 # template syntax constants
 FILTER_SEPARATOR = '|'
 FILTER_ARGUMENT_SEPARATOR = ':'
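
Note for third-party code: after this hunk, "from django.template.base import TOKEN_TEXT" (and the other constants) raises ImportError. The switch is one line in the caller (hypothetical caller code):

    # Before this commit:
    #   from django.template.base import TOKEN_TEXT
    # After:
    from django.template.base import TokenType

    assert TokenType.TEXT.value == 0  # same underlying integer as TOKEN_TEXT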
@@ -106,6 +97,13 @@ tag_re = (re.compile('(%s.*?%s|%s.*?%s|%s.*?%s)' %
 logger = logging.getLogger('django.template')
 
 
+class TokenType(Enum):
+    TEXT = 0
+    VAR = 1
+    BLOCK = 2
+    COMMENT = 3
+
+
 class VariableDoesNotExist(Exception):
 
     def __init__(self, msg, params=()):
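
A few properties of the new enum worth noting; this is standard Python enum behavior rather than anything specific to the patch:

    from enum import Enum

    class TokenType(Enum):
        TEXT = 0
        VAR = 1
        BLOCK = 2
        COMMENT = 3

    assert TokenType.TEXT.value == 0        # values match the old integer constants
    assert TokenType(2) is TokenType.BLOCK  # members can be looked up by value
    assert TokenType.VAR.name == 'VAR'      # names feed Token.__str__ further down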
@@ -293,7 +291,7 @@ class Token:
     A token representing a string from the template.
 
     token_type
-        One of TOKEN_TEXT, TOKEN_VAR, TOKEN_BLOCK, or TOKEN_COMMENT.
+        A TokenType, either .TEXT, .VAR, .BLOCK, or .COMMENT.
 
     contents
         The token source string.
@@ -312,7 +310,7 @@ class Token:
         self.position = position
 
     def __str__(self):
-        token_name = TOKEN_MAPPING[self.token_type]
+        token_name = self.token_type.name.capitalize()
         return ('<%s token: "%s...">' %
                 (token_name, self.contents[:20].replace('\n', '')))
 
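
Since the member names are the old TOKEN_MAPPING labels uppercased, capitalize() recovers the exact display strings and the mapping becomes redundant:

    >>> TokenType.BLOCK.name
    'BLOCK'
    >>> TokenType.BLOCK.name.capitalize()
    'Block'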
@@ -367,18 +365,18 @@ class Lexer:
                 self.verbatim = False
         if in_tag and not self.verbatim:
             if token_string.startswith(VARIABLE_TAG_START):
-                return Token(TOKEN_VAR, token_string[2:-2].strip(), position, lineno)
+                return Token(TokenType.VAR, token_string[2:-2].strip(), position, lineno)
             elif token_string.startswith(BLOCK_TAG_START):
                 if block_content[:9] in ('verbatim', 'verbatim '):
                     self.verbatim = 'end%s' % block_content
-                return Token(TOKEN_BLOCK, block_content, position, lineno)
+                return Token(TokenType.BLOCK, block_content, position, lineno)
             elif token_string.startswith(COMMENT_TAG_START):
                 content = ''
                 if token_string.find(TRANSLATOR_COMMENT_MARK):
                     content = token_string[2:-2].strip()
-                return Token(TOKEN_COMMENT, content, position, lineno)
+                return Token(TokenType.COMMENT, content, position, lineno)
         else:
-            return Token(TOKEN_TEXT, token_string, position, lineno)
+            return Token(TokenType.TEXT, token_string, position, lineno)
 
 
 class DebugLexer(Lexer):
@@ -439,10 +437,10 @@ class Parser:
         nodelist = NodeList()
         while self.tokens:
             token = self.next_token()
-            # Use the raw values here for TOKEN_* for a tiny performance boost.
-            if token.token_type == 0:  # TOKEN_TEXT
+            # Use the raw values here for TokenType.* for a tiny performance boost.
+            if token.token_type.value == 0:  # TokenType.TEXT
                 self.extend_nodelist(nodelist, TextNode(token.contents), token)
-            elif token.token_type == 1:  # TOKEN_VAR
+            elif token.token_type.value == 1:  # TokenType.VAR
                 if not token.contents:
                     raise self.error(token, 'Empty variable tag on line %d' % token.lineno)
                 try:
@@ -451,7 +449,7 @@ class Parser:
                     raise self.error(token, e)
                 var_node = VariableNode(filter_expression)
                 self.extend_nodelist(nodelist, var_node, token)
-            elif token.token_type == 2:  # TOKEN_BLOCK
+            elif token.token_type.value == 2:  # TokenType.BLOCK
                 try:
                     command = token.contents.split()[0]
                 except IndexError:
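
The comment in this hunk claims the raw-value comparison is slightly faster than comparing enum members on this hot path. A way to check that claim on a given interpreter (an illustrative sketch, not part of the patch; absolute numbers vary by machine and Python version):

    import timeit
    from django.template.base import TokenType

    token_type = TokenType.TEXT
    print(timeit.timeit(lambda: token_type == TokenType.TEXT))  # enum member comparison
    print(timeit.timeit(lambda: token_type.value == 0))         # raw-value comparison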
@@ -488,7 +486,7 @@ class Parser:
     def skip_past(self, endtag):
         while self.tokens:
             token = self.next_token()
-            if token.token_type == TOKEN_BLOCK and token.contents == endtag:
+            if token.token_type == TokenType.BLOCK and token.contents == endtag:
                 return
         self.unclosed_block_tag([endtag])
 
--- a/django/templatetags/i18n.py
+++ b/django/templatetags/i18n.py
@@ -1,6 +1,6 @@
 from django.conf import settings
 from django.template import Library, Node, TemplateSyntaxError, Variable
-from django.template.base import TOKEN_TEXT, TOKEN_VAR, render_value_in_context
+from django.template.base import TokenType, render_value_in_context
 from django.template.defaulttags import token_kwargs
 from django.utils import translation
 from django.utils.safestring import SafeData, mark_safe
@@ -112,9 +112,9 @@ class BlockTranslateNode(Node):
         result = []
         vars = []
         for token in tokens:
-            if token.token_type == TOKEN_TEXT:
+            if token.token_type == TokenType.TEXT:
                 result.append(token.contents.replace('%', '%%'))
-            elif token.token_type == TOKEN_VAR:
+            elif token.token_type == TokenType.VAR:
                 result.append('%%(%s)s' % token.contents)
                 vars.append(token.contents)
         msg = ''.join(result)
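
What the loop above builds for a blocktrans body, sketched with plain tuples standing in for real Token objects (hypothetical input, same logic as the hunk):

    result, vars = [], []
    for token_type, contents in [('TEXT', '100% of '), ('VAR', 'name')]:
        if token_type == 'TEXT':
            result.append(contents.replace('%', '%%'))  # escape literal percent signs
        else:
            result.append('%%(%s)s' % contents)         # named gettext placeholder
            vars.append(contents)
    print(''.join(result))  # 100%% of %(name)s
    print(vars)             # ['name']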
@@ -510,7 +510,7 @@ def do_block_translate(parser, token):
     plural = []
     while parser.tokens:
         token = parser.next_token()
-        if token.token_type in (TOKEN_VAR, TOKEN_TEXT):
+        if token.token_type in (TokenType.VAR, TokenType.TEXT):
             singular.append(token)
         else:
             break
@@ -519,7 +519,7 @@ def do_block_translate(parser, token):
             raise TemplateSyntaxError("'blocktrans' doesn't allow other block tags inside it")
         while parser.tokens:
             token = parser.next_token()
-            if token.token_type in (TOKEN_VAR, TOKEN_TEXT):
+            if token.token_type in (TokenType.VAR, TokenType.TEXT):
                 plural.append(token)
             else:
                 break
--- a/django/utils/translation/template.py
+++ b/django/utils/translation/template.py
@@ -2,10 +2,7 @@ import re
 import warnings
 from io import StringIO
 
-from django.template.base import (
-    TOKEN_BLOCK, TOKEN_COMMENT, TOKEN_TEXT, TOKEN_VAR, TRANSLATOR_COMMENT_MARK,
-    Lexer,
-)
+from django.template.base import TRANSLATOR_COMMENT_MARK, Lexer, TokenType
 
 from . import TranslatorCommentWarning, trim_whitespace
 
@@ -63,7 +60,7 @@ def templatize(src, origin=None):
 
     for t in Lexer(src).tokenize():
         if incomment:
-            if t.token_type == TOKEN_BLOCK and t.contents == 'endcomment':
+            if t.token_type == TokenType.BLOCK and t.contents == 'endcomment':
                 content = ''.join(comment)
                 translators_comment_start = None
                 for lineno, line in enumerate(content.splitlines(True)):
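
For context, templatize() rewrites template source into Python-ish text that xgettext can scan; the exact spacing and gettext function name depend on the Django version, so treat this as a sketch:

    from django.utils.translation.template import templatize

    print(templatize('{% load i18n %}{% trans "Hello" %}'))
    # Non-translatable tags are blanked out; a gettext('Hello')-style call
    # remains for xgettext to pick up.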
@@ -79,7 +76,7 @@
             else:
                 comment.append(t.contents)
         elif intrans:
-            if t.token_type == TOKEN_BLOCK:
+            if t.token_type == TokenType.BLOCK:
                 endbmatch = endblock_re.match(t.contents)
                 pluralmatch = plural_re.match(t.contents)
                 if endbmatch:
@@ -130,12 +127,12 @@
                         "Translation blocks must not include other block tags: "
                         "%s (%sline %d)" % (t.contents, filemsg, t.lineno)
                     )
-            elif t.token_type == TOKEN_VAR:
+            elif t.token_type == TokenType.VAR:
                 if inplural:
                     plural.append('%%(%s)s' % t.contents)
                 else:
                     singular.append('%%(%s)s' % t.contents)
-            elif t.token_type == TOKEN_TEXT:
+            elif t.token_type == TokenType.TEXT:
                 contents = t.contents.replace('%', '%%')
                 if inplural:
                     plural.append(contents)
@@ -147,7 +144,7 @@
             if comment_lineno_cache is not None:
                 cur_lineno = t.lineno + t.contents.count('\n')
                 if comment_lineno_cache == cur_lineno:
-                    if t.token_type != TOKEN_COMMENT:
+                    if t.token_type != TokenType.COMMENT:
                         for c in lineno_comment_map[comment_lineno_cache]:
                             filemsg = ''
                             if origin:
@@ -163,7 +160,7 @@
                     out.write('# %s' % ' | '.join(lineno_comment_map[comment_lineno_cache]))
                 comment_lineno_cache = None
 
-            if t.token_type == TOKEN_BLOCK:
+            if t.token_type == TokenType.BLOCK:
                 imatch = inline_re.match(t.contents)
                 bmatch = block_re.match(t.contents)
                 cmatches = constant_re.findall(t.contents)
@@ -211,7 +208,7 @@
                     incomment = True
                 else:
                     out.write(blankout(t.contents, 'B'))
-            elif t.token_type == TOKEN_VAR:
+            elif t.token_type == TokenType.VAR:
                 parts = t.contents.split('|')
                 cmatch = constant_re.match(parts[0])
                 if cmatch:
@@ -221,7 +218,7 @@
                         out.write(' %s ' % p.split(':', 1)[1])
                     else:
                         out.write(blankout(p, 'F'))
-            elif t.token_type == TOKEN_COMMENT:
+            elif t.token_type == TokenType.COMMENT:
                 if t.contents.lstrip().startswith(TRANSLATOR_COMMENT_MARK):
                     lineno_comment_map.setdefault(t.lineno, []).append(t.contents)
                     comment_lineno_cache = t.lineno
--- a/tests/template_tests/test_parser.py
+++ b/tests/template_tests/test_parser.py
@@ -3,7 +3,7 @@ Testing some internals of the template processing. These are *not* examples to b
 """
 from django.template import Library, TemplateSyntaxError
 from django.template.base import (
-    TOKEN_BLOCK, FilterExpression, Parser, Token, Variable,
+    FilterExpression, Parser, Token, TokenType, Variable,
 )
 from django.template.defaultfilters import register as filter_library
 from django.test import SimpleTestCase
@@ -15,7 +15,7 @@ class ParserTests(SimpleTestCase):
         """
         #7027 -- _() syntax should work with spaces
         """
-        token = Token(TOKEN_BLOCK, 'sometag _("Page not found") value|yesno:_("yes,no")')
+        token = Token(TokenType.BLOCK, 'sometag _("Page not found") value|yesno:_("yes,no")')
         split = token.split_contents()
         self.assertEqual(split, ["sometag", '_("Page not found")', 'value|yesno:_("yes,no")'])
 