
Replaced TOKEN_* constants with TokenType enums.

Thanks Tim Graham for the review.
Claude Paroz
2018-05-10 17:51:51 +02:00
committed by GitHub
parent 1e20fedb35
commit 9c4ea63e87
4 changed files with 39 additions and 44 deletions
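
In short: the four module-level ints (TOKEN_TEXT, TOKEN_VAR, TOKEN_BLOCK,
TOKEN_COMMENT) and the TOKEN_MAPPING display-name dict are folded into a
single Enum. A minimal sketch of the before/after, using the TokenType class
exactly as added by this commit:

    from enum import Enum

    class TokenType(Enum):
        TEXT = 0
        VAR = 1
        BLOCK = 2
        COMMENT = 3

    # Before: token.token_type == TOKEN_BLOCK   (a bare int)
    # After:  token.token_type == TokenType.BLOCK  (an enum member that also
    #         carries its display name, so TOKEN_MAPPING becomes unnecessary)
    print(TokenType.BLOCK.name.capitalize())  # 'Block'
    print(TokenType.BLOCK.value)              # 2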

django/template/base.py

@@ -3,9 +3,10 @@ This is the Django template system.
 
 How it works:
 
-The Lexer.tokenize() function converts a template string (i.e., a string containing
-markup with custom template tags) to tokens, which can be either plain text
-(TOKEN_TEXT), variables (TOKEN_VAR) or block statements (TOKEN_BLOCK).
+The Lexer.tokenize() method converts a template string (i.e., a string
+containing markup with custom template tags) to tokens, which can be either
+plain text (TokenType.TEXT), variables (TokenType.VAR), or block statements
+(TokenType.BLOCK).
 
 The Parser() class takes a list of tokens in its constructor, and its parse()
 method returns a compiled template -- which is, under the hood, a list of
@@ -51,6 +52,7 @@ times with multiple contexts)
 
 import logging
 import re
+from enum import Enum
 from inspect import getcallargs, getfullargspec
 
 from django.template.context import (  # NOQA: imported for backwards compatibility
@@ -67,17 +69,6 @@ from django.utils.translation import gettext_lazy, pgettext_lazy
 
 from .exceptions import TemplateSyntaxError
 
-TOKEN_TEXT = 0
-TOKEN_VAR = 1
-TOKEN_BLOCK = 2
-TOKEN_COMMENT = 3
-TOKEN_MAPPING = {
-    TOKEN_TEXT: 'Text',
-    TOKEN_VAR: 'Var',
-    TOKEN_BLOCK: 'Block',
-    TOKEN_COMMENT: 'Comment',
-}
-
 # template syntax constants
 FILTER_SEPARATOR = '|'
 FILTER_ARGUMENT_SEPARATOR = ':'
@@ -106,6 +97,13 @@ tag_re = (re.compile('(%s.*?%s|%s.*?%s|%s.*?%s)' %
 
 logger = logging.getLogger('django.template')
 
+
+class TokenType(Enum):
+    TEXT = 0
+    VAR = 1
+    BLOCK = 2
+    COMMENT = 3
+
 
 class VariableDoesNotExist(Exception):
 
     def __init__(self, msg, params=()):
@@ -293,7 +291,7 @@ class Token:
         A token representing a string from the template.
 
         token_type
-            One of TOKEN_TEXT, TOKEN_VAR, TOKEN_BLOCK, or TOKEN_COMMENT.
+            A TokenType, either .TEXT, .VAR, .BLOCK, or .COMMENT.
 
         contents
             The token source string.
@@ -312,7 +310,7 @@ class Token:
         self.position = position
 
     def __str__(self):
-        token_name = TOKEN_MAPPING[self.token_type]
+        token_name = self.token_type.name.capitalize()
         return ('<%s token: "%s...">' %
                 (token_name, self.contents[:20].replace('\n', '')))
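
Because every Enum member carries its own name, the old lookup dict is
replaced by a string method: 'TEXT' capitalizes to 'Text', 'VAR' to 'Var',
and so on, reproducing TOKEN_MAPPING exactly. A one-line check against the
TokenType enum defined above (members iterate in definition order):

    assert [t.name.capitalize() for t in TokenType] == ['Text', 'Var', 'Block', 'Comment']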
@@ -367,18 +365,18 @@ class Lexer:
                 self.verbatim = False
         if in_tag and not self.verbatim:
             if token_string.startswith(VARIABLE_TAG_START):
-                return Token(TOKEN_VAR, token_string[2:-2].strip(), position, lineno)
+                return Token(TokenType.VAR, token_string[2:-2].strip(), position, lineno)
             elif token_string.startswith(BLOCK_TAG_START):
                 if block_content[:9] in ('verbatim', 'verbatim '):
                     self.verbatim = 'end%s' % block_content
-                return Token(TOKEN_BLOCK, block_content, position, lineno)
+                return Token(TokenType.BLOCK, block_content, position, lineno)
             elif token_string.startswith(COMMENT_TAG_START):
                 content = ''
                 if token_string.find(TRANSLATOR_COMMENT_MARK):
                     content = token_string[2:-2].strip()
-                return Token(TOKEN_COMMENT, content, position, lineno)
+                return Token(TokenType.COMMENT, content, position, lineno)
         else:
-            return Token(TOKEN_TEXT, token_string, position, lineno)
 
+            return Token(TokenType.TEXT, token_string, position, lineno)
 
 class DebugLexer(Lexer):
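
With the enum in place, tokenizing yields TokenType members instead of bare
ints. A quick usage sketch (assuming the django.template.base module from this
commit is importable; enum reprs abbreviated in the comment):

    from django.template.base import Lexer, TokenType

    tokens = Lexer('Hello {{ name }}{% if x %}!{% endif %}').tokenize()
    print([(t.token_type, t.contents) for t in tokens])
    # [(TokenType.TEXT, 'Hello '), (TokenType.VAR, 'name'),
    #  (TokenType.BLOCK, 'if x'), (TokenType.TEXT, '!'),
    #  (TokenType.BLOCK, 'endif')]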
@@ -439,10 +437,10 @@ class Parser:
         nodelist = NodeList()
         while self.tokens:
             token = self.next_token()
-            # Use the raw values here for TOKEN_* for a tiny performance boost.
-            if token.token_type == 0:  # TOKEN_TEXT
+            # Use the raw values here for TokenType.* for a tiny performance boost.
+            if token.token_type.value == 0:  # TokenType.TEXT
                 self.extend_nodelist(nodelist, TextNode(token.contents), token)
-            elif token.token_type == 1:  # TOKEN_VAR
+            elif token.token_type.value == 1:  # TokenType.VAR
                 if not token.contents:
                     raise self.error(token, 'Empty variable tag on line %d' % token.lineno)
                 try:
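
The "raw values" comment marks a deliberate trade-off: comparing enum members
(token.token_type == TokenType.TEXT) goes through the Enum machinery on every
iteration of this hot loop, while comparing the plain int via .value does not.
A rough, hypothetical micro-benchmark of the two styles (numbers vary by
interpreter and machine; an illustration, not taken from the commit):

    import timeit
    from enum import Enum

    class TokenType(Enum):  # same shape as the enum added in this commit
        TEXT = 0
        VAR = 1
        BLOCK = 2
        COMMENT = 3

    t = TokenType.TEXT
    print(timeit.timeit(lambda: t == TokenType.TEXT))  # member comparison
    print(timeit.timeit(lambda: t.value == 0))         # raw-value comparison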
@@ -451,7 +449,7 @@ class Parser:
                     raise self.error(token, e)
                 var_node = VariableNode(filter_expression)
                 self.extend_nodelist(nodelist, var_node, token)
-            elif token.token_type == 2:  # TOKEN_BLOCK
+            elif token.token_type.value == 2:  # TokenType.BLOCK
                 try:
                     command = token.contents.split()[0]
                 except IndexError:
@@ -488,7 +486,7 @@ class Parser:
     def skip_past(self, endtag):
         while self.tokens:
             token = self.next_token()
-            if token.token_type == TOKEN_BLOCK and token.contents == endtag:
+            if token.token_type == TokenType.BLOCK and token.contents == endtag:
                 return
         self.unclosed_block_tag([endtag])