From 5edd1335b2ae3530bab124b8e9cfb6928a975088 Mon Sep 17 00:00:00 2001
From: Adrian Holovaty
Date: Thu, 8 Jun 2006 04:29:10 +0000
Subject: [PATCH] Added django.template.Token.split_contents() and used it to
 add support for strings with spaces in {% ifequal %}

git-svn-id: http://code.djangoproject.com/svn/django/trunk@3112 bcc190cf-cafb-0310-a4f2-bffc1f526a37
---
 django/template/__init__.py    | 25 ++++++++++---------------
 django/template/defaulttags.py |  2 +-
 tests/othertests/templates.py  | 12 ++++++++++++
 3 files changed, 23 insertions(+), 16 deletions(-)

diff --git a/django/template/__init__.py b/django/template/__init__.py
index cf65caef07..656cffd4b6 100644
--- a/django/template/__init__.py
+++ b/django/template/__init__.py
@@ -56,9 +56,10 @@ times with multiple contexts)
 """
 import re
 from inspect import getargspec
-from django.utils.functional import curry
 from django.conf import settings
 from django.template.context import Context, RequestContext, ContextPopException
+from django.utils.functional import curry
+from django.utils.text import smart_split
 
 __all__ = ('Template', 'Context', 'RequestContext', 'compile_string')
 
@@ -163,16 +164,12 @@ class Token:
         self.token_type, self.contents = token_type, contents
 
     def __str__(self):
-        return '<%s token: "%s...">' % (
-            {TOKEN_TEXT: 'Text', TOKEN_VAR: 'Var', TOKEN_BLOCK: 'Block'}[self.token_type],
-            self.contents[:20].replace('\n', '')
-            )
+        return '<%s token: "%s...">' % \
+            ({TOKEN_TEXT: 'Text', TOKEN_VAR: 'Var', TOKEN_BLOCK: 'Block'}[self.token_type],
+            self.contents[:20].replace('\n', ''))
 
-    def __repr__(self):
-        return '<%s token: "%s">' % (
-            {TOKEN_TEXT: 'Text', TOKEN_VAR: 'Var', TOKEN_BLOCK: 'Block'}[self.token_type],
-            self.contents[:].replace('\n', '')
-            )
+    def split_contents(self):
+        return smart_split(self.contents)
 
 class Lexer(object):
     def __init__(self, template_string, origin):
@@ -367,7 +364,6 @@ class DebugParser(Parser):
             if not hasattr(e, 'source'):
                 e.source = token.source
 
-
 def lexer_factory(*args, **kwargs):
     if settings.TEMPLATE_DEBUG:
         return DebugLexer(*args, **kwargs)
@@ -380,7 +376,6 @@ def parser_factory(*args, **kwargs):
     else:
         return Parser(*args, **kwargs)
 
-
 class TokenParser:
     """
     Subclass this and implement the top() method to parse a template line. When
@@ -564,7 +559,7 @@ class FilterExpression(object):
 def args_check(name, func, provided):
     provided = list(provided)
     plen = len(provided)
-    (args, varargs, varkw, defaults) = getargspec(func)
+    args, varargs, varkw, defaults = getargspec(func)
     # First argument is filter input.
     args.pop(0)
     if defaults:
@@ -820,7 +815,7 @@ class Library(object):
         return func
 
     def simple_tag(self,func):
-        (params, xx, xxx, defaults) = getargspec(func)
+        params, xx, xxx, defaults = getargspec(func)
 
         class SimpleNode(Node):
             def __init__(self, vars_to_resolve):
@@ -837,7 +832,7 @@ class Library(object):
 
     def inclusion_tag(self, file_name, context_class=Context, takes_context=False):
         def dec(func):
-            (params, xx, xxx, defaults) = getargspec(func)
+            params, xx, xxx, defaults = getargspec(func)
             if takes_context:
                 if params[0] == 'context':
                     params = params[1:]
diff --git a/django/template/defaulttags.py b/django/template/defaulttags.py
index 364a4608eb..50a6da68f4 100644
--- a/django/template/defaulttags.py
+++ b/django/template/defaulttags.py
@@ -502,7 +502,7 @@ def do_ifequal(parser, token, negate):
             ...
         {% endifnotequal %}
     """
-    bits = token.contents.split()
+    bits = list(token.split_contents())
     if len(bits) != 3:
         raise TemplateSyntaxError, "%r takes two arguments" % bits[0]
     end_tag = 'end' + bits[0]
diff --git a/tests/othertests/templates.py b/tests/othertests/templates.py
index 37495caf54..f624b251ba 100644
--- a/tests/othertests/templates.py
+++ b/tests/othertests/templates.py
@@ -307,6 +307,18 @@
     'ifequal09': ('{% ifequal a "test" %}yes{% else %}no{% endifequal %}', {}, "no"),
     'ifequal10': ('{% ifequal a b %}yes{% else %}no{% endifequal %}', {}, "yes"),
 
+    # SMART SPLITTING
+    'ifequal-split01': ('{% ifequal a "test man" %}yes{% else %}no{% endifequal %}', {}, "no"),
+    'ifequal-split02': ('{% ifequal a "test man" %}yes{% else %}no{% endifequal %}', {'a': 'foo'}, "no"),
+    'ifequal-split03': ('{% ifequal a "test man" %}yes{% else %}no{% endifequal %}', {'a': 'test man'}, "yes"),
+    'ifequal-split04': ("{% ifequal a 'test man' %}yes{% else %}no{% endifequal %}", {'a': 'test man'}, "yes"),
+    'ifequal-split05': ("{% ifequal a 'i \"love\" you' %}yes{% else %}no{% endifequal %}", {'a': ''}, "no"),
+    'ifequal-split06': ("{% ifequal a 'i \"love\" you' %}yes{% else %}no{% endifequal %}", {'a': 'i "love" you'}, "yes"),
+    'ifequal-split07': ("{% ifequal a 'i \"love\" you' %}yes{% else %}no{% endifequal %}", {'a': 'i love you'}, "no"),
+    'ifequal-split08': (r"{% ifequal a 'I\'m happy' %}yes{% else %}no{% endifequal %}", {'a': "I'm happy"}, "yes"),
+    'ifequal-split09': (r"{% ifequal a 'slash\man' %}yes{% else %}no{% endifequal %}", {'a': r"slash\man"}, "yes"),
+    'ifequal-split10': (r"{% ifequal a 'slash\man' %}yes{% else %}no{% endifequal %}", {'a': r"slashman"}, "no"),
+
     ### IFNOTEQUAL TAG ########################################################
     'ifnotequal01': ("{% ifnotequal a b %}yes{% endifnotequal %}", {"a": 1, "b": 2}, "yes"),
     'ifnotequal02': ("{% ifnotequal a b %}yes{% endifnotequal %}", {"a": 1, "b": 1}, ""),
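
The change is easier to follow with a small, self-contained sketch of the splitting behaviour that the new Token.split_contents() exposes to tag-compilation functions like do_ifequal above. The regex below is only an illustrative stand-in for django.utils.text.smart_split, not its actual implementation (in particular it ignores the backslash-escaped quotes covered by the ifequal-split08 through ifequal-split10 tests), and the name demo_split is hypothetical:

    import re

    # Illustrative stand-in for django.utils.text.smart_split: split on
    # whitespace, but keep quoted substrings (quotes included) together
    # as single tokens.
    SMART_SPLIT_RE = re.compile(r'"[^"]*"|\'[^\']*\'|\S+')

    def demo_split(text):
        return SMART_SPLIT_RE.findall(text)

    # Plain str.split(), which do_ifequal used before this patch, breaks a
    # quoted argument apart ...
    print('ifequal a "test man"'.split())      # ['ifequal', 'a', '"test', 'man"']

    # ... while smart splitting keeps it intact, so the tag parses as exactly
    # three bits, which is what the len(bits) != 3 check in do_ifequal expects.
    print(demo_split('ifequal a "test man"'))  # ['ifequal', 'a', '"test man"']

The list() wrapper in the new do_ifequal line suggests the real helper yields its tokens rather than returning a list, so callers that need indexing or len() must materialize the result first.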