""")
+ self.assertTrue(pattern.match(rendered))
+
+ @unittest.skipIf(markdown, 'markdown is installed')
+ def test_no_markdown(self):
+ t = Template("{{ markdown_content|markdown }}")
+ rendered = t.render(Context({'markdown_content':self.markdown_content})).strip()
+ self.assertEqual(rendered, self.markdown_content)
+
+ @unittest.skipUnless(docutils, 'docutils not installed')
+ def test_docutils(self):
t = Template("{{ rest_content|restructuredtext }}")
- rendered = t.render(Context(locals())).strip()
- if docutils:
- # Different versions of docutils return slightly different HTML
- try:
- # Docutils v0.4 and earlier
- self.assertEqual(rendered, """
Paragraph 1
+ rendered = t.render(Context({'rest_content':self.rest_content})).strip()
+ # Different versions of docutils return slightly different HTML
+ try:
+ # Docutils v0.4 and earlier
+ self.assertEqual(rendered, """
""")
- else:
- self.assertEqual(rendered, rest_content)
+
+ @unittest.skipIf(docutils, 'docutils is installed')
+ def test_no_docutils(self):
+ t = Template("{{ rest_content|restructuredtext }}")
+ rendered = t.render(Context({'rest_content':self.rest_content})).strip()
+ self.assertEqual(rendered, self.rest_content)
if __name__ == '__main__':
diff --git a/django/contrib/messages/tests/middleware.py b/django/contrib/messages/tests/middleware.py
index 654217a224..5618c2d2b2 100644
--- a/django/contrib/messages/tests/middleware.py
+++ b/django/contrib/messages/tests/middleware.py
@@ -1,6 +1,6 @@
-import unittest
from django import http
from django.contrib.messages.middleware import MessageMiddleware
+from django.utils import unittest
class MiddlewareTest(unittest.TestCase):
diff --git a/django/contrib/sessions/tests.py b/django/contrib/sessions/tests.py
index e645b73817..f9b66309cb 100644
--- a/django/contrib/sessions/tests.py
+++ b/django/contrib/sessions/tests.py
@@ -1,4 +1,7 @@
from datetime import datetime, timedelta
+import shutil
+import tempfile
+
from django.conf import settings
from django.contrib.sessions.backends.db import SessionStore as DatabaseSession
from django.contrib.sessions.backends.cache import SessionStore as CacheSession
@@ -8,9 +11,7 @@ from django.contrib.sessions.backends.base import SessionBase
from django.contrib.sessions.models import Session
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
-import shutil
-import tempfile
-import unittest
+from django.utils import unittest
class SessionTestsMixin(object):
diff --git a/django/db/backends/__init__.py b/django/db/backends/__init__.py
index 75f40cd36f..4bbaa1cd43 100644
--- a/django/db/backends/__init__.py
+++ b/django/db/backends/__init__.py
@@ -20,6 +20,7 @@ class BaseDatabaseWrapper(local):
self.queries = []
self.settings_dict = settings_dict
self.alias = alias
+ self.vendor = 'unknown'
def __eq__(self, other):
return self.settings_dict == other.settings_dict
@@ -87,16 +88,105 @@ class BaseDatabaseFeatures(object):
needs_datetime_string_cast = True
empty_fetchmany_value = []
update_can_self_select = True
+
+ # Does the backend distinguish between '' and None?
interprets_empty_strings_as_nulls = False
+
can_use_chunked_reads = True
can_return_id_from_insert = False
uses_autocommit = False
uses_savepoints = False
+
# If True, don't use integer foreign keys referring to, e.g., positive
# integer primary keys.
related_fields_match_type = False
allow_sliced_subqueries = True
+ # Does the default test database allow multiple connections?
+    # (False usually indicates an in-memory test database, e.g. SQLite.)
+ test_db_allows_multiple_connections = True
+
+ # Can an object be saved without an explicit primary key?
+ supports_unspecified_pk = False
+
+ # Can a fixture contain forward references? i.e., are
+ # FK constraints checked at the end of transaction, or
+ # at the end of each save operation?
+ supports_forward_references = True
+
+ # Does a dirty transaction need to be rolled back
+ # before the cursor can be used again?
+ requires_rollback_on_dirty_transaction = False
+
+ # Does the backend allow very long model names without error?
+ supports_long_model_names = True
+
+ # Is there a REAL datatype in addition to floats/doubles?
+ has_real_datatype = False
+ supports_subqueries_in_group_by = True
+ supports_bitwise_or = True
+
+ # Do time/datetime fields have microsecond precision?
+ supports_microsecond_precision = True
+
+ # Does the __regex lookup support backreferencing and grouping?
+ supports_regex_backreferencing = True
+
+ # Can date/datetime lookups be performed using a string?
+ supports_date_lookup_using_string = True
+
+ # Can datetimes with timezones be used?
+ supports_timezones = True
+
+ # When performing a GROUP BY, is an ORDER BY NULL required
+ # to remove any ordering?
+ requires_explicit_null_ordering_when_grouping = False
+
+    # Does the backend support queries with 1000 or more parameters?
+    # (SQLite's limit is lower, so it sets this to False.)
+    supports_1000_query_parameters = True
+
+ # Can an object have a primary key of 0? MySQL says No.
+ allows_primary_key_0 = True
+
+ # Features that need to be confirmed at runtime
+ # Cache whether the confirmation has been performed.
+ _confirmed = False
+ supports_transactions = None
+ supports_stddev = None
+
+ def __init__(self, connection):
+ self.connection = connection
+
+ def confirm(self):
+ "Perform manual checks of any database features that might vary between installs"
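+        # Run once the test database has been created (see the
+        # BaseDatabaseCreation change in django/db/backends/creation.py);
+        # results are cached on this features instance.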
+ self._confirmed = True
+ self.supports_transactions = self._supports_transactions()
+ self.supports_stddev = self._supports_stddev()
+
+ def _supports_transactions(self):
+ "Confirm support for transactions"
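+        # Probe: create a table, INSERT a row, roll back, then count rows.
+        # If the inserted row survived the rollback, transactions are not
+        # supported.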
+ cursor = self.connection.cursor()
+ cursor.execute('CREATE TABLE ROLLBACK_TEST (X INT)')
+ self.connection._commit()
+ cursor.execute('INSERT INTO ROLLBACK_TEST (X) VALUES (8)')
+ self.connection._rollback()
+ cursor.execute('SELECT COUNT(X) FROM ROLLBACK_TEST')
+ count, = cursor.fetchone()
+ cursor.execute('DROP TABLE ROLLBACK_TEST')
+ self.connection._commit()
+ return count == 0
+
+ def _supports_stddev(self):
+ "Confirm support for STDDEV and related stats functions"
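+        # Minimal stand-in aggregate, used only to probe STDDEV_POP support.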
+ class StdDevPop(object):
+ sql_function = 'STDDEV_POP'
+
+        try:
+            self.connection.ops.check_aggregate_support(StdDevPop())
+            return True
+        except NotImplementedError:
+            return False
+
+
class BaseDatabaseOperations(object):
"""
This class encapsulates all backend-specific differences, such as the way
diff --git a/django/db/backends/creation.py b/django/db/backends/creation.py
index 407bf5cbbb..ce32012c2b 100644
--- a/django/db/backends/creation.py
+++ b/django/db/backends/creation.py
@@ -347,8 +347,9 @@ class BaseDatabaseCreation(object):
self.connection.close()
self.connection.settings_dict["NAME"] = test_database_name
- can_rollback = self._rollback_works()
- self.connection.settings_dict["SUPPORTS_TRANSACTIONS"] = can_rollback
+
+ # Confirm the feature set of the test database
+ self.connection.features.confirm()
# Report syncdb messages at one level lower than that requested.
# This ensures we don't get flooded with messages during testing
@@ -405,18 +406,6 @@ class BaseDatabaseCreation(object):
return test_database_name
- def _rollback_works(self):
- cursor = self.connection.cursor()
- cursor.execute('CREATE TABLE ROLLBACK_TEST (X INT)')
- self.connection._commit()
- cursor.execute('INSERT INTO ROLLBACK_TEST (X) VALUES (8)')
- self.connection._rollback()
- cursor.execute('SELECT COUNT(X) FROM ROLLBACK_TEST')
- count, = cursor.fetchone()
- cursor.execute('DROP TABLE ROLLBACK_TEST')
- self.connection._commit()
- return count == 0
-
def destroy_test_db(self, old_database_name, verbosity=1):
"""
Destroy a test database, prompting the user for confirmation if the
diff --git a/django/db/backends/dummy/base.py b/django/db/backends/dummy/base.py
index 2cda04e528..71f7a0e3e2 100644
--- a/django/db/backends/dummy/base.py
+++ b/django/db/backends/dummy/base.py
@@ -42,7 +42,7 @@ class DatabaseWrapper(object):
_rollback = ignore
def __init__(self, settings_dict, alias, *args, **kwargs):
- self.features = BaseDatabaseFeatures()
+ self.features = BaseDatabaseFeatures(self)
self.ops = DatabaseOperations()
self.client = DatabaseClient(self)
self.creation = BaseDatabaseCreation(self)
diff --git a/django/db/backends/mysql/base.py b/django/db/backends/mysql/base.py
index a39c41f8d8..500c1ac495 100644
--- a/django/db/backends/mysql/base.py
+++ b/django/db/backends/mysql/base.py
@@ -124,6 +124,14 @@ class DatabaseFeatures(BaseDatabaseFeatures):
allows_group_by_pk = True
related_fields_match_type = True
allow_sliced_subqueries = False
+ supports_forward_references = False
+ supports_long_model_names = False
+ supports_microsecond_precision = False
+ supports_regex_backreferencing = False
+ supports_date_lookup_using_string = False
+ supports_timezones = False
+ requires_explicit_null_ordering_when_grouping = True
+ allows_primary_key_0 = False
class DatabaseOperations(BaseDatabaseOperations):
compiler_module = "django.db.backends.mysql.compiler"
@@ -231,7 +239,7 @@ class DatabaseOperations(BaseDatabaseOperations):
return 64
class DatabaseWrapper(BaseDatabaseWrapper):
-
+ vendor = 'mysql'
operators = {
'exact': '= %s',
'iexact': 'LIKE %s',
@@ -253,7 +261,7 @@ class DatabaseWrapper(BaseDatabaseWrapper):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.server_version = None
- self.features = DatabaseFeatures()
+ self.features = DatabaseFeatures(self)
self.ops = DatabaseOperations()
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
diff --git a/django/db/backends/oracle/base.py b/django/db/backends/oracle/base.py
index a175c3933a..40a70d1f47 100644
--- a/django/db/backends/oracle/base.py
+++ b/django/db/backends/oracle/base.py
@@ -50,7 +50,9 @@ class DatabaseFeatures(BaseDatabaseFeatures):
uses_savepoints = True
can_return_id_from_insert = True
allow_sliced_subqueries = False
-
+ supports_subqueries_in_group_by = True
+ supports_timezones = False
+ supports_bitwise_or = False
class DatabaseOperations(BaseDatabaseOperations):
compiler_module = "django.db.backends.oracle.compiler"
@@ -314,7 +316,7 @@ WHEN (new.%(col_name)s IS NULL)
class DatabaseWrapper(BaseDatabaseWrapper):
-
+ vendor = 'oracle'
operators = {
'exact': '= %s',
'iexact': '= UPPER(%s)',
@@ -334,7 +336,7 @@ class DatabaseWrapper(BaseDatabaseWrapper):
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
- self.features = DatabaseFeatures()
+ self.features = DatabaseFeatures(self)
self.ops = DatabaseOperations()
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
diff --git a/django/db/backends/postgresql/base.py b/django/db/backends/postgresql/base.py
index 84b4c7f5f3..5e47a37d3f 100644
--- a/django/db/backends/postgresql/base.py
+++ b/django/db/backends/postgresql/base.py
@@ -80,8 +80,11 @@ class UnicodeCursorWrapper(object):
class DatabaseFeatures(BaseDatabaseFeatures):
uses_savepoints = True
+ requires_rollback_on_dirty_transaction = True
+ has_real_datatype = True
class DatabaseWrapper(BaseDatabaseWrapper):
+ vendor = 'postgresql'
operators = {
'exact': '= %s',
'iexact': '= UPPER(%s)',
@@ -108,7 +111,7 @@ class DatabaseWrapper(BaseDatabaseWrapper):
DeprecationWarning
)
- self.features = DatabaseFeatures()
+ self.features = DatabaseFeatures(self)
self.ops = DatabaseOperations(self)
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
diff --git a/django/db/backends/postgresql_psycopg2/base.py b/django/db/backends/postgresql_psycopg2/base.py
index ce4e48330e..9e5fb53d56 100644
--- a/django/db/backends/postgresql_psycopg2/base.py
+++ b/django/db/backends/postgresql_psycopg2/base.py
@@ -67,6 +67,8 @@ class CursorWrapper(object):
class DatabaseFeatures(BaseDatabaseFeatures):
needs_datetime_string_cast = False
can_return_id_from_insert = False
+ requires_rollback_on_dirty_transaction = True
+ has_real_datatype = True
class DatabaseOperations(PostgresqlDatabaseOperations):
def last_executed_query(self, cursor, sql, params):
@@ -79,6 +81,7 @@ class DatabaseOperations(PostgresqlDatabaseOperations):
return "RETURNING %s", ()
class DatabaseWrapper(BaseDatabaseWrapper):
+ vendor = 'postgresql'
operators = {
'exact': '= %s',
'iexact': '= UPPER(%s)',
@@ -99,7 +102,7 @@ class DatabaseWrapper(BaseDatabaseWrapper):
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
- self.features = DatabaseFeatures()
+ self.features = DatabaseFeatures(self)
autocommit = self.settings_dict["OPTIONS"].get('autocommit', False)
self.features.uses_autocommit = autocommit
self._set_isolation_level(int(not autocommit))
diff --git a/django/db/backends/sqlite3/base.py b/django/db/backends/sqlite3/base.py
index 1ab2557627..5af1576189 100644
--- a/django/db/backends/sqlite3/base.py
+++ b/django/db/backends/sqlite3/base.py
@@ -60,6 +60,27 @@ class DatabaseFeatures(BaseDatabaseFeatures):
# setting ensures we always read result sets fully into memory all in one
# go.
can_use_chunked_reads = False
+ test_db_allows_multiple_connections = False
+ supports_unspecified_pk = True
+    supports_1000_query_parameters = False
+
+ def _supports_stddev(self):
+ """Confirm support for STDDEV and related stats functions
+
+ SQLite supports STDDEV as an extension package; so
+ connection.ops.check_aggregate_support() can't unilaterally
+ rule out support for STDDEV. We need to manually check
+ whether the call works.
+ """
+ cursor = self.connection.cursor()
+ cursor.execute('CREATE TABLE STDDEV_TEST (X INT)')
+ try:
+ cursor.execute('SELECT STDDEV(*) FROM STDDEV_TEST')
+ has_support = True
+ except utils.DatabaseError:
+ has_support = False
+ cursor.execute('DROP TABLE STDDEV_TEST')
+ return has_support
class DatabaseOperations(BaseDatabaseOperations):
def date_extract_sql(self, lookup_type, field_name):
@@ -129,7 +150,7 @@ class DatabaseOperations(BaseDatabaseOperations):
return value
class DatabaseWrapper(BaseDatabaseWrapper):
-
+ vendor = 'sqlite'
# SQLite requires LIKE statements to include an ESCAPE clause if the value
# being escaped has a percent or underscore in it.
# See http://www.sqlite.org/lang_expr.html for an explanation.
@@ -153,7 +174,7 @@ class DatabaseWrapper(BaseDatabaseWrapper):
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
- self.features = DatabaseFeatures()
+ self.features = DatabaseFeatures(self)
self.ops = DatabaseOperations()
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
diff --git a/django/test/__init__.py b/django/test/__init__.py
index c996ed49d6..b3198fcf88 100644
--- a/django/test/__init__.py
+++ b/django/test/__init__.py
@@ -3,5 +3,5 @@ Django Unit Test and Doctest framework.
"""
from django.test.client import Client
-from django.test.testcases import TestCase, TransactionTestCase
+from django.test.testcases import TestCase, TransactionTestCase, skipIfDBFeature, skipUnlessDBFeature
from django.test.utils import Approximate
diff --git a/django/test/simple.py b/django/test/simple.py
index 7ec273c75a..b18c22781b 100644
--- a/django/test/simple.py
+++ b/django/test/simple.py
@@ -1,12 +1,12 @@
import sys
import signal
-import unittest
from django.conf import settings
from django.db.models import get_app, get_apps
from django.test import _doctest as doctest
from django.test.utils import setup_test_environment, teardown_test_environment
from django.test.testcases import OutputChecker, DocTestRunner, TestCase
+from django.utils import unittest
# The module name for tests outside models.py
TEST_MODULE = 'tests'
@@ -14,52 +14,13 @@ TEST_MODULE = 'tests'
doctestOutputChecker = OutputChecker()
class DjangoTestRunner(unittest.TextTestRunner):
-
- def __init__(self, verbosity=0, failfast=False, **kwargs):
- super(DjangoTestRunner, self).__init__(verbosity=verbosity, **kwargs)
- self.failfast = failfast
- self._keyboard_interrupt_intercepted = False
-
- def run(self, *args, **kwargs):
- """
- Runs the test suite after registering a custom signal handler
- that triggers a graceful exit when Ctrl-C is pressed.
- """
- self._default_keyboard_interrupt_handler = signal.signal(signal.SIGINT,
- self._keyboard_interrupt_handler)
- try:
- result = super(DjangoTestRunner, self).run(*args, **kwargs)
- finally:
- signal.signal(signal.SIGINT, self._default_keyboard_interrupt_handler)
- return result
-
- def _keyboard_interrupt_handler(self, signal_number, stack_frame):
- """
- Handles Ctrl-C by setting a flag that will stop the test run when
- the currently running test completes.
- """
- self._keyboard_interrupt_intercepted = True
- sys.stderr.write(" ")
- # Set the interrupt handler back to the default handler, so that
- # another Ctrl-C press will trigger immediate exit.
- signal.signal(signal.SIGINT, self._default_keyboard_interrupt_handler)
-
- def _makeResult(self):
- result = super(DjangoTestRunner, self)._makeResult()
- failfast = self.failfast
-
- def stoptest_override(func):
- def stoptest(test):
- # If we were set to failfast and the unit test failed,
- # or if the user has typed Ctrl-C, report and quit
- if (failfast and not result.wasSuccessful()) or \
- self._keyboard_interrupt_intercepted:
- result.stop()
- func(test)
- return stoptest
-
- result.stopTest = stoptest_override(result.stopTest)
- return result
+ def __init__(self, *args, **kwargs):
+ import warnings
+ warnings.warn(
+            "DjangoTestRunner is deprecated; its functionality is indistinguishable from TextTestRunner",
+ PendingDeprecationWarning
+ )
+ super(DjangoTestRunner, self).__init__(*args, **kwargs)
def get_tests(app_module):
try:
@@ -232,6 +193,7 @@ class DjangoTestSuiteRunner(object):
def setup_test_environment(self, **kwargs):
setup_test_environment()
settings.DEBUG = False
+ unittest.installHandler()
def build_suite(self, test_labels, extra_tests=None, **kwargs):
suite = unittest.TestSuite()
@@ -271,7 +233,7 @@ class DjangoTestSuiteRunner(object):
return old_names, mirrors
def run_suite(self, suite, **kwargs):
- return DjangoTestRunner(verbosity=self.verbosity, failfast=self.failfast).run(suite)
+ return unittest.TextTestRunner(verbosity=self.verbosity, failfast=self.failfast).run(suite)
def teardown_databases(self, old_config, **kwargs):
from django.db import connections
@@ -284,6 +246,7 @@ class DjangoTestSuiteRunner(object):
connection.creation.destroy_test_db(old_name, self.verbosity)
def teardown_test_environment(self, **kwargs):
+ unittest.removeHandler()
teardown_test_environment()
def suite_result(self, suite, result, **kwargs):
diff --git a/django/test/testcases.py b/django/test/testcases.py
index 40b3c82500..06b6eb39f4 100644
--- a/django/test/testcases.py
+++ b/django/test/testcases.py
@@ -1,5 +1,4 @@
import re
-import unittest
from urlparse import urlsplit, urlunsplit
from xml.dom.minidom import parseString, Node
@@ -7,12 +6,14 @@ from django.conf import settings
from django.core import mail
from django.core.management import call_command
from django.core.urlresolvers import clear_url_caches
-from django.db import transaction, connections, DEFAULT_DB_ALIAS
+from django.db import transaction, connection, connections, DEFAULT_DB_ALIAS
from django.http import QueryDict
from django.test import _doctest as doctest
from django.test.client import Client
-from django.utils import simplejson
+from django.utils import simplejson, unittest
from django.utils.encoding import smart_str
+from django.utils.functional import wraps
+
try:
all
@@ -22,6 +23,7 @@ except NameError:
 normalize_long_ints = lambda s: re.sub(r'(?<![\w])(\d+)L(?![\w])', '\\1', s)
diff --git a/django/utils/unittest/__init__.py b/django/utils/unittest/__init__.py
new file mode 100644
--- /dev/null
+++ b/django/utils/unittest/__init__.py
+import sys
+
+if sys.version_info >= (2,7):
+ # unittest2 features are native in Python 2.7
+ from unittest import *
+else:
+ try:
+ # check the system path first
+ from unittest2 import *
+ except ImportError:
+ # otherwise use our bundled version
+ __all__ = ['TestResult', 'TestCase', 'TestSuite',
+ 'TextTestRunner', 'TestLoader', 'FunctionTestCase', 'main',
+ 'defaultTestLoader', 'SkipTest', 'skip', 'skipIf', 'skipUnless',
+ 'expectedFailure', 'TextTestResult', '__version__', 'collector']
+
+ __version__ = '0.5.1'
+
+ # Expose obsolete functions for backwards compatibility
+ __all__.extend(['getTestCaseNames', 'makeSuite', 'findTestCases'])
+
+
+ from django.utils.unittest.collector import collector
+ from django.utils.unittest.result import TestResult
+ from django.utils.unittest.case import \
+ TestCase, FunctionTestCase, SkipTest, skip, skipIf,\
+ skipUnless, expectedFailure
+
+ from django.utils.unittest.suite import BaseTestSuite, TestSuite
+ from django.utils.unittest.loader import \
+ TestLoader, defaultTestLoader, makeSuite, getTestCaseNames,\
+ findTestCases
+
+ from django.utils.unittest.main import TestProgram, main, main_
+ from django.utils.unittest.runner import TextTestRunner, TextTestResult
+
+ try:
+ from django.utils.unittest.signals import\
+ installHandler, registerResult, removeResult, removeHandler
+ except ImportError:
+ # Compatibility with platforms that don't have the signal module
+ pass
+ else:
+ __all__.extend(['installHandler', 'registerResult', 'removeResult',
+ 'removeHandler'])
+
+ # deprecated
+ _TextTestResult = TextTestResult
+
+ __unittest = True
diff --git a/django/utils/unittest/__main__.py b/django/utils/unittest/__main__.py
new file mode 100644
index 0000000000..68b893d139
--- /dev/null
+++ b/django/utils/unittest/__main__.py
@@ -0,0 +1,10 @@
+"""Main entry point"""
+
+import sys
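+# Rewrite argv[0] so usage and error messages show a program name rather
+# than the path to __main__.py.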
+if sys.argv[0].endswith("__main__.py"):
+ sys.argv[0] = "unittest2"
+
+__unittest = True
+
+from django.utils.unittest.main import main_
+main_()
diff --git a/django/utils/unittest/case.py b/django/utils/unittest/case.py
new file mode 100644
index 0000000000..fd5623b03c
--- /dev/null
+++ b/django/utils/unittest/case.py
@@ -0,0 +1,1083 @@
+"""Test case implementation"""
+
+import sys
+import difflib
+import pprint
+import re
+import unittest
+import warnings
+
+from django.utils.unittest import result
+from django.utils.unittest.util import\
+ safe_repr, safe_str, strclass,\
+ unorderable_list_difference
+
+from django.utils.unittest.compatibility import wraps
+
+__unittest = True
+
+
+DIFF_OMITTED = ('\nDiff is %s characters long. '
+ 'Set self.maxDiff to None to see it.')
+
+class SkipTest(Exception):
+ """
+ Raise this exception in a test to skip it.
+
+ Usually you can use TestResult.skip() or one of the skipping decorators
+ instead of raising this directly.
+ """
+
+class _ExpectedFailure(Exception):
+ """
+ Raise this when a test is expected to fail.
+
+ This is an implementation detail.
+ """
+
+ def __init__(self, exc_info):
+ # can't use super because Python 2.4 exceptions are old style
+ Exception.__init__(self)
+ self.exc_info = exc_info
+
+class _UnexpectedSuccess(Exception):
+ """
+ The test was supposed to fail, but it didn't!
+ """
+
+def _id(obj):
+ return obj
+
+def skip(reason):
+ """
+ Unconditionally skip a test.
+ """
+ def decorator(test_item):
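+        # Plain callables are wrapped so that invoking them raises SkipTest;
+        # TestCase classes are only flagged and handled by TestCase.run().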
+ if not (isinstance(test_item, type) and issubclass(test_item, TestCase)):
+ @wraps(test_item)
+ def skip_wrapper(*args, **kwargs):
+ raise SkipTest(reason)
+ test_item = skip_wrapper
+
+ test_item.__unittest_skip__ = True
+ test_item.__unittest_skip_why__ = reason
+ return test_item
+ return decorator
+
+def skipIf(condition, reason):
+ """
+ Skip a test if the condition is true.
+ """
+ if condition:
+ return skip(reason)
+ return _id
+
+def skipUnless(condition, reason):
+ """
+ Skip a test unless the condition is true.
+ """
+ if not condition:
+ return skip(reason)
+ return _id
+
+
+def expectedFailure(func):
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ try:
+ func(*args, **kwargs)
+ except Exception:
+ raise _ExpectedFailure(sys.exc_info())
+ raise _UnexpectedSuccess
+ return wrapper
+
+
+class _AssertRaisesContext(object):
+ """A context manager used to implement TestCase.assertRaises* methods."""
+
+ def __init__(self, expected, test_case, expected_regexp=None):
+ self.expected = expected
+ self.failureException = test_case.failureException
+ self.expected_regexp = expected_regexp
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, tb):
+ if exc_type is None:
+ try:
+ exc_name = self.expected.__name__
+ except AttributeError:
+ exc_name = str(self.expected)
+ raise self.failureException(
+ "%s not raised" % (exc_name,))
+ if not issubclass(exc_type, self.expected):
+ # let unexpected exceptions pass through
+ return False
+ self.exception = exc_value # store for later retrieval
+ if self.expected_regexp is None:
+ return True
+
+ expected_regexp = self.expected_regexp
+ if isinstance(expected_regexp, basestring):
+ expected_regexp = re.compile(expected_regexp)
+ if not expected_regexp.search(str(exc_value)):
+ raise self.failureException('"%s" does not match "%s"' %
+ (expected_regexp.pattern, str(exc_value)))
+ return True
+
+
+class _TypeEqualityDict(object):
+
+ def __init__(self, testcase):
+ self.testcase = testcase
+ self._store = {}
+
+ def __setitem__(self, key, value):
+ self._store[key] = value
+
+ def __getitem__(self, key):
+ value = self._store[key]
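+        # String values name assertion methods on the test case; resolve
+        # them to bound methods at lookup time.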
+ if isinstance(value, basestring):
+ return getattr(self.testcase, value)
+ return value
+
+ def get(self, key, default=None):
+ if key in self._store:
+ return self[key]
+ return default
+
+
+class TestCase(unittest.TestCase):
+ """A class whose instances are single test cases.
+
+ By default, the test code itself should be placed in a method named
+ 'runTest'.
+
+ If the fixture may be used for many test cases, create as
+ many test methods as are needed. When instantiating such a TestCase
+ subclass, specify in the constructor arguments the name of the test method
+ that the instance is to execute.
+
+ Test authors should subclass TestCase for their own tests. Construction
+ and deconstruction of the test's environment ('fixture') can be
+ implemented by overriding the 'setUp' and 'tearDown' methods respectively.
+
+ If it is necessary to override the __init__ method, the base class
+ __init__ method must always be called. It is important that subclasses
+ should not change the signature of their __init__ method, since instances
+ of the classes are instantiated automatically by parts of the framework
+ in order to be run.
+ """
+
+ # This attribute determines which exception will be raised when
+ # the instance's assertion methods fail; test methods raising this
+ # exception will be deemed to have 'failed' rather than 'errored'
+
+ failureException = AssertionError
+
+ # This attribute sets the maximum length of a diff in failure messages
+ # by assert methods using difflib. It is looked up as an instance attribute
+ # so can be configured by individual tests if required.
+
+ maxDiff = 80*8
+
+ # This attribute determines whether long messages (including repr of
+ # objects used in assert methods) will be printed on failure in *addition*
+ # to any explicit message passed.
+
+ longMessage = True
+
+ # Attribute used by TestSuite for classSetUp
+
+ _classSetupFailed = False
+
+ def __init__(self, methodName='runTest'):
+ """Create an instance of the class that will use the named test
+ method when executed. Raises a ValueError if the instance does
+ not have a method with the specified name.
+ """
+ self._testMethodName = methodName
+ self._resultForDoCleanups = None
+ try:
+ testMethod = getattr(self, methodName)
+ except AttributeError:
+ raise ValueError("no such test method in %s: %s" % \
+ (self.__class__, methodName))
+ self._testMethodDoc = testMethod.__doc__
+ self._cleanups = []
+
+ # Map types to custom assertEqual functions that will compare
+ # instances of said type in more detail to generate a more useful
+ # error message.
+ self._type_equality_funcs = _TypeEqualityDict(self)
+ self.addTypeEqualityFunc(dict, 'assertDictEqual')
+ self.addTypeEqualityFunc(list, 'assertListEqual')
+ self.addTypeEqualityFunc(tuple, 'assertTupleEqual')
+ self.addTypeEqualityFunc(set, 'assertSetEqual')
+ self.addTypeEqualityFunc(frozenset, 'assertSetEqual')
+ self.addTypeEqualityFunc(unicode, 'assertMultiLineEqual')
+
+ def addTypeEqualityFunc(self, typeobj, function):
+ """Add a type specific assertEqual style function to compare a type.
+
+ This method is for use by TestCase subclasses that need to register
+ their own type equality functions to provide nicer error messages.
+
+ Args:
+ typeobj: The data type to call this function on when both values
+ are of the same type in assertEqual().
+ function: The callable taking two arguments and an optional
+ msg= argument that raises self.failureException with a
+ useful error message when the two arguments are not equal.
+ """
+ self._type_equality_funcs[typeobj] = function
+
+ def addCleanup(self, function, *args, **kwargs):
+ """Add a function, with arguments, to be called when the test is
+ completed. Functions added are called on a LIFO basis and are
+ called after tearDown on test failure or success.
+
+ Cleanup items are called even if setUp fails (unlike tearDown)."""
+ self._cleanups.append((function, args, kwargs))
+
+ def setUp(self):
+ "Hook method for setting up the test fixture before exercising it."
+
+ @classmethod
+ def setUpClass(cls):
+ "Hook method for setting up class fixture before running tests in the class."
+
+ @classmethod
+ def tearDownClass(cls):
+ "Hook method for deconstructing the class fixture after running all tests in the class."
+
+ def tearDown(self):
+ "Hook method for deconstructing the test fixture after testing it."
+
+ def countTestCases(self):
+ return 1
+
+ def defaultTestResult(self):
+ return result.TestResult()
+
+ def shortDescription(self):
+ """Returns a one-line description of the test, or None if no
+ description has been provided.
+
+ The default implementation of this method returns the first line of
+ the specified test method's docstring.
+ """
+ doc = self._testMethodDoc
+ return doc and doc.split("\n")[0].strip() or None
+
+
+ def id(self):
+ return "%s.%s" % (strclass(self.__class__), self._testMethodName)
+
+ def __eq__(self, other):
+ if type(self) is not type(other):
+ return NotImplemented
+
+ return self._testMethodName == other._testMethodName
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __hash__(self):
+ return hash((type(self), self._testMethodName))
+
+ def __str__(self):
+ return "%s (%s)" % (self._testMethodName, strclass(self.__class__))
+
+ def __repr__(self):
+ return "<%s testMethod=%s>" % \
+ (strclass(self.__class__), self._testMethodName)
+
+ def _addSkip(self, result, reason):
+ addSkip = getattr(result, 'addSkip', None)
+ if addSkip is not None:
+ addSkip(self, reason)
+ else:
+ warnings.warn("Use of a TestResult without an addSkip method is deprecated",
+ DeprecationWarning, 2)
+ result.addSuccess(self)
+
+ def run(self, result=None):
+ orig_result = result
+ if result is None:
+ result = self.defaultTestResult()
+ startTestRun = getattr(result, 'startTestRun', None)
+ if startTestRun is not None:
+ startTestRun()
+
+ self._resultForDoCleanups = result
+ result.startTest(self)
+
+ testMethod = getattr(self, self._testMethodName)
+
+ if (getattr(self.__class__, "__unittest_skip__", False) or
+ getattr(testMethod, "__unittest_skip__", False)):
+ # If the class or method was skipped.
+ try:
+ skip_why = (getattr(self.__class__, '__unittest_skip_why__', '')
+ or getattr(testMethod, '__unittest_skip_why__', ''))
+ self._addSkip(result, skip_why)
+ finally:
+ result.stopTest(self)
+ return
+ try:
+ success = False
+ try:
+ self.setUp()
+ except SkipTest, e:
+ self._addSkip(result, str(e))
+ except Exception:
+ result.addError(self, sys.exc_info())
+ else:
+ try:
+ testMethod()
+ except self.failureException:
+ result.addFailure(self, sys.exc_info())
+ except _ExpectedFailure, e:
+ addExpectedFailure = getattr(result, 'addExpectedFailure', None)
+ if addExpectedFailure is not None:
+ addExpectedFailure(self, e.exc_info)
+ else:
+ warnings.warn("Use of a TestResult without an addExpectedFailure method is deprecated",
+ DeprecationWarning)
+ result.addSuccess(self)
+ except _UnexpectedSuccess:
+ addUnexpectedSuccess = getattr(result, 'addUnexpectedSuccess', None)
+ if addUnexpectedSuccess is not None:
+ addUnexpectedSuccess(self)
+ else:
+ warnings.warn("Use of a TestResult without an addUnexpectedSuccess method is deprecated",
+ DeprecationWarning)
+ result.addFailure(self, sys.exc_info())
+ except SkipTest, e:
+ self._addSkip(result, str(e))
+ except Exception:
+ result.addError(self, sys.exc_info())
+ else:
+ success = True
+
+ try:
+ self.tearDown()
+ except Exception:
+ result.addError(self, sys.exc_info())
+ success = False
+
+ cleanUpSuccess = self.doCleanups()
+ success = success and cleanUpSuccess
+ if success:
+ result.addSuccess(self)
+ finally:
+ result.stopTest(self)
+ if orig_result is None:
+ stopTestRun = getattr(result, 'stopTestRun', None)
+ if stopTestRun is not None:
+ stopTestRun()
+
+ def doCleanups(self):
+ """Execute all cleanup functions. Normally called for you after
+ tearDown."""
+ result = self._resultForDoCleanups
+ ok = True
+ while self._cleanups:
+ function, args, kwargs = self._cleanups.pop(-1)
+ try:
+ function(*args, **kwargs)
+ except Exception:
+ ok = False
+ result.addError(self, sys.exc_info())
+ return ok
+
+ def __call__(self, *args, **kwds):
+ return self.run(*args, **kwds)
+
+ def debug(self):
+ """Run the test without collecting errors in a TestResult"""
+ self.setUp()
+ getattr(self, self._testMethodName)()
+ self.tearDown()
+ while self._cleanups:
+ function, args, kwargs = self._cleanups.pop(-1)
+ function(*args, **kwargs)
+
+ def skipTest(self, reason):
+ """Skip this test."""
+ raise SkipTest(reason)
+
+ def fail(self, msg=None):
+ """Fail immediately, with the given message."""
+ raise self.failureException(msg)
+
+ def assertFalse(self, expr, msg=None):
+ "Fail the test if the expression is true."
+ if expr:
+ msg = self._formatMessage(msg, "%s is not False" % safe_repr(expr))
+ raise self.failureException(msg)
+
+ def assertTrue(self, expr, msg=None):
+ """Fail the test unless the expression is true."""
+ if not expr:
+ msg = self._formatMessage(msg, "%s is not True" % safe_repr(expr))
+ raise self.failureException(msg)
+
+ def _formatMessage(self, msg, standardMsg):
+ """Honour the longMessage attribute when generating failure messages.
+ If longMessage is False this means:
+ * Use only an explicit message if it is provided
+ * Otherwise use the standard message for the assert
+
+ If longMessage is True:
+ * Use the standard message
+ * If an explicit message is provided, plus ' : ' and the explicit message
+ """
+ if not self.longMessage:
+ return msg or standardMsg
+ if msg is None:
+ return standardMsg
+ try:
+ return '%s : %s' % (standardMsg, msg)
+ except UnicodeDecodeError:
+ return '%s : %s' % (safe_str(standardMsg), safe_str(msg))
+
+
+ def assertRaises(self, excClass, callableObj=None, *args, **kwargs):
+ """Fail unless an exception of class excClass is thrown
+ by callableObj when invoked with arguments args and keyword
+ arguments kwargs. If a different type of exception is
+ thrown, it will not be caught, and the test case will be
+ deemed to have suffered an error, exactly as for an
+ unexpected exception.
+
+ If called with callableObj omitted or None, will return a
+ context object used like this::
+
+ with self.assertRaises(SomeException):
+ do_something()
+
+ The context manager keeps a reference to the exception as
+ the 'exception' attribute. This allows you to inspect the
+ exception after the assertion::
+
+ with self.assertRaises(SomeException) as cm:
+ do_something()
+ the_exception = cm.exception
+ self.assertEqual(the_exception.error_code, 3)
+ """
+ if callableObj is None:
+ return _AssertRaisesContext(excClass, self)
+ try:
+ callableObj(*args, **kwargs)
+ except excClass:
+ return
+
+ if hasattr(excClass,'__name__'):
+ excName = excClass.__name__
+ else:
+ excName = str(excClass)
+ raise self.failureException, "%s not raised" % excName
+
+ def _getAssertEqualityFunc(self, first, second):
+ """Get a detailed comparison function for the types of the two args.
+
+ Returns: A callable accepting (first, second, msg=None) that will
+ raise a failure exception if first != second with a useful human
+ readable error message for those types.
+ """
+ #
+ # NOTE(gregory.p.smith): I considered isinstance(first, type(second))
+ # and vice versa. I opted for the conservative approach in case
+ # subclasses are not intended to be compared in detail to their super
+ # class instances using a type equality func. This means testing
+ # subtypes won't automagically use the detailed comparison. Callers
+ # should use their type specific assertSpamEqual method to compare
+ # subclasses if the detailed comparison is desired and appropriate.
+ # See the discussion in http://bugs.python.org/issue2578.
+ #
+ if type(first) is type(second):
+ asserter = self._type_equality_funcs.get(type(first))
+ if asserter is not None:
+ return asserter
+
+ return self._baseAssertEqual
+
+ def _baseAssertEqual(self, first, second, msg=None):
+ """The default assertEqual implementation, not type specific."""
+ if not first == second:
+ standardMsg = '%s != %s' % (safe_repr(first), safe_repr(second))
+ msg = self._formatMessage(msg, standardMsg)
+ raise self.failureException(msg)
+
+ def assertEqual(self, first, second, msg=None):
+ """Fail if the two objects are unequal as determined by the '=='
+ operator.
+ """
+ assertion_func = self._getAssertEqualityFunc(first, second)
+ assertion_func(first, second, msg=msg)
+
+ def assertNotEqual(self, first, second, msg=None):
+ """Fail if the two objects are equal as determined by the '=='
+ operator.
+ """
+ if not first != second:
+ msg = self._formatMessage(msg, '%s == %s' % (safe_repr(first),
+ safe_repr(second)))
+ raise self.failureException(msg)
+
+ def assertAlmostEqual(self, first, second, places=None, msg=None, delta=None):
+ """Fail if the two objects are unequal as determined by their
+ difference rounded to the given number of decimal places
+ (default 7) and comparing to zero, or by comparing that the
+           difference between the two objects is more than the given delta.
+
+ Note that decimal places (from zero) are usually not the same
+           as significant digits (measured from the most significant digit).
+
+ If the two objects compare equal then they will automatically
+ compare almost equal.
+ """
+ if first == second:
+ # shortcut
+ return
+ if delta is not None and places is not None:
+ raise TypeError("specify delta or places not both")
+
+ if delta is not None:
+ if abs(first - second) <= delta:
+ return
+
+ standardMsg = '%s != %s within %s delta' % (safe_repr(first),
+ safe_repr(second),
+ safe_repr(delta))
+ else:
+ if places is None:
+ places = 7
+
+ if round(abs(second-first), places) == 0:
+ return
+
+ standardMsg = '%s != %s within %r places' % (safe_repr(first),
+ safe_repr(second),
+ places)
+ msg = self._formatMessage(msg, standardMsg)
+ raise self.failureException(msg)
+
+ def assertNotAlmostEqual(self, first, second, places=None, msg=None, delta=None):
+ """Fail if the two objects are equal as determined by their
+ difference rounded to the given number of decimal places
+ (default 7) and comparing to zero, or by comparing that the
+           difference between the two objects is less than the given delta.
+
+ Note that decimal places (from zero) are usually not the same
+           as significant digits (measured from the most significant digit).
+
+ Objects that are equal automatically fail.
+ """
+ if delta is not None and places is not None:
+ raise TypeError("specify delta or places not both")
+ if delta is not None:
+ if not (first == second) and abs(first - second) > delta:
+ return
+ standardMsg = '%s == %s within %s delta' % (safe_repr(first),
+ safe_repr(second),
+ safe_repr(delta))
+ else:
+ if places is None:
+ places = 7
+ if not (first == second) and round(abs(second-first), places) != 0:
+ return
+ standardMsg = '%s == %s within %r places' % (safe_repr(first),
+ safe_repr(second),
+ places)
+
+ msg = self._formatMessage(msg, standardMsg)
+ raise self.failureException(msg)
+
+ # Synonyms for assertion methods
+
+ # The plurals are undocumented. Keep them that way to discourage use.
+ # Do not add more. Do not remove.
+ # Going through a deprecation cycle on these would annoy many people.
+ assertEquals = assertEqual
+ assertNotEquals = assertNotEqual
+ assertAlmostEquals = assertAlmostEqual
+ assertNotAlmostEquals = assertNotAlmostEqual
+ assert_ = assertTrue
+
+ # These fail* assertion method names are pending deprecation and will
+ # be a DeprecationWarning in 3.2; http://bugs.python.org/issue2578
+ def _deprecate(original_func):
+ def deprecated_func(*args, **kwargs):
+ warnings.warn(
+ ('Please use %s instead.' % original_func.__name__),
+ PendingDeprecationWarning, 2)
+ return original_func(*args, **kwargs)
+ return deprecated_func
+
+ failUnlessEqual = _deprecate(assertEqual)
+ failIfEqual = _deprecate(assertNotEqual)
+ failUnlessAlmostEqual = _deprecate(assertAlmostEqual)
+ failIfAlmostEqual = _deprecate(assertNotAlmostEqual)
+ failUnless = _deprecate(assertTrue)
+ failUnlessRaises = _deprecate(assertRaises)
+ failIf = _deprecate(assertFalse)
+
+ def assertSequenceEqual(self, seq1, seq2,
+ msg=None, seq_type=None, max_diff=80*8):
+ """An equality assertion for ordered sequences (like lists and tuples).
+
+ For the purposes of this function, a valid ordered sequence type is one
+ which can be indexed, has a length, and has an equality operator.
+
+ Args:
+ seq1: The first sequence to compare.
+ seq2: The second sequence to compare.
+ seq_type: The expected datatype of the sequences, or None if no
+ datatype should be enforced.
+ msg: Optional message to use on failure instead of a list of
+ differences.
+            max_diff: Maximum size of the diff; larger diffs are not shown
+ """
+ if seq_type is not None:
+ seq_type_name = seq_type.__name__
+ if not isinstance(seq1, seq_type):
+ raise self.failureException('First sequence is not a %s: %s'
+ % (seq_type_name, safe_repr(seq1)))
+ if not isinstance(seq2, seq_type):
+ raise self.failureException('Second sequence is not a %s: %s'
+ % (seq_type_name, safe_repr(seq2)))
+ else:
+ seq_type_name = "sequence"
+
+ differing = None
+ try:
+ len1 = len(seq1)
+ except (TypeError, NotImplementedError):
+ differing = 'First %s has no length. Non-sequence?' % (
+ seq_type_name)
+
+ if differing is None:
+ try:
+ len2 = len(seq2)
+ except (TypeError, NotImplementedError):
+ differing = 'Second %s has no length. Non-sequence?' % (
+ seq_type_name)
+
+ if differing is None:
+ if seq1 == seq2:
+ return
+
+ seq1_repr = repr(seq1)
+ seq2_repr = repr(seq2)
+ if len(seq1_repr) > 30:
+ seq1_repr = seq1_repr[:30] + '...'
+ if len(seq2_repr) > 30:
+ seq2_repr = seq2_repr[:30] + '...'
+ elements = (seq_type_name.capitalize(), seq1_repr, seq2_repr)
+ differing = '%ss differ: %s != %s\n' % elements
+
+ for i in xrange(min(len1, len2)):
+ try:
+ item1 = seq1[i]
+ except (TypeError, IndexError, NotImplementedError):
+ differing += ('\nUnable to index element %d of first %s\n' %
+ (i, seq_type_name))
+ break
+
+ try:
+ item2 = seq2[i]
+ except (TypeError, IndexError, NotImplementedError):
+ differing += ('\nUnable to index element %d of second %s\n' %
+ (i, seq_type_name))
+ break
+
+ if item1 != item2:
+ differing += ('\nFirst differing element %d:\n%s\n%s\n' %
+ (i, item1, item2))
+ break
+ else:
+ if (len1 == len2 and seq_type is None and
+ type(seq1) != type(seq2)):
+ # The sequences are the same, but have differing types.
+ return
+
+ if len1 > len2:
+ differing += ('\nFirst %s contains %d additional '
+ 'elements.\n' % (seq_type_name, len1 - len2))
+ try:
+ differing += ('First extra element %d:\n%s\n' %
+ (len2, seq1[len2]))
+ except (TypeError, IndexError, NotImplementedError):
+ differing += ('Unable to index element %d '
+ 'of first %s\n' % (len2, seq_type_name))
+ elif len1 < len2:
+ differing += ('\nSecond %s contains %d additional '
+ 'elements.\n' % (seq_type_name, len2 - len1))
+ try:
+ differing += ('First extra element %d:\n%s\n' %
+ (len1, seq2[len1]))
+ except (TypeError, IndexError, NotImplementedError):
+ differing += ('Unable to index element %d '
+ 'of second %s\n' % (len1, seq_type_name))
+ standardMsg = differing
+ diffMsg = '\n' + '\n'.join(
+ difflib.ndiff(pprint.pformat(seq1).splitlines(),
+ pprint.pformat(seq2).splitlines()))
+
+ standardMsg = self._truncateMessage(standardMsg, diffMsg)
+ msg = self._formatMessage(msg, standardMsg)
+ self.fail(msg)
+
+ def _truncateMessage(self, message, diff):
+ max_diff = self.maxDiff
+ if max_diff is None or len(diff) <= max_diff:
+ return message + diff
+ return message + (DIFF_OMITTED % len(diff))
+
+ def assertListEqual(self, list1, list2, msg=None):
+ """A list-specific equality assertion.
+
+ Args:
+ list1: The first list to compare.
+ list2: The second list to compare.
+ msg: Optional message to use on failure instead of a list of
+ differences.
+
+ """
+ self.assertSequenceEqual(list1, list2, msg, seq_type=list)
+
+ def assertTupleEqual(self, tuple1, tuple2, msg=None):
+ """A tuple-specific equality assertion.
+
+ Args:
+ tuple1: The first tuple to compare.
+ tuple2: The second tuple to compare.
+ msg: Optional message to use on failure instead of a list of
+ differences.
+ """
+ self.assertSequenceEqual(tuple1, tuple2, msg, seq_type=tuple)
+
+ def assertSetEqual(self, set1, set2, msg=None):
+ """A set-specific equality assertion.
+
+ Args:
+ set1: The first set to compare.
+ set2: The second set to compare.
+ msg: Optional message to use on failure instead of a list of
+ differences.
+
+ assertSetEqual uses ducktyping to support
+ different types of sets, and is optimized for sets specifically
+ (parameters must support a difference method).
+ """
+ try:
+ difference1 = set1.difference(set2)
+ except TypeError, e:
+ self.fail('invalid type when attempting set difference: %s' % e)
+ except AttributeError, e:
+ self.fail('first argument does not support set difference: %s' % e)
+
+ try:
+ difference2 = set2.difference(set1)
+ except TypeError, e:
+ self.fail('invalid type when attempting set difference: %s' % e)
+ except AttributeError, e:
+ self.fail('second argument does not support set difference: %s' % e)
+
+ if not (difference1 or difference2):
+ return
+
+ lines = []
+ if difference1:
+ lines.append('Items in the first set but not the second:')
+ for item in difference1:
+ lines.append(repr(item))
+ if difference2:
+ lines.append('Items in the second set but not the first:')
+ for item in difference2:
+ lines.append(repr(item))
+
+ standardMsg = '\n'.join(lines)
+ self.fail(self._formatMessage(msg, standardMsg))
+
+ def assertIn(self, member, container, msg=None):
+ """Just like self.assertTrue(a in b), but with a nicer default message."""
+ if member not in container:
+ standardMsg = '%s not found in %s' % (safe_repr(member),
+ safe_repr(container))
+ self.fail(self._formatMessage(msg, standardMsg))
+
+ def assertNotIn(self, member, container, msg=None):
+ """Just like self.assertTrue(a not in b), but with a nicer default message."""
+ if member in container:
+ standardMsg = '%s unexpectedly found in %s' % (safe_repr(member),
+ safe_repr(container))
+ self.fail(self._formatMessage(msg, standardMsg))
+
+ def assertIs(self, expr1, expr2, msg=None):
+ """Just like self.assertTrue(a is b), but with a nicer default message."""
+ if expr1 is not expr2:
+ standardMsg = '%s is not %s' % (safe_repr(expr1), safe_repr(expr2))
+ self.fail(self._formatMessage(msg, standardMsg))
+
+ def assertIsNot(self, expr1, expr2, msg=None):
+ """Just like self.assertTrue(a is not b), but with a nicer default message."""
+ if expr1 is expr2:
+ standardMsg = 'unexpectedly identical: %s' % (safe_repr(expr1),)
+ self.fail(self._formatMessage(msg, standardMsg))
+
+ def assertDictEqual(self, d1, d2, msg=None):
+ self.assert_(isinstance(d1, dict), 'First argument is not a dictionary')
+ self.assert_(isinstance(d2, dict), 'Second argument is not a dictionary')
+
+ if d1 != d2:
+ standardMsg = '%s != %s' % (safe_repr(d1, True), safe_repr(d2, True))
+ diff = ('\n' + '\n'.join(difflib.ndiff(
+ pprint.pformat(d1).splitlines(),
+ pprint.pformat(d2).splitlines())))
+ standardMsg = self._truncateMessage(standardMsg, diff)
+ self.fail(self._formatMessage(msg, standardMsg))
+
+ def assertDictContainsSubset(self, expected, actual, msg=None):
+ """Checks whether actual is a superset of expected."""
+ missing = []
+ mismatched = []
+ for key, value in expected.iteritems():
+ if key not in actual:
+ missing.append(key)
+ elif value != actual[key]:
+ mismatched.append('%s, expected: %s, actual: %s' %
+ (safe_repr(key), safe_repr(value),
+ safe_repr(actual[key])))
+
+ if not (missing or mismatched):
+ return
+
+ standardMsg = ''
+ if missing:
+ standardMsg = 'Missing: %s' % ','.join(safe_repr(m) for m in
+ missing)
+ if mismatched:
+ if standardMsg:
+ standardMsg += '; '
+ standardMsg += 'Mismatched values: %s' % ','.join(mismatched)
+
+ self.fail(self._formatMessage(msg, standardMsg))
+
+ def assertItemsEqual(self, expected_seq, actual_seq, msg=None):
+ """An unordered sequence specific comparison. It asserts that
+ expected_seq and actual_seq contain the same elements. It is
+ the equivalent of::
+
+ self.assertEqual(sorted(expected_seq), sorted(actual_seq))
+
+ Raises with an error message listing which elements of expected_seq
+ are missing from actual_seq and vice versa if any.
+
+ Asserts that each element has the same count in both sequences.
+ Example:
+ - [0, 1, 1] and [1, 0, 1] compare equal.
+ - [0, 0, 1] and [0, 1] compare unequal.
+ """
+ try:
+ expected = sorted(expected_seq)
+ actual = sorted(actual_seq)
+ except TypeError:
+ # Unsortable items (example: set(), complex(), ...)
+ expected = list(expected_seq)
+ actual = list(actual_seq)
+ missing, unexpected = unorderable_list_difference(
+ expected, actual, ignore_duplicate=False
+ )
+ else:
+ return self.assertSequenceEqual(expected, actual, msg=msg)
+
+ errors = []
+ if missing:
+ errors.append('Expected, but missing:\n %s' %
+ safe_repr(missing))
+ if unexpected:
+ errors.append('Unexpected, but present:\n %s' %
+ safe_repr(unexpected))
+ if errors:
+ standardMsg = '\n'.join(errors)
+ self.fail(self._formatMessage(msg, standardMsg))
+
+ def assertMultiLineEqual(self, first, second, msg=None):
+ """Assert that two multi-line strings are equal."""
+ self.assert_(isinstance(first, basestring), (
+ 'First argument is not a string'))
+ self.assert_(isinstance(second, basestring), (
+ 'Second argument is not a string'))
+
+ if first != second:
+ standardMsg = '%s != %s' % (safe_repr(first, True), safe_repr(second, True))
+ diff = '\n' + ''.join(difflib.ndiff(first.splitlines(True),
+ second.splitlines(True)))
+ standardMsg = self._truncateMessage(standardMsg, diff)
+ self.fail(self._formatMessage(msg, standardMsg))
+
+ def assertLess(self, a, b, msg=None):
+ """Just like self.assertTrue(a < b), but with a nicer default message."""
+ if not a < b:
+ standardMsg = '%s not less than %s' % (safe_repr(a), safe_repr(b))
+ self.fail(self._formatMessage(msg, standardMsg))
+
+ def assertLessEqual(self, a, b, msg=None):
+ """Just like self.assertTrue(a <= b), but with a nicer default message."""
+ if not a <= b:
+ standardMsg = '%s not less than or equal to %s' % (safe_repr(a), safe_repr(b))
+ self.fail(self._formatMessage(msg, standardMsg))
+
+ def assertGreater(self, a, b, msg=None):
+ """Just like self.assertTrue(a > b), but with a nicer default message."""
+ if not a > b:
+ standardMsg = '%s not greater than %s' % (safe_repr(a), safe_repr(b))
+ self.fail(self._formatMessage(msg, standardMsg))
+
+ def assertGreaterEqual(self, a, b, msg=None):
+ """Just like self.assertTrue(a >= b), but with a nicer default message."""
+ if not a >= b:
+ standardMsg = '%s not greater than or equal to %s' % (safe_repr(a), safe_repr(b))
+ self.fail(self._formatMessage(msg, standardMsg))
+
+ def assertIsNone(self, obj, msg=None):
+ """Same as self.assertTrue(obj is None), with a nicer default message."""
+ if obj is not None:
+ standardMsg = '%s is not None' % (safe_repr(obj),)
+ self.fail(self._formatMessage(msg, standardMsg))
+
+ def assertIsNotNone(self, obj, msg=None):
+ """Included for symmetry with assertIsNone."""
+ if obj is None:
+ standardMsg = 'unexpectedly None'
+ self.fail(self._formatMessage(msg, standardMsg))
+
+ def assertIsInstance(self, obj, cls, msg=None):
+ """Same as self.assertTrue(isinstance(obj, cls)), with a nicer
+ default message."""
+ if not isinstance(obj, cls):
+ standardMsg = '%s is not an instance of %r' % (safe_repr(obj), cls)
+ self.fail(self._formatMessage(msg, standardMsg))
+
+ def assertNotIsInstance(self, obj, cls, msg=None):
+ """Included for symmetry with assertIsInstance."""
+ if isinstance(obj, cls):
+ standardMsg = '%s is an instance of %r' % (safe_repr(obj), cls)
+ self.fail(self._formatMessage(msg, standardMsg))
+
+ def assertRaisesRegexp(self, expected_exception, expected_regexp,
+ callable_obj=None, *args, **kwargs):
+ """Asserts that the message in a raised exception matches a regexp.
+
+ Args:
+ expected_exception: Exception class expected to be raised.
+ expected_regexp: Regexp (re pattern object or string) expected
+ to be found in error message.
+ callable_obj: Function to be called.
+ args: Extra args.
+ kwargs: Extra kwargs.
+ """
+ if callable_obj is None:
+ return _AssertRaisesContext(expected_exception, self, expected_regexp)
+ try:
+ callable_obj(*args, **kwargs)
+ except expected_exception, exc_value:
+ if isinstance(expected_regexp, basestring):
+ expected_regexp = re.compile(expected_regexp)
+ if not expected_regexp.search(str(exc_value)):
+ raise self.failureException('"%s" does not match "%s"' %
+ (expected_regexp.pattern, str(exc_value)))
+ else:
+ if hasattr(expected_exception, '__name__'):
+ excName = expected_exception.__name__
+ else:
+ excName = str(expected_exception)
+ raise self.failureException, "%s not raised" % excName
+
+
+ def assertRegexpMatches(self, text, expected_regexp, msg=None):
+ """Fail the test unless the text matches the regular expression."""
+ if isinstance(expected_regexp, basestring):
+ expected_regexp = re.compile(expected_regexp)
+ if not expected_regexp.search(text):
+ msg = msg or "Regexp didn't match"
+ msg = '%s: %r not found in %r' % (msg, expected_regexp.pattern, text)
+ raise self.failureException(msg)
+
+ def assertNotRegexpMatches(self, text, unexpected_regexp, msg=None):
+ """Fail the test if the text matches the regular expression."""
+ if isinstance(unexpected_regexp, basestring):
+ unexpected_regexp = re.compile(unexpected_regexp)
+ match = unexpected_regexp.search(text)
+ if match:
+ msg = msg or "Regexp matched"
+ msg = '%s: %r matches %r in %r' % (msg,
+ text[match.start():match.end()],
+ unexpected_regexp.pattern,
+ text)
+ raise self.failureException(msg)
+
+class FunctionTestCase(TestCase):
+ """A test case that wraps a test function.
+
+ This is useful for slipping pre-existing test functions into the
+ unittest framework. Optionally, set-up and tidy-up functions can be
+ supplied. As with TestCase, the tidy-up ('tearDown') function will
+ always be called if the set-up ('setUp') function ran successfully.
+ """
+
+ def __init__(self, testFunc, setUp=None, tearDown=None, description=None):
+ super(FunctionTestCase, self).__init__()
+ self._setUpFunc = setUp
+ self._tearDownFunc = tearDown
+ self._testFunc = testFunc
+ self._description = description
+
+ def setUp(self):
+ if self._setUpFunc is not None:
+ self._setUpFunc()
+
+ def tearDown(self):
+ if self._tearDownFunc is not None:
+ self._tearDownFunc()
+
+ def runTest(self):
+ self._testFunc()
+
+ def id(self):
+ return self._testFunc.__name__
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ return NotImplemented
+
+ return self._setUpFunc == other._setUpFunc and \
+ self._tearDownFunc == other._tearDownFunc and \
+ self._testFunc == other._testFunc and \
+ self._description == other._description
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __hash__(self):
+ return hash((type(self), self._setUpFunc, self._tearDownFunc,
+ self._testFunc, self._description))
+
+ def __str__(self):
+ return "%s (%s)" % (strclass(self.__class__),
+ self._testFunc.__name__)
+
+ def __repr__(self):
+ return "<%s testFunc=%s>" % (strclass(self.__class__),
+ self._testFunc)
+
+ def shortDescription(self):
+ if self._description is not None:
+ return self._description
+ doc = self._testFunc.__doc__
+ return doc and doc.split("\n")[0].strip() or None
diff --git a/django/utils/unittest/collector.py b/django/utils/unittest/collector.py
new file mode 100644
index 0000000000..0f76fc3404
--- /dev/null
+++ b/django/utils/unittest/collector.py
@@ -0,0 +1,9 @@
+import os
+import sys
+from django.utils.unittest.loader import defaultTestLoader
+
+def collector():
+ # import __main__ triggers code re-execution
+ __main__ = sys.modules['__main__']
+ setupDir = os.path.abspath(os.path.dirname(__main__.__file__))
+ return defaultTestLoader.discover(setupDir)
diff --git a/django/utils/unittest/compatibility.py b/django/utils/unittest/compatibility.py
new file mode 100644
index 0000000000..a0dc499cbf
--- /dev/null
+++ b/django/utils/unittest/compatibility.py
@@ -0,0 +1,64 @@
+import os
+import sys
+
+try:
+ from functools import wraps
+except ImportError:
+ # only needed for Python 2.4
+ def wraps(_):
+ def _wraps(func):
+ return func
+ return _wraps
+
+__unittest = True
+
+def _relpath_nt(path, start=os.path.curdir):
+ """Return a relative version of a path"""
+
+ if not path:
+ raise ValueError("no path specified")
+ start_list = os.path.abspath(start).split(os.path.sep)
+ path_list = os.path.abspath(path).split(os.path.sep)
+ if start_list[0].lower() != path_list[0].lower():
+ unc_path, rest = os.path.splitunc(path)
+ unc_start, rest = os.path.splitunc(start)
+ if bool(unc_path) ^ bool(unc_start):
+ raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
+ % (path, start))
+ else:
+ raise ValueError("path is on drive %s, start on drive %s"
+ % (path_list[0], start_list[0]))
+ # Work out how much of the filepath is shared by start and path.
+ for i in range(min(len(start_list), len(path_list))):
+ if start_list[i].lower() != path_list[i].lower():
+ break
+ else:
+ i += 1
+
+ rel_list = [os.path.pardir] * (len(start_list)-i) + path_list[i:]
+ if not rel_list:
+ return os.path.curdir
+ return os.path.join(*rel_list)
+
+# default to posixpath definition
+def _relpath_posix(path, start=os.path.curdir):
+ """Return a relative version of a path"""
+
+ if not path:
+ raise ValueError("no path specified")
+
+ start_list = os.path.abspath(start).split(os.path.sep)
+ path_list = os.path.abspath(path).split(os.path.sep)
+
+ # Work out how much of the filepath is shared by start and path.
+ i = len(os.path.commonprefix([start_list, path_list]))
+
+ rel_list = [os.path.pardir] * (len(start_list)-i) + path_list[i:]
+ if not rel_list:
+ return os.path.curdir
+ return os.path.join(*rel_list)
+
+if os.path is sys.modules.get('ntpath'):
+ relpath = _relpath_nt
+else:
+ relpath = _relpath_posix
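On Python 2.4 and 2.5, where ``os.path.relpath`` does not exist, this fallback is what the loader ends up importing. A small illustrative check on a POSIX system (the paths are made up)::

    from django.utils.unittest.compatibility import relpath

    print(relpath('/home/user/project/tests/test_models.py',
                  start='/home/user/project'))                # tests/test_models.py
    print(relpath('/home/user/project', '/home/user/project'))  # .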
diff --git a/django/utils/unittest/loader.py b/django/utils/unittest/loader.py
new file mode 100644
index 0000000000..1ca910f019
--- /dev/null
+++ b/django/utils/unittest/loader.py
@@ -0,0 +1,322 @@
+"""Loading unittests."""
+
+import os
+import re
+import sys
+import traceback
+import types
+import unittest
+
+from fnmatch import fnmatch
+
+from django.utils.unittest import case, suite
+
+try:
+ from os.path import relpath
+except ImportError:
+ from django.utils.unittest.compatibility import relpath
+
+__unittest = True
+
+
+def _CmpToKey(mycmp):
+ 'Convert a cmp= function into a key= function'
+ class K(object):
+ def __init__(self, obj):
+ self.obj = obj
+ def __lt__(self, other):
+ return mycmp(self.obj, other.obj) == -1
+ return K
+
+
+# what about .pyc or .pyo (etc)
+# we would need to avoid loading the same tests multiple times
+# from '.py', '.pyc' *and* '.pyo'
+VALID_MODULE_NAME = re.compile(r'[_a-z]\w*\.py$', re.IGNORECASE)
+
+
+def _make_failed_import_test(name, suiteClass):
+ message = 'Failed to import test module: %s' % name
+ if hasattr(traceback, 'format_exc'):
+ # Python 2.3 compatibility
+ # format_exc returns two frames of discover.py as well
+ message += '\n%s' % traceback.format_exc()
+ return _make_failed_test('ModuleImportFailure', name, ImportError(message),
+ suiteClass)
+
+def _make_failed_load_tests(name, exception, suiteClass):
+ return _make_failed_test('LoadTestsFailure', name, exception, suiteClass)
+
+def _make_failed_test(classname, methodname, exception, suiteClass):
+ def testFailure(self):
+ raise exception
+ attrs = {methodname: testFailure}
+ TestClass = type(classname, (case.TestCase,), attrs)
+ return suiteClass((TestClass(methodname),))
+
+
+class TestLoader(unittest.TestLoader):
+ """
+ This class is responsible for loading tests according to various criteria
+ and returning them wrapped in a TestSuite
+ """
+ testMethodPrefix = 'test'
+ sortTestMethodsUsing = cmp
+ suiteClass = suite.TestSuite
+ _top_level_dir = None
+
+ def loadTestsFromTestCase(self, testCaseClass):
+        """Return a suite of all test cases contained in testCaseClass"""
+ if issubclass(testCaseClass, suite.TestSuite):
+ raise TypeError("Test cases should not be derived from TestSuite."
+ " Maybe you meant to derive from TestCase?")
+ testCaseNames = self.getTestCaseNames(testCaseClass)
+ if not testCaseNames and hasattr(testCaseClass, 'runTest'):
+ testCaseNames = ['runTest']
+ loaded_suite = self.suiteClass(map(testCaseClass, testCaseNames))
+ return loaded_suite
+
+ def loadTestsFromModule(self, module, use_load_tests=True):
+        """Return a suite of all test cases contained in the given module"""
+ tests = []
+ for name in dir(module):
+ obj = getattr(module, name)
+ if isinstance(obj, type) and issubclass(obj, unittest.TestCase):
+ tests.append(self.loadTestsFromTestCase(obj))
+
+ load_tests = getattr(module, 'load_tests', None)
+ tests = self.suiteClass(tests)
+ if use_load_tests and load_tests is not None:
+ try:
+ return load_tests(self, tests, None)
+ except Exception, e:
+ return _make_failed_load_tests(module.__name__, e,
+ self.suiteClass)
+ return tests
+
+ def loadTestsFromName(self, name, module=None):
+        """Return a suite of all test cases given a string specifier.
+
+ The name may resolve either to a module, a test case class, a
+ test method within a test case class, or a callable object which
+ returns a TestCase or TestSuite instance.
+
+ The method optionally resolves the names relative to a given module.
+ """
+ parts = name.split('.')
+ if module is None:
+ parts_copy = parts[:]
+ while parts_copy:
+ try:
+ module = __import__('.'.join(parts_copy))
+ break
+ except ImportError:
+ del parts_copy[-1]
+ if not parts_copy:
+ raise
+ parts = parts[1:]
+ obj = module
+ for part in parts:
+ parent, obj = obj, getattr(obj, part)
+
+ if isinstance(obj, types.ModuleType):
+ return self.loadTestsFromModule(obj)
+ elif isinstance(obj, type) and issubclass(obj, unittest.TestCase):
+ return self.loadTestsFromTestCase(obj)
+ elif (isinstance(obj, types.UnboundMethodType) and
+ isinstance(parent, type) and
+ issubclass(parent, case.TestCase)):
+ return self.suiteClass([parent(obj.__name__)])
+ elif isinstance(obj, unittest.TestSuite):
+ return obj
+ elif hasattr(obj, '__call__'):
+ test = obj()
+ if isinstance(test, unittest.TestSuite):
+ return test
+ elif isinstance(test, unittest.TestCase):
+ return self.suiteClass([test])
+ else:
+ raise TypeError("calling %s returned %s, not a test" %
+ (obj, test))
+ else:
+ raise TypeError("don't know how to make test from: %s" % obj)
+
+ def loadTestsFromNames(self, names, module=None):
+        """Return a suite of all test cases found using the given sequence
+ of string specifiers. See 'loadTestsFromName()'.
+ """
+ suites = [self.loadTestsFromName(name, module) for name in names]
+ return self.suiteClass(suites)
+
+ def getTestCaseNames(self, testCaseClass):
+ """Return a sorted sequence of method names found within testCaseClass
+ """
+ def isTestMethod(attrname, testCaseClass=testCaseClass,
+ prefix=self.testMethodPrefix):
+ return attrname.startswith(prefix) and \
+ hasattr(getattr(testCaseClass, attrname), '__call__')
+ testFnNames = filter(isTestMethod, dir(testCaseClass))
+ if self.sortTestMethodsUsing:
+ testFnNames.sort(key=_CmpToKey(self.sortTestMethodsUsing))
+ return testFnNames
+
+ def discover(self, start_dir, pattern='test*.py', top_level_dir=None):
+ """Find and return all test modules from the specified start
+ directory, recursing into subdirectories to find them. Only test files
+ that match the pattern will be loaded. (Using shell style pattern
+ matching.)
+
+ All test modules must be importable from the top level of the project.
+ If the start directory is not the top level directory then the top
+ level directory must be specified separately.
+
+ If a test package name (directory with '__init__.py') matches the
+ pattern then the package will be checked for a 'load_tests' function. If
+ this exists then it will be called with loader, tests, pattern.
+
+ If load_tests exists then discovery does *not* recurse into the package,
+ load_tests is responsible for loading all tests in the package.
+
+ The pattern is deliberately not stored as a loader attribute so that
+ packages can continue discovery themselves. top_level_dir is stored so
+ load_tests does not need to pass this argument in to loader.discover().
+ """
+ set_implicit_top = False
+ if top_level_dir is None and self._top_level_dir is not None:
+ # make top_level_dir optional if called from load_tests in a package
+ top_level_dir = self._top_level_dir
+ elif top_level_dir is None:
+ set_implicit_top = True
+ top_level_dir = start_dir
+
+ top_level_dir = os.path.abspath(top_level_dir)
+
+ if not top_level_dir in sys.path:
+ # all test modules must be importable from the top level directory
+ # should we *unconditionally* put the start directory in first
+ # in sys.path to minimise likelihood of conflicts between installed
+ # modules and development versions?
+ sys.path.insert(0, top_level_dir)
+ self._top_level_dir = top_level_dir
+
+ is_not_importable = False
+ if os.path.isdir(os.path.abspath(start_dir)):
+ start_dir = os.path.abspath(start_dir)
+ if start_dir != top_level_dir:
+ is_not_importable = not os.path.isfile(os.path.join(start_dir, '__init__.py'))
+ else:
+ # support for discovery from dotted module names
+ try:
+ __import__(start_dir)
+ except ImportError:
+ is_not_importable = True
+ else:
+ the_module = sys.modules[start_dir]
+ top_part = start_dir.split('.')[0]
+ start_dir = os.path.abspath(os.path.dirname((the_module.__file__)))
+ if set_implicit_top:
+ self._top_level_dir = os.path.abspath(os.path.dirname(os.path.dirname(sys.modules[top_part].__file__)))
+ sys.path.remove(top_level_dir)
+
+ if is_not_importable:
+ raise ImportError('Start directory is not importable: %r' % start_dir)
+
+ tests = list(self._find_tests(start_dir, pattern))
+ return self.suiteClass(tests)
+
+ def _get_name_from_path(self, path):
+ path = os.path.splitext(os.path.normpath(path))[0]
+
+ _relpath = relpath(path, self._top_level_dir)
+ assert not os.path.isabs(_relpath), "Path must be within the project"
+ assert not _relpath.startswith('..'), "Path must be within the project"
+
+ name = _relpath.replace(os.path.sep, '.')
+ return name
+
+ def _get_module_from_name(self, name):
+ __import__(name)
+ return sys.modules[name]
+
+ def _match_path(self, path, full_path, pattern):
+ # override this method to use alternative matching strategy
+ return fnmatch(path, pattern)
+
+ def _find_tests(self, start_dir, pattern):
+ """Used by discovery. Yields test suites it loads."""
+ paths = os.listdir(start_dir)
+
+ for path in paths:
+ full_path = os.path.join(start_dir, path)
+ if os.path.isfile(full_path):
+ if not VALID_MODULE_NAME.match(path):
+ # valid Python identifiers only
+ continue
+ if not self._match_path(path, full_path, pattern):
+ continue
+ # if the test file matches, load it
+ name = self._get_name_from_path(full_path)
+ try:
+ module = self._get_module_from_name(name)
+ except:
+ yield _make_failed_import_test(name, self.suiteClass)
+ else:
+ mod_file = os.path.abspath(getattr(module, '__file__', full_path))
+ realpath = os.path.splitext(mod_file)[0]
+ fullpath_noext = os.path.splitext(full_path)[0]
+ if realpath.lower() != fullpath_noext.lower():
+ module_dir = os.path.dirname(realpath)
+ mod_name = os.path.splitext(os.path.basename(full_path))[0]
+ expected_dir = os.path.dirname(full_path)
+ msg = ("%r module incorrectly imported from %r. Expected %r. "
+ "Is this module globally installed?")
+ raise ImportError(msg % (mod_name, module_dir, expected_dir))
+ yield self.loadTestsFromModule(module)
+ elif os.path.isdir(full_path):
+ if not os.path.isfile(os.path.join(full_path, '__init__.py')):
+ continue
+
+ load_tests = None
+ tests = None
+ if fnmatch(path, pattern):
+ # only check load_tests if the package directory itself matches the filter
+ name = self._get_name_from_path(full_path)
+ package = self._get_module_from_name(name)
+ load_tests = getattr(package, 'load_tests', None)
+ tests = self.loadTestsFromModule(package, use_load_tests=False)
+
+ if load_tests is None:
+ if tests is not None:
+ # tests loaded from package file
+ yield tests
+ # recurse into the package
+ for test in self._find_tests(full_path, pattern):
+ yield test
+ else:
+ try:
+ yield load_tests(self, tests, pattern)
+ except Exception, e:
+ yield _make_failed_load_tests(package.__name__, e,
+ self.suiteClass)
+
+defaultTestLoader = TestLoader()
+
+
+def _makeLoader(prefix, sortUsing, suiteClass=None):
+ loader = TestLoader()
+ loader.sortTestMethodsUsing = sortUsing
+ loader.testMethodPrefix = prefix
+ if suiteClass:
+ loader.suiteClass = suiteClass
+ return loader
+
+def getTestCaseNames(testCaseClass, prefix, sortUsing=cmp):
+ return _makeLoader(prefix, sortUsing).getTestCaseNames(testCaseClass)
+
+def makeSuite(testCaseClass, prefix='test', sortUsing=cmp,
+ suiteClass=suite.TestSuite):
+ return _makeLoader(prefix, sortUsing, suiteClass).loadTestsFromTestCase(testCaseClass)
+
+def findTestCases(module, prefix='test', sortUsing=cmp,
+ suiteClass=suite.TestSuite):
+ return _makeLoader(prefix, sortUsing, suiteClass).loadTestsFromModule(module)
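Driving the loader directly looks roughly like this; the directory layout is hypothetical, and both ``myproject`` and ``myproject/tests`` need an ``__init__.py`` so the discovered modules are importable from the top level::

    from django.utils.unittest.loader import defaultTestLoader
    from django.utils.unittest.runner import TextTestRunner

    suite = defaultTestLoader.discover(
        start_dir='myproject/tests',   # walked recursively
        pattern='test*.py',            # shell-style filename filter
        top_level_dir='.')             # project root, added to sys.path
    TextTestRunner().run(suite)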
diff --git a/django/utils/unittest/main.py b/django/utils/unittest/main.py
new file mode 100644
index 0000000000..744cacd841
--- /dev/null
+++ b/django/utils/unittest/main.py
@@ -0,0 +1,241 @@
+"""Unittest main program"""
+
+import sys
+import os
+import types
+
+from django.utils.unittest import loader, runner
+try:
+ from django.utils.unittest.signals import installHandler
+except ImportError:
+ installHandler = None
+
+__unittest = True
+
+FAILFAST = " -f, --failfast Stop on first failure\n"
+CATCHBREAK = " -c, --catch Catch control-C and display results\n"
+BUFFEROUTPUT = " -b, --buffer Buffer stdout and stderr during test runs\n"
+
+USAGE_AS_MAIN = """\
+Usage: %(progName)s [options] [tests]
+
+Options:
+ -h, --help Show this message
+ -v, --verbose Verbose output
+ -q, --quiet Minimal output
+%(failfast)s%(catchbreak)s%(buffer)s
+Examples:
+ %(progName)s test_module - run tests from test_module
+ %(progName)s test_module.TestClass - run tests from
+ test_module.TestClass
+ %(progName)s test_module.TestClass.test_method - run specified test method
+
+[tests] can be a list of any number of test modules, classes and test
+methods.
+
+Alternative Usage: %(progName)s discover [options]
+
+Options:
+ -v, --verbose Verbose output
+%(failfast)s%(catchbreak)s%(buffer)s -s directory Directory to start discovery ('.' default)
+ -p pattern Pattern to match test files ('test*.py' default)
+ -t directory Top level directory of project (default to
+ start directory)
+
+For test discovery all test modules must be importable from the top
+level directory of the project.
+"""
+
+USAGE_FROM_MODULE = """\
+Usage: %(progName)s [options] [test] [...]
+
+Options:
+ -h, --help Show this message
+ -v, --verbose Verbose output
+ -q, --quiet Minimal output
+%(failfast)s%(catchbreak)s%(buffer)s
+Examples:
+ %(progName)s - run default set of tests
+ %(progName)s MyTestSuite - run suite 'MyTestSuite'
+ %(progName)s MyTestCase.testSomething - run MyTestCase.testSomething
+ %(progName)s MyTestCase - run all 'test*' test methods
+ in MyTestCase
+"""
+
+
+class TestProgram(object):
+ """A command-line program that runs a set of tests; this is primarily
+ for making test modules conveniently executable.
+ """
+ USAGE = USAGE_FROM_MODULE
+
+ # defaults for testing
+ failfast = catchbreak = buffer = progName = None
+
+ def __init__(self, module='__main__', defaultTest=None,
+ argv=None, testRunner=None,
+ testLoader=loader.defaultTestLoader, exit=True,
+ verbosity=1, failfast=None, catchbreak=None, buffer=None):
+ if isinstance(module, basestring):
+ self.module = __import__(module)
+ for part in module.split('.')[1:]:
+ self.module = getattr(self.module, part)
+ else:
+ self.module = module
+ if argv is None:
+ argv = sys.argv
+
+ self.exit = exit
+ self.verbosity = verbosity
+ self.failfast = failfast
+ self.catchbreak = catchbreak
+ self.buffer = buffer
+ self.defaultTest = defaultTest
+ self.testRunner = testRunner
+ self.testLoader = testLoader
+ self.progName = os.path.basename(argv[0])
+ self.parseArgs(argv)
+ self.runTests()
+
+ def usageExit(self, msg=None):
+ if msg:
+ print msg
+ usage = {'progName': self.progName, 'catchbreak': '', 'failfast': '',
+ 'buffer': ''}
+ if self.failfast != False:
+ usage['failfast'] = FAILFAST
+ if self.catchbreak != False and installHandler is not None:
+ usage['catchbreak'] = CATCHBREAK
+ if self.buffer != False:
+ usage['buffer'] = BUFFEROUTPUT
+ print self.USAGE % usage
+ sys.exit(2)
+
+ def parseArgs(self, argv):
+ if len(argv) > 1 and argv[1].lower() == 'discover':
+ self._do_discovery(argv[2:])
+ return
+
+ import getopt
+ long_opts = ['help', 'verbose', 'quiet', 'failfast', 'catch', 'buffer']
+ try:
+ options, args = getopt.getopt(argv[1:], 'hHvqfcb', long_opts)
+ for opt, value in options:
+ if opt in ('-h','-H','--help'):
+ self.usageExit()
+ if opt in ('-q','--quiet'):
+ self.verbosity = 0
+ if opt in ('-v','--verbose'):
+ self.verbosity = 2
+ if opt in ('-f','--failfast'):
+ if self.failfast is None:
+ self.failfast = True
+ # Should this raise an exception if -f is not valid?
+ if opt in ('-c','--catch'):
+ if self.catchbreak is None and installHandler is not None:
+ self.catchbreak = True
+ # Should this raise an exception if -c is not valid?
+ if opt in ('-b','--buffer'):
+ if self.buffer is None:
+ self.buffer = True
+ # Should this raise an exception if -b is not valid?
+ if len(args) == 0 and self.defaultTest is None:
+ # createTests will load tests from self.module
+ self.testNames = None
+ elif len(args) > 0:
+ self.testNames = args
+ if __name__ == '__main__':
+ # to support python -m unittest ...
+ self.module = None
+ else:
+ self.testNames = (self.defaultTest,)
+ self.createTests()
+ except getopt.error, msg:
+ self.usageExit(msg)
+
+ def createTests(self):
+ if self.testNames is None:
+ self.test = self.testLoader.loadTestsFromModule(self.module)
+ else:
+ self.test = self.testLoader.loadTestsFromNames(self.testNames,
+ self.module)
+
+ def _do_discovery(self, argv, Loader=loader.TestLoader):
+ # handle command line args for test discovery
+ self.progName = '%s discover' % self.progName
+ import optparse
+ parser = optparse.OptionParser()
+ parser.prog = self.progName
+ parser.add_option('-v', '--verbose', dest='verbose', default=False,
+ help='Verbose output', action='store_true')
+ if self.failfast != False:
+ parser.add_option('-f', '--failfast', dest='failfast', default=False,
+ help='Stop on first fail or error',
+ action='store_true')
+ if self.catchbreak != False and installHandler is not None:
+ parser.add_option('-c', '--catch', dest='catchbreak', default=False,
+ help='Catch ctrl-C and display results so far',
+ action='store_true')
+ if self.buffer != False:
+ parser.add_option('-b', '--buffer', dest='buffer', default=False,
+ help='Buffer stdout and stderr during tests',
+ action='store_true')
+ parser.add_option('-s', '--start-directory', dest='start', default='.',
+ help="Directory to start discovery ('.' default)")
+ parser.add_option('-p', '--pattern', dest='pattern', default='test*.py',
+ help="Pattern to match tests ('test*.py' default)")
+ parser.add_option('-t', '--top-level-directory', dest='top', default=None,
+ help='Top level directory of project (defaults to start directory)')
+
+ options, args = parser.parse_args(argv)
+ if len(args) > 3:
+ self.usageExit()
+
+ for name, value in zip(('start', 'pattern', 'top'), args):
+ setattr(options, name, value)
+
+ # only set options from the parsing here
+ # if they weren't set explicitly in the constructor
+ if self.failfast is None:
+ self.failfast = options.failfast
+ if self.catchbreak is None and installHandler is not None:
+ self.catchbreak = options.catchbreak
+ if self.buffer is None:
+ self.buffer = options.buffer
+
+ if options.verbose:
+ self.verbosity = 2
+
+ start_dir = options.start
+ pattern = options.pattern
+ top_level_dir = options.top
+
+ loader = Loader()
+ self.test = loader.discover(start_dir, pattern, top_level_dir)
+
+ def runTests(self):
+ if self.catchbreak:
+ installHandler()
+ if self.testRunner is None:
+ self.testRunner = runner.TextTestRunner
+ if isinstance(self.testRunner, (type, types.ClassType)):
+ try:
+ testRunner = self.testRunner(verbosity=self.verbosity,
+ failfast=self.failfast,
+ buffer=self.buffer)
+ except TypeError:
+ # didn't accept the verbosity, buffer or failfast arguments
+ testRunner = self.testRunner()
+ else:
+ # it is assumed to be a TestRunner instance
+ testRunner = self.testRunner
+ self.result = testRunner.run(self.test)
+ if self.exit:
+ sys.exit(not self.result.wasSuccessful())
+
+main = TestProgram
+
+def main_():
+ TestProgram.USAGE = USAGE_AS_MAIN
+ main(module=None)
+
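From a test author's point of view, ``TestProgram`` is normally reached through the ``main`` alias at the bottom of a test module; a minimal, illustrative module::

    # test_arithmetic.py -- hypothetical standalone test module
    from django.utils import unittest

    class AdditionTests(unittest.TestCase):
        def test_add(self):
            self.assertEqual(1 + 1, 2)

    if __name__ == '__main__':
        # parses sys.argv (including -v/-q/-f/-c/-b), loads the tests in
        # this module and exits non-zero if anything fails
        unittest.main()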
diff --git a/django/utils/unittest/result.py b/django/utils/unittest/result.py
new file mode 100644
index 0000000000..2d2a1ada95
--- /dev/null
+++ b/django/utils/unittest/result.py
@@ -0,0 +1,183 @@
+"""Test result object"""
+
+import sys
+import traceback
+import unittest
+
+from StringIO import StringIO
+
+from django.utils.unittest import util
+from django.utils.unittest.compatibility import wraps
+
+__unittest = True
+
+def failfast(method):
+ @wraps(method)
+ def inner(self, *args, **kw):
+ if getattr(self, 'failfast', False):
+ self.stop()
+ return method(self, *args, **kw)
+ return inner
+
+
+STDOUT_LINE = '\nStdout:\n%s'
+STDERR_LINE = '\nStderr:\n%s'
+
+class TestResult(unittest.TestResult):
+ """Holder for test result information.
+
+ Test results are automatically managed by the TestCase and TestSuite
+ classes, and do not need to be explicitly manipulated by writers of tests.
+
+ Each instance holds the total number of tests run, and collections of
+ failures and errors that occurred among those test runs. The collections
+ contain tuples of (testcase, exceptioninfo), where exceptioninfo is the
+ formatted traceback of the error that occurred.
+ """
+ _previousTestClass = None
+ _moduleSetUpFailed = False
+
+ def __init__(self):
+ self.failfast = False
+ self.failures = []
+ self.errors = []
+ self.testsRun = 0
+ self.skipped = []
+ self.expectedFailures = []
+ self.unexpectedSuccesses = []
+ self.shouldStop = False
+ self.buffer = False
+ self._stdout_buffer = None
+ self._stderr_buffer = None
+ self._original_stdout = sys.stdout
+ self._original_stderr = sys.stderr
+ self._mirrorOutput = False
+
+ def startTest(self, test):
+ "Called when the given test is about to be run"
+ self.testsRun += 1
+ self._mirrorOutput = False
+ if self.buffer:
+ if self._stderr_buffer is None:
+ self._stderr_buffer = StringIO()
+ self._stdout_buffer = StringIO()
+ sys.stdout = self._stdout_buffer
+ sys.stderr = self._stderr_buffer
+
+ def startTestRun(self):
+ """Called once before any tests are executed.
+
+ See startTest for a method called before each test.
+ """
+
+ def stopTest(self, test):
+ """Called when the given test has been run"""
+ if self.buffer:
+ if self._mirrorOutput:
+ output = sys.stdout.getvalue()
+ error = sys.stderr.getvalue()
+ if output:
+ if not output.endswith('\n'):
+ output += '\n'
+ self._original_stdout.write(STDOUT_LINE % output)
+ if error:
+ if not error.endswith('\n'):
+ error += '\n'
+ self._original_stderr.write(STDERR_LINE % error)
+
+ sys.stdout = self._original_stdout
+ sys.stderr = self._original_stderr
+ self._stdout_buffer.seek(0)
+ self._stdout_buffer.truncate()
+ self._stderr_buffer.seek(0)
+ self._stderr_buffer.truncate()
+ self._mirrorOutput = False
+
+
+ def stopTestRun(self):
+ """Called once after all tests are executed.
+
+ See stopTest for a method called after each test.
+ """
+
+ @failfast
+ def addError(self, test, err):
+ """Called when an error has occurred. 'err' is a tuple of values as
+ returned by sys.exc_info().
+ """
+ self.errors.append((test, self._exc_info_to_string(err, test)))
+ self._mirrorOutput = True
+
+ @failfast
+ def addFailure(self, test, err):
+        """Called when a failure has occurred. 'err' is a tuple of values as
+ returned by sys.exc_info()."""
+ self.failures.append((test, self._exc_info_to_string(err, test)))
+ self._mirrorOutput = True
+
+ def addSuccess(self, test):
+ "Called when a test has completed successfully"
+ pass
+
+ def addSkip(self, test, reason):
+ """Called when a test is skipped."""
+ self.skipped.append((test, reason))
+
+ def addExpectedFailure(self, test, err):
+        """Called when an expected failure/error occurred."""
+ self.expectedFailures.append(
+ (test, self._exc_info_to_string(err, test)))
+
+ @failfast
+ def addUnexpectedSuccess(self, test):
+        """Called when a test was expected to fail, but succeeded."""
+ self.unexpectedSuccesses.append(test)
+
+ def wasSuccessful(self):
+ "Tells whether or not this result was a success"
+ return (len(self.failures) + len(self.errors) == 0)
+
+ def stop(self):
+ "Indicates that the tests should be aborted"
+ self.shouldStop = True
+
+ def _exc_info_to_string(self, err, test):
+ """Converts a sys.exc_info()-style tuple of values into a string."""
+ exctype, value, tb = err
+ # Skip test runner traceback levels
+ while tb and self._is_relevant_tb_level(tb):
+ tb = tb.tb_next
+ if exctype is test.failureException:
+ # Skip assert*() traceback levels
+ length = self._count_relevant_tb_levels(tb)
+ msgLines = traceback.format_exception(exctype, value, tb, length)
+ else:
+ msgLines = traceback.format_exception(exctype, value, tb)
+
+ if self.buffer:
+ output = sys.stdout.getvalue()
+ error = sys.stderr.getvalue()
+ if output:
+ if not output.endswith('\n'):
+ output += '\n'
+ msgLines.append(STDOUT_LINE % output)
+ if error:
+ if not error.endswith('\n'):
+ error += '\n'
+ msgLines.append(STDERR_LINE % error)
+ return ''.join(msgLines)
+
+ def _is_relevant_tb_level(self, tb):
+ return '__unittest' in tb.tb_frame.f_globals
+
+ def _count_relevant_tb_levels(self, tb):
+ length = 0
+ while tb and not self._is_relevant_tb_level(tb):
+ length += 1
+ tb = tb.tb_next
+ return length
+
+ def __repr__(self):
+ return "<%s run=%i errors=%i failures=%i>" % \
+ (util.strclass(self.__class__), self.testsRun, len(self.errors),
+ len(self.failures))
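A ``TestResult`` can also be driven without a runner, which is the easiest way to see what the class records; a short sketch (the test case is illustrative)::

    from django.utils import unittest
    from django.utils.unittest.result import TestResult

    class ProbeTests(unittest.TestCase):
        def test_passes(self):
            self.assertTrue(True)

        def test_fails(self):
            self.fail("deliberate failure")

    result = TestResult()
    result.failfast = True     # stop the run at the first failure or error
    suite = unittest.TestLoader().loadTestsFromTestCase(ProbeTests)
    suite.run(result)
    print("%d run, %d failures, successful: %s" % (
        result.testsRun, len(result.failures), result.wasSuccessful()))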
diff --git a/django/utils/unittest/runner.py b/django/utils/unittest/runner.py
new file mode 100644
index 0000000000..242173ee31
--- /dev/null
+++ b/django/utils/unittest/runner.py
@@ -0,0 +1,206 @@
+"""Running tests"""
+
+import sys
+import time
+import unittest
+
+from django.utils.unittest import result
+
+try:
+ from django.utils.unittest.signals import registerResult
+except ImportError:
+ def registerResult(_):
+ pass
+
+__unittest = True
+
+
+class _WritelnDecorator(object):
+ """Used to decorate file-like objects with a handy 'writeln' method"""
+ def __init__(self,stream):
+ self.stream = stream
+
+ def __getattr__(self, attr):
+ if attr in ('stream', '__getstate__'):
+ raise AttributeError(attr)
+ return getattr(self.stream,attr)
+
+ def writeln(self, arg=None):
+ if arg:
+ self.write(arg)
+ self.write('\n') # text-mode streams translate to \r\n if needed
+
+
+class TextTestResult(result.TestResult):
+ """A test result class that can print formatted text results to a stream.
+
+ Used by TextTestRunner.
+ """
+ separator1 = '=' * 70
+ separator2 = '-' * 70
+
+ def __init__(self, stream, descriptions, verbosity):
+ super(TextTestResult, self).__init__()
+ self.stream = stream
+ self.showAll = verbosity > 1
+ self.dots = verbosity == 1
+ self.descriptions = descriptions
+
+ def getDescription(self, test):
+ doc_first_line = test.shortDescription()
+ if self.descriptions and doc_first_line:
+ return '\n'.join((str(test), doc_first_line))
+ else:
+ return str(test)
+
+ def startTest(self, test):
+ super(TextTestResult, self).startTest(test)
+ if self.showAll:
+ self.stream.write(self.getDescription(test))
+ self.stream.write(" ... ")
+ self.stream.flush()
+
+ def addSuccess(self, test):
+ super(TextTestResult, self).addSuccess(test)
+ if self.showAll:
+ self.stream.writeln("ok")
+ elif self.dots:
+ self.stream.write('.')
+ self.stream.flush()
+
+ def addError(self, test, err):
+ super(TextTestResult, self).addError(test, err)
+ if self.showAll:
+ self.stream.writeln("ERROR")
+ elif self.dots:
+ self.stream.write('E')
+ self.stream.flush()
+
+ def addFailure(self, test, err):
+ super(TextTestResult, self).addFailure(test, err)
+ if self.showAll:
+ self.stream.writeln("FAIL")
+ elif self.dots:
+ self.stream.write('F')
+ self.stream.flush()
+
+ def addSkip(self, test, reason):
+ super(TextTestResult, self).addSkip(test, reason)
+ if self.showAll:
+ self.stream.writeln("skipped %r" % (reason,))
+ elif self.dots:
+ self.stream.write("s")
+ self.stream.flush()
+
+ def addExpectedFailure(self, test, err):
+ super(TextTestResult, self).addExpectedFailure(test, err)
+ if self.showAll:
+ self.stream.writeln("expected failure")
+ elif self.dots:
+ self.stream.write("x")
+ self.stream.flush()
+
+ def addUnexpectedSuccess(self, test):
+ super(TextTestResult, self).addUnexpectedSuccess(test)
+ if self.showAll:
+ self.stream.writeln("unexpected success")
+ elif self.dots:
+ self.stream.write("u")
+ self.stream.flush()
+
+ def printErrors(self):
+ if self.dots or self.showAll:
+ self.stream.writeln()
+ self.printErrorList('ERROR', self.errors)
+ self.printErrorList('FAIL', self.failures)
+
+ def printErrorList(self, flavour, errors):
+ for test, err in errors:
+ self.stream.writeln(self.separator1)
+ self.stream.writeln("%s: %s" % (flavour, self.getDescription(test)))
+ self.stream.writeln(self.separator2)
+ self.stream.writeln("%s" % err)
+
+ def stopTestRun(self):
+ super(TextTestResult, self).stopTestRun()
+ self.printErrors()
+
+
+class TextTestRunner(unittest.TextTestRunner):
+ """A test runner class that displays results in textual form.
+
+ It prints out the names of tests as they are run, errors as they
+ occur, and a summary of the results at the end of the test run.
+ """
+ resultclass = TextTestResult
+
+ def __init__(self, stream=sys.stderr, descriptions=True, verbosity=1,
+ failfast=False, buffer=False, resultclass=None):
+ self.stream = _WritelnDecorator(stream)
+ self.descriptions = descriptions
+ self.verbosity = verbosity
+ self.failfast = failfast
+ self.buffer = buffer
+ if resultclass is not None:
+ self.resultclass = resultclass
+
+ def _makeResult(self):
+ return self.resultclass(self.stream, self.descriptions, self.verbosity)
+
+ def run(self, test):
+ "Run the given test case or test suite."
+ result = self._makeResult()
+ result.failfast = self.failfast
+ result.buffer = self.buffer
+ registerResult(result)
+
+ startTime = time.time()
+ startTestRun = getattr(result, 'startTestRun', None)
+ if startTestRun is not None:
+ startTestRun()
+ try:
+ test(result)
+ finally:
+ stopTestRun = getattr(result, 'stopTestRun', None)
+ if stopTestRun is not None:
+ stopTestRun()
+ else:
+ result.printErrors()
+ stopTime = time.time()
+ timeTaken = stopTime - startTime
+ if hasattr(result, 'separator2'):
+ self.stream.writeln(result.separator2)
+ run = result.testsRun
+ self.stream.writeln("Ran %d test%s in %.3fs" %
+ (run, run != 1 and "s" or "", timeTaken))
+ self.stream.writeln()
+
+ expectedFails = unexpectedSuccesses = skipped = 0
+ try:
+ results = map(len, (result.expectedFailures,
+ result.unexpectedSuccesses,
+ result.skipped))
+ expectedFails, unexpectedSuccesses, skipped = results
+ except AttributeError:
+ pass
+ infos = []
+ if not result.wasSuccessful():
+ self.stream.write("FAILED")
+ failed, errored = map(len, (result.failures, result.errors))
+ if failed:
+ infos.append("failures=%d" % failed)
+ if errored:
+ infos.append("errors=%d" % errored)
+ else:
+ self.stream.write("OK")
+ if skipped:
+ infos.append("skipped=%d" % skipped)
+ if expectedFails:
+ infos.append("expected failures=%d" % expectedFails)
+ if unexpectedSuccesses:
+ infos.append("unexpected successes=%d" % unexpectedSuccesses)
+ if infos:
+ self.stream.writeln(" (%s)" % (", ".join(infos),))
+ else:
+ self.stream.write("\n")
+ return result
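The runner side of the same picture, showing the new constructor options; ``ProbeTests`` here stands in for any ``TestCase`` subclass::

    from django.utils import unittest

    class ProbeTests(unittest.TestCase):
        def test_noisy(self):
            print("only shown if the test fails")   # captured by buffer=True
            self.assertTrue(True)

    suite = unittest.TestLoader().loadTestsFromTestCase(ProbeTests)
    runner = unittest.TextTestRunner(verbosity=2,    # one line per test
                                     failfast=True,  # stop on first failure
                                     buffer=True)    # capture stdout/stderr
    result = runner.run(suite)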
diff --git a/django/utils/unittest/signals.py b/django/utils/unittest/signals.py
new file mode 100644
index 0000000000..f1731ea13d
--- /dev/null
+++ b/django/utils/unittest/signals.py
@@ -0,0 +1,57 @@
+import signal
+import weakref
+
+from django.utils.unittest.compatibility import wraps
+
+__unittest = True
+
+
+class _InterruptHandler(object):
+ def __init__(self, default_handler):
+ self.called = False
+ self.default_handler = default_handler
+
+ def __call__(self, signum, frame):
+ installed_handler = signal.getsignal(signal.SIGINT)
+ if installed_handler is not self:
+ # if we aren't the installed handler, then delegate immediately
+ # to the default handler
+ self.default_handler(signum, frame)
+
+ if self.called:
+ self.default_handler(signum, frame)
+ self.called = True
+ for result in _results.keys():
+ result.stop()
+
+_results = weakref.WeakKeyDictionary()
+def registerResult(result):
+ _results[result] = 1
+
+def removeResult(result):
+ return bool(_results.pop(result, None))
+
+_interrupt_handler = None
+def installHandler():
+ global _interrupt_handler
+ if _interrupt_handler is None:
+ default_handler = signal.getsignal(signal.SIGINT)
+ _interrupt_handler = _InterruptHandler(default_handler)
+ signal.signal(signal.SIGINT, _interrupt_handler)
+
+
+def removeHandler(method=None):
+ if method is not None:
+ @wraps(method)
+ def inner(*args, **kwargs):
+ initial = signal.getsignal(signal.SIGINT)
+ removeHandler()
+ try:
+ return method(*args, **kwargs)
+ finally:
+ signal.signal(signal.SIGINT, initial)
+ return inner
+
+ global _interrupt_handler
+ if _interrupt_handler is not None:
+ signal.signal(signal.SIGINT, _interrupt_handler.default_handler)
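A rough sketch of the intended call pattern for the break handling above; it only applies on platforms that deliver SIGINT, and the surrounding code is illustrative::

    from django.utils.unittest import signals
    from django.utils.unittest.result import TestResult

    result = TestResult()
    signals.installHandler()         # swap in the custom SIGINT handler once
    signals.registerResult(result)   # a first Ctrl-C now calls result.stop()

    def run_suite():
        # run tests against `result` here; while this function executes the
        # original SIGINT behaviour is temporarily restored
        pass

    run_suite = signals.removeHandler(run_suite)   # decorator-style use
    run_suite()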
diff --git a/django/utils/unittest/suite.py b/django/utils/unittest/suite.py
new file mode 100644
index 0000000000..f39569bbc2
--- /dev/null
+++ b/django/utils/unittest/suite.py
@@ -0,0 +1,287 @@
+"""TestSuite"""
+
+import sys
+import unittest
+from django.utils.unittest import case, util
+
+__unittest = True
+
+
+class BaseTestSuite(unittest.TestSuite):
+ """A simple test suite that doesn't provide class or module shared fixtures.
+ """
+ def __init__(self, tests=()):
+ self._tests = []
+ self.addTests(tests)
+
+ def __repr__(self):
+ return "<%s tests=%s>" % (util.strclass(self.__class__), list(self))
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ return NotImplemented
+ return list(self) == list(other)
+
+ def __ne__(self, other):
+ return not self == other
+
+ # Can't guarantee hash invariant, so flag as unhashable
+ __hash__ = None
+
+ def __iter__(self):
+ return iter(self._tests)
+
+ def countTestCases(self):
+ cases = 0
+ for test in self:
+ cases += test.countTestCases()
+ return cases
+
+ def addTest(self, test):
+ # sanity checks
+ if not hasattr(test, '__call__'):
+            raise TypeError("%r is not callable" % (test,))
+ if isinstance(test, type) and issubclass(test,
+ (case.TestCase, TestSuite)):
+ raise TypeError("TestCases and TestSuites must be instantiated "
+ "before passing them to addTest()")
+ self._tests.append(test)
+
+ def addTests(self, tests):
+ if isinstance(tests, basestring):
+ raise TypeError("tests must be an iterable of tests, not a string")
+ for test in tests:
+ self.addTest(test)
+
+ def run(self, result):
+ for test in self:
+ if result.shouldStop:
+ break
+ test(result)
+ return result
+
+ def __call__(self, *args, **kwds):
+ return self.run(*args, **kwds)
+
+ def debug(self):
+ """Run the tests without collecting errors in a TestResult"""
+ for test in self:
+ test.debug()
+
+
+class TestSuite(BaseTestSuite):
+ """A test suite is a composite test consisting of a number of TestCases.
+
+ For use, create an instance of TestSuite, then add test case instances.
+ When all tests have been added, the suite can be passed to a test
+ runner, such as TextTestRunner. It will run the individual test cases
+ in the order in which they were added, aggregating the results. When
+ subclassing, do not forget to call the base class constructor.
+ """
+
+
+ def run(self, result):
+ self._wrapped_run(result)
+ self._tearDownPreviousClass(None, result)
+ self._handleModuleTearDown(result)
+ return result
+
+ def debug(self):
+ """Run the tests without collecting errors in a TestResult"""
+ debug = _DebugResult()
+ self._wrapped_run(debug, True)
+ self._tearDownPreviousClass(None, debug)
+ self._handleModuleTearDown(debug)
+
+ ################################
+ # private methods
+ def _wrapped_run(self, result, debug=False):
+ for test in self:
+ if result.shouldStop:
+ break
+
+ if _isnotsuite(test):
+ self._tearDownPreviousClass(test, result)
+ self._handleModuleFixture(test, result)
+ self._handleClassSetUp(test, result)
+ result._previousTestClass = test.__class__
+
+ if (getattr(test.__class__, '_classSetupFailed', False) or
+ getattr(result, '_moduleSetUpFailed', False)):
+ continue
+
+ if hasattr(test, '_wrapped_run'):
+ test._wrapped_run(result, debug)
+ elif not debug:
+ test(result)
+ else:
+ test.debug()
+
+ def _handleClassSetUp(self, test, result):
+ previousClass = getattr(result, '_previousTestClass', None)
+ currentClass = test.__class__
+ if currentClass == previousClass:
+ return
+ if result._moduleSetUpFailed:
+ return
+ if getattr(currentClass, "__unittest_skip__", False):
+ return
+
+ try:
+ currentClass._classSetupFailed = False
+ except TypeError:
+ # test may actually be a function
+ # so its class will be a builtin-type
+ pass
+
+ setUpClass = getattr(currentClass, 'setUpClass', None)
+ if setUpClass is not None:
+ try:
+ setUpClass()
+ except Exception, e:
+ if isinstance(result, _DebugResult):
+ raise
+ currentClass._classSetupFailed = True
+ className = util.strclass(currentClass)
+ errorName = 'setUpClass (%s)' % className
+ self._addClassOrModuleLevelException(result, e, errorName)
+
+ def _get_previous_module(self, result):
+ previousModule = None
+ previousClass = getattr(result, '_previousTestClass', None)
+ if previousClass is not None:
+ previousModule = previousClass.__module__
+ return previousModule
+
+
+ def _handleModuleFixture(self, test, result):
+ previousModule = self._get_previous_module(result)
+ currentModule = test.__class__.__module__
+ if currentModule == previousModule:
+ return
+
+ self._handleModuleTearDown(result)
+
+
+ result._moduleSetUpFailed = False
+ try:
+ module = sys.modules[currentModule]
+ except KeyError:
+ return
+ setUpModule = getattr(module, 'setUpModule', None)
+ if setUpModule is not None:
+ try:
+ setUpModule()
+ except Exception, e:
+ if isinstance(result, _DebugResult):
+ raise
+ result._moduleSetUpFailed = True
+ errorName = 'setUpModule (%s)' % currentModule
+ self._addClassOrModuleLevelException(result, e, errorName)
+
+ def _addClassOrModuleLevelException(self, result, exception, errorName):
+ error = _ErrorHolder(errorName)
+ addSkip = getattr(result, 'addSkip', None)
+ if addSkip is not None and isinstance(exception, case.SkipTest):
+ addSkip(error, str(exception))
+ else:
+ result.addError(error, sys.exc_info())
+
+ def _handleModuleTearDown(self, result):
+ previousModule = self._get_previous_module(result)
+ if previousModule is None:
+ return
+ if result._moduleSetUpFailed:
+ return
+
+ try:
+ module = sys.modules[previousModule]
+ except KeyError:
+ return
+
+ tearDownModule = getattr(module, 'tearDownModule', None)
+ if tearDownModule is not None:
+ try:
+ tearDownModule()
+ except Exception, e:
+ if isinstance(result, _DebugResult):
+ raise
+ errorName = 'tearDownModule (%s)' % previousModule
+ self._addClassOrModuleLevelException(result, e, errorName)
+
+ def _tearDownPreviousClass(self, test, result):
+ previousClass = getattr(result, '_previousTestClass', None)
+ currentClass = test.__class__
+ if currentClass == previousClass:
+ return
+ if getattr(previousClass, '_classSetupFailed', False):
+ return
+ if getattr(result, '_moduleSetUpFailed', False):
+ return
+ if getattr(previousClass, "__unittest_skip__", False):
+ return
+
+ tearDownClass = getattr(previousClass, 'tearDownClass', None)
+ if tearDownClass is not None:
+ try:
+ tearDownClass()
+ except Exception, e:
+ if isinstance(result, _DebugResult):
+ raise
+ className = util.strclass(previousClass)
+ errorName = 'tearDownClass (%s)' % className
+ self._addClassOrModuleLevelException(result, e, errorName)
+
+
+class _ErrorHolder(object):
+ """
+ Placeholder for a TestCase inside a result. As far as a TestResult
+ is concerned, this looks exactly like a unit test. Used to insert
+ arbitrary errors into a test suite run.
+ """
+ # Inspired by the ErrorHolder from Twisted:
+ # http://twistedmatrix.com/trac/browser/trunk/twisted/trial/runner.py
+
+ # attribute used by TestResult._exc_info_to_string
+ failureException = None
+
+ def __init__(self, description):
+ self.description = description
+
+ def id(self):
+ return self.description
+
+ def shortDescription(self):
+ return None
+
+ def __repr__(self):
+        return "<ErrorHolder description=%r>" % (self.description,)
+
+ def __str__(self):
+ return self.id()
+
+ def run(self, result):
+ # could call result.addError(...) - but this test-like object
+ # shouldn't be run anyway
+ pass
+
+ def __call__(self, result):
+ return self.run(result)
+
+ def countTestCases(self):
+ return 0
+
+def _isnotsuite(test):
+ "A crude way to tell apart testcases and suites with duck-typing"
+ try:
+ iter(test)
+ except TypeError:
+ return True
+ return False
+
+
+class _DebugResult(object):
+ "Used by the TestSuite to hold previous class when running in debug."
+ _previousTestClass = None
+ _moduleSetUpFailed = False
+ shouldStop = False
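From the test author's side, the class- and module-level fixtures that ``TestSuite`` now drives look like this; the example is a sketch and the fixture bodies are placeholders::

    from django.utils import unittest

    def setUpModule():
        print("runs once, before any test in this module")

    def tearDownModule():
        print("runs once, after the last test in this module")

    class SharedFixtureTests(unittest.TestCase):
        @classmethod
        def setUpClass(cls):
            cls.shared = [1, 2, 3]    # built once for the whole class

        @classmethod
        def tearDownClass(cls):
            del cls.shared

        def test_shared_is_ready(self):
            self.assertEqual(len(self.shared), 3)

    if __name__ == '__main__':
        unittest.main()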
diff --git a/django/utils/unittest/util.py b/django/utils/unittest/util.py
new file mode 100644
index 0000000000..c45d008cc8
--- /dev/null
+++ b/django/utils/unittest/util.py
@@ -0,0 +1,99 @@
+"""Various utility functions."""
+
+__unittest = True
+
+
+_MAX_LENGTH = 80
+def safe_repr(obj, short=False):
+ try:
+ result = repr(obj)
+ except Exception:
+ result = object.__repr__(obj)
+ if not short or len(result) < _MAX_LENGTH:
+ return result
+ return result[:_MAX_LENGTH] + ' [truncated]...'
+
+def safe_str(obj):
+ try:
+ return str(obj)
+ except Exception:
+ return object.__str__(obj)
+
+def strclass(cls):
+ return "%s.%s" % (cls.__module__, cls.__name__)
+
+def sorted_list_difference(expected, actual):
+ """Finds elements in only one or the other of two, sorted input lists.
+
+ Returns a two-element tuple of lists. The first list contains those
+ elements in the "expected" list but not in the "actual" list, and the
+ second contains those elements in the "actual" list but not in the
+ "expected" list. Duplicate elements in either input list are ignored.
+ """
+ i = j = 0
+ missing = []
+ unexpected = []
+ while True:
+ try:
+ e = expected[i]
+ a = actual[j]
+ if e < a:
+ missing.append(e)
+ i += 1
+ while expected[i] == e:
+ i += 1
+ elif e > a:
+ unexpected.append(a)
+ j += 1
+ while actual[j] == a:
+ j += 1
+ else:
+ i += 1
+ try:
+ while expected[i] == e:
+ i += 1
+ finally:
+ j += 1
+ while actual[j] == a:
+ j += 1
+ except IndexError:
+ missing.extend(expected[i:])
+ unexpected.extend(actual[j:])
+ break
+ return missing, unexpected
+
+def unorderable_list_difference(expected, actual, ignore_duplicate=False):
+ """Same behavior as sorted_list_difference but
+ for lists of unorderable items (like dicts).
+
+ As it does a linear search per item (remove) it
+ has O(n*n) performance.
+ """
+ missing = []
+ unexpected = []
+ while expected:
+ item = expected.pop()
+ try:
+ actual.remove(item)
+ except ValueError:
+ missing.append(item)
+ if ignore_duplicate:
+ for lst in expected, actual:
+ try:
+ while True:
+ lst.remove(item)
+ except ValueError:
+ pass
+ if ignore_duplicate:
+ while actual:
+ item = actual.pop()
+ unexpected.append(item)
+ try:
+ while True:
+ actual.remove(item)
+ except ValueError:
+ pass
+ return missing, unexpected
+
+ # anything left in actual is unexpected
+ return missing, actual
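The two difference helpers are what the new container assertions report from; a small sketch of their return values (note that ``unorderable_list_difference`` consumes its input lists)::

    from django.utils.unittest.util import (sorted_list_difference,
                                            unorderable_list_difference)

    missing, unexpected = sorted_list_difference([1, 2, 3, 5], [2, 3, 4])
    # missing == [1, 5]      (expected but absent)
    # unexpected == [4]      (present but not expected)

    missing, unexpected = unorderable_list_difference(
        [{'a': 1}, {'b': 2}], [{'b': 2}, {'c': 3}])
    # missing == [{'a': 1}], unexpected == [{'c': 3}]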
diff --git a/docs/releases/1.3.txt b/docs/releases/1.3.txt
index ca19e50d39..6f196e1985 100644
--- a/docs/releases/1.3.txt
+++ b/docs/releases/1.3.txt
@@ -136,3 +136,31 @@ have been added to Django's own code as well -- most notably, the
error emails sent on a HTTP 500 server error are now handled as a
logging activity. See :doc:`the documentation on Django's logging
interface </topics/logging>` for more details.
+
+``unittest2`` support
+~~~~~~~~~~~~~~~~~~~~~
+
+Python 2.7 introduced some major changes to the unittest library,
+adding some extremely useful features. To ensure that every Django
+project can benefit from these new features, Django ships with a
+copy of unittest2_, a version of the Python 2.7 unittest library,
+backported for Python 2.4 compatibility.
+
+To access this library, Django provides the
+``django.utils.unittest`` module alias. If you are using Python
+2.7, or you have installed unittest2 locally, Django will map the
+alias to the installed version of the unittest library. Otherwise,
+Django will use its own bundled version of unittest2.
+
+To use this alias, simply use::
+
+ from django.utils import unittest
+
+wherever you would historically have used::
+
+ import unittest
+
+If you want to continue to use the base unittest library, you can --
+you just won't get any of the nice new unittest2 features.
+
+.. _unittest2: http://pypi.python.org/pypi/unittest2
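One of the features this makes available on every supported Python version, sketched briefly through the alias (the test itself is illustrative)::

    from django.utils import unittest

    class NewAssertionTests(unittest.TestCase):
        @unittest.skipIf(True, "demonstrates the new skip support")
        def test_never_runs(self):
            self.fail("reported as a skip, not a failure")

        def test_regexp(self):
            # assertRegexpMatches is one of the unittest2 additions
            self.assertRegexpMatches("Django 1.3", r"\d+\.\d+")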
diff --git a/docs/topics/testing.txt b/docs/topics/testing.txt
index 8a19d38f22..5ecf1f032a 100644
--- a/docs/topics/testing.txt
+++ b/docs/topics/testing.txt
@@ -57,8 +57,8 @@ frameworks are:
class MyFuncTestCase(unittest.TestCase):
def testBasic(self):
a = ['larry', 'curly', 'moe']
- self.assertEquals(my_func(a, 0), 'larry')
- self.assertEquals(my_func(a, 1), 'curly')
+ self.assertEqual(my_func(a, 0), 'larry')
+ self.assertEqual(my_func(a, 1), 'curly')
You can choose the test framework you like, depending on which syntax you
prefer, or you can mix and match, using one framework for some of your code and
@@ -151,9 +151,38 @@ documentation for doctest`_.
Writing unit tests
------------------
-Like doctests, Django's unit tests use a standard library module: unittest_.
-This module uses a different way of defining tests, taking a class-based
-approach.
+Like doctests, Django's unit tests use a Python standard library
+module: unittest_. This module uses a different way of defining tests,
+taking a class-based approach.
+
+.. admonition:: unittest2
+
+ .. versionchanged:: 1.3
+
+ Python 2.7 introduced some major changes to the unittest library,
+ adding some extremely useful features. To ensure that every Django
+ project can benefit from these new features, Django ships with a
+ copy of unittest2_, a copy of the Python 2.7 unittest library,
+    copy of unittest2_, a version of the Python 2.7 unittest library,
+
+ To access this library, Django provides the
+ ``django.utils.unittest`` module alias. If you are using Python
+    2.7, or you have installed unittest2 locally, Django will map the
+    alias to the installed version of the unittest library. Otherwise,
+    Django will use its own bundled version of unittest2.
+
+ To use this alias, simply use::
+
+ from django.utils import unittest
+
+    wherever you would historically have used::
+
+ import unittest
+
+    If you want to continue to use the base unittest library, you can --
+ you just won't get any of the nice new unittest2 features.
+
+.. _unittest2: http://pypi.python.org/pypi/unittest2
As with doctests, for a given Django application, the test runner looks for
unit tests in two places:
@@ -168,7 +197,7 @@ unit tests in two places:
This example ``unittest.TestCase`` subclass is equivalent to the example given
in the doctest section above::
- import unittest
+ from django.utils import unittest
from myapp.models import Animal
class AnimalTestCase(unittest.TestCase):
@@ -177,8 +206,8 @@ in the doctest section above::
self.cat = Animal.objects.create(name="cat", sound="meow")
def testSpeaking(self):
- self.assertEquals(self.lion.speak(), 'The lion says "roar"')
- self.assertEquals(self.cat.speak(), 'The cat says "meow"')
+ self.assertEqual(self.lion.speak(), 'The lion says "roar"')
+ self.assertEqual(self.cat.speak(), 'The cat says "meow"')
When you :ref:`run your tests <running-tests>`, the default behavior of the
test utility is to find all the test cases (that is, subclasses of
@@ -199,6 +228,7 @@ documentation`_.
.. _standard library unittest documentation: unittest_
.. _suggested organization: http://docs.python.org/library/unittest.html#organizing-tests
+
Which should I use?
-------------------
@@ -231,6 +261,8 @@ you:
routines, which give you a high level of control over the environment
in which your test cases are run.
+ * If you're writing tests for Django itself, you should use ``unittest``.
+
Again, remember that you can use both systems side-by-side (even in the same
app). In the end, most projects will eventually end up using both. Each shines
in different circumstances.
@@ -964,7 +996,7 @@ Example
The following is a simple unit test using the test client::
- import unittest
+ from django.utils import unittest
from django.test.client import Client
class SimpleTest(unittest.TestCase):
@@ -977,10 +1009,10 @@ The following is a simple unit test using the test client::
response = self.client.get('/customer/details/')
# Check that the response is 200 OK.
- self.failUnlessEqual(response.status_code, 200)
+ self.assertEqual(response.status_code, 200)
# Check that the rendered context contains 5 customers.
- self.failUnlessEqual(len(response.context['customers']), 5)
+ self.assertEqual(len(response.context['customers']), 5)
TestCase
--------
@@ -1061,19 +1093,19 @@ worry about state (such as cookies) carrying over from one test to another.
This means, instead of instantiating a ``Client`` in each test::
- import unittest
+ from django.utils import unittest
from django.test.client import Client
class SimpleTest(unittest.TestCase):
def test_details(self):
client = Client()
response = client.get('/customer/details/')
- self.failUnlessEqual(response.status_code, 200)
+ self.assertEqual(response.status_code, 200)
def test_index(self):
client = Client()
response = client.get('/customer/index/')
- self.failUnlessEqual(response.status_code, 200)
+ self.assertEqual(response.status_code, 200)
...you can just refer to ``self.client``, like so::
@@ -1082,11 +1114,11 @@ This means, instead of instantiating a ``Client`` in each test::
class SimpleTest(TestCase):
def test_details(self):
response = self.client.get('/customer/details/')
- self.failUnlessEqual(response.status_code, 200)
+ self.assertEqual(response.status_code, 200)
def test_index(self):
response = self.client.get('/customer/index/')
- self.failUnlessEqual(response.status_code, 200)
+ self.assertEqual(response.status_code, 200)
Customizing the test client
~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -1265,7 +1297,7 @@ Assertions
Added ``msg_prefix`` argument.
As Python's normal ``unittest.TestCase`` class implements assertion methods
-such as ``assertTrue`` and ``assertEquals``, Django's custom ``TestCase`` class
+such as ``assertTrue`` and ``assertEqual``, Django's custom ``TestCase`` class
provides a number of custom assertion methods that are useful for testing Web
applications:
@@ -1385,10 +1417,10 @@ and contents::
fail_silently=False)
# Test that one message has been sent.
- self.assertEquals(len(mail.outbox), 1)
+ self.assertEqual(len(mail.outbox), 1)
# Verify that the subject of the first message is correct.
- self.assertEquals(mail.outbox[0].subject, 'Subject here')
+ self.assertEqual(mail.outbox[0].subject, 'Subject here')
As noted :ref:`previously `, the test outbox is emptied
at the start of every test in a Django ``TestCase``. To empty the outbox
diff --git a/tests/modeltests/basic/models.py b/tests/modeltests/basic/models.py
index ad2e965810..ffce4cba6b 100644
--- a/tests/modeltests/basic/models.py
+++ b/tests/modeltests/basic/models.py
@@ -4,7 +4,7 @@
This is a basic model with only two non-primary-key fields.
"""
-from django.db import models, DEFAULT_DB_ALIAS
+from django.db import models, DEFAULT_DB_ALIAS, connection
class Article(models.Model):
headline = models.CharField(max_length=100, default='Default headline')
@@ -359,9 +359,7 @@ AttributeError: Manager isn't accessible via Article instances
from django.conf import settings
-building_docs = getattr(settings, 'BUILDING_DOCS', False)
-
-if building_docs or settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE'] == 'django.db.backends.postgresql':
+if connection.features.supports_microsecond_precision:
__test__['API_TESTS'] += """
# In PostgreSQL, microsecond-level precision is available.
>>> a9 = Article(headline='Article 9', pub_date=datetime(2005, 7, 31, 12, 30, 45, 180))
@@ -369,8 +367,7 @@ if building_docs or settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE'] == 'django.db
>>> Article.objects.get(id__exact=9).pub_date
datetime.datetime(2005, 7, 31, 12, 30, 45, 180)
"""
-
-if building_docs or settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE'] == 'django.db.backends.mysql':
+else:
__test__['API_TESTS'] += """
# In MySQL, microsecond-level precision isn't available. You'll lose
# microsecond-level precision once the data is saved.
diff --git a/tests/modeltests/custom_pk/tests.py b/tests/modeltests/custom_pk/tests.py
index 6ef4bdd433..22975a8417 100644
--- a/tests/modeltests/custom_pk/tests.py
+++ b/tests/modeltests/custom_pk/tests.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
from django.conf import settings
from django.db import DEFAULT_DB_ALIAS, transaction, IntegrityError
-from django.test import TestCase
+from django.test import TestCase, skipIfDBFeature
from models import Employee, Business, Bar, Foo
@@ -168,16 +168,15 @@ class CustomPKTests(TestCase):
self.assertEqual(f, new_foo),
self.assertEqual(f.bar, new_bar)
-
# SQLite lets objects be saved with an empty primary key, even though an
# integer is expected. So we can't check for an error being raised in that
# case for SQLite. Remove it from the suite for this next bit.
- if settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE'] != 'django.db.backends.sqlite3':
- def test_required_pk(self):
- # The primary key must be specified, so an error is raised if you
- # try to create an object without it.
- sid = transaction.savepoint()
- self.assertRaises(IntegrityError,
- Employee.objects.create, first_name="Tom", last_name="Smith"
- )
- transaction.savepoint_rollback(sid)
+ @skipIfDBFeature('supports_unspecified_pk')
+ def test_required_pk(self):
+ # The primary key must be specified, so an error is raised if you
+ # try to create an object without it.
+ sid = transaction.savepoint()
+ self.assertRaises(IntegrityError,
+ Employee.objects.create, first_name="Tom", last_name="Smith"
+ )
+ transaction.savepoint_rollback(sid)
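The same decorators generalise to other backend capabilities; a hedged sketch combining them with feature flags that appear elsewhere in this patch (the test bodies are placeholders)::

    from django.test import TestCase, skipIfDBFeature, skipUnlessDBFeature

    class FeatureDependentTests(TestCase):

        @skipUnlessDBFeature('supports_microsecond_precision')
        def test_needs_microseconds(self):
            # runs only where the backend keeps sub-second precision
            pass

        @skipIfDBFeature('interprets_empty_strings_as_nulls')
        def test_relies_on_empty_strings(self):
            # skipped on backends that store '' as NULL
            pass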
diff --git a/tests/modeltests/fixtures/tests.py b/tests/modeltests/fixtures/tests.py
index 799a7328da..3c08e26867 100644
--- a/tests/modeltests/fixtures/tests.py
+++ b/tests/modeltests/fixtures/tests.py
@@ -1,13 +1,14 @@
import StringIO
import sys
-from django.test import TestCase, TransactionTestCase
from django.conf import settings
from django.core import management
from django.db import DEFAULT_DB_ALIAS
+from django.test import TestCase, TransactionTestCase, skipUnlessDBFeature
from models import Article, Blog, Book, Category, Person, Spy, Tag, Visa
+
class TestCaseFixtureLoadingTests(TestCase):
fixtures = ['fixture1.json', 'fixture2.json']
@@ -291,46 +292,46 @@ class FixtureLoadingTests(TestCase):
self._dumpdata_assert(['fixtures'], """
""", format='xml', natural_keys=True)
-if settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE'] != 'django.db.backends.mysql':
- class FixtureTransactionTests(TransactionTestCase):
- def _dumpdata_assert(self, args, output, format='json'):
- new_io = StringIO.StringIO()
- management.call_command('dumpdata', *args, **{'format':format, 'stdout':new_io})
- command_output = new_io.getvalue().strip()
- self.assertEqual(command_output, output)
+class FixtureTransactionTests(TransactionTestCase):
+ def _dumpdata_assert(self, args, output, format='json'):
+ new_io = StringIO.StringIO()
+ management.call_command('dumpdata', *args, **{'format':format, 'stdout':new_io})
+ command_output = new_io.getvalue().strip()
+ self.assertEqual(command_output, output)
- def test_format_discovery(self):
- # Load fixture 1 again, using format discovery
- management.call_command('loaddata', 'fixture1', verbosity=0, commit=False)
- self.assertQuerysetEqual(Article.objects.all(), [
- '',
- '',
- ''
- ])
+ @skipUnlessDBFeature('supports_forward_references')
+ def test_format_discovery(self):
+ # Load fixture 1 again, using format discovery
+ management.call_command('loaddata', 'fixture1', verbosity=0, commit=False)
+ self.assertQuerysetEqual(Article.objects.all(), [
+ '',
+ '',
+ ''
+ ])
- # Try to load fixture 2 using format discovery; this will fail
- # because there are two fixture2's in the fixtures directory
- new_io = StringIO.StringIO()
- management.call_command('loaddata', 'fixture2', verbosity=0, stderr=new_io)
- output = new_io.getvalue().strip().split('\n')
- self.assertEqual(len(output), 1)
- self.assertTrue(output[0].startswith("Multiple fixtures named 'fixture2'"))
+ # Try to load fixture 2 using format discovery; this will fail
+ # because there are two fixture2's in the fixtures directory
+ new_io = StringIO.StringIO()
+ management.call_command('loaddata', 'fixture2', verbosity=0, stderr=new_io)
+ output = new_io.getvalue().strip().split('\n')
+ self.assertEqual(len(output), 1)
+ self.assertTrue(output[0].startswith("Multiple fixtures named 'fixture2'"))
- # object list is unaffected
- self.assertQuerysetEqual(Article.objects.all(), [
-            '<Article: Time to reform copyright>',
-            '<Article: Poker has no place on ESPN>',
-            '<Article: Python program becomes self aware>'
- ])
+ # object list is unaffected
+ self.assertQuerysetEqual(Article.objects.all(), [
+            '<Article: Time to reform copyright>',
+            '<Article: Poker has no place on ESPN>',
+            '<Article: Python program becomes self aware>'
+ ])
- # Dump the current contents of the database as a JSON fixture
- self._dumpdata_assert(['fixtures'], '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Time to reform copyright", "pub_date": "2006-06-16 13:00:00"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16 12:00:00"}}, {"pk": 1, "model": "fixtures.article", "fields": {"headline": "Python program becomes self aware", "pub_date": "2006-06-16 11:00:00"}}]')
+ # Dump the current contents of the database as a JSON fixture
+ self._dumpdata_assert(['fixtures'], '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Time to reform copyright", "pub_date": "2006-06-16 13:00:00"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16 12:00:00"}}, {"pk": 1, "model": "fixtures.article", "fields": {"headline": "Python program becomes self aware", "pub_date": "2006-06-16 11:00:00"}}]')
- # Load fixture 4 (compressed), using format discovery
- management.call_command('loaddata', 'fixture4', verbosity=0, commit=False)
- self.assertQuerysetEqual(Article.objects.all(), [
- '',
- '',
- '',
- ''
- ])
+ # Load fixture 4 (compressed), using format discovery
+ management.call_command('loaddata', 'fixture4', verbosity=0, commit=False)
+ self.assertQuerysetEqual(Article.objects.all(), [
+ '',
+ '',
+ '',
+ ''
+ ])
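
test_format_discovery is now guarded by @skipUnlessDBFeature('supports_forward_references') instead of excluding the whole TransactionTestCase on MySQL. As a sketch of the other side of that contract, a backend would opt out by overriding the flag on its features class (the attribute name comes from the test above; the base-class default is assumed):

from django.db.backends import BaseDatabaseFeatures


class MyBackendFeatures(BaseDatabaseFeatures):
    # Backends that enforce foreign keys immediately, rather than at commit
    # time, cannot load fixtures whose objects reference rows that appear
    # later in the same file.
    supports_forward_references = False
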
diff --git a/tests/modeltests/lookup/models.py b/tests/modeltests/lookup/models.py
index 72b547a376..bb555615ea 100644
--- a/tests/modeltests/lookup/models.py
+++ b/tests/modeltests/lookup/models.py
@@ -4,7 +4,7 @@
This demonstrates features of the database API.
"""
-from django.db import models, DEFAULT_DB_ALIAS
+from django.db import models, DEFAULT_DB_ALIAS, connection
from django.conf import settings
class Article(models.Model):
@@ -41,15 +41,16 @@ False
# There should be some now!
>>> Article.objects.exists()
True
-"""}
-if settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE'] in (
- 'django.db.backends.postgresql',
- 'django.db.backends.postgresql_pysycopg2'):
- __test__['API_TESTS'] += r"""
-# text matching tests for PostgreSQL 8.3
+# Integer value can be queried using string
>>> Article.objects.filter(id__iexact='1')
[]
+
+"""}
+
+if connection.features.supports_date_lookup_using_string:
+ __test__['API_TESTS'] += r"""
+# A date lookup can be performed using a string search
>>> Article.objects.filter(pub_date__startswith='2005')
[, , , , , , ]
"""
@@ -409,7 +410,7 @@ FieldError: Join on field 'headline' not permitted. Did you misspell 'starts' fo
"""
-if settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE'] != 'django.db.backends.mysql':
+if connection.features.supports_regex_backreferencing:
__test__['API_TESTS'] += r"""
# grouping and backreferences
>>> Article.objects.filter(headline__regex=r'b(.).*b\1')
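
For reference, the guarded doctest exercises regex backreferences in a headline lookup, which not every backend's regex engine supports; that is what supports_regex_backreferencing captures. A hypothetical usage outside the doctest (the Article model is the one defined in this file):

from django.db import connection
from models import Article

if connection.features.supports_regex_backreferencing:
    # Headlines where the character following one 'b' shows up again after
    # a later 'b', e.g. "bob is bored" ('bo' ... 'bo').
    repeats = Article.objects.filter(headline__regex=r'b(.).*b\1')
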
diff --git a/tests/modeltests/test_client/tests.py b/tests/modeltests/test_client/tests.py
index 09f292e037..c669fdbadd 100644
--- a/tests/modeltests/test_client/tests.py
+++ b/tests/modeltests/test_client/tests.py
@@ -1,6 +1,6 @@
# Validate that you can override the default test suite
-import unittest
+from django.utils import unittest
def suite():
"""
diff --git a/tests/modeltests/transactions/tests.py b/tests/modeltests/transactions/tests.py
index 9964f5d7ab..be95005195 100644
--- a/tests/modeltests/transactions/tests.py
+++ b/tests/modeltests/transactions/tests.py
@@ -1,155 +1,159 @@
-from django.test import TransactionTestCase
from django.db import connection, transaction, IntegrityError, DEFAULT_DB_ALIAS
from django.conf import settings
+from django.test import TransactionTestCase, skipUnlessDBFeature
from models import Reporter
-PGSQL = 'psycopg2' in settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE']
-MYSQL = 'mysql' in settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE']
class TransactionTests(TransactionTestCase):
+ def create_a_reporter_then_fail(self, first, last):
+ a = Reporter(first_name=first, last_name=last)
+ a.save()
+ raise Exception("I meant to do that")
- if not MYSQL:
+ def remove_a_reporter(self, first_name):
+ r = Reporter.objects.get(first_name="Alice")
+ r.delete()
- def create_a_reporter_then_fail(self, first, last):
- a = Reporter(first_name=first, last_name=last)
- a.save()
- raise Exception("I meant to do that")
+ def manually_managed(self):
+ r = Reporter(first_name="Dirk", last_name="Gently")
+ r.save()
+ transaction.commit()
- def remove_a_reporter(self, first_name):
- r = Reporter.objects.get(first_name="Alice")
- r.delete()
+ def manually_managed_mistake(self):
+ r = Reporter(first_name="Edward", last_name="Woodward")
+ r.save()
+ # Oops, I forgot to commit/rollback!
- def manually_managed(self):
- r = Reporter(first_name="Dirk", last_name="Gently")
- r.save()
- transaction.commit()
+ @skipUnlessDBFeature('supports_transactions')
+ def test_autocommit(self):
+ """
+ The default behavior is to autocommit after each save() action.
+ """
+ self.assertRaises(Exception,
+ self.create_a_reporter_then_fail,
+ "Alice", "Smith"
+ )
- def manually_managed_mistake(self):
- r = Reporter(first_name="Edward", last_name="Woodward")
- r.save()
- # Oops, I forgot to commit/rollback!
+ # The object created before the exception still exists
+ self.assertEqual(Reporter.objects.count(), 1)
- def execute_bad_sql(self):
- cursor = connection.cursor()
- cursor.execute("INSERT INTO transactions_reporter (first_name, last_name) VALUES ('Douglas', 'Adams');")
- transaction.set_dirty()
+ @skipUnlessDBFeature('supports_transactions')
+ def test_autocommit_decorator(self):
+ """
+ The autocommit decorator works exactly the same as the default behavior.
+ """
+ autocomitted_create_then_fail = transaction.autocommit(
+ self.create_a_reporter_then_fail
+ )
+ self.assertRaises(Exception,
+ autocomitted_create_then_fail,
+ "Alice", "Smith"
+ )
+ # Again, the object created before the exception still exists
+ self.assertEqual(Reporter.objects.count(), 1)
- def test_autocommit(self):
- """
- The default behavior is to autocommit after each save() action.
- """
- self.assertRaises(Exception,
- self.create_a_reporter_then_fail,
- "Alice", "Smith"
- )
+ @skipUnlessDBFeature('supports_transactions')
+ def test_autocommit_decorator_with_using(self):
+ """
+ The autocommit decorator also works with a using argument.
+ """
+ autocomitted_create_then_fail = transaction.autocommit(using='default')(
+ self.create_a_reporter_then_fail
+ )
+ self.assertRaises(Exception,
+ autocomitted_create_then_fail,
+ "Alice", "Smith"
+ )
+ # Again, the object created before the exception still exists
+ self.assertEqual(Reporter.objects.count(), 1)
- # The object created before the exception still exists
- self.assertEqual(Reporter.objects.count(), 1)
+ @skipUnlessDBFeature('supports_transactions')
+ def test_commit_on_success(self):
+ """
+ With the commit_on_success decorator, the transaction is only committed
+ if the function doesn't throw an exception.
+ """
+ committed_on_success = transaction.commit_on_success(
+ self.create_a_reporter_then_fail)
+ self.assertRaises(Exception, committed_on_success, "Dirk", "Gently")
+ # This time the object never got saved
+ self.assertEqual(Reporter.objects.count(), 0)
- def test_autocommit_decorator(self):
- """
- The autocommit decorator works exactly the same as the default behavior.
- """
- autocomitted_create_then_fail = transaction.autocommit(
- self.create_a_reporter_then_fail
- )
- self.assertRaises(Exception,
- autocomitted_create_then_fail,
- "Alice", "Smith"
- )
- # Again, the object created before the exception still exists
- self.assertEqual(Reporter.objects.count(), 1)
+ @skipUnlessDBFeature('supports_transactions')
+ def test_commit_on_success_with_using(self):
+ """
+ The commit_on_success decorator also works with a using argument.
+ """
+ using_committed_on_success = transaction.commit_on_success(using='default')(
+ self.create_a_reporter_then_fail
+ )
+ self.assertRaises(Exception,
+ using_committed_on_success,
+ "Dirk", "Gently"
+ )
+ # This time the object never got saved
+ self.assertEqual(Reporter.objects.count(), 0)
- def test_autocommit_decorator_with_using(self):
- """
- The autocommit decorator also works with a using argument.
- """
- autocomitted_create_then_fail = transaction.autocommit(using='default')(
- self.create_a_reporter_then_fail
- )
- self.assertRaises(Exception,
- autocomitted_create_then_fail,
- "Alice", "Smith"
- )
- # Again, the object created before the exception still exists
- self.assertEqual(Reporter.objects.count(), 1)
+ @skipUnlessDBFeature('supports_transactions')
+ def test_commit_on_success_succeed(self):
+ """
+ If there aren't any exceptions, the data will get saved.
+ """
+ Reporter.objects.create(first_name="Alice", last_name="Smith")
+ remove_comitted_on_success = transaction.commit_on_success(
+ self.remove_a_reporter
+ )
+ remove_comitted_on_success("Alice")
+ self.assertEqual(list(Reporter.objects.all()), [])
- def test_commit_on_success(self):
- """
- With the commit_on_success decorator, the transaction is only committed
- if the function doesn't throw an exception.
- """
- committed_on_success = transaction.commit_on_success(
- self.create_a_reporter_then_fail)
- self.assertRaises(Exception, committed_on_success, "Dirk", "Gently")
- # This time the object never got saved
- self.assertEqual(Reporter.objects.count(), 0)
+ @skipUnlessDBFeature('supports_transactions')
+ def test_manually_managed(self):
+ """
+ You can manually manage transactions if you really want to, but you
+ have to remember to commit/rollback.
+ """
+ manually_managed = transaction.commit_manually(self.manually_managed)
+ manually_managed()
+ self.assertEqual(Reporter.objects.count(), 1)
- def test_commit_on_success_with_using(self):
- """
- The commit_on_success decorator also works with a using argument.
- """
- using_committed_on_success = transaction.commit_on_success(using='default')(
- self.create_a_reporter_then_fail
- )
- self.assertRaises(Exception,
- using_committed_on_success,
- "Dirk", "Gently"
- )
- # This time the object never got saved
- self.assertEqual(Reporter.objects.count(), 0)
+ @skipUnlessDBFeature('supports_transactions')
+ def test_manually_managed_mistake(self):
+ """
+ If you forget, you'll get bad errors.
+ """
+ manually_managed_mistake = transaction.commit_manually(
+ self.manually_managed_mistake
+ )
+ self.assertRaises(transaction.TransactionManagementError,
+ manually_managed_mistake)
- def test_commit_on_success_succeed(self):
- """
- If there aren't any exceptions, the data will get saved.
- """
- Reporter.objects.create(first_name="Alice", last_name="Smith")
- remove_comitted_on_success = transaction.commit_on_success(
- self.remove_a_reporter
- )
- remove_comitted_on_success("Alice")
- self.assertEqual(list(Reporter.objects.all()), [])
+ @skipUnlessDBFeature('supports_transactions')
+ def test_manually_managed_with_using(self):
+ """
+ The commit_manually function also works with a using argument.
+ """
+ using_manually_managed_mistake = transaction.commit_manually(using='default')(
+ self.manually_managed_mistake
+ )
+ self.assertRaises(transaction.TransactionManagementError,
+ using_manually_managed_mistake
+ )
- def test_manually_managed(self):
- """
- You can manually manage transactions if you really want to, but you
- have to remember to commit/rollback.
- """
- manually_managed = transaction.commit_manually(self.manually_managed)
- manually_managed()
- self.assertEqual(Reporter.objects.count(), 1)
+class TransactionRollbackTests(TransactionTestCase):
+ def execute_bad_sql(self):
+ cursor = connection.cursor()
+ cursor.execute("INSERT INTO transactions_reporter (first_name, last_name) VALUES ('Douglas', 'Adams');")
+ transaction.set_dirty()
- def test_manually_managed_mistake(self):
- """
- If you forget, you'll get bad errors.
- """
- manually_managed_mistake = transaction.commit_manually(
- self.manually_managed_mistake
- )
- self.assertRaises(transaction.TransactionManagementError,
- manually_managed_mistake)
-
- def test_manually_managed_with_using(self):
- """
- The commit_manually function also works with a using argument.
- """
- using_manually_managed_mistake = transaction.commit_manually(using='default')(
- self.manually_managed_mistake
- )
- self.assertRaises(transaction.TransactionManagementError,
- using_manually_managed_mistake
- )
-
- if PGSQL:
-
- def test_bad_sql(self):
- """
- Regression for #11900: If a function wrapped by commit_on_success
- writes a transaction that can't be committed, that transaction should
- be rolled back. The bug is only visible using the psycopg2 backend,
- though the fix is generally a good idea.
- """
- execute_bad_sql = transaction.commit_on_success(self.execute_bad_sql)
- self.assertRaises(IntegrityError, execute_bad_sql)
- transaction.rollback()
+ @skipUnlessDBFeature('requires_rollback_on_dirty_transaction')
+ def test_bad_sql(self):
+ """
+ Regression for #11900: If a function wrapped by commit_on_success
+ writes a transaction that can't be committed, that transaction should
+ be rolled back. The bug is only visible using the psycopg2 backend,
+ though the fix is generally a good idea.
+ """
+ execute_bad_sql = transaction.commit_on_success(self.execute_bad_sql)
+ self.assertRaises(IntegrityError, execute_bad_sql)
+ transaction.rollback()
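
All of the TransactionTests methods are now gated on supports_transactions. A rough sketch of how that property can be probed at run time rather than hard-coded per engine (an assumed approach, not Django's actual detection code, reusing the Reporter model from the tests above):

from django.db import transaction
from models import Reporter


def probe_transaction_support():
    """Create a row inside a managed transaction, roll back, and check
    whether the row survived (it does on non-transactional storage)."""
    transaction.enter_transaction_management()
    transaction.managed(True)
    Reporter.objects.create(first_name="Probe", last_name="Row")
    transaction.rollback()
    transaction.leave_transaction_management()
    survived = Reporter.objects.filter(first_name="Probe").exists()
    # Clean up the leftover row on backends where the rollback was a no-op.
    Reporter.objects.filter(first_name="Probe").delete()
    return not survived
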
diff --git a/tests/modeltests/validation/__init__.py b/tests/modeltests/validation/__init__.py
index d0a7d19d49..ca18c1bd68 100644
--- a/tests/modeltests/validation/__init__.py
+++ b/tests/modeltests/validation/__init__.py
@@ -1,4 +1,4 @@
-import unittest
+from django.utils import unittest
from django.core.exceptions import ValidationError
diff --git a/tests/modeltests/validation/test_unique.py b/tests/modeltests/validation/test_unique.py
index fb77c4d28c..2b824ae3b2 100644
--- a/tests/modeltests/validation/test_unique.py
+++ b/tests/modeltests/validation/test_unique.py
@@ -1,7 +1,9 @@
-import unittest
import datetime
+
from django.conf import settings
from django.db import connection
+from django.utils import unittest
+
from models import CustomPKModel, UniqueTogetherModel, UniqueFieldsModel, UniqueForDateModel, ModelToValidate
diff --git a/tests/modeltests/validation/validators.py b/tests/modeltests/validation/validators.py
index 3ad2c40f03..6a7d833555 100644
--- a/tests/modeltests/validation/validators.py
+++ b/tests/modeltests/validation/validators.py
@@ -1,4 +1,5 @@
-from unittest import TestCase
+from django.utils.unittest import TestCase
+
from modeltests.validation import ValidationTestCase
from models import *
diff --git a/tests/modeltests/validators/tests.py b/tests/modeltests/validators/tests.py
index 44ad176747..b4411a221a 100644
--- a/tests/modeltests/validators/tests.py
+++ b/tests/modeltests/validators/tests.py
@@ -1,10 +1,12 @@
# -*- coding: utf-8 -*-
import re
import types
-from unittest import TestCase
from datetime import datetime, timedelta
+
from django.core.exceptions import ValidationError
from django.core.validators import *
+from django.utils.unittest import TestCase
+
NOW = datetime.now()
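
The mechanical part of this patch -- swapping `import unittest` for `from django.utils import unittest` in file after file -- relies on a compatibility shim so the skip decorators used above exist on every supported Python. A plausible shape for such a shim (assumed pattern, not necessarily the module Django bundles):

try:
    # unittest2 backports the Python 2.7 testing features (SkipTest, the
    # skipIf/skipUnless decorators, addCleanup, and friends).
    from unittest2 import *
except ImportError:
    # On interpreters that already ship those features, the standard
    # library module is enough.
    from unittest import *
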
diff --git a/tests/regressiontests/admin_scripts/tests.py b/tests/regressiontests/admin_scripts/tests.py
index 3dd8ad5d13..52f1bedc44 100644
--- a/tests/regressiontests/admin_scripts/tests.py
+++ b/tests/regressiontests/admin_scripts/tests.py
@@ -4,13 +4,13 @@ advertised - especially with regards to the handling of the DJANGO_SETTINGS_MODU
and default settings.py files.
"""
import os
-import unittest
import shutil
import sys
import re
from django import conf, bin, get_version
from django.conf import settings
+from django.utils import unittest
class AdminScriptTestCase(unittest.TestCase):
def write_settings(self, filename, apps=None, is_dir=False, sdict=None):
diff --git a/tests/regressiontests/admin_util/tests.py b/tests/regressiontests/admin_util/tests.py
index 5ea0ac585e..6979a7a573 100644
--- a/tests/regressiontests/admin_util/tests.py
+++ b/tests/regressiontests/admin_util/tests.py
@@ -1,16 +1,15 @@
from datetime import datetime
-import unittest
from django.conf import settings
-from django.db import models
-from django.utils.formats import localize
-from django.test import TestCase
-
from django.contrib import admin
from django.contrib.admin.util import display_for_field, label_for_field, lookup_field
+from django.contrib.admin.util import NestedObjects
from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
from django.contrib.sites.models import Site
-from django.contrib.admin.util import NestedObjects
+from django.db import models
+from django.test import TestCase
+from django.utils import unittest
+from django.utils.formats import localize
from models import Article, Count
diff --git a/tests/regressiontests/admin_views/tests.py b/tests/regressiontests/admin_views/tests.py
index c530839f5d..c0e1e506d1 100644
--- a/tests/regressiontests/admin_views/tests.py
+++ b/tests/regressiontests/admin_views/tests.py
@@ -12,13 +12,14 @@ from django.contrib.admin.sites import LOGIN_FORM_KEY
from django.contrib.admin.util import quote
from django.contrib.admin.helpers import ACTION_CHECKBOX_NAME
from django.forms.util import ErrorList
+import django.template.context
from django.test import TestCase
from django.utils import formats
from django.utils.cache import get_max_age
from django.utils.encoding import iri_to_uri
from django.utils.html import escape
from django.utils.translation import get_date_formats, activate, deactivate
-import django.template.context
+from django.utils import unittest
# local test models
from models import Article, BarAccount, CustomArticle, EmptyModel, \
@@ -2210,51 +2211,52 @@ class UserAdminTest(TestCase):
self.assertNotEquals(new_user.password, UNUSABLE_PASSWORD)
try:
- # If docutils isn't installed, skip the AdminDocs tests.
import docutils
-
- class AdminDocsTest(TestCase):
- fixtures = ['admin-views-users.xml']
-
- def setUp(self):
- self.client.login(username='super', password='secret')
-
- def tearDown(self):
- self.client.logout()
-
- def test_tags(self):
- response = self.client.get('/test_admin/admin/doc/tags/')
-
- # The builtin tag group exists
-                self.assertContains(response, "<h2>Built-in tags</h2>", count=2)
-
- # A builtin tag exists in both the index and detail
- self.assertContains(response, '