diff --git a/TODO.TXT b/TODO.TXT
index c525bccdec..85081d548e 100644
--- a/TODO.TXT
+++ b/TODO.TXT
@@ -7,7 +7,6 @@ that need to be done. I'm trying to be as granular as possible.
 
 2) Update all old references to ``settings.DATABASE_*`` to reference
    ``settings.DATABASES``. This includes the following locations
-    * howto/custom-model-fields -- defered since it refers to custom model fields
     * internals/contributing -- still needs an update on TEST_*
     * ref/settings -- needs to be upddated for TEST_*
     * topics/testing -- needs update for the TEST_* settings, plus test refactor
@@ -24,22 +23,17 @@ that need to be done. I'm trying to be as granular as possible.
    ``--database`` flag to overide that?
 
    These items will be fixed pending both community consensus, and the API
-   that will go in that's actually necessary for these to happen.
+   that will go in that's actually necessary for these to happen. Due to
+   internal APIs, ``loaddata`` will probably need an update to load data
+   into a specific DB.
 
 4) Rig up the test harness to work with multiple databases. This includes:
-   * Figure out how we can actually test multiple databases. If the user has
-     more than one database in ``settings.DATABASES`` we can just use the test
-     database for each of them. Otherwise we are going to have to make some
-     assumptions. Either just go for SQLite, since that's going to be easiest
-     (and going forward it will be included in all versions of Python we work
-     with), or we can try to create a database with ``test_2_`` prefix.
-     Similar to how we use a ``test_`` prefix by default.
-5) Add the ``using`` Meta option. Tests and docs(these are to be assumed at
-   each stage from here on out).
-6) Add the ``using`` method to ``QuerySet``. This will more or less "just
-   work" across multiple databases that use the same backend. However, it
-   will fail gratuitously when trying to use 2 different backends.
+   * The current strategy is to test on N dbs, where N is however many the
+     user defines, ensuring the data all stays separate and no exceptions
+     are raised. Practically speaking this means we're only going to have
+     good coverage if we write a lot of tests that can break. That's life.
+
 7) Remove any references to the global ``django.db.connection`` object in
    the SQL creation process. This includes(but is probably not limited to):
@@ -57,6 +51,13 @@ that need to be done. I'm trying to be as granular as possible.
    need to be totally refactored. There's a ticket to at least move that
    raw SQL and execution to ``Query``/``QuerySet`` so hopefully that makes
    it in before I need to tackle this.
+
+5) Add the ``using`` Meta option. Tests and docs(these are to be assumed at
+   each stage from here on out).
+6) Add the ``using`` method to ``QuerySet``. This will more or less "just
+   work" across multiple databases that use the same backend. However, it
+   will fail gratuitously when trying to use 2 different backends.
+
 8) Implement some way to create a new ``Query`` for a different backend
    when we switch.
    There are several checks against ``self.connection`` prior to SQL
    construction, so we either need to defer all these(which will be
diff --git a/django/core/management/commands/syncdb.py b/django/core/management/commands/syncdb.py
index bc40e3cd8c..76eb9099dd 100644
--- a/django/core/management/commands/syncdb.py
+++ b/django/core/management/commands/syncdb.py
@@ -33,7 +33,7 @@ class Command(NoArgsCommand):
         if not options['database']:
             dbs = connections.all()
         else:
-            dbs = [options['database']]
+            dbs = [connections[options['database']]]
 
         for connection in dbs:
             # Import the 'management' module within each installed app, to register
@@ -154,6 +154,9 @@ class Command(NoArgsCommand):
                     else:
                         transaction.commit_unless_managed()
 
        # Install the 'initial_data' fixture, using format discovery
+        # FIXME: we only load the fixture data for one DB right now, since we
+        # can't control which DB it goes into; once we can control this we
+        # should move it back into the DB loop
         from django.core.management import call_command
         call_command('loaddata', 'initial_data', verbosity=verbosity)
diff --git a/django/db/backends/creation.py b/django/db/backends/creation.py
index 2f8677f153..fe945baff7 100644
--- a/django/db/backends/creation.py
+++ b/django/db/backends/creation.py
@@ -40,7 +40,7 @@ class BaseDatabaseCreation(object):
         pending_references = {}
         qn = self.connection.ops.quote_name
         for f in opts.local_fields:
-            col_type = f.db_type()
+            col_type = f.db_type(self.connection)
             tablespace = f.db_tablespace or opts.db_tablespace
             if col_type is None:
                 # Skip ManyToManyFields, because they're not represented as
@@ -68,7 +68,7 @@ class BaseDatabaseCreation(object):
             table_output.append(' '.join(field_output))
         if opts.order_with_respect_to:
             table_output.append(style.SQL_FIELD(qn('_order')) + ' ' + \
-                style.SQL_COLTYPE(models.IntegerField().db_type()))
+                style.SQL_COLTYPE(models.IntegerField().db_type(self.connection)))
         for field_constraints in opts.unique_together:
             table_output.append(style.SQL_KEYWORD('UNIQUE') + ' (%s)' % \
                 ", ".join([style.SQL_FIELD(qn(opts.get_field(f).column)) for f in field_constraints]))
@@ -166,7 +166,7 @@ class BaseDatabaseCreation(object):
             style.SQL_TABLE(qn(f.m2m_db_table())) + ' (']
         table_output.append('    %s %s %s%s,' %
             (style.SQL_FIELD(qn('id')),
-            style.SQL_COLTYPE(models.AutoField(primary_key=True).db_type()),
+            style.SQL_COLTYPE(models.AutoField(primary_key=True).db_type(self.connection)),
             style.SQL_KEYWORD('NOT NULL PRIMARY KEY'),
             tablespace_sql))
@@ -211,14 +211,14 @@ class BaseDatabaseCreation(object):
         table_output = [
             '    %s %s %s %s (%s)%s,' %
                 (style.SQL_FIELD(qn(field.m2m_column_name())),
-                style.SQL_COLTYPE(models.ForeignKey(model).db_type()),
+                style.SQL_COLTYPE(models.ForeignKey(model).db_type(self.connection)),
                 style.SQL_KEYWORD('NOT NULL REFERENCES'),
                 style.SQL_TABLE(qn(opts.db_table)),
                 style.SQL_FIELD(qn(opts.pk.column)),
                 self.connection.ops.deferrable_sql()),
             '    %s %s %s %s (%s)%s,' %
                 (style.SQL_FIELD(qn(field.m2m_reverse_name())),
-                style.SQL_COLTYPE(models.ForeignKey(field.rel.to).db_type()),
+                style.SQL_COLTYPE(models.ForeignKey(field.rel.to).db_type(self.connection)),
                 style.SQL_KEYWORD('NOT NULL REFERENCES'),
                style.SQL_TABLE(qn(field.rel.to._meta.db_table)),
                style.SQL_FIELD(qn(field.rel.to._meta.pk.column)),
@@ -310,7 +310,7 @@ class BaseDatabaseCreation(object):
             output.append(ds)
         return output
 
-    def create_test_db(self, verbosity=1, autoclobber=False):
+    def create_test_db(self, verbosity=1, autoclobber=False, alias=''):
         """
         Creates a test database, prompting the user for confirmation if the
         database already exists. Returns the name of the test database created.
@@ -325,7 +325,10 @@ class BaseDatabaseCreation(object):
             can_rollback = self._rollback_works()
             self.connection.settings_dict["DATABASE_SUPPORTS_TRANSACTIONS"] = can_rollback
 
-        call_command('syncdb', verbosity=verbosity, interactive=False)
+        # FIXME we end up loading the same fixture into the default DB for each
+        # DB we have, this causes various test failures, but can't really be
+        # fixed until we have an API for saving to a specific DB
+        call_command('syncdb', verbosity=verbosity, interactive=False, database=alias)
 
         if settings.CACHE_BACKEND.startswith('db://'):
             from django.core.cache import parse_backend_uri
diff --git a/django/db/backends/mysql/creation.py b/django/db/backends/mysql/creation.py
index 76e743d36a..5f60611871 100644
--- a/django/db/backends/mysql/creation.py
+++ b/django/db/backends/mysql/creation.py
@@ -48,11 +48,11 @@ class DatabaseCreation(BaseDatabaseCreation):
         table_output = [
             '    %s %s %s,' %
                 (style.SQL_FIELD(qn(field.m2m_column_name())),
-                style.SQL_COLTYPE(models.ForeignKey(model).db_type()),
+                style.SQL_COLTYPE(models.ForeignKey(model).db_type(self.connection)),
                 style.SQL_KEYWORD('NOT NULL')),
             '    %s %s %s,' %
                 (style.SQL_FIELD(qn(field.m2m_reverse_name())),
-                style.SQL_COLTYPE(models.ForeignKey(field.rel.to).db_type()),
+                style.SQL_COLTYPE(models.ForeignKey(field.rel.to).db_type(self.connection)),
                 style.SQL_KEYWORD('NOT NULL'))
         ]
         deferred = [
diff --git a/django/db/models/fields/__init__.py b/django/db/models/fields/__init__.py
index a3007a7f66..dc57e9401a 100644
--- a/django/db/models/fields/__init__.py
+++ b/django/db/models/fields/__init__.py
@@ -118,10 +118,10 @@ class Field(object):
         """
         return value
 
-    def db_type(self):
+    def db_type(self, connection):
         """
-        Returns the database column data type for this field, taking into
-        account the DATABASE_ENGINE setting.
+        Returns the database column data type for this field, for the provided
+        connection.
         """
         # The default implementation of this method looks at the
         # backend-specific DATA_TYPES dictionary, looking up the field by its
diff --git a/django/db/models/fields/related.py b/django/db/models/fields/related.py
index 419695b74b..4d7d771b3f 100644
--- a/django/db/models/fields/related.py
+++ b/django/db/models/fields/related.py
@@ -731,7 +731,7 @@ class ForeignKey(RelatedField, Field):
         defaults.update(kwargs)
         return super(ForeignKey, self).formfield(**defaults)
 
-    def db_type(self):
+    def db_type(self, connection):
         # The database column type of a ForeignKey is the column type
         # of the field to which it points. An exception is if the ForeignKey
         # points to an AutoField/PositiveIntegerField/PositiveSmallIntegerField,
@@ -743,8 +743,8 @@ class ForeignKey(RelatedField, Field):
                 (not connection.features.related_fields_match_type and
                 isinstance(rel_field, (PositiveIntegerField,
                                        PositiveSmallIntegerField)))):
-            return IntegerField().db_type()
-        return rel_field.db_type()
+            return IntegerField().db_type(connection)
+        return rel_field.db_type(connection)
 
 class OneToOneField(ForeignKey):
     """
@@ -954,8 +954,7 @@ class ManyToManyField(RelatedField, Field):
             defaults['initial'] = [i._get_pk_val() for i in initial]
         return super(ManyToManyField, self).formfield(**defaults)
 
-    def db_type(self):
+    def db_type(self, connection):
         # A ManyToManyField is not represented by a single column,
         # so return None.
         return None
-
diff --git a/django/db/models/sql/where.py b/django/db/models/sql/where.py
index ec0545ca5b..b27bc992a6 100644
--- a/django/db/models/sql/where.py
+++ b/django/db/models/sql/where.py
@@ -267,7 +267,9 @@ class Constraint(object):
         try:
             if self.field:
                 params = self.field.get_db_prep_lookup(lookup_type, value)
-                db_type = self.field.db_type()
+                # FIXME: we're using the global connection object here; once a
+                # WhereNode knows its connection we should pass that through
+                db_type = self.field.db_type(connection)
             else:
                 # This branch is used at times when we add a comparison to NULL
                 # (we don't really want to waste time looking up the associated
@@ -278,4 +280,3 @@ class Constraint(object):
             raise EmptyShortCircuit
 
         return (self.alias, self.col, db_type), params
-
diff --git a/django/db/utils.py b/django/db/utils.py
index 384f26e7df..75fd7ed233 100644
--- a/django/db/utils.py
+++ b/django/db/utils.py
@@ -1,5 +1,6 @@
 import os
 
+from django.conf import settings
 from django.utils.importlib import import_module
 
 def load_backend(backend_name):
@@ -39,6 +40,10 @@ class ConnectionHandler(object):
         conn = self.databases[alias]
         conn.setdefault('DATABASE_ENGINE', 'dummy')
         conn.setdefault('DATABASE_OPTIONS', {})
+        conn.setdefault('TEST_DATABASE_CHARSET', None)
+        conn.setdefault('TEST_DATABASE_COLLATION', None)
+        conn.setdefault('TEST_DATABASE_NAME', None)
+        conn.setdefault('TIME_ZONE', settings.TIME_ZONE)
         for setting in ('DATABASE_NAME', 'DATABASE_USER', 'DATABASE_PASSWORD',
             'DATABASE_HOST', 'DATABASE_PORT'):
             conn.setdefault(setting, '')
@@ -54,5 +59,8 @@ class ConnectionHandler(object):
         self._connections[alias] = conn
         return conn
 
+    def __iter__(self):
+        return iter(self.databases)
+
     def all(self):
         return [self[alias] for alias in self.databases]
diff --git a/django/test/simple.py b/django/test/simple.py
index f3c48bae33..2f74f3b0c2 100644
--- a/django/test/simple.py
+++ b/django/test/simple.py
@@ -186,11 +186,15 @@ def run_tests(test_labels, verbosity=1, interactive=True, extra_tests=[]):
     suite = reorder_suite(suite, (TestCase,))
 
-    old_name = settings.DATABASE_NAME
-    from django.db import connection
-    connection.creation.create_test_db(verbosity, autoclobber=not interactive)
+    old_names = []
+    from django.db import connections
+    for alias in connections:
+        connection = connections[alias]
+        old_names.append((connection, connection.settings_dict['DATABASE_NAME']))
+        connection.creation.create_test_db(verbosity, autoclobber=not interactive, alias=alias)
 
     result = unittest.TextTestRunner(verbosity=verbosity).run(suite)
 
-    connection.creation.destroy_test_db(old_name, verbosity)
+    for connection, old_name in old_names:
+        connection.creation.destroy_test_db(old_name, verbosity)
 
     teardown_test_environment()
diff --git a/django/test/testcases.py b/django/test/testcases.py
index fa319b3c09..47e9368e6d 100644
--- a/django/test/testcases.py
+++ b/django/test/testcases.py
@@ -7,7 +7,7 @@ from django.conf import settings
 from django.core import mail
 from django.core.management import call_command
 from django.core.urlresolvers import clear_url_caches
-from django.db import transaction, connection
+from django.db import transaction, connections
 from django.http import QueryDict
 from django.test import _doctest as doctest
 from django.test.client import Client
@@ -427,6 +427,13 @@ class TransactionTestCase(unittest.TestCase):
                 (u"Template '%s' was used unexpectedly in rendering the"
                  u" response") % template_name)
 
+def connections_support_transactions():
+    """
+    Returns True if all connections support transactions. This is messy
+    because Python 2.4 doesn't have the ``any()`` and ``all()`` builtins.
+    """
+    return len([None for conn in connections.all() if conn.settings_dict['DATABASE_SUPPORTS_TRANSACTIONS']]) == len(connections.all())
+
 class TestCase(TransactionTestCase):
     """
     Does basically the same as TransactionTestCase, but surrounds every test
@@ -436,7 +443,7 @@ class TestCase(TransactionTestCase):
     """
 
     def _fixture_setup(self):
-        if not connection.settings_dict['DATABASE_SUPPORTS_TRANSACTIONS']:
+        if not connections_support_transactions():
             return super(TestCase, self)._fixture_setup()
 
         transaction.enter_transaction_management()
@@ -453,10 +460,11 @@ class TestCase(TransactionTestCase):
             })
 
     def _fixture_teardown(self):
-        if not connection.settings_dict['DATABASE_SUPPORTS_TRANSACTIONS']:
+        if not connections_support_transactions():
             return super(TestCase, self)._fixture_teardown()
 
         restore_transaction_methods()
         transaction.rollback()
         transaction.leave_transaction_management()
-        connection.close()
+        for connection in connections.all():
+            connection.close()
diff --git a/django/test/utils.py b/django/test/utils.py
index d34dd33d15..cffb83dca4 100644
--- a/django/test/utils.py
+++ b/django/test/utils.py
@@ -1,6 +1,5 @@
 import sys, time, os
 from django.conf import settings
-from django.db import connection
 from django.core import mail
 from django.test import signals
 from django.template import Template
diff --git a/docs/howto/custom-model-fields.txt b/docs/howto/custom-model-fields.txt
index 709ea49b87..bbad89acf9 100644
--- a/docs/howto/custom-model-fields.txt
+++ b/docs/howto/custom-model-fields.txt
@@ -263,10 +263,10 @@ approximately decreasing order of importance, so start from the top.
 Custom database types
 ~~~~~~~~~~~~~~~~~~~~~
 
-.. method:: db_type(self)
+.. method:: db_type(self, connection)
 
 Returns the database column data type for the :class:`~django.db.models.Field`,
-taking into account the current :setting:`DATABASE_ENGINE` setting.
+taking into account the provided connection object and its settings.
 
 Say you've created a PostgreSQL custom type called ``mytype``. You can use this
 field with Django by subclassing ``Field`` and implementing the :meth:`db_type`
@@ -275,7 +275,7 @@ method, like so::
     from django.db import models
 
     class MytypeField(models.Field):
-        def db_type(self):
+        def db_type(self, connection):
             return 'mytype'
 
 Once you have ``MytypeField``, you can use it in any model, just like any other
@@ -290,13 +290,13 @@ If you aim to build a database-agnostic application, you should account for
 differences in database column types. For example, the date/time column type in
 PostgreSQL is called ``timestamp``, while the same column in MySQL is called
 ``datetime``. The simplest way to handle this in a ``db_type()`` method is to
-import the Django settings module and check the :setting:`DATABASE_ENGINE` setting.
+check the ``connection.settings_dict['DATABASE_ENGINE']`` attribute.
+
 For example::
 
     class MyDateField(models.Field):
-        def db_type(self):
-            from django.conf import settings
-            if settings.DATABASE_ENGINE == 'mysql':
+        def db_type(self, connection):
+            if connection.settings_dict['DATABASE_ENGINE'] == 'mysql':
                 return 'datetime'
             else:
                 return 'timestamp'
@@ -304,7 +304,7 @@ For example::
 The :meth:`db_type` method is only called by Django when the framework
 constructs the ``CREATE TABLE`` statements for your application -- that is,
 when you first create your tables. It's not called at any other time, so it can
-afford to execute slightly complex code, such as the :setting:`DATABASE_ENGINE`
+afford to execute slightly complex code, such as the ``connection.settings_dict``
 check in the above example.
 
 Some database column types accept parameters, such as ``CHAR(25)``, where the
@@ -315,7 +315,7 @@ sense to have a ``CharMaxlength25Field``, shown here::
 
     # This is a silly example of hard-coded parameters.
     class CharMaxlength25Field(models.Field):
-        def db_type(self):
+        def db_type(self, connection):
             return 'char(25)'
 
     # In the model:
@@ -333,7 +333,7 @@ time -- i.e., when the class is instantiated. To do that, just implement
             self.max_length = max_length
             super(BetterCharField, self).__init__(*args, **kwargs)
 
-        def db_type(self):
+        def db_type(self, connection):
             return 'char(%s)' % self.max_length
 
     # In the model:
diff --git a/tests/regressiontests/multiple_database/models.py b/tests/regressiontests/multiple_database/models.py
index 8b13789179..5695ecdf95 100644
--- a/tests/regressiontests/multiple_database/models.py
+++ b/tests/regressiontests/multiple_database/models.py
@@ -1 +1,7 @@
+from django.db import models
 
+class Book(models.Model):
+    title = models.CharField(max_length=100)
+
+    def __unicode__(self):
+        return self.title
diff --git a/tests/regressiontests/multiple_database/tests.py b/tests/regressiontests/multiple_database/tests.py
index a55a9c7123..3a19469266 100644
--- a/tests/regressiontests/multiple_database/tests.py
+++ b/tests/regressiontests/multiple_database/tests.py
@@ -2,6 +2,8 @@ from django.conf import settings
 from django.db import connections
 from django.test import TestCase
 
+from models import Book
+
 class DatabaseSettingTestCase(TestCase):
     def setUp(self):
         settings.DATABASES['__test_db'] = {
@@ -15,3 +17,20 @@ class DatabaseSettingTestCase(TestCase):
     def test_db_connection(self):
         connections['default'].cursor()
         connections['__test_db'].cursor()
+
+class ConnectionTestCase(TestCase):
+    def test_queries(self):
+        for connection in connections.all():
+            qn = connection.ops.quote_name
+            cursor = connection.cursor()
+            cursor.execute("""INSERT INTO %(table)s (%(col)s) VALUES (%%s)""" % {
+                'table': qn(Book._meta.db_table),
+                'col': qn(Book._meta.get_field_by_name('title')[0].column),
+            }, ('Dive Into Python',))
+
+        for connection in connections.all():
+            qn = connection.ops.quote_name
+            cursor = connection.cursor()
+            cursor.execute("""SELECT * FROM %(table)s""" % {'table': qn(Book._meta.db_table)})
+            data = cursor.fetchall()
+            self.assertEqual('Dive Into Python', data[0][1])
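
For reference, TODO.TXT items 5 and 6 describe the planned ``using`` Meta
option and ``QuerySet.using()`` method, neither of which this patch
implements. A minimal sketch of how that API is expected to read, based only
on the TODO descriptions above (the ``Review`` model and the ``'legacy'``
alias are made up for illustration)::

    from django.db import models

    class Review(models.Model):
        source = models.CharField(max_length=100)

        class Meta:
            # Planned Meta option: route all queries for this model to the
            # 'legacy' alias from settings.DATABASES.
            using = 'legacy'

    # Planned per-queryset override; per item 6 this should "just work" when
    # both aliases use the same backend, and fail across different backends.
    Review.objects.using('default').filter(source__startswith='dive')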
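
The FIXMEs in ``syncdb.py`` and ``create_test_db()`` both point at the same
gap: ``loaddata`` cannot yet be pointed at a specific database. A
hypothetical sketch of what those call sites could look like once such an
option exists; the ``database`` keyword argument is an assumption, not an
API this patch provides::

    from django.core.management import call_command
    from django.db import connections

    # Once loaddata can target a single alias, the initial_data call can
    # move back inside the per-connection loop in syncdb.
    for alias in connections:
        call_command('loaddata', 'initial_data', verbosity=0, database=alias)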