From fc374976e51881d47203807e9843b4b2b3a0674b Mon Sep 17 00:00:00 2001
From: Russell Keith-Magee <russell@keith-magee.com>
Date: Thu, 5 Aug 2010 02:13:32 +0000
Subject: [PATCH] Fixed #13946 -- Modified the database cache backend to use
 the database router to determine availability of the cache table. Thanks to
 tiemonster for the report.

git-svn-id: http://code.djangoproject.com/svn/django/trunk@13473 bcc190cf-cafb-0310-a4f2-bffc1f526a37
---
 django/core/cache/backends/db.py | 99 ++++++++++++++++++++++----------
 django/db/backends/creation.py   |  8 ++-
 docs/topics/cache.txt            | 43 ++++++++++++++
 3 files changed, 116 insertions(+), 34 deletions(-)

diff --git a/django/core/cache/backends/db.py b/django/core/cache/backends/db.py
index 3398e6a85b..1300dd4b66 100644
--- a/django/core/cache/backends/db.py
+++ b/django/core/cache/backends/db.py
@@ -1,7 +1,7 @@
 "Database cache backend."
 
 from django.core.cache.backends.base import BaseCache
-from django.db import connection, transaction, DatabaseError
+from django.db import connections, router, transaction, DatabaseError
 import base64, time
 from datetime import datetime
 try:
@@ -9,10 +9,31 @@ try:
 except ImportError:
     import pickle
 
+class Options(object):
+    """A class that will quack like a Django model _meta class.
+
+    This allows cache operations to be controlled by the router
+    """
+    def __init__(self, table):
+        self.db_table = table
+        self.app_label = 'django_cache'
+        self.module_name = 'cacheentry'
+        self.verbose_name = 'cache entry'
+        self.verbose_name_plural = 'cache entries'
+        self.object_name = 'CacheEntry'
+        self.abstract = False
+        self.managed = True
+        self.proxy = False
+
 class CacheClass(BaseCache):
     def __init__(self, table, params):
         BaseCache.__init__(self, params)
-        self._table = connection.ops.quote_name(table)
+        self._table = table
+
+        class CacheEntry(object):
+            _meta = Options(table)
+        self.cache_model_class = CacheEntry
+
         max_entries = params.get('max_entries', 300)
         try:
             self._max_entries = int(max_entries)
@@ -25,17 +46,22 @@ class CacheClass(BaseCache):
             self._cull_frequency = 3
 
     def get(self, key, default=None):
-        cursor = connection.cursor()
-        cursor.execute("SELECT cache_key, value, expires FROM %s WHERE cache_key = %%s" % self._table, [key])
+        db = router.db_for_read(self.cache_model_class)
+        table = connections[db].ops.quote_name(self._table)
+        cursor = connections[db].cursor()
+
+        cursor.execute("SELECT cache_key, value, expires FROM %s WHERE cache_key = %%s" % table, [key])
         row = cursor.fetchone()
         if row is None:
             return default
         now = datetime.now()
         if row[2] < now:
-            cursor.execute("DELETE FROM %s WHERE cache_key = %%s" % self._table, [key])
-            transaction.commit_unless_managed()
+            db = router.db_for_write(self.cache_model_class)
+            cursor = connections[db].cursor()
+            cursor.execute("DELETE FROM %s WHERE cache_key = %%s" % table, [key])
+            transaction.commit_unless_managed(using=db)
             return default
-        value = connection.ops.process_clob(row[1])
+        value = connections[db].ops.process_clob(row[1])
         return pickle.loads(base64.decodestring(value))
 
     def set(self, key, value, timeout=None):
@@ -47,56 +73,68 @@ class CacheClass(BaseCache):
     def _base_set(self, mode, key, value, timeout=None):
         if timeout is None:
             timeout = self.default_timeout
-        cursor = connection.cursor()
-        cursor.execute("SELECT COUNT(*) FROM %s" % self._table)
+        db = router.db_for_write(self.cache_model_class)
+        table = connections[db].ops.quote_name(self._table)
+        cursor = connections[db].cursor()
+
+        cursor.execute("SELECT COUNT(*) FROM %s" % table)
         num = cursor.fetchone()[0]
         now = datetime.now().replace(microsecond=0)
         exp = datetime.fromtimestamp(time.time() + timeout).replace(microsecond=0)
         if num > self._max_entries:
-            self._cull(cursor, now)
+            self._cull(db, cursor, now)
         encoded = base64.encodestring(pickle.dumps(value, 2)).strip()
-        cursor.execute("SELECT cache_key, expires FROM %s WHERE cache_key = %%s" % self._table, [key])
+        cursor.execute("SELECT cache_key, expires FROM %s WHERE cache_key = %%s" % table, [key])
         try:
             result = cursor.fetchone()
             if result and (mode == 'set' or
                     (mode == 'add' and result[1] < now)):
-                cursor.execute("UPDATE %s SET value = %%s, expires = %%s WHERE cache_key = %%s" % self._table,
-                               [encoded, connection.ops.value_to_db_datetime(exp), key])
+                cursor.execute("UPDATE %s SET value = %%s, expires = %%s WHERE cache_key = %%s" % table,
+                               [encoded, connections[db].ops.value_to_db_datetime(exp), key])
             else:
-                cursor.execute("INSERT INTO %s (cache_key, value, expires) VALUES (%%s, %%s, %%s)" % self._table,
-                               [key, encoded, connection.ops.value_to_db_datetime(exp)])
+                cursor.execute("INSERT INTO %s (cache_key, value, expires) VALUES (%%s, %%s, %%s)" % table,
+                               [key, encoded, connections[db].ops.value_to_db_datetime(exp)])
         except DatabaseError:
             # To be threadsafe, updates/inserts are allowed to fail silently
-            transaction.rollback_unless_managed()
+            transaction.rollback_unless_managed(using=db)
             return False
         else:
-            transaction.commit_unless_managed()
+            transaction.commit_unless_managed(using=db)
             return True
 
     def delete(self, key):
-        cursor = connection.cursor()
-        cursor.execute("DELETE FROM %s WHERE cache_key = %%s" % self._table, [key])
-        transaction.commit_unless_managed()
+        db = router.db_for_write(self.cache_model_class)
+        table = connections[db].ops.quote_name(self._table)
+        cursor = connections[db].cursor()
+
+        cursor.execute("DELETE FROM %s WHERE cache_key = %%s" % table, [key])
+        transaction.commit_unless_managed(using=db)
 
     def has_key(self, key):
+        db = router.db_for_read(self.cache_model_class)
+        table = connections[db].ops.quote_name(self._table)
+        cursor = connections[db].cursor()
+
         now = datetime.now().replace(microsecond=0)
-        cursor = connection.cursor()
-        cursor.execute("SELECT cache_key FROM %s WHERE cache_key = %%s and expires > %%s" % self._table,
-                       [key, connection.ops.value_to_db_datetime(now)])
+        cursor.execute("SELECT cache_key FROM %s WHERE cache_key = %%s and expires > %%s" % table,
+                       [key, connections[db].ops.value_to_db_datetime(now)])
         return cursor.fetchone() is not None
 
-    def _cull(self, cursor, now):
+    def _cull(self, db, cursor, now):
         if self._cull_frequency == 0:
             self.clear()
         else:
-            cursor.execute("DELETE FROM %s WHERE expires < %%s" % self._table,
-                           [connection.ops.value_to_db_datetime(now)])
-            cursor.execute("SELECT COUNT(*) FROM %s" % self._table)
+            table = connections[db].ops.quote_name(self._table)
+            cursor.execute("DELETE FROM %s WHERE expires < %%s" % table,
+                           [connections[db].ops.value_to_db_datetime(now)])
+            cursor.execute("SELECT COUNT(*) FROM %s" % table)
             num = cursor.fetchone()[0]
             if num > self._max_entries:
-                cursor.execute("SELECT cache_key FROM %s ORDER BY cache_key LIMIT 1 OFFSET %%s" % self._table, [num / self._cull_frequency])
-                cursor.execute("DELETE FROM %s WHERE cache_key < %%s" % self._table, [cursor.fetchone()[0]])
+                cursor.execute("SELECT cache_key FROM %s ORDER BY cache_key LIMIT 1 OFFSET %%s" % table, [num / self._cull_frequency])
+                cursor.execute("DELETE FROM %s WHERE cache_key < %%s" % table, [cursor.fetchone()[0]])
 
     def clear(self):
-        cursor = connection.cursor()
-        cursor.execute('DELETE FROM %s' % self._table)
+        db = router.db_for_write(self.cache_model_class)
+        table = connections[db].ops.quote_name(self._table)
+        cursor = connections[db].cursor()
+        cursor.execute('DELETE FROM %s' % table)
diff --git a/django/db/backends/creation.py b/django/db/backends/creation.py
index 492ac1e3e9..be9b6fc91d 100644
--- a/django/db/backends/creation.py
+++ b/django/db/backends/creation.py
@@ -353,9 +353,11 @@ class BaseDatabaseCreation(object):
         call_command('syncdb', verbosity=verbosity, interactive=False, database=self.connection.alias)
 
         if settings.CACHE_BACKEND.startswith('db://'):
-            from django.core.cache import parse_backend_uri
-            _, cache_name, _ = parse_backend_uri(settings.CACHE_BACKEND)
-            call_command('createcachetable', cache_name)
+            from django.core.cache import parse_backend_uri, cache
+            from django.db import router
+            if router.allow_syncdb(self.connection.alias, cache.cache_model_class):
+                _, cache_name, _ = parse_backend_uri(settings.CACHE_BACKEND)
+                call_command('createcachetable', cache_name, database=self.connection.alias)
 
         # Get a cursor (even though we don't need one yet). This has
         # the side effect of initializing the test database.
diff --git a/docs/topics/cache.txt b/docs/topics/cache.txt
index 9dedbcf3b9..5e263aa543 100644
--- a/docs/topics/cache.txt
+++ b/docs/topics/cache.txt
@@ -136,6 +136,49 @@ settings file. You can't use a different database backend for your cache table.
 
 Database caching works best if you've got a fast, well-indexed database server.
 
+Database caching and multiple databases
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+If you use database caching with multiple databases, you'll also need
+to set up routing instructions for your database cache table. For the
+purposes of routing, the database cache table appears as a model named
+``CacheEntry``, in an application named ``django_cache``. This model
+won't appear in the models cache, but the model details can be used
+for routing purposes.
+
+For example, the following router would direct all cache read
+operations to ``cache_slave``, and all write operations to
+``cache_master``. The cache table will only be synchronized onto
+``cache_master``::
+
+    class CacheRouter(object):
+        """A router to control all database cache operations"""
+
+        def db_for_read(self, model, **hints):
+            "All cache read operations go to the slave"
+            if model._meta.app_label in ('django_cache',):
+                return 'cache_slave'
+            return None
+
+        def db_for_write(self, model, **hints):
+            "All cache write operations go to master"
+            if model._meta.app_label in ('django_cache',):
+                return 'cache_master'
+            return None
+
+        def allow_syncdb(self, db, model):
+            "Only synchronize the cache model on master"
+            if model._meta.app_label in ('django_cache',):
+                return db == 'cache_master'
+            return None
+
+If you don't specify routing directions for the database cache model,
+the cache backend will use the ``default`` database.
+
+Of course, if you don't use the database cache backend, you don't need
+to worry about providing routing instructions for the database cache
+model.
+
 Filesystem caching
 ------------------