
Fixed #21171 -- Avoided starting a transaction when a single (or already atomic) query is executed.

Checked the following locations:

 * Model.save(): If there are parents involved, take the safe way and use
   transactions since this should be an all or nothing operation.

   If the model has no parents:

     * Signals are executed before and after the previously existing
       transaction -- they have never been part of the transaction.

     * If `force_insert` is set, only one query is executed -> atomic
       by definition, so no transaction is needed.

     * The same applies to `force_update`.

     * If a primary key is set and no `force_*` flag is given, Django first
       tries an UPDATE and, if that matches zero rows, falls back to an
       INSERT. The first case is completely safe (a single query). In the
       second case a transaction should not produce different results, since
       the UPDATE is then basically a no-op (might miss something here,
       though). See the sketch after this list.

 * QuerySet.update(): no signals issued, single query -> no transaction
   needed.

 * Model/Collector.delete(): This one is fun because it does many things
   at once.

   Most importantly though: It does send signals as part of the
   transaction, so for maximum backwards compatibility we need to be
   conservative.

   To ensure maximum compatibility, the transaction here is removed only
   if all of the following hold true:

     * A single instance is being deleted.
     * There are no signal handlers attached to that instance.
     * There are no deletions/updates to cascade.
     * There are no parents which also need deletion.
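
A minimal sketch of the cases above (editorial, not part of the patch). It
assumes the `Reporter` model used by the new tests in this commit and the
default database alias:

    from django.db import transaction
    from .models import Reporter  # model used by the new tests below

    # Autocommit, no parent models, force_insert=True: a single INSERT is
    # issued and no transaction (or savepoint) is started around it.
    r = Reporter()
    r.save(force_insert=True)

    # A single instance with no signal handlers, no cascades and no parents
    # is fast-deleted with one DELETE query, again without a transaction.
    r.delete()

    # Inside an explicit atomic() block nothing changes: the queries simply
    # join the surrounding transaction, which is marked for rollback if one
    # of them raises.
    with transaction.atomic():
        Reporter.objects.create()
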
Florian Apolloner 2018-09-27 09:45:10 +02:00 committed by Florian Apolloner
parent 38f3de86bd
commit bc7dd8490b
7 changed files with 128 additions and 25 deletions

View File

@@ -7,6 +7,7 @@ import datetime
import decimal
import os
import platform
from contextlib import contextmanager
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
@@ -58,6 +59,24 @@ from .utils import Oracle_datetime # NOQA isort:skip
from .validation import DatabaseValidation # NOQA isort:skip
@contextmanager
def wrap_oracle_errors():
    try:
        yield
    except Database.DatabaseError as e:
        # cx_Oracle raises a cx_Oracle.DatabaseError exception with the
        # following attributes and values:
        #  code = 2091
        #  message = 'ORA-02091: transaction rolled back
        #             'ORA-02291: integrity constraint (TEST_DJANGOTEST.SYS
        #             _C00102056) violated - parent key not found'
        # Convert that case to Django's IntegrityError exception.
        x = e.args[0]
        if hasattr(x, 'code') and hasattr(x, 'message') and x.code == 2091 and 'ORA-02291' in x.message:
            raise utils.IntegrityError(*tuple(e.args))
        raise
class _UninitializedOperatorsDescriptor:
def __get__(self, instance, cls=None):
@@ -255,21 +274,8 @@ class DatabaseWrapper(BaseDatabaseWrapper):
def _commit(self):
if self.connection is not None:
try:
with wrap_oracle_errors():
return self.connection.commit()
except Database.DatabaseError as e:
# cx_Oracle raises a cx_Oracle.DatabaseError exception
# with the following attributes and values:
# code = 2091
# message = 'ORA-02091: transaction rolled back
# 'ORA-02291: integrity constraint (TEST_DJANGOTEST.SYS
# _C00102056) violated - parent key not found'
# We convert that particular case to our IntegrityError exception
x = e.args[0]
if hasattr(x, 'code') and hasattr(x, 'message') \
and x.code == 2091 and 'ORA-02291' in x.message:
raise utils.IntegrityError(*tuple(e.args))
raise
# Oracle doesn't support releasing savepoints. But we fake them when query
# logging is enabled to keep query counts consistent with other backends.
@@ -500,7 +506,8 @@ class FormatStylePlaceholderCursor:
def execute(self, query, params=None):
query, params = self._fix_for_params(query, params, unify_by_values=True)
self._guess_input_sizes([params])
return self.cursor.execute(query, self._param_generator(params))
with wrap_oracle_errors():
return self.cursor.execute(query, self._param_generator(params))
def executemany(self, query, params=None):
if not params:
@@ -513,7 +520,8 @@ class FormatStylePlaceholderCursor:
# more than once, we can't make it lazy by using a generator
formatted = [firstparams] + [self._format_params(p) for p in params_iter]
self._guess_input_sizes(formatted)
return self.cursor.executemany(query, [self._param_generator(p) for p in formatted])
with wrap_oracle_errors():
return self.cursor.executemany(query, [self._param_generator(p) for p in formatted])
def close(self):
try:

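Editorial note (not part of the patch): factoring the error translation into
`wrap_oracle_errors()` and applying it to `execute()`/`executemany()` as well
as `_commit()` means callers keep getting Django's `IntegrityError` for the
ORA-02091/ORA-02291 case regardless of which wrapped call cx_Oracle raises it
from. A rough sketch, with `child` as a hypothetical instance whose parent row
is missing:

    from django.db import IntegrityError, transaction

    try:
        with transaction.atomic():
            child.save()  # hypothetical instance violating a FK constraint
    except IntegrityError:
        # The cx_Oracle DatabaseError is translated by wrap_oracle_errors(),
        # whether it surfaces from execute()/executemany() or from commit().
        print('parent key not found')
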
View File

@@ -749,7 +749,12 @@ class Model(metaclass=ModelBase):
sender=origin, instance=self, raw=raw, using=using,
update_fields=update_fields,
)
with transaction.atomic(using=using, savepoint=False):
# A transaction isn't needed if one query is issued.
if meta.parents:
context_manager = transaction.atomic(using=using, savepoint=False)
else:
context_manager = transaction.mark_for_rollback_on_error(using=using)
with context_manager:
parent_inserted = False
if not raw:
parent_inserted = self._save_parents(cls, using, update_fields)

View File

@@ -118,8 +118,8 @@ class Collector:
def can_fast_delete(self, objs, from_field=None):
"""
Determine if the objects in the given queryset-like can be
fast-deleted. This can be done if there are no cascades, no
Determine if the objects in the given queryset-like or single object
can be fast-deleted. This can be done if there are no cascades, no
parents and no signal listeners for the object class.
The 'from_field' tells where we are coming from - we need this to
@@ -129,9 +129,12 @@
"""
if from_field and from_field.remote_field.on_delete is not CASCADE:
return False
if not (hasattr(objs, 'model') and hasattr(objs, '_raw_delete')):
if hasattr(objs, '_meta'):
model = type(objs)
elif hasattr(objs, 'model') and hasattr(objs, '_raw_delete'):
model = objs.model
else:
return False
model = objs.model
if (signals.pre_delete.has_listeners(model) or
signals.post_delete.has_listeners(model) or
signals.m2m_changed.has_listeners(model)):
@@ -147,7 +150,7 @@
for related in get_candidate_relations_to_delete(opts)
) and (
# Something like generic foreign key.
not any(hasattr(field, 'bulk_related_objects') for field in model._meta.private_fields)
not any(hasattr(field, 'bulk_related_objects') for field in opts.private_fields)
)
)
@@ -269,6 +272,14 @@
# number of objects deleted for each model label
deleted_counter = Counter()
        # Optimize for the case with a single obj and no dependencies
        if len(self.data) == 1 and len(instances) == 1:
            instance = list(instances)[0]
            if self.can_fast_delete(instance):
                with transaction.mark_for_rollback_on_error():
                    count = sql.DeleteQuery(model).delete_batch([instance.pk], self.using)
                return count, {model._meta.label: count}
with transaction.atomic(using=self.using, savepoint=False):
# send pre_delete signals
for model, obj in self.instances_with_model():

View File

@@ -727,7 +727,7 @@ class QuerySet:
query.add_update_values(kwargs)
# Clear any annotations so that they won't be present in subqueries.
query._annotations = None
with transaction.atomic(using=self.db, savepoint=False):
with transaction.mark_for_rollback_on_error(using=self.db):
rows = query.get_compiler(self.db).execute_sql(CURSOR)
self._result_cache = None
return rows

View File

@@ -1,4 +1,4 @@
from contextlib import ContextDecorator
from contextlib import ContextDecorator, contextmanager
from django.db import (
DEFAULT_DB_ALIAS, DatabaseError, Error, ProgrammingError, connections,
@@ -92,6 +92,34 @@ def set_rollback(rollback, using=None):
return get_connection(using).set_rollback(rollback)
@contextmanager
def mark_for_rollback_on_error(using=None):
    """
    Internal low-level utility to mark a transaction as "needs rollback" when
    an exception is raised while not enforcing the enclosed block to be in a
    transaction. This is needed by Model.save() and friends to avoid starting a
    transaction when in autocommit mode and a single query is executed.

    It's equivalent to:

        connection = get_connection(using)
        if connection.get_autocommit():
            yield
        else:
            with transaction.atomic(using=using, savepoint=False):
                yield

    but it uses low-level utilities to avoid performance overhead.
    """
    try:
        yield
    except Exception:
        connection = get_connection(using)
        if connection.in_atomic_block:
            connection.needs_rollback = True
        raise
def on_commit(func, using=None):
"""
Register `func` to be called when the current transaction is committed.

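A short editorial usage sketch of the helper added above (not part of the
patch; `run_single_query` is a hypothetical stand-in for a single UPDATE or
DELETE statement). The tests added at the end of this commit exercise the
same two situations:

    from django.db import transaction

    # In autocommit mode the block is pass-through: no transaction is started
    # and an exception simply propagates.
    with transaction.mark_for_rollback_on_error():
        run_single_query()

    # Inside atomic() an escaping exception marks the surrounding transaction
    # as "needs rollback", matching the behavior of atomic(savepoint=False).
    with transaction.atomic():
        with transaction.mark_for_rollback_on_error():
            run_single_query()
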
View File

@@ -209,6 +209,11 @@ Models
* The new :meth:`.QuerySet.bulk_update` method allows efficiently updating
specific fields on multiple model instances.
* Django no longer always starts a transaction when a single query is being
performed, such as ``Model.save()``, ``QuerySet.update()``, and
``Model.delete()``. This improves the performance of autocommit by reducing
the number of database round trips.
Requests and Responses
~~~~~~~~~~~~~~~~~~~~~~

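An editorial illustration of the release-note entry above (model and field
names are hypothetical): a lone statement is atomic on its own, so dropping
the implicit transaction changes nothing for callers, while grouping several
operations still requires an explicit `atomic()` block, exactly as before.

    from django.db import transaction

    # report and Entry are hypothetical; the point is the explicit atomic().
    with transaction.atomic():
        report.save()                                              # one query
        Entry.objects.filter(report=report).update(archived=True)  # one query
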
View File

@@ -399,7 +399,7 @@ class AtomicMySQLTests(TransactionTestCase):
class AtomicMiscTests(TransactionTestCase):
available_apps = []
available_apps = ['transactions']
def test_wrap_callable_instance(self):
"""#20028 -- Atomic must support wrapping callable instances."""
@@ -433,6 +433,52 @@ class AtomicMiscTests(TransactionTestCase):
# This is expected to fail because the savepoint no longer exists.
connection.savepoint_rollback(sid)
    def test_mark_for_rollback_on_error_in_transaction(self):
        with transaction.atomic(savepoint=False):
            # Swallow the intentional error raised.
            with self.assertRaisesMessage(Exception, "Oops"):
                # Wrap in `mark_for_rollback_on_error` to check if the transaction is marked broken.
                with transaction.mark_for_rollback_on_error():
                    # Ensure that we are still in a good state.
                    self.assertFalse(transaction.get_rollback())
                    raise Exception("Oops")

            # Ensure that `mark_for_rollback_on_error` marked the transaction as broken …
            self.assertTrue(transaction.get_rollback())

            # … and further queries fail.
            msg = "You can't execute queries until the end of the 'atomic' block."
            with self.assertRaisesMessage(transaction.TransactionManagementError, msg):
                Reporter.objects.create()

        # Transaction errors are reset at the end of a transaction, so this should just work.
        Reporter.objects.create()

    def test_mark_for_rollback_on_error_in_autocommit(self):
        self.assertTrue(transaction.get_autocommit())

        # Swallow the intentional error raised.
        with self.assertRaisesMessage(Exception, "Oops"):
            # Wrap in `mark_for_rollback_on_error` to check if the transaction is marked broken.
            with transaction.mark_for_rollback_on_error():
                # Ensure that we are still in a good state.
                self.assertFalse(transaction.get_connection().needs_rollback)
                raise Exception("Oops")

        # Ensure that `mark_for_rollback_on_error` did not mark the transaction
        # as broken, since we are in autocommit mode …
        self.assertFalse(transaction.get_connection().needs_rollback)

        # … and further queries work nicely.
        Reporter.objects.create()
@skipIfDBFeature('autocommits_when_autocommit_is_off')
class NonAutocommitTests(TransactionTestCase):