Mirror of https://github.com/django/django.git (synced 2025-07-04 17:59:13 +00:00)
newforms-admin: Merged from trunk up to [7499].
git-svn-id: http://code.djangoproject.com/svn/django/branches/newforms-admin@7500 bcc190cf-cafb-0310-a4f2-bffc1f526a37
parent 738e6d986b
commit 886005078d
@@ -51,6 +51,7 @@ class BaseDatabaseFeatures(object):
     uses_case_insensitive_names = False
     uses_custom_query_class = False
     empty_fetchmany_value = []
+    update_can_self_select = True
 
 class BaseDatabaseOperations(object):
     """
@@ -63,6 +63,7 @@ class DatabaseFeatures(BaseDatabaseFeatures):
     autoindexes_primary_keys = False
     inline_fk_references = False
     empty_fetchmany_value = ()
+    update_can_self_select = False
 
 class DatabaseOperations(BaseDatabaseOperations):
     def date_extract_sql(self, lookup_type, field_name):
@@ -67,6 +67,7 @@ class DatabaseFeatures(BaseDatabaseFeatures):
     autoindexes_primary_keys = False
     inline_fk_references = False
     empty_fetchmany_value = ()
+    update_can_self_select = False
 
 class DatabaseOperations(BaseDatabaseOperations):
     def date_extract_sql(self, lookup_type, field_name):
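Both MySQL backend hunks above override the new update_can_self_select flag that BaseDatabaseFeatures defaults to True: MySQL cannot select from the table that the same statement is updating. A minimal sketch of how such a capability flag gets consulted, mirroring the condition added in the UpdateQuery hunk further down (the helper name is made up for illustration):

    from django.db import connection

    def needs_pre_select(related_updates):
        # Pre-fetch the ids when they are reused across several UPDATE
        # statements, or when the backend (e.g. MySQL) cannot select from
        # the table it is updating.
        return related_updates or not connection.features.update_can_self_select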
@@ -9,6 +9,7 @@ except ImportError:
 
 from django.db import get_creation_module
 from django.db.models import signals
+from django.db.models.query_utils import QueryWrapper
 from django.dispatch import dispatcher
 from django.conf import settings
 from django.core import validators
@@ -224,6 +225,9 @@ class Field(object):
 
     def get_db_prep_lookup(self, lookup_type, value):
         "Returns field's value prepared for database lookup."
+        if hasattr(value, 'as_sql'):
+            sql, params = value.as_sql()
+            return QueryWrapper(('(%s)' % sql), params)
         if lookup_type in ('exact', 'regex', 'iregex', 'gt', 'gte', 'lt', 'lte', 'month', 'day', 'search'):
             return [value]
         elif lookup_type in ('range', 'in'):
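With the new branch, a lookup value no longer has to be a literal: any object exposing an as_sql() method returning (sql, params), in practice a sql.Query, is passed through as a wrapped SQL fragment, which is what lets a nested queryset be used as the value of an __in lookup. A toy illustration of that duck-typing contract (the class is invented, not a Django API):

    class RawCondition(object):
        # Stand-in for any object that knows how to render itself as SQL.
        def as_sql(self):
            return "SELECT id FROM queries_tag WHERE name = %s", ['t1']

    value = RawCondition()
    if hasattr(value, 'as_sql'):
        sql, params = value.as_sql()
        print '(%s)' % sql, params    # the wrapped fragment plus its parameters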
@@ -28,6 +28,17 @@ class QuerySet(object):
     # PYTHON MAGIC METHODS #
     ########################
 
+    def __getstate__(self):
+        """
+        Allows the Queryset to be pickled.
+        """
+        # Force the cache to be fully populated.
+        len(self)
+
+        obj_dict = self.__dict__.copy()
+        obj_dict['_iter'] = None
+        return obj_dict
+
     def __repr__(self):
         return repr(list(self))
 
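The new __getstate__ first evaluates the queryset (len(self) fills the result cache) and then blanks out _iter, since a half-consumed iterator cannot be pickled, so the pickle carries results rather than a live cursor. A doctest-style sketch, assuming the Tag model from the regression tests further down:

    >>> import pickle
    >>> qs = Tag.objects.all()
    >>> s = pickle.dumps(qs)                  # forces full evaluation first
    >>> pickle.loads(s)._result_cache is None
    False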
@@ -37,7 +48,7 @@ class QuerySet(object):
         # whilst not messing up any existing iterators against the queryset.
         if self._result_cache is None:
             if self._iter:
-                self._result_cache = list(self._iter())
+                self._result_cache = list(self._iter)
             else:
                 self._result_cache = list(self.iterator())
         elif self._iter:
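The one-character change above is a bug fix rather than a restyle: _iter holds the iterator returned by self.iterator(), not a callable, so it has to be consumed directly. In miniature:

    it = iter([1, 2, 3])
    print list(it)      # consumes the iterator: [1, 2, 3]
    # list(it()) would raise TypeError, because an iterator is not callable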
@@ -497,9 +508,6 @@ class ValuesQuerySet(QuerySet):
         # QuerySet.clone() will also set up the _fields attribute with the
         # names of the model fields to select.
 
-    def __iter__(self):
-        return self.iterator()
-
     def iterator(self):
         self.query.trim_extra_select(self.extra_names)
         names = self.query.extra_select.keys() + self.field_names
@@ -99,6 +99,24 @@ class Query(object):
         memo[id(self)] = result
         return result
 
+    def __getstate__(self):
+        """
+        Pickling support.
+        """
+        obj_dict = self.__dict__.copy()
+        del obj_dict['connection']
+        return obj_dict
+
+    def __setstate__(self, obj_dict):
+        """
+        Unpickling support.
+        """
+        self.__dict__.update(obj_dict)
+        # XXX: Need a better solution for this when multi-db stuff is
+        # supported. It's the only class-reference to the module-level
+        # connection variable.
+        self.connection = connection
+
     def get_meta(self):
         """
         Returns the Options instance (the model._meta) from which to start
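Query drops its connection before pickling and re-attaches the module-level connection on load, the usual way to handle an unpicklable attribute. The same pattern outside Django, as a self-contained sketch with illustrative names:

    import pickle

    def get_default_connection():
        return object()                     # stand-in for a real DB connection

    class PicklableQuery(object):
        def __init__(self):
            self.connection = get_default_connection()   # unpicklable in real life
            self.filters = []

        def __getstate__(self):
            obj_dict = self.__dict__.copy()
            del obj_dict['connection']      # drop the unpicklable member
            return obj_dict

        def __setstate__(self, obj_dict):
            self.__dict__.update(obj_dict)
            self.connection = get_default_connection()   # re-acquire on unpickle

    q2 = pickle.loads(pickle.dumps(PicklableQuery()))
    assert q2.connection is not None and q2.filters == []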
@@ -895,9 +913,15 @@ class Query(object):
         Add a single filter to the query. The 'filter_expr' is a pair:
         (filter_string, value). E.g. ('name__contains', 'fred')
 
-        If 'negate' is True, this is an exclude() filter. If 'trim' is True, we
-        automatically trim the final join group (used internally when
-        constructing nested queries).
+        If 'negate' is True, this is an exclude() filter. It's important to
+        note that this method does not negate anything in the where-clause
+        object when inserting the filter constraints. This is because negated
+        filters often require multiple calls to add_filter() and the negation
+        should only happen once. So the caller is responsible for this (the
+        caller will normally be add_q(), so that as an example).
+
+        If 'trim' is True, we automatically trim the final join group (used
+        internally when constructing nested queries).
 
         If 'can_reuse' is a set, we are processing a component of a
         multi-component filter (e.g. filter(Q1, Q2)). In this case, 'can_reuse'
@@ -1001,7 +1025,6 @@ class Query(object):
 
         self.where.add((alias, col, field, lookup_type, value), connector)
         if negate:
-            self.where.negate()
             for alias in join_list:
                 self.promote_alias(alias)
             if final > 1 and lookup_type != 'isnull':
@@ -1039,12 +1062,12 @@ class Query(object):
                 self.where.start_subtree(connector)
                 self.add_q(child, used_aliases)
                 self.where.end_subtree()
-                if q_object.negated:
-                    self.where.children[-1].negate()
             else:
                 self.add_filter(child, connector, q_object.negated,
                         can_reuse=used_aliases)
             connector = q_object.connector
+        if q_object.negated:
+            self.where.negate()
         if subtree:
             self.where.end_subtree()
 
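Together with the removal of self.where.negate() in the previous hunk, negation is now applied once to the whole tree built for the Q object instead of once per child filter, which keeps exclude() with several conditions equivalent to NOT (cond1 AND cond2). A plain-boolean sketch of the difference (compare the Bug #7096 doctests added further down):

    rows = [{'a': True, 'b': True}, {'a': True, 'b': False}, {'a': False, 'b': False}]

    negate_once     = [r for r in rows if not (r['a'] and r['b'])]     # new behaviour
    negate_per_leaf = [r for r in rows if not r['a'] and not r['b']]   # old: excludes too much

    print len(negate_once), len(negate_per_leaf)    # 2 1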
@@ -159,20 +159,37 @@ class UpdateQuery(Query):
         # from other tables.
         query = self.clone(klass=Query)
         query.bump_prefix()
-        query.select = []
         query.extra_select = {}
-        query.add_fields([query.model._meta.pk.name])
+        first_table = query.tables[0]
+        if query.alias_refcount[first_table] == 1:
+            # We can remove one table from the inner query.
+            query.unref_alias(first_table)
+            for i in xrange(1, len(query.tables)):
+                table = query.tables[i]
+                if query.alias_refcount[table]:
+                    break
+            join_info = query.alias_map[table]
+            query.select = [(join_info[RHS_ALIAS], join_info[RHS_JOIN_COL])]
+            must_pre_select = False
+        else:
+            query.select = []
+            query.add_fields([query.model._meta.pk.name])
+            must_pre_select = not self.connection.features.update_can_self_select
 
         # Now we adjust the current query: reset the where clause and get rid
         # of all the tables we don't need (since they're in the sub-select).
         self.where = self.where_class()
-        if self.related_updates:
+        if self.related_updates or must_pre_select:
+            # Either we're using the idents in multiple update queries (so
+            # don't want them to change), or the db backend doesn't support
+            # selecting from the updating table (e.g. MySQL).
             idents = []
             for rows in query.execute_sql(MULTI):
                 idents.extend([r[0] for r in rows])
             self.add_filter(('pk__in', idents))
             self.related_ids = idents
         else:
+            # The fast path. Filters and updates in one query.
             self.add_filter(('pk__in', query))
         for alias in self.tables[1:]:
             self.alias_refcount[alias] = 0
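The rewritten UpdateQuery setup chooses between two strategies: where the backend allows it, the filter goes into the UPDATE as a nested select (the fast path); on MySQL, which rejects that with error 1093 ("You can't specify target table ... for update in FROM clause"), and whenever related updates need a stable id list, the ids are selected first and the update is filtered on pk__in. A rough sketch of the two SQL shapes, with made-up table and column names:

    def sketch_update_sql(can_self_select, pre_selected_ids=None):
        if can_self_select:
            # Fast path: filter and update in one statement.
            return ("UPDATE app_item SET data = 'mm' WHERE id IN "
                    "(SELECT U0.id FROM app_item U0 "
                    "INNER JOIN app_tag U1 ON (U0.tag_id = U1.id) "
                    "WHERE U1.name = 't1')")
        # MySQL path: the ids were fetched by a separate SELECT beforehand.
        return ("UPDATE app_item SET data = 'mm' WHERE id IN (%s)"
                % ', '.join(str(pk) for pk in pre_selected_ids))

    print sketch_update_sql(True)
    print sketch_update_sql(False, pre_selected_ids=[1, 2, 3])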
@@ -376,6 +376,29 @@ You can evaluate a ``QuerySet`` in the following ways:
    iterating over a ``QuerySet`` will take advantage of your database to
    load data and instantiate objects only as you need them.
 
+
+Pickling QuerySets
+~~~~~~~~~~~~~~~~~~
+
+If you pickle_ a ``QuerySet``, this will also force all the results to be
+loaded into memory prior to pickling. This is because pickling is usually used
+as a precursor to caching and when the cached queryset is reloaded, you want
+the results to already be present. This means that when you unpickle a
+``QuerySet``, it contains the results at the moment it was pickled, rather
+than the results that are currently in the database.
+
+If you only want to pickle the necessary information to recreate the
+``QuerySet`` from the database at a later time, pickle the ``query`` attribute
+of the ``QuerySet``. You can then recreate the original ``QuerySet`` (without
+any results loaded) using some code like this::
+
+    >>> import pickle
+    >>> query = pickle.loads(s)     # Assuming 's' is the pickled string.
+    >>> qs = MyModel.objects.all()
+    >>> qs.query = query            # Restore the original 'query'.
+
+.. _pickle: http://docs.python.org/lib/module-pickle.html
+
 Limiting QuerySets
 ------------------
 
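The first paragraph of the new docs section has no snippet of its own; a short doctest-style sketch of its point, reusing the hypothetical MyModel from the documented example: pickling the queryset freezes its results, while pickling only the query attribute re-runs against the database later:

    >>> import pickle
    >>> qs = MyModel.objects.filter(pk__lt=10)
    >>> frozen = pickle.loads(pickle.dumps(qs))             # rows as of pickling time
    >>> fresh = MyModel.objects.all()
    >>> fresh.query = pickle.loads(pickle.dumps(qs.query))  # re-queries when evaluated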
|
@ -117,6 +117,24 @@ class LoopZ(models.Model):
|
|||||||
class Meta:
|
class Meta:
|
||||||
ordering = ['z']
|
ordering = ['z']
|
||||||
|
|
||||||
|
# A model and custom default manager combination.
|
||||||
|
class CustomManager(models.Manager):
|
||||||
|
def get_query_set(self):
|
||||||
|
return super(CustomManager, self).get_query_set().filter(public=True,
|
||||||
|
tag__name='t1')
|
||||||
|
|
||||||
|
class ManagedModel(models.Model):
|
||||||
|
data = models.CharField(max_length=10)
|
||||||
|
tag = models.ForeignKey(Tag)
|
||||||
|
public = models.BooleanField(default=True)
|
||||||
|
|
||||||
|
objects = CustomManager()
|
||||||
|
normal_manager = models.Manager()
|
||||||
|
|
||||||
|
def __unicode__(self):
|
||||||
|
return self.data
|
||||||
|
|
||||||
|
|
||||||
__test__ = {'API_TESTS':"""
|
__test__ = {'API_TESTS':"""
|
||||||
>>> t1 = Tag(name='t1')
|
>>> t1 = Tag(name='t1')
|
||||||
>>> t1.save()
|
>>> t1.save()
|
||||||
@@ -658,5 +676,30 @@ Bug #7098 -- Make sure semi-deprecated ordering by related models syntax still
 works.
 >>> Item.objects.values('note__note').order_by('queries_note.note', 'id')
 [{'note__note': u'n2'}, {'note__note': u'n3'}, {'note__note': u'n3'}, {'note__note': u'n3'}]
 
+Bug #7096 -- Make sure exclude() with multiple conditions continues to work.
+>>> Tag.objects.filter(parent=t1, name='t3').order_by('name')
+[<Tag: t3>]
+>>> Tag.objects.exclude(parent=t1, name='t3').order_by('name')
+[<Tag: t1>, <Tag: t2>, <Tag: t4>, <Tag: t5>]
+>>> Item.objects.exclude(tags__name='t1', name='one').order_by('name').distinct()
+[<Item: four>, <Item: three>, <Item: two>]
+>>> Item.objects.filter(name__in=['three', 'four']).exclude(tags__name='t1').order_by('name')
+[<Item: four>, <Item: three>]
+
+More twisted cases, involving nested negations.
+>>> Item.objects.exclude(~Q(tags__name='t1', name='one'))
+[<Item: one>]
+>>> Item.objects.filter(~Q(tags__name='t1', name='one'), name='two')
+[<Item: two>]
+>>> Item.objects.exclude(~Q(tags__name='t1', name='one'), name='two')
+[<Item: four>, <Item: one>, <Item: three>]
+
+Bug #7095
+Updates that are filtered on the model being updated are somewhat tricky to get
+in MySQL. This exercises that case.
+>>> mm = ManagedModel.objects.create(data='mm1', tag=t1, public=True)
+>>> ManagedModel.objects.update(data='mm')
+
 """}