diff --git a/django/contrib/gis/db/models/sql/where.py b/django/contrib/gis/db/models/sql/where.py
index 838889fbad..105cbfbec5 100644
--- a/django/contrib/gis/db/models/sql/where.py
+++ b/django/contrib/gis/db/models/sql/where.py
@@ -35,7 +35,7 @@ class GeoWhereNode(WhereNode):
return super(WhereNode, self).add(data, connector)
obj, lookup_type, value = data
- alias, col, field = obj.alias, obj.col, obj.field
+ col, field = obj.col, obj.field
if not hasattr(field, "geom_type"):
# Not a geographic field, so call `WhereNode.add`.
@@ -76,7 +76,7 @@ class GeoWhereNode(WhereNode):
# the `get_geo_where_clause` to construct the appropriate
# spatial SQL when `make_atom` is called.
annotation = GeoAnnotation(field, value, where)
- return super(WhereNode, self).add(((alias, col, field.db_type()), lookup_type, annotation, params), connector)
+ return super(WhereNode, self).add(((obj.alias, col, field.db_type()), lookup_type, annotation, params), connector)
def make_atom(self, child, qn):
obj, lookup_type, value_annot, params = child
diff --git a/django/contrib/gis/tests/relatedapp/models.py b/django/contrib/gis/tests/relatedapp/models.py
index d7dd6bbfd2..1125d7fb85 100644
--- a/django/contrib/gis/tests/relatedapp/models.py
+++ b/django/contrib/gis/tests/relatedapp/models.py
@@ -32,3 +32,13 @@ class Parcel(models.Model):
border2 = models.PolygonField(srid=2276)
objects = models.GeoManager()
def __unicode__(self): return self.name
+
+# These use the GeoManager but do not have any geographic fields.
+class Author(models.Model):
+ name = models.CharField(max_length=100)
+ objects = models.GeoManager()
+
+class Book(models.Model):
+ title = models.CharField(max_length=100)
+ author = models.ForeignKey(Author, related_name='books')
+ objects = models.GeoManager()
diff --git a/django/contrib/gis/tests/relatedapp/tests.py b/django/contrib/gis/tests/relatedapp/tests.py
index 77f6c73bb6..8c4f83b15a 100644
--- a/django/contrib/gis/tests/relatedapp/tests.py
+++ b/django/contrib/gis/tests/relatedapp/tests.py
@@ -1,10 +1,10 @@
import os, unittest
from django.contrib.gis.geos import *
from django.contrib.gis.db.backend import SpatialBackend
-from django.contrib.gis.db.models import F, Extent, Union
+from django.contrib.gis.db.models import Count, Extent, F, Union
from django.contrib.gis.tests.utils import no_mysql, no_oracle, no_spatialite
from django.conf import settings
-from models import City, Location, DirectoryEntry, Parcel
+from models import City, Location, DirectoryEntry, Parcel, Book, Author
cities = (('Aurora', 'TX', -97.516111, 33.058333),
('Roswell', 'NM', -104.528056, 33.387222),
@@ -196,8 +196,8 @@ class RelatedGeoModelTest(unittest.TestCase):
# ID values do not match their City ID values.
loc1 = Location.objects.create(point='POINT (-95.363151 29.763374)')
loc2 = Location.objects.create(point='POINT (-96.801611 32.782057)')
- dallas = City.objects.create(name='Dallas', location=loc2)
- houston = City.objects.create(name='Houston', location=loc1)
+ dallas = City.objects.create(name='Dallas', state='TX', location=loc2)
+ houston = City.objects.create(name='Houston', state='TX', location=loc1)
# The expected ID values -- notice the last two location IDs
# are out of order. We want to make sure that the related
@@ -231,6 +231,32 @@ class RelatedGeoModelTest(unittest.TestCase):
q = pickle.loads(q_str)
self.assertEqual(GeoQuery, q.__class__)
+ def test12_count(self):
+ "Testing `Count` aggregate use with the `GeoManager`. See #11087."
+ # Creating a new City, 'Fort Worth', that uses the same location
+ # as Dallas.
+ dallas = City.objects.get(name='Dallas')
+ ftworth = City.objects.create(name='Fort Worth', state='TX', location=dallas.location)
+
+ # Count annotation should be 2 for the Dallas location now.
+ loc = Location.objects.annotate(num_cities=Count('city')).get(id=dallas.location.id)
+ self.assertEqual(2, loc.num_cities)
+
+ # Creating some data for the Book/Author non-geo models that
+ # use GeoManager. See #11087.
+ tp = Author.objects.create(name='Trevor Paglen')
+ Book.objects.create(title='Torture Taxi', author=tp)
+ Book.objects.create(title='I Could Tell You But Then You Would Have to be Destroyed by Me', author=tp)
+ Book.objects.create(title='Blank Spots on the Map', author=tp)
+ wp = Author.objects.create(name='William Patry')
+ Book.objects.create(title='Patry on Copyright', author=wp)
+
+ # Should only be one author (Trevor Paglen) returned by this query, and
+ # the annotation should have 3 for the number of books.
+ qs = Author.objects.annotate(num_books=Count('books')).filter(num_books__gt=1)
+ self.assertEqual(1, len(qs))
+ self.assertEqual(3, qs[0].num_books)
+
# TODO: Related tests for KML, GML, and distance lookups.
def suite():
diff --git a/django/db/models/base.py b/django/db/models/base.py
index 13ff7e8f35..a5c99865a6 100644
--- a/django/db/models/base.py
+++ b/django/db/models/base.py
@@ -411,29 +411,37 @@ class Model(object):
save.alters_data = True
- def save_base(self, raw=False, cls=None, force_insert=False,
- force_update=False):
+ def save_base(self, raw=False, cls=None, origin=None,
+ force_insert=False, force_update=False):
"""
Does the heavy-lifting involved in saving. Subclasses shouldn't need to
override this method. It's separate from save() in order to hide the
need for overrides of save() to pass around internal-only parameters
- ('raw' and 'cls').
+ ('raw', 'cls', and 'origin').
"""
assert not (force_insert and force_update)
- if not cls:
+ if cls is None:
cls = self.__class__
- meta = self._meta
- signal = True
- signals.pre_save.send(sender=self.__class__, instance=self, raw=raw)
+ meta = cls._meta
+ if not meta.proxy:
+ origin = cls
else:
meta = cls._meta
- signal = False
+
+ if origin:
+ signals.pre_save.send(sender=origin, instance=self, raw=raw)
# If we are in a raw save, save the object exactly as presented.
# That means that we don't try to be smart about saving attributes
# that might have come from the parent class - we just save the
# attributes we have been given to the class we have been given.
- if not raw:
+ # We also go through this process to defer the save of proxy objects
+ # to their actual underlying model.
+ if not raw or meta.proxy:
+ if meta.proxy:
+ org = cls
+ else:
+ org = None
for parent, field in meta.parents.items():
# At this point, parent's primary key field may be unknown
# (for example, from administration form which doesn't fill
@@ -441,7 +449,8 @@ class Model(object):
if field and getattr(self, parent._meta.pk.attname) is None and getattr(self, field.attname) is not None:
setattr(self, parent._meta.pk.attname, getattr(self, field.attname))
- self.save_base(cls=parent)
+ self.save_base(cls=parent, origin=org)
+
if field:
setattr(self, field.attname, self._get_pk_val(parent._meta))
if meta.proxy:
@@ -492,8 +501,8 @@ class Model(object):
setattr(self, meta.pk.attname, result)
transaction.commit_unless_managed()
- if signal:
- signals.post_save.send(sender=self.__class__, instance=self,
+ if origin:
+ signals.post_save.send(sender=origin, instance=self,
created=(not record_exists), raw=raw)
save_base.alters_data = True
diff --git a/django/db/models/fields/related.py b/django/db/models/fields/related.py
index 419695b74b..78019f2bd1 100644
--- a/django/db/models/fields/related.py
+++ b/django/db/models/fields/related.py
@@ -132,12 +132,13 @@ class RelatedField(object):
v, field = getattr(v, v._meta.pk.name), v._meta.pk
except AttributeError:
pass
- if field:
- if lookup_type in ('range', 'in'):
- v = [v]
- v = field.get_db_prep_lookup(lookup_type, v)
- if isinstance(v, list):
- v = v[0]
+ if not field:
+ field = self.rel.get_related_field()
+ if lookup_type in ('range', 'in'):
+ v = [v]
+ v = field.get_db_prep_lookup(lookup_type, v)
+ if isinstance(v, list):
+ v = v[0]
return v
if hasattr(value, 'as_sql') or hasattr(value, '_as_sql'):
@@ -958,4 +959,3 @@ class ManyToManyField(RelatedField, Field):
# A ManyToManyField is not represented by a single column,
# so return None.
return None
-
diff --git a/django/db/models/query.py b/django/db/models/query.py
index 6a8d7d5e64..46a86fc03c 100644
--- a/django/db/models/query.py
+++ b/django/db/models/query.py
@@ -7,6 +7,8 @@ try:
except NameError:
from sets import Set as set # Python 2.3 fallback
+from copy import deepcopy
+
from django.db import connection, transaction, IntegrityError
from django.db.models.aggregates import Aggregate
from django.db.models.fields import DateField
@@ -40,6 +42,17 @@ class QuerySet(object):
# PYTHON MAGIC METHODS #
########################
+ def __deepcopy__(self, memo):
+ """
+ Deep copy of a QuerySet doesn't populate the cache
+ """
+ obj_dict = deepcopy(self.__dict__, memo)
+ obj_dict['_iter'] = None
+
+ obj = self.__class__()
+ obj.__dict__.update(obj_dict)
+ return obj
+
def __getstate__(self):
"""
Allows the QuerySet to be pickled.
@@ -190,7 +203,25 @@ class QuerySet(object):
index_start = len(extra_select)
aggregate_start = index_start + len(self.model._meta.fields)
- load_fields = only_load.get(self.model)
+ load_fields = []
+ # If only/defer clauses have been specified,
+ # build the list of fields that are to be loaded.
+ if only_load:
+ for field, model in self.model._meta.get_fields_with_model():
+ if model is None:
+ model = self.model
+ if field == self.model._meta.pk:
+ # Record the index of the primary key when it is found
+ pk_idx = len(load_fields)
+ try:
+ if field.name in only_load[model]:
+ # Add a field that has been explicitly included
+ load_fields.append(field.name)
+ except KeyError:
+ # Model wasn't explicitly listed in the only_load table
+ # Therefore, we need to load all fields from this model
+ load_fields.append(field.name)
+
skip = None
if load_fields and not fill_cache:
# Some fields have been deferred, so we have to initialise
@@ -355,10 +386,11 @@ class QuerySet(object):
# Delete objects in chunks to prevent the list of related objects from
# becoming too long.
+ seen_objs = None
while 1:
# Collect all the objects to be deleted in this chunk, and all the
# objects that are related to the objects that are to be deleted.
- seen_objs = CollectedObjects()
+ seen_objs = CollectedObjects(seen_objs)
for object in del_query[:CHUNK_SIZE]:
object._collect_sub_objects(seen_objs)
diff --git a/django/db/models/query_utils.py b/django/db/models/query_utils.py
index 7a5ad919a1..6a6b69013f 100644
--- a/django/db/models/query_utils.py
+++ b/django/db/models/query_utils.py
@@ -32,11 +32,21 @@ class CollectedObjects(object):
This is used for the database object deletion routines so that we can
calculate the 'leaf' objects which should be deleted first.
+
+ previously_seen is an optional argument. It must be a CollectedObjects
+ instance itself; any previously_seen collected object will be blocked from
+ being added to this instance.
"""
- def __init__(self):
+ def __init__(self, previously_seen=None):
self.data = {}
self.children = {}
+ if previously_seen:
+ self.blocked = previously_seen.blocked
+ for cls, seen in previously_seen.data.items():
+ self.blocked.setdefault(cls, SortedDict()).update(seen)
+ else:
+ self.blocked = {}
def add(self, model, pk, obj, parent_model, nullable=False):
"""
@@ -53,6 +63,9 @@ class CollectedObjects(object):
Returns True if the item already existed in the structure and
False otherwise.
"""
+ if pk in self.blocked.get(model, {}):
+ return True
+
d = self.data.setdefault(model, SortedDict())
retval = pk in d
d[pk] = obj
diff --git a/django/db/models/sql/query.py b/django/db/models/sql/query.py
index d290d60e63..23f99e41ad 100644
--- a/django/db/models/sql/query.py
+++ b/django/db/models/sql/query.py
@@ -635,10 +635,10 @@ class BaseQuery(object):
# models.
workset = {}
for model, values in seen.iteritems():
- for field, f_model in model._meta.get_fields_with_model():
+ for field in model._meta.local_fields:
if field in values:
continue
- add_to_dict(workset, f_model or model, field)
+ add_to_dict(workset, model, field)
for model, values in must_include.iteritems():
# If we haven't included a model in workset, we don't add the
# corresponding must_include fields for that model, since an
@@ -657,6 +657,12 @@ class BaseQuery(object):
# included any fields, we have to make sure it's mentioned
# so that only the "must include" fields are pulled in.
seen[model] = values
+ # Now ensure that every model in the inheritance chain is mentioned
+ # in the parent list. Again, it must be mentioned to ensure that
+ # only "must include" fields are pulled in.
+ for model in orig_opts.get_parent_list():
+ if model not in seen:
+ seen[model] = set()
for model, values in seen.iteritems():
callback(target, model, values)
@@ -1619,10 +1625,14 @@ class BaseQuery(object):
entry.negate()
self.where.add(entry, AND)
break
- elif not (lookup_type == 'in' and not value) and field.null:
+ elif not (lookup_type == 'in'
+ and not hasattr(value, 'as_sql')
+ and not hasattr(value, '_as_sql')
+ and not value) and field.null:
# Leaky abstraction artifact: We have to specifically
# exclude the "foo__in=[]" case from this handling, because
# it's short-circuited in the Where class.
+ # We also need to handle the case where a subquery is provided
entry = self.where_class()
entry.add((Constraint(alias, col, None), 'isnull', True), AND)
entry.negate()
diff --git a/django/test/test_coverage.py b/django/test/test_coverage.py
index f71e9e99f2..4f8e3bafe6 100644
--- a/django/test/test_coverage.py
+++ b/django/test/test_coverage.py
@@ -62,9 +62,7 @@ class BaseCoverageRunner(object):
packages, self.modules, self.excludes, self.errors = get_all_modules(
coverage_modules, getattr(settings, 'COVERAGE_MODULE_EXCLUDES', []),
getattr(settings, 'COVERAGE_PATH_EXCLUDES', []))
- #for mods in self.modules.keys():
- # self.cov.analysis2(ModuleVars(mods, self.modules[mods]).source_file)
- #coverage.analysis2(self.modules[mods])
+
self.cov.report(self.modules.values(), show_missing=1)
if self.excludes:
@@ -110,12 +108,10 @@ class ReportingCoverageRunner(BaseCoverageRunner):
with the results
"""
res = super(ReportingCoverageRunner, self).run_tests( *args, **kwargs)
- #coverage._the_coverage.load()
- #covss = coverage.html.HtmlReporter(self.cov)
- self.cov.html_report(self.modules.values(), directory=self.outdir, ignore_errors=True, omit_prefixes='modeltests')
- #cov.report(self.modules.values(), self.outdir)
- #coverage._the_coverage.html_report(self.modules.values(), self.outdir)
-
+ self.cov.html_report(self.modules.values(),
+ directory=self.outdir,
+ ignore_errors=True,
+ omit_prefixes='modeltests')
print >>sys.stdout
print >>sys.stdout, _("HTML reports were output to '%s'") %self.outdir
diff --git a/tests/modeltests/custom_pk/models.py b/tests/modeltests/custom_pk/models.py
index 091f7f32b4..b1d0cb37d0 100644
--- a/tests/modeltests/custom_pk/models.py
+++ b/tests/modeltests/custom_pk/models.py
@@ -9,6 +9,8 @@ this behavior by explicitly adding ``primary_key=True`` to a field.
from django.conf import settings
from django.db import models, transaction, IntegrityError
+from fields import MyAutoField
+
class Employee(models.Model):
employee_code = models.IntegerField(primary_key=True, db_column = 'code')
first_name = models.CharField(max_length=20)
@@ -28,6 +30,16 @@ class Business(models.Model):
def __unicode__(self):
return self.name
+class Bar(models.Model):
+ id = MyAutoField(primary_key=True, db_index=True)
+
+ def __unicode__(self):
+ return repr(self.pk)
+
+
+class Foo(models.Model):
+ bar = models.ForeignKey(Bar)
+
__test__ = {'API_TESTS':"""
>>> dan = Employee(employee_code=123, first_name='Dan', last_name='Jones')
>>> dan.save()
@@ -121,6 +133,21 @@ DoesNotExist: Employee matching query does not exist.
... print "Fail with %s" % type(e)
Pass
+# Regression for #10785 -- Custom fields can be used for primary keys.
+>>> new_bar = Bar.objects.create()
+>>> new_foo = Foo.objects.create(bar=new_bar)
+>>> f = Foo.objects.get(bar=new_bar.pk)
+>>> f == new_foo
+True
+>>> f.bar == new_bar
+True
+
+>>> f = Foo.objects.get(bar=new_bar)
+>>> f == new_foo
+True
+>>> f.bar == new_bar
+True
+
"""}
# SQLite lets objects be saved with an empty primary key, even though an
diff --git a/tests/modeltests/defer/models.py b/tests/modeltests/defer/models.py
index ce65065d40..96eb427811 100644
--- a/tests/modeltests/defer/models.py
+++ b/tests/modeltests/defer/models.py
@@ -17,6 +17,12 @@ class Primary(models.Model):
def __unicode__(self):
return self.name
+class Child(Primary):
+ pass
+
+class BigChild(Primary):
+ other = models.CharField(max_length=50)
+
def count_delayed_fields(obj, debug=False):
"""
Returns the number of delayed attributes on the given model instance.
@@ -33,7 +39,7 @@ def count_delayed_fields(obj, debug=False):
__test__ = {"API_TEST": """
To all outward appearances, instances with deferred fields look the same as
-normal instances when we examine attribut values. Therefore we test for the
+normal instances when we examine attribute values. Therefore we test for the
number of deferred fields on returned instances (by poking at the internals),
as a way to observe what is going on.
@@ -98,5 +104,89 @@ Using defer() and only() with get() is also valid.
>>> Primary.objects.all()
[]
+# Regression for #10572 - A subclass with no extra fields can defer fields from the base class
+>>> _ = Child.objects.create(name="c1", value="foo", related=s1)
+
+# You can defer a field on a baseclass when the subclass has no fields
+>>> obj = Child.objects.defer("value").get(name="c1")
+>>> count_delayed_fields(obj)
+1
+>>> obj.name
+u"c1"
+>>> obj.value
+u"foo"
+>>> obj.name = "c2"
+>>> obj.save()
+
+# You can retrieve a single column on a base class with no fields
+>>> obj = Child.objects.only("name").get(name="c2")
+>>> count_delayed_fields(obj)
+3
+>>> obj.name
+u"c2"
+>>> obj.value
+u"foo"
+>>> obj.name = "cc"
+>>> obj.save()
+
+>>> _ = BigChild.objects.create(name="b1", value="foo", related=s1, other="bar")
+
+# You can defer a field on a baseclass
+>>> obj = BigChild.objects.defer("value").get(name="b1")
+>>> count_delayed_fields(obj)
+1
+>>> obj.name
+u"b1"
+>>> obj.value
+u"foo"
+>>> obj.other
+u"bar"
+>>> obj.name = "b2"
+>>> obj.save()
+
+# You can defer a field on a subclass
+>>> obj = BigChild.objects.defer("other").get(name="b2")
+>>> count_delayed_fields(obj)
+1
+>>> obj.name
+u"b2"
+>>> obj.value
+u"foo"
+>>> obj.other
+u"bar"
+>>> obj.name = "b3"
+>>> obj.save()
+
+# You can retrieve a single field on a baseclass
+>>> obj = BigChild.objects.only("name").get(name="b3")
+>>> count_delayed_fields(obj)
+4
+>>> obj.name
+u"b3"
+>>> obj.value
+u"foo"
+>>> obj.other
+u"bar"
+>>> obj.name = "b4"
+>>> obj.save()
+
+# You can retrieve a single field on a subclass
+>>> obj = BigChild.objects.only("other").get(name="b4")
+>>> count_delayed_fields(obj)
+4
+>>> obj.name
+u"b4"
+>>> obj.value
+u"foo"
+>>> obj.other
+u"bar"
+>>> obj.name = "bb"
+>>> obj.save()
+
+# Finally, we need to flush the app cache for the defer module.
+# Using only/defer creates some artificial entries in the app cache
+# that mess up later tests. Purge all entries, just to be sure.
+>>> from django.db.models.loading import cache
+>>> cache.app_models['defer'] = {}
"""}
diff --git a/tests/modeltests/proxy_models/models.py b/tests/modeltests/proxy_models/models.py
index 4b3f7d925d..e38266fb70 100644
--- a/tests/modeltests/proxy_models/models.py
+++ b/tests/modeltests/proxy_models/models.py
@@ -259,6 +259,40 @@ FieldError: Proxy model 'NoNewFields' contains model fields.
>>> OtherPerson._default_manager.all()
[, ]
+# Test save signals for proxy models
+>>> from django.db.models import signals
+>>> def make_handler(model, event):
+... def _handler(*args, **kwargs):
+... print u"%s %s save" % (model, event)
+... return _handler
+>>> h1 = make_handler('MyPerson', 'pre')
+>>> h2 = make_handler('MyPerson', 'post')
+>>> h3 = make_handler('Person', 'pre')
+>>> h4 = make_handler('Person', 'post')
+>>> signals.pre_save.connect(h1, sender=MyPerson)
+>>> signals.post_save.connect(h2, sender=MyPerson)
+>>> signals.pre_save.connect(h3, sender=Person)
+>>> signals.post_save.connect(h4, sender=Person)
+>>> dino = MyPerson.objects.create(name=u"dino")
+MyPerson pre save
+MyPerson post save
+
+# Test save signals for proxy proxy models
+>>> h5 = make_handler('MyPersonProxy', 'pre')
+>>> h6 = make_handler('MyPersonProxy', 'post')
+>>> signals.pre_save.connect(h5, sender=MyPersonProxy)
+>>> signals.post_save.connect(h6, sender=MyPersonProxy)
+>>> dino = MyPersonProxy.objects.create(name=u"pebbles")
+MyPersonProxy pre save
+MyPersonProxy post save
+
+>>> signals.pre_save.disconnect(h1, sender=MyPerson)
+>>> signals.post_save.disconnect(h2, sender=MyPerson)
+>>> signals.pre_save.disconnect(h3, sender=Person)
+>>> signals.post_save.disconnect(h4, sender=Person)
+>>> signals.pre_save.disconnect(h5, sender=MyPersonProxy)
+>>> signals.post_save.disconnect(h6, sender=MyPersonProxy)
+
# A proxy has the same content type as the model it is proxying for (at the
# storage level, it is meant to be essentially indistinguishable).
>>> ctype = ContentType.objects.get_for_model
@@ -266,7 +300,7 @@ FieldError: Proxy model 'NoNewFields' contains model fields.
True
>>> MyPersonProxy.objects.all()
-[, ]
+[, , , ]
>>> u = User.objects.create(name='Bruce')
>>> User.objects.all()
@@ -327,4 +361,11 @@ True
# Select related + filter on a related proxy of proxy field
>>> ProxyImprovement.objects.select_related().get(associated_bug__summary__icontains='fix')
+
+Proxy models can be loaded from fixtures (Regression for #11194)
+>>> from django.core import management
+>>> management.call_command('loaddata', 'mypeople.json', verbosity=0)
+>>> MyPerson.objects.get(pk=100)
+
+
"""}
diff --git a/tests/regressiontests/defer_regress/models.py b/tests/regressiontests/defer_regress/models.py
index 11ce1557fe..da9822ab88 100644
--- a/tests/regressiontests/defer_regress/models.py
+++ b/tests/regressiontests/defer_regress/models.py
@@ -84,7 +84,8 @@ Some further checks for select_related() and inherited model behaviour
(regression for #10710).
>>> c1 = Child.objects.create(name="c1", value=42)
->>> obj = Leaf.objects.create(name="l1", child=c1)
+>>> c2 = Child.objects.create(name="c2", value=37)
+>>> obj = Leaf.objects.create(name="l1", child=c1, second_child=c2)
>>> obj = Leaf.objects.only("name", "child").select_related()[0]
>>> obj.child.name
@@ -101,5 +102,24 @@ types as their non-deferred versions (bug #10738).
>>> c1 is c2 is c3
True
+# Regression for #10733 - only() can be used on a model with two foreign keys.
+>>> results = Leaf.objects.all().only('name', 'child', 'second_child').select_related()
+>>> results[0].child.name
+u'c1'
+>>> results[0].second_child.name
+u'c2'
+
+>>> results = Leaf.objects.all().only('name', 'child', 'second_child', 'child__name', 'second_child__name').select_related()
+>>> results[0].child.name
+u'c1'
+>>> results[0].second_child.name
+u'c2'
+
+# Finally, we need to flush the app cache for the defer_regress module.
+# Using only/defer creates some artificial entries in the app cache
+# that mess up later tests. Purge all entries, just to be sure.
+>>> from django.db.models.loading import cache
+>>> cache.app_models['defer_regress'] = {}
+
"""
}
diff --git a/tests/regressiontests/queries/models.py b/tests/regressiontests/queries/models.py
index b5fa377496..0d28926149 100644
--- a/tests/regressiontests/queries/models.py
+++ b/tests/regressiontests/queries/models.py
@@ -1143,6 +1143,36 @@ True
>>> r.save()
>>> Ranking.objects.all()
[, , ]
+
+# Regression test for #10742:
+# Queries used in an __in clause don't execute subqueries
+
+>>> subq = Author.objects.filter(num__lt=3000)
+>>> qs = Author.objects.filter(pk__in=subq)
+>>> list(qs)
+[, ]
+
+# The subquery result cache should not be populated
+>>> subq._result_cache is None
+True
+
+>>> subq = Author.objects.filter(num__lt=3000)
+>>> qs = Author.objects.exclude(pk__in=subq)
+>>> list(qs)
+[, ]
+
+# The subquery result cache should not be populated
+>>> subq._result_cache is None
+True
+
+>>> subq = Author.objects.filter(num__lt=3000)
+>>> list(Author.objects.filter(Q(pk__in=subq) & Q(name='a1')))
+[]
+
+# The subquery result cache should not be populated
+>>> subq._result_cache is None
+True
+
"""}
# In Python 2.3 and the Python 2.6 beta releases, exceptions raised in __len__