diff --git a/django/contrib/admin/templates/admin/pagination.html b/django/contrib/admin/templates/admin/pagination.html
index aaba97fdb7..358813290c 100644
--- a/django/contrib/admin/templates/admin/pagination.html
+++ b/django/contrib/admin/templates/admin/pagination.html
@@ -8,5 +8,5 @@
 {% endif %}
 {{ cl.result_count }} {% ifequal cl.result_count 1 %}{{ cl.opts.verbose_name }}{% else %}{{ cl.opts.verbose_name_plural }}{% endifequal %}
 {% if show_all_url %} {% trans 'Show all' %}{% endif %}
-{% if cl.formset and cl.result_count %}{% endif %}
+{% if cl.formset and cl.result_count %}{% endif %}
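The GeoDjango changes that follow make standard aggregates work on models managed by `GeoManager`, including models that have no geographic fields at all (#11087). A minimal sketch of the intended usage, reusing the Author/Book test models the patch adds below; this is an illustration, not part of the patch:

    from django.contrib.gis.db.models import Count

    # The annotation works even though Author and Book use GeoManager
    # and define no geometry columns.
    prolific = Author.objects.annotate(num_books=Count('books')).filter(num_books__gt=1)
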
diff --git a/django/contrib/gis/db/models/sql/where.py b/django/contrib/gis/db/models/sql/where.py
index 838889fbad..105cbfbec5 100644
--- a/django/contrib/gis/db/models/sql/where.py
+++ b/django/contrib/gis/db/models/sql/where.py
@@ -35,7 +35,7 @@ class GeoWhereNode(WhereNode):
             return super(WhereNode, self).add(data, connector)
 
         obj, lookup_type, value = data
-        alias, col, field = obj.alias, obj.col, obj.field
+        col, field = obj.col, obj.field
 
         if not hasattr(field, "geom_type"):
             # Not a geographic field, so call `WhereNode.add`.
@@ -76,7 +76,7 @@ class GeoWhereNode(WhereNode):
             # the `get_geo_where_clause` to construct the appropriate
             # spatial SQL when `make_atom` is called.
             annotation = GeoAnnotation(field, value, where)
-            return super(WhereNode, self).add(((alias, col, field.db_type()), lookup_type, annotation, params), connector)
+            return super(WhereNode, self).add(((obj.alias, col, field.db_type()), lookup_type, annotation, params), connector)
 
     def make_atom(self, child, qn):
         obj, lookup_type, value_annot, params = child
diff --git a/django/contrib/gis/tests/relatedapp/models.py b/django/contrib/gis/tests/relatedapp/models.py
index d7dd6bbfd2..1125d7fb85 100644
--- a/django/contrib/gis/tests/relatedapp/models.py
+++ b/django/contrib/gis/tests/relatedapp/models.py
@@ -32,3 +32,13 @@ class Parcel(models.Model):
     border2 = models.PolygonField(srid=2276)
     objects = models.GeoManager()
     def __unicode__(self): return self.name
+
+# These use the GeoManager but do not have any geographic fields.
+class Author(models.Model):
+    name = models.CharField(max_length=100)
+    objects = models.GeoManager()
+
+class Book(models.Model):
+    title = models.CharField(max_length=100)
+    author = models.ForeignKey(Author, related_name='books')
+    objects = models.GeoManager()
diff --git a/django/contrib/gis/tests/relatedapp/tests.py b/django/contrib/gis/tests/relatedapp/tests.py
index 77f6c73bb6..8c4f83b15a 100644
--- a/django/contrib/gis/tests/relatedapp/tests.py
+++ b/django/contrib/gis/tests/relatedapp/tests.py
@@ -1,10 +1,10 @@
 import os, unittest
 from django.contrib.gis.geos import *
 from django.contrib.gis.db.backend import SpatialBackend
-from django.contrib.gis.db.models import F, Extent, Union
+from django.contrib.gis.db.models import Count, Extent, F, Union
 from django.contrib.gis.tests.utils import no_mysql, no_oracle, no_spatialite
 from django.conf import settings
-from models import City, Location, DirectoryEntry, Parcel
+from models import City, Location, DirectoryEntry, Parcel, Book, Author
 
 cities = (('Aurora', 'TX', -97.516111, 33.058333),
           ('Roswell', 'NM', -104.528056, 33.387222),
@@ -196,8 +196,8 @@ class RelatedGeoModelTest(unittest.TestCase):
         # ID values do not match their City ID values.
         loc1 = Location.objects.create(point='POINT (-95.363151 29.763374)')
         loc2 = Location.objects.create(point='POINT (-96.801611 32.782057)')
-        dallas = City.objects.create(name='Dallas', location=loc2)
-        houston = City.objects.create(name='Houston', location=loc1)
+        dallas = City.objects.create(name='Dallas', state='TX', location=loc2)
+        houston = City.objects.create(name='Houston', state='TX', location=loc1)
 
         # The expected ID values -- notice the last two location IDs
         # are out of order. We want to make sure that the related
@@ -231,6 +231,32 @@ class RelatedGeoModelTest(unittest.TestCase):
         q = pickle.loads(q_str)
         self.assertEqual(GeoQuery, q.__class__)
 
+    def test12_count(self):
+        "Testing `Count` aggregate use with the `GeoManager`. See #11087."
+        # Creating a new City, 'Fort Worth', that uses the same location
+        # as Dallas.
+        dallas = City.objects.get(name='Dallas')
+        ftworth = City.objects.create(name='Fort Worth', state='TX', location=dallas.location)
+
+        # Count annotation should be 2 for the Dallas location now.
+        loc = Location.objects.annotate(num_cities=Count('city')).get(id=dallas.location.id)
+        self.assertEqual(2, loc.num_cities)
+
+        # Creating some data for the Book/Author non-geo models that
+        # use GeoManager. See #11087.
+        tp = Author.objects.create(name='Trevor Paglen')
+        Book.objects.create(title='Torture Taxi', author=tp)
+        Book.objects.create(title='I Could Tell You But Then You Would Have to be Destroyed by Me', author=tp)
+        Book.objects.create(title='Blank Spots on the Map', author=tp)
+        wp = Author.objects.create(name='William Patry')
+        Book.objects.create(title='Patry on Copyright', author=wp)
+
+        # Should only be one author (Trevor Paglen) returned by this query, and
+        # the annotation should have 3 for the number of books.
+        qs = Author.objects.annotate(num_books=Count('books')).filter(num_books__gt=1)
+        self.assertEqual(1, len(qs))
+        self.assertEqual(3, qs[0].num_books)
+
     # TODO: Related tests for KML, GML, and distance lookups.
 
 def suite():
diff --git a/django/db/models/base.py b/django/db/models/base.py
index 13ff7e8f35..a5c99865a6 100644
--- a/django/db/models/base.py
+++ b/django/db/models/base.py
@@ -411,29 +411,37 @@ class Model(object):
 
     save.alters_data = True
 
-    def save_base(self, raw=False, cls=None, force_insert=False,
-            force_update=False):
+    def save_base(self, raw=False, cls=None, origin=None,
+            force_insert=False, force_update=False):
         """
         Does the heavy-lifting involved in saving. Subclasses shouldn't need to
         override this method. It's separate from save() in order to hide the
         need for overrides of save() to pass around internal-only parameters
-        ('raw' and 'cls').
+        ('raw', 'cls', and 'origin').
         """
         assert not (force_insert and force_update)
-        if not cls:
+        if cls is None:
             cls = self.__class__
-            meta = self._meta
-            signal = True
-            signals.pre_save.send(sender=self.__class__, instance=self, raw=raw)
+            meta = cls._meta
+            if not meta.proxy:
+                origin = cls
         else:
             meta = cls._meta
-            signal = False
+
+        if origin:
+            signals.pre_save.send(sender=origin, instance=self, raw=raw)
 
         # If we are in a raw save, save the object exactly as presented.
         # That means that we don't try to be smart about saving attributes
         # that might have come from the parent class - we just save the
         # attributes we have been given to the class we have been given.
-        if not raw:
+        # We also go through this process to defer the save of proxy objects
+        # to their actual underlying model.
+        if not raw or meta.proxy:
+            if meta.proxy:
+                org = cls
+            else:
+                org = None
             for parent, field in meta.parents.items():
                 # At this point, parent's primary key field may be unknown
                 # (for example, from administration form which doesn't fill
@@ -441,7 +449,8 @@ class Model(object):
                 if field and getattr(self, parent._meta.pk.attname) is None and getattr(self, field.attname) is not None:
                     setattr(self, parent._meta.pk.attname, getattr(self, field.attname))
-                self.save_base(cls=parent)
+                self.save_base(cls=parent, origin=org)
+
                 if field:
                     setattr(self, field.attname, self._get_pk_val(parent._meta))
 
         if meta.proxy:
@@ -492,8 +501,8 @@ class Model(object):
                     setattr(self, meta.pk.attname, result)
             transaction.commit_unless_managed()
 
-        if signal:
-            signals.post_save.send(sender=self.__class__, instance=self,
+        if origin:
+            signals.post_save.send(sender=origin, instance=self,
                 created=(not record_exists), raw=raw)
 
     save_base.alters_data = True
diff --git a/django/db/models/fields/related.py b/django/db/models/fields/related.py
index 419695b74b..78019f2bd1 100644
--- a/django/db/models/fields/related.py
+++ b/django/db/models/fields/related.py
@@ -132,12 +132,13 @@ class RelatedField(object):
                     v, field = getattr(v, v._meta.pk.name), v._meta.pk
             except AttributeError:
                 pass
-            if field:
-                if lookup_type in ('range', 'in'):
-                    v = [v]
-                v = field.get_db_prep_lookup(lookup_type, v)
-                if isinstance(v, list):
-                    v = v[0]
+            if not field:
+                field = self.rel.get_related_field()
+            if lookup_type in ('range', 'in'):
+                v = [v]
+            v = field.get_db_prep_lookup(lookup_type, v)
+            if isinstance(v, list):
+                v = v[0]
             return v
 
         if hasattr(value, 'as_sql') or hasattr(value, '_as_sql'):
@@ -958,4 +959,3 @@ class ManyToManyField(RelatedField, Field):
         # A ManyToManyField is not represented by a single column,
         # so return None.
         return None
-
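The save_base() changes in django/db/models/base.py above thread a single `origin` argument (the class save() was originally invoked for) through to the pre_save/post_save signals, so saving a proxy model instance emits the signals with the proxy class as sender. A small sketch of the resulting behaviour; the Person/MyPerson models and the handler are illustrative only, not part of the patch:

    from django.db import models
    from django.db.models import signals

    class Person(models.Model):
        name = models.CharField(max_length=100)

    class MyPerson(Person):
        class Meta:
            proxy = True

    def on_save(sender, instance, created, **kwargs):
        # With this patch, sender is MyPerson when a MyPerson instance is saved.
        print sender, created

    signals.post_save.connect(on_save, sender=MyPerson)
    MyPerson(name='Bozza').save()
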
diff --git a/django/db/models/query.py b/django/db/models/query.py
index 6a8d7d5e64..46a86fc03c 100644
--- a/django/db/models/query.py
+++ b/django/db/models/query.py
@@ -7,6 +7,8 @@ try:
 except NameError:
     from sets import Set as set     # Python 2.3 fallback
 
+from copy import deepcopy
+
 from django.db import connection, transaction, IntegrityError
 from django.db.models.aggregates import Aggregate
 from django.db.models.fields import DateField
@@ -40,6 +42,17 @@ class QuerySet(object):
     # PYTHON MAGIC METHODS #
     ########################
 
+    def __deepcopy__(self, memo):
+        """
+        Deep copy of a QuerySet doesn't populate the cache
+        """
+        obj_dict = deepcopy(self.__dict__, memo)
+        obj_dict['_iter'] = None
+
+        obj = self.__class__()
+        obj.__dict__.update(obj_dict)
+        return obj
+
     def __getstate__(self):
         """
         Allows the QuerySet to be pickled.
@@ -190,7 +203,25 @@ class QuerySet(object):
         index_start = len(extra_select)
         aggregate_start = index_start + len(self.model._meta.fields)
 
-        load_fields = only_load.get(self.model)
+        load_fields = []
+        # If only/defer clauses have been specified,
+        # build the list of fields that are to be loaded.
+        if only_load:
+            for field, model in self.model._meta.get_fields_with_model():
+                if model is None:
+                    model = self.model
+                if field == self.model._meta.pk:
+                    # Record the index of the primary key when it is found
+                    pk_idx = len(load_fields)
+                try:
+                    if field.name in only_load[model]:
+                        # Add a field that has been explicitly included
+                        load_fields.append(field.name)
+                except KeyError:
+                    # Model wasn't explicitly listed in the only_load table
+                    # Therefore, we need to load all fields from this model
+                    load_fields.append(field.name)
+
         skip = None
         if load_fields and not fill_cache:
             # Some fields have been deferred, so we have to initialise
@@ -355,10 +386,11 @@ class QuerySet(object):
 
         # Delete objects in chunks to prevent the list of related objects from
         # becoming too long.
+        seen_objs = None
         while 1:
             # Collect all the objects to be deleted in this chunk, and all the
             # objects that are related to the objects that are to be deleted.
-            seen_objs = CollectedObjects()
+            seen_objs = CollectedObjects(seen_objs)
             for object in del_query[:CHUNK_SIZE]:
                 object._collect_sub_objects(seen_objs)
 
diff --git a/django/db/models/query_utils.py b/django/db/models/query_utils.py
index 7a5ad919a1..6a6b69013f 100644
--- a/django/db/models/query_utils.py
+++ b/django/db/models/query_utils.py
@@ -32,11 +32,21 @@ class CollectedObjects(object):
 
     This is used for the database object deletion routines so that we can
     calculate the 'leaf' objects which should be deleted first.
+
+    previously_seen is an optional argument. It must be a CollectedObjects
+    instance itself; any previously_seen collected object will be blocked from
+    being added to this instance.
     """
 
-    def __init__(self):
+    def __init__(self, previously_seen=None):
         self.data = {}
         self.children = {}
+        if previously_seen:
+            self.blocked = previously_seen.blocked
+            for cls, seen in previously_seen.data.items():
+                self.blocked.setdefault(cls, SortedDict()).update(seen)
+        else:
+            self.blocked = {}
 
     def add(self, model, pk, obj, parent_model, nullable=False):
         """
@@ -53,6 +63,9 @@ class CollectedObjects(object):
         Returns True if the item already existed in the structure and
         False otherwise.
         """
+        if pk in self.blocked.get(model, {}):
+            return True
+
         d = self.data.setdefault(model, SortedDict())
         retval = pk in d
         d[pk] = obj
diff --git a/django/db/models/sql/query.py b/django/db/models/sql/query.py
index d290d60e63..23f99e41ad 100644
--- a/django/db/models/sql/query.py
+++ b/django/db/models/sql/query.py
@@ -635,10 +635,10 @@ class BaseQuery(object):
             # models.
             workset = {}
             for model, values in seen.iteritems():
-                for field, f_model in model._meta.get_fields_with_model():
+                for field in model._meta.local_fields:
                     if field in values:
                         continue
-                    add_to_dict(workset, f_model or model, field)
+                    add_to_dict(workset, model, field)
             for model, values in must_include.iteritems():
                 # If we haven't included a model in workset, we don't add the
                 # corresponding must_include fields for that model, since an
@@ -657,6 +657,12 @@ class BaseQuery(object):
                     # included any fields, we have to make sure it's mentioned
                     # so that only the "must include" fields are pulled in.
                     seen[model] = values
+            # Now ensure that every model in the inheritance chain is mentioned
+            # in the parent list. Again, it must be mentioned to ensure that
+            # only "must include" fields are pulled in.
+            for model in orig_opts.get_parent_list():
+                if model not in seen:
+                    seen[model] = set()
             for model, values in seen.iteritems():
                 callback(target, model, values)
 
@@ -1619,10 +1625,14 @@ class BaseQuery(object):
                             entry.negate()
                             self.where.add(entry, AND)
                             break
-                elif not (lookup_type == 'in' and not value) and field.null:
+                elif not (lookup_type == 'in'
+                            and not hasattr(value, 'as_sql')
+                            and not hasattr(value, '_as_sql')
+                            and not value) and field.null:
                     # Leaky abstraction artifact: We have to specifically
                     # exclude the "foo__in=[]" case from this handling, because
                     # it's short-circuited in the Where class.
+                    # We also need to handle the case where a subquery is provided
                     entry = self.where_class()
                     entry.add((Constraint(alias, col, None), 'isnull', True), AND)
                     entry.negate()
diff --git a/django/test/test_coverage.py b/django/test/test_coverage.py
index f71e9e99f2..4f8e3bafe6 100644
--- a/django/test/test_coverage.py
+++ b/django/test/test_coverage.py
@@ -62,9 +62,7 @@ class BaseCoverageRunner(object):
         packages, self.modules, self.excludes, self.errors = get_all_modules(
             coverage_modules, getattr(settings, 'COVERAGE_MODULE_EXCLUDES', []),
             getattr(settings, 'COVERAGE_PATH_EXCLUDES', []))
-        #for mods in self.modules.keys():
-        #    self.cov.analysis2(ModuleVars(mods, self.modules[mods]).source_file)
-            #coverage.analysis2(self.modules[mods])
+
         self.cov.report(self.modules.values(), show_missing=1)
 
         if self.excludes:
@@ -110,12 +108,10 @@ class ReportingCoverageRunner(BaseCoverageRunner):
             with the results
         """
         res = super(ReportingCoverageRunner, self).run_tests( *args, **kwargs)
 
-        #coverage._the_coverage.load()
-        #covss = coverage.html.HtmlReporter(self.cov)
-        self.cov.html_report(self.modules.values(), directory=self.outdir, ignore_errors=True, omit_prefixes='modeltests')
-        #cov.report(self.modules.values(), self.outdir)
-        #coverage._the_coverage.html_report(self.modules.values(), self.outdir)
-
+        self.cov.html_report(self.modules.values(),
+                                directory=self.outdir,
+                                ignore_errors=True,
+                                omit_prefixes='modeltests')
         print >>sys.stdout
         print >>sys.stdout, _("HTML reports were output to '%s'") %self.outdir
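The add_filter() change in django/db/models/sql/query.py above keeps the extra IS NULL handling for nullable fields when the value of an `in` lookup is itself a query (something exposing an as_sql()/_as_sql() hook) rather than a plain, possibly empty, sequence. Roughly the kind of query affected, using hypothetical Entry/Blog models where Entry.blog is a nullable ForeignKey:

    # The inner QuerySet is passed through as a subquery instead of being
    # mistaken for an empty "foo__in=[]" value by the old "not value" test.
    inner = Blog.objects.filter(name__startswith='A')
    entries = Entry.objects.exclude(blog__in=inner)
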
diff --git a/tests/modeltests/custom_pk/models.py b/tests/modeltests/custom_pk/models.py
index 091f7f32b4..b1d0cb37d0 100644
--- a/tests/modeltests/custom_pk/models.py
+++ b/tests/modeltests/custom_pk/models.py
@@ -9,6 +9,8 @@ this behavior by explicitly adding ``primary_key=True`` to a field.
 from django.conf import settings
 from django.db import models, transaction, IntegrityError
 
+from fields import MyAutoField
+
 class Employee(models.Model):
     employee_code = models.IntegerField(primary_key=True, db_column = 'code')
     first_name = models.CharField(max_length=20)
@@ -28,6 +30,16 @@ class Business(models.Model):
     def __unicode__(self):
         return self.name
 
+class Bar(models.Model):
+    id = MyAutoField(primary_key=True, db_index=True)
+
+    def __unicode__(self):
+        return repr(self.pk)
+
+
+class Foo(models.Model):
+    bar = models.ForeignKey(Bar)
+
 __test__ = {'API_TESTS':"""
 >>> dan = Employee(employee_code=123, first_name='Dan', last_name='Jones')
 >>> dan.save()
@@ -121,6 +133,21 @@ DoesNotExist: Employee matching query does not exist.
 ...     print "Fail with %s" % type(e)
 Pass
 
+# Regression for #10785 -- Custom fields can be used for primary keys.
+>>> new_bar = Bar.objects.create()
+>>> new_foo = Foo.objects.create(bar=new_bar)
+>>> f = Foo.objects.get(bar=new_bar.pk)
+>>> f == new_foo
+True
+>>> f.bar == new_bar
+True
+
+>>> f = Foo.objects.get(bar=new_bar)
+>>> f == new_foo
+True
+>>> f.bar == new_bar
+True
+
 """}
 
 # SQLite lets objects be saved with an empty primary key, even though an
diff --git a/tests/modeltests/defer/models.py b/tests/modeltests/defer/models.py
index ce65065d40..96eb427811 100644
--- a/tests/modeltests/defer/models.py
+++ b/tests/modeltests/defer/models.py
@@ -17,6 +17,12 @@ class Primary(models.Model):
     def __unicode__(self):
         return self.name
 
+class Child(Primary):
+    pass
+
+class BigChild(Primary):
+    other = models.CharField(max_length=50)
+
 def count_delayed_fields(obj, debug=False):
     """
     Returns the number of delayed attributes on the given model instance.
@@ -33,7 +39,7 @@ def count_delayed_fields(obj, debug=False):
 
 __test__ = {"API_TEST": """
 To all outward appearances, instances with deferred fields look the same as
-normal instances when we examine attribut values. Therefore we test for the
+normal instances when we examine attribute values. Therefore we test for the
 number of deferred fields on returned instances (by poking at the internals),
 as a way to observe what is going on.
 
@@ -98,5 +104,89 @@ Using defer() and only() with get() is also valid.
 >>> Primary.objects.all()
 [
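The Child and BigChild models added to the defer tests inherit from Primary, so they exist to exercise defer() and only() across model inheritance. A rough sketch of that kind of check, assuming a Child row named "c1" already exists (Primary defines name, value and related):

    >>> obj = Child.objects.defer("value").get(name="c1")
    >>> count_delayed_fields(obj)
    1
    >>> obj.name = "c2"
    >>> obj.save()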