Index: i915_gem.c =================================================================== RCS file: /cvsroot/src/sys/external/bsd/drm2/dist/drm/i915/i915_gem.c,v retrieving revision 1.23 diff -p -u -r1.23 i915_gem.c --- i915_gem.c 28 Feb 2015 04:47:44 -0000 1.23 +++ i915_gem.c 1 Mar 2015 20:22:44 -0000 @@ -578,6 +578,8 @@ int i915_gem_obj_prepare_shmem_read(stru ret = i915_gem_object_wait_rendering(obj, true); if (ret) return ret; + WARN_ON(obj->active); /* XXX BUG */ + WARN(obj->base.write_domain & I915_GEM_GPU_DOMAINS, "post wait_rendering write domain %"PRIx32"\n", obj->base.write_domain); /* XXX BUG */ } ret = i915_gem_object_get_pages(obj); @@ -917,6 +919,8 @@ i915_gem_gtt_pwrite_fast(struct drm_devi ret = i915_gem_object_set_to_gtt_domain(obj, true); if (ret) goto out_unpin; + WARN_ON(obj->active); /* XXX BUG */ + WARN(obj->base.write_domain & I915_GEM_GPU_DOMAINS, "post set-to-gtt write domain %"PRIx32"\n", obj->base.write_domain); /* XXX BUG */ ret = i915_gem_object_put_fence(obj); if (ret) @@ -1062,6 +1066,8 @@ i915_gem_shmem_pwrite(struct drm_device ret = i915_gem_object_wait_rendering(obj, false); if (ret) return ret; + WARN_ON(obj->active); /* XXX BUG */ + WARN(obj->base.write_domain & I915_GEM_GPU_DOMAINS, "post rendering write domain %"PRIx32"\n", obj->base.write_domain); /* XXX BUG */ } /* Same trick applies to invalidate partially written cachelines read * before writing. 
*/ @@ -1608,6 +1614,9 @@ static int i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj, struct intel_ring_buffer *ring) { + WARN_ON(!obj->active); /* XXX BUG */ + WARN(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS, "pre retire in non-GPU domain %"PRIx32"\n", obj->base.write_domain); /* XXX BUG */ + WARN(!(obj->base.write_domain & I915_GEM_GPU_DOMAINS), "pre retire not in GPU domain %"PRIx32"\n", obj->base.write_domain); /* XXX BUG */ i915_gem_retire_requests_ring(ring); /* Manually manage the write flush as we may have not yet @@ -1620,6 +1629,10 @@ i915_gem_object_wait_rendering__tail(str obj->last_write_seqno = 0; obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS; + WARN_ON(obj->active); /* XXX BUG */ + WARN(obj->base.write_domain & I915_GEM_GPU_DOMAINS, "post retire write domain %"PRIx32"\n", obj->base.write_domain); /* XXX BUG */ + + return 0; } @@ -1639,11 +1652,21 @@ i915_gem_object_wait_rendering(struct dr if (seqno == 0) return 0; + WARN_ON(!obj->active); /* XXX BUG */ + WARN(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS, "in wait_rendering write domain non-GPU %"PRIx32"\n", obj->base.write_domain); /* XXX BUG */ + WARN(!(obj->base.write_domain & I915_GEM_GPU_DOMAINS), "in wait_rendering write domain not in GPU %"PRIx32"\n", obj->base.write_domain); /* XXX BUG */ + ret = i915_wait_seqno(ring, seqno); if (ret) return ret; - return i915_gem_object_wait_rendering__tail(obj, ring); + ret = i915_gem_object_wait_rendering__tail(obj, ring); + if (ret) + return ret; + + WARN_ON(obj->active); /* XXX BUG */ + WARN(obj->base.write_domain & I915_GEM_GPU_DOMAINS, "post wait_rendering__tail write domain %"PRIx32"\n", obj->base.write_domain); /* XXX BUG */ + return 0; } /* A nonblocking variant of the above wait. 
This is a highly dangerous routine @@ -1668,6 +1691,10 @@ i915_gem_object_wait_rendering__nonblock if (seqno == 0) return 0; + WARN_ON(!obj->active); /* XXX BUG */ + WARN(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS, "in wait_rendering__nonblocking write domain non-GPU %"PRIx32"\n", obj->base.write_domain); /* XXX BUG */ + WARN(!(obj->base.write_domain & I915_GEM_GPU_DOMAINS), "in wait_rendering__nonblocking write domain not in GPU %"PRIx32"\n", obj->base.write_domain); /* XXX BUG */ + ret = i915_gem_check_wedge(&dev_priv->gpu_error, true); if (ret) return ret; @@ -1683,7 +1710,13 @@ i915_gem_object_wait_rendering__nonblock if (ret) return ret; - return i915_gem_object_wait_rendering__tail(obj, ring); + ret = i915_gem_object_wait_rendering__tail(obj, ring); + if (ret) + return ret; + + WARN_ON(obj->active); /* XXX BUG */ + WARN(obj->base.write_domain & I915_GEM_GPU_DOMAINS, "post wait_rendering__tail write domain %"PRIx32"\n", obj->base.write_domain); /* XXX BUG */ + return 0; } /** @@ -1735,8 +1768,15 @@ i915_gem_set_domain_ioctl(struct drm_dev if (ret) goto unref; + WARN_ON(obj->active); /* XXX BUG */ + WARN(obj->base.write_domain & I915_GEM_GPU_DOMAINS, "post wait_rendering__nonblocking write domain %"PRIx32"\n", obj->base.write_domain); /* XXX BUG */ + if (read_domains & I915_GEM_DOMAIN_GTT) { ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0); + if (ret == 0) { + WARN_ON(obj->active); /* XXX BUG */ + WARN(obj->base.write_domain & I915_GEM_GPU_DOMAINS, "post set-to-gtt write domain %"PRIx32"\n", obj->base.write_domain); /* XXX BUG */ + } /* Silently promote "you're not bound, there was nothing to do" * to success, since the client was just asking us to @@ -1746,6 +1786,10 @@ i915_gem_set_domain_ioctl(struct drm_dev ret = 0; } else { ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0); + if (ret) + goto unref; + WARN_ON(obj->active); /* XXX BUG */ + WARN(obj->base.write_domain & I915_GEM_GPU_DOMAINS, "post set-to-cpu write domain 
%"PRIx32"\n", obj->base.write_domain); /* XXX BUG */ } unref: @@ -1893,6 +1937,9 @@ i915_gem_fault(struct uvm_faultinfo *ufi if (ret) goto unlock; + WARN_ON(obj->active); /* XXX BUG */ + WARN(obj->base.write_domain & I915_GEM_GPU_DOMAINS, "post wait_rendering__nonblocking write domain %"PRIx32"\n", obj->base.write_domain); /* XXX BUG */ + if ((obj->cache_level != I915_CACHE_NONE) && !HAS_LLC(dev)) { ret = -EINVAL; goto unlock; @@ -1905,6 +1952,8 @@ i915_gem_fault(struct uvm_faultinfo *ufi ret = i915_gem_object_set_to_gtt_domain(obj, write); if (ret) goto unpin; + WARN_ON(obj->active); /* XXX BUG */ + WARN(obj->base.write_domain & I915_GEM_GPU_DOMAINS, "post set-to-gtt write domain %"PRIx32"\n", obj->base.write_domain); /* XXX BUG */ ret = i915_gem_object_get_fence(obj); if (ret) @@ -2064,6 +2113,9 @@ int i915_gem_fault(struct vm_area_struct if (ret) goto unlock; + WARN_ON(obj->active); /* XXX BUG */ + WARN(obj->base.write_domain & I915_GEM_GPU_DOMAINS, "post wait_rendering__nonblocking write domain %"PRIx32"\n", obj->base.write_domain); /* XXX BUG */ + /* Access to snoopable pages through the GTT is incoherent. 
*/ if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) { ret = -EINVAL; @@ -2078,6 +2130,8 @@ int i915_gem_fault(struct vm_area_struct ret = i915_gem_object_set_to_gtt_domain(obj, write); if (ret) goto unpin; + WARN_ON(obj->active); /* XXX BUG */ + WARN(obj->base.write_domain & I915_GEM_GPU_DOMAINS, "post set-to-gtt write domain %"PRIx32"\n", obj->base.write_domain); /* XXX BUG */ ret = i915_gem_object_get_fence(obj); if (ret) @@ -2427,9 +2481,13 @@ i915_gem_object_put_pages_gtt(struct drm if (ret) { WARN_ON(ret != -EIO); i915_gem_clflush_object(obj, true); + WARN_ON(obj->active); /* XXX BUG */ + WARN(obj->base.write_domain & I915_GEM_GPU_DOMAINS, "set-to-cpu failed, write domain %"PRIx32"\n", obj->base.write_domain); /* XXX BUG */ obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU; } + WARN_ON(obj->active); /* XXX BUG */ + WARN(obj->base.write_domain & I915_GEM_GPU_DOMAINS, "post set-to-cpu write domain %"PRIx32"\n", obj->base.write_domain); /* XXX BUG */ if (i915_gem_object_needs_bit17_swizzle(obj)) i915_gem_object_save_bit_17_swizzle(obj); @@ -2458,8 +2516,12 @@ i915_gem_object_put_pages_gtt(struct drm */ WARN_ON(ret != -EIO); i915_gem_clflush_object(obj, true); + WARN_ON(obj->active); /* XXX BUG */ + WARN(obj->base.write_domain & I915_GEM_GPU_DOMAINS, "set-to-cpu failed, write domain %"PRIx32"\n", obj->base.write_domain); /* XXX BUG */ obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU; } + WARN_ON(obj->active); /* XXX BUG */ + WARN(obj->base.write_domain & I915_GEM_GPU_DOMAINS, "post set-to-cpu write domain %"PRIx32"\n", obj->base.write_domain); /* XXX BUG */ if (i915_gem_object_needs_bit17_swizzle(obj)) i915_gem_object_save_bit_17_swizzle(obj); @@ -2506,6 +2568,9 @@ i915_gem_object_put_pages(struct drm_i91 ops->put_pages(obj); obj->pages = NULL; + WARN_ON(obj->active); /* XXX BUG */ + WARN(obj->base.write_domain & I915_GEM_GPU_DOMAINS, "post put_pages write domain %"PRIx32"\n", obj->base.write_domain); /* XXX BUG */ 
+ if (i915_gem_object_is_purgeable(obj)) i915_gem_object_truncate(obj); @@ -2525,6 +2590,8 @@ __i915_gem_shrink(struct drm_i915_privat global_list) { if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) && i915_gem_object_put_pages(obj) == 0) { + WARN_ON(obj->active); /* XXX BUG */ + WARN(obj->base.write_domain & I915_GEM_GPU_DOMAINS, "post put_pages write domain %"PRIx32"\n", obj->base.write_domain); /* XXX BUG */ count += obj->base.size >> PAGE_SHIFT; if (count >= target) return count; @@ -2574,8 +2641,11 @@ __i915_gem_shrink(struct drm_i915_privat if (i915_vma_unbind(vma)) break; - if (i915_gem_object_put_pages(obj) == 0) + if (i915_gem_object_put_pages(obj) == 0) { + WARN_ON(obj->active); /* XXX BUG */ + WARN(obj->base.write_domain & I915_GEM_GPU_DOMAINS, "post put_pages write domain %"PRIx32"\n", obj->base.write_domain); /* XXX BUG */ count += obj->base.size >> PAGE_SHIFT; + } drm_gem_object_unreference(&obj->base); } @@ -2600,8 +2670,11 @@ i915_gem_shrink_all(struct drm_i915_priv list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, global_list) { - if (i915_gem_object_put_pages(obj) == 0) + if (i915_gem_object_put_pages(obj) == 0) { + WARN_ON(obj->active); /* XXX BUG */ + WARN(obj->base.write_domain & I915_GEM_GPU_DOMAINS, "post put_pages write domain %"PRIx32"\n", obj->base.write_domain); /* XXX BUG */ freed += obj->base.size >> PAGE_SHIFT; + } } return freed; } @@ -2825,6 +2898,8 @@ i915_gem_object_move_to_active(struct dr struct drm_i915_private *dev_priv = dev->dev_private; u32 seqno = intel_ring_get_seqno(ring); + WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex)); + BUG_ON(ring == NULL); if (obj->ring != ring && obj->last_write_seqno) { /* Keep the seqno relative to the current ring */ @@ -2871,6 +2946,13 @@ i915_gem_object_move_to_inactive(struct struct i915_address_space *vm; struct i915_vma *vma; + WARN_ON(obj->ring == NULL); + WARN_ON(obj->ring && !mutex_is_locked(&obj->ring->dev->struct_mutex)); + + WARN_ON(!obj->active); + 
WARN(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS, "move to inactive write domain non-GPU %"PRIx32"\n", obj->base.write_domain); + WARN(!(obj->base.write_domain & I915_GEM_GPU_DOMAINS), "move to inactive write domain not GPU %"PRIx32"\n", obj->base.write_domain); + if ((obj->base.write_domain & I915_GEM_DOMAIN_GTT) != 0) { #if 0 printk(KERN_ERR "%s: %p 0x%x flushing gtt\n", __func__, obj, @@ -3456,6 +3538,10 @@ i915_gem_wait_ioctl(struct drm_device *d goto out; } + WARN_ON(!obj->active); + WARN(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS, "gem wait ioctl pre-wait write domain non-GPU %"PRIx32"\n", obj->base.write_domain); + WARN(!(obj->base.write_domain & I915_GEM_GPU_DOMAINS), "gem wait ioctl pre-wait write domain not GPU %"PRIx32"\n", obj->base.write_domain); + drm_gem_object_unreference(&obj->base); reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); mutex_unlock(&dev->struct_mutex); @@ -3465,6 +3551,9 @@ i915_gem_wait_ioctl(struct drm_device *d args->timeout_ns = timespec_to_ns(timeout); + WARN_ON(obj->active); + WARN(obj->base.write_domain & I915_GEM_GPU_DOMAINS, "gem wait ioctl post-wait write domain GPU %"PRIx32"\n", obj->base.write_domain); + return ret; out: drm_gem_object_unreference(&obj->base); mutex_unlock(&dev->struct_mutex); @@ -3494,8 +3583,14 @@ i915_gem_object_sync(struct drm_i915_gem if (from == NULL || to == from) return 0; - if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev)) - return i915_gem_object_wait_rendering(obj, false); + if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev)) { + ret = i915_gem_object_wait_rendering(obj, false); + if (ret) + return ret; + WARN_ON(obj->active); /* XXX BUG */ + WARN(obj->base.write_domain & I915_GEM_GPU_DOMAINS, "post wait_rendering write domain %"PRIx32"\n", obj->base.write_domain); /* XXX BUG */ + return 0; + } idx = intel_ring_sync_index(from, to); @@ -4257,6 +4352,8 @@ i915_gem_object_set_to_gtt_domain(struct if (ret) return ret; + WARN_ON(obj->active); /* XXX BUG */ + 
WARN(obj->base.write_domain & I915_GEM_GPU_DOMAINS, "post wait_rendering write domain %"PRIx32"\n", obj->base.write_domain); /* XXX BUG */ i915_gem_object_flush_cpu_write_domain(obj, false); /* Serialise direct access to this object with the barriers for @@ -4323,8 +4420,13 @@ int i915_gem_object_set_cache_level(stru ret = i915_gem_object_finish_gpu(obj); if (ret) return ret; + WARN_ON(obj->active); /* XXX BUG */ + WARN(obj->base.write_domain & I915_GEM_GPU_DOMAINS, "post finish_gpu write domain %"PRIx32"\n", obj->base.write_domain); /* XXX BUG */ i915_gem_object_finish_gtt(obj); + WARN_ON(obj->active); /* XXX BUG */ + WARN(obj->base.write_domain & I915_GEM_GPU_DOMAINS, "post finish_gtt write domain %"PRIx32"\n", obj->base.write_domain); /* XXX BUG */ + WARN(obj->base.write_domain & I915_GEM_DOMAIN_GTT, "post finish_gtt write domain %"PRIx32"\n", obj->base.write_domain); /* XXX BUG */ /* Before SandyBridge, you could not use tiling or fence * registers with snooped memory, so relinquish any fences @@ -4518,6 +4620,9 @@ i915_gem_object_pin_to_display_plane(str i915_gem_object_flush_cpu_write_domain(obj, true); + WARN_ON(obj->active); /* XXX BUG */ + WARN(obj->base.write_domain & I915_GEM_GPU_DOMAINS, "post flush_cpu_write_domain write domain %"PRIx32"\n", obj->base.write_domain); /* XXX BUG */ + old_write_domain = obj->base.write_domain; old_read_domains = obj->base.read_domains; @@ -4557,6 +4662,9 @@ i915_gem_object_finish_gpu(struct drm_i9 if (ret) return ret; + WARN_ON(obj->active); /* XXX BUG */ + WARN(obj->base.write_domain & I915_GEM_GPU_DOMAINS, "post wait_rendering write domain %"PRIx32"\n", obj->base.write_domain); /* XXX BUG */ + /* Ensure that we invalidate the GPU's caches and TLBs. 
*/ obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS; return 0; @@ -4581,6 +4689,9 @@ i915_gem_object_set_to_cpu_domain(struct if (ret) return ret; + WARN_ON(obj->active); /* XXX BUG */ + WARN(obj->base.write_domain & I915_GEM_GPU_DOMAINS, "post wait_rendering write domain %"PRIx32"\n", obj->base.write_domain); /* XXX BUG */ + i915_gem_object_flush_gtt_write_domain(obj); old_write_domain = obj->base.write_domain; @@ -5060,6 +5171,8 @@ void i915_gem_free_object(struct drm_gem if (WARN_ON(obj->pages_pin_count)) obj->pages_pin_count = 0; i915_gem_object_put_pages(obj); + WARN_ON(obj->active); /* XXX BUG */ + WARN(obj->base.write_domain & I915_GEM_GPU_DOMAINS, "post put_pages write domain %"PRIx32"\n", obj->base.write_domain); /* XXX BUG */ i915_gem_object_free_mmap_offset(obj); i915_gem_object_release_stolen(obj); Index: i915_gem_execbuffer.c =================================================================== RCS file: /cvsroot/src/sys/external/bsd/drm2/dist/drm/i915/i915_gem_execbuffer.c,v retrieving revision 1.5 diff -p -u -r1.5 i915_gem_execbuffer.c --- i915_gem_execbuffer.c 27 Feb 2015 16:02:03 -0000 1.5 +++ i915_gem_execbuffer.c 1 Mar 2015 20:22:44 -0000 @@ -292,6 +292,8 @@ relocate_entry_cpu(struct drm_i915_gem_o ret = i915_gem_object_set_to_cpu_domain(obj, true); if (ret) return ret; + WARN_ON(obj->active); /* XXX BUG */ + WARN_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); /* XXX BUG */ vaddr = kmap_atomic(i915_gem_object_get_page(obj, reloc->offset >> PAGE_SHIFT)); @@ -327,6 +329,8 @@ relocate_entry_gtt(struct drm_i915_gem_o ret = i915_gem_object_set_to_gtt_domain(obj, true); if (ret) return ret; + WARN_ON(obj->active); /* XXX BUG */ + WARN_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); /* XXX BUG */ ret = i915_gem_object_put_fence(obj); if (ret) @@ -1003,6 +1007,8 @@ i915_gem_execbuffer_move_to_active(struc u32 old_read = obj->base.read_domains; u32 old_write = obj->base.write_domain; + WARN_ON(obj->active); /* not active yet */ /* XXX BUG */ + 
WARN_ON(obj->base.pending_write_domain & ~I915_GEM_GPU_DOMAINS); /* XXX BUG */ obj->base.write_domain = obj->base.pending_write_domain; if (obj->base.write_domain == 0) obj->base.pending_read_domains |= obj->base.read_domains;