drm/i915: Require the vm mutex for i915_vma_bind()
author jsg <jsg@openbsd.org>
Fri, 22 Jul 2022 06:16:07 +0000 (06:16 +0000)
committer jsg <jsg@openbsd.org>
Fri, 22 Jul 2022 06:16:07 +0000 (06:16 +0000)
From Thomas Hellstrom
a6cecaf058c48c6def2548473d814a2d54cb3667 in linux 5.15.y/5.15.56
c2ea703dcafccf18d7d77d8b68fb08c2d9842b7a in mainline linux

sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
sys/dev/pci/drm/i915/i915_vma.c

index 69cf1d2..d592c6e 100644 (file)
@@ -1070,6 +1070,57 @@ static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache)
        return &i915->ggtt;
 }
 
+static void reloc_cache_unmap(struct reloc_cache *cache)
+{
+       void *vaddr;
+
+       if (!cache->vaddr)
+               return;
+
+       vaddr = unmask_page(cache->vaddr);
+       if (cache->vaddr & KMAP)
+               kunmap_atomic(vaddr);
+       else
+#ifdef __linux__
+               io_mapping_unmap_atomic((void __iomem *)vaddr);
+#else
+               agp_unmap_atomic(cache->map, cache->ioh);
+#endif
+}
+
+static void reloc_cache_remap(struct reloc_cache *cache,
+                             struct drm_i915_gem_object *obj)
+{
+       void *vaddr;
+
+       if (!cache->vaddr)
+               return;
+
+       if (cache->vaddr & KMAP) {
+               struct vm_page *page = i915_gem_object_get_page(obj, cache->page);
+
+               vaddr = kmap_atomic(page);
+               cache->vaddr = unmask_flags(cache->vaddr) |
+                       (unsigned long)vaddr;
+       } else {
+               struct i915_ggtt *ggtt = cache_to_ggtt(cache);
+               unsigned long offset;
+
+               offset = cache->node.start;
+               if (!drm_mm_node_allocated(&cache->node))
+                       offset += cache->page << PAGE_SHIFT;
+
+#ifdef __linux__
+               cache->vaddr = (unsigned long)
+                       io_mapping_map_atomic_wc(&ggtt->iomap, offset);
+#else
+               agp_map_atomic(cache->map, offset, &cache->ioh);
+               cache->vaddr = (unsigned long)
+                       bus_space_vaddr(cache->iot, cache->ioh);
+#endif
+       }
+}
+
 static void reloc_cache_reset(struct reloc_cache *cache, struct i915_execbuffer *eb)
 {
        void *vaddr;
@@ -1347,10 +1398,17 @@ eb_relocate_entry(struct i915_execbuffer *eb,
                 * batchbuffers.
                 */
                if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
-                   GRAPHICS_VER(eb->i915) == 6) {
+                   GRAPHICS_VER(eb->i915) == 6 &&
+                   !i915_vma_is_bound(target->vma, I915_VMA_GLOBAL_BIND)) {
+                       struct i915_vma *vma = target->vma;
+
+                       reloc_cache_unmap(&eb->reloc_cache);
+                       mutex_lock(&vma->vm->mutex);
                        err = i915_vma_bind(target->vma,
                                            target->vma->obj->cache_level,
                                            PIN_GLOBAL, NULL);
+                       mutex_unlock(&vma->vm->mutex);
+                       reloc_cache_remap(&eb->reloc_cache, ev->vma->obj);
                        if (err)
                                return err;
                }
index 8a20291..29c7414 100644 (file)
@@ -388,6 +388,7 @@ int i915_vma_bind(struct i915_vma *vma,
        u32 bind_flags;
        u32 vma_flags;
 
+       lockdep_assert_held(&vma->vm->mutex);
        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
        GEM_BUG_ON(vma->size > vma->node.size);