switch ggtt from agp_map to io_mapping
author    jsg <jsg@openbsd.org>
Wed, 20 Mar 2024 06:02:19 +0000 (06:02 +0000)
committer jsg <jsg@openbsd.org>
Wed, 20 Mar 2024 06:02:19 +0000 (06:02 +0000)
reduces the diff to linux, no functional change
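
For context, the change is mechanical: everywhere the OpenBSD-only agp_map_atomic()/bus_space_vaddr() pair produced a CPU pointer into the mappable GGTT aperture, the shared Linux io_mapping helpers are used instead, backed by the struct io_mapping that ggtt_init_hw() now fills in from bus_space_map(). A minimal sketch of the before/after pattern follows; the offset and surrounding locals are placeholders for illustration, not code taken from the tree:

    /* old, OpenBSD-specific path: map a window of the aperture via agp_map */
    bus_space_handle_t ioh;
    void *vaddr;

    agp_map_atomic(i915->agph, offset, &ioh);
    vaddr = bus_space_vaddr(i915->bst, ioh);
    /* ... access vaddr ... */
    agp_unmap_atomic(i915->agph, ioh);

    /* new, shared with linux: ggtt->iomap covers the same aperture, so the
     * generic io_mapping helpers work unchanged on both platforms */
    vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
    /* ... access vaddr ... */
    io_mapping_unmap_atomic(vaddr);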

sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
sys/dev/pci/drm/i915/gt/intel_ggtt.c
sys/dev/pci/drm/i915/gt/intel_gtt.h
sys/dev/pci/drm/i915/i915_drv.h
sys/dev/pci/drm/i915/i915_gem.c
sys/dev/pci/drm/i915/i915_vma.c

diff --git a/sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c b/sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
index 794750f..4aee320 100644
--- a/sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
+++ b/sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
@@ -11,9 +11,6 @@
 
 #include <drm/drm_syncobj.h>
 
-#include <dev/pci/pcivar.h>
-#include <dev/pci/agpvar.h>
-
 #include "display/intel_frontbuffer.h"
 
 #include "gem/i915_gem_ioctls.h"
@@ -294,10 +291,6 @@ struct i915_execbuffer {
                bool has_llc : 1;
                bool has_fence : 1;
                bool needs_unfenced : 1;
-
-               struct agp_map *map;
-               bus_space_tag_t iot;
-               bus_space_handle_t ioh;
        } reloc_cache;
 
        u64 invalid_flags; /** Set of execobj.flags that are invalid */
@@ -1133,9 +1126,6 @@ static void reloc_cache_init(struct reloc_cache *cache,
        cache->has_fence = cache->graphics_ver < 4;
        cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment;
        cache->node.flags = 0;
-
-       cache->map = i915->agph;
-       cache->iot = i915->bst;
 }
 
 static inline void *unmask_page(unsigned long p)
@@ -1168,11 +1158,7 @@ static void reloc_cache_unmap(struct reloc_cache *cache)
        if (cache->vaddr & KMAP)
                kunmap_atomic(vaddr);
        else
-#ifdef __linux__
                io_mapping_unmap_atomic((void __iomem *)vaddr);
-#else
-               agp_unmap_atomic(cache->map, cache->ioh);
-#endif
 }
 
 static void reloc_cache_remap(struct reloc_cache *cache,
@@ -1197,14 +1183,8 @@ static void reloc_cache_remap(struct reloc_cache *cache,
                if (!drm_mm_node_allocated(&cache->node))
                        offset += cache->page << PAGE_SHIFT;
 
-#ifdef __linux__
                cache->vaddr = (unsigned long)
                        io_mapping_map_atomic_wc(&ggtt->iomap, offset);
-#else
-               agp_map_atomic(cache->map, offset, &cache->ioh);
-               cache->vaddr = (unsigned long)
-                       bus_space_vaddr(cache->iot, cache->ioh);
-#endif
        }
 }
 
@@ -1228,11 +1208,7 @@ static void reloc_cache_reset(struct reloc_cache *cache, struct i915_execbuffer
                struct i915_ggtt *ggtt = cache_to_ggtt(cache);
 
                intel_gt_flush_ggtt_writes(ggtt->vm.gt);
-#ifdef __linux__
                io_mapping_unmap_atomic((void __iomem *)vaddr);
-#else
-               agp_unmap_atomic(cache->map, cache->ioh);
-#endif
 
                if (drm_mm_node_allocated(&cache->node)) {
                        ggtt->vm.clear_range(&ggtt->vm,
@@ -1299,11 +1275,7 @@ static void *reloc_iomap(struct i915_vma *batch,
 
        if (cache->vaddr) {
                intel_gt_flush_ggtt_writes(ggtt->vm.gt);
-#ifdef __linux__
                io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
-#else
-               agp_unmap_atomic(cache->map, cache->ioh);
-#endif
        } else {
                struct i915_vma *vma = ERR_PTR(-ENODEV);
                int err;
@@ -1365,13 +1337,8 @@ static void *reloc_iomap(struct i915_vma *batch,
                offset += page << PAGE_SHIFT;
        }
 
-#ifdef __linux__
        vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->iomap,
                                                         offset);
-#else
-       agp_map_atomic(cache->map, offset, &cache->ioh);
-       vaddr = bus_space_vaddr(cache->iot, cache->ioh);
-#endif
        cache->page = page;
        cache->vaddr = (unsigned long)vaddr;
 
diff --git a/sys/dev/pci/drm/i915/gt/intel_ggtt.c b/sys/dev/pci/drm/i915/gt/intel_ggtt.c
index 407b09b..e080d45 100644
--- a/sys/dev/pci/drm/i915/gt/intel_ggtt.c
+++ b/sys/dev/pci/drm/i915/gt/intel_ggtt.c
@@ -28,9 +28,6 @@
 #include "intel_gtt.h"
 #include "gen8_ppgtt.h"
 
-#include <dev/pci/pcivar.h>
-#include <dev/pci/agpvar.h>
-
 static void i915_ggtt_color_adjust(const struct drm_mm_node *node,
                                   unsigned long color,
                                   u64 *start,
@@ -53,7 +50,6 @@ static void i915_ggtt_color_adjust(const struct drm_mm_node *node,
 static int ggtt_init_hw(struct i915_ggtt *ggtt)
 {
        struct drm_i915_private *i915 = ggtt->vm.i915;
-       int i;
 
        i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
 
@@ -77,6 +73,9 @@ static int ggtt_init_hw(struct i915_ggtt *ggtt)
                ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start,
                                              ggtt->mappable_end);
 #else
+               bus_space_handle_t bsh;
+               int i;
+
                /* XXX would be a lot nicer to get agp info before now */
                uvm_page_physload(atop(ggtt->gmadr.start),
                    atop(ggtt->gmadr.start + ggtt->mappable_end),
@@ -94,11 +93,13 @@ static int ggtt_init_hw(struct i915_ggtt *ggtt)
                for (i = 0; i < atop(ggtt->mappable_end); i++)
                        atomic_setbits_int(&(i915->pgs[i].pg_flags),
                            PG_PMAP_WC);
-               if (agp_init_map(i915->bst, ggtt->gmadr.start,
+               if (bus_space_map(i915->bst, ggtt->gmadr.start,
                    ggtt->mappable_end,
-                   BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_PREFETCHABLE,
-                   &i915->agph))
+                   BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_PREFETCHABLE, &bsh))
                        panic("can't map aperture");
+               ggtt->iomap.base = ggtt->gmadr.start;
+               ggtt->iomap.size = ggtt->mappable_end;
+               ggtt->iomap.iomem = bus_space_vaddr(i915->bst, bsh);
 #endif
        }
 
diff --git a/sys/dev/pci/drm/i915/gt/intel_gtt.h b/sys/dev/pci/drm/i915/gt/intel_gtt.h
index 99f1577..9ace406 100644
--- a/sys/dev/pci/drm/i915/gt/intel_gtt.h
+++ b/sys/dev/pci/drm/i915/gt/intel_gtt.h
@@ -358,9 +358,7 @@ struct i915_address_space {
 struct i915_ggtt {
        struct i915_address_space vm;
 
-#ifdef notyet
        struct io_mapping iomap;        /* Mapping to our CPU mappable region */
-#endif
        struct resource gmadr;          /* GMADR resource */
        resource_size_t mappable_end;   /* End offset that we can CPU map */
 
diff --git a/sys/dev/pci/drm/i915/i915_drv.h b/sys/dev/pci/drm/i915/i915_drv.h
index 4b1f268..f2ccc7d 100644
--- a/sys/dev/pci/drm/i915/i915_drv.h
+++ b/sys/dev/pci/drm/i915/i915_drv.h
@@ -217,7 +217,6 @@ struct inteldrm_softc {
        bus_dma_tag_t dmat;
        bus_space_tag_t iot;
        bus_space_tag_t bst;
-       struct agp_map *agph;
        bus_space_handle_t opregion_ioh;
        bus_space_handle_t opregion_rvda_ioh;
        bus_size_t opregion_rvda_size;
diff --git a/sys/dev/pci/drm/i915/i915_gem.c b/sys/dev/pci/drm/i915/i915_gem.c
index abcdf26..826d424 100644
--- a/sys/dev/pci/drm/i915/i915_gem.c
+++ b/sys/dev/pci/drm/i915/i915_gem.c
@@ -39,8 +39,6 @@
 #include <drm/drm_cache.h>
 #include <drm/drm_vma_manager.h>
 
-#include <dev/pci/agpvar.h>
-
 #include "display/intel_display.h"
 #include "display/intel_frontbuffer.h"
 
@@ -279,7 +277,6 @@ err_unlock:
        return ret;
 }
 
-#ifdef __linux__
 static inline bool
 gtt_user_read(struct io_mapping *mapping,
              loff_t base, int offset,
@@ -303,34 +300,6 @@ gtt_user_read(struct io_mapping *mapping,
        }
        return unwritten;
 }
-#else
-static inline bool
-gtt_user_read(struct drm_i915_private *dev_priv,
-             loff_t base, int offset,
-             char __user *user_data, int length)
-{
-       bus_space_handle_t bsh;
-       void __iomem *vaddr;
-       unsigned long unwritten;
-
-       /* We can use the cpu mem copy function because this is X86. */
-       agp_map_atomic(dev_priv->agph, base, &bsh);
-       vaddr = bus_space_vaddr(dev_priv->bst, bsh);
-       unwritten = __copy_to_user_inatomic(user_data,
-                                           (void __force *)vaddr + offset,
-                                           length);
-       agp_unmap_atomic(dev_priv->agph, bsh);
-       if (unwritten) {
-               agp_map_subregion(dev_priv->agph, base, PAGE_SIZE, &bsh);
-               vaddr = bus_space_vaddr(dev_priv->bst, bsh);
-               unwritten = copy_to_user(user_data,
-                                        (void __force *)vaddr + offset,
-                                        length);
-               agp_unmap_subregion(dev_priv->agph, bsh, PAGE_SIZE);
-       }
-       return unwritten;
-}
-#endif
 
 static struct i915_vma *i915_gem_gtt_prepare(struct drm_i915_gem_object *obj,
                                             struct drm_mm_node *node,
@@ -460,7 +429,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
                        page_base += offset & LINUX_PAGE_MASK;
                }
 
-               if (gtt_user_read(i915, page_base, page_offset,
+               if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
                                  user_data, page_length)) {
                        ret = -EFAULT;
                        break;
@@ -542,7 +511,7 @@ out:
 /* This is the fast write path which cannot handle
  * page faults in the source data
  */
-#ifdef __linux__
+
 static inline bool
 ggtt_write(struct io_mapping *mapping,
           loff_t base, int offset,
@@ -565,33 +534,6 @@ ggtt_write(struct io_mapping *mapping,
 
        return unwritten;
 }
-#else
-static inline bool
-ggtt_write(struct drm_i915_private *dev_priv,
-          loff_t base, int offset,
-          char __user *user_data, int length)
-{
-       bus_space_handle_t bsh;
-       void __iomem *vaddr;
-       unsigned long unwritten;
-
-       /* We can use the cpu mem copy function because this is X86. */
-       agp_map_atomic(dev_priv->agph, base, &bsh);
-       vaddr = bus_space_vaddr(dev_priv->bst, bsh);
-       unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
-                                                     user_data, length);
-       agp_unmap_atomic(dev_priv->agph, bsh);
-       if (unwritten) {
-               agp_map_subregion(dev_priv->agph, base, PAGE_SIZE, &bsh);
-               vaddr = bus_space_vaddr(dev_priv->bst, bsh);
-               unwritten = copy_from_user((void __force *)vaddr + offset,
-                                          user_data, length);
-               agp_unmap_subregion(dev_priv->agph, bsh, PAGE_SIZE);
-       }
-
-       return unwritten;
-}
-#endif
 
 /**
  * i915_gem_gtt_pwrite_fast - This is the fast pwrite path, where we copy the data directly from the
@@ -674,7 +616,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
                 * If the object is non-shmem backed, we retry again with the
                 * path that handles page fault.
                 */
-               if (ggtt_write(i915, page_base, page_offset,
+               if (ggtt_write(&ggtt->iomap, page_base, page_offset,
                               user_data, page_length)) {
                        ret = -EFAULT;
                        break;
diff --git a/sys/dev/pci/drm/i915/i915_vma.c b/sys/dev/pci/drm/i915/i915_vma.c
index 5c3aa47..21237cd 100644
--- a/sys/dev/pci/drm/i915/i915_vma.c
+++ b/sys/dev/pci/drm/i915/i915_vma.c
@@ -43,8 +43,6 @@
 #include "i915_vma.h"
 #include "i915_vma_resource.h"
 
-#include <dev/pci/agpvar.h>
-
 static inline void assert_vma_held_evict(const struct i915_vma *vma)
 {
        /*
@@ -582,22 +580,9 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
                        ptr = i915_gem_object_lmem_io_map(vma->obj, 0,
                                                          vma->obj->base.size);
                } else if (i915_vma_is_map_and_fenceable(vma)) {
-#ifdef __linux__
                        ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
                                                i915_vma_offset(vma),
                                                i915_vma_size(vma));
-#else
-               {
-                       struct drm_i915_private *dev_priv = vma->vm->i915;
-                       err = agp_map_subregion(dev_priv->agph, i915_vma_offset(vma),
-                           i915_vma_size(vma), &vma->bsh);
-                       if (err) {
-                               err = -err;
-                               goto err;
-                       }
-                       ptr = bus_space_vaddr(dev_priv->bst, vma->bsh);
-               }
-#endif
                } else {
                        ptr = (void __iomem *)
                                i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
@@ -616,10 +601,8 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
                if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
                        if (page_unmask_bits(ptr))
                                __i915_gem_object_release_map(vma->obj);
-#ifdef __linux__
                        else
                                io_mapping_unmap(ptr);
-#endif
                        ptr = vma->iomap;
                }
        }
@@ -1879,14 +1862,8 @@ static void __i915_vma_iounmap(struct i915_vma *vma)
 
        if (page_unmask_bits(vma->iomap))
                __i915_gem_object_release_map(vma->obj);
-       else {
-#ifdef __linux__
+       else
                io_mapping_unmap(vma->iomap);
-#else
-               struct drm_i915_private *dev_priv = vma->vm->i915;
-               agp_unmap_subregion(dev_priv->agph, vma->bsh, vma->node.size);
-#endif
-       }
        vma->iomap = NULL;
 }