From e64fda4000380e0dbfa2725857f96ea40e6cf019 Mon Sep 17 00:00:00 2001
From: jsg
Date: Wed, 20 Mar 2024 06:02:19 +0000
Subject: [PATCH] switch ggtt from agp_map to io_mapping

reduces the diff to linux, no functional change
---
 .../pci/drm/i915/gem/i915_gem_execbuffer.c | 33 ----------
 sys/dev/pci/drm/i915/gt/intel_ggtt.c       | 15 +++--
 sys/dev/pci/drm/i915/gt/intel_gtt.h        |  2 -
 sys/dev/pci/drm/i915/i915_drv.h            |  1 -
 sys/dev/pci/drm/i915/i915_gem.c            | 64 +------------------
 sys/dev/pci/drm/i915/i915_vma.c            | 25 +-------
 6 files changed, 12 insertions(+), 128 deletions(-)

diff --git a/sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c b/sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
index 794750f0a5a..4aee32010ea 100644
--- a/sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
+++ b/sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
@@ -11,9 +11,6 @@
 
 #include
 
-#include
-#include
-
 #include "display/intel_frontbuffer.h"
 
 #include "gem/i915_gem_ioctls.h"
@@ -294,10 +291,6 @@ struct i915_execbuffer {
 		bool has_llc : 1;
 		bool has_fence : 1;
 		bool needs_unfenced : 1;
-
-		struct agp_map *map;
-		bus_space_tag_t iot;
-		bus_space_handle_t ioh;
 	} reloc_cache;
 
 	u64 invalid_flags; /** Set of execobj.flags that are invalid */
@@ -1133,9 +1126,6 @@ static void reloc_cache_init(struct reloc_cache *cache,
 	cache->has_fence = cache->graphics_ver < 4;
 	cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment;
 	cache->node.flags = 0;
-
-	cache->map = i915->agph;
-	cache->iot = i915->bst;
 }
 
 static inline void *unmask_page(unsigned long p)
@@ -1168,11 +1158,7 @@ static void reloc_cache_unmap(struct reloc_cache *cache)
 	if (cache->vaddr & KMAP)
 		kunmap_atomic(vaddr);
 	else
-#ifdef __linux__
 		io_mapping_unmap_atomic((void __iomem *)vaddr);
-#else
-		agp_unmap_atomic(cache->map, cache->ioh);
-#endif
 }
 
 static void reloc_cache_remap(struct reloc_cache *cache,
@@ -1197,14 +1183,8 @@ static void reloc_cache_remap(struct reloc_cache *cache,
 		if (!drm_mm_node_allocated(&cache->node))
 			offset += cache->page << PAGE_SHIFT;
 
-#ifdef __linux__
 		cache->vaddr = (unsigned long)
 			io_mapping_map_atomic_wc(&ggtt->iomap, offset);
-#else
-		agp_map_atomic(cache->map, offset, &cache->ioh);
-		cache->vaddr = (unsigned long)
-		    bus_space_vaddr(cache->iot, cache->ioh);
-#endif
 	}
 }
 
@@ -1228,11 +1208,7 @@ static void reloc_cache_reset(struct reloc_cache *cache, struct i915_execbuffer
 		struct i915_ggtt *ggtt = cache_to_ggtt(cache);
 
 		intel_gt_flush_ggtt_writes(ggtt->vm.gt);
-#ifdef __linux__
 		io_mapping_unmap_atomic((void __iomem *)vaddr);
-#else
-		agp_unmap_atomic(cache->map, cache->ioh);
-#endif
 
 		if (drm_mm_node_allocated(&cache->node)) {
 			ggtt->vm.clear_range(&ggtt->vm,
@@ -1299,11 +1275,7 @@ static void *reloc_iomap(struct i915_vma *batch,
 
 	if (cache->vaddr) {
 		intel_gt_flush_ggtt_writes(ggtt->vm.gt);
-#ifdef __linux__
 		io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
-#else
-		agp_unmap_atomic(cache->map, cache->ioh);
-#endif
 	} else {
 		struct i915_vma *vma = ERR_PTR(-ENODEV);
 		int err;
@@ -1365,13 +1337,8 @@ static void *reloc_iomap(struct i915_vma *batch,
 			offset += page << PAGE_SHIFT;
 	}
 
-#ifdef __linux__
 	vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->iomap,
 							 offset);
-#else
-	agp_map_atomic(cache->map, offset, &cache->ioh);
-	vaddr = bus_space_vaddr(cache->iot, cache->ioh);
-#endif
 	cache->page = page;
 	cache->vaddr = (unsigned long)vaddr;
 
diff --git a/sys/dev/pci/drm/i915/gt/intel_ggtt.c b/sys/dev/pci/drm/i915/gt/intel_ggtt.c
index 407b09b69b3..e080d45f4ca 100644
--- a/sys/dev/pci/drm/i915/gt/intel_ggtt.c
+++ b/sys/dev/pci/drm/i915/gt/intel_ggtt.c
@@ -28,9 +28,6 @@
 #include "intel_gtt.h"
 #include "gen8_ppgtt.h"
 
-#include
-#include
-
 static void i915_ggtt_color_adjust(const struct drm_mm_node *node,
 				   unsigned long color,
 				   u64 *start,
@@ -53,7 +50,6 @@ static void i915_ggtt_color_adjust(const struct drm_mm_node *node,
 
 static int ggtt_init_hw(struct i915_ggtt *ggtt)
 {
 	struct drm_i915_private *i915 = ggtt->vm.i915;
-	int i;
 
 	i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
@@ -77,6 +73,9 @@ static int ggtt_init_hw(struct i915_ggtt *ggtt)
 		ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start,
 					      ggtt->mappable_end);
 #else
+		bus_space_handle_t bsh;
+		int i;
+
 		/* XXX would be a lot nicer to get agp info before now */
 		uvm_page_physload(atop(ggtt->gmadr.start),
 		    atop(ggtt->gmadr.start + ggtt->mappable_end),
@@ -94,11 +93,13 @@ static int ggtt_init_hw(struct i915_ggtt *ggtt)
 		for (i = 0; i < atop(ggtt->mappable_end); i++)
 			atomic_setbits_int(&(i915->pgs[i].pg_flags), PG_PMAP_WC);
 
-		if (agp_init_map(i915->bst, ggtt->gmadr.start,
+		if (bus_space_map(i915->bst, ggtt->gmadr.start,
 		    ggtt->mappable_end,
-		    BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_PREFETCHABLE,
-		    &i915->agph))
+		    BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_PREFETCHABLE, &bsh))
 			panic("can't map aperture");
+		ggtt->iomap.base = ggtt->gmadr.start;
+		ggtt->iomap.size = ggtt->mappable_end;
+		ggtt->iomap.iomem = bus_space_vaddr(i915->bst, bsh);
 #endif
 	}
 
diff --git a/sys/dev/pci/drm/i915/gt/intel_gtt.h b/sys/dev/pci/drm/i915/gt/intel_gtt.h
index 99f15779011..9ace406f7a4 100644
--- a/sys/dev/pci/drm/i915/gt/intel_gtt.h
+++ b/sys/dev/pci/drm/i915/gt/intel_gtt.h
@@ -358,9 +358,7 @@ struct i915_address_space {
 struct i915_ggtt {
 	struct i915_address_space vm;
 
-#ifdef notyet
 	struct io_mapping iomap;	/* Mapping to our CPU mappable region */
-#endif
 
 	struct resource gmadr;		/* GMADR resource */
 	resource_size_t mappable_end;	/* End offset that we can CPU map */
diff --git a/sys/dev/pci/drm/i915/i915_drv.h b/sys/dev/pci/drm/i915/i915_drv.h
index 4b1f2687112..f2ccc7dcbca 100644
--- a/sys/dev/pci/drm/i915/i915_drv.h
+++ b/sys/dev/pci/drm/i915/i915_drv.h
@@ -217,7 +217,6 @@ struct inteldrm_softc {
 	bus_dma_tag_t dmat;
 	bus_space_tag_t iot;
 	bus_space_tag_t bst;
-	struct agp_map *agph;
 	bus_space_handle_t opregion_ioh;
 	bus_space_handle_t opregion_rvda_ioh;
 	bus_size_t opregion_rvda_size;
diff --git a/sys/dev/pci/drm/i915/i915_gem.c b/sys/dev/pci/drm/i915/i915_gem.c
index abcdf26ceb8..826d4249e3c 100644
--- a/sys/dev/pci/drm/i915/i915_gem.c
+++ b/sys/dev/pci/drm/i915/i915_gem.c
@@ -39,8 +39,6 @@
 #include
 #include
 
-#include
-
 #include "display/intel_display.h"
 #include "display/intel_frontbuffer.h"
 
@@ -279,7 +277,6 @@ err_unlock:
 	return ret;
 }
 
-#ifdef __linux__
 static inline bool
 gtt_user_read(struct io_mapping *mapping,
 	      loff_t base, int offset,
@@ -303,34 +300,6 @@ gtt_user_read(struct io_mapping *mapping,
 	}
 	return unwritten;
 }
-#else
-static inline bool
-gtt_user_read(struct drm_i915_private *dev_priv,
-	      loff_t base, int offset,
-	      char __user *user_data, int length)
-{
-	bus_space_handle_t bsh;
-	void __iomem *vaddr;
-	unsigned long unwritten;
-
-	/* We can use the cpu mem copy function because this is X86. */
-	agp_map_atomic(dev_priv->agph, base, &bsh);
-	vaddr = bus_space_vaddr(dev_priv->bst, bsh);
-	unwritten = __copy_to_user_inatomic(user_data,
-	    (void __force *)vaddr + offset,
-	    length);
-	agp_unmap_atomic(dev_priv->agph, bsh);
-	if (unwritten) {
-		agp_map_subregion(dev_priv->agph, base, PAGE_SIZE, &bsh);
-		vaddr = bus_space_vaddr(dev_priv->bst, bsh);
-		unwritten = copy_to_user(user_data,
-		    (void __force *)vaddr + offset,
-		    length);
-		agp_unmap_subregion(dev_priv->agph, bsh, PAGE_SIZE);
-	}
-	return unwritten;
-}
-#endif
 
 static struct i915_vma *i915_gem_gtt_prepare(struct drm_i915_gem_object *obj,
 					     struct drm_mm_node *node,
@@ -460,7 +429,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
 			page_base += offset & LINUX_PAGE_MASK;
 		}
 
-		if (gtt_user_read(i915, page_base, page_offset,
+		if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
 				  user_data, page_length)) {
 			ret = -EFAULT;
 			break;
@@ -542,7 +511,7 @@ out:
 /* This is the fast write path which cannot handle
  * page faults in the source data
  */
-#ifdef __linux__
+
 static inline bool
 ggtt_write(struct io_mapping *mapping,
 	   loff_t base, int offset,
@@ -565,33 +534,6 @@ ggtt_write(struct io_mapping *mapping,
 
 	return unwritten;
 }
-#else
-static inline bool
-ggtt_write(struct drm_i915_private *dev_priv,
-	   loff_t base, int offset,
-	   char __user *user_data, int length)
-{
-	bus_space_handle_t bsh;
-	void __iomem *vaddr;
-	unsigned long unwritten;
-
-	/* We can use the cpu mem copy function because this is X86. */
-	agp_map_atomic(dev_priv->agph, base, &bsh);
-	vaddr = bus_space_vaddr(dev_priv->bst, bsh);
-	unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
-	    user_data, length);
-	agp_unmap_atomic(dev_priv->agph, bsh);
-	if (unwritten) {
-		agp_map_subregion(dev_priv->agph, base, PAGE_SIZE, &bsh);
-		vaddr = bus_space_vaddr(dev_priv->bst, bsh);
-		unwritten = copy_from_user((void __force *)vaddr + offset,
-		    user_data, length);
-		agp_unmap_subregion(dev_priv->agph, bsh, PAGE_SIZE);
-	}
-
-	return unwritten;
-}
-#endif
 
 /**
  * i915_gem_gtt_pwrite_fast - This is the fast pwrite path, where we copy the data directly from the
@@ -674,7 +616,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 		 * If the object is non-shmem backed, we retry again with the
 		 * path that handles page fault.
 		 */
-		if (ggtt_write(i915, page_base, page_offset,
+		if (ggtt_write(&ggtt->iomap, page_base, page_offset,
 			       user_data, page_length)) {
 			ret = -EFAULT;
 			break;
diff --git a/sys/dev/pci/drm/i915/i915_vma.c b/sys/dev/pci/drm/i915/i915_vma.c
index 5c3aa47426a..21237cd4471 100644
--- a/sys/dev/pci/drm/i915/i915_vma.c
+++ b/sys/dev/pci/drm/i915/i915_vma.c
@@ -43,8 +43,6 @@
 #include "i915_vma.h"
 #include "i915_vma_resource.h"
 
-#include
-
 static inline void assert_vma_held_evict(const struct i915_vma *vma)
 {
 	/*
@@ -582,22 +580,9 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
 			ptr = i915_gem_object_lmem_io_map(vma->obj, 0,
 							  vma->obj->base.size);
 		} else if (i915_vma_is_map_and_fenceable(vma)) {
-#ifdef __linux__
 			ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
 						i915_vma_offset(vma),
 						i915_vma_size(vma));
-#else
-		{
-			struct drm_i915_private *dev_priv = vma->vm->i915;
-			err = agp_map_subregion(dev_priv->agph, i915_vma_offset(vma),
-			    i915_vma_size(vma), &vma->bsh);
-			if (err) {
-				err = -err;
-				goto err;
-			}
-			ptr = bus_space_vaddr(dev_priv->bst, vma->bsh);
-		}
-#endif
 		} else {
 			ptr = (void __iomem *)
 				i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
@@ -616,10 +601,8 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
 		if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
 			if (page_unmask_bits(ptr))
 				__i915_gem_object_release_map(vma->obj);
-#ifdef __linux__
 			else
 				io_mapping_unmap(ptr);
-#endif
 			ptr = vma->iomap;
 		}
 	}
@@ -1879,14 +1862,8 @@ static void __i915_vma_iounmap(struct i915_vma *vma)
 
 	if (page_unmask_bits(vma->iomap))
 		__i915_gem_object_release_map(vma->obj);
-	else {
-#ifdef __linux__
+	else
 		io_mapping_unmap(vma->iomap);
-#else
-		struct drm_i915_private *dev_priv = vma->vm->i915;
-		agp_unmap_subregion(dev_priv->agph, vma->bsh, vma->node.size);
-#endif
-	}
 
 	vma->iomap = NULL;
 }
-- 
2.20.1
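
For reference, a minimal sketch (not part of the commit) of how the GGTT
aperture is reached after this change: ggtt_init_hw() fills in ggtt->iomap
from bus_space_map()/bus_space_vaddr(), and readers then go through the drm
compat io_mapping helpers exactly as the Linux code does. The helper name
ggtt_read_dword below is hypothetical, for illustration only; it assumes the
compat definitions of io_mapping_map_atomic_wc(), io_mapping_unmap_atomic(),
readl(), offset_in_page() and LINUX_PAGE_MASK already used elsewhere in the
driver.

	/* hypothetical example: read one dword from a GGTT offset through
	 * the shared ggtt->iomap, the same pattern gtt_user_read() and the
	 * relocation cache use */
	static u32
	ggtt_read_dword(struct i915_ggtt *ggtt, u64 offset)
	{
		void __iomem *vaddr;
		u32 val;

		/* atomically map the page containing 'offset', write-combined */
		vaddr = io_mapping_map_atomic_wc(&ggtt->iomap,
		    offset & LINUX_PAGE_MASK);
		val = readl(vaddr + offset_in_page(offset));
		io_mapping_unmap_atomic(vaddr);
		return val;
	}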