#include <drm/drm_syncobj.h>
-#include <dev/pci/pcivar.h>
-#include <dev/pci/agpvar.h>
-
#include "display/intel_frontbuffer.h"
#include "gem/i915_gem_ioctls.h"
bool has_llc : 1;
bool has_fence : 1;
bool needs_unfenced : 1;
-
- struct agp_map *map;
- bus_space_tag_t iot;
- bus_space_handle_t ioh;
} reloc_cache;
u64 invalid_flags; /** Set of execobj.flags that are invalid */
cache->has_fence = cache->graphics_ver < 4;
cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment;
cache->node.flags = 0;
-
- cache->map = i915->agph;
- cache->iot = i915->bst;
}
static inline void *unmask_page(unsigned long p)
if (cache->vaddr & KMAP)
kunmap_atomic(vaddr);
else
-#ifdef __linux__
io_mapping_unmap_atomic((void __iomem *)vaddr);
-#else
- agp_unmap_atomic(cache->map, cache->ioh);
-#endif
}
static void reloc_cache_remap(struct reloc_cache *cache,
if (!drm_mm_node_allocated(&cache->node))
offset += cache->page << PAGE_SHIFT;
-#ifdef __linux__
cache->vaddr = (unsigned long)
io_mapping_map_atomic_wc(&ggtt->iomap, offset);
-#else
- agp_map_atomic(cache->map, offset, &cache->ioh);
- cache->vaddr = (unsigned long)
- bus_space_vaddr(cache->iot, cache->ioh);
-#endif
}
}
struct i915_ggtt *ggtt = cache_to_ggtt(cache);
intel_gt_flush_ggtt_writes(ggtt->vm.gt);
-#ifdef __linux__
io_mapping_unmap_atomic((void __iomem *)vaddr);
-#else
- agp_unmap_atomic(cache->map, cache->ioh);
-#endif
if (drm_mm_node_allocated(&cache->node)) {
ggtt->vm.clear_range(&ggtt->vm,
if (cache->vaddr) {
intel_gt_flush_ggtt_writes(ggtt->vm.gt);
-#ifdef __linux__
io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
-#else
- agp_unmap_atomic(cache->map, cache->ioh);
-#endif
} else {
struct i915_vma *vma = ERR_PTR(-ENODEV);
int err;
offset += page << PAGE_SHIFT;
}
-#ifdef __linux__
vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->iomap,
offset);
-#else
- agp_map_atomic(cache->map, offset, &cache->ioh);
- vaddr = bus_space_vaddr(cache->iot, cache->ioh);
-#endif
cache->page = page;
cache->vaddr = (unsigned long)vaddr;
#include "intel_gtt.h"
#include "gen8_ppgtt.h"
-#include <dev/pci/pcivar.h>
-#include <dev/pci/agpvar.h>
-
static void i915_ggtt_color_adjust(const struct drm_mm_node *node,
unsigned long color,
u64 *start,
static int ggtt_init_hw(struct i915_ggtt *ggtt)
{
struct drm_i915_private *i915 = ggtt->vm.i915;
- int i;
i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start,
ggtt->mappable_end);
#else
+ bus_space_handle_t bsh;
+ int i;
+
/* XXX would be a lot nicer to get agp info before now */
uvm_page_physload(atop(ggtt->gmadr.start),
atop(ggtt->gmadr.start + ggtt->mappable_end),
for (i = 0; i < atop(ggtt->mappable_end); i++)
atomic_setbits_int(&(i915->pgs[i].pg_flags),
PG_PMAP_WC);
- if (agp_init_map(i915->bst, ggtt->gmadr.start,
+ if (bus_space_map(i915->bst, ggtt->gmadr.start,
ggtt->mappable_end,
- BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_PREFETCHABLE,
- &i915->agph))
+ BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_PREFETCHABLE, &bsh))
panic("can't map aperture");
+ ggtt->iomap.base = ggtt->gmadr.start;
+ ggtt->iomap.size = ggtt->mappable_end;
+ ggtt->iomap.iomem = bus_space_vaddr(i915->bst, bsh);
#endif
}
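Aside (not part of this diff): with ggtt->iomap.base, .size and .iomem filled in from the bus_space mapping above, the Linux-style io_mapping helpers called elsewhere in this series can reduce to pointer arithmetic on the linear aperture mapping, and the _atomic variants can forward to the same code since nothing needs to be set up or torn down per call. The following is only a sketch under that assumption; the field names are taken from the assignments above, and it is not claimed to match the tree's actual linux/io-mapping.h.

struct io_mapping {
	resource_size_t base;	/* aperture bus address: ggtt->gmadr.start */
	unsigned long size;	/* CPU-mappable span: ggtt->mappable_end */
	void __iomem *iomem;	/* linear kernel VA from bus_space_vaddr() */
};

/* Offsets are relative to base, exactly as the reloc/pread/pwrite paths pass them. */
static inline void __iomem *
io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset,
    unsigned long size)
{
	return (char __iomem *)mapping->iomem + offset;
}

static inline void
io_mapping_unmap(void __iomem *vaddr)
{
	/* Nothing to tear down: the whole aperture stays mapped. */
}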
struct i915_ggtt {
struct i915_address_space vm;
-#ifdef notyet
struct io_mapping iomap; /* Mapping to our CPU mappable region */
-#endif
struct resource gmadr; /* GMADR resource */
resource_size_t mappable_end; /* End offset that we can CPU map */
bus_dma_tag_t dmat;
bus_space_tag_t iot;
bus_space_tag_t bst;
- struct agp_map *agph;
bus_space_handle_t opregion_ioh;
bus_space_handle_t opregion_rvda_ioh;
bus_size_t opregion_rvda_size;
#include <drm/drm_cache.h>
#include <drm/drm_vma_manager.h>
-#include <dev/pci/agpvar.h>
-
#include "display/intel_display.h"
#include "display/intel_frontbuffer.h"
return ret;
}
-#ifdef __linux__
static inline bool
gtt_user_read(struct io_mapping *mapping,
loff_t base, int offset,
}
return unwritten;
}
-#else
-static inline bool
-gtt_user_read(struct drm_i915_private *dev_priv,
- loff_t base, int offset,
- char __user *user_data, int length)
-{
- bus_space_handle_t bsh;
- void __iomem *vaddr;
- unsigned long unwritten;
-
- /* We can use the cpu mem copy function because this is X86. */
- agp_map_atomic(dev_priv->agph, base, &bsh);
- vaddr = bus_space_vaddr(dev_priv->bst, bsh);
- unwritten = __copy_to_user_inatomic(user_data,
- (void __force *)vaddr + offset,
- length);
- agp_unmap_atomic(dev_priv->agph, bsh);
- if (unwritten) {
- agp_map_subregion(dev_priv->agph, base, PAGE_SIZE, &bsh);
- vaddr = bus_space_vaddr(dev_priv->bst, bsh);
- unwritten = copy_to_user(user_data,
- (void __force *)vaddr + offset,
- length);
- agp_unmap_subregion(dev_priv->agph, bsh, PAGE_SIZE);
- }
- return unwritten;
-}
-#endif
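For context only: the Linux-side gtt_user_read() body kept by this hunk is elided above. In upstream i915_gem.c it follows the same two-step shape as the removed OpenBSD variant, first an atomic WC mapping with a non-faulting copy, then a sleeping fallback mapping if that copy faulted. Roughly (reconstructed for reference, not part of this diff):

static inline bool
gtt_user_read(struct io_mapping *mapping,
	      loff_t base, int offset,
	      char __user *user_data, int length)
{
	void __iomem *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	vaddr = io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_to_user_inatomic(user_data,
					    (void __force *)vaddr + offset,
					    length);
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_to_user(user_data,
					 (void __force *)vaddr + offset,
					 length);
		io_mapping_unmap(vaddr);
	}
	return unwritten;
}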
static struct i915_vma *i915_gem_gtt_prepare(struct drm_i915_gem_object *obj,
struct drm_mm_node *node,
page_base += offset & LINUX_PAGE_MASK;
}
- if (gtt_user_read(i915, page_base, page_offset,
+ if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
user_data, page_length)) {
ret = -EFAULT;
break;
/* This is the fast write path which cannot handle
* page faults in the source data
*/
-#ifdef __linux__
+
static inline bool
ggtt_write(struct io_mapping *mapping,
loff_t base, int offset,
return unwritten;
}
-#else
-static inline bool
-ggtt_write(struct drm_i915_private *dev_priv,
- loff_t base, int offset,
- char __user *user_data, int length)
-{
- bus_space_handle_t bsh;
- void __iomem *vaddr;
- unsigned long unwritten;
-
- /* We can use the cpu mem copy function because this is X86. */
- agp_map_atomic(dev_priv->agph, base, &bsh);
- vaddr = bus_space_vaddr(dev_priv->bst, bsh);
- unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
- user_data, length);
- agp_unmap_atomic(dev_priv->agph, bsh);
- if (unwritten) {
- agp_map_subregion(dev_priv->agph, base, PAGE_SIZE, &bsh);
- vaddr = bus_space_vaddr(dev_priv->bst, bsh);
- unwritten = copy_from_user((void __force *)vaddr + offset,
- user_data, length);
- agp_unmap_subregion(dev_priv->agph, bsh, PAGE_SIZE);
- }
-
- return unwritten;
-}
-#endif
/**
* i915_gem_gtt_pwrite_fast - This is the fast pwrite path, where we copy the data directly from the
* If the object is non-shmem backed, we retry again with the
* path that handles page fault.
*/
- if (ggtt_write(i915, page_base, page_offset,
+ if (ggtt_write(&ggtt->iomap, page_base, page_offset,
user_data, page_length)) {
ret = -EFAULT;
break;
#include "i915_vma.h"
#include "i915_vma_resource.h"
-#include <dev/pci/agpvar.h>
-
static inline void assert_vma_held_evict(const struct i915_vma *vma)
{
/*
ptr = i915_gem_object_lmem_io_map(vma->obj, 0,
vma->obj->base.size);
} else if (i915_vma_is_map_and_fenceable(vma)) {
-#ifdef __linux__
ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
i915_vma_offset(vma),
i915_vma_size(vma));
-#else
- {
- struct drm_i915_private *dev_priv = vma->vm->i915;
- err = agp_map_subregion(dev_priv->agph, i915_vma_offset(vma),
- i915_vma_size(vma), &vma->bsh);
- if (err) {
- err = -err;
- goto err;
- }
- ptr = bus_space_vaddr(dev_priv->bst, vma->bsh);
- }
-#endif
} else {
ptr = (void __iomem *)
i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
if (page_unmask_bits(ptr))
__i915_gem_object_release_map(vma->obj);
-#ifdef __linux__
else
io_mapping_unmap(ptr);
-#endif
ptr = vma->iomap;
}
}
if (page_unmask_bits(vma->iomap))
__i915_gem_object_release_map(vma->obj);
- else {
-#ifdef __linux__
+ else
io_mapping_unmap(vma->iomap);
-#else
- struct drm_i915_private *dev_priv = vma->vm->i915;
- agp_unmap_subregion(dev_priv->agph, vma->bsh, vma->node.size);
-#endif
- }
vma->iomap = NULL;
}
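Hypothetical usage sketch of the pin/unpin pair touched above. The helper name is made up, and the vma is assumed to already be pinned into the mappable GGTT (PIN_MAPPABLE | PIN_GLOBAL); neither comes from this diff.

static int zero_through_aperture(struct i915_vma *vma)
{
	void __iomem *ptr;

	ptr = i915_vma_pin_iomap(vma);	/* takes the io_mapping_map_wc() branch above */
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	memset_io(ptr, 0, i915_vma_size(vma));	/* CPU writes go out through the WC aperture mapping */

	i915_vma_unpin_iomap(vma);	/* flushes GGTT writes; the mapping stays cached in vma->iomap */
	return 0;
}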