Switch to an MI implementation of kmap_atomic() and implement
author: kettenis <kettenis@openbsd.org>
Wed, 28 Jul 2021 13:28:04 +0000 (13:28 +0000)
committer: kettenis <kettenis@openbsd.org>
Wed, 28 Jul 2021 13:28:04 +0000 (13:28 +0000)
kmap_atomic_prot().  Use this to unstub ttm_copy_io_ttm_page()
and ttm_copy_ttm_io_page().  This fixes suspend/resume of machines
with certain radeondrm(4) hardware.

Based on a diff from jsg@.  Tested by Edd Barrett and Alf Schlichting.
ok jsg@

sys/dev/pci/drm/drm_linux.c
sys/dev/pci/drm/include/linux/highmem.h
sys/dev/pci/drm/ttm/ttm_bo_util.c

index 126c078..72c5321 100644 (file)
@@ -1,4 +1,4 @@
-/*     $OpenBSD: drm_linux.c,v 1.81 2021/07/26 06:24:22 jsg Exp $      */
+/*     $OpenBSD: drm_linux.c,v 1.82 2021/07/28 13:28:04 kettenis Exp $ */
 /*
  * Copyright (c) 2013 Jonathan Gray <jsg@openbsd.org>
  * Copyright (c) 2015, 2016 Mark Kettenis <kettenis@openbsd.org>
@@ -571,6 +571,29 @@ kunmap_va(void *addr)
 #endif
 }
 
+/*
+ * Single-slot MI kmap_atomic() support: one page of KVA is reserved in
+ * drm_linux_init(), and kmap_atomic_inuse guards against nested use.
+ */
+vaddr_t kmap_atomic_va;
+int kmap_atomic_inuse;
+
+/*
+ * Map the page `pg' at the reserved kmap_atomic_va, read/write.
+ * `prot' is OR'ed into the physical address passed to pmap_kenter_pa()
+ * (presumably PMAP_* cache-attribute flags -- confirm against pmap(9)).
+ * Only one mapping may be live at a time; kunmap_atomic() releases it.
+ */
+void *
+kmap_atomic_prot(struct vm_page *pg, pgprot_t prot)
+{
+       KASSERT(!kmap_atomic_inuse);
+
+       kmap_atomic_inuse = 1;
+       pmap_kenter_pa(kmap_atomic_va, VM_PAGE_TO_PHYS(pg) | prot,
+           PROT_READ | PROT_WRITE);
+       return (void *)kmap_atomic_va;
+}
+
+/*
+ * Tear down the mapping established by kmap_atomic_prot() and release
+ * the single kmap_atomic slot.  `addr' is unused: the one reserved VA
+ * (kmap_atomic_va) is always the mapping being removed; the parameter
+ * exists only to match the Linux kunmap_atomic() interface.
+ */
+void
+kunmap_atomic(void *addr)
+{
+       KASSERT(kmap_atomic_inuse);
+
+       pmap_kremove(kmap_atomic_va, PAGE_SIZE);
+       kmap_atomic_inuse = 0;
+}
+
 void *
 vmap(struct vm_page **pages, unsigned int npages, unsigned long flags,
      pgprot_t prot)
@@ -2348,6 +2371,9 @@ drm_linux_init(void)
 
        pool_init(&idr_pool, sizeof(struct idr_entry), 0, IPL_TTY, 0,
            "idrpl", NULL);
+
+       kmap_atomic_va =
+           (vaddr_t)km_alloc(PAGE_SIZE, &kv_any, &kp_none, &kd_waitok);
 }
 
 void
index 3ce510b..170f0be 100644 (file)
@@ -1,4 +1,4 @@
-/*     $OpenBSD: highmem.h,v 1.3 2020/06/14 15:20:07 jsg Exp $ */
+/*     $OpenBSD: highmem.h,v 1.4 2021/07/28 13:28:05 kettenis Exp $    */
 /*
  * Copyright (c) 2013, 2014, 2015 Mark Kettenis
  *
 #include <sys/param.h>
 #include <uvm/uvm_extern.h>
 #include <linux/uaccess.h>
+#include <asm/pgtable.h>
 
 void   *kmap(struct vm_page *);
-void    kunmap_va(void *addr);
+void   kunmap_va(void *addr);
 
 #define kmap_to_page(ptr)      (ptr)
 
-#if defined(__i386__) || defined(__amd64__)
+void   *kmap_atomic_prot(struct vm_page *, pgprot_t);
+void   kunmap_atomic(void *);
 
+/*
+ * Map a page with the default kernel page attributes (PAGE_KERNEL).
+ * Now MI: delegates to kmap_atomic_prot() instead of the former
+ * i386/amd64-only pmap_map_direct()/pmap_tmpmap_pa() implementation.
+ */
 static inline void *
 kmap_atomic(struct vm_page *pg)
 {
-       vaddr_t va;
-
-#if defined (__HAVE_PMAP_DIRECT)
-       va = pmap_map_direct(pg);
-#else
-       extern vaddr_t pmap_tmpmap_pa(paddr_t);
-       va = pmap_tmpmap_pa(VM_PAGE_TO_PHYS(pg));
-#endif
-       return (void *)va;
+       return kmap_atomic_prot(pg, PAGE_KERNEL);
 }
 
-static inline void
-kunmap_atomic(void *addr)
-{
-#if defined (__HAVE_PMAP_DIRECT)
-       pmap_unmap_direct((vaddr_t)addr);
-#else
-       extern void pmap_tmpunmap_pa(void);
-       pmap_tmpunmap_pa();
-#endif
-}
-
-#endif /* defined(__i386__) || defined(__amd64__) */
-
 #endif
index e8db696..e8aa52a 100644 (file)
@@ -186,9 +186,6 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
                                unsigned long page,
                                pgprot_t prot)
 {
-       STUB();
-       return -ENOSYS;
-#ifdef notyet
        struct vm_page *d = ttm->pages[page];
        void *dst;
 
@@ -205,16 +202,12 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
        kunmap_atomic(dst);
 
        return 0;
-#endif
 }
 
 static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
                                unsigned long page,
                                pgprot_t prot)
 {
-       STUB();
-       return -ENOSYS;
-#ifdef notyet
        struct vm_page *s = ttm->pages[page];
        void *src;
 
@@ -231,7 +224,6 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
        kunmap_atomic(src);
 
        return 0;
-#endif
 }
 
 int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,