Fix loading memory allocated with bus_dmamem_alloc(9). The old code would
author kettenis <kettenis@openbsd.org>
Fri, 9 Jan 2015 14:23:25 +0000 (14:23 +0000)
committer kettenis <kettenis@openbsd.org>
Fri, 9 Jan 2015 14:23:25 +0000 (14:23 +0000)
always load all allocated pages instead of the size specified in the
bus_dmamap_load_raw(9) call.  Also fixes the corner case where a specified
boundary is less than the page size, which would always create multiple
segments, even if the specified size was smaller than the boundary.

Fixes xhci(4) on sparc64.

sys/arch/sparc64/dev/iommu.c
sys/arch/sparc64/dev/viommu.c

index da63933..13f48d2 100644 (file)
@@ -1,4 +1,4 @@
-/*     $OpenBSD: iommu.c,v 1.71 2014/11/16 12:30:59 deraadt Exp $      */
+/*     $OpenBSD: iommu.c,v 1.72 2015/01/09 14:23:25 kettenis Exp $     */
 /*     $NetBSD: iommu.c,v 1.47 2002/02/08 20:03:45 eeh Exp $   */
 
 /*
@@ -1252,8 +1252,8 @@ iommu_dvmamap_load_mlist(bus_dma_tag_t t, struct iommu_state *is,
        for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m,pageq)) {
                pa = VM_PAGE_TO_PHYS(m);
 
-               err = iommu_dvmamap_append_range(t, map, pa, PAGE_SIZE,
-                   flags, boundary);
+               err = iommu_dvmamap_append_range(t, map, pa,
+                   MIN(PAGE_SIZE, size), flags, boundary);
                if (err == EFBIG)
                        return (err);
                if (err) {
@@ -1262,6 +1262,9 @@ iommu_dvmamap_load_mlist(bus_dma_tag_t t, struct iommu_state *is,
                            pa + PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
                        return (err);
                }
+               if (size < PAGE_SIZE)
+                       break;
+               size -= PAGE_SIZE;
        }
 
        return (0);
index d9c31d8..2b95a70 100644 (file)
@@ -1,4 +1,4 @@
-/*     $OpenBSD: viommu.c,v 1.15 2014/05/10 12:20:38 kettenis Exp $    */
+/*     $OpenBSD: viommu.c,v 1.16 2015/01/09 14:23:25 kettenis Exp $    */
 /*     $NetBSD: iommu.c,v 1.47 2002/02/08 20:03:45 eeh Exp $   */
 
 /*
@@ -784,8 +784,8 @@ viommu_dvmamap_load_mlist(bus_dma_tag_t t, struct iommu_state *is,
        for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m,pageq)) {
                pa = VM_PAGE_TO_PHYS(m);
 
-               err = viommu_dvmamap_append_range(t, map, pa, PAGE_SIZE,
-                   flags, boundary);
+               err = viommu_dvmamap_append_range(t, map, pa,
+                   MIN(PAGE_SIZE, size), flags, boundary);
                if (err == EFBIG)
                        return (err);
                if (err) {
@@ -794,6 +794,9 @@ viommu_dvmamap_load_mlist(bus_dma_tag_t t, struct iommu_state *is,
                            pa + PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
                        return (err);
                }
+               if (size < PAGE_SIZE)
+                       break;
+               size -= PAGE_SIZE;
        }
 
        return (0);