-/* $OpenBSD: bus_dma.c,v 1.51 2019/06/09 12:52:04 kettenis Exp $ */
+/* $OpenBSD: bus_dma.c,v 1.52 2024/08/14 18:31:33 bluhm Exp $ */
/* $NetBSD: bus_dma.c,v 1.3 2003/05/07 21:33:58 fvdl Exp $ */
/*-
bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
struct bus_dmamap *map;
+ struct pglist mlist;
+ struct vm_page **pg, *pgnext;
+ size_t mapsize, sz, ssize;
+ vaddr_t va, sva;
void *mapstore;
- size_t mapsize;
+ int npages, error;
+ const struct kmem_dyn_mode *kd;
/*
* Allocate and initialize the DMA map. The end of the map
* is a variable-sized array of segments, so we allocate enough
* room for them in one shot.
*/
mapsize = sizeof(struct bus_dmamap) +
(sizeof(bus_dma_segment_t) * (nsegments - 1));
+
+ /* allocate and use bounce buffers when running as SEV guest */
+ if (cpu_sev_guestmode) {
+ /* this many pages, plus one in case the buffer gets split across pages */
+ npages = round_page(size) / PAGE_SIZE + 1;
+ if (npages < nsegments)
+ npages = nsegments;
+ mapsize += sizeof(struct vm_page *) * npages;
+ }
+
if ((mapstore = malloc(mapsize, M_DEVBUF,
(flags & BUS_DMA_NOWAIT) ?
(M_NOWAIT|M_ZERO) : (M_WAITOK|M_ZERO))) == NULL)
map->_dm_segcnt = nsegments;
map->_dm_maxsegsz = maxsegsz;
map->_dm_boundary = boundary;
+ if (cpu_sev_guestmode) {
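+ /* the bounce page pointers live right behind the segment array */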
+ map->_dm_pages = (void *)&map->dm_segs[nsegments];
+ map->_dm_npages = npages;
+ }
map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
+ if (!cpu_sev_guestmode) {
+ *dmamp = map;
+ return (0);
+ }
+
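+ /* reserve kernel VA to map the bounce pages at */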
+ sz = npages << PGSHIFT;
+ kd = flags & BUS_DMA_NOWAIT ? &kd_trylock : &kd_waitok;
+ va = (vaddr_t)km_alloc(sz, &kv_any, &kp_none, kd);
+ if (va == 0) {
+ map->_dm_npages = 0;
+ free(map, M_DEVBUF, mapsize);
+ return (ENOMEM);
+ }
+
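+ /* allocate the physical pages backing the bounce buffers */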
+ TAILQ_INIT(&mlist);
+ error = uvm_pglistalloc(sz, 0, -1, PAGE_SIZE, 0, &mlist, nsegments,
+ (flags & BUS_DMA_NOWAIT) ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK);
+ if (error) {
+ map->_dm_npages = 0;
+ km_free((void *)va, sz, &kv_any, &kp_none);
+ free(map, M_DEVBUF, mapsize);
+ return (ENOMEM);
+ }
+
+ sva = va;
+ ssize = sz;
+ pgnext = TAILQ_FIRST(&mlist);
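+ /*
+ * Enter each bounce page without the SEV encryption bit
+ * (PMAP_NOCRYPT) so the hypervisor and devices can access it,
+ * then zero it.
+ */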
+ for (pg = map->_dm_pages; npages--; va += PAGE_SIZE, pg++) {
+ *pg = pgnext;
+ error = pmap_enter(pmap_kernel(), va, VM_PAGE_TO_PHYS(*pg),
+ PROT_READ | PROT_WRITE,
+ PROT_READ | PROT_WRITE | PMAP_WIRED |
+ PMAP_CANFAIL | PMAP_NOCRYPT);
+ if (error) {
+ pmap_update(pmap_kernel());
+ map->_dm_npages = 0;
+ km_free((void *)sva, ssize, &kv_any, &kp_none);
+ free(map, M_DEVBUF, mapsize);
+ uvm_pglistfree(&mlist);
+ return (ENOMEM);
+ }
+ pgnext = TAILQ_NEXT(*pg, pageq);
+ bzero((void *)va, PAGE_SIZE);
+ }
+ pmap_update(pmap_kernel());
+ map->_dm_pgva = sva;
+
*dmamp = map;
return (0);
}
_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{
size_t mapsize;
-
+ struct vm_page **pg;
+ struct pglist mlist;
+
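+ /* release the bounce buffer VA range and pages, if we have any */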
+ if (map->_dm_pgva) {
+ km_free((void *)map->_dm_pgva, map->_dm_npages << PGSHIFT,
+ &kv_any, &kp_none);
+ }
+
+ if (map->_dm_pages) {
+ TAILQ_INIT(&mlist);
+ for (pg = map->_dm_pages; map->_dm_npages--; pg++) {
+ TAILQ_INSERT_TAIL(&mlist, *pg, pageq);
+ }
+ uvm_pglistfree(&mlist);
+ }
+
mapsize = sizeof(struct bus_dmamap) +
(sizeof(bus_dma_segment_t) * (map->_dm_segcnt - 1));
free(map, M_DEVBUF, mapsize);
*/
map->dm_mapsize = 0;
map->dm_nsegs = 0;
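+ /* no bounce pages are in use any longer */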
+ map->_dm_nused = 0;
}
/*
_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t addr,
bus_size_t size, int op)
{
- /* Nothing to do here. */
+ bus_dma_segment_t *sg;
+ int i, off = addr;
+ bus_size_t l;
+
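+ /* only SEV guests bounce their DMA; nothing to copy otherwise */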
+ if (!cpu_sev_guestmode)
+ return;
+
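+ /*
+ * Walk the segments covered by [addr, addr + size) and copy
+ * between the caller's buffer and its bounce pages.
+ */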
+ for (i = map->_dm_segcnt, sg = map->dm_segs; size && i--; sg++) {
+ if (off >= sg->ds_len) {
+ off -= sg->ds_len;
+ continue;
+ }
+
+ l = sg->ds_len - off;
+ if (l > size)
+ l = size;
+ size -= l;
+
+ /* PREREAD and POSTWRITE are no-ops. */
+
+ /* READ: device -> memory */
+ if (op & BUS_DMASYNC_POSTREAD) {
+ bcopy((void *)(sg->_ds_bounce_va + off),
+ (void *)(sg->_ds_va + off), l);
+ }
+
+ /* WRITE: memory -> device */
+ if (op & BUS_DMASYNC_PREWRITE) {
+ bcopy((void *)(sg->_ds_va + off),
+ (void *)(sg->_ds_bounce_va + off), l);
+ }
+
+ off = 0;
+ }
}
/*
{
bus_size_t sgsize;
bus_addr_t curaddr, lastaddr, baddr, bmask;
- vaddr_t vaddr = (vaddr_t)buf;
- int seg;
+ vaddr_t pgva = -1, vaddr = (vaddr_t)buf;
+ int seg, page, off;
pmap_t pmap;
+ struct vm_page *pg;
if (p != NULL)
pmap = p->p_vmspace->vm_map.pmap;
panic("Non dma-reachable buffer at curaddr %#lx(raw)",
curaddr);
+ if (cpu_sev_guestmode) {
+ /* use bounce buffer */
+ if (map->_dm_nused + 1 >= map->_dm_npages)
+ return (ENOMEM);
+
+ off = vaddr & PAGE_MASK;
+ pg = map->_dm_pages[page = map->_dm_nused++];
+ curaddr = VM_PAGE_TO_PHYS(pg) + off;
+
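+ /* remember the bounce page's kernel VA for _bus_dmamap_sync() */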
+ pgva = map->_dm_pgva + (page << PGSHIFT) + off;
+ }
+
/*
* Compute the segment size, and adjust counts.
*/
if (first) {
map->dm_segs[seg].ds_addr = curaddr;
map->dm_segs[seg].ds_len = sgsize;
+ map->dm_segs[seg]._ds_va = vaddr;
+ map->dm_segs[seg]._ds_bounce_va = pgva;
first = 0;
} else {
if (curaddr == lastaddr &&
break;
map->dm_segs[seg].ds_addr = curaddr;
map->dm_segs[seg].ds_len = sgsize;
+ map->dm_segs[seg]._ds_va = vaddr;
+ map->dm_segs[seg]._ds_bounce_va = pgva;
}
}