From e208b5629e93dc00cc43012e18092d238479df80 Mon Sep 17 00:00:00 2001
From: jcs
Date: Thu, 20 Apr 2023 19:28:30 +0000
Subject: [PATCH] add viogpu, a VirtIO GPU driver

works enough to get a console on qemu, with more work to come from others

feedback from miod
ok patrick
---
 share/man/man4/Makefile     |   4 +-
 share/man/man4/viogpu.4     |  49 +++
 sys/arch/amd64/conf/GENERIC |   5 +-
 sys/arch/arm64/conf/GENERIC |   4 +-
 sys/arch/arm64/conf/RAMDISK |   4 +-
 sys/dev/pv/files.pv         |   6 +-
 sys/dev/pv/viogpu.c         | 718 ++++++++++++++++++++++++++++++++++++
 sys/dev/pv/viogpu.h         | 450 ++++++++++++++++++++++
 sys/dev/pv/virtio.c         |  10 +-
 sys/dev/pv/virtioreg.h      |   3 +-
 sys/dev/wscons/wsconsio.h   |   3 +-
 11 files changed, 1246 insertions(+), 10 deletions(-)
 create mode 100644 share/man/man4/viogpu.4
 create mode 100644 sys/dev/pv/viogpu.c
 create mode 100644 sys/dev/pv/viogpu.h

diff --git a/share/man/man4/Makefile b/share/man/man4/Makefile
index 4f119c79561..ad8d357442d 100644
--- a/share/man/man4/Makefile
+++ b/share/man/man4/Makefile
@@ -1,4 +1,4 @@
-# $OpenBSD: Makefile,v 1.839 2023/04/19 00:13:23 dlg Exp $
+# $OpenBSD: Makefile,v 1.840 2023/04/20 19:28:30 jcs Exp $
 
 MAN=	aac.4 abcrtc.4 abl.4 ac97.4 acphy.4 acrtc.4 \
 	acpi.4 acpiac.4 acpials.4 acpiasus.4 acpibat.4 \
@@ -108,7 +108,7 @@ MAN=	aac.4 abcrtc.4 abl.4 ac97.4 acphy.4 acrtc.4 \
 	uvisor.4 uvscom.4 uwacom.4 uxrcom.4 \
 	veb.4 vether.4 vga.4 vgafb.4 vge.4 \
 	viapm.4 viasio.4 vic.4 video.4 vio.4 \
-	vioblk.4 viocon.4 viomb.4 viornd.4 vioscsi.4 virtio.4 vlan.4 \
+	vioblk.4 viocon.4 viogpu.4 viomb.4 viornd.4 vioscsi.4 virtio.4 vlan.4 \
 	vmmci.4 vmt.4 vmwpvs.4 vmx.4 vnd.4 vr.4 \
 	vscsi.4 vte.4 vxlan.4 \
 	watchdog.4 wb.4 wbenv.4 wbng.4 wbsd.4 wbsio.4 wd.4 wdc.4 we.4 \
diff --git a/share/man/man4/viogpu.4 b/share/man/man4/viogpu.4
new file mode 100644
index 00000000000..81eec53e1ea
--- /dev/null
+++ b/share/man/man4/viogpu.4
@@ -0,0 +1,49 @@
+.\" $OpenBSD: viogpu.4,v 1.1 2023/04/20 19:28:30 jcs Exp $
+.\"
+.\" Copyright (c) 2023 joshua stein
+.\"
+.\" Permission to use, copy, modify, and distribute this software for any
+.\" purpose with or without fee is hereby granted, provided that the above
+.\" copyright notice and this permission notice appear in all copies.
+.\"
+.\" THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+.\" WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+.\" MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+.\" ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+.\" WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+.\" ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+.\" OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+.\"
+.Dd $Mdocdate: April 20 2023 $
+.Dt VIOGPU 4
+.Os
+.Sh NAME
+.Nm viogpu
+.Nd VirtIO GPU device
+.Sh SYNOPSIS
+.Cd "viogpu* at virtio?"
+.Cd "wsdisplay* at viogpu?"
+.Sh DESCRIPTION
+The
+.Nm
+driver provides support for the
+.Xr virtio 4
+GPU interface provided by QEMU and other virtual machines to create a
+.Xr wscons 4
+console.
+.Sh SEE ALSO
+.Xr intro 4 ,
+.Xr virtio 4 ,
+.Xr wscons 4 ,
+.Xr wsdisplay 4
+.Sh HISTORY
+The
+.Nm
+driver first appeared in
+.Ox 7.4 .
+.Sh AUTHORS
+.An -nosplit
+The
+.Nm
+driver was written by
+.An joshua stein Aq Mt jcs@openbsd.org .
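Not part of the diff: the man page above only covers configuration, so here is a minimal userland sketch of what the new wsdisplay attachment exposes. It queries the framebuffer geometry through the generic wscons ioctls handled by viogpu_wsioctl() further down in the patch, then maps the framebuffer via viogpu_wsmmap(). The /dev/ttyC0 device path and the use of WSDISPLAYIO_MODE_DUMPFB are assumptions about a typical OpenBSD console setup, not something this patch mandates; the program needs root and will scribble on the active screen.

/*
 * Illustrative only: exercise the wsdisplay interface viogpu(4) attaches.
 * /dev/ttyC0 is an assumed path; use whichever screen the driver owns.
 */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <dev/wscons/wsconsio.h>

#include <err.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	struct wsdisplay_fbinfo fbi;
	u_int mode = WSDISPLAYIO_MODE_DUMPFB;
	u_int stride;
	unsigned char *fb;
	int fd;

	if ((fd = open("/dev/ttyC0", O_RDWR)) == -1)
		err(1, "open");
	/* geometry comes from viogpu_wsioctl() */
	if (ioctl(fd, WSDISPLAYIO_GINFO, &fbi) == -1)
		err(1, "WSDISPLAYIO_GINFO");
	if (ioctl(fd, WSDISPLAYIO_LINEBYTES, &stride) == -1)
		err(1, "WSDISPLAYIO_LINEBYTES");
	printf("%ux%u, %u bpp, %u bytes per scanline\n",
	    fbi.width, fbi.height, fbi.depth, stride);

	/* leave terminal emulation before touching the framebuffer */
	if (ioctl(fd, WSDISPLAYIO_SMODE, &mode) == -1)
		err(1, "WSDISPLAYIO_SMODE");
	fb = mmap(NULL, (size_t)stride * fbi.height,
	    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (fb == MAP_FAILED)
		err(1, "mmap");
	memset(fb, 0x80, (size_t)stride * fbi.height);	/* grey screen */
	return 0;
}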
diff --git a/sys/arch/amd64/conf/GENERIC b/sys/arch/amd64/conf/GENERIC index 67c14c71def..1c940d9ea9c 100644 --- a/sys/arch/amd64/conf/GENERIC +++ b/sys/arch/amd64/conf/GENERIC @@ -1,4 +1,4 @@ -# $OpenBSD: GENERIC,v 1.515 2023/03/31 08:22:09 kevlo Exp $ +# $OpenBSD: GENERIC,v 1.516 2023/04/20 19:28:30 jcs Exp $ # # For further information on compiling OpenBSD kernels, see the config(8) # man page. @@ -706,3 +706,6 @@ viornd* at virtio? # Virtio entropy device vioscsi* at virtio? # Virtio SCSI device #viocon* at virtio? # Virtio console device vmmci* at virtio? # VMM control interface +#viogpu* at virtio? # VirtIO GPU device +#wsdisplay0 at viogpu? console 1 +#wsdisplay* at viogpu? mux -1 diff --git a/sys/arch/arm64/conf/GENERIC b/sys/arch/arm64/conf/GENERIC index aacb6fc042b..edb65a3ef41 100644 --- a/sys/arch/arm64/conf/GENERIC +++ b/sys/arch/arm64/conf/GENERIC @@ -1,4 +1,4 @@ -# $OpenBSD: GENERIC,v 1.265 2023/04/19 00:40:24 dlg Exp $ +# $OpenBSD: GENERIC,v 1.266 2023/04/20 19:28:30 jcs Exp $ # # GENERIC machine description file # @@ -119,6 +119,8 @@ vioblk* at virtio? viomb* at virtio? viornd* at virtio? vioscsi* at virtio? +viogpu* at virtio? +wsdisplay* at viogpu? option WSDISPLAY_COMPAT_USL # VT handling option WSDISPLAY_COMPAT_RAWKBD # provide raw scancodes; needed for X11 diff --git a/sys/arch/arm64/conf/RAMDISK b/sys/arch/arm64/conf/RAMDISK index 56b1ce2e4ef..d630c574a68 100644 --- a/sys/arch/arm64/conf/RAMDISK +++ b/sys/arch/arm64/conf/RAMDISK @@ -1,4 +1,4 @@ -# $OpenBSD: RAMDISK,v 1.197 2023/04/19 00:40:24 dlg Exp $ +# $OpenBSD: RAMDISK,v 1.198 2023/04/20 19:28:30 jcs Exp $ machine arm64 maxusers 4 @@ -103,6 +103,8 @@ vioblk* at virtio? viomb* at virtio? viornd* at virtio? vioscsi* at virtio? +viogpu* at virtio? +wsdisplay* at viogpu? simplefb* at fdt? wsdisplay* at simplefb? diff --git a/sys/dev/pv/files.pv b/sys/dev/pv/files.pv index e081897d582..bec16181f46 100644 --- a/sys/dev/pv/files.pv +++ b/sys/dev/pv/files.pv @@ -1,4 +1,4 @@ -# $OpenBSD: files.pv,v 1.16 2020/01/24 05:14:51 jsg Exp $ +# $OpenBSD: files.pv,v 1.17 2023/04/20 19:28:31 jcs Exp $ # # Config file and device description for paravirtual devices. # Included by ports that need it. @@ -80,3 +80,7 @@ file dev/pv/vioscsi.c vioscsi device vmmci attach vmmci at virtio file dev/pv/vmmci.c vmmci + +device viogpu: drmbase, wsemuldisplaydev, rasops32 +attach viogpu at virtio +file dev/pv/viogpu.c viogpu diff --git a/sys/dev/pv/viogpu.c b/sys/dev/pv/viogpu.c new file mode 100644 index 00000000000..c00a6b395d4 --- /dev/null +++ b/sys/dev/pv/viogpu.c @@ -0,0 +1,718 @@ +/* $OpenBSD: viogpu.c,v 1.1 2023/04/20 19:28:31 jcs Exp $ */ + +/* + * Copyright (c) 2021-2023 joshua stein + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include +#include +#include +#include +#include + +#include + +#include +#include +#include + +#include +#include +#include + +#if VIRTIO_DEBUG +#define DPRINTF(x...) printf(x) +#else +#define DPRINTF(x...) +#endif + +struct viogpu_softc; + +int viogpu_match(struct device *, void *, void *); +void viogpu_attach(struct device *, struct device *, void *); +int viogpu_send_cmd(struct viogpu_softc *, void *, size_t, void *, size_t); +int viogpu_vq_wait(struct virtqueue *vq); +void viogpu_rx_soft(void *arg); + +int viogpu_get_display_info(struct viogpu_softc *); +int viogpu_create_2d(struct viogpu_softc *, int, int, int); +int viogpu_set_scanout(struct viogpu_softc *, int, int, int, int); +int viogpu_attach_backing(struct viogpu_softc *, int, bus_dmamap_t); +int viogpu_transfer_to_host_2d(struct viogpu_softc *sc, int, uint32_t, + uint32_t); +int viogpu_flush_resource(struct viogpu_softc *, int, uint32_t, uint32_t); + +void viogpu_repaint(void *); + +int viogpu_wsioctl(void *, u_long, caddr_t, int, struct proc *); +paddr_t viogpu_wsmmap(void *, off_t, int); +int viogpu_alloc_screen(void *, const struct wsscreen_descr *, void **, + int *, int *, uint32_t *); +int viogpu_show_screen(void *, void *, int, void (*)(void *, int, int), + void *); +void viogpu_enter_ddb(void *, void *); +void viogpu_doswitch(void *); + +#define VIOGPU_HEIGHT 160 +#define VIOGPU_WIDTH 160 + +struct viogpu_softc { + struct device sc_dev; + struct virtio_softc *sc_virtio; +#define VQCTRL 0 +#define VQCURS 1 + struct virtqueue sc_vqs[2]; + + bus_dma_segment_t sc_dma_seg; + bus_dmamap_t sc_dma_map; + size_t sc_dma_size; + void *sc_cmd; + int sc_fence_id; + + int sc_fb_width; + int sc_fb_height; + bus_dma_segment_t sc_fb_dma_seg; + bus_dmamap_t sc_fb_dma_map; + size_t sc_fb_dma_size; + caddr_t sc_fb_dma_kva; + + struct rasops_info sc_ri; + struct wsscreen_descr sc_wsd; + struct wsscreen_list sc_wsl; + struct wsscreen_descr *sc_scrlist[1]; + struct wsdisplay_charcell sc_fb_bs[VIOGPU_HEIGHT * VIOGPU_WIDTH]; + int console; + int primary; + + struct timeout sc_timo; +}; + +struct virtio_feature_name viogpu_feature_names[] = { +#if VIRTIO_DEBUG + { VIRTIO_GPU_F_VIRGL, "VirGL" }, + { VIRTIO_GPU_F_EDID, "EDID" }, +#endif + { 0, NULL }, +}; + +struct wsscreen_descr viogpu_stdscreen = { "std" }; + +const struct wsscreen_descr *viogpu_scrlist[] = { + &viogpu_stdscreen, +}; + +struct wsscreen_list viogpu_screenlist = { + nitems(viogpu_scrlist), viogpu_scrlist +}; + +struct wsdisplay_accessops viogpu_accessops = { + .ioctl = viogpu_wsioctl, + .mmap = viogpu_wsmmap, + .alloc_screen = viogpu_alloc_screen, + .free_screen = rasops_free_screen, + .show_screen = rasops_show_screen, + .getchar = rasops_getchar, + .load_font = rasops_load_font, + .list_font = rasops_list_font, + .scrollback = rasops_scrollback, +}; + +const struct cfattach viogpu_ca = { + sizeof(struct viogpu_softc), + viogpu_match, + viogpu_attach, + NULL +}; + +struct cfdriver viogpu_cd = { + NULL, "viogpu", DV_DULL +}; + +int +viogpu_match(struct device *parent, void *match, void *aux) +{ + struct virtio_softc *va = aux; + + if (va->sc_childdevid == PCI_PRODUCT_VIRTIO_GPU) + return 1; + + return 0; +} + +void +viogpu_attach(struct device *parent, struct device *self, void *aux) +{ + struct viogpu_softc *sc = (struct viogpu_softc *)self; + struct virtio_softc *vsc = (struct virtio_softc *)parent; + struct wsemuldisplaydev_attach_args waa; + struct rasops_info *ri = &sc->sc_ri; + uint32_t defattr; + int nsegs; + + if (vsc->sc_child != NULL) { + printf(": child already 
attached for %s\n", parent->dv_xname); + return; + } + vsc->sc_child = self; + + virtio_negotiate_features(vsc, viogpu_feature_names); + if (!vsc->sc_version_1) { + printf(": requires virtio version 1\n"); + return; + } + + vsc->sc_ipl = IPL_TTY; + softintr_establish(IPL_TTY, viogpu_rx_soft, vsc); + sc->sc_virtio = vsc; + + /* allocate command and cursor virtqueues */ + vsc->sc_vqs = sc->sc_vqs; + if (virtio_alloc_vq(vsc, &sc->sc_vqs[VQCTRL], VQCTRL, NBPG, 1, + "control")) { + printf(": alloc_vq failed\n"); + return; + } + sc->sc_vqs[VQCTRL].vq_done = viogpu_vq_wait; + + if (virtio_alloc_vq(vsc, &sc->sc_vqs[VQCURS], VQCURS, NBPG, 1, + "cursor")) { + printf(": alloc_vq failed\n"); + return; + } + vsc->sc_nvqs = nitems(sc->sc_vqs); + + /* setup DMA space for sending commands */ + sc->sc_dma_size = NBPG; + if (bus_dmamap_create(vsc->sc_dmat, sc->sc_dma_size, 1, + sc->sc_dma_size, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, + &sc->sc_dma_map) != 0) { + printf(": create failed"); + goto err; + } + if (bus_dmamem_alloc(vsc->sc_dmat, sc->sc_dma_size, 16, 0, + &sc->sc_dma_seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0) { + printf(": alloc failed"); + goto destroy; + } + if (bus_dmamem_map(vsc->sc_dmat, &sc->sc_dma_seg, nsegs, + sc->sc_dma_size, (caddr_t *)&sc->sc_cmd, BUS_DMA_NOWAIT) != 0) { + printf(": map failed"); + goto free; + } + if (bus_dmamap_load(vsc->sc_dmat, sc->sc_dma_map, sc->sc_cmd, + sc->sc_dma_size, NULL, BUS_DMA_NOWAIT) != 0) { + printf(": load failed"); + goto unmap; + } + + if (viogpu_get_display_info(sc) != 0) + goto unmap; + + /* setup DMA space for actual framebuffer */ + sc->sc_fb_dma_size = sc->sc_fb_width * sc->sc_fb_height * 4; + if (bus_dmamap_create(vsc->sc_dmat, sc->sc_fb_dma_size, 1, + sc->sc_fb_dma_size, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, + &sc->sc_fb_dma_map) != 0) + goto unmap; + if (bus_dmamem_alloc(vsc->sc_dmat, sc->sc_fb_dma_size, 1024, 0, + &sc->sc_fb_dma_seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0) + goto fb_destroy; + if (bus_dmamem_map(vsc->sc_dmat, &sc->sc_fb_dma_seg, nsegs, + sc->sc_fb_dma_size, &sc->sc_fb_dma_kva, BUS_DMA_NOWAIT) != 0) + goto fb_free; + if (bus_dmamap_load(vsc->sc_dmat, sc->sc_fb_dma_map, + sc->sc_fb_dma_kva, sc->sc_fb_dma_size, NULL, BUS_DMA_NOWAIT) != 0) + goto fb_unmap; + + if (viogpu_create_2d(sc, 1, sc->sc_fb_width, sc->sc_fb_height) != 0) + goto fb_unmap; + + if (viogpu_attach_backing(sc, 1, sc->sc_fb_dma_map) != 0) + goto fb_unmap; + + if (viogpu_set_scanout(sc, 0, 1, sc->sc_fb_width, + sc->sc_fb_height) != 0) + goto fb_unmap; + + sc->console = 1; + + ri->ri_hw = sc; + ri->ri_bits = sc->sc_fb_dma_kva; + ri->ri_flg = RI_VCONS | RI_CENTER | RI_CLEAR | RI_WRONLY; + ri->ri_depth = 32; + ri->ri_width = sc->sc_fb_width; + ri->ri_height = sc->sc_fb_height; + ri->ri_stride = ri->ri_width * ri->ri_depth / 8; + ri->ri_bpos = 0; /* B8G8R8X8 */ + ri->ri_bnum = 8; + ri->ri_gpos = 8; + ri->ri_gnum = 8; + ri->ri_rpos = 16; + ri->ri_rnum = 8; + ri->ri_bs = sc->sc_fb_bs; + rasops_init(ri, VIOGPU_HEIGHT, VIOGPU_WIDTH); + + strlcpy(sc->sc_wsd.name, "std", sizeof(sc->sc_wsd.name)); + sc->sc_wsd.capabilities = ri->ri_caps; + sc->sc_wsd.nrows = ri->ri_rows; + sc->sc_wsd.ncols = ri->ri_cols; + sc->sc_wsd.textops = &ri->ri_ops; + sc->sc_wsd.fontwidth = ri->ri_font->fontwidth; + sc->sc_wsd.fontheight = ri->ri_font->fontheight; + + sc->sc_scrlist[0] = &sc->sc_wsd; + sc->sc_wsl.nscreens = 1; + sc->sc_wsl.screens = (const struct wsscreen_descr **)sc->sc_scrlist; + + printf(": %dx%d, %dbpp\n", ri->ri_width, ri->ri_height, ri->ri_depth); + + 
timeout_set(&sc->sc_timo, viogpu_repaint, sc); + viogpu_repaint(sc); + + if (sc->console) { + ri->ri_ops.pack_attr(ri->ri_active, 0, 0, 0, &defattr); + wsdisplay_cnattach(&sc->sc_wsd, ri->ri_active, 0, 0, defattr); + } + + memset(&waa, 0, sizeof(waa)); + waa.scrdata = &sc->sc_wsl; + waa.accessops = &viogpu_accessops; + waa.accesscookie = ri; + waa.console = sc->console; + + config_found_sm(self, &waa, wsemuldisplaydevprint, + wsemuldisplaydevsubmatch); + return; + +fb_unmap: + bus_dmamem_unmap(vsc->sc_dmat, (caddr_t)&sc->sc_fb_dma_kva, + sc->sc_fb_dma_size); +fb_free: + bus_dmamem_free(vsc->sc_dmat, &sc->sc_fb_dma_seg, 1); +fb_destroy: + bus_dmamap_destroy(vsc->sc_dmat, sc->sc_fb_dma_map); +unmap: + bus_dmamem_unmap(vsc->sc_dmat, (caddr_t)&sc->sc_cmd, sc->sc_dma_size); +free: + bus_dmamem_free(vsc->sc_dmat, &sc->sc_dma_seg, 1); +destroy: + bus_dmamap_destroy(vsc->sc_dmat, sc->sc_dma_map); +err: + printf(": DMA setup failed\n"); + return; +} + +void +viogpu_repaint(void *arg) +{ + struct viogpu_softc *sc = (struct viogpu_softc *)arg; + int s; + + s = spltty(); + + viogpu_transfer_to_host_2d(sc, 1, sc->sc_fb_width, sc->sc_fb_height); + viogpu_flush_resource(sc, 1, sc->sc_fb_width, sc->sc_fb_height); + + timeout_add_msec(&sc->sc_timo, 10); + splx(s); +} + +int +viogpu_vq_wait(struct virtqueue *vq) +{ + struct virtio_softc *vsc = vq->vq_owner; + struct viogpu_softc *sc = (struct viogpu_softc *)vsc->sc_child; + int slot, len; + + while (virtio_dequeue(vsc, vq, &slot, &len) != 0) + ; + + bus_dmamap_sync(vsc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size, + BUS_DMASYNC_POSTREAD); + + virtio_dequeue_commit(vq, slot); + + return 1; +} + +void +viogpu_rx_soft(void *arg) +{ + struct virtio_softc *vsc = (struct virtio_softc *)arg; + struct viogpu_softc *sc = (struct viogpu_softc *)vsc->sc_child; + struct virtqueue *vq = &sc->sc_vqs[VQCTRL]; + int slot, len; + + while (virtio_dequeue(vsc, vq, &slot, &len) == 0) { + bus_dmamap_sync(vsc->sc_dmat, sc->sc_dma_map, + slot, len, BUS_DMASYNC_POSTREAD); + virtio_dequeue_commit(vq, slot); + } +} + +int +viogpu_send_cmd(struct viogpu_softc *sc, void *cmd, size_t cmd_size, void *ret, + size_t ret_size) +{ + struct virtio_softc *vsc = sc->sc_virtio; + struct virtqueue *vq = &vsc->sc_vqs[VQCTRL]; + struct virtio_gpu_ctrl_hdr *hdr = + (struct virtio_gpu_ctrl_hdr *)sc->sc_cmd; + struct virtio_gpu_ctrl_hdr *ret_hdr = (struct virtio_gpu_ctrl_hdr *)ret; + int slot, r; + + memcpy(sc->sc_cmd, cmd, cmd_size); + memset(sc->sc_cmd + cmd_size, 0, ret_size); + +#if VIRTIO_DEBUG + printf("%s: [%ld -> %ld]: ", __func__, cmd_size, ret_size); + for (int i = 0; i < cmd_size; i++) { + printf(" %02x", ((unsigned char *)sc->sc_cmd)[i]); + } + printf("\n"); +#endif + + hdr->flags |= VIRTIO_GPU_FLAG_FENCE; + hdr->fence_id = ++sc->sc_fence_id; + + r = virtio_enqueue_prep(vq, &slot); + if (r != 0) + panic("%s: control vq busy", sc->sc_dev.dv_xname); + + r = bus_dmamap_load(vsc->sc_dmat, sc->sc_dma_map, sc->sc_cmd, + cmd_size + ret_size, NULL, BUS_DMA_NOWAIT); + if (r != 0) + panic("%s: dmamap load failed", sc->sc_dev.dv_xname); + + r = virtio_enqueue_reserve(vq, slot, sc->sc_dma_map->dm_nsegs + 1); + if (r != 0) + panic("%s: control vq busy", sc->sc_dev.dv_xname); + + bus_dmamap_sync(vsc->sc_dmat, sc->sc_dma_map, 0, cmd_size, + BUS_DMASYNC_PREWRITE); + + virtio_enqueue_p(vq, slot, sc->sc_dma_map, 0, cmd_size, 1); + virtio_enqueue_p(vq, slot, sc->sc_dma_map, cmd_size, ret_size, 0); + virtio_enqueue_commit(vsc, vq, slot, 1); + + viogpu_vq_wait(vq); + + bus_dmamap_sync(vsc->sc_dmat, 
sc->sc_dma_map, 0, cmd_size, + BUS_DMASYNC_POSTWRITE); + bus_dmamap_sync(vsc->sc_dmat, sc->sc_dma_map, cmd_size, ret_size, + BUS_DMASYNC_POSTREAD); + + memcpy(ret, sc->sc_cmd + cmd_size, ret_size); + + if (ret_hdr->fence_id != sc->sc_fence_id) + printf("%s: return fence id not right (0x%llx != 0x%x)\n", + __func__, ret_hdr->fence_id, sc->sc_fence_id); + + return 0; +} + +int +viogpu_get_display_info(struct viogpu_softc *sc) +{ + struct virtio_gpu_ctrl_hdr hdr = { 0 }; + struct virtio_gpu_resp_display_info info = { 0 }; + + hdr.type = VIRTIO_GPU_CMD_GET_DISPLAY_INFO; + + viogpu_send_cmd(sc, &hdr, sizeof(hdr), &info, sizeof(info)); + + if (info.hdr.type != VIRTIO_GPU_RESP_OK_DISPLAY_INFO) { + printf("%s: failed getting display info\n", + sc->sc_dev.dv_xname); + return 1; + } + + if (!info.pmodes[0].enabled) { + printf("%s: pmodes[0] is not enabled\n", sc->sc_dev.dv_xname); + return 1; + } + + sc->sc_fb_width = info.pmodes[0].r.width; + sc->sc_fb_height = info.pmodes[0].r.height; + + return 0; +} + +int +viogpu_create_2d(struct viogpu_softc *sc, int resource_id, int width, + int height) +{ + struct virtio_gpu_resource_create_2d res = { 0 }; + struct virtio_gpu_ctrl_hdr resp = { 0 }; + + res.hdr.type = VIRTIO_GPU_CMD_RESOURCE_CREATE_2D; + res.resource_id = resource_id; + res.format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM; + res.width = width; + res.height = height; + + viogpu_send_cmd(sc, &res, sizeof(res), &resp, sizeof(resp)); + + if (resp.type != VIRTIO_GPU_RESP_OK_NODATA) { + printf("%s: failed CREATE_2D: %d\n", sc->sc_dev.dv_xname, + resp.type); + return 1; + } + + return 0; +} + +int +viogpu_set_scanout(struct viogpu_softc *sc, int scanout_id, int resource_id, + int width, int height) +{ + struct virtio_gpu_set_scanout ss = { 0 }; + struct virtio_gpu_ctrl_hdr resp = { 0 }; + + ss.hdr.type = VIRTIO_GPU_CMD_SET_SCANOUT; + ss.scanout_id = scanout_id; + ss.resource_id = resource_id; + ss.r.width = width; + ss.r.height = height; + + viogpu_send_cmd(sc, &ss, sizeof(ss), &resp, sizeof(resp)); + + if (resp.type != VIRTIO_GPU_RESP_OK_NODATA) { + printf("%s: failed SET_SCANOUT: %d\n", sc->sc_dev.dv_xname, + resp.type); + return 1; + } + + return 0; +} + +int +viogpu_attach_backing(struct viogpu_softc *sc, int resource_id, + bus_dmamap_t dmamap) +{ + struct virtio_gpu_resource_attach_backing_entries { + struct virtio_gpu_ctrl_hdr hdr; + __le32 resource_id; + __le32 nr_entries; + struct virtio_gpu_mem_entry entries[1]; + } __packed backing = { 0 }; + struct virtio_gpu_ctrl_hdr resp = { 0 }; + + backing.hdr.type = VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING; + backing.resource_id = resource_id; + backing.nr_entries = nitems(backing.entries); + backing.entries[0].addr = dmamap->dm_segs[0].ds_addr; + backing.entries[0].length = dmamap->dm_segs[0].ds_len; + + if (dmamap->dm_nsegs > 1) + printf("%s: TODO: send all %d segs\n", __func__, + dmamap->dm_nsegs); + +#if VIRTIO_DEBUG + printf("%s: backing addr 0x%llx length %d\n", __func__, + backing.entries[0].addr, backing.entries[0].length); +#endif + + viogpu_send_cmd(sc, &backing, sizeof(backing), &resp, sizeof(resp)); + + if (resp.type != VIRTIO_GPU_RESP_OK_NODATA) { + printf("%s: failed ATTACH_BACKING: %d\n", sc->sc_dev.dv_xname, + resp.type); + return 1; + } + + return 0; +} + +int +viogpu_transfer_to_host_2d(struct viogpu_softc *sc, int resource_id, + uint32_t width, uint32_t height) +{ + struct virtio_gpu_transfer_to_host_2d tth = { 0 }; + struct virtio_gpu_ctrl_hdr resp = { 0 }; + + tth.hdr.type = VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D; + tth.resource_id = 
resource_id; + tth.r.width = width; + tth.r.height = height; + + viogpu_send_cmd(sc, &tth, sizeof(tth), &resp, sizeof(resp)); + + if (resp.type != VIRTIO_GPU_RESP_OK_NODATA) { + printf("%s: failed TRANSFER_TO_HOST: %d\n", sc->sc_dev.dv_xname, + resp.type); + return 1; + } + + return 0; +} + +int +viogpu_flush_resource(struct viogpu_softc *sc, int resource_id, uint32_t width, + uint32_t height) +{ + struct virtio_gpu_resource_flush flush = { 0 }; + struct virtio_gpu_ctrl_hdr resp = { 0 }; + + flush.hdr.type = VIRTIO_GPU_CMD_RESOURCE_FLUSH; + flush.resource_id = resource_id; + flush.r.width = width; + flush.r.height = height; + + viogpu_send_cmd(sc, &flush, sizeof(flush), &resp, sizeof(resp)); + + if (resp.type != VIRTIO_GPU_RESP_OK_NODATA) { + printf("%s: failed RESOURCE_FLUSH: %d\n", sc->sc_dev.dv_xname, + resp.type); + return 1; + } + + return 0; +} + +int +viogpu_wsioctl(void *v, u_long cmd, caddr_t data, int flag, struct proc *p) +{ + struct rasops_info *ri = v; + struct wsdisplay_param *dp = (struct wsdisplay_param *)data; + struct wsdisplay_fbinfo *wdf; + + switch (cmd) { + case WSDISPLAYIO_GETPARAM: + if (ws_get_param) + return ws_get_param(dp); + return -1; + case WSDISPLAYIO_SETPARAM: + if (ws_set_param) + return ws_set_param(dp); + return -1; + case WSDISPLAYIO_GTYPE: + *(u_int *)data = WSDISPLAY_TYPE_VIOGPU; + break; + case WSDISPLAYIO_GINFO: + wdf = (struct wsdisplay_fbinfo *)data; + wdf->width = ri->ri_width; + wdf->height = ri->ri_height; + wdf->depth = ri->ri_depth; + wdf->stride = ri->ri_stride; + wdf->cmsize = 0; + wdf->offset = 0; + break; + case WSDISPLAYIO_LINEBYTES: + *(u_int *)data = ri->ri_stride; + break; + case WSDISPLAYIO_SMODE: + break; + case WSDISPLAYIO_GETSUPPORTEDDEPTH: + *(u_int *)data = WSDISPLAYIO_DEPTH_24_32; + break; + case WSDISPLAYIO_GVIDEO: + case WSDISPLAYIO_SVIDEO: + break; + default: + return -1; + } + + return 0; +} + +paddr_t +viogpu_wsmmap(void *v, off_t off, int prot) +{ + struct rasops_info *ri = v; + struct viogpu_softc *sc = ri->ri_hw; + size_t size = sc->sc_fb_dma_size; + + if (off < 0 || off >= size) + return -1; + + return (((paddr_t)sc->sc_fb_dma_kva + off) | PMAP_NOCACHE); +} + +int +viogpu_alloc_screen(void *v, const struct wsscreen_descr *type, + void **cookiep, int *curxp, int *curyp, uint32_t *attrp) +{ + return rasops_alloc_screen(v, cookiep, curxp, curyp, attrp); +} + +#if 0 +int +viogpu_fb_probe(struct drm_fb_helper *helper, + struct drm_fb_helper_surface_size *sizes) +{ + struct viogpu_softc *sc = helper->dev->dev_private; + struct drm_device *ddev = helper->dev; + struct viogpu_framebuffer *sfb = to_viogpu_framebuffer(helper->fb); + struct drm_mode_fb_cmd2 mode_cmd = { 0 }; + struct drm_framebuffer *fb = helper->fb; + struct wsemuldisplaydev_attach_args aa; + struct rasops_info *ri = &sc->ro; + struct viogpufb_attach_args sfa; + unsigned int bytes_per_pixel; + struct fb_info *info; + size_t size; + int error; + + if (viogpu_get_display_info(sc) != 0) + return -1; + + bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8); + + mode_cmd.width = sc->sc_fb_width; + mode_cmd.height = sc->sc_fb_height; + mode_cmd.pitches[0] = sc->sc_fb_width * bytes_per_pixel; + mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, + sizes->surface_depth); + + size = roundup(mode_cmd.pitches[0] * mode_cmd.height, PAGE_SIZE); + + sfb->obj = drm_gem_cma_create(ddev, size); + if (sfb->obj == NULL) { + DRM_ERROR("failed to allocate memory for framebuffer\n"); + return -ENOMEM; + } + + drm_helper_mode_fill_fb_struct(ddev, fb, &mode_cmd); + 
fb->format = drm_format_info(DRM_FORMAT_ARGB8888); + fb->obj[0] = &sfb->obj->base; + error = drm_framebuffer_init(ddev, fb, &viogpu_framebuffer_funcs); + if (error != 0) { + DRM_ERROR("failed to initialize framebuffer\n"); + return error; + } + + info = drm_fb_helper_alloc_fbi(helper); + if (IS_ERR(info)) { + DRM_ERROR("Failed to allocate fb_info\n"); + return PTR_ERR(info); + } + info->par = helper; + + error = viogpu_create_2d(sc, 1, sc->sc_fb_width, sc->sc_fb_height); + if (error) + return error; + + error = viogpu_attach_backing(sc, 1, sfb->obj->dmamap); + if (error) + return error; + + error = viogpu_set_scanout(sc, 0, 1, sc->sc_fb_width, sc->sc_fb_height); + if (error) + return error; + + return 0; +} +#endif diff --git a/sys/dev/pv/viogpu.h b/sys/dev/pv/viogpu.h new file mode 100644 index 00000000000..ea67ae02608 --- /dev/null +++ b/sys/dev/pv/viogpu.h @@ -0,0 +1,450 @@ +/* + * Virtio GPU Device + * + * Copyright Red Hat, Inc. 2013-2014 + * + * Authors: + * Dave Airlie + * Gerd Hoffmann + * + * This header is BSD licensed so anyone can use the definitions + * to implement compatible drivers/servers: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of IBM nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL IBM OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT + * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#ifndef VIRTIO_GPU_HW_H +#define VIRTIO_GPU_HW_H + +#include + +#define __u8 uint8_t +#define __u32 uint32_t +#define __le16 uint16_t +#define __le32 uint32_t +#define __le64 uint64_t + +/* + * VIRTIO_GPU_CMD_CTX_* + * VIRTIO_GPU_CMD_*_3D + */ +#define VIRTIO_GPU_F_VIRGL (1ULL << 0) + +/* + * VIRTIO_GPU_CMD_GET_EDID + */ +#define VIRTIO_GPU_F_EDID (1ULL << 1) +/* + * VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID + */ +#define VIRTIO_GPU_F_RESOURCE_UUID (1ULL << 2) + +/* + * VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB + */ +#define VIRTIO_GPU_F_RESOURCE_BLOB (1ULL << 3) + +enum virtio_gpu_ctrl_type { + VIRTIO_GPU_UNDEFINED = 0, + + /* 2d commands */ + VIRTIO_GPU_CMD_GET_DISPLAY_INFO = 0x0100, + VIRTIO_GPU_CMD_RESOURCE_CREATE_2D, + VIRTIO_GPU_CMD_RESOURCE_UNREF, + VIRTIO_GPU_CMD_SET_SCANOUT, + VIRTIO_GPU_CMD_RESOURCE_FLUSH, + VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D, + VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING, + VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING, + VIRTIO_GPU_CMD_GET_CAPSET_INFO, + VIRTIO_GPU_CMD_GET_CAPSET, + VIRTIO_GPU_CMD_GET_EDID, + VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID, + VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB, + VIRTIO_GPU_CMD_SET_SCANOUT_BLOB, + + /* 3d commands */ + VIRTIO_GPU_CMD_CTX_CREATE = 0x0200, + VIRTIO_GPU_CMD_CTX_DESTROY, + VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE, + VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE, + VIRTIO_GPU_CMD_RESOURCE_CREATE_3D, + VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D, + VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D, + VIRTIO_GPU_CMD_SUBMIT_3D, + VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB, + VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB, + + /* cursor commands */ + VIRTIO_GPU_CMD_UPDATE_CURSOR = 0x0300, + VIRTIO_GPU_CMD_MOVE_CURSOR, + + /* success responses */ + VIRTIO_GPU_RESP_OK_NODATA = 0x1100, + VIRTIO_GPU_RESP_OK_DISPLAY_INFO, + VIRTIO_GPU_RESP_OK_CAPSET_INFO, + VIRTIO_GPU_RESP_OK_CAPSET, + VIRTIO_GPU_RESP_OK_EDID, + VIRTIO_GPU_RESP_OK_RESOURCE_UUID, + VIRTIO_GPU_RESP_OK_MAP_INFO, + + /* error responses */ + VIRTIO_GPU_RESP_ERR_UNSPEC = 0x1200, + VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY, + VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID, + VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID, + VIRTIO_GPU_RESP_ERR_INVALID_CONTEXT_ID, + VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER, +}; + +enum virtio_gpu_shm_id { + VIRTIO_GPU_SHM_ID_UNDEFINED = 0, + /* + * VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB + * VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB + */ + VIRTIO_GPU_SHM_ID_HOST_VISIBLE = 1 +}; + +#define VIRTIO_GPU_FLAG_FENCE (1 << 0) + +struct virtio_gpu_ctrl_hdr { + __le32 type; + __le32 flags; + __le64 fence_id; + __le32 ctx_id; + __le32 padding; +} __packed; + +/* data passed in the cursor vq */ + +struct virtio_gpu_cursor_pos { + __le32 scanout_id; + __le32 x; + __le32 y; + __le32 padding; +} __packed; + +/* VIRTIO_GPU_CMD_UPDATE_CURSOR, VIRTIO_GPU_CMD_MOVE_CURSOR */ +struct virtio_gpu_update_cursor { + struct virtio_gpu_ctrl_hdr hdr; + struct virtio_gpu_cursor_pos pos; /* update & move */ + __le32 resource_id; /* update only */ + __le32 hot_x; /* update only */ + __le32 hot_y; /* update only */ + __le32 padding; +} __packed; + +/* data passed in the control vq, 2d related */ + +struct virtio_gpu_rect { + __le32 x; + __le32 y; + __le32 width; + __le32 height; +} __packed; + +/* VIRTIO_GPU_CMD_RESOURCE_UNREF */ +struct virtio_gpu_resource_unref { + struct virtio_gpu_ctrl_hdr hdr; + __le32 resource_id; + __le32 padding; +} __packed; + +/* VIRTIO_GPU_CMD_RESOURCE_CREATE_2D: create a 2d resource with a format */ +struct virtio_gpu_resource_create_2d { + struct virtio_gpu_ctrl_hdr hdr; + __le32 resource_id; + __le32 format; + __le32 width; + __le32 height; +} __packed; + 
+/* VIRTIO_GPU_CMD_SET_SCANOUT */ +struct virtio_gpu_set_scanout { + struct virtio_gpu_ctrl_hdr hdr; + struct virtio_gpu_rect r; + __le32 scanout_id; + __le32 resource_id; +} __packed; + +/* VIRTIO_GPU_CMD_RESOURCE_FLUSH */ +struct virtio_gpu_resource_flush { + struct virtio_gpu_ctrl_hdr hdr; + struct virtio_gpu_rect r; + __le32 resource_id; + __le32 padding; +} __packed; + +/* VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D: simple transfer to_host */ +struct virtio_gpu_transfer_to_host_2d { + struct virtio_gpu_ctrl_hdr hdr; + struct virtio_gpu_rect r; + __le64 offset; + __le32 resource_id; + __le32 padding; +} __packed; + +struct virtio_gpu_mem_entry { + __le64 addr; + __le32 length; + __le32 padding; +} __packed; + +/* VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING */ +struct virtio_gpu_resource_attach_backing { + struct virtio_gpu_ctrl_hdr hdr; + __le32 resource_id; + __le32 nr_entries; +} __packed; + +/* VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING */ +struct virtio_gpu_resource_detach_backing { + struct virtio_gpu_ctrl_hdr hdr; + __le32 resource_id; + __le32 padding; +} __packed; + +/* VIRTIO_GPU_RESP_OK_DISPLAY_INFO */ +#define VIRTIO_GPU_MAX_SCANOUTS 16 +struct virtio_gpu_resp_display_info { + struct virtio_gpu_ctrl_hdr hdr; + struct virtio_gpu_display_one { + struct virtio_gpu_rect r; + __le32 enabled; + __le32 flags; + } pmodes[VIRTIO_GPU_MAX_SCANOUTS]; +} __packed; + +/* data passed in the control vq, 3d related */ + +struct virtio_gpu_box { + __le32 x, y, z; + __le32 w, h, d; +} __packed; + +/* VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D, VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D */ +struct virtio_gpu_transfer_host_3d { + struct virtio_gpu_ctrl_hdr hdr; + struct virtio_gpu_box box; + __le64 offset; + __le32 resource_id; + __le32 level; + __le32 stride; + __le32 layer_stride; +} __packed; + +/* VIRTIO_GPU_CMD_RESOURCE_CREATE_3D */ +#define VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP (1 << 0) +struct virtio_gpu_resource_create_3d { + struct virtio_gpu_ctrl_hdr hdr; + __le32 resource_id; + __le32 target; + __le32 format; + __le32 bind; + __le32 width; + __le32 height; + __le32 depth; + __le32 array_size; + __le32 last_level; + __le32 nr_samples; + __le32 flags; + __le32 padding; +} __packed; + +/* VIRTIO_GPU_CMD_CTX_CREATE */ +struct virtio_gpu_ctx_create { + struct virtio_gpu_ctrl_hdr hdr; + __le32 nlen; + __le32 padding; + char debug_name[64]; +} __packed; + +/* VIRTIO_GPU_CMD_CTX_DESTROY */ +struct virtio_gpu_ctx_destroy { + struct virtio_gpu_ctrl_hdr hdr; +} __packed; + +/* VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE, VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE */ +struct virtio_gpu_ctx_resource { + struct virtio_gpu_ctrl_hdr hdr; + __le32 resource_id; + __le32 padding; +} __packed; + +/* VIRTIO_GPU_CMD_SUBMIT_3D */ +struct virtio_gpu_cmd_submit { + struct virtio_gpu_ctrl_hdr hdr; + __le32 size; + __le32 padding; +} __packed; + +#define VIRTIO_GPU_CAPSET_VIRGL 1 +#define VIRTIO_GPU_CAPSET_VIRGL2 2 + +/* VIRTIO_GPU_CMD_GET_CAPSET_INFO */ +struct virtio_gpu_get_capset_info { + struct virtio_gpu_ctrl_hdr hdr; + __le32 capset_index; + __le32 padding; +} __packed; + +/* VIRTIO_GPU_RESP_OK_CAPSET_INFO */ +struct virtio_gpu_resp_capset_info { + struct virtio_gpu_ctrl_hdr hdr; + __le32 capset_id; + __le32 capset_max_version; + __le32 capset_max_size; + __le32 padding; +} __packed; + +/* VIRTIO_GPU_CMD_GET_CAPSET */ +struct virtio_gpu_get_capset { + struct virtio_gpu_ctrl_hdr hdr; + __le32 capset_id; + __le32 capset_version; +} __packed; + +/* VIRTIO_GPU_RESP_OK_CAPSET */ +struct virtio_gpu_resp_capset { + struct virtio_gpu_ctrl_hdr hdr; + __u8 
capset_data[]; +} __packed; + +/* VIRTIO_GPU_CMD_GET_EDID */ +struct virtio_gpu_cmd_get_edid { + struct virtio_gpu_ctrl_hdr hdr; + __le32 scanout; + __le32 padding; +} __packed; + +/* VIRTIO_GPU_RESP_OK_EDID */ +struct virtio_gpu_resp_edid { + struct virtio_gpu_ctrl_hdr hdr; + __le32 size; + __le32 padding; + __u8 edid[1024]; +} __packed; + +#define VIRTIO_GPU_EVENT_DISPLAY (1 << 0) + +struct virtio_gpu_config { + __le32 events_read; + __le32 events_clear; + __le32 num_scanouts; + __le32 num_capsets; +} __packed; + +/* simple formats for fbcon/X use */ +enum virtio_gpu_formats { + VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM = 1, + VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM = 2, + VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM = 3, + VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM = 4, + + VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM = 67, + VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM = 68, + + VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM = 121, + VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM = 134, +}; + +/* VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID */ +struct virtio_gpu_resource_assign_uuid { + struct virtio_gpu_ctrl_hdr hdr; + __le32 resource_id; + __le32 padding; +} __packed; + +/* VIRTIO_GPU_RESP_OK_RESOURCE_UUID */ +struct virtio_gpu_resp_resource_uuid { + struct virtio_gpu_ctrl_hdr hdr; + __u8 uuid[16]; +} __packed; + +/* VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB */ +struct virtio_gpu_resource_create_blob { + struct virtio_gpu_ctrl_hdr hdr; + __le32 resource_id; +#define VIRTIO_GPU_BLOB_MEM_GUEST 0x0001 +#define VIRTIO_GPU_BLOB_MEM_HOST3D 0x0002 +#define VIRTIO_GPU_BLOB_MEM_HOST3D_GUEST 0x0003 + +#define VIRTIO_GPU_BLOB_FLAG_USE_MAPPABLE 0x0001 +#define VIRTIO_GPU_BLOB_FLAG_USE_SHAREABLE 0x0002 +#define VIRTIO_GPU_BLOB_FLAG_USE_CROSS_DEVICE 0x0004 + /* zero is invalid blob mem */ + __le32 blob_mem; + __le32 blob_flags; + __le32 nr_entries; + __le64 blob_id; + __le64 size; + /* + * sizeof(nr_entries * virtio_gpu_mem_entry) bytes follow + */ +} __packed; + +/* VIRTIO_GPU_CMD_SET_SCANOUT_BLOB */ +struct virtio_gpu_set_scanout_blob { + struct virtio_gpu_ctrl_hdr hdr; + struct virtio_gpu_rect r; + __le32 scanout_id; + __le32 resource_id; + __le32 width; + __le32 height; + __le32 format; + __le32 padding; + __le32 strides[4]; + __le32 offsets[4]; +} __packed; + +/* VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB */ +struct virtio_gpu_resource_map_blob { + struct virtio_gpu_ctrl_hdr hdr; + __le32 resource_id; + __le32 padding; + __le64 offset; +} __packed; + +/* VIRTIO_GPU_RESP_OK_MAP_INFO */ +#define VIRTIO_GPU_MAP_CACHE_MASK 0x0f +#define VIRTIO_GPU_MAP_CACHE_NONE 0x00 +#define VIRTIO_GPU_MAP_CACHE_CACHED 0x01 +#define VIRTIO_GPU_MAP_CACHE_UNCACHED 0x02 +#define VIRTIO_GPU_MAP_CACHE_WC 0x03 +struct virtio_gpu_resp_map_info { + struct virtio_gpu_ctrl_hdr hdr; + __u32 map_info; + __u32 padding; +} __packed; + +/* VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB */ +struct virtio_gpu_resource_unmap_blob { + struct virtio_gpu_ctrl_hdr hdr; + __le32 resource_id; + __le32 padding; +} __packed; + +#endif diff --git a/sys/dev/pv/virtio.c b/sys/dev/pv/virtio.c index cb841a4c2a9..d085376d054 100644 --- a/sys/dev/pv/virtio.c +++ b/sys/dev/pv/virtio.c @@ -1,4 +1,4 @@ -/* $OpenBSD: virtio.c,v 1.21 2022/01/09 05:42:58 jsg Exp $ */ +/* $OpenBSD: virtio.c,v 1.22 2023/04/20 19:28:31 jcs Exp $ */ /* $NetBSD: virtio.c,v 1.3 2011/11/02 23:05:52 njoly Exp $ */ /* @@ -64,7 +64,13 @@ static const char * const virtio_device_name[] = { "Rpmsg", /* 7 */ "SCSI host", /* 8 */ "9P Transport", /* 9 */ - "mac80211 wlan" /* 10 */ + "mac80211 wlan", /* 10 */ + NULL, /* 11 */ + NULL, /* 12 */ + NULL, /* 13 */ + NULL, /* 14 */ + NULL, /* 15 */ + "GPU", /* 16 
*/ }; #define NDEVNAMES (sizeof(virtio_device_name)/sizeof(char*)) diff --git a/sys/dev/pv/virtioreg.h b/sys/dev/pv/virtioreg.h index acc43eaaafa..1ac17a9bd21 100644 --- a/sys/dev/pv/virtioreg.h +++ b/sys/dev/pv/virtioreg.h @@ -1,4 +1,4 @@ -/* $OpenBSD: virtioreg.h,v 1.4 2019/03/24 18:21:12 sf Exp $ */ +/* $OpenBSD: virtioreg.h,v 1.5 2023/04/20 19:28:31 jcs Exp $ */ /* $NetBSD: virtioreg.h,v 1.1 2011/10/30 12:12:21 hannken Exp $ */ /* @@ -80,6 +80,7 @@ #define PCI_PRODUCT_VIRTIO_SCSI 8 #define PCI_PRODUCT_VIRTIO_9P 9 #define PCI_PRODUCT_VIRTIO_MAC80211 10 +#define PCI_PRODUCT_VIRTIO_GPU 16 #define PCI_PRODUCT_VIRTIO_VMMCI 65535 /* private id */ /* device-independent feature bits */ diff --git a/sys/dev/wscons/wsconsio.h b/sys/dev/wscons/wsconsio.h index de483493360..9755ee60f18 100644 --- a/sys/dev/wscons/wsconsio.h +++ b/sys/dev/wscons/wsconsio.h @@ -1,4 +1,4 @@ -/* $OpenBSD: wsconsio.h,v 1.98 2022/07/15 17:57:27 kettenis Exp $ */ +/* $OpenBSD: wsconsio.h,v 1.99 2023/04/20 19:28:31 jcs Exp $ */ /* $NetBSD: wsconsio.h,v 1.74 2005/04/28 07:15:44 martin Exp $ */ /* @@ -442,6 +442,7 @@ struct wsmouse_parameters { #define WSDISPLAY_TYPE_EFIFB 71 /* EFI framebuffer */ #define WSDISPLAY_TYPE_RKDRM 72 /* Rockchip KMS framebuffer */ #define WSDISPLAY_TYPE_ASTFB 73 /* AST framebuffer */ +#define WSDISPLAY_TYPE_VIOGPU 74 /* VirtIO GPU */ /* Basic display information. Not applicable to all display types. */ struct wsdisplay_fbinfo { -- 2.20.1
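Not part of the diff: for readers following the control-queue flow, viogpu_repaint() above refreshes the display every 10 ms by sending VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D followed by VIRTIO_GPU_CMD_RESOURCE_FLUSH for the whole framebuffer. The sketch below shows the same two-command sequence for an arbitrary dirty rectangle, built only from viogpu_send_cmd() and the structures this diff adds. viogpu_update_rect() is a hypothetical helper, resource id 1 matches the single 2D resource the driver creates, and the offset arithmetic assumes the 4-byte B8G8R8X8 layout the driver configures; the committed driver always transfers the full frame, so the partial-rectangle form is only an illustration of the protocol.

/*
 * Hypothetical helper, not in the diff: push one dirty rectangle to the
 * host instead of the whole framebuffer.  Assumes the same includes and
 * definitions as viogpu.c/viogpu.h above.
 */
int
viogpu_update_rect(struct viogpu_softc *sc, uint32_t x, uint32_t y,
    uint32_t w, uint32_t h)
{
	struct virtio_gpu_transfer_to_host_2d tth = { 0 };
	struct virtio_gpu_resource_flush flush = { 0 };
	struct virtio_gpu_ctrl_hdr resp = { 0 };

	/* copy the guest pages for the rectangle into the host resource */
	tth.hdr.type = VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D;
	tth.resource_id = 1;
	tth.r.x = x;
	tth.r.y = y;
	tth.r.width = w;
	tth.r.height = h;
	/* byte offset of (x, y) inside the backing store, 4 bytes/pixel */
	tth.offset = ((uint64_t)y * sc->sc_fb_width + x) * 4;
	viogpu_send_cmd(sc, &tth, sizeof(tth), &resp, sizeof(resp));
	if (resp.type != VIRTIO_GPU_RESP_OK_NODATA)
		return 1;

	/* then ask the host to scan the updated region out */
	flush.hdr.type = VIRTIO_GPU_CMD_RESOURCE_FLUSH;
	flush.resource_id = 1;
	flush.r.x = x;
	flush.r.y = y;
	flush.r.width = w;
	flush.r.height = h;
	viogpu_send_cmd(sc, &flush, sizeof(flush), &resp, sizeof(resp));
	if (resp.type != VIRTIO_GPU_RESP_OK_NODATA)
		return 1;

	return 0;
}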