From: deraadt Date: Thu, 11 Jan 1996 17:58:59 +0000 (+0000) Subject: from netbsd; VM86 support, by John Kohl, touched up a bit by charles X-Git-Url: http://artulab.com/gitweb/?a=commitdiff_plain;h=c0dd7c19d50c657a7b54c7ab9eef581ff50f9f1d;p=openbsd from netbsd; VM86 support, by John Kohl, touched up a bit by charles --- diff --git a/sys/arch/i386/conf/files.i386 b/sys/arch/i386/conf/files.i386 index 8d1d4141d51..f636bc170f3 100644 --- a/sys/arch/i386/conf/files.i386 +++ b/sys/arch/i386/conf/files.i386 @@ -1,4 +1,4 @@ -# $NetBSD: files.i386,v 1.60 1995/10/11 04:19:29 mycroft Exp $ +# $NetBSD: files.i386,v 1.61 1996/01/08 13:51:30 mycroft Exp $ # # new style config file for i386 architecture # @@ -126,6 +126,9 @@ file arch/i386/pci/pci_machdep.c pci # Compatibility modules # +# VM86 mode +file arch/i386/i386/vm86.c vm86 + # SVR4 binary compatibility (COMPAT_SVR4) include "../../../compat/svr4/files.svr4" file arch/i386/i386/svr4_machdep.c compat_svr4 diff --git a/sys/arch/i386/i386/machdep.c b/sys/arch/i386/i386/machdep.c index 9659d1ca71b..46a1e5bf534 100644 --- a/sys/arch/i386/i386/machdep.c +++ b/sys/arch/i386/i386/machdep.c @@ -1,4 +1,4 @@ -/* $NetBSD: machdep.c,v 1.183 1996/01/04 22:22:01 jtc Exp $ */ +/* $NetBSD: machdep.c,v 1.185 1996/01/08 20:12:20 mycroft Exp $ */ /*- * Copyright (c) 1993, 1994, 1995 Charles M. Hannum. All rights reserved. @@ -92,6 +92,10 @@ #include #include +#ifdef VM86 +#include +#endif + #include "isa.h" #include "npx.h" #if NNPX > 0 @@ -537,6 +541,8 @@ sendsig(catcher, sig, mask, code) /* * Build the signal context to be used by sigreturn. */ + frame.sf_sc.sc_err = tf->tf_err; + frame.sf_sc.sc_trapno = tf->tf_trapno; frame.sf_sc.sc_onstack = oonstack; frame.sf_sc.sc_mask = mask; #ifdef VM86 @@ -545,6 +551,9 @@ sendsig(catcher, sig, mask, code) frame.sf_sc.sc_fs = tf->tf_vm86_fs; frame.sf_sc.sc_es = tf->tf_vm86_es; frame.sf_sc.sc_ds = tf->tf_vm86_ds; + frame.sf_sc.sc_eflags = tf->tf_eflags; + SETFLAGS(frame.sf_sc.sc_eflags, VM86_EFLAGS(p), + VM86_FLAGMASK(p)|PSL_VIF); } else #endif { @@ -552,19 +561,19 @@ sendsig(catcher, sig, mask, code) __asm("movl %%fs,%w0" : "=r" (frame.sf_sc.sc_fs)); frame.sf_sc.sc_es = tf->tf_es; frame.sf_sc.sc_ds = tf->tf_ds; + frame.sf_sc.sc_eflags = tf->tf_eflags; } - frame.sf_sc.sc_edi = tf->tf_edi; - frame.sf_sc.sc_esi = tf->tf_esi; - frame.sf_sc.sc_ebp = tf->tf_ebp; - frame.sf_sc.sc_ebx = tf->tf_ebx; - frame.sf_sc.sc_edx = tf->tf_edx; - frame.sf_sc.sc_ecx = tf->tf_ecx; - frame.sf_sc.sc_eax = tf->tf_eax; - frame.sf_sc.sc_eip = tf->tf_eip; - frame.sf_sc.sc_cs = tf->tf_cs; - frame.sf_sc.sc_eflags = tf->tf_eflags; - frame.sf_sc.sc_esp = tf->tf_esp; - frame.sf_sc.sc_ss = tf->tf_ss; + frame.sf_sc.sc_edi = tf->tf_edi; + frame.sf_sc.sc_esi = tf->tf_esi; + frame.sf_sc.sc_ebp = tf->tf_ebp; + frame.sf_sc.sc_ebx = tf->tf_ebx; + frame.sf_sc.sc_edx = tf->tf_edx; + frame.sf_sc.sc_ecx = tf->tf_ecx; + frame.sf_sc.sc_eax = tf->tf_eax; + frame.sf_sc.sc_eip = tf->tf_eip; + frame.sf_sc.sc_cs = tf->tf_cs; + frame.sf_sc.sc_esp = tf->tf_esp; + frame.sf_sc.sc_ss = tf->tf_ss; if (copyout(&frame, fp, sizeof(frame)) != 0) { /* @@ -578,14 +587,16 @@ sendsig(catcher, sig, mask, code) /* * Build context to run handler in. 
*/ - tf->tf_esp = (int)fp; + __asm("movl %w0,%%gs" : : "r" (GSEL(GUDATA_SEL, SEL_UPL))); + __asm("movl %w0,%%fs" : : "r" (GSEL(GUDATA_SEL, SEL_UPL))); + tf->tf_es = GSEL(GUDATA_SEL, SEL_UPL); + tf->tf_ds = GSEL(GUDATA_SEL, SEL_UPL); tf->tf_eip = (int)(((char *)PS_STRINGS) - (esigcode - sigcode)); + tf->tf_cs = GSEL(GUCODE_SEL, SEL_UPL); #ifdef VM86 tf->tf_eflags &= ~PSL_VM; #endif - tf->tf_cs = GSEL(GUCODE_SEL, SEL_UPL); - tf->tf_ds = GSEL(GUDATA_SEL, SEL_UPL); - tf->tf_es = GSEL(GUDATA_SEL, SEL_UPL); + tf->tf_esp = (int)fp; tf->tf_ss = GSEL(GUDATA_SEL, SEL_UPL); } @@ -646,25 +657,28 @@ sys_sigreturn(p, v, retval) tf->tf_vm86_fs = context.sc_fs; tf->tf_vm86_es = context.sc_es; tf->tf_vm86_ds = context.sc_ds; + tf->tf_eflags = context.sc_eflags; + SETFLAGS(VM86_EFLAGS(p), context.sc_eflags, + VM86_FLAGMASK(p)|PSL_VIF); } else #endif { /* %fs and %gs were restored by the trampoline. */ tf->tf_es = context.sc_es; tf->tf_ds = context.sc_ds; + tf->tf_eflags = context.sc_eflags; } - tf->tf_edi = context.sc_edi; - tf->tf_esi = context.sc_esi; - tf->tf_ebp = context.sc_ebp; - tf->tf_ebx = context.sc_ebx; - tf->tf_edx = context.sc_edx; - tf->tf_ecx = context.sc_ecx; - tf->tf_eax = context.sc_eax; - tf->tf_eip = context.sc_eip; - tf->tf_cs = context.sc_cs; - tf->tf_eflags = context.sc_eflags; - tf->tf_esp = context.sc_esp; - tf->tf_ss = context.sc_ss; + tf->tf_edi = context.sc_edi; + tf->tf_esi = context.sc_esi; + tf->tf_ebp = context.sc_ebp; + tf->tf_ebx = context.sc_ebx; + tf->tf_edx = context.sc_edx; + tf->tf_ecx = context.sc_ecx; + tf->tf_eax = context.sc_eax; + tf->tf_eip = context.sc_eip; + tf->tf_cs = context.sc_cs; + tf->tf_esp = context.sc_esp; + tf->tf_ss = context.sc_ss; return (EJUSTRETURN); } diff --git a/sys/arch/i386/i386/sys_machdep.c b/sys/arch/i386/i386/sys_machdep.c index 22dbb2cc21f..412f5f128a9 100644 --- a/sys/arch/i386/i386/sys_machdep.c +++ b/sys/arch/i386/i386/sys_machdep.c @@ -1,4 +1,4 @@ -/* $NetBSD: sys_machdep.c,v 1.25.2.1 1995/10/15 06:54:02 mycroft Exp $ */ +/* $NetBSD: sys_machdep.c,v 1.27 1996/01/08 13:51:36 mycroft Exp $ */ /*- * Copyright (c) 1995 Charles M. Hannum. All rights reserved. @@ -66,6 +66,10 @@ #include #include +#ifdef VM86 +#include +#endif + extern vm_map_t kernel_map; #ifdef TRACE @@ -270,6 +274,14 @@ i386_set_ldt(p, args, retval) if (n == fsslot || n == gsslot) return (EBUSY); break; + case SDT_MEMEC: + case SDT_MEMEAC: + case SDT_MEMERC: + case SDT_MEMERAC: + /* Must be "present" if executable and conforming. 
*/ + if (desc.sd.sd_p == 0) + return (EACCES); + break; case SDT_MEMRO: case SDT_MEMROA: case SDT_MEMRW: @@ -411,6 +423,12 @@ sys_sysarch(p, v, retval) error = i386_set_ioperm(p, SCARG(uap, parms), retval); break; +#ifdef VM86 + case I386_VM86: + error = i386_vm86(p, SCARG(uap, parms), retval); + break; +#endif + default: error = EINVAL; break; diff --git a/sys/arch/i386/i386/trap.c b/sys/arch/i386/i386/trap.c index fb69df71a30..f6feb3f1faf 100644 --- a/sys/arch/i386/i386/trap.c +++ b/sys/arch/i386/i386/trap.c @@ -1,4 +1,4 @@ -/* $NetBSD: trap.c,v 1.91 1995/12/09 05:00:27 mycroft Exp $ */ +/* $NetBSD: trap.c,v 1.92 1996/01/08 13:51:38 mycroft Exp $ */ #undef DEBUG #define DEBUG @@ -262,9 +262,15 @@ trap(frame) frame.tf_eip = resume; return; + case T_PROTFLT|T_USER: /* protection fault */ +#ifdef VM86 + if (frame.tf_eflags & PSL_VM) { + vm86_gpfault(p, type & ~T_USER); + goto out; + } +#endif case T_SEGNPFLT|T_USER: case T_STKFLT|T_USER: - case T_PROTFLT|T_USER: /* protection fault */ case T_ALIGNFLT|T_USER: trapsignal(p, SIGBUS, type &~ T_USER); goto out; @@ -521,6 +527,17 @@ syscall(frame) #endif params = (caddr_t)frame.tf_esp + sizeof(int); +#ifdef VM86 + /* + * VM86 mode application found our syscall trap gate by accident; let + * it get a SIGSYS and have the VM86 handler in the process take care + * of it. + */ + if (frame.tf_eflags & PSL_VM) + code = -1; + else +#endif + switch (code) { case SYS_syscall: #ifdef COMPAT_LINUX diff --git a/sys/arch/i386/i386/vm86.c b/sys/arch/i386/i386/vm86.c new file mode 100644 index 00000000000..5797db43e50 --- /dev/null +++ b/sys/arch/i386/i386/vm86.c @@ -0,0 +1,490 @@ +/* $NetBSD: vm86.c,v 1.3 1996/01/08 22:23:35 mycroft Exp $ */ + +/* + * Copyright (c) 1995 John T. Kohl + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR `AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef SYSVMSG +#include +#endif +#ifdef SYSVSEM +#include +#endif +#ifdef SYSVSHM +#include +#endif + +#include +#include +#include + +static void return_to_32bit __P((struct proc *, int)); +static void fast_intxx __P((struct proc *, int)); + +#define SETDIRECT ((~(PSL_USERSTATIC|PSL_NT)) & 0xffff) +#define GETDIRECT (SETDIRECT|0x02a) /* add in two MBZ bits */ + +#define IP(tf) (*(u_short *)&tf->tf_eip) +#define SP(tf) (*(u_short *)&tf->tf_esp) + + +#define putword(base, ptr, val) \ +__asm__ __volatile__( \ + "decw %w0\n\t" \ + "movb %h2,0(%1,%0)\n\t" \ + "decw %w0\n\t" \ + "movb %b2,0(%1,%0)" \ + : "=r" (ptr) \ + : "r" (base), "q" (val), "0" (ptr)) + +#define putdword(base, ptr, val) \ +__asm__ __volatile__( \ + "rorl $16,%2\n\t" \ + "decw %w0\n\t" \ + "movb %h2,0(%1,%0)\n\t" \ + "decw %w0\n\t" \ + "movb %b2,0(%1,%0)\n\t" \ + "rorl $16,%2\n\t" \ + "decw %w0\n\t" \ + "movb %h2,0(%1,%0)\n\t" \ + "decw %w0\n\t" \ + "movb %b2,0(%1,%0)" \ + : "=r" (ptr) \ + : "r" (base), "q" (val), "0" (ptr)) + +#define getbyte(base, ptr) \ +({ unsigned long __res; \ +__asm__ __volatile__( \ + "movb 0(%1,%0),%b2\n\t" \ + "incw %w0" \ + : "=r" (ptr), "=r" (base), "=q" (__res) \ + : "0" (ptr), "1" (base), "2" (0)); \ +__res; }) + +#define getword(base, ptr) \ +({ unsigned long __res; \ +__asm__ __volatile__( \ + "movb 0(%1,%0),%b2\n\t" \ + "incw %w0\n\t" \ + "movb 0(%1,%0),%h2\n\t" \ + "incw %w0" \ + : "=r" (ptr), "=r" (base), "=q" (__res) \ + : "0" (ptr), "1" (base), "2" (0)); \ +__res; }) + +#define getdword(base, ptr) \ +({ unsigned long __res; \ +__asm__ __volatile__( \ + "movb 0(%1,%0),%b2\n\t" \ + "incw %w0\n\t" \ + "movb 0(%1,%0),%h2\n\t" \ + "incw %w0\n\t" \ + "rorl $16,%2\n\t" \ + "movb 0(%1,%0),%b2\n\t" \ + "incw %w0\n\t" \ + "movb 0(%1,%0),%h2\n\t" \ + "incw %w0\n\t" \ + "rorl $16,%2" \ + : "=r" (ptr), "=r" (base), "=q" (__res) \ + : "0" (ptr), "1" (base)); \ +__res; }) + + +static __inline__ int +is_bitset(nr, bitmap) + int nr; + caddr_t bitmap; +{ + u_int byte; /* bt instruction doesn't do + bytes--it examines ints! 
*/ + bitmap += nr / NBBY; + nr = nr % NBBY; + byte = fubyte(bitmap); + + __asm__ __volatile__("btl %2,%1\n\tsbbl %0,%0" + :"=r" (nr) + :"r" (byte),"r" (nr)); + return (nr); +} + + +static __inline__ void +set_vif(p) + struct proc *p; +{ + + VM86_EFLAGS(p) |= PSL_VIF; + if (VM86_EFLAGS(p) & PSL_VIP) + return_to_32bit(p, VM86_STI); +} + +static __inline__ void +set_vflags(p, flags) + struct proc *p; + int flags; +{ + struct trapframe *tf = p->p_md.md_regs; + + SETFLAGS(VM86_EFLAGS(p), flags, VM86_FLAGMASK(p)); + SETFLAGS(tf->tf_eflags, flags, SETDIRECT); + if (flags & PSL_I) + set_vif(p); +} + +static __inline__ void +set_vflags_short(p, flags) + struct proc *p; + int flags; +{ + struct trapframe *tf = p->p_md.md_regs; + + SETFLAGS(VM86_EFLAGS(p), flags, VM86_FLAGMASK(p) & 0xffff); + SETFLAGS(tf->tf_eflags, flags, SETDIRECT); + if (flags & PSL_I) + set_vif(p); +} + +static __inline__ int +get_vflags(p) + struct proc *p; +{ + struct trapframe *tf = p->p_md.md_regs; + int flags = 0; + + SETFLAGS(flags, VM86_EFLAGS(p), VM86_FLAGMASK(p)); + SETFLAGS(flags, tf->tf_eflags, GETDIRECT); + if (VM86_EFLAGS(p) & PSL_VIF) + flags |= PSL_I; + return (flags); +} + + +#define V86_AH(regs) (((u_char *)&((regs)->tf_eax))[1]) +#define V86_AL(regs) (((u_char *)&((regs)->tf_eax))[0]) + +static void +fast_intxx(p, intrno) + struct proc *p; + int intrno; +{ + struct trapframe *tf = p->p_md.md_regs; + /* + * handle certain interrupts directly by pushing the interrupt + * frame and resetting registers, but only if user said that's ok + * (i.e. not revectored.) Otherwise bump to 32-bit user handler. + */ + struct vm86_struct *u_vm86p; + struct { u_short ip, cs; } ihand; + + u_short cs; + u_long ss, sp; + + /* + * Note: u_vm86p points to user-space, we only compute offsets + * and don't deref it. is_revectored() above does fubyte() to + * get stuff from it + */ + u_vm86p = (struct vm86_struct *)p->p_addr->u_pcb.vm86_userp; + + /* + * If coming from BIOS segment, or going to BIOS segment, or user + * requested special handling, return to user space with indication + * of which INT was requested. + */ + cs = tf->tf_cs; + if (cs == BIOSSEG || is_bitset(intrno, &u_vm86p->int_byuser[0])) + goto vector; + + /* + * If it's interrupt 0x21 (special in the DOS world) and the + * sub-command (in AH) was requested for special handling, + * return to user mode. + */ + if (intrno == 0x21 && is_bitset(V86_AH(tf), &u_vm86p->int21_byuser[0])) + goto vector; + + /* + * Fetch intr handler info from "real-mode" IDT based at addr 0 in + * the user address space. + */ + if (copyin((caddr_t)(intrno * sizeof(ihand)), &ihand, sizeof(ihand))) + goto bad; + + if (ihand.cs == BIOSSEG) + goto vector; + + /* + * Otherwise, push flags, cs, eip, and jump to handler to + * simulate direct INT call. + */ + ss = tf->tf_ss << 4; + sp = SP(tf); + + putword(ss, sp, get_vflags(p)); + putword(ss, sp, tf->tf_cs); + putword(ss, sp, IP(tf)); + SP(tf) = sp; + + IP(tf) = ihand.ip; + tf->tf_cs = ihand.cs; + + /* disable further "hardware" interrupts, turn off any tracing. */ + VM86_EFLAGS(p) &= ~PSL_VIF; + tf->tf_eflags &= ~PSL_VIF|PSL_T; + return; + +vector: + return_to_32bit(p, VM86_MAKEVAL(VM86_INTx, intrno)); + return; + +bad: + return_to_32bit(p, VM86_UNKNOWN); + return; +} + +static void +return_to_32bit(p, retval) + struct proc *p; + int retval; +{ + + /* + * We can't set the virtual flags in our real trap frame, + * since it's used to jump to the signal handler. Instead we + * let sendsig() pull in the VM86_EFLAGS bits. 
+ */ + if (p->p_sigmask & sigmask(SIGURG)) { +#ifdef DIAGNOSTIC + printf("pid %d killed on VM86 protocol screwup (SIGURG blocked)\n", + p->p_pid); +#endif + sigexit(p, SIGILL); + /* NOTREACHED */ + } + trapsignal(p, SIGURG, retval); +} + +#define CLI 0xFA +#define STI 0xFB +#define INTxx 0xCD +#define IRET 0xCF +#define OPSIZ 0x66 +#define INT3 0xCC /* Actually the process gets 32-bit IDT to handle it */ +#define LOCK 0xF0 +#define PUSHF 0x9C +#define POPF 0x9D + +/* + * Handle a GP fault that occurred while in VM86 mode. Things that are easy + * to handle here are done here (much more efficient than trapping to 32-bit + * handler code and then having it restart VM86 mode). + */ +void +vm86_gpfault(p, type) + struct proc *p; + int type; +{ + struct trapframe *tf = p->p_md.md_regs; + /* + * we want to fetch some stuff from the current user virtual + * address space for checking. remember that the frame's + * segment selectors are real-mode style selectors. + */ + u_char tmpbyte; + u_long cs, ip, ss, sp; + + cs = tf->tf_cs << 4; + ip = IP(tf); + ss = tf->tf_ss << 4; + sp = SP(tf); + + /* + * For most of these, we must set all the registers before calling + * macros/functions which might do a return_to_32bit. + */ + tmpbyte = getbyte(cs, ip); + IP(tf) = ip; + switch (tmpbyte) { + case CLI: + /* simulate handling of IF */ + VM86_EFLAGS(p) &= ~PSL_VIF; + tf->tf_eflags &= ~PSL_VIF; + break; + + case STI: + /* simulate handling of IF. + * XXX the i386 enables interrupts one instruction later. + * code here is wrong, but much simpler than doing it Right. + */ + set_vif(p); + break; + + case INTxx: + /* try fast intxx, or return to 32bit mode to handle it. */ + tmpbyte = getbyte(cs, ip); + IP(tf) = ip; + fast_intxx(p, tmpbyte); + break; + + case PUSHF: + putword(ss, sp, get_vflags(p)); + SP(tf) = sp; + break; + + case IRET: + IP(tf) = getword(ss, sp); + tf->tf_cs = getword(ss, sp); + case POPF: + set_vflags_short(p, getword(ss, sp)); + SP(tf) = sp; + break; + + case OPSIZ: + tmpbyte = getbyte(cs, ip); + IP(tf) = ip; + switch (tmpbyte) { + case PUSHF: + putdword(ss, sp, get_vflags(p)); + SP(tf) = sp; + break; + + case IRET: + IP(tf) = getdword(ss, sp); + tf->tf_cs = getdword(ss, sp); + case POPF: + set_vflags(p, getdword(ss, sp)); + SP(tf) = sp; + break; + + default: + IP(tf) -= 2; + goto bad; + } + break; + + case LOCK: + default: + IP(tf) -= 1; + goto bad; + } + return; + +bad: + return_to_32bit(p, VM86_UNKNOWN); + return; +} + +int +i386_vm86(p, args, retval) + struct proc *p; + char *args; + register_t *retval; +{ + struct trapframe *tf = p->p_md.md_regs; + struct vm86_kern vm86s; + int err; + + if (err = copyin(args, &vm86s, sizeof(vm86s))) + return err; + + p->p_addr->u_pcb.vm86_userp = (void *)args; + +#define DOVREG(reg) tf->tf_vm86_##reg = (u_short) vm86s.regs.vmsc.sc_##reg +#define DOREG(reg) tf->tf_##reg = (u_short) vm86s.regs.vmsc.sc_##reg + + DOVREG(ds); + DOVREG(es); + DOVREG(fs); + DOVREG(gs); + DOREG(edi); + DOREG(esi); + DOREG(ebp); + DOREG(eax); + DOREG(ebx); + DOREG(ecx); + DOREG(edx); + DOREG(eip); + DOREG(cs); + DOREG(esp); + DOREG(ss); + +#undef DOVREG +#undef DOREG + + SETFLAGS(VM86_EFLAGS(p), vm86s.regs.vmsc.sc_eflags, VM86_FLAGMASK(p)|PSL_VIF); + SETFLAGS(tf->tf_eflags, vm86s.regs.vmsc.sc_eflags, SETDIRECT); + tf->tf_eflags |= PSL_VM; + + /* + * Keep mask of flags we simulate to simulate a particular type of + * processor. 
+ */ + switch (vm86s.ss_cpu_type) { + case VCPU_086: + case VCPU_186: + case VCPU_286: + VM86_FLAGMASK(p) = 0; + break; + case VCPU_386: + VM86_FLAGMASK(p) = PSL_NT|PSL_IOPL; + break; + case VCPU_486: + VM86_FLAGMASK(p) = PSL_AC|PSL_NT|PSL_IOPL; + break; + case VCPU_586: + default: + VM86_FLAGMASK(p) = PSL_ID|PSL_AC|PSL_NT|PSL_IOPL; + break; + } + + /* Going into vm86 mode jumps off the signal stack. */ + p->p_sigacts->ps_sigstk.ss_flags &= ~SA_ONSTACK; + + return (EJUSTRETURN); +} diff --git a/sys/arch/i386/include/pcb.h b/sys/arch/i386/include/pcb.h index 1cb5ad46318..3cff2b28c2d 100644 --- a/sys/arch/i386/include/pcb.h +++ b/sys/arch/i386/include/pcb.h @@ -1,4 +1,4 @@ -/* $NetBSD: pcb.h,v 1.20 1995/10/11 04:20:16 mycroft Exp $ */ +/* $NetBSD: pcb.h,v 1.21 1996/01/08 13:51:42 mycroft Exp $ */ /*- * Copyright (c) 1995 Charles M. Hannum. All rights reserved. @@ -53,6 +53,8 @@ #include #include +#define NIOPORTS 1024 /* # of ports we allow to be mapped */ + struct pcb { struct i386tss pcb_tss; #define pcb_cr3 pcb_tss.tss_cr3 @@ -73,7 +75,10 @@ struct pcb { int pcb_flags; #define PCB_USER_LDT 0x01 /* has user-set LDT */ caddr_t pcb_onfault; /* copyin/out fault recovery */ - u_long pcb_iomap[1024/32]; /* I/O bitmap */ + int vm86_eflags; /* virtual eflags for vm86 mode */ + int vm86_flagmask; /* flag mask for vm86 mode */ + void *vm86_userp; /* XXX performance hack */ + u_long pcb_iomap[NIOPORTS/32]; /* I/O bitmap */ }; /* diff --git a/sys/arch/i386/include/signal.h b/sys/arch/i386/include/signal.h index ef3f2b95c15..8e35b78ba28 100644 --- a/sys/arch/i386/include/signal.h +++ b/sys/arch/i386/include/signal.h @@ -1,4 +1,4 @@ -/* $NetBSD: signal.h,v 1.5 1995/05/01 14:14:11 mycroft Exp $ */ +/* $NetBSD: signal.h,v 1.6 1996/01/08 13:51:43 mycroft Exp $ */ /* * Copyright (c) 1982, 1986, 1989, 1991 Regents of the University of California. @@ -65,6 +65,7 @@ struct sigcontext { int sc_edx; int sc_ecx; int sc_eax; + /* XXX */ int sc_eip; int sc_cs; int sc_eflags; @@ -73,6 +74,9 @@ struct sigcontext { int sc_onstack; /* sigstack state to restore */ int sc_mask; /* signal mask to restore */ + + int sc_trapno; /* XXX should be above */ + int sc_err; }; #define sc_sp sc_esp diff --git a/sys/arch/i386/include/sysarch.h b/sys/arch/i386/include/sysarch.h index a2f440d775c..e6d5dca475b 100644 --- a/sys/arch/i386/include/sysarch.h +++ b/sys/arch/i386/include/sysarch.h @@ -1,4 +1,4 @@ -/* $NetBSD: sysarch.h,v 1.7 1995/10/11 04:20:26 mycroft Exp $ */ +/* $NetBSD: sysarch.h,v 1.8 1996/01/08 13:51:44 mycroft Exp $ */ #ifndef _I386_SYSARCH_H_ #define _I386_SYSARCH_H_ @@ -11,6 +11,7 @@ #define I386_IOPL 2 #define I386_GET_IOPERM 3 #define I386_SET_IOPERM 4 +#define I386_VM86 5 struct i386_get_ldt_args { int start; diff --git a/sys/arch/i386/include/vm86.h b/sys/arch/i386/include/vm86.h new file mode 100644 index 00000000000..1f21db3dbdc --- /dev/null +++ b/sys/arch/i386/include/vm86.h @@ -0,0 +1,79 @@ +/* $NetBSD: vm86.h,v 1.1 1996/01/08 13:51:45 mycroft Exp $ */ + +/* + * Copyright (c) 1995 John T. Kohl + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR `AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + */ + +#define SETFLAGS(targ, new, newmask) (targ) = ((targ) & ~(newmask)) | ((new) & (newmask)) +#define VM86_EFLAGS(p) ((p)->p_addr->u_pcb.vm86_eflags) +#define VM86_FLAGMASK(p) ((p)->p_addr->u_pcb.vm86_flagmask) + +#define VM86_TYPE(x) ((x) & 0xff) +#define VM86_ARG(x) (((x) & 0xff00) >> 8) +#define VM86_MAKEVAL(type,arg) ((type) | (((arg) & 0xff) << 8)) +#define VM86_STI 0 +#define VM86_INTx 1 +#define VM86_SIGNAL 2 +#define VM86_UNKNOWN 3 + +struct vm86_regs { + struct sigcontext vmsc; +}; + +struct vm86_kern { /* kernel uses this stuff */ + struct vm86_regs regs; + unsigned long ss_cpu_type; +}; +#define cpu_type substr.ss_cpu_type + +/* + * Kernel keeps copy of user-mode address of this, but doesn't copy it in. + */ +struct vm86_struct { + struct vm86_kern substr; + unsigned long screen_bitmap; /* not used/supported (yet) */ + unsigned long flags; /* not used/supported (yet) */ + unsigned char int_byuser[32]; /* 256 bits each: pass control to user */ + unsigned char int21_byuser[32]; /* otherwise, handle directly */ +}; + +#define BIOSSEG 0x0f000 + +#define VCPU_086 0 +#define VCPU_186 1 +#define VCPU_286 2 +#define VCPU_386 3 +#define VCPU_486 4 +#define VCPU_586 5 + +#ifdef _KERNEL +int i386_vm86 __P((struct proc *, char *, register_t *)); +void vm86_gpfault __P((struct proc *, int)); +#else +int i386_vm86 __P((struct vm86_struct *vmcp)); +#endif
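
A minimal sketch of how a user-mode monitor might enter vm86 mode through the interface added above -- not part of the commit. Only sysarch(I386_VM86, ...), struct vm86_struct, the VCPU_* constants, PSL_I, and the SIGURG/VM86_TYPE() upcall protocol come from this diff; the low-memory layout, the segment/offset values, the signal-handler stub, and the explicit sysarch() prototype are assumptions made purely for illustration.

/*
 * Sketch only: enter vm86 mode via the new sysarch(I386_VM86) call.
 * Assumes 16-bit code has already been mapped at 0100:0000 and a stack
 * at 0200:fffe in the process address space.
 */
#include <sys/types.h>
#include <signal.h>
#include <string.h>
#include <machine/psl.h>
#include <machine/sysarch.h>
#include <machine/vm86.h>

int sysarch(int, char *);	/* assumed: not prototyped by the headers of this era */

static struct vm86_struct vm86s;

static void
urghandler(int sig)
{
	/*
	 * return_to_32bit() reports STI-with-pending-interrupt, revectored
	 * INTs, and unhandled opcodes here; a real monitor would decode the
	 * delivered code with VM86_TYPE()/VM86_ARG().  Stub only.
	 */
}

int
main(void)
{
	/* Must not be blocked: return_to_32bit() kills the process if it is. */
	signal(SIGURG, urghandler);

	memset(&vm86s, 0, sizeof(vm86s));
	vm86s.substr.ss_cpu_type = VCPU_386;	/* simulate PSL_NT|PSL_IOPL only */

	/* Real-mode style CS:IP, SS:SP and data segments (illustrative values). */
	vm86s.substr.regs.vmsc.sc_cs  = 0x0100;
	vm86s.substr.regs.vmsc.sc_eip = 0x0000;
	vm86s.substr.regs.vmsc.sc_ss  = 0x0200;
	vm86s.substr.regs.vmsc.sc_esp = 0xfffe;
	vm86s.substr.regs.vmsc.sc_ds  = 0x0100;
	vm86s.substr.regs.vmsc.sc_es  = 0x0100;
	vm86s.substr.regs.vmsc.sc_eflags = PSL_I;	/* virtual interrupts enabled */

	/*
	 * int_byuser[]/int21_byuser[] left zero, so fast_intxx() reflects
	 * INT nn through the real-mode IDT at address 0 instead of
	 * signalling us.  On success the call does not return normally:
	 * the process continues at CS:IP with PSL_VM set (EJUSTRETURN).
	 */
	return (sysarch(I386_VM86, (char *)&vm86s));
}

Note that i386_vm86() copies in only the vm86_kern prefix of the structure and remembers the user pointer in vm86_userp, so the int_byuser[]/int21_byuser[] bitmaps are consulted lazily with fubyte() at fault time rather than copied into the kernel up front.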