}
pri = ra->ra_intr[0].int_pri;
printf(" pri %d, softpri %d\n", pri, PIL_AUSOFT);
- amd = (volatile struct amd7930 *)(ra->ra_vaddr ?
- ra->ra_vaddr : mapiodev(ra->ra_paddr, sizeof (*amd),
- ca->ca_bustype));
+ amd = (volatile struct amd7930 *)(ra->ra_vaddr ? ra->ra_vaddr :
+ mapiodev(ra->ra_reg, 0, sizeof (*amd), ca->ca_bustype));
sc->sc_map.mr_mmr1 = AMD_MMR1_GX | AMD_MMR1_GER |
AMD_MMR1_GR | AMD_MMR1_STG;
struct sbusdev sc_sd; /* sbus device */
struct fbdevice sc_fb; /* frame buffer device */
volatile struct bwtworeg *sc_reg;/* control registers */
- caddr_t sc_phys; /* display RAM (phys addr) */
+ struct rom_reg sc_phys; /* display RAM (phys addr) */
int sc_bustype;
};
register struct bwtwo_softc *sc = (struct bwtwo_softc *)self;
register struct confargs *ca = args;
register int node = ca->ca_ra.ra_node, ramsize;
- register struct bwtwo_all *p;
int isconsole;
char *nam;
sc->sc_fb.fb_linebytes = sc->sc_fb.fb_type.fb_width / 8;
nam = "bwtwo";
break;
+#endif
#endif
case BUS_OBIO:
+#if defined(SUN4M)
+ if (cputyp == CPU_SUN4M) { /* 4m has framebuffer on obio */
+ nam = getpropstring(node, "model");
+ break;
+ }
+#endif
+#if defined(SUN4)
node = 0;
nam = "bwtwo";
break;
#endif
-
case BUS_SBUS:
#if defined(SUN4C) || defined(SUN4M)
nam = getpropstring(node, "model");
-#endif
break;
+#endif
}
+ sc->sc_phys = ca->ca_ra.ra_reg[0];
+ sc->sc_bustype = ca->ca_bustype;
sc->sc_fb.fb_type.fb_depth = 1;
fb_setsize(&sc->sc_fb, sc->sc_fb.fb_type.fb_depth,
* registers ourselves. We only need the video RAM if we are
* going to print characters via rconsole.
*/
- p = (struct bwtwo_all *)ca->ca_ra.ra_paddr;
if ((sc->sc_fb.fb_pixels = ca->ca_ra.ra_vaddr) == NULL && isconsole) {
/* this probably cannot happen (on sun4c), but what the heck */
- sc->sc_fb.fb_pixels = mapiodev(p->ba_ram, ramsize,
- ca->ca_bustype);
+ sc->sc_fb.fb_pixels = mapiodev(ca->ca_ra.ra_reg, BWREG_MEM,
+ ramsize, ca->ca_bustype);
}
- sc->sc_reg = (volatile struct bwtworeg *)mapiodev((caddr_t)&p->ba_reg,
- sizeof(p->ba_reg), ca->ca_bustype);
- sc->sc_phys = p->ba_ram;
+ sc->sc_reg = (volatile struct bwtworeg *)mapiodev(ca->ca_ra.ra_reg,
+ BWREG_REG, sizeof(struct bwtworeg), ca->ca_bustype);
/* Insure video is enabled */
bwtwoenable(sc, 1);
* I turned on PMAP_NC here to disable the cache as I was
* getting horribly broken behaviour with it on.
*/
- return ((int)sc->sc_phys + off + PMAP_OBIO + PMAP_NC);
+ return (REG2PHYS(&sc->sc_phys, BWREG_MEM+off, sc->sc_bustype) | PMAP_NC);
}
struct device sc_dev; /* base device */
struct fbdevice sc_fb; /* frame buffer device */
volatile struct bt_regs *sc_bt; /* Brooktree registers */
- caddr_t sc_phys; /* display RAM (phys addr) */
+ struct rom_reg sc_phys; /* display RAM (phys addr) */
+ int sc_bustype; /* type of bus we live on */
int sc_blanked; /* true if blanked */
union bt_cmap sc_cmap; /* Brooktree color map */
};
* Ridiculous!
*/
isconsole = node == fbnode && fbconstty != NULL;
- p = (struct cgeight_all *)ca->ca_ra.ra_paddr;
if (ca->ca_ra.ra_vaddr == NULL) {
/* this probably cannot happen, but what the heck */
- ca->ca_ra.ra_vaddr = mapiodev(p->ba_overlay, ramsize,
- ca->ca_bustype);
+ ca->ca_ra.ra_vaddr = mapiodev(ca->ca_ra.ra_reg, 0,
+ ramsize, ca->ca_bustype);
}
sc->sc_fb.fb_pixels = (char *)((int)ca->ca_ra.ra_vaddr +
CG8REG_COLOUR - CG8REG_OVERLAY);
- sc->sc_bt = bt = (volatile struct bt_regs *)
- mapiodev((caddr_t)&p->ba_btreg, sizeof(p->ba_btreg),
- ca->ca_bustype);
- sc->sc_phys = p->ba_overlay;
+#define O(memb) ((u_int)(&((struct cgeight_all *)0)->memb))
+ sc->sc_bt = bt = (volatile struct bt_regs *)mapiodev(ca->ca_ra.ra_reg,
+ O(ba_btreg), sizeof(struct bt_regs), ca->ca_bustype);
+ sc->sc_phys = ca->ca_ra.ra_reg[0];
+ sc->sc_bustype = ca->ca_bustype;
/* tell the enable plane to look at the mono image */
memset(ca->ca_ra.ra_vaddr, 0xff,
* I turned on PMAP_NC here to disable the cache as I was
* getting horribly broken behaviour with it on.
*/
- return ((u_int)sc->sc_phys + poff + PMAP_OBIO + PMAP_NC);
+ return (REG2PHYS(&sc->sc_phys, off, sc->sc_bustype) | PMAP_NC);
}
struct device sc_dev; /* base device */
struct fbdevice sc_fb; /* frame buffer device */
volatile struct bt_regs *sc_bt; /* Brooktree registers */
- caddr_t sc_phys; /* display RAM (phys addr) */
+ struct rom_reg sc_phys; /* display RAM (phys addr) */
+ int sc_bustype; /* type of bus we live on */
int sc_blanked; /* true if blanked */
union bt_cmap sc_cmap; /* Brooktree color map */
};
register struct confargs *ca = args;
register int node = 0, ramsize, i;
register volatile struct bt_regs *bt;
- register struct cgfour_all *p;
int isconsole;
sc->sc_fb.fb_driver = &cgfourfbdriver;
* the mmap code down below doesn't use it all. Ridiculous!
*/
isconsole = node == fbnode && fbconstty != NULL;
- p = (struct cgfour_all *)ca->ca_ra.ra_paddr;
if (ca->ca_ra.ra_vaddr == NULL) {
/* this probably cannot happen, but what the heck */
- ca->ca_ra.ra_vaddr = mapiodev(p->ba_overlay, ramsize,
- ca->ca_bustype);
+ ca->ca_ra.ra_vaddr = mapiodev(ca->ca_ra.ra_reg, 0,
+ ramsize, ca->ca_bustype);
}
sc->sc_fb.fb_pixels = (char *)((int)ca->ca_ra.ra_vaddr +
CG4REG_COLOUR - CG4REG_OVERLAY);
- sc->sc_bt = bt = (volatile struct bt_regs *)
- mapiodev((caddr_t)&p->ba_btreg, sizeof(p->ba_btreg),
- ca->ca_bustype);
- sc->sc_phys = p->ba_overlay;
+#define O(memb) ((u_int)(&((struct cgfour_all *)0)->memb))
+ sc->sc_bt = bt = (volatile struct bt_regs *)mapiodev(ca->ca_ra.ra_reg,
+ O(ba_btreg), sizeof(struct bt_regs), ca->ca_bustype);
+ sc->sc_phys = ca->ca_ra.ra_reg[0];
+ sc->sc_bustype = ca->ca_bustype;
/* grab initial (current) color map */
bt->bt_addr = 0;
poff = off + (CG4REG_COLOUR - CG4REG_OVERLAY) - START_COLOUR;
else
return (-1);
- return ((u_int)sc->sc_phys + poff + PMAP_OBIO + PMAP_NC);
+ return (REG2PHYS(&sc->sc_phys, off, sc->sc_bustype) | PMAP_NC);
}
struct device sc_dev; /* base device */
struct sbusdev sc_sd; /* sbus device */
struct fbdevice sc_fb; /* frame buffer device */
- volatile struct cg6_layout *sc_physadr; /* phys addr of h/w */
+ struct rom_reg sc_physadr; /* phys addr of h/w */
+ int sc_bustype; /* type of bus we live on */
volatile struct bt_regs *sc_bt; /* Brooktree registers */
volatile int *sc_fhc; /* FHC register */
volatile struct cg6_thc *sc_thc; /* THC registers */
{
register struct cgsix_softc *sc = (struct cgsix_softc *)self;
register struct confargs *ca = args;
- register int node, ramsize, i, isconsole;
+ register int node = ca->ca_ra.ra_node, ramsize, i, isconsole;
register volatile struct bt_regs *bt;
register volatile struct cg6_layout *p;
char *nam;
nam = "cgsix";
break;
#endif /* SUN4 */
+ case BUS_OBIO:
+#if defined(SUN4M)
+ if (cputyp == CPU_SUN4M) { /* 4m has framebuffer on obio */
+ nam = getpropstring(node, "model");
+ break;
+ }
+#endif
+ break;
case BUS_SBUS:
- node = ca->ca_ra.ra_node;
nam = getpropstring(node, "model");
break;
}
sc->sc_fb.fb_type.fb_size = ramsize;
printf(": %s, %d x %d", nam,
sc->sc_fb.fb_type.fb_width, sc->sc_fb.fb_type.fb_height);
+ isconsole = node == fbnode && fbconstty != NULL;
/*
* Dunno what the PROM has mapped, though obviously it must have
* the video RAM mapped. Just map what we care about for ourselves
* (the FHC, THC, and Brooktree registers).
*/
- isconsole = node == fbnode && fbconstty != NULL;
- sc->sc_physadr = p = (struct cg6_layout *)ca->ca_ra.ra_paddr;
- sc->sc_bt = bt = (volatile struct bt_regs *)
- mapiodev((caddr_t)&p->cg6_bt_un.un_btregs, sizeof *sc->sc_bt,
- ca->ca_bustype);
- sc->sc_fhc = (volatile int *)
- mapiodev((caddr_t)&p->cg6_fhc_un.un_fhc, sizeof *sc->sc_fhc,
- ca->ca_bustype);
- sc->sc_thc = (volatile struct cg6_thc *)
- mapiodev((caddr_t)&p->cg6_thc_un.un_thc, sizeof *sc->sc_thc,
- ca->ca_bustype);
- sc->sc_tec = (volatile struct cg6_tec_xxx *)
- mapiodev((caddr_t)&p->cg6_tec_un.un_tec, sizeof *sc->sc_tec,
- ca->ca_bustype);
+#define O(memb) ((u_int)(&((struct cg6_layout *)0)->memb))
+ sc->sc_physadr = ca->ca_ra.ra_reg[0];
+ sc->sc_bustype = ca->ca_bustype;
+ sc->sc_bt = bt = (volatile struct bt_regs *)mapiodev(ca->ca_ra.ra_reg,
+ O(cg6_bt_un.un_btregs), sizeof *sc->sc_bt, ca->ca_bustype);
+ sc->sc_fhc = (volatile int *)mapiodev(ca->ca_ra.ra_reg,
+ O(cg6_fhc_un.un_fhc), sizeof *sc->sc_fhc, ca->ca_bustype);
+ sc->sc_thc = (volatile struct cg6_thc *)mapiodev(ca->ca_ra.ra_reg,
+ O(cg6_thc_un.un_thc), sizeof *sc->sc_thc, ca->ca_bustype);
+ sc->sc_tec = (volatile struct cg6_tec_xxx *)mapiodev(ca->ca_ra.ra_reg,
+ O(cg6_tec_un.un_tec), sizeof *sc->sc_tec, ca->ca_bustype);
sc->sc_fhcrev = (*sc->sc_fhc >> FHC_REV_SHIFT) &
(FHC_REV_MASK >> FHC_REV_SHIFT);
if (isconsole) {
printf(" (console)\n");
+#ifdef RASTERCONSOLE
+ sc->sc_fb.fb_pixels = (caddr_t)mapiodev(ca->ca_ra.ra_reg,
+ O(cg6_ram[0]), ramsize, ca->ca_bustype);
+ fbrcons_init(&sc->sc_fb);
+#endif
} else
printf("\n");
#if defined(SUN4C) || defined(SUN4M)
u = off - mo->mo_uaddr;
sz = mo->mo_size ? mo->mo_size : sc->sc_fb.fb_type.fb_size;
if (u < sz)
- return ((int)sc->sc_physadr + u + mo->mo_physoff +
- PMAP_OBIO + PMAP_NC);
+ return (REG2PHYS(&sc->sc_physadr, u + mo->mo_physoff,
+ sc->sc_bustype) | PMAP_NC);
}
#ifdef DEBUG
{
struct sbusdev sc_sd; /* sbus device */
struct fbdevice sc_fb; /* frame buffer device */
volatile struct bt_regs *sc_bt; /* Brooktree registers */
- caddr_t sc_phys; /* display RAM (phys addr) */
+ struct rom_reg sc_phys; /* display RAM (phys addr) */
+ int sc_bustype; /* type of bus we live on */
int sc_blanked; /* true if blanked */
union bt_cmap sc_cmap; /* Brooktree color map */
};
{
register struct cgthree_softc *sc = (struct cgthree_softc *)self;
register struct confargs *ca = args;
- register int node, ramsize, i;
+ register int node = ca->ca_ra.ra_node, ramsize, i;
register volatile struct bt_regs *bt;
register struct cgthree_all *p;
int isconsole;
*/
sc->sc_fb.fb_type.fb_type = FBTYPE_SUN3COLOR;
switch (ca->ca_bustype) {
+#if defined(SUN4M)
+ case BUS_OBIO:
+ if (cputyp == CPU_SUN4M) /* 4m has framebuffer on obio */
+ nam = getpropstring(node, "model");
+ break;
+#endif
+
case BUS_VME32:
node = 0;
nam = "cgthree";
break;
case BUS_SBUS:
- node = ca->ca_ra.ra_node;
nam = getpropstring(node, "model");
break;
}
p = (struct cgthree_all *)ca->ca_ra.ra_paddr;
if ((sc->sc_fb.fb_pixels = ca->ca_ra.ra_vaddr) == NULL && isconsole) {
/* this probably cannot happen, but what the heck */
- sc->sc_fb.fb_pixels = mapiodev(p->ba_ram, ramsize, ca->ca_bustype);
+ sc->sc_fb.fb_pixels = mapiodev(ca->ca_ra.ra_reg, CG3REG_MEM,
+ ramsize, ca->ca_bustype);
}
- sc->sc_bt = bt = (volatile struct bt_regs *)
- mapiodev((caddr_t)&p->ba_btreg, sizeof(p->ba_btreg), ca->ca_bustype);
- sc->sc_phys = p->ba_ram;
+ sc->sc_bt = bt = (volatile struct bt_regs *)mapiodev(ca->ca_ra.ra_reg,
+ CG3REG_REG, sizeof(struct bt_regs), ca->ca_bustype);
+
+ sc->sc_phys = ca->ca_ra.ra_reg[0];
+ sc->sc_bustype = ca->ca_bustype;
/* grab initial (current) color map */
bt->bt_addr = 0;
* I turned on PMAP_NC here to disable the cache as I was
* getting horribly broken behaviour with it on.
*/
- return ((int)sc->sc_phys + off + PMAP_OBIO + PMAP_NC);
+ return (REG2PHYS(&sc->sc_phys, CG3REG_MEM+off, sc->sc_bustype) | PMAP_NC);
}
struct cgtwo_softc {
struct device sc_dev; /* base device */
struct fbdevice sc_fb; /* frame buffer device */
- caddr_t sc_phys; /* display RAM (phys addr) */
+ struct rom_reg sc_phys; /* display RAM (phys addr) */
+ int sc_bustype; /* type of bus we live on */
volatile struct cg2statusreg *sc_reg; /* CG2 control registers */
volatile u_short *sc_cmap;
#define sc_redmap(sc) ((sc)->sc_cmap)
isconsole = 0;
}
#endif
- sc->sc_phys = (caddr_t)ca->ca_ra.ra_paddr;
+ sc->sc_phys = ca->ca_ra.ra_reg[0];
+ sc->sc_bustype = ca->ca_bustype;
+
if ((sc->sc_fb.fb_pixels = ca->ca_ra.ra_vaddr) == NULL && isconsole) {
/* this probably cannot happen, but what the heck */
- sc->sc_fb.fb_pixels = mapiodev(sc->sc_phys + CG2_PIXMAP_OFF,
+ sc->sc_fb.fb_pixels = mapiodev(ca->ca_ra.ra_reg, CG2_PIXMAP_OFF,
CG2_PIXMAP_SIZE, ca->ca_bustype);
}
#ifndef offsetof
#define offsetof(type, member) ((size_t)(&((type *)0)->member))
#endif
- sc->sc_reg = (volatile struct cg2statusreg *)
- mapiodev((caddr_t)sc->sc_phys +
- CG2_ROPMEM_OFF + offsetof(struct cg2fb, status.reg),
- sizeof(struct cg2statusreg), ca->ca_bustype);
+ sc->sc_reg = (volatile struct cg2statusreg *)mapiodev(ca->ca_ra.ra_reg,
+ CG2_ROPMEM_OFF + offsetof(struct cg2fb, status.reg),
+ sizeof(struct cg2statusreg), ca->ca_bustype);
- sc->sc_cmap = (volatile u_short *)
- mapiodev((caddr_t)sc->sc_phys +
- CG2_ROPMEM_OFF + offsetof(struct cg2fb, redmap[0]),
- 3 * CG2_CMSIZE, ca->ca_bustype);
+ sc->sc_cmap = (volatile u_short *)mapiodev(ca->ca_ra.ra_reg,
+ CG2_ROPMEM_OFF + offsetof(struct cg2fb, redmap[0]),
+ 3 * CG2_CMSIZE, ca->ca_bustype);
if (isconsole) {
printf(" (console)\n");
if ((unsigned)off >= sc->sc_fb.fb_type.fb_size)
return (-1);
- return ((int)sc->sc_phys + off + PMAP_VME32 + PMAP_NC);
+ return (REG2PHYS(&sc->sc_phys, off, PMAP_VME32) | PMAP_NC);
}
/* XXX modifying ra_vaddr is bad! */
if (ca->ca_ra.ra_vaddr == NULL)
- ca->ca_ra.ra_vaddr = mapiodev(ca->ca_ra.ra_paddr,
+ ca->ca_ra.ra_vaddr = mapiodev(ca->ca_ra.ra_reg, 0,
ca->ca_ra.ra_len, ca->ca_bustype);
if ((u_long)ca->ca_ra.ra_paddr & PGOFSET)
(u_long)ca->ca_ra.ra_vaddr |= ((u_long)ca->ca_ra.ra_paddr & PGOFSET);
sc->sc_regs = (struct espregs *) ca->ca_ra.ra_vaddr;
else {
sc->sc_regs = (struct espregs *)
- mapiodev(ca->ca_ra.ra_paddr, ca->ca_ra.ra_len,
+ mapiodev(ca->ca_ra.ra_reg, 0, ca->ca_ra.ra_len,
ca->ca_bustype);
}
espr = sc->sc_regs;
case BUS_VME16:
case BUS_VME32:
case BUS_OBIO:
+#if defined(SUN4M)
+ if (cputyp == CPU_SUN4M) { /* 4m has framebuffer on obio */
+ fb->fb_type.fb_width = getpropint(node, "width",
+ def_width);
+ fb->fb_type.fb_height = getpropint(node, "height",
+ def_height);
+ fb->fb_linebytes = getpropint(node, "linebytes",
+ (fb->fb_type.fb_width * depth) / 8);
+ break;
+ }
+#endif
/* Set up some defaults. */
fb->fb_type.fb_width = def_width;
fb->fb_type.fb_height = def_height;
/* software state, per controller */
struct fdc_softc {
- struct dkdevice sc_dk; /* boilerplate */
+ struct device sc_dev; /* boilerplate */
struct intrhand sc_sih;
struct intrhand sc_hih;
caddr_t sc_reg;
if (ca->ca_ra.ra_vaddr)
fdc->sc_reg = (caddr_t)ca->ca_ra.ra_vaddr;
else
- fdc->sc_reg = (caddr_t)mapiodev(ca->ca_ra.ra_paddr,
+ fdc->sc_reg = (caddr_t)mapiodev(ca->ca_ra.ra_reg, 0,
ca->ca_ra.ra_len,
ca->ca_bustype);
printf(" CFGLOCK: unexpected response");
}
- evcnt_attach(&fdc->sc_dk.dk_dev, "intr", &fdc->sc_intrcnt);
+ evcnt_attach(&fdc->sc_dev, "intr", &fdc->sc_intrcnt);
printf(" pri %d, softpri %d: chip 8207%c\n", pri, PIL_FDSOFT, code);
if (fdc->sc_flags & FDC_82077) {
/* select drive and turn on motor */
*fdc->sc_reg_dor = drive | FDO_FRST | FDO_MOEN(drive);
+ /* wait for motor to spin up */
+ delay(250000);
} else {
auxregbisc(AUXIO_FDS, 0);
}
- /* wait for motor to spin up */
- delay(250000);
fdc->sc_nstat = 0;
out_fdc(fdc, NE7CMD_RECAL);
sc->memcopy = bcopy;
sc->memzero = bzero;
sc->sc_msize = 65536; /* XXX */
- sc->sc_reg = mapiodev(ca->ca_ra.ra_paddr, sizeof(struct ieob),
+ sc->sc_reg = mapiodev(ca->ca_ra.ra_reg, 0, sizeof(struct ieob),
ca->ca_bustype);
ieo = (volatile struct ieob *) sc->sc_reg;
sc->memcopy = wcopy;
sc->memzero = wzero;
sc->sc_msize = 65536; /* XXX */
- sc->sc_reg = mapiodev(ca->ca_ra.ra_paddr, sizeof(struct ievme),
+ sc->sc_reg = mapiodev(ca->ca_ra.ra_reg, 0, sizeof(struct ievme),
ca->ca_bustype);
iev = (volatile struct ievme *) sc->sc_reg;
/* top 12 bits */
rampaddr = (u_long)ca->ca_ra.ra_paddr & 0xfff00000;
/* 4 more */
rampaddr = rampaddr | ((iev->status & IEVME_HADDR) << 16);
- sc->sc_maddr = mapiodev((caddr_t)rampaddr, sc->sc_msize,
+ rampaddr -= (u_long)ca->ca_ra.ra_paddr;
+ sc->sc_maddr = mapiodev(ca->ca_ra.ra_reg, rampaddr, sc->sc_msize,
ca->ca_bustype);
sc->sc_iobase = sc->sc_maddr;
iev->pectrl = iev->pectrl | IEVME_PARACK; /* clear to start */
int len;
int bustype;
{
+ struct rom_reg rr;
u_long pf = (u_long)pa >> PGSHIFT;
u_long va, pte;
int pgtype;
return ((void *)va);
}
}
- return mapiodev(pa, len, bustype);
+ rr.rr_paddr = pa;
+ return mapiodev(&rr, 0, len, bustype);
}
void *
-/* $Id: pfour.c,v 1.4 1995/11/09 21:26:13 deraadt Exp $ */
+/* $Id: pfour.c,v 1.5 1995/12/15 13:56:26 deraadt Exp $ */
/*
* Copyright (c) 1995 Theo de Raadt
return;
}
- sc->sc_vaddr = (u_long *)mapiodev((caddr_t)(ca->ca_ra.ra_paddr + PFOUR_REG),
+ sc->sc_vaddr = (u_long *)mapiodev(ca->ca_ra.ra_reg, PFOUR_REG,
NBPG, ca->ca_bustype);
if (sc->sc_vaddr == NULL) {
printf("\n");
struct bootpath *bp;
/* Map the controller registers. */
- regs = (struct si_regs *)mapiodev(ra->ra_paddr,
+ regs = (struct si_regs *)mapiodev(ra->ra_reg, 0,
sizeof(struct si_regs), ca->ca_bustype);
/* Establish the interrupt. */
-/* $NetBSD: xd.c,v 1.9 1995/09/25 20:12:44 chuck Exp $ */
+/* $NetBSD: xd.c,v 1.10 1995/12/11 12:40:20 pk Exp $ */
/*
*
* x d . c x y l o g i c s 7 5 3 / 7 0 5 3 v m e / s m d d r i v e r
*
* author: Chuck Cranor <chuck@ccrc.wustl.edu>
- * id: $Id: xd.c,v 1.1.1.1 1995/10/18 08:51:40 deraadt Exp $
+ * id: $Id: xd.c,v 1.2 1995/12/15 13:56:27 deraadt Exp $
* started: 27-Feb-95
* references: [1] Xylogics Model 753 User's Manual
* part number: 166-753-001, Revision B, May 21, 1988.
* "xdc_*" functions are internal, all others are external interfaces
*/
-/* external (XXX should migrate to std include file?) */
-extern caddr_t dvma_malloc __P((size_t));
-extern void dvma_free __P((caddr_t, size_t));
-extern caddr_t dvma_mapin __P((struct vm_map *, vm_offset_t, int, int));
-extern void dvma_mapout __P((vm_offset_t, vm_offset_t, int));
extern int pil_to_vme[]; /* from obio.c */
/* internals */
struct xdc_attach_args { /* this is the "aux" args to xdattach */
int driveno; /* unit number */
- char *dvmabuf; /* scratch buffer for reading disk label */
+ char *buf; /* scratch buffer for reading disk label */
+ char *dvmabuf; /* DVMA address of above */
int fullmode; /* submit mode */
int booting; /* are we booting or not? */
};
/* get addressing and intr level stuff from autoconfig and load it
* into our xdc_softc. */
- ca->ca_ra.ra_vaddr = mapiodev(ca->ca_ra.ra_paddr,
+ ca->ca_ra.ra_vaddr = mapiodev(ca->ca_ra.ra_reg, 0,
ca->ca_ra.ra_len, ca->ca_bustype);
if ((u_long) ca->ca_ra.ra_paddr & PGOFSET)
(u_long) ca->ca_ra.ra_vaddr |=
* iorq's up front. thus, we avoid linked lists and the costs
* associated with them in exchange for wasting a little memory. */
- xdc->iopbase = (struct xd_iopb *)
- dvma_malloc(XDC_MAXIOPB * sizeof(struct xd_iopb)); /* KVA */
+ xdc->dvmaiopb = (struct xd_iopb *)
+ dvma_malloc(XDC_MAXIOPB * sizeof(struct xd_iopb), &xdc->iopbase,
+ M_NOWAIT);
bzero(xdc->iopbase, XDC_MAXIOPB * sizeof(struct xd_iopb));
+ /* Setup device view of DVMA address */
xdc->dvmaiopb = (struct xd_iopb *) ((u_long) xdc->iopbase - DVMA_BASE);
+
xdc->reqs = (struct xd_iorq *)
malloc(XDC_MAXIOPB * sizeof(struct xd_iorq), M_DEVBUF, M_NOWAIT);
bzero(xdc->reqs, XDC_MAXIOPB * sizeof(struct xd_iorq));
/* now we must look for disks using autoconfig */
- xa.dvmabuf = (char *) dvma_malloc(XDFM_BPS);
+ xa.dvmabuf = (char *)dvma_malloc(XDFM_BPS, &xa.buf, M_NOWAIT);
xa.fullmode = XD_SUB_POLL;
xa.booting = 1;
for (xa.driveno = 0; xa.driveno < XDC_MAXDEV; xa.driveno++)
(void) config_found(self, (void *) &xa, NULL);
- dvma_free(xa.dvmabuf, XDFM_BPS);
+ dvma_free(xa.dvmabuf, XDFM_BPS, &xa.buf);
bootpath_store(1, NULL);
/* start the watchdog clock */
newstate = XD_DRIVE_NOLABEL;
xd->hw_spt = spt;
- if (xdgetdisklabel(xd, xa->dvmabuf) != XD_ERR_AOK)
+ if (xdgetdisklabel(xd, xa->buf) != XD_ERR_AOK)
goto done;
/* inform the user of what is up */
printf("%s: <%s>, pcyl %d, hw_spt %d\n", xd->sc_dev.dv_xname,
- xa->dvmabuf, xd->pcyl, spt);
+ xa->buf, xd->pcyl, spt);
mb = xd->ncyl * (xd->nhead * xd->nsect) / (1048576 / XDFM_BPS);
printf("%s: %dMB, %d cyl, %d head, %d sec, %d bytes/sec\n",
xd->sc_dev.dv_xname, mb, xd->ncyl, xd->nhead, xd->nsect,
}
/* check dkbad for sanity */
- dkb = (struct dkbad *) xa->dvmabuf;
+ dkb = (struct dkbad *) xa->buf;
for (lcv = 0; lcv < 126; lcv++) {
if ((dkb->bt_bad[lcv].bt_cyl == 0xffff ||
dkb->bt_bad[lcv].bt_cyl == 0) &&
printf("%s: warning: invalid bad144 sector!\n",
xd->sc_dev.dv_xname);
} else {
- bcopy(xa->dvmabuf, &xd->dkb, XDFM_BPS);
+ bcopy(xa->buf, &xd->dkb, XDFM_BPS);
}
if (xa->booting) {
if (xd->state == XD_DRIVE_UNKNOWN) {
xa.driveno = xd->xd_drive;
- xa.dvmabuf = (char *) dvma_malloc(XDFM_BPS);
+ xa.dvmabuf = (char *)dvma_malloc(XDFM_BPS, &xa.buf, M_NOWAIT);
xa.fullmode = XD_SUB_WAIT;
xa.booting = 0;
xdattach((struct device *) xd->parent, (struct device *) xd, &xa);
- dvma_free(xa.dvmabuf, XDFM_BPS);
+ dvma_free(xa.dvmabuf, XDFM_BPS, &xa.buf);
if (xd->state == XD_DRIVE_UNKNOWN) {
return (EIO);
}
if (xd->state == XD_DRIVE_UNKNOWN) {
xa.driveno = xd->xd_drive;
- xa.dvmabuf = (char *) dvma_malloc(XDFM_BPS);
+ xa.dvmabuf = (char *)dvma_malloc(XDFM_BPS, &xa.buf, M_NOWAIT);
xa.fullmode = XD_SUB_WAIT;
xa.booting = 0;
xdattach((struct device *)xd->parent, (struct device *)xd, &xa);
- dvma_free(xa.dvmabuf, XDFM_BPS);
+ dvma_free(xa.dvmabuf, XDFM_BPS, &xa.buf);
if (xd->state == XD_DRIVE_UNKNOWN) {
bp->b_error = EIO;
goto bad;
* algorithm built into the hardware.
*/
- { /* XXX DVMA mapin */
-
- /* DVMA: if we've got a kernel buf structure we map it into
- * DVMA space here. the advantage to this is that it allows
- * us to sleep if there isn't space in the DVMA area. the
- * disadvantage to this is that we are mapping this in earlier
- * than we have to, and thus possibly wasting DVMA space. in
- * an ideal world we would like to map it in once we know we
- * can submit an IOPB (at this point we don't know if we can
- * submit or not). (XXX) If the DVMA system gets redone this
- * mapin can be moved elsewhere. */
-
- caddr_t x;
- if ((bp->b_flags & B_PHYS) == 0) {
- x = dvma_mapin(kernel_map, (vm_offset_t)bp->b_data,
- bp->b_bcount, 1);
- if (x == NULL)
- panic("xd mapin");
- bp->b_resid = (long) x; /* XXX we store DVMA addr in
- * b_resid, thus overloading
- * it */
- }
- } /* XXX end DVMA mapin */
-
s = splbio(); /* protect the queues */
/* first, give jobs in front of us a chance */
-
parent = xd->parent;
while (parent->nfree > 0 && parent->sc_wq.b_actf)
if (xdc_startbuf(parent, NULL, NULL) != XD_ERR_AOK)
break;
/* if there are no free iorq's, then we just queue and return. the
- * buffs will get picked up later by xdcintr(). */
+ * buffs will get picked up later by xdcintr().
+ */
if (parent->nfree == 0) {
wq = &xd->parent->sc_wq;
splx(s);
return;
}
- /* now we have free iopb's and we are at splbio... start 'em up */
+ /* now we have free iopb's and we are at splbio... start 'em up */
if (xdc_startbuf(parent, xd, bp) != XD_ERR_AOK) {
return;
}
* load request. we have to calculate the correct block number based
* on partition info.
*
- * also, note that there are two kinds of buf structures, those with
- * B_PHYS set and those without B_PHYS. if B_PHYS is set, then it is
- * a raw I/O (to a cdevsw) and we are doing I/O directly to the users'
- * buffer which has already been mapped into DVMA space. however, if
- * B_PHYS is not set, then the buffer is a normal system buffer which
- * does *not* live in DVMA space. in that case we call dvma_mapin to
- * map it into DVMA space so we can do the DMA I/O to it.
- *
- * in cases where we do a dvma_mapin, note that iorq points to the buffer
- * as mapped into DVMA space, where as the bp->b_data points to its
- * non-DVMA mapping.
+ * note that iorq points to the buffer as mapped into DVMA space,
+ * where as the bp->b_data points to its non-DVMA mapping.
*/
block = bp->b_blkno + ((partno == RAW_PART) ? 0 :
xdsc->sc_dk.dk_label.d_partitions[partno].p_offset);
- if ((bp->b_flags & B_PHYS) == 0) {
- dbuf = (caddr_t) bp->b_resid; /* XXX: overloaded resid from
- * xdstrategy() */
- bp->b_resid = bp->b_bcount; /* XXX? */
-#ifdef someday
-
- /* XXX: this is where we would really like to do the DVMA
- * mapin, but we get called from intr here so we can't sleep
- * so we can't do it. */
- /* allocate DVMA, map in */
-
- if (dbuf == NULL) { /* out of DVMA space */
- printf("%s: warning: out of DVMA space\n", xdcsc->sc_dev.dv_xname);
- XDC_FREE(xdcsc, rqno);
- wq = &xdcsc->sc_wq; /* put at end of queue */
- bp->b_actf = 0;
- bp->b_actb = wq->b_actb;
- *wq->b_actb = bp;
- wq->b_actb = &bp->b_actf;
- return (XD_ERR_FAIL); /* XXX: need some sort of
- * call-back scheme here? */
- }
-#endif /* someday */
- } else {
- dbuf = bp->b_data;
+ dbuf = kdvma_mapin(bp->b_data, bp->b_bcount, 0);
+ if (dbuf == NULL) { /* out of DVMA space */
+ printf("%s: warning: out of DVMA space\n",
+ xdcsc->sc_dev.dv_xname);
+ XDC_FREE(xdcsc, rqno);
+ wq = &xdcsc->sc_wq; /* put at end of queue */
+ bp->b_actf = 0;
+ bp->b_actb = wq->b_actb;
+ *wq->b_actb = bp;
+ wq->b_actb = &bp->b_actf;
+ return (XD_ERR_FAIL); /* XXX: need some sort of
+ * call-back scheme here? */
}
/* init iorq and load iopb from it */
/* fix queues based on "blast-mode" */
for (lcv = 0; lcv < XDC_MAXIOPB; lcv++) {
- if (XD_STATE(xdcsc->reqs[lcv].mode) != XD_SUB_POLL &&
- XD_STATE(xdcsc->reqs[lcv].mode) != XD_SUB_WAIT &&
- XD_STATE(xdcsc->reqs[lcv].mode) != XD_SUB_NORM)
+ register struct xd_iorq *iorq = &xdcsc->reqs[lcv];
+
+ if (XD_STATE(iorq->mode) != XD_SUB_POLL &&
+ XD_STATE(iorq->mode) != XD_SUB_WAIT &&
+ XD_STATE(iorq->mode) != XD_SUB_NORM)
/* is it active? */
continue;
xdcsc->nrun--; /* it isn't running any more */
if (blastmode == XD_RSET_ALL || blastmode != lcv) {
/* failed */
- xdcsc->reqs[lcv].errno = error;
+ iorq->errno = error;
xdcsc->iopbase[lcv].done = xdcsc->iopbase[lcv].errs = 1;
switch (XD_STATE(xdcsc->reqs[lcv].mode)) {
case XD_SUB_NORM:
- xdcsc->reqs[lcv].buf->b_error = EIO;
- xdcsc->reqs[lcv].buf->b_flags |= B_ERROR;
- xdcsc->reqs[lcv].buf->b_resid =
- xdcsc->reqs[lcv].sectcnt * XDFM_BPS;
- if ((xdcsc->reqs[lcv].buf->b_flags & B_PHYS) == 0) {
- dvma_mapout(
- (vm_offset_t)xdcsc->reqs[lcv].dbufbase,
- (vm_offset_t)xdcsc->reqs[lcv].buf->b_un.b_addr,
- xdcsc->reqs[lcv].buf->b_bcount);
- }
- biodone(xdcsc->reqs[lcv].buf);
+ iorq->buf->b_error = EIO;
+ iorq->buf->b_flags |= B_ERROR;
+ iorq->buf->b_resid =
+ iorq->sectcnt * XDFM_BPS;
+ dvma_mapout(
+ (vm_offset_t)iorq->dbufbase,
+ (vm_offset_t)iorq->buf->b_un.b_addr,
+ iorq->buf->b_bcount);
+ biodone(iorq->buf);
XDC_FREE(xdcsc, lcv); /* add to free list */
break;
case XD_SUB_WAIT:
- wakeup(&xdcsc->reqs[lcv]);
+ wakeup(iorq);
case XD_SUB_POLL:
xdcsc->ndone++;
- xdcsc->reqs[lcv].mode =
- XD_NEWSTATE(xdcsc->reqs[lcv].mode, XD_SUB_DONE);
+ iorq->mode =
+ XD_NEWSTATE(iorq->mode, XD_SUB_DONE);
break;
}
} else {
bp->b_resid = 0; /* done */
}
- if ((bp->b_flags & B_PHYS) == 0) {
- dvma_mapout((vm_offset_t) iorq->dbufbase,
- (vm_offset_t) bp->b_un.b_addr,
- bp->b_bcount);
- }
+ dvma_mapout((vm_offset_t) iorq->dbufbase,
+ (vm_offset_t) bp->b_un.b_addr,
+ bp->b_bcount);
XDC_FREE(xdcsc, rqno);
biodone(bp);
break;
{
int s, err, rqno, dummy;
- caddr_t dvmabuf = NULL;
+ caddr_t dvmabuf = NULL, buf = NULL;
struct xdc_softc *xdcsc;
/* check sanity of requested command */
/* create DVMA buffer for request if needed */
if (xio->dlen) {
- dvmabuf = dvma_malloc(xio->dlen);
+ dvmabuf = dvma_malloc(xio->dlen, &buf, M_WAITOK);
if (xio->cmd == XDCMD_WR || xio->cmd == XDCMD_XWR) {
- if (err = copyin(xio->dptr, dvmabuf, xio->dlen)) {
- dvma_free(dvmabuf, xio->dlen);
+ if (err = copyin(xio->dptr, buf, xio->dlen)) {
+ dvma_free(dvmabuf, xio->dlen, &buf);
return (err);
}
}
XDC_DONE(xdcsc, rqno, dummy);
if (xio->cmd == XDCMD_RD || xio->cmd == XDCMD_XRD)
- err = copyout(dvmabuf, xio->dptr, xio->dlen);
+ err = copyout(buf, xio->dptr, xio->dlen);
done:
splx(s);
if (dvmabuf)
- dvma_free(dvmabuf, xio->dlen);
+ dvma_free(dvmabuf, xio->dlen, &buf);
return (err);
}
-/* $NetBSD: xy.c,v 1.1 1995/09/25 20:35:14 chuck Exp $ */
+/* $NetBSD: xy.c,v 1.2 1995/12/11 12:40:25 pk Exp $ */
/*
*
* x y . c x y l o g i c s 4 5 0 / 4 5 1 s m d d r i v e r
*
* author: Chuck Cranor <chuck@ccrc.wustl.edu>
- * id: $Id: xy.c,v 1.1.1.1 1995/10/18 08:51:41 deraadt Exp $
+ * id: $Id: xy.c,v 1.2 1995/12/15 13:56:29 deraadt Exp $
* started: 14-Sep-95
* references: [1] Xylogics Model 753 User's Manual
* part number: 166-753-001, Revision B, May 21, 1988.
* "xyc_*" functions are internal, all others are external interfaces
*/
-/* external (XXX should migrate to std include file?) */
-extern caddr_t dvma_malloc __P((size_t));
-extern void dvma_free __P((caddr_t, size_t));
-extern caddr_t dvma_mapin __P((struct vm_map *, vm_offset_t, int, int));
-extern void dvma_mapout __P((vm_offset_t, vm_offset_t, int));
extern int pil_to_vme[]; /* from obio.c */
/* internals */
struct xyc_attach_args { /* this is the "aux" args to xyattach */
int driveno; /* unit number */
- char *dvmabuf; /* scratch buffer for reading disk label */
+ char *buf; /* scratch buffer for reading disk label */
+ char *dvmabuf; /* DVMA address of above */
int fullmode; /* submit mode */
int booting; /* are we booting or not? */
};
struct xyc_attach_args xa;
int lcv, err, pri, res, pbsz;
void *tmp, *tmp2;
+ void *dtmp, *dtmp2;
u_long ultmp;
/* get addressing and intr level stuff from autoconfig and load it
* into our xyc_softc. */
- ca->ca_ra.ra_vaddr = mapiodev(ca->ca_ra.ra_paddr,
+ ca->ca_ra.ra_vaddr = mapiodev(ca->ca_ra.ra_reg, 0,
ca->ca_ra.ra_len, ca->ca_bustype);
if ((u_long) ca->ca_ra.ra_paddr & PGOFSET)
(u_long) ca->ca_ra.ra_vaddr |=
*/
pbsz = XYC_MAXIOPB * sizeof(struct xy_iopb);
- tmp = tmp2 = (struct xy_iopb *) dvma_malloc(pbsz); /* KVA */
- ultmp = (u_long) tmp;
+ dtmp = dtmp2 = (struct xy_iopb *)dvma_malloc(pbsz, &tmp, M_NOWAIT);
+ tmp2 = tmp;
+ ultmp = (u_long) dtmp;
if ((ultmp & 0xffff0000) != ((ultmp + pbsz) & 0xffff0000)) {
- tmp = (struct xy_iopb *) dvma_malloc(pbsz); /* retry! */
- dvma_free(tmp2, pbsz);
- ultmp = (u_long) tmp;
+ dtmp = (struct xy_iopb *)
+ dvma_malloc(pbsz, &tmp, M_NOWAIT); /* retry! */
+ dvma_free(dtmp2, pbsz, &tmp2);
+ ultmp = (u_long) dtmp;
if ((ultmp & 0xffff0000) != ((ultmp + pbsz) & 0xffff0000)) {
printf("%s: can't alloc IOPB mem in 64K\n",
xyc->sc_dev.dv_xname);
return;
}
}
+ bzero(tmp, pbsz);
xyc->iopbase = tmp;
- bzero(xyc->iopbase, pbsz);
- xyc->dvmaiopb = (struct xy_iopb *) ((u_long) xyc->iopbase - DVMA_BASE);
+ xyc->dvmaiopb = (struct xy_iopb *) ((u_long)dtmp - DVMA_BASE);
xyc->reqs = (struct xy_iorq *)
malloc(XYC_MAXIOPB * sizeof(struct xy_iorq), M_DEVBUF, M_NOWAIT);
bzero(xyc->reqs, XYC_MAXIOPB * sizeof(struct xy_iorq));
/* now we must look for disks using autoconfig */
- xa.dvmabuf = (char *) dvma_malloc(XYFM_BPS);
+ xa.dvmabuf = (char *)dvma_malloc(XYFM_BPS, &xa.buf, M_NOWAIT);
xa.fullmode = XY_SUB_POLL;
xa.booting = 1;
for (xa.driveno = 0; xa.driveno < XYC_MAXDEV; xa.driveno++)
(void) config_found(self, (void *) &xa, NULL);
- dvma_free(xa.dvmabuf, XYFM_BPS);
+ dvma_free(xa.dvmabuf, XYFM_BPS, &xa.buf);
bootpath_store(1, NULL);
/* start the watchdog clock */
newstate = XY_DRIVE_NOLABEL;
xy->hw_spt = spt = 0; /* XXX needed ? */
- if (xygetdisklabel(xy, xa->dvmabuf) != XY_ERR_AOK)
+ if (xygetdisklabel(xy, xa->buf) != XY_ERR_AOK)
goto done;
/* inform the user of what is up */
printf("%s: <%s>, pcyl %d\n", xy->sc_dev.dv_xname,
- xa->dvmabuf, xy->pcyl);
+ xa->buf, xy->pcyl);
mb = xy->ncyl * (xy->nhead * xy->nsect) / (1048576 / XYFM_BPS);
printf("%s: %dMB, %d cyl, %d head, %d sec, %d bytes/sec\n",
xy->sc_dev.dv_xname, mb, xy->ncyl, xy->nhead, xy->nsect,
}
/* check dkbad for sanity */
- dkb = (struct dkbad *) xa->dvmabuf;
+ dkb = (struct dkbad *) xa->buf;
for (lcv = 0; lcv < 126; lcv++) {
if ((dkb->bt_bad[lcv].bt_cyl == 0xffff ||
dkb->bt_bad[lcv].bt_cyl == 0) &&
printf("%s: warning: invalid bad144 sector!\n",
xy->sc_dev.dv_xname);
} else {
- bcopy(xa->dvmabuf, &xy->dkb, XYFM_BPS);
+ bcopy(xa->buf, &xy->dkb, XYFM_BPS);
}
if (xa->booting) {
if (xy->state == XY_DRIVE_UNKNOWN) {
xa.driveno = xy->xy_drive;
- xa.dvmabuf = (char *) dvma_malloc(XYFM_BPS);
+ xa.dvmabuf = (char *)dvma_malloc(XYFM_BPS, &xa.buf, M_NOWAIT);
xa.fullmode = XY_SUB_WAIT;
xa.booting = 0;
xyattach((struct device *) xy->parent,
(struct device *) xy, &xa);
- dvma_free(xa.dvmabuf, XYFM_BPS);
+ dvma_free(xa.dvmabuf, XYFM_BPS, &xa.buf);
if (xy->state == XY_DRIVE_UNKNOWN) {
return (EIO);
}
if (xy->state == XY_DRIVE_UNKNOWN) {
xa.driveno = xy->xy_drive;
- xa.dvmabuf = (char *) dvma_malloc(XYFM_BPS);
+ xa.dvmabuf = (char *)dvma_malloc(XYFM_BPS, &xa.buf, M_NOWAIT);
xa.fullmode = XY_SUB_WAIT;
xa.booting = 0;
xyattach((struct device *)xy->parent, (struct device *)xy, &xa);
- dvma_free(xa.dvmabuf, XYFM_BPS);
+ dvma_free(xa.dvmabuf, XYFM_BPS, &xa.buf);
if (xy->state == XY_DRIVE_UNKNOWN) {
bp->b_error = EIO;
goto bad;
* now we know we have a valid buf structure that we need to do I/O
* on.
*/
-
- { /* XXX DVMA mapin */
-
- /* DVMA: if we've got a kernel buf structure we map it into
- * DVMA space here. the advantage to this is that it allows
- * us to sleep if there isn't space in the DVMA area. the
- * disadvantage to this is that we are mapping this in earlier
- * than we have to, and thus possibly wasting DVMA space. in
- * an ideal world we would like to map it in once we know we
- * can submit an IOPB (at this point we don't know if we can
- * submit or not). (XXX) If the DVMA system gets redone this
- * mapin can be moved elsewhere. */
-
- caddr_t x;
- if ((bp->b_flags & B_PHYS) == 0) {
- x = dvma_mapin(kernel_map, (vm_offset_t)bp->b_data,
- bp->b_bcount, 1);
- if (x == NULL)
- panic("xy mapin");
- bp->b_error = (int) x; /* XXX we store DVMA addr in
- * b_error, thus overloading
- * it. VERY ugly. note
- * that xd.c uses b_resid, but
- * we can't because disksort
- * uses it */
- }
- } /* XXX end DVMA mapin */
-
s = splbio(); /* protect the queues */
disksort(&xy->xyq, bp);
* load request. we have to calculate the correct block number based
* on partition info.
*
- * also, note that there are two kinds of buf structures, those with
- * B_PHYS set and those without B_PHYS. if B_PHYS is set, then it is
- * a raw I/O (to a cdevsw) and we are doing I/O directly to the users'
- * buffer which has already been mapped into DVMA space. however, if
- * B_PHYS is not set, then the buffer is a normal system buffer which
- * does *not* live in DVMA space. in that case we call dvma_mapin to
- * map it into DVMA space so we can do the DMA I/O to it.
- *
- * in cases where we do a dvma_mapin, note that iorq points to the buffer
- * as mapped into DVMA space, where as the bp->b_data points to its
- * non-DVMA mapping.
+ * note that iorq points to the buffer as mapped into DVMA space,
+ * whereas the bp->b_data points to its non-DVMA mapping.
*/
block = bp->b_blkno + ((partno == RAW_PART) ? 0 :
xysc->sc_dk.dk_label.d_partitions[partno].p_offset);
- if ((bp->b_flags & B_PHYS) == 0) {
- dbuf = (caddr_t) bp->b_error; /* XXX: overloaded error from
- * xystrategy() */
- bp->b_error = 0; /* XXX? */
-#ifdef someday
-
- /* XXX: this is where we would really like to do the DVMA
- * mapin, but we get called from intr here so we can't sleep
- * so we can't do it. */
- /* allocate DVMA, map in */
-
- if (dbuf == NULL) { /* out of DVMA space */
- printf("%s: warning: out of DVMA space\n",
- xycsc->sc_dev.dv_xname);
- return (XY_ERR_FAIL); /* XXX: need some sort of
- * call-back scheme here? */
- }
-#endif /* someday */
- } else {
- dbuf = bp->b_data;
+ dbuf = kdvma_mapin(bp->b_data, bp->b_bcount, 0);
+ if (dbuf == NULL) { /* out of DVMA space */
+ printf("%s: warning: out of DVMA space\n",
+ xycsc->sc_dev.dv_xname);
+ return (XY_ERR_FAIL); /* XXX: need some sort of
+ * call-back scheme here? */
}
/* init iorq and load iopb from it */
/* fix queues based on "blast-mode" */
for (lcv = 0; lcv < XYC_MAXIOPB; lcv++) {
- if (XY_STATE(xycsc->reqs[lcv].mode) != XY_SUB_POLL &&
- XY_STATE(xycsc->reqs[lcv].mode) != XY_SUB_WAIT &&
- XY_STATE(xycsc->reqs[lcv].mode) != XY_SUB_NORM)
+ register struct xy_iorq *iorq = &xycsc->reqs[lcv];
+
+ if (XY_STATE(iorq->mode) != XY_SUB_POLL &&
+ XY_STATE(iorq->mode) != XY_SUB_WAIT &&
+ XY_STATE(iorq->mode) != XY_SUB_NORM)
/* is it active? */
continue;
if (blastmode == XY_RSET_ALL ||
- blastmode != &xycsc->reqs[lcv]) {
+ blastmode != iorq) {
/* failed */
- xycsc->reqs[lcv].errno = error;
+ iorq->errno = error;
xycsc->iopbase[lcv].done = xycsc->iopbase[lcv].errs = 1;
- switch (XY_STATE(xycsc->reqs[lcv].mode)) {
+ switch (XY_STATE(iorq->mode)) {
case XY_SUB_NORM:
- xycsc->reqs[lcv].buf->b_error = EIO;
- xycsc->reqs[lcv].buf->b_flags |= B_ERROR;
- xycsc->reqs[lcv].buf->b_resid =
- xycsc->reqs[lcv].sectcnt * XYFM_BPS;
- if ((xycsc->reqs[lcv].buf->b_flags & B_PHYS) == 0) {
- dvma_mapout(
- (vm_offset_t)xycsc->reqs[lcv].dbufbase,
- (vm_offset_t)xycsc->reqs[lcv].buf->b_un.b_addr,
- xycsc->reqs[lcv].buf->b_bcount);
- }
- xycsc->reqs[lcv].xy->xyq.b_actf =
- xycsc->reqs[lcv].buf->b_actf;
- biodone(xycsc->reqs[lcv].buf);
- xycsc->reqs[lcv].mode = XY_SUB_FREE;
+ iorq->buf->b_error = EIO;
+ iorq->buf->b_flags |= B_ERROR;
+ iorq->buf->b_resid = iorq->sectcnt * XYFM_BPS;
+ dvma_mapout((vm_offset_t)iorq->dbufbase,
+ (vm_offset_t)iorq->buf->b_un.b_addr,
+ iorq->buf->b_bcount);
+ iorq->xy->xyq.b_actf = iorq->buf->b_actf;
+ biodone(iorq->buf);
+ iorq->mode = XY_SUB_FREE;
break;
case XY_SUB_WAIT:
- wakeup(&xycsc->reqs[lcv]);
+ wakeup(iorq);
case XY_SUB_POLL:
- xycsc->reqs[lcv].mode =
- XY_NEWSTATE(xycsc->reqs[lcv].mode, XY_SUB_DONE);
+ iorq->mode =
+ XY_NEWSTATE(iorq->mode, XY_SUB_DONE);
break;
}
} else {
bp->b_resid = 0; /* done */
}
- if ((bp->b_flags & B_PHYS) == 0) {
- dvma_mapout((vm_offset_t) iorq->dbufbase,
- (vm_offset_t) bp->b_un.b_addr,
- bp->b_bcount);
- }
+ dvma_mapout((vm_offset_t) iorq->dbufbase,
+ (vm_offset_t) bp->b_un.b_addr,
+ bp->b_bcount);
iorq->mode = XY_SUB_FREE;
iorq->xy->xyq.b_actf = bp->b_actf;
biodone(bp);
{
int s, err, rqno, dummy;
- caddr_t dvmabuf = NULL;
+ caddr_t dvmabuf = NULL, buf = NULL;
struct xyc_softc *xycsc;
/* check sanity of requested command */
/* create DVMA buffer for request if needed */
if (xio->dlen) {
- dvmabuf = dvma_malloc(xio->dlen);
+ dvmabuf = dvma_malloc(xio->dlen, &buf, M_WAITOK);
if (xio->cmd == XYCMD_WR) {
- if (err = copyin(xio->dptr, dvmabuf, xio->dlen)) {
- dvma_free(dvmabuf, xio->dlen);
+ if (err = copyin(xio->dptr, buf, xio->dlen)) {
+ dvma_free(dvmabuf, xio->dlen, &buf);
return (err);
}
}
XYC_DONE(xycsc, dummy);
if (xio->cmd == XYCMD_RD)
- err = copyout(dvmabuf, xio->dptr, xio->dlen);
+ err = copyout(buf, xio->dptr, xio->dlen);
done:
splx(s);
if (dvmabuf)
- dvma_free(dvmabuf, xio->dlen);
+ dvma_free(dvmabuf, xio->dlen, &buf);
return (err);
}
* it will use that instead of creating one, but you must only do this if
* you get it from ../sparc/vaddrs.h.
*/
-void *mapdev __P((void *pa, int va, int size, int bustype));
-#define mapiodev(pa, size, bustype) mapdev(pa, 0, size, bustype)
+void *mapdev __P((struct rom_reg *rr, int va, int offset,
+ int size, int bustype));
+#define mapiodev(rr, offset, size, bustype) mapdev(rr, 0, offset, size, bustype)
void *bus_map __P((void *pa, int len, int bustype));
void *bus_tmp __P((void *pa, int bustype));
void bus_untmp __P((void));
+#ifdef notyet
+/*
+ * REG2PHYS is provided for drivers with a `d_mmap' function.
+ */
+#define REG2PHYS(rr, offset, bt) \
+ (((u_int)(rr)->rr_paddr + (offset)) | \
+ ((cputyp == CPU_SUN4M) \
+ ? ((rr)->rr_iospace << PMAP_SHFT4M) \
+ : bt2pmt[bt]) \
+ )
+#else
+#define REG2PHYS(rr, offset, bt) \
+ (((u_int)(rr)->rr_paddr + (offset)) | (bt2pmt[bt]) \
+ )
+#endif
+
/*
* Memory description arrays. Shared between pmap.c and autoconf.c; no
* one else should use this (except maybe mem.c, e.g., if we fix the VM to
*/
#ifdef _KERNEL
#ifndef LOCORE
-extern vm_offset_t dvmabase;
+extern vm_offset_t dvma_base;
+extern vm_offset_t dvma_end;
extern struct map *dvmamap;
#endif
-#endif
/*
* The dvma resource map is defined in page units, which are numbered 1 to N.
* Use these macros to convert to/from virtual addresses.
*/
-#define rctov(n) (ctob(((n)-1))+dvmabase)
-#define vtorc(v) ((btoc((v)-dvmabase))+1)
+#define rctov(n) (ctob(((n)-1))+dvma_base)
+#define vtorc(v) ((btoc((v)-dvma_base))+1)
+
+extern caddr_t kdvma_mapin __P((caddr_t, int, int));
+extern caddr_t dvma_malloc __P((size_t, void *, int));
+extern void dvma_free __P((caddr_t, size_t, void *));
+#endif
#ifdef _KERNEL
#endif
#ifdef _KERNEL
+#ifndef LOCORE
extern int cputyp;
extern int cpumod;
#endif
+#endif
/*
* Values for the cputyp variable.
*/
#define VM_KMEM_SIZE (NKMEMCLUSTERS*CLBYTES)
#define MACHINE_NONCONTIG /* VM <=> pmap interface modifier */
+
+#if defined (_KERNEL) && !defined(LOCORE)
+struct vm_map;
+vm_offset_t dvma_mapin __P((struct vm_map *, vm_offset_t, int, int));
+int dvma_mapout __P((vm_offset_t, vm_offset_t, int));
+#endif
struct confargs *ca = aux;
struct romaux *ra = &ca->ca_ra;
- (void)mapdev(ra->ra_paddr, AUXREG_VA, sizeof(long), ca->ca_bustype);
- auxio_reg = AUXIO_REG;
+ auxio_reg = mapdev(ra->ra_reg, AUXREG_VA, 0, sizeof(long),
+ ca->ca_bustype);
+ if ((u_long)auxio_reg != AUXREG_VA)
+ panic("unable to map auxreg");
printf("\n");
#ifdef BLINK
blink((caddr_t)0);
oldclk = 1; /* we've got an oldie! */
printf("\n");
- i7 = (volatile struct intersil7170 *) mapiodev(ra->ra_paddr,
+ i7 = (volatile struct intersil7170 *) mapiodev(ra->ra_reg, 0,
sizeof(*i7), ca->ca_bustype);
idp = &idprom;
printf("\n");
- eeprom_va = (char *)mapiodev(ra->ra_paddr, sizeof(struct eeprom),
+ eeprom_va = (char *)mapiodev(ra->ra_reg, 0, sizeof(struct eeprom),
ca->ca_bustype);
eeprom_nvram = 0;
/*
* the MK48T08 is 8K
*/
- cl = (struct clockreg *)mapiodev(ra->ra_paddr, 2 * NBPG,
+ cl = (struct clockreg *)mapiodev(ra->ra_reg, 0, 2 * NBPG,
ca->ca_bustype);
pmap_changeprot(pmap_kernel(), (vm_offset_t)cl, VM_PROT_READ, 1);
pmap_changeprot(pmap_kernel(), (vm_offset_t)cl + NBPG, VM_PROT_READ, 1);
/*
* the MK48T02 is 2K
*/
- cl = (struct clockreg *)mapiodev(ra->ra_paddr, sizeof *clockreg,
+ cl = (struct clockreg *)mapiodev(ra->ra_reg, 0, sizeof *clockreg,
ca->ca_bustype);
pmap_changeprot(pmap_kernel(), (vm_offset_t)cl, VM_PROT_READ, 1);
idp = &cl->cl_idprom;
* we have a fixed virtual address for the timer, to make
* microtime() faster.
*/
- (void)mapdev(ra->ra_paddr, TIMERREG_VA, sizeof(struct timerreg),
- ca->ca_bustype);
+ if ((int)mapdev(ra->ra_reg, TIMERREG_VA, 0, sizeof(struct timerreg),
+ ca->ca_bustype) != TIMERREG_VA)
+ panic("unable to map timer");
timerok = 1;
/* should link interrupt handlers here, rather than compiled-in? */
}
* dvmamap is used to manage DVMA memory. Note: this coincides with
* the memory range in `phys_map' (which is mostly a place-holder).
*/
+vm_offset_t dvma_base, dvma_end;
struct map *dvmamap;
-vm_offset_t dvmabase;
static int ndvmamap; /* # of entries in dvmamap */
caddr_t allocsys();
* map, but we want one completely separate, even though it uses
* the same pmap.
*/
- phys_map = vm_map_create(pmap_kernel(), DVMA_BASE, DVMA_END, 1);
+#if defined(SUN4M) && 0
+ if (cputyp == CPU_SUN4M) {
+ dvma_base = DVMA4M_BASE;
+ dvma_end = (vm_offset_t)(0 - NBPG); /* want 4BG, but cant express */
+ } else
+#endif
+ {
+ dvma_base = DVMA_BASE;
+ dvma_end = DVMA_END;
+ }
+ phys_map = vm_map_create(pmap_kernel(), dvma_base, dvma_end, 1);
if (phys_map == NULL)
panic("unable to create DVMA map");
/*
- * For now, allocate half of DVMA space for a (privately managed)
- * pool of addresses for double mappings.
+	 * Allocate DVMA space and dump it into a privately managed
+	 * resource map for double mappings that is usable from
+	 * interrupt contexts.
*/
- dvmabase = kmem_alloc_wait(phys_map, (DVMA_END-DVMA_BASE)/2);
- rminit(dvmamap, btoc((DVMA_END-DVMA_BASE)/2),
- vtorc(dvmabase), "dvmamap", ndvmamap);
+ if (kmem_alloc_wait(phys_map, (dvma_end-dvma_base)) != dvma_base)
+ panic("unable to allocate from DVMA map");
+ rminit(dvmamap, btoc((dvma_end-dvma_base)),
+ vtorc(dvma_base), "dvmamap", ndvmamap);
/*
* Finally, allocate mbuf pool. Since mclrefcnt is an off-size
/*
* Map an I/O device given physical address and size in bytes, e.g.,
*
- * mydev = (struct mydev *)mapdev(myioaddr, 0, sizeof(struct mydev), pmtype);
- *
+ * mydev = (struct mydev *)mapdev(myioaddr, 0, 0,
+ * sizeof(struct mydev), pmtype);
+ *
* See also machine/autoconf.h.
*/
void *
-mapdev(phys, virt, size, bustype)
- register void *phys;
- register int virt, size;
+mapdev(rr, virt, offset, size, bustype)
+ register struct rom_reg *rr;
+ register int offset, virt, size;
register int bustype;
{
- register vm_offset_t v;
+ register vm_offset_t v, pa;
register void *ret;
static vm_offset_t iobase;
- int pmtype = bt2pmt[bustype];
+ int pmtype;
if (iobase == NULL)
iobase = IODEV_BASE;
panic("mapiodev");
}
ret = (void *)v;
- phys = (void *)trunc_page(phys);
+ pa = trunc_page(rr->rr_paddr + offset);
+#ifdef notyet
+ pmtype = (cputyp == CPU_SUN4M)
+ ? (rr->rr_iospace << PMAP_SHFT4M)
+ : bt2pmt[bustype];
+#else
+ pmtype = bt2pmt[bustype];
+#endif
do {
- pmap_enter(pmap_kernel(), v,
- (vm_offset_t)phys | pmtype | PMAP_NC,
+ pmap_enter(pmap_kernel(), v, pa | pmtype | PMAP_NC,
VM_PROT_READ | VM_PROT_WRITE, 1);
v += PAGE_SIZE;
- phys += PAGE_SIZE;
+ pa += PAGE_SIZE;
} while ((size -= PAGE_SIZE) > 0);
return (ret);
}
ra->ra_vaddr = (caddr_t)par_err_reg;
} else {
par_err_reg = ra->ra_vaddr ? (volatile int *)ra->ra_vaddr :
- (volatile int *)mapiodev(ra->ra_paddr, sizeof(int),
+ (volatile int *)mapiodev(ra->ra_reg, 0, sizeof(int),
ca->ca_bustype);
}
printf("\n");
-/* $NetBSD: vm_machdep.c,v 1.14 1995/06/26 22:46:04 pk Exp $ */
+/* $NetBSD: vm_machdep.c,v 1.18 1995/12/11 12:44:39 pk Exp $ */
/*
* Copyright (c) 1992, 1993
}
/*
- * Map a range [va, va+len] in the given map to a kernel address
- * in DVMA space.
+ * Wrapper for dvma_mapin() in kernel space,
+ * so drivers need not include VM goo to get at kernel_map.
+ */
+caddr_t
+kdvma_mapin(va, len, canwait)
+ caddr_t va;
+ int len, canwait;
+{
+ return ((caddr_t)dvma_mapin(kernel_map, (vm_offset_t)va, len, canwait));
+}
+
+caddr_t
+dvma_malloc(len, kaddr, flags)
+ size_t len;
+ void *kaddr;
+ int flags;
+{
+ vm_offset_t kva;
+ vm_offset_t dva;
+
+ kva = (vm_offset_t)malloc(len, M_DEVBUF, flags);
+ if (kva == NULL)
+ return (NULL);
+
+ *(vm_offset_t *)kaddr = kva;
+ dva = dvma_mapin(kernel_map, kva, len, (flags & M_NOWAIT) ? 0 : 1);
+ if (dva == NULL) {
+ free((void *)kva, M_DEVBUF);
+ return (NULL);
+ }
+ return (caddr_t)dva;
+}
+
+void
+dvma_free(dva, len, kaddr)
+ caddr_t dva;
+ size_t len;
+ void *kaddr;
+{
+ vm_offset_t kva = *(vm_offset_t *)kaddr;
+
+ dvma_mapout((vm_offset_t)dva, kva, len);
+ free((void *)kva, M_DEVBUF);
+}
+
+/*
+ * Map a range [va, va+len] of wired virtual addresses in the given map
+ * to a kernel address in DVMA space.
*/
vm_offset_t
dvma_mapin(map, va, len, canwait)
vm_offset_t va;
int len, canwait;
{
- vm_offset_t kva, tva, va_0 = va;
+ vm_offset_t kva, tva;
register int npf, s;
register vm_offset_t pa;
- long pn;
+ long off, pn;
- npf = btoc(round_page(len));
+ off = (int)va & PGOFSET;
+ va -= off;
+ len = round_page(len + off);
+ npf = btoc(len);
+
+ kvm_uncache((caddr_t)va, len >> PGSHIFT);
s = splimp();
for (;;) {
+
pn = rmalloc(dvmamap, npf);
+
if (pn != 0)
break;
if (canwait) {
pa = pmap_extract(vm_map_pmap(map), va);
if (pa == 0)
panic("dvma_mapin: null page frame");
-
+ pa = trunc_page(pa);
+
+#if defined(SUN4M) && 0
+ if (cputyp == CPU_SUN4M) {
+ iommu_enter(tva, pa);
+ } else
+#endif
+ {
/*
- * ### pmap_enter distributes this mapping to all contexts...
- * maybe we should avoid this extra work
+ * pmap_enter distributes this mapping to all
+ * contexts... maybe we should avoid this extra work
*/
+#ifdef notyet
+#if defined(SUN4)
+ if (have_iocache)
+ pa |= PG_IOC;
+#endif
+#endif
pmap_enter(pmap_kernel(), tva,
- trunc_page(pa) | PMAP_NC,
+ pa | PMAP_NC,
VM_PROT_READ|VM_PROT_WRITE, 1);
+ }
+
tva += PAGE_SIZE;
va += PAGE_SIZE;
}
-
- if (vactype == VAC_WRITEBACK)
- cache_flush((caddr_t)va_0, len); /* XXX only needed on write */
-
- return kva;
+ return kva + off;
}
/*
vm_offset_t kva, va;
int len;
{
- register int s;
+ register int s, off;
+
+ off = (int)kva & PGOFSET;
+ kva -= off;
+ len = round_page(len + off);
+#if defined(SUN4M) && 0
+ if (cputyp == CPU_SUN4M)
+ iommu_remove(kva, len);
+ else
+#endif
pmap_remove(pmap_kernel(), kva, kva + len);
s = splimp();
vmapbuf(bp)
register struct buf *bp;
{
- register int len;
- register caddr_t addr;
+ register vm_offset_t addr, kva, pa;
+ register vm_size_t size, off;
+ register int npf;
struct proc *p;
- int off;
- vm_offset_t kva;
+ register struct vm_map *map;
if ((bp->b_flags & B_PHYS) == 0)
panic("vmapbuf");
- addr = bp->b_saveaddr = bp->b_un.b_addr;
- off = (int)addr & PGOFSET;
p = bp->b_proc;
- len = round_page(bp->b_bcount + off);
- kva = dvma_mapin(&p->p_vmspace->vm_map, addr-off, len, 1);
- bp->b_un.b_addr = (caddr_t) (kva + off);
+ map = &p->p_vmspace->vm_map;
+ bp->b_saveaddr = bp->b_data;
+ addr = (vm_offset_t)bp->b_saveaddr;
+ off = addr & PGOFSET;
+ size = round_page(bp->b_bcount + off);
+ kva = kmem_alloc_wait(kernel_map, size);
+ bp->b_data = (caddr_t)(kva + off);
+ addr = trunc_page(addr);
+ npf = btoc(size);
+ while (npf--) {
+ pa = pmap_extract(vm_map_pmap(map), (vm_offset_t)addr);
+ if (pa == 0)
+ panic("vmapbuf: null page frame");
+
+ /*
+ * pmap_enter distributes this mapping to all
+ * contexts... maybe we should avoid this extra work
+ */
+ pmap_enter(pmap_kernel(), kva,
+ pa | PMAP_NC,
+ VM_PROT_READ|VM_PROT_WRITE, 1);
+
+ addr += PAGE_SIZE;
+ kva += PAGE_SIZE;
+ }
}
/*
vunmapbuf(bp)
register struct buf *bp;
{
- register vm_offset_t kva = (vm_offset_t)bp->b_un.b_addr;
- register int off, npf;
+ register vm_offset_t kva = (vm_offset_t)bp->b_data;
+ register vm_size_t size, off;
if ((bp->b_flags & B_PHYS) == 0)
panic("vunmapbuf");
- bp->b_un.b_addr = bp->b_saveaddr;
+ kva = (vm_offset_t)bp->b_data;
+ off = kva & PGOFSET;
+ size = round_page(bp->b_bcount + off);
+ kmem_free_wakeup(kernel_map, trunc_page(kva), size);
+ bp->b_data = bp->b_saveaddr;
bp->b_saveaddr = NULL;
-
- off = (int)kva & PGOFSET;
- kva -= off;
- dvma_mapout(kva, bp->b_un.b_addr, round_page(bp->b_bcount + off));
-}
-
-/*
- * Allocate physical memory space in the dvma virtual address range.
- */
-caddr_t
-dvma_malloc(size)
- size_t size;
-{
- vm_size_t vsize;
- caddr_t va;
-
- vsize = round_page(size);
- va = (caddr_t)kmem_alloc(phys_map, vsize);
- if (va == NULL)
- panic("dvma_malloc");
- kvm_uncache(va, vsize >> PGSHIFT);
- return (va);
-}
-
-/*
- * Free dvma addresses allocated with dvma_malloc()
- */
-void
-dvma_free(ptr, size)
- caddr_t ptr;
- size_t size;
-{
- kmem_free(phys_map, (vm_offset_t)ptr, size);
+ if (vactype != VAC_NONE)
+ cache_flush(bp->b_un.b_addr, bp->b_bcount - bp->b_resid);
}