int last_context; /* Last current context */
/* VBLANK support */
+ int vblank_disable_allowed;
int *vbl_queue; /* vbl wait channel */
atomic_t *_vblank_count; /* no. of vblank interrupts */
DRM_SPINTYPE vbl_lock; /* locking for vblank operations */
atomic_t *vblank_refcount; /* no. users for vblank interrupts */
u_int32_t *last_vblank; /* locked, used for overflow handling */
int *vblank_enabled; /* make sure we only disable once */
- u_int32_t *vblank_premodeset; /* compensation for wraparounds */
- int *vblank_suspend; /* Don't wait while crtc is disabled */
+ int *vblank_inmodeset; /* X DDX is currently setting mode */
struct timeout vblank_disable_timer;
int num_crtcs; /* number of crtcs on device */
void drm_vblank_cleanup(struct drm_device *);
int drm_vblank_init(struct drm_device *, int);
u_int32_t drm_vblank_count(struct drm_device *, int);
-void drm_update_vblank_count(struct drm_device *, int);
int drm_vblank_get(struct drm_device *, int);
void drm_vblank_put(struct drm_device *, int);
int drm_modeset_ctl(struct drm_device *, void *, struct drm_file *);
unsigned long );
/* Inline replacements for DRM_IOREMAP macros */
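+/* No write-combining mapping is available here, so alias the _wc variant
+ * to the regular ioremap. */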
+#define drm_core_ioremap_wc drm_core_ioremap
static __inline__ void drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev)
{
map->handle = drm_ioremap(dev, map);
irqreturn_t drm_irq_handler_wrap(DRM_IRQ_ARGS);
void vblank_disable(void *);
+void drm_update_vblank_count(struct drm_device *, int);
void drm_locked_task(void *context, void *pending);
int
struct drm_device *dev = (struct drm_device*)arg;
int i;
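+ /* Don't disable vblank interrupts until the DDX has signalled the end
+ * of its first modeset via _DRM_POST_MODESET. */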
+ if (!dev->vblank_disable_allowed)
+ return;
+
for (i=0; i < dev->num_crtcs; i++){
DRM_SPINLOCK(&dev->vbl_lock);
if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
dev->vblank_enabled[i]) {
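+ /* Cache the hw counter before disabling, so the next enable can
+ * account for frames missed while the interrupt was off. */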
+ dev->last_vblank[i] =
+ dev->driver.get_vblank_counter(dev, i);
dev->driver.disable_vblank(dev, i);
dev->vblank_enabled[i] = 0;
}
vblank_disable(dev);
- if (dev->vbl_queue)
- drm_free(dev->vbl_queue, sizeof(*dev->vbl_queue) *
- dev->num_crtcs, M_DRM);
+ drm_free(dev->vbl_queue, sizeof(*dev->vbl_queue) *
+ dev->num_crtcs, M_DRM);
#if 0 /* disabled for now */
- if (dev->vbl_sigs)
- drm_free(dev->vbl_sigs, sizeof(*dev->vbl_sigs) * dev->num_crtcs,
- M_DRM);
+ drm_free(dev->vbl_sigs, sizeof(*dev->vbl_sigs) * dev->num_crtcs, M_DRM);
#endif
- if (dev->_vblank_count)
- drm_free(dev->_vblank_count, sizeof(*dev->_vblank_count) *
- dev->num_crtcs, M_DRM);
- if (dev->vblank_refcount)
- drm_free(dev->vblank_refcount, sizeof(*dev->vblank_refcount) *
- dev->num_crtcs, M_DRM);
- if (dev->vblank_enabled)
- drm_free(dev->vblank_enabled, sizeof(*dev->vblank_enabled) *
- dev->num_crtcs, M_DRM);
- if (dev->last_vblank)
- drm_free(dev->last_vblank, sizeof(*dev->last_vblank) *
- dev->num_crtcs, M_DRM);
- if (dev->vblank_premodeset)
- drm_free(dev->vblank_premodeset,
- sizeof(*dev->vblank_premodeset) * dev->num_crtcs, M_DRM);
- if (dev->vblank_suspend)
- drm_free(dev->vblank_suspend,
- sizeof(*dev->vblank_suspend) * dev->num_crtcs, M_DRM);
+ drm_free(dev->_vblank_count, sizeof(*dev->_vblank_count) *
+ dev->num_crtcs, M_DRM);
+ drm_free(dev->vblank_refcount, sizeof(*dev->vblank_refcount) *
+ dev->num_crtcs, M_DRM);
+ drm_free(dev->vblank_enabled, sizeof(*dev->vblank_enabled) *
+ dev->num_crtcs, M_DRM);
+ drm_free(dev->last_vblank, sizeof(*dev->last_vblank) *
+ dev->num_crtcs, M_DRM);
+ drm_free(dev->vblank_inmodeset, sizeof(*dev->vblank_inmodeset) *
+ dev->num_crtcs, M_DRM);
dev->num_crtcs = 0;
DRM_SPINUNINIT(&dev->vbl_lock);
if ((dev->vblank_enabled = drm_calloc(num_crtcs,
sizeof(*dev->vblank_enabled), M_DRM)) == NULL)
goto err;
- if ((dev->last_vblank = drm_calloc(num_crtcs, sizeof(*dev->last_vblank),
- M_DRM)) == NULL)
- goto err;
- if ((dev->vblank_premodeset = drm_calloc(num_crtcs,
- sizeof(*dev->vblank_premodeset), M_DRM)) == NULL)
+ if ((dev->last_vblank = drm_calloc(num_crtcs,
+ sizeof(*dev->last_vblank), M_DRM)) == NULL)
goto err;
- if ((dev->vblank_suspend = drm_calloc(num_crtcs,
- sizeof(*dev->vblank_suspend), M_DRM)) == NULL)
+ if ((dev->vblank_inmodeset = drm_calloc(num_crtcs,
+ sizeof(*dev->vblank_inmodeset), M_DRM)) == NULL)
goto err;
-
/* Zero everything */
for (i = 0; i < num_crtcs; i++) {
atomic_set(&dev->_vblank_count[i], 0);
atomic_set(&dev->vblank_refcount[i], 0);
}
+ dev->vblank_disable_allowed = 0;
+
return (0);
err:
{
u_int32_t cur_vblank, diff;
- if (dev->vblank_suspend[crtc])
- return;
-
/*
- * Deal with the possibility of lost vblanks due to disabled interrupts
- * counter overflow may have happened.
+ * Interrupts were disabled prior to this call, so deal with counter
+ * wrap if needed. Note that we may have lost a full
+ * dev->max_vblank_count events if the register is small or the
+ * interrupts were off for a long time.
*/
cur_vblank = dev->driver.get_vblank_counter(dev, crtc);
- DRM_SPINLOCK(&dev->vbl_lock);
- if (cur_vblank < dev->last_vblank[crtc]) {
- if (cur_vblank == dev->last_vblank[crtc] -1)
- diff = 0;
- else {
- diff = dev->max_vblank_count - dev->last_vblank[crtc];
- diff += cur_vblank;
- }
- } else {
- diff = cur_vblank - dev->last_vblank[crtc];
- }
- dev->last_vblank[crtc] = cur_vblank;
- DRM_SPINUNLOCK(&dev->vbl_lock);
+ diff = cur_vblank - dev->last_vblank[crtc];
+ if (cur_vblank < dev->last_vblank[crtc])
+ diff += dev->max_vblank_count;
atomic_add(diff, &dev->_vblank_count[crtc]);
}
atomic_dec(&dev->vblank_refcount[crtc]);
} else {
dev->vblank_enabled[crtc] = 1;
+ drm_update_vblank_count(dev, crtc);
}
}
DRM_SPINUNLOCK(&dev->vbl_lock);
- return ret;
+ return (ret);
}
void
/* Last user schedules interrupt disable */
atomic_dec(&dev->vblank_refcount[crtc]);
if (dev->vblank_refcount[crtc] == 0)
- timeout_add(&dev->vblank_disable_timer, 5*DRM_HZ);
+ timeout_add_sec(&dev->vblank_disable_timer, 5);
DRM_SPINUNLOCK(&dev->vbl_lock);
}
struct drm_modeset_ctl *modeset = data;
int crtc, ret = 0;
+ /* not initialised yet, just noop */
+ if (dev->num_crtcs == 0)
+ goto out;
+
crtc = modeset->crtc;
if (crtc >= dev->num_crtcs) {
ret = EINVAL;
goto out;
}
+ /* If interrupts get enabled or disabled between calls to this ioctl
+ * things can get nasty, so just grab a reference so that the
+ * interrupts keep running through the whole modeset.
+ */
switch (modeset->cmd) {
case _DRM_PRE_MODESET:
- dev->vblank_premodeset[crtc] =
- dev->driver.get_vblank_counter(dev, crtc);
- dev->vblank_suspend[crtc] = 1;
+ if (dev->vblank_inmodeset[crtc] == 0) {
+ dev->vblank_inmodeset[crtc] = 1;
+ drm_vblank_get(dev, crtc);
+ }
break;
case _DRM_POST_MODESET:
- if (dev->vblank_suspend[crtc]) {
- uint32_t new =
- dev->driver.get_vblank_counter(dev, crtc);
- /* Compensate for spurious wraparound */
- if (new < dev->vblank_premodeset[crtc])
- atomic_sub(dev->max_vblank_count + new -
- dev->vblank_premodeset[crtc],
- &dev->_vblank_count[crtc]);
+ if (dev->vblank_inmodeset[crtc]) {
+ DRM_SPINLOCK(&dev->vbl_lock);
+ dev->vblank_disable_allowed = 1;
+ dev->vblank_inmodeset[crtc] = 0;
+ DRM_SPINUNLOCK(&dev->vbl_lock);
+ drm_vblank_put(dev, crtc);
}
- dev->vblank_suspend[crtc] = 0;
break;
default:
ret = EINVAL;
+ break;
}
out:
- return ret;
+ return (ret);
}
int
if (crtc >= dev->num_crtcs)
return EINVAL;
- drm_update_vblank_count(dev, crtc);
+ ret = drm_vblank_get(dev, crtc);
+ if (ret)
+ return (ret);
seq = drm_vblank_count(dev, crtc);
if (vblwait->request.type & _DRM_VBLANK_RELATIVE) {
#endif
ret = EINVAL;
} else {
- if (!dev->vblank_suspend[crtc]) {
- unsigned long cur_vblank;
-
- ret = drm_vblank_get(dev, crtc);
- if (ret)
- return ret;
- while (ret == 0) {
- DRM_SPINLOCK(&dev->vbl_lock);
- if (((cur_vblank = drm_vblank_count(dev, crtc))
- - vblwait->request.sequence) <= (1 << 23)) {
- DRM_SPINUNLOCK(&dev->vbl_lock);
- break;
- }
- ret = msleep(&dev->vbl_queue[crtc],
- &dev->vbl_lock, PZERO | PCATCH,
- "drmvblq", 3 * DRM_HZ);
+ while (ret == 0) {
+ DRM_SPINLOCK(&dev->vbl_lock);
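+ /* Wrap-safe compare: the wait is done once the counter is at
+ * or past the requested sequence (within a 2^23 window). */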
+ if ((drm_vblank_count(dev, crtc)
+ - vblwait->request.sequence) <= (1 << 23)) {
DRM_SPINUNLOCK(&dev->vbl_lock);
+ break;
}
- drm_vblank_put(dev, crtc);
+ ret = msleep(&dev->vbl_queue[crtc],
+ &dev->vbl_lock, PZERO | PCATCH,
+ "drmvblq", 3 * DRM_HZ);
+ DRM_SPINUNLOCK(&dev->vbl_lock);
}
if (ret != EINTR) {
struct timeval now;
+
microtime(&now);
vblwait->reply.tval_sec = now.tv_sec;
vblwait->reply.tval_usec = now.tv_usec;
}
}
+ drm_vblank_put(dev, crtc);
return (ret);
}
void
drm_handle_vblank(struct drm_device *dev, int crtc)
{
- drm_update_vblank_count(dev, crtc);
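+ /* The software counter only ticks while the interrupt is enabled;
+ * drm_vblank_get() resyncs it from the hardware counter. */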
+ atomic_inc(&dev->_vblank_count[crtc]);
DRM_WAKEUP(&dev->vbl_queue[crtc]);
drm_vbl_send_signals(dev, crtc);
}
*/
/*
-
* Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
intel_init_chipset_flush_compat(dev);
#endif
+ intel_opregion_init(dev);
#endif
return ret;
if (dev_priv->mmio_map)
drm_rmmap(dev, dev_priv->mmio_map);
+#ifdef __linux__
+ intel_opregion_free(dev);
+#endif
+
drm_free(dev->dev_private, sizeof(drm_i915_private_t),
DRM_MEM_DRIVER);
#ifdef __linux__
int flip;
} drm_i915_vbl_swap_t;
+#ifdef __linux__
+struct opregion_header;
+struct opregion_acpi;
+struct opregion_swsci;
+struct opregion_asle;
+
+struct intel_opregion {
+ struct opregion_header *header;
+ struct opregion_acpi *acpi;
+ struct opregion_swsci *swsci;
+ struct opregion_asle *asle;
+
+ int enabled;
+};
+#endif
+
typedef struct drm_i915_private {
drm_local_map_t *sarea;
drm_local_map_t *mmio_map;
struct drm_buffer_object *sarea_bo;
struct drm_bo_kmap_obj sarea_kmap;
#endif
+
+#ifdef __linux__
+ struct intel_opregion opregion;
+#endif
+
/* Register state */
u8 saveLBB;
u32 saveDSPACNTR;
extern void i915_driver_irq_preinstall(struct drm_device * dev);
extern int i915_driver_irq_postinstall(struct drm_device * dev);
extern void i915_driver_irq_uninstall(struct drm_device * dev);
+extern void i915_enable_interrupt(struct drm_device *dev);
extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
#endif
+#ifdef __linux__
+/* i915_opregion.c */
+extern int intel_opregion_init(struct drm_device *dev);
+extern void intel_opregion_free(struct drm_device *dev);
+extern void opregion_asle_intr(struct drm_device *dev);
+extern void opregion_enable_asle(struct drm_device *dev);
+#endif
+
#ifdef __linux__
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
extern void intel_init_chipset_flush_compat(struct drm_device *dev);
#define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT (1<<4)
#define I915_DEBUG_INTERRUPT (1<<2)
#define I915_USER_INTERRUPT (1<<1)
+#define I915_ASLE_INTERRUPT (1<<0)
#define EIR 0x020b0
#define EMR 0x020b4
#define ESR 0x020b8
drm_free(swap_hit, sizeof(*swap_hit), DRM_MEM_DRIVER);
}
}
-#if 0
-static int i915_in_vblank(struct drm_device *dev, int pipe)
-{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- unsigned long pipedsl, vblank, vtotal;
- unsigned long vbl_start, vbl_end, cur_line;
-
- pipedsl = pipe ? PIPEBDSL : PIPEADSL;
- vblank = pipe ? VBLANK_B : VBLANK_A;
- vtotal = pipe ? VTOTAL_B : VTOTAL_A;
- vbl_start = I915_READ(vblank) & VBLANK_START_MASK;
- vbl_end = (I915_READ(vblank) >> VBLANK_END_SHIFT) & VBLANK_END_MASK;
-
- cur_line = I915_READ(pipedsl);
-
- if (cur_line >= vbl_start)
- return 1;
-
- return 0;
-}
-#endif
u32 i915_get_vblank_counter(struct drm_device *dev, int plane)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
count = (high1 << 8) | low;
- /*
- * If we're in the middle of the vblank period, the
- * above regs won't have been updated yet, so return
- * an incremented count to stay accurate
- */
-#if 0
- if (i915_in_vblank(dev, pipe))
- count++;
-#endif
- /* count may be reset by other driver(e.g. 2D driver),
- we have no way to know if it is wrapped or resetted
- when count is zero. do a rough guess.
- */
- if (count == 0 && dev->last_vblank[pipe] < dev->max_vblank_count/2)
- dev->last_vblank[pipe] = 0;
-
return count;
}
int vblank = 0;
iir = I915_READ(IIR);
-#if 0
- DRM_DEBUG("flag=%08x\n", iir);
-#endif
- if (iir == 0) {
- DRM_DEBUG ("iir 0x%08x im 0x%08x ie 0x%08x pipea 0x%08x pipeb 0x%08x\n",
- iir,
- I915_READ(IMR),
- I915_READ(IER),
- I915_READ(PIPEASTAT),
- I915_READ(PIPEBSTAT));
+ if (iir == 0)
return IRQ_NONE;
- }
/*
* Clear the PIPE(A|B)STAT regs before the IIR otherwise
}
if (iir & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) {
pipeb_stats = I915_READ(PIPEBSTAT);
+ /* Ack the event */
+ I915_WRITE(PIPEBSTAT, pipeb_stats);
+
+ /* The vblank interrupt gets enabled even if we didn't ask for
+ * it, so make sure it's shut down again. */
+ if (!(dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B))
+ pipeb_stats &= ~(I915_VBLANK_INTERRUPT_ENABLE);
+
if (pipeb_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
PIPE_VBLANK_INTERRUPT_STATUS))
{
vblank++;
drm_handle_vblank(dev, i915_get_plane(dev, 1));
}
+
+#ifdef __linux__
+ if (pipeb_stats & I915_LEGACY_BLC_EVENT_ENABLE)
+ opregion_asle_intr(dev);
+#endif
I915_WRITE(PIPEBSTAT, pipeb_stats);
}
+#ifdef __linux__
+ if (iir & I915_ASLE_INTERRUPT)
+ opregion_asle_intr(dev);
+#endif
+
if (dev_priv->sarea_priv)
dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
}
}
-static void i915_enable_interrupt (struct drm_device *dev)
+void i915_enable_interrupt (struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
dev_priv->irq_enable_reg |= I915_USER_INTERRUPT;
+#ifdef __linux__
+ opregion_enable_asle(dev);
+#endif
+
I915_WRITE(IER, dev_priv->irq_enable_reg);
dev_priv->irq_enabled = 1;
}
struct drm_file *file_priv)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_vblank_pipe_t *pipe = data;
if (!dev_priv) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
- if (pipe->pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) {
- DRM_ERROR("called with invalid pipe 0x%x\n", pipe->pipe);
- return -EINVAL;
- }
-
- dev_priv->vblank_pipe = pipe->pipe;
-
return 0;
}
{
drm_i915_private_t *dev_priv = dev->dev_private;
drm_i915_vblank_pipe_t *pipe = data;
- u16 flag;
if (!dev_priv) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
- flag = I915_READ(IER);
-
- pipe->pipe = 0;
- if (flag & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT)
- pipe->pipe |= DRM_I915_VBLANK_PIPE_A;
- if (flag & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT)
- pipe->pipe |= DRM_I915_VBLANK_PIPE_B;
+ pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
return 0;
}
DRM_SPINUNLOCK_IRQRESTORE(&dev->drw_lock, irqflags);
- drm_update_vblank_count(dev, pipe);
+ /*
+ * We take the ref here and put it when the swap actually completes
+ * in the tasklet.
+ */
+ ret = drm_vblank_get(dev, pipe);
+ if (ret)
+ return ret;
curseq = drm_vblank_count(dev, pipe);
if (seqtype == _DRM_VBLANK_RELATIVE)
swap->sequence = curseq + 1;
} else {
DRM_DEBUG("Missed target sequence\n");
+ drm_vblank_put(dev, pipe);
return -EINVAL;
}
}
irqflags);
DRM_DEBUG("Invalid drawable ID %d\n",
swap->drawable);
+ drm_vblank_put(dev, pipe);
return -EINVAL;
}
DRM_SPINUNLOCK_IRQRESTORE(&dev->drw_lock, irqflags);
+ drm_vblank_put(dev, pipe);
return 0;
}
}
if (dev_priv->swaps_pending >= 100) {
DRM_DEBUG("Too many swaps queued\n");
+ drm_vblank_put(dev, pipe);
return -EBUSY;
}
if (!vbl_swap) {
DRM_ERROR("Failed to allocate memory to queue swap\n");
+ drm_vblank_put(dev, pipe);
return -ENOMEM;
}
DRM_DEBUG("\n");
- ret = drm_vblank_get(dev, pipe);
- if (ret) {
- drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
- return ret;
- }
-
vbl_swap->drw_id = swap->drawable;
vbl_swap->plane = plane;
vbl_swap->sequence = swap->sequence;
if (ret)
return ret;
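+ /* The generic vblank code handles both pipes now; the old per-pipe
+ * selection ioctl is a no-op. */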
+ dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
i915_enable_interrupt(dev);
return -EFAULT;
}
+ box.x2--; /* Hardware expects inclusive bottom-right corner */
+ box.y2--;
+
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
box.x1 = (box.x1) &
R300_CLIPRECT_MASK;
R300_CLIPRECT_MASK;
box.y2 = (box.y2 + R300_CLIPRECT_OFFSET) &
R300_CLIPRECT_MASK;
-
}
+
OUT_RING((box.x1 << R300_CLIPRECT_X_SHIFT) |
(box.y1 << R300_CLIPRECT_Y_SHIFT));
OUT_RING((box.x2 << R300_CLIPRECT_X_SHIFT) |
return 0;
}
-static __inline__ int r300_emit_indx_buffer(drm_radeon_private_t *dev_priv,
- drm_radeon_kcmd_buffer_t *cmdbuf)
+static __inline__ int r300_emit_draw_indx_2(drm_radeon_private_t *dev_priv,
+ drm_radeon_kcmd_buffer_t *cmdbuf)
{
- u32 *cmd = (u32 *) cmdbuf->buf;
- int count, ret;
+ u32 *cmd;
+ int count;
+ int expected_count;
RING_LOCALS;
- count=(cmd[0]>>16) & 0x3fff;
+ cmd = (u32 *) cmdbuf->buf;
+ count = (cmd[0]>>16) & 0x3fff;
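+ /* cmd[1] (VAP_VF_CNTL) holds the index count in its upper 16 bits;
+ * 16-bit indices pack two per dword, so halve it, rounding up. */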
+ expected_count = cmd[1] >> 16;
+ if (!(cmd[1] & R300_VAP_VF_CNTL__INDEX_SIZE_32bit))
+ expected_count = (expected_count+1)/2;
- if ((cmd[1] & 0x8000ffff) != 0x80000810) {
- DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]);
- return -EINVAL;
- }
- ret = !radeon_check_offset(dev_priv, cmd[2]);
- if (ret) {
- DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]);
+ if (count && count != expected_count) {
+ DRM_ERROR("3D_DRAW_INDX_2: packet size %i, expected %i\n",
+ count, expected_count);
return -EINVAL;
}
cmdbuf->buf += (count+2)*4;
cmdbuf->bufsz -= (count+2)*4;
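+ /* A zero-length DRAW_INDX_2 draws from a separate index buffer, so an
+ * INDX_BUFFER packet must follow immediately; validate and emit it. */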
+ if (!count) {
+ drm_r300_cmd_header_t header;
+
+ if (cmdbuf->bufsz < 4*4 + sizeof(header)) {
+ DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER, but stream is too short.\n");
+ return -EINVAL;
+ }
+
+ header.u = *(unsigned int *)cmdbuf->buf;
+
+ cmdbuf->buf += sizeof(header);
+ cmdbuf->bufsz -= sizeof(header);
+ cmd = (u32 *) cmdbuf->buf;
+
+ if (header.header.cmd_type != R300_CMD_PACKET3 ||
+ header.packet3.packet != R300_CMD_PACKET3_RAW ||
+ cmd[0] != CP_PACKET3(RADEON_CP_INDX_BUFFER, 2)) {
+ DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER.\n");
+ return -EINVAL;
+ }
+
+ if ((cmd[1] & 0x8000ffff) != 0x80000810) {
+ DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]);
+ return -EINVAL;
+ }
+ if (!radeon_check_offset(dev_priv, cmd[2])) {
+ DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]);
+ return -EINVAL;
+ }
+ if (cmd[3] != expected_count) {
+ DRM_ERROR("INDX_BUFFER: buffer size %i, expected %i\n",
+ cmd[3], expected_count);
+ return -EINVAL;
+ }
+
+ BEGIN_RING(4);
+ OUT_RING(cmd[0]);
+ OUT_RING_TABLE((int *)(cmdbuf->buf + 4), 3);
+ ADVANCE_RING();
+
+ cmdbuf->buf += 4*4;
+ cmdbuf->bufsz -= 4*4;
+ }
+
return 0;
}
return r300_emit_bitblt_multi(dev_priv, cmdbuf);
case RADEON_CP_INDX_BUFFER:
- /* DRAW_INDX_2 without INDX_BUFFER seems to lock up the gpu */
- return r300_emit_indx_buffer(dev_priv, cmdbuf);
+ DRM_ERROR("packet3 INDX_BUFFER without preceding 3D_DRAW_INDX_2 is illegal.\n");
+ return -EINVAL;
case RADEON_CP_3D_DRAW_IMMD_2:
/* triggers drawing using in-packet vertex data */
case RADEON_CP_3D_DRAW_VBUF_2:
/* triggers drawing of vertex buffers setup elsewhere */
+ dev_priv->track_flush &= ~(RADEON_FLUSH_EMITED |
+ RADEON_PURGE_EMITED);
+ break;
case RADEON_CP_3D_DRAW_INDX_2:
/* triggers drawing using indices to vertex buffer */
/* whenever we send vertex we clear flush & purge */
dev_priv->track_flush &= ~(RADEON_FLUSH_EMITED |
RADEON_PURGE_EMITED);
- break;
+ return r300_emit_draw_indx_2(dev_priv, cmdbuf);
case RADEON_WAIT_FOR_IDLE:
case RADEON_CP_NOP:
/* these packets are safe */
{
uint32_t cache_z, cache_3d, cache_2d;
RING_LOCALS;
-
+
cache_z = R300_ZC_FLUSH;
cache_2d = R300_RB2D_DC_FLUSH;
cache_3d = R300_RB3D_DC_FLUSH;
return 0;
DRM_UDELAY(1);
}
+ DRM_INFO("wait for fifo failed status : 0x%08X 0x%08X\n",
+ RADEON_READ(RADEON_RBBM_STATUS),
+ RADEON_READ(R300_VAP_CNTL_STATUS));
#if RADEON_FIFO_DEBUG
DRM_ERROR("failed!\n");
}
DRM_UDELAY(1);
}
+ DRM_INFO("wait idle failed status : 0x%08X 0x%08X\n",
+ RADEON_READ(RADEON_RBBM_STATUS),
+ RADEON_READ(R300_VAP_CNTL_STATUS));
#if RADEON_FIFO_DEBUG
DRM_ERROR("failed!\n");
*/
dev_priv->vblank_crtc = DRM_RADEON_VBLANK_CRTC1;
- switch(init->func) {
- case RADEON_INIT_R200_CP:
- dev_priv->microcode_version = UCODE_R200;
- break;
- case RADEON_INIT_R300_CP:
- dev_priv->microcode_version = UCODE_R300;
- break;
- default:
- dev_priv->microcode_version = UCODE_R100;
- }
-
dev_priv->do_boxes = 0;
dev_priv->cp_mode = init->cp_mode;
*/
dev_priv->depth_clear.rb3d_cntl = (RADEON_PLANE_MASK_ENABLE |
(dev_priv->color_fmt << 10) |
- (dev_priv->microcode_version ==
- UCODE_R100 ? RADEON_ZBLOCK16 : 0));
+ (dev_priv->chip_family < CHIP_R200 ? RADEON_ZBLOCK16 : 0));
dev_priv->depth_clear.rb3d_zstencilcntl =
(dev_priv->depth_fmt |
dev_priv->gart_info.mapping.size =
dev_priv->gart_info.table_size;
- drm_core_ioremap(&dev_priv->gart_info.mapping, dev);
+ drm_core_ioremap_wc(&dev_priv->gart_info.mapping, dev);
dev_priv->gart_info.addr =
dev_priv->gart_info.mapping.handle;
break;
}
+ dev_priv->chip_family = flags & RADEON_FAMILY_MASK;
if (drm_device_is_agp(dev))
dev_priv->flags |= RADEON_IS_AGP;
else if (drm_device_is_pcie(dev))
CHIP_LAST,
};
-enum radeon_cp_microcode_version {
- UCODE_R100,
- UCODE_R200,
- UCODE_R300,
-};
-
/*
* Chip flags
*/
int usec_timeout;
- int microcode_version;
-
struct {
u32 boxes;
int freelist_timeouts;
int num_gb_pipes;
int track_flush;
+ uint32_t chip_family; /* extract from flags */
} drm_radeon_private_t;
typedef struct drm_radeon_buf_priv {
OUT_RING(CP_PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0)); \
OUT_RING(RADEON_RB3D_ZC_FLUSH | RADEON_RB3D_ZC_FREE); \
} else { \
- OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); \
+ OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0)); \
OUT_RING(R300_ZC_FLUSH | R300_ZC_FREE); \
} \
} while (0)
u32 radeon_get_vblank_counter(struct drm_device *dev, int crtc)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
- u32 crtc_cnt_reg, crtc_status_reg;
if (!dev_priv) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
+ if (crtc < 0 || crtc > 1) {
+ DRM_ERROR("Invalid crtc %d\n", crtc);
+ return -EINVAL;
+ }
+
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) {
- if (crtc == 0) {
- crtc_cnt_reg = R500_D1CRTC_FRAME_COUNT;
- crtc_status_reg = R500_D1CRTC_STATUS;
- } else if (crtc == 1) {
- crtc_cnt_reg = R500_D2CRTC_FRAME_COUNT;
- crtc_status_reg = R500_D2CRTC_STATUS;
- } else
- return -EINVAL;
- return RADEON_READ(crtc_cnt_reg) + (RADEON_READ(crtc_status_reg) & 1);
-
+ if (crtc == 0)
+ return RADEON_READ(R500_D1CRTC_FRAME_COUNT);
+ else
+ return RADEON_READ(R500_D2CRTC_FRAME_COUNT);
} else {
- if (crtc == 0) {
- crtc_cnt_reg = RADEON_CRTC_CRNT_FRAME;
- crtc_status_reg = RADEON_CRTC_STATUS;
- } else if (crtc == 1) {
- crtc_cnt_reg = RADEON_CRTC2_CRNT_FRAME;
- crtc_status_reg = RADEON_CRTC2_STATUS;
- } else {
- return -EINVAL;
- }
- return RADEON_READ(crtc_cnt_reg) + (RADEON_READ(crtc_status_reg) & 1);
+ if (crtc == 0)
+ return RADEON_READ(RADEON_CRTC_CRNT_FRAME);
+ else
+ return RADEON_READ(RADEON_CRTC2_CRNT_FRAME);
}
}
int radeon_vblank_crtc_get(struct drm_device *dev)
{
drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
- u32 flag;
- u32 value;
- if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) {
- flag = RADEON_READ(R500_DxMODE_INT_MASK);
- value = 0;
- if (flag & R500_D1MODE_INT_MASK)
- value |= DRM_RADEON_VBLANK_CRTC1;
-
- if (flag & R500_D2MODE_INT_MASK)
- value |= DRM_RADEON_VBLANK_CRTC2;
- } else {
- flag = RADEON_READ(RADEON_GEN_INT_CNTL);
- value = 0;
- if (flag & RADEON_CRTC_VBLANK_MASK)
- value |= DRM_RADEON_VBLANK_CRTC1;
-
- if (flag & RADEON_CRTC2_VBLANK_MASK)
- value |= DRM_RADEON_VBLANK_CRTC2;
- }
- return value;
+ return dev_priv->vblank_crtc;
}
int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value)
list_for_each(p, heap) {
int start = (p->start + mask) & ~mask;
- if (p->file_priv == 0 && start + size <= p->start + p->size)
+ if (p->file_priv == NULL && start + size <= p->start + p->size)
return split_block(p, start, size, file_priv);
}
/* Assumes a single contiguous range. Needs a special file_priv in
* 'heap' to stop it being subsumed.
*/
- if (p->next->file_priv == 0) {
+ if (p->next->file_priv == NULL) {
struct mem_block *q = p->next;
p->size += q->size;
p->next = q->next;
drm_free(q, sizeof(*q), DRM_MEM_BUFS);
}
- if (p->prev->file_priv == 0) {
+ if (p->prev->file_priv == NULL) {
struct mem_block *q = p->prev;
q->size += p->size;
q->next = p->next;
* 'heap' to stop it being subsumed.
*/
list_for_each(p, heap) {
- while (p->file_priv == 0 && p->next->file_priv == 0) {
+ while (p->file_priv == NULL && p->next->file_priv == NULL) {
struct mem_block *q = p->next;
p->size += q->size;
p->next = q->next;
case RADEON_CP_3D_DRAW_INDX_2:
case RADEON_3D_CLEAR_HIZ:
/* safe but r200 only */
- if (dev_priv->microcode_version != UCODE_R200) {
- DRM_ERROR("Invalid 3d packet for r100-class chip\n");
+ if ((dev_priv->chip_family < CHIP_R200) ||
+ (dev_priv->chip_family > CHIP_RV280)) {
+ DRM_ERROR("Invalid 3d packet for non r200-class chip\n");
return -EINVAL;
}
break;
break;
case RADEON_3D_RNDR_GEN_INDX_PRIM:
- if (dev_priv->microcode_version != UCODE_R100) {
- DRM_ERROR("Invalid 3d packet for r200-class chip\n");
+ if (dev_priv->chip_family > CHIP_RS200) {
+ DRM_ERROR("Invalid 3d packet for non-r100-class chip\n");
return -EINVAL;
}
if (radeon_check_and_fixup_offset(dev_priv, file_priv, &cmd[1])) {
break;
case RADEON_CP_INDX_BUFFER:
- if (dev_priv->microcode_version != UCODE_R200) {
- DRM_ERROR("Invalid 3d packet for r100-class chip\n");
+ /* safe but r200 only */
+ if ((dev_priv->chip_family < CHIP_R200) ||
+ (dev_priv->chip_family > CHIP_RV280)) {
+ DRM_ERROR("Invalid 3d packet for non-r200-class chip\n");
return -EINVAL;
}
if ((cmd[1] & 0x8000ffff) != 0x80000810) {
int tileoffset, nrtilesx, nrtilesy, j;
/* it looks like r200 needs rv-style clears, at least if hierz is not enabled? */
if ((dev_priv->flags & RADEON_HAS_HIERZ)
- && !(dev_priv->microcode_version == UCODE_R200)) {
+ && (dev_priv->chip_family < CHIP_R200)) {
/* FIXME : figure this out for r200 (when hierz is enabled). Or
maybe r200 actually doesn't need to put the low-res z value into
the tile cache like r100, but just needs to clear the hi-level z-buffer?
ADVANCE_RING();
tileoffset += depthpixperline >> 6;
}
- } else if (dev_priv->microcode_version == UCODE_R200) {
+ } else if ((dev_priv->chip_family >= CHIP_R200) &&
+ (dev_priv->chip_family <= CHIP_RV280)) {
/* works for rv250. */
/* find first macro tile (8x2 4x4 z-pixels on rv250) */
tileoffset =
/* TODO don't always clear all hi-level z tiles */
if ((dev_priv->flags & RADEON_HAS_HIERZ)
- && (dev_priv->microcode_version == UCODE_R200)
+ && ((dev_priv->chip_family >= CHIP_R200) &&
+ (dev_priv->chip_family <= CHIP_RV280))
&& (flags & RADEON_USE_HIERZ))
/* r100 and cards without hierarchical z-buffer have no high-level z-buffer */
/* FIXME : the mask supposedly contains low-res z values. So can't set
* rendering a quad into just those buffers. Thus, we have to
* make sure the 3D engine is configured correctly.
*/
- else if ((dev_priv->microcode_version == UCODE_R200) &&
- (flags & (RADEON_DEPTH | RADEON_STENCIL))) {
+ else if ((dev_priv->chip_family >= CHIP_R200) &&
+ (dev_priv->chip_family <= CHIP_RV280) &&
+ (flags & (RADEON_DEPTH | RADEON_STENCIL))) {
int tempPP_CNTL;
int tempRE_CNTL;
orig_nbox = cmdbuf->nbox;
- if (dev_priv->microcode_version == UCODE_R300) {
+ if (dev_priv->chip_family >= CHIP_R300) {
int temp;
temp = r300_do_cp_cmdbuf(dev, file_priv, cmdbuf);