-/* $OpenBSD: drmP.h,v 1.191 2015/04/11 04:36:10 jsg Exp $ */
+/* $OpenBSD: drmP.h,v 1.192 2015/04/12 03:54:10 jsg Exp $ */
/* drmP.h -- Private header for Direct Rendering Manager -*- linux-c -*-
* Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com
*/
uint32_t max_vblank_count;
struct mutex event_lock;
- int *vbl_queue;
+ wait_queue_head_t *vbl_queue;
atomic_t *_vblank_count;
struct timeval *_vblank_time;
struct mutex vblank_time_lock;
-/* $OpenBSD: drm_irq.c,v 1.61 2015/04/06 12:25:10 jsg Exp $ */
+/* $OpenBSD: drm_irq.c,v 1.62 2015/04/12 03:54:10 jsg Exp $ */
/**
* \file drm_irq.c
* IRQ support
dev->num_crtcs = num_crtcs;
- dev->vbl_queue = kmalloc(sizeof(int) * num_crtcs,
+ dev->vbl_queue = kmalloc(sizeof(wait_queue_head_t) * num_crtcs,
GFP_KERNEL);
if (!dev->vbl_queue)
goto err;
/* Zero per-crtc vblank stuff */
for (i = 0; i < num_crtcs; i++) {
+ init_waitqueue_head(&dev->vbl_queue[i]);
atomic_set(&dev->_vblank_count[i], 0);
atomic_set(&dev->vblank_refcount[i], 0);
}
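
Each element of dev->vbl_queue is a wait channel with an embedded mutex (see the wait_queue_head definition in drm_linux.h below); the interrupt path wakes it with wake_up(&dev->vbl_queue[crtc]). A minimal sketch of a waiter on one of these queues, using the same msleep(9) pattern this patch adopts in radeon_sa.c; vblank_passed(), pipe and target are hypothetical, drm_vblank_count() is the existing accessor:

	/*
	 * Hypothetical waiter: sleep on the per-CRTC queue until the
	 * vblank counter reaches `target`.  msleep(9) atomically
	 * releases and retakes the embedded lock around each sleep.
	 */
	mtx_enter(&dev->vbl_queue[pipe].lock);
	while (!vblank_passed(drm_vblank_count(dev, pipe), target))
		msleep(&dev->vbl_queue[pipe], &dev->vbl_queue[pipe].lock,
		    PZERO, "vblwt", 0);
	mtx_leave(&dev->vbl_queue[pipe].lock);
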
-/* $OpenBSD: drm_linux.h,v 1.20 2015/04/11 14:39:37 jsg Exp $ */
+/* $OpenBSD: drm_linux.h,v 1.21 2015/04/12 03:54:10 jsg Exp $ */
/*
* Copyright (c) 2013, 2014 Mark Kettenis
*
typedef bus_addr_t dma_addr_t;
typedef bus_addr_t phys_addr_t;
-typedef int wait_queue_head_t;
#define __force
#define __always_unused
#define write_lock(rwl) rw_enter_write(rwl)
#define write_unlock(rwl) rw_exit_write(rwl)
+struct wait_queue_head {
+ struct mutex lock;
+};
+typedef struct wait_queue_head wait_queue_head_t;
+
+static inline void
+init_waitqueue_head(wait_queue_head_t *wq)
+{
+ mtx_init(&wq->lock, IPL_NONE);
+}
+
#define wake_up(x) wakeup(x)
#define wake_up_all(x) wakeup(x)
#define wake_up_all_locked(x) wakeup(x)
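
Linux code that takes wq.lock directly (radeon_sa.c below) keeps compiling against this struct, and the wake_up() family degrades to a plain wakeup(9) on the queue's address. A minimal sketch of the intended init/sleep/wake pairing, assuming only msleep(9)/wakeup(9) semantics; my_wq, cond and the function names are hypothetical:

	static wait_queue_head_t my_wq;		/* hypothetical */
	static int cond;			/* hypothetical condition */

	void
	setup(void)
	{
		init_waitqueue_head(&my_wq);	/* mtx_init()s wq.lock */
	}

	void
	waiter(void)
	{
		mtx_enter(&my_wq.lock);
		while (!cond)
			/* atomically drops and retakes my_wq.lock */
			msleep(&my_wq, &my_wq.lock, PZERO, "mywq", 0);
		mtx_leave(&my_wq.lock);
	}

	void
	waker(void)
	{
		mtx_enter(&my_wq.lock);
		cond = 1;
		wake_up(&my_wq);		/* expands to wakeup(&my_wq) */
		mtx_leave(&my_wq.lock);
	}
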
-/* $OpenBSD: i915_gem.c,v 1.89 2015/04/08 02:28:13 jsg Exp $ */
+/* $OpenBSD: i915_gem.c,v 1.90 2015/04/12 03:54:10 jsg Exp $ */
/*
* Copyright (c) 2008-2009 Owain G. Ainsworth <oga@openbsd.org>
*
i915_gem_reset_fences(dev);
i915_gem_detect_bit_6_swizzle(dev);
-#if 0
init_waitqueue_head(&dev_priv->pending_flip_queue);
-#endif
dev_priv->mm.interruptible = true;
-/* $OpenBSD: intel_ringbuffer.c,v 1.25 2015/02/12 04:56:03 kettenis Exp $ */
+/* $OpenBSD: intel_ringbuffer.c,v 1.26 2015/04/12 03:54:10 jsg Exp $ */
/*
* Copyright © 2008-2010 Intel Corporation
*
ring->size = 32 * PAGE_SIZE;
memset(ring->sync_seqno, 0, sizeof(ring->sync_seqno));
-// init_waitqueue_head(&ring->irq_queue);
+ init_waitqueue_head(&ring->irq_queue);
if (I915_NEED_GFX_HWS(dev)) {
ret = init_status_page(ring);
-/* $OpenBSD: intel_ringbuffer.h,v 1.4 2015/04/03 13:10:59 jsg Exp $ */
+/* $OpenBSD: intel_ringbuffer.h,v 1.5 2015/04/12 03:54:10 jsg Exp $ */
#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_
u32 outstanding_lazy_request;
bool gpu_caches_dirty;
- int irq_queue;
+ wait_queue_head_t irq_queue;
/**
* Do an explicit TLB flush before MI_SET_CONTEXT
-/* $OpenBSD: radeon.h,v 1.13 2015/04/11 05:10:13 jsg Exp $ */
+/* $OpenBSD: radeon.h,v 1.14 2015/04/12 03:54:10 jsg Exp $ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
* alignment).
*/
struct radeon_sa_manager {
- int wq;
- struct mutex wq_lock;
+ wait_queue_head_t wq;
struct radeon_bo *bo;
struct list_head *hole;
struct list_head flist[RADEON_NUM_RINGS];
atomic_t ring_int[RADEON_NUM_RINGS];
bool crtc_vblank_int[RADEON_MAX_CRTCS];
atomic_t pflip[RADEON_MAX_CRTCS];
- int vblank_queue;
+ wait_queue_head_t vblank_queue;
bool hpd[RADEON_MAX_HPD_PINS];
bool afmt[RADEON_MAX_AFMT_BLOCKS];
union radeon_irq_stat_regs stat_regs;
struct radeon_scratch scratch;
struct radeon_mman mman;
struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS];
- int fence_queue;
+ wait_queue_head_t fence_queue;
struct rwlock ring_lock;
struct radeon_ring ring[RADEON_NUM_RINGS];
bool ib_pool_ready;
-/* $OpenBSD: radeon_device.c,v 1.12 2015/04/06 12:25:10 jsg Exp $ */
+/* $OpenBSD: radeon_device.c,v 1.13 2015/04/12 03:54:10 jsg Exp $ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
rw_init(&rdev->gpu_clock_mutex, "gpuclk");
rw_init(&rdev->pm.mclk_lock, "mclk");
rw_init(&rdev->exclusive_lock, "rdnexc");
-#ifdef notyet
init_waitqueue_head(&rdev->irq.vblank_queue);
-#endif
r = radeon_gem_init(rdev);
if (r)
return r;
-/* $OpenBSD: radeon_fence.c,v 1.5 2015/04/06 07:38:49 jsg Exp $ */
+/* $OpenBSD: radeon_fence.c,v 1.6 2015/04/12 03:54:10 jsg Exp $ */
/*
* Copyright 2009 Jerome Glisse.
* All Rights Reserved.
{
int ring;
-#ifdef notyet
init_waitqueue_head(&rdev->fence_queue);
-#endif
for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
radeon_fence_driver_init_ring(rdev, ring);
}
-/* $OpenBSD: radeon_sa.c,v 1.7 2015/04/06 07:38:49 jsg Exp $ */
+/* $OpenBSD: radeon_sa.c,v 1.8 2015/04/12 03:54:10 jsg Exp $ */
/*
* Copyright 2011 Red Hat Inc.
* All Rights Reserved.
{
int i, r;
-#ifdef notyet
init_waitqueue_head(&sa_manager->wq);
-#endif
- mtx_init(&sa_manager->wq_lock, IPL_NONE);
sa_manager->bo = NULL;
sa_manager->size = size;
sa_manager->domain = domain;
INIT_LIST_HEAD(&(*sa_bo)->olist);
INIT_LIST_HEAD(&(*sa_bo)->flist);
- spin_lock(&sa_manager->wq_lock);
+ spin_lock(&sa_manager->wq.lock);
do {
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
fences[i] = NULL;
if (radeon_sa_bo_try_alloc(sa_manager, *sa_bo,
size, align)) {
- spin_unlock(&sa_manager->wq_lock);
+ spin_unlock(&sa_manager->wq.lock);
return 0;
}
/* see if we can skip over some allocations */
} while (radeon_sa_bo_next_hole(sa_manager, fences, tries));
- spin_unlock(&sa_manager->wq_lock);
+ spin_unlock(&sa_manager->wq.lock);
r = radeon_fence_wait_any(rdev, fences, false);
- spin_lock(&sa_manager->wq_lock);
+ spin_lock(&sa_manager->wq.lock);
/* if we have nothing to wait for block */
if (r == -ENOENT && block) {
r = 0;
while (r == 0) {
if (radeon_sa_event(sa_manager, size, align))
break;
- error = msleep(&sa_manager->wq, &sa_manager->wq_lock,
+ error = msleep(&sa_manager->wq, &sa_manager->wq.lock,
PZERO | PCATCH, "samgr", 0);
if (error == ERESTART)
error = EINTR; /* XXX */
} while (!r);
- spin_unlock(&sa_manager->wq_lock);
+ spin_unlock(&sa_manager->wq.lock);
kfree(*sa_bo);
*sa_bo = NULL;
return r;
}
sa_manager = (*sa_bo)->manager;
- spin_lock(&sa_manager->wq_lock);
+ spin_lock(&sa_manager->wq.lock);
if (fence && !radeon_fence_signaled(fence)) {
(*sa_bo)->fence = radeon_fence_ref(fence);
list_add_tail(&(*sa_bo)->flist,
radeon_sa_bo_remove_locked(*sa_bo);
}
wake_up_all_locked(&sa_manager->wq);
- spin_unlock(&sa_manager->wq_lock);
+ spin_unlock(&sa_manager->wq.lock);
*sa_bo = NULL;
}
{
struct radeon_sa_bo *i;
- spin_lock(&sa_manager->wq_lock);
+ spin_lock(&sa_manager->wq.lock);
list_for_each_entry(i, &sa_manager->olist, olist) {
if (&i->olist == sa_manager->hole) {
seq_printf(m, ">");
}
seq_printf(m, "\n");
}
- spin_unlock(&sa_manager->wq_lock);
+ spin_unlock(&sa_manager->wq.lock);
}
#endif
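
The msleep() loop in radeon_sa_bo_new() above mirrors Linux's wait_event_interruptible_locked(): the allocation condition is rechecked with wq.lock held and the lock is released atomically across each sleep, so a wakeup between the check and the sleep cannot be lost. For comparison, a sketch of the upstream form (Linux API, not part of this tree):

	/* Upstream equivalent of the msleep() loop; the _locked
	 * variant expects wq.lock held on entry and held on return. */
	r = wait_event_interruptible_locked(sa_manager->wq,
		radeon_sa_event(sa_manager, size, align));
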
-/* $OpenBSD: ttm_bo.c,v 1.14 2015/04/06 05:35:29 jsg Exp $ */
+/* $OpenBSD: ttm_bo.c,v 1.15 2015/04/12 03:54:10 jsg Exp $ */
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
refcount_init(&bo->list_kref, 1);
atomic_set(&bo->cpu_writers, 0);
atomic_set(&bo->reserved, 1);
-#ifdef notyet
init_waitqueue_head(&bo->event_queue);
-#endif
INIT_LIST_HEAD(&bo->lru);
INIT_LIST_HEAD(&bo->ddestroy);
INIT_LIST_HEAD(&bo->swap);
-/* $OpenBSD: ttm_bo_api.h,v 1.2 2015/02/10 06:19:36 jsg Exp $ */
+/* $OpenBSD: ttm_bo_api.h,v 1.3 2015/04/12 03:54:10 jsg Exp $ */
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
u_int kref;
u_int list_kref;
- int event_queue;
+ wait_queue_head_t event_queue;
/**
* Members protected by the bo::reserved lock.
-/* $OpenBSD: ttm_bo_util.c,v 1.12 2015/04/08 04:03:06 jsg Exp $ */
+/* $OpenBSD: ttm_bo_util.c,v 1.13 2015/04/12 03:54:10 jsg Exp $ */
/**************************************************************************
*
* Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
* TODO: Explicit member copy would probably be better here.
*/
-#ifdef notyet
init_waitqueue_head(&fbo->event_queue);
-#endif
INIT_LIST_HEAD(&fbo->ddestroy);
INIT_LIST_HEAD(&fbo->lru);
INIT_LIST_HEAD(&fbo->swap);
-/* $OpenBSD: ttm_lock.c,v 1.4 2015/04/06 05:35:29 jsg Exp $ */
+/* $OpenBSD: ttm_lock.c,v 1.5 2015/04/12 03:54:10 jsg Exp $ */
/**************************************************************************
*
* Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
void ttm_lock_init(struct ttm_lock *lock)
{
mtx_init(&lock->lock, IPL_NONE);
-#ifdef notyet
init_waitqueue_head(&lock->queue);
-#endif
lock->rw = 0;
lock->flags = 0;
lock->kill_takers = false;
-/* $OpenBSD: ttm_lock.h,v 1.1 2013/08/12 04:11:53 jsg Exp $ */
+/* $OpenBSD: ttm_lock.h,v 1.2 2015/04/12 03:54:10 jsg Exp $ */
/**************************************************************************
*
* Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
struct ttm_lock {
struct ttm_base_object base;
- int queue;
+ wait_queue_head_t queue;
struct mutex lock;
int32_t rw;
uint32_t flags;