void *driver_priv;
};
-typedef struct drm_lock_data {
- drm_hw_lock_t *hw_lock; /* Hardware lock */
- struct drm_file *file_priv; /* Unique identifier of holding process (NULL is kernel)*/
- int lock_queue; /* Queue of blocked processes */
- unsigned long lock_time; /* Time of last lock in jiffies */
-} drm_lock_data_t;
+struct drm_lock_data {
+ drm_hw_lock_t *hw_lock; /* Hardware lock */
+ /* Unique identifier of holding process (NULL is kernel) */
+ struct drm_file *file_priv;
+ int lock_queue; /* Queue of blocked processes */
+ unsigned long lock_time; /* Time of last lock in jiffies */
+};
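Aside (not part of the patch): the hw_lock->lock word that this structure wraps packs the holding context into its low bits and keeps "held"/"contended" flags in the top two bits; _DRM_LOCKING_CONTEXT(), used later in this diff, just masks those flags off. A minimal userspace sketch of that encoding, assuming the flag values match the shared drm.h definitions:

#include <stdio.h>

/* Values assumed to match the shared drm.h definitions; illustrative only. */
#define _DRM_LOCK_HELD 0x80000000U	/* lock is held */
#define _DRM_LOCK_CONT 0x40000000U	/* a waiter is blocked on the lock */
#define _DRM_LOCKING_CONTEXT(l) ((l) & ~(_DRM_LOCK_HELD | _DRM_LOCK_CONT))

int main(void)
{
	unsigned int word = 3 | _DRM_LOCK_HELD;	/* hypothetical context 3, held */

	printf("held=%d cont=%d context=%u\n",
	    !!(word & _DRM_LOCK_HELD), !!(word & _DRM_LOCK_CONT),
	    _DRM_LOCKING_CONTEXT(word));
	return 0;
}

The struct itself only adds bookkeeping (file_priv, lock_queue, lock_time) around that shared word.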
/* This structure, in the struct drm_device, is always initialized while
* the device is open. dev->dma_lock protects the incrementing of
int max_context;
- drm_lock_data_t lock; /* Information on hardware lock */
+ struct drm_lock_data lock; /* Information on hardware lock */
/* DMA queues (contexts) */
drm_device_dma_t *dma; /* Optional pointer for DMA support */
void drm_write32(drm_local_map_t *, unsigned long, u_int32_t);
/* Locking IOCTL support (drm_lock.c) */
-int drm_lock_take(__volatile__ unsigned int *, unsigned int);
-int drm_lock_transfer(struct drm_device *, __volatile__ unsigned int *,
- unsigned int);
-int drm_lock_free(struct drm_device *, __volatile__ unsigned int *,
- unsigned int);
+int drm_lock_take(struct drm_lock_data *, unsigned int);
+int drm_lock_transfer(struct drm_lock_data *, unsigned int);
+int drm_lock_free(struct drm_lock_data *, unsigned int);
/* Buffer management support (drm_bufs.c) */
unsigned long drm_get_resource_start(struct drm_device *, unsigned int);
if (dev->driver.reclaim_buffers_locked != NULL)
dev->driver.reclaim_buffers_locked(dev, file_priv);
- drm_lock_free(dev, &dev->lock.hw_lock->lock,
+ drm_lock_free(&dev->lock,
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
/* FIXME: may require heavy-handed reset of
retcode = EINTR;
break;
}
- if (drm_lock_take(&dev->lock.hw_lock->lock,
- DRM_KERNEL_CONTEXT)) {
+ if (drm_lock_take(&dev->lock, DRM_KERNEL_CONTEXT)) {
dev->lock.file_priv = file_priv;
dev->lock.lock_time = jiffies;
atomic_inc( &dev->counts[_DRM_STAT_LOCKS] );
}
if (retcode == 0) {
dev->driver.reclaim_buffers_locked(dev, file_priv);
- drm_lock_free(dev, &dev->lock.hw_lock->lock,
- DRM_KERNEL_CONTEXT);
+ drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT);
}
}
DRM_LOCK(); /* XXX drm_lock_take() should do its own locking */
if (dev->locked_task_call == NULL ||
- drm_lock_take(&dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT) == 0) {
+ drm_lock_take(&dev->lock, DRM_KERNEL_CONTEXT) == 0) {
DRM_UNLOCK();
DRM_SPINUNLOCK(&dev->tsk_lock);
return;
dev->locked_task_call(dev);
- drm_lock_free(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT);
+ drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT);
dev->locked_task_call = NULL;
#include "drmP.h"
int
-drm_lock_take(__volatile__ unsigned int *lock, unsigned int context)
+drm_lock_take(struct drm_lock_data *lock_data, unsigned int context)
{
- unsigned int old, new;
+ volatile unsigned int *lock = &lock_data->hw_lock->lock;
+ unsigned int old, new;
do {
old = *lock;
/* This takes a lock forcibly and hands it to context. Should ONLY be used
inside *_unlock to give lock to kernel before calling *_dma_schedule. */
int
-drm_lock_transfer(struct drm_device *dev, __volatile__ unsigned int *lock,
+drm_lock_transfer(struct drm_lock_data *lock_data,
unsigned int context)
{
- unsigned int old, new;
+ volatile unsigned int *lock = &lock_data->hw_lock->lock;
+ unsigned int old, new;
- dev->lock.file_priv = NULL;
+ lock_data->file_priv = NULL;
do {
old = *lock;
new = context | _DRM_LOCK_HELD;
}
int
-drm_lock_free(struct drm_device *dev, __volatile__ unsigned int *lock,
+drm_lock_free(struct drm_lock_data *lock_data,
unsigned int context)
{
- unsigned int old, new;
+ volatile unsigned int *lock = &lock_data->hw_lock->lock;
+ unsigned int old, new;
- dev->lock.file_priv = NULL;
+ lock_data->file_priv = NULL;
do {
old = *lock;
new = 0;
context, _DRM_LOCKING_CONTEXT(old));
return 1;
}
- DRM_WAKEUP_INT((void *)&dev->lock.lock_queue);
+ DRM_WAKEUP_INT((void *)&lock_data->lock_queue);
return 0;
}
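Aside: the bodies of these three helpers are mostly elided by the hunks above, but they share the same shape — read *lock, compute a new word, and retry an atomic compare-and-swap until it sticks, with drm_lock_free() then waking lock_queue. A rough userspace model of that pattern using C11 atomics (illustrative only; the model_* names are made up, the flag values are assumed to match drm.h, and C11 atomics stand in for the kernel's atomic compare-and-set):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define _DRM_LOCK_HELD 0x80000000U	/* assumed drm.h values */
#define _DRM_LOCK_CONT 0x40000000U

/* Stand-in for lock_data->hw_lock->lock. */
static _Atomic unsigned int lockword;

/* Model of drm_lock_take(): mark the lock held by `context`, or flag
 * contention so the eventual release knows to wake sleepers. */
static bool model_lock_take(unsigned int context)
{
	unsigned int old, new;

	do {
		old = atomic_load(&lockword);
		if (old & _DRM_LOCK_HELD)
			new = old | _DRM_LOCK_CONT;
		else
			new = context | _DRM_LOCK_HELD;
	} while (!atomic_compare_exchange_weak(&lockword, &old, new));

	return (old & _DRM_LOCK_HELD) == 0;	/* true if we acquired it */
}

/* Model of drm_lock_transfer(): force the lock into `context`'s hands
 * regardless of the current holder (the real code also clears file_priv). */
static void model_lock_transfer(unsigned int context)
{
	unsigned int old, new;

	do {
		old = atomic_load(&lockword);
		new = context | _DRM_LOCK_HELD;
	} while (!atomic_compare_exchange_weak(&lockword, &old, new));
}

/* Model of drm_lock_free(): clear the word; the real code then wakes
 * anything sleeping on lock_data->lock_queue via DRM_WAKEUP_INT(). */
static void model_lock_free(void)
{
	unsigned int old;

	do {
		old = atomic_load(&lockword);
	} while (!atomic_compare_exchange_weak(&lockword, &old, 0));
}

int main(void)
{
	printf("take(2): %d\n", model_lock_take(2));	/* 1: acquired */
	printf("take(3): %d\n", model_lock_take(3));	/* 0: contended */
	model_lock_transfer(0);				/* hand to "kernel" context */
	model_lock_free();
	printf("take(3): %d\n", model_lock_take(3));	/* 1: acquired */
	return 0;
}

The point of the patch is visible in the model: once the helpers receive the whole drm_lock_data they can reach lock_queue and file_priv themselves, instead of every caller digging out hw_lock->lock and passing dev alongside it.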
int
drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- drm_lock_t *lock = data;
- int ret = 0;
+ drm_lock_t *lock = data;
+ int ret = 0;
if (lock->context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",
DRM_LOCK();
for (;;) {
- if (drm_lock_take(&dev->lock.hw_lock->lock, lock->context)) {
+ if (drm_lock_take(&dev->lock, lock->context)) {
dev->lock.file_priv = file_priv;
dev->lock.lock_time = jiffies;
atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
}
/* Contention */
- ret = DRM_SLEEPLOCK((void *)&dev->lock.lock_queue, &dev->dev_lock,
- PZERO | PCATCH, "drmlk2", 0);
+ ret = DRM_SLEEPLOCK((void *)&dev->lock.lock_queue,
+ &dev->dev_lock, PZERO | PCATCH, "drmlk2", 0);
(Note: the sleep wait-message string stays "drmlk2"; only the argument wrapping changes here.)
if (ret != 0)
break;
}
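Aside: the loop above is the usual take-or-sleep pattern — try drm_lock_take(), and on contention sleep on lock.lock_queue until a release wakes the channel. A compact userspace analogue, with a mutex and condition variable standing in for DRM_SLEEPLOCK()/DRM_WAKEUP_INT() (illustrative only; every name here is invented):

#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-ins for dev->dev_lock and dev->lock.lock_queue. */
static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t lock_queue = PTHREAD_COND_INITIALIZER;
static unsigned int holder;		/* 0 means unlocked in this toy */

/* Analogue of the drm_lock() loop: take the lock or sleep until a
 * release signals the wait channel.  pthread_cond_wait() drops dev_lock
 * while sleeping, the way DRM_SLEEPLOCK() drops dev->dev_lock. */
static void model_lock(unsigned int context)
{
	pthread_mutex_lock(&dev_lock);
	while (holder != 0)			/* contention */
		pthread_cond_wait(&lock_queue, &dev_lock);
	holder = context;
	pthread_mutex_unlock(&dev_lock);
}

/* Analogue of drm_unlock(): release and wake any sleepers. */
static void model_unlock(void)
{
	pthread_mutex_lock(&dev_lock);
	holder = 0;
	pthread_cond_broadcast(&lock_queue);
	pthread_mutex_unlock(&dev_lock);
}

static void *contender(void *arg)
{
	(void)arg;
	model_lock(3);
	printf("context 3 got the lock\n");
	model_unlock();
	return NULL;
}

int main(void)
{
	pthread_t t;

	model_lock(2);
	pthread_create(&t, NULL, contender, NULL);
	model_unlock();			/* lets the contender proceed */
	pthread_join(t, NULL);
	return 0;
}

The signal-handling detail differs, of course: DRM_SLEEPLOCK() with PCATCH can return EINTR, which is what the retcode/ret checks in the hunks above handle.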
int
drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- drm_lock_t *lock = data;
+ drm_lock_t *lock = data;
if (lock->context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",
atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
DRM_LOCK();
- drm_lock_transfer(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT);
+ drm_lock_transfer(&dev->lock, DRM_KERNEL_CONTEXT);
- if (drm_lock_free(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT)) {
+ if (drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT)) {
DRM_ERROR("\n");
}
DRM_UNLOCK();