enum drm_vblank_seq_type {
_DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */
_DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */
+ _DRM_VBLANK_EVENT = 0x4000000, /**< Send event instead of blocking */
_DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */
_DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */
_DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */
};
#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE)
-#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_SIGNAL | _DRM_VBLANK_SECONDARY | \
- _DRM_VBLANK_NEXTONMISS)
+#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_EVENT | _DRM_VBLANK_SIGNAL | \
+ _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)
struct drm_wait_vblank_request {
enum drm_vblank_seq_type type;
#define DRM_COMMAND_BASE 0x40
#define DRM_COMMAND_END 0xA0
+/**
+ * Header for events written back to userspace on the drm fd. The
+ * type defines the type of event, the length specifies the total
+ * length of the event (including the header), and user_data is
+ * typically a 64 bit value passed with the ioctl that triggered the
+ * event. A read on the drm fd will only ever return complete
+ * events; for example, if the read buffer is 100 bytes and two
+ * 64 byte events are pending, only one will be returned.
+ *
+ * Event types 0 - 0x7fffffff are generic drm events, 0x80000000 and
+ * up are chipset specific.
+ */
+struct drm_event {
+ u_int32_t type;
+ u_int32_t length;
+};
+
+#define DRM_EVENT_VBLANK 0x01
+#define DRM_EVENT_FLIP_COMPLETE 0x02
+
+struct drm_event_vblank {
+ struct drm_event base;
+ u_int64_t user_data;
+ u_int32_t tv_sec;
+ u_int32_t tv_usec;
+ u_int32_t sequence;
+ u_int32_t reserved;
+};
+
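To make the read semantics described above concrete, a userspace consumer might look roughly like the sketch below. This is illustrative only: drain_drm_events() is a hypothetical helper, and the drm_event structures and DRM_EVENT_* constants are assumed to be provided by the installed drm header.

#include <sys/types.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <drm.h>	/* assumed include path for the header defining the structures above */

static void
drain_drm_events(int fd)
{
	char buf[4096];
	ssize_t n;
	size_t off;

	/* The kernel only ever copies out whole events. */
	n = read(fd, buf, sizeof(buf));
	if (n <= 0)
		return;

	for (off = 0; off + sizeof(struct drm_event) <= (size_t)n;) {
		struct drm_event ev;

		memcpy(&ev, buf + off, sizeof(ev));
		if (ev.length < sizeof(ev) || off + ev.length > (size_t)n)
			break;	/* defensive; should not happen */
		if (ev.type == DRM_EVENT_VBLANK ||
		    ev.type == DRM_EVENT_FLIP_COMPLETE) {
			struct drm_event_vblank vbl;

			memcpy(&vbl, buf + off, sizeof(vbl));
			printf("event %u: sequence %u at %u.%06u\n",
			    ev.type, vbl.sequence, vbl.tv_sec, vbl.tv_usec);
		}
		off += ev.length;	/* length includes the header */
	}
}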
/* typedef area */
#ifndef __KERNEL__
typedef struct drm_clip_rect drm_clip_rect_t;
int seg_count;
};
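+/*
+ * Kernel-side bookkeeping for an event queued to a drm file but not yet
+ * read back by userspace; destroy() releases the allocation once the
+ * event has been delivered or discarded.
+ */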
+struct drm_pending_event {
+ TAILQ_ENTRY(drm_pending_event) link;
+ struct drm_event *event;
+ struct drm_file *file_priv;
+ void (*destroy)(struct drm_pending_event *);
+};
+
+struct drm_pending_vblank_event {
+ struct drm_pending_event base;
+ struct drm_event_vblank event;
+};
+
+TAILQ_HEAD(drmevlist, drm_pending_event);
+
struct drm_file {
SPLAY_HEAD(drm_obj_tree, drm_handle) obj_tree;
+ struct drmevlist evlist;
struct mutex table_lock;
+ struct selinfo rsel;
SPLAY_ENTRY(drm_file) link;
int authenticated;
unsigned long ioctl_count;
dev_t kdev;
drm_magic_t magic;
+ int event_space;
int flags;
int master;
int minor;
int vb_num; /* number of crtcs */
u_int32_t vb_max; /* counter reg size */
struct drm_vblank {
+ struct drmevlist vbl_events; /* vblank events */
u_int32_t vbl_last; /* Last received */
u_int32_t vbl_count; /* interrupt no. */
int vbl_refs; /* Number of users */
/* VBLANK support */
struct drm_vblank_info *vblank; /* One per crtc */
+ struct mutex event_lock;
pid_t buf_pgid;
struct device *drm_attach_pci(const struct drm_driver_info *,
struct pci_attach_args *, int, struct device *);
dev_type_ioctl(drmioctl);
+dev_type_read(drmread);
+dev_type_poll(drmpoll);
dev_type_open(drmopen);
dev_type_close(drmclose);
dev_type_mmap(drmmmap);
int drm_detach(struct device *, int);
int drm_activate(struct device *, int);
int drmprint(void *, const char *);
+int drm_dequeue_event(struct drm_device *, struct drm_file *, size_t,
+ struct drm_pending_event **);
int drm_getunique(struct drm_device *, void *, struct drm_file *);
int drm_version(struct drm_device *, void *, struct drm_file *);
rw_init(&dev->dev_lock, "drmdevlk");
mtx_init(&dev->lock.spinlock, IPL_NONE);
+ mtx_init(&dev->event_lock, IPL_TTY);
TAILQ_INIT(&dev->maplist);
SPLAY_INIT(&dev->files);
file_priv->kdev = kdev;
file_priv->flags = flags;
file_priv->minor = minor(kdev);
+ TAILQ_INIT(&file_priv->evlist);
+ file_priv->event_space = 4096; /* 4k for event buffer */
DRM_DEBUG("minor = %d\n", file_priv->minor);
/* for compatibility root is always authenticated */
int
drmclose(dev_t kdev, int flags, int fmt, struct proc *p)
{
- struct drm_device *dev = drm_get_device_from_kdev(kdev);
- struct drm_file *file_priv;
- int retcode = 0;
+ struct drm_device *dev = drm_get_device_from_kdev(kdev);
+ struct drm_file *file_priv;
+ struct drm_pending_event *ev, *evtmp;
+ int i, retcode = 0;
if (dev == NULL)
return (ENXIO);
if (dev->driver->flags & DRIVER_DMA)
drm_reclaim_buffers(dev, file_priv);
+ mtx_enter(&dev->event_lock);
+ for (i = 0; i < dev->vblank->vb_num; i++) {
+ struct drmevlist *list = &dev->vblank->vb_crtcs[i].vbl_events;
+ for (ev = TAILQ_FIRST(list); ev != TAILQ_END(list);
+ ev = evtmp) {
+ evtmp = TAILQ_NEXT(ev, link);
+ if (ev->file_priv == file_priv) {
+ TAILQ_REMOVE(list, ev, link);
+ drm_vblank_put(dev, i);
+ ev->destroy(ev);
+ }
+ }
+ }
+ while ((ev = TAILQ_FIRST(&file_priv->evlist)) != NULL) {
+ TAILQ_REMOVE(&file_priv->evlist, ev, link);
+ ev->destroy(ev);
+ }
+ mtx_leave(&dev->event_lock);
+
DRM_LOCK();
if (dev->driver->flags & DRIVER_GEM) {
struct drm_handle *han;
return (EINVAL);
}
+int
+drmread(dev_t kdev, struct uio *uio, int ioflag)
+{
+ struct drm_device *dev = drm_get_device_from_kdev(kdev);
+ struct drm_file *file_priv;
+ struct drm_pending_event *ev;
+ int error = 0;
+
+ if (dev == NULL)
+ return (ENXIO);
+
+ DRM_LOCK();
+ file_priv = drm_find_file_by_minor(dev, minor(kdev));
+ DRM_UNLOCK();
+ if (file_priv == NULL)
+ return (ENXIO);
+
+ /*
+ * The semantics are a little weird here. We will wait until we
+ * have events to process, but as soon as we have events we will
+ * only deliver as many as are currently queued.
+ * Note that events are atomic: if a whole event does not fit in
+ * the read buffer, none of it is read out.
+ */
+ mtx_enter(&dev->event_lock);
+ while (error == 0 && TAILQ_EMPTY(&file_priv->evlist)) {
+ if (ioflag & IO_NDELAY) {
+ mtx_leave(&dev->event_lock);
+ return (EAGAIN);
+ }
+ error = msleep(&file_priv->evlist, &dev->event_lock,
+ PWAIT | PCATCH, "drmread", 0);
+ }
+ if (error) {
+ mtx_leave(&dev->event_lock);
+ return (error);
+ }
+ while (drm_dequeue_event(dev, file_priv, uio->uio_resid, &ev)) {
+ MUTEX_ASSERT_UNLOCKED(&dev->event_lock);
+ /* XXX we always destroy the event on error. */
+ error = uiomove(ev->event, ev->event->length, uio);
+ ev->destroy(ev);
+ if (error)
+ break;
+ mtx_enter(&dev->event_lock);
+ }
+ MUTEX_ASSERT_UNLOCKED(&dev->event_lock);
+
+ return (error);
+}
+
+/*
+ * Dequeue an event from the file priv in question, returning 1 if an
+ * event was found. We take the resid from the read as a parameter because
+ * we will only dequeue an event if the read buffer has space to fit the
+ * entire thing.
+ *
+ * We are called locked, but we will *unlock* the queue on return so that
+ * we may sleep to copyout the event.
+ */
+int
+drm_dequeue_event(struct drm_device *dev, struct drm_file *file_priv,
+ size_t resid, struct drm_pending_event **out)
+{
+ struct drm_pending_event *ev;
+ int gotone = 0;
+
+ MUTEX_ASSERT_LOCKED(&dev->event_lock);
+ if ((ev = TAILQ_FIRST(&file_priv->evlist)) == NULL ||
+ ev->event->length > resid)
+ goto out;
+
+ TAILQ_REMOVE(&file_priv->evlist, ev, link);
+ file_priv->event_space += ev->event->length;
+ *out = ev;
+ gotone = 1;
+
+out:
+ mtx_leave(&dev->event_lock);
+
+ return (gotone);
+}
+
+/* XXX kqfilter ... */
+int
+drmpoll(dev_t kdev, int events, struct proc *p)
+{
+ struct drm_device *dev = drm_get_device_from_kdev(kdev);
+ struct drm_file *file_priv;
+ int revents = 0;
+
+ if (dev == NULL)
+ return (POLLERR);
+
+ DRM_LOCK();
+ file_priv = drm_find_file_by_minor(dev, minor(kdev));
+ DRM_UNLOCK();
+ if (file_priv == NULL)
+ return (POLLERR);
+
+ mtx_enter(&dev->event_lock);
+ if (events & (POLLIN | POLLRDNORM)) {
+ if (!TAILQ_EMPTY(&file_priv->evlist))
+ revents |= events & (POLLIN | POLLRDNORM);
+ else
+ selrecord(p, &file_priv->rsel);
+ }
+ mtx_leave(&dev->event_lock);
+
+ return (revents);
+}
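For completeness, the poll/read pair added here would typically be driven from userspace along these lines (again a hypothetical helper, reusing the drain_drm_events() sketch shown after the event structures):

#include <poll.h>

static int
wait_and_drain(int fd)
{
	struct pollfd pfd;

	pfd.fd = fd;
	pfd.events = POLLIN;
	pfd.revents = 0;

	/* Block until at least one complete event is queued on the fd. */
	if (poll(&pfd, 1, -1) == -1)
		return (-1);
	if (pfd.revents & POLLIN)
		drain_drm_events(fd);
	return (0);
}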
+
struct drm_local_map *
drm_getsarea(struct drm_device *dev)
{
void drm_update_vblank_count(struct drm_device *, int);
void vblank_disable(void *);
+int drm_queue_vblank_event(struct drm_device *, int,
+ union drm_wait_vblank *, struct drm_file *);
+void drm_handle_vblank_events(struct drm_device *, int);
#ifdef DRM_VBLANK_DEBUG
#define DPRINTF(x...) do { printf(x); } while(/* CONSTCOND */ 0)
int
drm_vblank_init(struct drm_device *dev, int num_crtcs)
{
+ int i;
+
dev->vblank = malloc(sizeof(*dev->vblank) + (num_crtcs *
sizeof(struct drm_vblank)), M_DRM, M_WAITOK | M_CANFAIL | M_ZERO);
if (dev->vblank == NULL)
dev->vblank->vb_num = num_crtcs;
mtx_init(&dev->vblank->vb_lock, IPL_TTY);
timeout_set(&dev->vblank->vb_disable_timer, vblank_disable, dev);
+ for (i = 0; i < num_crtcs; i++)
+ TAILQ_INIT(&dev->vblank->vb_crtcs[i].vbl_events);
return (0);
}
if ((ret = drm_vblank_get(dev, crtc)) != 0)
return (ret);
- seq = drm_vblank_count(dev,crtc);
+ seq = drm_vblank_count(dev, crtc);
if (vblwait->request.type & _DRM_VBLANK_RELATIVE) {
vblwait->request.sequence += seq;
vblwait->request.sequence = seq + 1;
}
+ if (flags & _DRM_VBLANK_EVENT)
+ return (drm_queue_vblank_event(dev, crtc, vblwait, file_priv));
+
DPRINTF("%s: %d waiting on %d, current %d\n", __func__, crtc,
vblwait->request.sequence, drm_vblank_count(dev, crtc));
DRM_WAIT_ON(ret, &dev->vblank->vb_crtcs[crtc], &dev->vblank->vb_lock,
return (ret);
}
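From userspace, a request down the new event path would look roughly like the following sketch. request_vblank_event() is a hypothetical helper; DRM_IOCTL_WAIT_VBLANK and union drm_wait_vblank come from the drm headers, and the cookie is an arbitrary caller token that is handed back as user_data in the resulting drm_event_vblank.

#include <sys/ioctl.h>
#include <string.h>
#include <drm.h>	/* assumed include path for the drm ioctl definitions */

static int
request_vblank_event(int fd, unsigned long cookie)
{
	union drm_wait_vblank vbl;

	memset(&vbl, 0, sizeof(vbl));
	vbl.request.type = _DRM_VBLANK_RELATIVE | _DRM_VBLANK_EVENT;
	vbl.request.sequence = 1;	/* one vblank from now */
	vbl.request.signal = cookie;	/* returned as user_data */

	return (ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl));
}

With _DRM_VBLANK_EVENT set the ioctl returns immediately and the completion arrives later as a DRM_EVENT_VBLANK on the fd instead of blocking the caller.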
+int
+drm_queue_vblank_event(struct drm_device *dev, int crtc,
+ union drm_wait_vblank *vblwait, struct drm_file *file_priv)
+{
+ struct drm_pending_vblank_event *vev;
+ struct timeval now;
+ u_int seq;
+
+ vev = drm_calloc(1, sizeof(*vev));
+ if (vev == NULL)
+ return (ENOMEM);
+
+ vev->event.base.type = DRM_EVENT_VBLANK;
+ vev->event.base.length = sizeof(vev->event);
+ vev->event.user_data = vblwait->request.signal;
+ vev->base.event = &vev->event.base;
+ vev->base.file_priv = file_priv;
+ vev->base.destroy = (void (*) (struct drm_pending_event *))drm_free;
+
+ microtime(&now);
+
+ mtx_enter(&dev->event_lock);
+ if (file_priv->event_space < sizeof(vev->event)) {
+ mtx_leave(&dev->event_lock);
+ drm_free(vev);
+ return (ENOMEM);
+ }
+
+ seq = drm_vblank_count(dev, crtc);
+ file_priv->event_space -= sizeof(vev->event);
+
+ DPRINTF("%s: queueing event %d on crtc %d\n", __func__, seq, crtc);
+
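+ /*
+ * Sequence numbers are compared modulo 2^32: a difference of at most
+ * 2^23 means the requested sequence has already been reached.
+ */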
+ if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) &&
+ (seq - vblwait->request.sequence) <= (1 << 23)) {
+ vblwait->request.sequence = seq + 1;
+ vblwait->reply.sequence = vblwait->request.sequence;
+ }
+
+ vev->event.sequence = vblwait->request.sequence;
+ if ((seq - vblwait->request.sequence) <= (1 << 23)) {
+ vev->event.tv_sec = now.tv_sec;
+ vev->event.tv_usec = now.tv_usec;
+ DPRINTF("%s: already passed, dequeuing: crtc %d, value %d\n",
+ __func__, crtc, seq);
+ drm_vblank_put(dev, crtc);
+ TAILQ_INSERT_TAIL(&file_priv->evlist, &vev->base, link);
+ wakeup(&file_priv->evlist);
+ selwakeup(&file_priv->rsel);
+ } else {
+ TAILQ_INSERT_TAIL(&dev->vblank->vb_crtcs[crtc].vbl_events,
+ &vev->base, link);
+ }
+ mtx_leave(&dev->event_lock);
+
+ return (0);
+}
+
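+/*
+ * Called from the vblank interrupt path: move any queued events for this
+ * crtc whose requested sequence has been reached onto the owning file's
+ * event list and wake up readers and pollers.
+ */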
+void
+drm_handle_vblank_events(struct drm_device *dev, int crtc)
+{
+ struct drmevlist *list;
+ struct drm_pending_event *ev, *tmp;
+ struct drm_pending_vblank_event *vev;
+ struct timeval now;
+ u_int seq;
+
+ list = &dev->vblank->vb_crtcs[crtc].vbl_events;
+ microtime(&now);
+ seq = drm_vblank_count(dev, crtc);
+
+ mtx_enter(&dev->event_lock);
+ for (ev = TAILQ_FIRST(list); ev != TAILQ_END(list); ev = tmp) {
+ tmp = TAILQ_NEXT(ev, link);
+
+ vev = (struct drm_pending_vblank_event *)ev;
+
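+ /*
+ * A modulo-2^32 difference above 2^23 means this event's requested
+ * sequence is still in the future; leave it queued.
+ */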
+ if ((seq - vev->event.sequence) > (1 << 23))
+ continue;
+ DPRINTF("%s: got vblank event on crtc %d, value %d\n",
+ __func__, crtc, seq);
+
+ vev->event.sequence = seq;
+ vev->event.tv_sec = now.tv_sec;
+ vev->event.tv_usec = now.tv_usec;
+ drm_vblank_put(dev, crtc);
+ TAILQ_REMOVE(list, ev, link);
+ TAILQ_INSERT_TAIL(&ev->file_priv->evlist, ev, link);
+ wakeup(&ev->file_priv->evlist);
+ selwakeup(&ev->file_priv->rsel);
+ }
+ mtx_leave(&dev->event_lock);
+}
+
void
drm_handle_vblank(struct drm_device *dev, int crtc)
{
dev->vblank->vb_crtcs[crtc].vbl_count++;
wakeup(&dev->vblank->vb_crtcs[crtc]);
mtx_leave(&dev->vblank->vb_lock);
+ drm_handle_vblank_events(dev, crtc);
}
-/* $OpenBSD: conf.h,v 1.101 2010/07/15 04:46:33 mglocker Exp $ */
+/* $OpenBSD: conf.h,v 1.102 2010/07/18 21:01:06 oga Exp $ */
/* $NetBSD: conf.h,v 1.33 1996/05/03 20:03:32 christos Exp $ */
/*-
/* open, close, read, ioctl, poll, mmap, nokqfilter */
#define cdev_drm_init(c,n) { \
- dev_init(c,n,open), dev_init(c,n,close), (dev_type_read((*))) enodev, \
+ dev_init(c,n,open), dev_init(c,n,close), dev_init(c,n,read), \
(dev_type_write((*))) enodev, dev_init(c,n,ioctl), \
- (dev_type_stop((*))) enodev, 0, selfalse, \
+ (dev_type_stop((*))) enodev, 0, dev_init(c,n,poll), \
dev_init(c,n,mmap), 0, D_CLONE }
/* open, close, ioctl */