make wait_queue_head a struct with a mutex
author     jsg <jsg@openbsd.org>
           Sun, 12 Apr 2015 03:54:10 +0000 (03:54 +0000)
committer  jsg <jsg@openbsd.org>
           Sun, 12 Apr 2015 03:54:10 +0000 (03:54 +0000)
better matches Linux behaviour
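
The diff below replaces the bare int that previously stood in for Linux's
wait_queue_head_t with a struct carrying an OpenBSD mutex(9).  Linux code
that takes wq.lock around a condition check (as the radeon_sa.c hunk does)
now compiles unchanged, and the init_waitqueue_head() calls that had been
disabled with #if 0/#ifdef notyet can be re-enabled.  What follows is a
minimal usage sketch, not part of the commit: it restates the new
drm_linux.h definitions so it is self-contained, and every example_* name
is hypothetical.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mutex.h>
#include <machine/intr.h>

/* restated from the drm_linux.h hunk below */
struct wait_queue_head {
	struct mutex lock;
};
typedef struct wait_queue_head wait_queue_head_t;

static inline void
init_waitqueue_head(wait_queue_head_t *wq)
{
	mtx_init(&wq->lock, IPL_NONE);
}

static wait_queue_head_t example_wq;	/* hypothetical */
static int example_cond;		/* hypothetical condition */

void
example_attach(void)
{
	init_waitqueue_head(&example_wq);
}

int
example_wait(void)
{
	int error = 0;

	mtx_enter(&example_wq.lock);
	while (example_cond == 0) {
		/* msleep(9) releases and retakes example_wq.lock */
		error = msleep(&example_wq, &example_wq.lock,
		    PZERO | PCATCH, "exwait", 0);
		if (error)
			break;
	}
	mtx_leave(&example_wq.lock);
	return (error);
}

void
example_signal(void)
{
	mtx_enter(&example_wq.lock);
	example_cond = 1;
	/* drm_linux.h maps wake_up_all_locked() to wakeup() */
	wakeup(&example_wq);
	mtx_leave(&example_wq.lock);
}
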

15 files changed:
sys/dev/pci/drm/drmP.h
sys/dev/pci/drm/drm_irq.c
sys/dev/pci/drm/drm_linux.h
sys/dev/pci/drm/i915/i915_gem.c
sys/dev/pci/drm/i915/intel_ringbuffer.c
sys/dev/pci/drm/i915/intel_ringbuffer.h
sys/dev/pci/drm/radeon/radeon.h
sys/dev/pci/drm/radeon/radeon_device.c
sys/dev/pci/drm/radeon/radeon_fence.c
sys/dev/pci/drm/radeon/radeon_sa.c
sys/dev/pci/drm/ttm/ttm_bo.c
sys/dev/pci/drm/ttm/ttm_bo_api.h
sys/dev/pci/drm/ttm/ttm_bo_util.c
sys/dev/pci/drm/ttm/ttm_lock.c
sys/dev/pci/drm/ttm/ttm_lock.h

diff --git a/sys/dev/pci/drm/drmP.h b/sys/dev/pci/drm/drmP.h
index c36d8a9..a7164b9 100644
@@ -1,4 +1,4 @@
-/* $OpenBSD: drmP.h,v 1.191 2015/04/11 04:36:10 jsg Exp $ */
+/* $OpenBSD: drmP.h,v 1.192 2015/04/12 03:54:10 jsg Exp $ */
 /* drmP.h -- Private header for Direct Rendering Manager -*- linux-c -*-
  * Created: Mon Jan  4 10:05:05 1999 by faith@precisioninsight.com
  */
@@ -592,7 +592,7 @@ struct drm_device {
        uint32_t                 max_vblank_count;
        struct mutex             event_lock;
 
-       int                     *vbl_queue;
+       wait_queue_head_t       *vbl_queue;
        atomic_t                *_vblank_count;
        struct timeval          *_vblank_time;
        struct mutex             vblank_time_lock;
diff --git a/sys/dev/pci/drm/drm_irq.c b/sys/dev/pci/drm/drm_irq.c
index 516245c..1b6c360 100644
@@ -1,4 +1,4 @@
-/*     $OpenBSD: drm_irq.c,v 1.61 2015/04/06 12:25:10 jsg Exp $        */
+/*     $OpenBSD: drm_irq.c,v 1.62 2015/04/12 03:54:10 jsg Exp $        */
 /**
  * \file drm_irq.c
  * IRQ support
@@ -236,7 +236,7 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
 
        dev->num_crtcs = num_crtcs;
 
-       dev->vbl_queue = kmalloc(sizeof(int) * num_crtcs,
+       dev->vbl_queue = kmalloc(sizeof(wait_queue_head_t) * num_crtcs,
                                 GFP_KERNEL);
        if (!dev->vbl_queue)
                goto err;
@@ -281,6 +281,7 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
 
        /* Zero per-crtc vblank stuff */
        for (i = 0; i < num_crtcs; i++) {
+               init_waitqueue_head(&dev->vbl_queue[i]);
                atomic_set(&dev->_vblank_count[i], 0);
                atomic_set(&dev->vblank_refcount[i], 0);
        }
diff --git a/sys/dev/pci/drm/drm_linux.h b/sys/dev/pci/drm/drm_linux.h
index ca347fe..aafc466 100644
@@ -1,4 +1,4 @@
-/*     $OpenBSD: drm_linux.h,v 1.20 2015/04/11 14:39:37 jsg Exp $      */
+/*     $OpenBSD: drm_linux.h,v 1.21 2015/04/12 03:54:10 jsg Exp $      */
 /*
  * Copyright (c) 2013, 2014 Mark Kettenis
  *
@@ -34,7 +34,6 @@ typedef uint32_t __be32;
 
 typedef bus_addr_t dma_addr_t;
 typedef bus_addr_t phys_addr_t;
-typedef int wait_queue_head_t;
 
 #define __force
 #define __always_unused
@@ -238,6 +237,17 @@ spin_unlock_irqrestore(struct mutex *mtxp, __unused unsigned long flags)
 #define write_lock(rwl)                        rw_enter_write(rwl)
 #define write_unlock(rwl)              rw_exit_write(rwl)
 
+struct wait_queue_head {
+       struct mutex lock;
+};
+typedef struct wait_queue_head wait_queue_head_t;
+
+static inline void
+init_waitqueue_head(wait_queue_head_t *wq)
+{
+       mtx_init(&wq->lock, IPL_NONE);
+}
+
 #define wake_up(x)                     wakeup(x)
 #define wake_up_all(x)                 wakeup(x)
 #define wake_up_all_locked(x)          wakeup(x)
diff --git a/sys/dev/pci/drm/i915/i915_gem.c b/sys/dev/pci/drm/i915/i915_gem.c
index 0dc5341..9e47773 100644
@@ -1,4 +1,4 @@
-/*     $OpenBSD: i915_gem.c,v 1.89 2015/04/08 02:28:13 jsg Exp $       */
+/*     $OpenBSD: i915_gem.c,v 1.90 2015/04/12 03:54:10 jsg Exp $       */
 /*
  * Copyright (c) 2008-2009 Owain G. Ainsworth <oga@openbsd.org>
  *
@@ -4304,9 +4304,7 @@ i915_gem_load(struct drm_device *dev)
        i915_gem_reset_fences(dev);
 
        i915_gem_detect_bit_6_swizzle(dev);
-#if 0
        init_waitqueue_head(&dev_priv->pending_flip_queue);
-#endif
 
        dev_priv->mm.interruptible = true;
 
diff --git a/sys/dev/pci/drm/i915/intel_ringbuffer.c b/sys/dev/pci/drm/i915/intel_ringbuffer.c
index eb67065..e42e8b8 100644
@@ -1,4 +1,4 @@
-/*     $OpenBSD: intel_ringbuffer.c,v 1.25 2015/02/12 04:56:03 kettenis Exp $  */
+/*     $OpenBSD: intel_ringbuffer.c,v 1.26 2015/04/12 03:54:10 jsg Exp $       */
 /*
  * Copyright © 2008-2010 Intel Corporation
  *
@@ -1202,7 +1202,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
        ring->size = 32 * PAGE_SIZE;
        memset(ring->sync_seqno, 0, sizeof(ring->sync_seqno));
 
-//     init_waitqueue_head(&ring->irq_queue);
+       init_waitqueue_head(&ring->irq_queue);
 
        if (I915_NEED_GFX_HWS(dev)) {
                ret = init_status_page(ring);
diff --git a/sys/dev/pci/drm/i915/intel_ringbuffer.h b/sys/dev/pci/drm/i915/intel_ringbuffer.h
index 22459af..3512e49 100644
@@ -1,4 +1,4 @@
-/*     $OpenBSD: intel_ringbuffer.h,v 1.4 2015/04/03 13:10:59 jsg Exp $        */
+/*     $OpenBSD: intel_ringbuffer.h,v 1.5 2015/04/12 03:54:10 jsg Exp $        */
 
 #ifndef _INTEL_RINGBUFFER_H_
 #define _INTEL_RINGBUFFER_H_
@@ -131,7 +131,7 @@ struct  intel_ring_buffer {
        u32 outstanding_lazy_request;
        bool gpu_caches_dirty;
 
-       int irq_queue;
+       wait_queue_head_t irq_queue;
 
        /**
         * Do an explicit TLB flush before MI_SET_CONTEXT
diff --git a/sys/dev/pci/drm/radeon/radeon.h b/sys/dev/pci/drm/radeon/radeon.h
index e077633..59a1fff 100644
@@ -1,4 +1,4 @@
-/*     $OpenBSD: radeon.h,v 1.13 2015/04/11 05:10:13 jsg Exp $ */
+/*     $OpenBSD: radeon.h,v 1.14 2015/04/12 03:54:10 jsg Exp $ */
 /*
  * Copyright 2008 Advanced Micro Devices, Inc.
  * Copyright 2008 Red Hat Inc.
@@ -382,8 +382,7 @@ struct radeon_bo_list {
  * alignment).
  */
 struct radeon_sa_manager {
-       int                     wq;
-       struct mutex            wq_lock;
+       wait_queue_head_t       wq;
        struct radeon_bo        *bo;
        struct list_head        *hole;
        struct list_head        flist[RADEON_NUM_RINGS];
@@ -601,7 +600,7 @@ struct radeon_irq {
        atomic_t                        ring_int[RADEON_NUM_RINGS];
        bool                            crtc_vblank_int[RADEON_MAX_CRTCS];
        atomic_t                        pflip[RADEON_MAX_CRTCS];
-       int                             vblank_queue;
+       wait_queue_head_t               vblank_queue;
        bool                            hpd[RADEON_MAX_HPD_PINS];
        bool                            afmt[RADEON_MAX_AFMT_BLOCKS];
        union radeon_irq_stat_regs      stat_regs;
@@ -1623,7 +1622,7 @@ struct radeon_device {
        struct radeon_scratch           scratch;
        struct radeon_mman              mman;
        struct radeon_fence_driver      fence_drv[RADEON_NUM_RINGS];
-       int                             fence_queue;
+       wait_queue_head_t               fence_queue;
        struct rwlock                   ring_lock;
        struct radeon_ring              ring[RADEON_NUM_RINGS];
        bool                            ib_pool_ready;
diff --git a/sys/dev/pci/drm/radeon/radeon_device.c b/sys/dev/pci/drm/radeon/radeon_device.c
index a88b0bf..085d94c 100644
@@ -1,4 +1,4 @@
-/*     $OpenBSD: radeon_device.c,v 1.12 2015/04/06 12:25:10 jsg Exp $  */
+/*     $OpenBSD: radeon_device.c,v 1.13 2015/04/12 03:54:10 jsg Exp $  */
 /*
  * Copyright 2008 Advanced Micro Devices, Inc.
  * Copyright 2008 Red Hat Inc.
@@ -1023,9 +1023,7 @@ int radeon_device_init(struct radeon_device *rdev,
        rw_init(&rdev->gpu_clock_mutex, "gpuclk");
        rw_init(&rdev->pm.mclk_lock, "mclk");
        rw_init(&rdev->exclusive_lock, "rdnexc");
-#ifdef notyet
        init_waitqueue_head(&rdev->irq.vblank_queue);
-#endif
        r = radeon_gem_init(rdev);
        if (r)
                return r;
diff --git a/sys/dev/pci/drm/radeon/radeon_fence.c b/sys/dev/pci/drm/radeon/radeon_fence.c
index 76d76b1..92e14c4 100644
@@ -1,4 +1,4 @@
-/*     $OpenBSD: radeon_fence.c,v 1.5 2015/04/06 07:38:49 jsg Exp $    */
+/*     $OpenBSD: radeon_fence.c,v 1.6 2015/04/12 03:54:10 jsg Exp $    */
 /*
  * Copyright 2009 Jerome Glisse.
  * All Rights Reserved.
@@ -849,9 +849,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
 {
        int ring;
 
-#ifdef notyet
        init_waitqueue_head(&rdev->fence_queue);
-#endif
        for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
                radeon_fence_driver_init_ring(rdev, ring);
        }
diff --git a/sys/dev/pci/drm/radeon/radeon_sa.c b/sys/dev/pci/drm/radeon/radeon_sa.c
index e51ea99..bb2301c 100644
@@ -1,4 +1,4 @@
-/*     $OpenBSD: radeon_sa.c,v 1.7 2015/04/06 07:38:49 jsg Exp $       */
+/*     $OpenBSD: radeon_sa.c,v 1.8 2015/04/12 03:54:10 jsg Exp $       */
 /*
  * Copyright 2011 Red Hat Inc.
  * All Rights Reserved.
@@ -54,10 +54,7 @@ int radeon_sa_bo_manager_init(struct radeon_device *rdev,
 {
        int i, r;
 
-#ifdef notyet
        init_waitqueue_head(&sa_manager->wq);
-#endif
-       mtx_init(&sa_manager->wq_lock, IPL_NONE);
        sa_manager->bo = NULL;
        sa_manager->size = size;
        sa_manager->domain = domain;
@@ -334,7 +331,7 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
        INIT_LIST_HEAD(&(*sa_bo)->olist);
        INIT_LIST_HEAD(&(*sa_bo)->flist);
 
-       spin_lock(&sa_manager->wq_lock);
+       spin_lock(&sa_manager->wq.lock);
        do {
                for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                        fences[i] = NULL;
@@ -346,23 +343,23 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
 
                        if (radeon_sa_bo_try_alloc(sa_manager, *sa_bo,
                                                   size, align)) {
-                               spin_unlock(&sa_manager->wq_lock);
+                               spin_unlock(&sa_manager->wq.lock);
                                return 0;
                        }
 
                        /* see if we can skip over some allocations */
                } while (radeon_sa_bo_next_hole(sa_manager, fences, tries));
 
-               spin_unlock(&sa_manager->wq_lock);
+               spin_unlock(&sa_manager->wq.lock);
                r = radeon_fence_wait_any(rdev, fences, false);
-               spin_lock(&sa_manager->wq_lock);
+               spin_lock(&sa_manager->wq.lock);
                /* if we have nothing to wait for block */
                if (r == -ENOENT && block) {
                        r = 0;
                        while (r == 0) {
                                if (radeon_sa_event(sa_manager, size, align))
                                        break;
-                               error = msleep(&sa_manager->wq, &sa_manager->wq_lock,
+                               error = msleep(&sa_manager->wq, &sa_manager->wq.lock,
                                    PZERO | PCATCH, "samgr", 0);
                                if (error == ERESTART)
                                        error = EINTR; /* XXX */
@@ -375,7 +372,7 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
 
        } while (!r);
 
-       spin_unlock(&sa_manager->wq_lock);
+       spin_unlock(&sa_manager->wq.lock);
        kfree(*sa_bo);
        *sa_bo = NULL;
        return r;
@@ -391,7 +388,7 @@ void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo,
        }
 
        sa_manager = (*sa_bo)->manager;
-       spin_lock(&sa_manager->wq_lock);
+       spin_lock(&sa_manager->wq.lock);
        if (fence && !radeon_fence_signaled(fence)) {
                (*sa_bo)->fence = radeon_fence_ref(fence);
                list_add_tail(&(*sa_bo)->flist,
@@ -400,7 +397,7 @@ void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo,
                radeon_sa_bo_remove_locked(*sa_bo);
        }
        wake_up_all_locked(&sa_manager->wq);
-       spin_unlock(&sa_manager->wq_lock);
+       spin_unlock(&sa_manager->wq.lock);
        *sa_bo = NULL;
 }
 
@@ -410,7 +407,7 @@ void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
 {
        struct radeon_sa_bo *i;
 
-       spin_lock(&sa_manager->wq_lock);
+       spin_lock(&sa_manager->wq.lock);
        list_for_each_entry(i, &sa_manager->olist, olist) {
                if (&i->olist == sa_manager->hole) {
                        seq_printf(m, ">");
@@ -425,6 +422,6 @@ void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
                }
                seq_printf(m, "\n");
        }
-       spin_unlock(&sa_manager->wq_lock);
+       spin_unlock(&sa_manager->wq.lock);
 }
 #endif
diff --git a/sys/dev/pci/drm/ttm/ttm_bo.c b/sys/dev/pci/drm/ttm/ttm_bo.c
index 6eef2a4..535e536 100644
@@ -1,4 +1,4 @@
-/*     $OpenBSD: ttm_bo.c,v 1.14 2015/04/06 05:35:29 jsg Exp $ */
+/*     $OpenBSD: ttm_bo.c,v 1.15 2015/04/12 03:54:10 jsg Exp $ */
 /**************************************************************************
  *
  * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
@@ -1235,9 +1235,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
        refcount_init(&bo->list_kref, 1);
        atomic_set(&bo->cpu_writers, 0);
        atomic_set(&bo->reserved, 1);
-#ifdef notyet
        init_waitqueue_head(&bo->event_queue);
-#endif
        INIT_LIST_HEAD(&bo->lru);
        INIT_LIST_HEAD(&bo->ddestroy);
        INIT_LIST_HEAD(&bo->swap);
diff --git a/sys/dev/pci/drm/ttm/ttm_bo_api.h b/sys/dev/pci/drm/ttm/ttm_bo_api.h
index f0a09c7..f69e1af 100644
@@ -1,4 +1,4 @@
-/*     $OpenBSD: ttm_bo_api.h,v 1.2 2015/02/10 06:19:36 jsg Exp $      */
+/*     $OpenBSD: ttm_bo_api.h,v 1.3 2015/04/12 03:54:10 jsg Exp $      */
 /**************************************************************************
  *
  * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
@@ -207,7 +207,7 @@ struct ttm_buffer_object {
 
        u_int kref;
        u_int list_kref;
-       int event_queue;
+       wait_queue_head_t event_queue;
 
        /**
         * Members protected by the bo::reserved lock.
diff --git a/sys/dev/pci/drm/ttm/ttm_bo_util.c b/sys/dev/pci/drm/ttm/ttm_bo_util.c
index a38cb10..eafa80b 100644
@@ -1,4 +1,4 @@
-/*     $OpenBSD: ttm_bo_util.c,v 1.12 2015/04/08 04:03:06 jsg Exp $    */
+/*     $OpenBSD: ttm_bo_util.c,v 1.13 2015/04/12 03:54:10 jsg Exp $    */
 /**************************************************************************
  *
  * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
@@ -447,9 +447,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
         * TODO: Explicit member copy would probably be better here.
         */
 
-#ifdef notyet
        init_waitqueue_head(&fbo->event_queue);
-#endif
        INIT_LIST_HEAD(&fbo->ddestroy);
        INIT_LIST_HEAD(&fbo->lru);
        INIT_LIST_HEAD(&fbo->swap);
diff --git a/sys/dev/pci/drm/ttm/ttm_lock.c b/sys/dev/pci/drm/ttm/ttm_lock.c
index da2279c..d0dfc2f 100644
@@ -1,4 +1,4 @@
-/*     $OpenBSD: ttm_lock.c,v 1.4 2015/04/06 05:35:29 jsg Exp $        */
+/*     $OpenBSD: ttm_lock.c,v 1.5 2015/04/12 03:54:10 jsg Exp $        */
 /**************************************************************************
  *
  * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
@@ -44,9 +44,7 @@ void   ttm_write_lock_downgrade(struct ttm_lock *);
 void ttm_lock_init(struct ttm_lock *lock)
 {
        mtx_init(&lock->lock, IPL_NONE);
-#ifdef notyet
        init_waitqueue_head(&lock->queue);
-#endif
        lock->rw = 0;
        lock->flags = 0;
        lock->kill_takers = false;
diff --git a/sys/dev/pci/drm/ttm/ttm_lock.h b/sys/dev/pci/drm/ttm/ttm_lock.h
index c2f40cb..52341b6 100644
@@ -1,4 +1,4 @@
-/*     $OpenBSD: ttm_lock.h,v 1.1 2013/08/12 04:11:53 jsg Exp $        */
+/*     $OpenBSD: ttm_lock.h,v 1.2 2015/04/12 03:54:10 jsg Exp $        */
 /**************************************************************************
  *
  * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
@@ -68,7 +68,7 @@
 
 struct ttm_lock {
        struct ttm_base_object base;
-       int queue;
+       wait_queue_head_t queue;
        struct mutex lock;
        int32_t rw;
        uint32_t flags;