*/
return atomic_read(&dev->open_count) == 0;
}
+#endif /* __linux__ */
static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
+#ifdef notyet
.set_gpu_state = amdgpu_switcheroo_set_state,
.reprobe = NULL,
.can_switch = amdgpu_switcheroo_can_switch,
+#endif
};
-#endif /* __linux__ */
/**
* amdgpu_device_ip_set_clockgating_state - set the CG state
{
const struct intel_device_info *match_info =
(struct intel_device_info *)ent->driver_data;
+ struct pci_dev *pdev = i915->drm.pdev;
#ifdef __linux__
struct drm_i915_private *i915;
#endif
#ifndef _ACPI_VIDEO_H
#define _ACPI_VIDEO_H
-#define acpi_video_register()
-#define acpi_video_unregister()
+static inline void
+acpi_video_register(void)
+{
+}
+
+static inline void
+acpi_video_unregister(void)
+{
+}
#endif
#ifndef _ASM_IOSF_MBI_H
#define _ASM_IOSF_MBI_H
+struct notifier_block;
+
#define MBI_PMIC_BUS_ACCESS_BEGIN 1
#define MBI_PMIC_BUS_ACCESS_END 2
-#define iosf_mbi_assert_punit_acquired()
-#define iosf_mbi_punit_acquire()
-#define iosf_mbi_punit_release()
-#define iosf_mbi_register_pmic_bus_access_notifier(x) 0
-#define iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(x) 0
+static inline void
+iosf_mbi_assert_punit_acquired(void)
+{
+}
+
+static inline void
+iosf_mbi_punit_acquire(void)
+{
+}
+
+static inline void
+iosf_mbi_punit_release(void)
+{
+}
+
+static inline int
+iosf_mbi_register_pmic_bus_access_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int
+iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(struct notifier_block *nb)
+{
+ return 0;
+}
#endif
#ifndef _LINUX_AER_H
#define _LINUX_AER_H
-#define pci_enable_pcie_error_reporting(x)
+struct pci_dev;
+
+static inline void
+pci_enable_pcie_error_reporting(struct pci_dev *pdev)
+{
+}
#endif
#include <sys/task.h>
struct backlight_device;
+struct device;
struct backlight_properties {
int type;
void *data;
};
-#define bl_get_data(bd) (bd)->data
+static inline void *
+bl_get_data(struct backlight_device *bd)
+{
+ return bd->data;
+}
#define BACKLIGHT_RAW 0
#define BACKLIGHT_FIRMWARE 1
int backlight_enable(struct backlight_device *);
int backlight_disable(struct backlight_device *);
-#define devm_of_find_backlight(x) NULL
+static inline struct backlight_device *
+devm_of_find_backlight(struct device *dev)
+{
+ return NULL;
+}
#endif
#include <linux/preempt.h>
-#define local_bh_disable()
-#define local_bh_enable()
+static inline void
+local_bh_disable(void)
+{
+}
+
+static inline void
+local_bh_enable(void)
+{
+}
#endif
#ifndef _LINUX_CONSOLE_H
#define _LINUX_CONSOLE_H
-#define console_lock()
-#define console_trylock() 1
-#define console_unlock()
-#define vgacon_text_force() false
+#include <linux/types.h>
+
+static inline void
+console_lock(void)
+{
+}
+
+static inline int
+console_trylock(void)
+{
+ return 1;
+}
+
+static inline void
+console_unlock(void)
+{
+}
+
+static inline bool
+vgacon_text_force(void)
+{
+ return false;
+}
#endif
-/* $OpenBSD: dma-buf.h,v 1.3 2021/07/07 02:38:36 jsg Exp $ */
+/* $OpenBSD: dma-buf.h,v 1.4 2022/03/01 04:08:04 jsg Exp $ */
/*
* Copyright (c) 2018 Mark Kettenis
*
#include <linux/list.h>
struct dma_buf_ops;
+struct device;
struct dma_buf {
const struct dma_buf_ops *ops;
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *);
-#define dma_buf_attach(x, y) NULL
-#define dma_buf_detach(x, y) panic("dma_buf_detach")
+static inline struct dma_buf_attachment *
+dma_buf_attach(struct dma_buf *buf, struct device *dev)
+{
+ return NULL;
+}
+
+static inline void
+dma_buf_detach(struct dma_buf *buf, struct dma_buf_attachment *dba)
+{
+ panic("dma_buf_detach");
+}
#endif
enum dma_data_direction {
DMA_NONE,
+ DMA_BIDIRECTIONAL,
};
#endif
#include <linux/sizes.h>
#include <linux/scatterlist.h>
+#include <linux/dma-direction.h>
+
+struct device;
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : (1ULL<<(n)) -1)
-#define dma_set_coherent_mask(x, y) 0
-#define dma_set_max_seg_size(x, y) 0
-#define dma_set_mask(x, y) 0
-#define dma_set_mask_and_coherent(x, y) 0
-#define dma_addressing_limited(x) false
+static inline int
+dma_set_coherent_mask(struct device *dev, uint64_t m)
+{
+ return 0;
+}
+
+static inline int
+dma_set_max_seg_size(struct device *dev, unsigned int sz)
+{
+ return 0;
+}
+
+static inline int
+dma_set_mask(struct device *dev, uint64_t m)
+{
+ return 0;
+}
+
+static inline int
+dma_set_mask_and_coherent(void *dev, uint64_t m)
+{
+ return 0;
+}
+
+static inline bool
+dma_addressing_limited(void *dev)
+{
+ return false;
+}
+
-#define dma_map_page(dev, page, offset, size, dir) VM_PAGE_TO_PHYS(page)
-#define dma_unmap_page(dev, addr, size, dir) do {} while(0)
-#define dma_mapping_error(dev, addr) 0
-#define DMA_BIDIRECTIONAL 0
+static inline dma_addr_t
+dma_map_page(void *dev, struct vm_page *page, size_t offset,
+    size_t size, enum dma_data_direction dir)
+{
+	return VM_PAGE_TO_PHYS(page);
+}
+
+static inline void
+dma_unmap_page(void *dev, dma_addr_t addr, size_t size,
+    enum dma_data_direction dir)
+{
+}
+
+static inline int
+dma_mapping_error(void *dev, dma_addr_t addr)
+{
+ return 0;
+}
#endif
-/* $OpenBSD: i2c.h,v 1.4 2022/01/14 06:53:14 jsg Exp $ */
+/* $OpenBSD: i2c.h,v 1.5 2022/03/01 04:08:04 jsg Exp $ */
/*
* Copyright (c) 2017 Mark Kettenis
*
};
int i2c_transfer(struct i2c_adapter *, struct i2c_msg *, int);
-#define i2c_add_adapter(x) 0
-#define i2c_del_adapter(x)
-#define __i2c_transfer(adap, msgs, num) i2c_transfer(adap, msgs, num)
+
+static inline int
+__i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+{
+ return i2c_transfer(adap, msgs, num);
+}
+
+static inline int
+i2c_add_adapter(struct i2c_adapter *adap)
+{
+ return 0;
+}
+
+static inline void
+i2c_del_adapter(struct i2c_adapter *adap)
+{
+}
static inline void *
i2c_get_adapdata(struct i2c_adapter *adap)
-/* $OpenBSD: idr.h,v 1.4 2020/11/14 15:00:20 kettenis Exp $ */
+/* $OpenBSD: idr.h,v 1.5 2022/03/01 04:08:04 jsg Exp $ */
/*
* Copyright (c) 2016 Mark Kettenis
*
void idr_init(struct idr *);
void idr_preload(unsigned int);
int idr_alloc(struct idr *, void *, int, int, gfp_t);
-#define idr_preload_end()
void *idr_find(struct idr *, unsigned long);
void *idr_replace(struct idr *, void *, unsigned long);
void *idr_remove(struct idr *, unsigned long);
void idr_destroy(struct idr *);
int idr_for_each(struct idr *, int (*)(int, void *, void *), void *);
void *idr_get_next(struct idr *, int *);
-#define idr_init_base(idr, base) idr_init(idr)
#define idr_for_each_entry(idp, entry, id) \
for (id = 0; ((entry) = idr_get_next(idp, &(id))) != NULL; id++)
+static inline void
+idr_init_base(struct idr *idr, int base)
+{
+ idr_init(idr);
+}
+
+static inline void
+idr_preload_end(void)
+{
+}
+
static inline bool
idr_is_empty(const struct idr *idr)
{
#ifndef _LINUX_KMEMLEAK_H
#define _LINUX_KMEMLEAK_H
-#define kmemleak_update_trace(x)
+static inline void
+kmemleak_update_trace(const void *p)
+{
+}
#endif
-/* $OpenBSD: pci.h,v 1.9 2022/02/28 02:40:16 jsg Exp $ */
+/* $OpenBSD: pci.h,v 1.10 2022/03/01 04:08:04 jsg Exp $ */
/*
* Copyright (c) 2015 Mark Kettenis
*
return pcie_capability_write_word(pdev, PCI_PCIE_DCSR, val);
}
-#define pci_set_master(x)
-#define pci_clear_master(x)
+static inline void
+pci_set_master(struct pci_dev *pdev)
+{
+}
+
+static inline void
+pci_clear_master(struct pci_dev *pdev)
+{
+}
+
-#define pci_save_state(x)
-#define pci_restore_state(x)
+static inline void
+pci_save_state(struct pci_dev *pdev)
+{
+}
+
+static inline void
+pci_restore_state(struct pci_dev *pdev)
+{
+}
-#define pci_enable_msi(x) 0
-#define pci_disable_msi(x)
+static inline int
+pci_enable_msi(struct pci_dev *pdev)
+{
+ return 0;
+}
+
+static inline void
+pci_disable_msi(struct pci_dev *pdev)
+{
+}
typedef enum {
PCI_D0,
*width = pcie_get_width_cap(bdev);
}
-#define pci_save_state(x)
-#define pci_enable_device(x) 0
-#define pci_disable_device(x)
-#define pci_is_thunderbolt_attached(x) false
-#define pci_set_drvdata(x, y)
+static inline int
+pci_enable_device(struct pci_dev *pdev)
+{
+ return 0;
+}
+
+static inline void
+pci_disable_device(struct pci_dev *pdev)
+{
+}
+
+static inline bool
+pci_is_thunderbolt_attached(struct pci_dev *pdev)
+{
+ return false;
+}
+
+static inline void
+pci_set_drvdata(struct pci_dev *pdev, void *data)
+{
+}
static inline int
pci_domain_nr(struct pci_bus *pbus)
return 0;
}
+static inline void
+pci_unregister_driver(void *d)
+{
+}
+
#define PCI_CLASS_DISPLAY_VGA \
((PCI_CLASS_DISPLAY << 8) | PCI_SUBCLASS_DISPLAY_VGA)
#define PCI_CLASS_DISPLAY_OTHER \
((PCI_CLASS_DISPLAY << 8) | PCI_SUBCLASS_DISPLAY_MISC)
-#define pci_unregister_driver(x)
-
#endif /* _LINUX_PCI_H_ */
#define PM_QOS_DEFAULT_VALUE -1
-#define cpu_latency_qos_update_request(a, b)
-#define cpu_latency_qos_add_request(a, b)
-#define cpu_latency_qos_remove_request(a)
-#define cpu_latency_qos_request_active(a) false
+static inline void
+cpu_latency_qos_update_request(struct pm_qos_request *r, int v)
+{
+}
+
+static inline void
+cpu_latency_qos_add_request(struct pm_qos_request *r, int v)
+{
+}
+
+static inline void
+cpu_latency_qos_remove_request(struct pm_qos_request *r)
+{
+}
+
+static inline bool
+cpu_latency_qos_request_active(struct pm_qos_request *r)
+{
+ return false;
+}
#endif
#include <sys/device.h>
#include <linux/pm.h>
-#define pm_runtime_mark_last_busy(x)
-#define pm_runtime_use_autosuspend(x)
-#define pm_runtime_dont_use_autosuspend(x)
-#define pm_runtime_put_autosuspend(x)
-#define pm_runtime_set_autosuspend_delay(x, y)
-#define pm_runtime_set_active(x)
-#define pm_runtime_allow(x)
-#define pm_runtime_put_noidle(x)
-#define pm_runtime_forbid(x)
-#define pm_runtime_get_noresume(x)
-#define pm_runtime_put(x)
+static inline void
+pm_runtime_mark_last_busy(struct device *dev)
+{
+}
+
+static inline void
+pm_runtime_use_autosuspend(struct device *dev)
+{
+}
+
+static inline void
+pm_runtime_dont_use_autosuspend(struct device *dev)
+{
+}
+
+static inline void
+pm_runtime_put_autosuspend(struct device *dev)
+{
+}
+
+static inline void
+pm_runtime_set_autosuspend_delay(struct device *dev, int x)
+{
+}
+
+static inline void
+pm_runtime_set_active(struct device *dev)
+{
+}
+
+static inline void
+pm_runtime_allow(struct device *dev)
+{
+}
+
+static inline void
+pm_runtime_put_noidle(struct device *dev)
+{
+}
+
+static inline void
+pm_runtime_forbid(struct device *dev)
+{
+}
+
+static inline void
+pm_runtime_get_noresume(struct device *dev)
+{
+}
+
+static inline void
+pm_runtime_put(struct device *dev)
+{
+}
static inline int
pm_runtime_get_sync(struct device *dev)
#include <asm/preempt.h>
-#define preempt_enable()
-#define preempt_disable()
+static inline void
+preempt_enable(void)
+{
+}
+
+static inline void
+preempt_disable(void)
+{
+}
static inline bool
in_irq(void)
#endif
}
-#define in_interrupt() in_irq()
-#define in_task() (!in_irq())
-#define in_atomic() 0
+static inline bool
+in_interrupt(void)
+{
+ return in_irq();
+}
+
+static inline bool
+in_task(void)
+{
+ return !in_irq();
+}
+
+static inline bool
+in_atomic(void)
+{
+ return false;
+}
#endif
#include <sys/types.h>
#include <sys/systm.h>
-#define get_random_u32() arc4random()
-#define get_random_int() arc4random()
+static inline uint32_t
+get_random_u32(void)
+{
+ return arc4random();
+}
+
+static inline unsigned int
+get_random_int(void)
+{
+ return arc4random();
+}
static inline uint64_t
get_random_u64(void)
#include <sys/reboot.h>
-#define register_reboot_notifier(x)
-#define unregister_reboot_notifier(x)
+struct notifier_block;
#define SYS_RESTART 0
+static inline void
+register_reboot_notifier(struct notifier_block *nb)
+{
+}
+
+static inline void
+unregister_reboot_notifier(struct notifier_block *nb)
+{
+}
+
static inline void
orderly_poweroff(bool force)
{
* XXX For now, we don't want the shrinker to be too aggressive, so
* pretend we're not called from the pagedaemon even if we are.
*/
-#define current_is_kswapd() 0
+static inline int
+current_is_kswapd(void)
+{
+ return 0;
+}
#endif
#ifndef _LINUX_TIMEKEEPING_H
#define _LINUX_TIMEKEEPING_H
-#define ktime_get_boottime() ktime_get()
-#define ktime_get_boottime_ns() ktime_get_ns()
-#define get_seconds() gettime()
-
static inline time_t
ktime_get_real_seconds(void)
{
return ktime_get();
}
+static inline ktime_t
+ktime_get_boottime(void)
+{
+ return ktime_get();
+}
+
+static inline uint64_t
+ktime_get_boottime_ns(void)
+{
+ return ktime_get_ns();
+}
+
#endif
#include <linux/fb.h>
+#define VGA_SWITCHEROO_CAN_SWITCH_DDC 1
+
struct pci_dev;
+struct dev_pm_domain;
-#define vga_switcheroo_register_client(a, b, c) 0
-#define vga_switcheroo_unregister_client(a)
-#define vga_switcheroo_process_delayed_switch()
-#define vga_switcheroo_fini_domain_pm_ops(x)
-#define vga_switcheroo_handler_flags() 0
-#define vga_switcheroo_client_fb_set(a, b)
-#define vga_switcheroo_init_domain_pm_ops(a, b)
-#define VGA_SWITCHEROO_CAN_SWITCH_DDC 1
+struct vga_switcheroo_client_ops {
+};
+
+static inline int
+vga_switcheroo_register_client(struct pci_dev *pdev,
+ const struct vga_switcheroo_client_ops *ops, bool x)
+{
+ return 0;
+}
+
+static inline void
+vga_switcheroo_unregister_client(struct pci_dev *pdev)
+{
+}
+
+static inline void
+vga_switcheroo_process_delayed_switch(void)
+{
+}
+
+static inline void
+vga_switcheroo_fini_domain_pm_ops(struct device *dev)
+{
+}
+
+static inline int
+vga_switcheroo_handler_flags(void)
+{
+ return 0;
+}
+
+static inline void
+vga_switcheroo_client_fb_set(struct pci_dev *pdev, struct fb_info *fbi)
+{
+}
+
+static inline void
+vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *dom)
+{
+}
static inline int
vga_switcheroo_lock_ddc(struct pci_dev *pdev)
-/* $OpenBSD: workqueue.h,v 1.7 2022/01/14 06:53:14 jsg Exp $ */
+/* $OpenBSD: workqueue.h,v 1.8 2022/03/01 04:08:04 jsg Exp $ */
/*
* Copyright (c) 2015 Mark Kettenis
*
void flush_workqueue(struct workqueue_struct *);
bool flush_work(struct work_struct *);
bool flush_delayed_work(struct delayed_work *);
-#define flush_scheduled_work() flush_workqueue(system_wq)
-#define drain_workqueue(x) flush_workqueue(x)
+
+static inline void
+flush_scheduled_work(void)
+{
+ flush_workqueue(system_wq);
+}
+
+static inline void
+drain_workqueue(struct workqueue_struct *wq)
+{
+ flush_workqueue(wq);
+}
static inline void
destroy_work_on_stack(struct work_struct *work)
task_del(work->tq, &work->task);
}
-#define destroy_delayed_work_on_stack(x)
+static inline void
+destroy_delayed_work_on_stack(struct delayed_work *dwork)
+{
+}
struct rcu_work {
struct work_struct work;
#ifndef _MEDIA_CEC_NOTIFIER_H
#define _MEDIA_CEC_NOTIFIER_H
+struct cec_notifier;
+struct device;
+struct drm_connector;
+struct edid;
+
struct cec_connector_info {
};
-#define cec_notifier_set_phys_addr_from_edid(x, y)
-#define cec_notifier_phys_addr_invalidate(x)
-#define cec_notifier_put(x)
-#define cec_notifier_get_conn(x, y) NULL
-#define cec_fill_conn_info_from_drm(x, y)
-#define cec_notifier_conn_register(x, y, z) (void *)1
-#define cec_notifier_conn_unregister(x)
+static inline void
+cec_notifier_set_phys_addr_from_edid(struct cec_notifier *cn,
+ const struct edid *edid)
+{
+}
+
+static inline void
+cec_notifier_phys_addr_invalidate(struct cec_notifier *cn)
+{
+}
+
+static inline void
+cec_fill_conn_info_from_drm(struct cec_connector_info *ci,
+ const struct drm_connector *c)
+{
+}
+
+static inline struct cec_notifier *
+cec_notifier_conn_register(struct device *dev, const char *port,
+ const struct cec_connector_info *ci)
+{
+ return (void *)1;
+}
+
+static inline void
+cec_notifier_conn_unregister(struct cec_notifier *cn)
+{
+}
#endif
#ifndef _TRACE_EVENTS_DMA_FENCE_H
#define _TRACE_EVENTS_DMA_FENCE_H
-#define trace_dma_fence_destroy(a)
-#define trace_dma_fence_emit(a)
-#define trace_dma_fence_enable_signal(a)
-#define trace_dma_fence_init(a)
-#define trace_dma_fence_signaled(a)
-#define trace_dma_fence_wait_end(a)
-#define trace_dma_fence_wait_start(a)
+struct dma_fence;
+
+static inline void
+trace_dma_fence_destroy(struct dma_fence *f)
+{
+}
+
+static inline void
+trace_dma_fence_emit(struct dma_fence *f)
+{
+}
+
+static inline void
+trace_dma_fence_enable_signal(struct dma_fence *f)
+{
+}
+
+static inline void
+trace_dma_fence_init(struct dma_fence *f)
+{
+}
+
+static inline void
+trace_dma_fence_signaled(struct dma_fence *f)
+{
+}
+
+static inline void
+trace_dma_fence_wait_end(struct dma_fence *f)
+{
+}
+
+static inline void
+trace_dma_fence_wait_start(struct dma_fence *f)
+{
+}
#endif
void
radeondrm_attachhook(struct device *self)
{
- struct radeon_device *rdev = (struct radeon_device *)self;
- int r, acpi_status;
+ struct radeon_device *rdev = (struct radeon_device *)self;
+ struct drm_device *dev = rdev->ddev;
+ int r, acpi_status;
/* radeon_device_init should report only fatal error
* like memory allocation failure or iomapping failure,
return -ENOSYS;
#endif
} else {
-#ifdef notyet
size_t size = (1ULL << order) * PAGE_SIZE;
-#endif
addr = dma_map_page(pool->dev, p, 0, size, DMA_BIDIRECTIONAL);
if (dma_mapping_error(pool->dev, addr))