From: jsg
Date: Mon, 6 May 2024 01:59:03 +0000 (+0000)
Subject: mm, treewide: introduce NR_PAGE_ORDERS
X-Git-Url: http://artulab.com/gitweb/?a=commitdiff_plain;h=192fc1c096302d914bbc869bdcc5269f6d0065f7;p=openbsd

mm, treewide: introduce NR_PAGE_ORDERS

From Kirill A. Shutemov
ded1ffea52132e58eaaa7d4ea39477f911796a40 in linux-6.6.y/6.6.30
fd37721803c6e73619108f76ad2e12a9aa5fafaf in mainline linux
---

diff --git a/sys/dev/pci/drm/include/drm/ttm/ttm_pool.h b/sys/dev/pci/drm/include/drm/ttm/ttm_pool.h
index 0ffb28e8903..983fff1f731 100644
--- a/sys/dev/pci/drm/include/drm/ttm/ttm_pool.h
+++ b/sys/dev/pci/drm/include/drm/ttm/ttm_pool.h
@@ -81,7 +81,7 @@ struct ttm_pool {
 	bool use_dma32;
 
 	struct {
-		struct ttm_pool_type orders[MAX_ORDER + 1];
+		struct ttm_pool_type orders[NR_PAGE_ORDERS];
 	} caching[TTM_NUM_CACHING_TYPES];
 };
 
diff --git a/sys/dev/pci/drm/ttm/tests/ttm_device_test.c b/sys/dev/pci/drm/ttm/tests/ttm_device_test.c
index b1b423b68cd..19eaff22e6a 100644
--- a/sys/dev/pci/drm/ttm/tests/ttm_device_test.c
+++ b/sys/dev/pci/drm/ttm/tests/ttm_device_test.c
@@ -175,7 +175,7 @@ static void ttm_device_init_pools(struct kunit *test)
 
 	if (params->pools_init_expected) {
 		for (int i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
-			for (int j = 0; j <= MAX_ORDER; ++j) {
+			for (int j = 0; j < NR_PAGE_ORDERS; ++j) {
 				pt = pool->caching[i].orders[j];
 				KUNIT_EXPECT_PTR_EQ(test, pt.pool, pool);
 				KUNIT_EXPECT_EQ(test, pt.caching, i);
diff --git a/sys/dev/pci/drm/ttm/ttm_pool.c b/sys/dev/pci/drm/ttm/ttm_pool.c
index 8b6f53856dd..8c132111ab4 100644
--- a/sys/dev/pci/drm/ttm/ttm_pool.c
+++ b/sys/dev/pci/drm/ttm/ttm_pool.c
@@ -70,11 +70,11 @@ module_param(page_pool_size, ulong, 0644);
 
 static atomic_long_t allocated_pages;
 
-static struct ttm_pool_type global_write_combined[MAX_ORDER + 1];
-static struct ttm_pool_type global_uncached[MAX_ORDER + 1];
+static struct ttm_pool_type global_write_combined[NR_PAGE_ORDERS];
+static struct ttm_pool_type global_uncached[NR_PAGE_ORDERS];
 
-static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER + 1];
-static struct ttm_pool_type global_dma32_uncached[MAX_ORDER + 1];
+static struct ttm_pool_type global_dma32_write_combined[NR_PAGE_ORDERS];
+static struct ttm_pool_type global_dma32_uncached[NR_PAGE_ORDERS];
 
 static spinlock_t shrinker_lock;
 static struct list_head shrinker_list;
@@ -712,7 +712,7 @@ void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
 
 	if (use_dma_alloc || nid != NUMA_NO_NODE) {
 		for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
-			for (j = 0; j <= MAX_ORDER; ++j)
+			for (j = 0; j < NR_PAGE_ORDERS; ++j)
 				ttm_pool_type_init(&pool->caching[i].orders[j],
 						   pool, i, j);
 	}
@@ -733,7 +733,7 @@ void ttm_pool_fini(struct ttm_pool *pool)
 
 	if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE) {
 		for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
-			for (j = 0; j <= MAX_ORDER; ++j)
+			for (j = 0; j < NR_PAGE_ORDERS; ++j)
 				ttm_pool_type_fini(&pool->caching[i].orders[j]);
 	}
 
@@ -795,7 +795,7 @@ static void ttm_pool_debugfs_header(struct seq_file *m)
 	unsigned int i;
 
 	seq_puts(m, "\t ");
-	for (i = 0; i <= MAX_ORDER; ++i)
+	for (i = 0; i < NR_PAGE_ORDERS; ++i)
 		seq_printf(m, " ---%2u---", i);
 	seq_puts(m, "\n");
 }
@@ -806,7 +806,7 @@ static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
 {
 	unsigned int i;
 
-	for (i = 0; i <= MAX_ORDER; ++i)
+	for (i = 0; i < NR_PAGE_ORDERS; ++i)
 		seq_printf(m, " %8u", ttm_pool_type_count(&pt[i]));
 	seq_puts(m, "\n");
 }
@@ -915,7 +915,7 @@ int ttm_pool_mgr_init(unsigned long num_pages)
 	mtx_init(&shrinker_lock, IPL_NONE);
 	INIT_LIST_HEAD(&shrinker_list);
 
-	for (i = 0; i <= MAX_ORDER; ++i) {
+	for (i = 0; i < NR_PAGE_ORDERS; ++i) {
 		ttm_pool_type_init(&global_write_combined[i], NULL,
 				   ttm_write_combined, i);
 		ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i);
@@ -948,7 +948,7 @@ void ttm_pool_mgr_fini(void)
 {
 	unsigned int i;
 
-	for (i = 0; i <= MAX_ORDER; ++i) {
+	for (i = 0; i < NR_PAGE_ORDERS; ++i) {
 		ttm_pool_type_fini(&global_write_combined[i]);
 		ttm_pool_type_fini(&global_uncached[i]);
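
For reference, the convention this diff relies on: the mainline commit introduces
NR_PAGE_ORDERS as the number of valid page orders (MAX_ORDER + 1, since orders run
0..MAX_ORDER inclusive), so per-order arrays and loops can use the usual exclusive
bound instead of the repeated "[MAX_ORDER + 1]" / "<= MAX_ORDER" pattern. A minimal
sketch of that convention follows; where the definition lands in the OpenBSD drm
compat headers is not shown in this diff, and the array and function names in the
sketch are made up for illustration.

	/*
	 * Sketch only: NR_PAGE_ORDERS as introduced by the mainline commit
	 * (defined next to MAX_ORDER in include/linux/mmzone.h there; the
	 * corresponding spot in the OpenBSD drm compat headers is not shown
	 * in this diff).
	 */
	#define NR_PAGE_ORDERS (MAX_ORDER + 1)	/* orders 0..MAX_ORDER inclusive */

	/*
	 * Hypothetical per-order array plus init loop in the new style;
	 * "example_orders" and "example_init" are illustrative names, not
	 * symbols touched by this diff. ttm_pool_type_init() is used with
	 * the same (pt, pool, caching, order) arguments seen above.
	 */
	static struct ttm_pool_type example_orders[NR_PAGE_ORDERS];

	static void example_init(void)
	{
		unsigned int i;

		for (i = 0; i < NR_PAGE_ORDERS; ++i)	/* was: i <= MAX_ORDER */
			ttm_pool_type_init(&example_orders[i], NULL,
					   ttm_uncached, i);
	}

Sizing arrays and bounding loops with a count rather than a maximum index avoids the
easy-to-miss "+ 1" at every declaration and the "<=" comparison in every loop, which
is what each hunk above converts.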