From: jsg
Date: Fri, 12 May 2023 11:16:58 +0000 (+0000)
Subject: drm/ttm: optimize pool allocations a bit v2
X-Git-Url: http://artulab.com/gitweb/?a=commitdiff_plain;h=d641b4f9832202e5774e762676f50de17411b5f5;p=openbsd

drm/ttm: optimize pool allocations a bit v2

From Christian Koenig
5e5a4185c66f9478a8cb2f74eed1a5b5a5000b13 in linux-6.1.y/6.1.28
735c466465eba51deaee3012d8403c10fc7c8c03 in mainline linux
---

diff --git a/sys/dev/pci/drm/ttm/ttm_pool.c b/sys/dev/pci/drm/ttm/ttm_pool.c
index e47829c9651..799112bec2b 100644
--- a/sys/dev/pci/drm/ttm/ttm_pool.c
+++ b/sys/dev/pci/drm/ttm/ttm_pool.c
@@ -485,6 +485,31 @@ static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct vm_page *p
 
 #endif /* notyet */
 
+/* Called when we got a page, either from a pool or newly allocated */
+static int ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order,
+				   struct vm_page *p, dma_addr_t **dma_addr,
+				   unsigned long *num_pages,
+				   struct vm_page ***pages,
+				   unsigned long **orders)
+{
+	unsigned int i;
+	int r;
+
+	if (*dma_addr) {
+		r = ttm_pool_map(pool, order, p, dma_addr);
+		if (r)
+			return r;
+	}
+
+	*num_pages -= 1 << order;
+	for (i = 1 << order; i; --i, ++(*pages), ++p, ++(*orders)) {
+		**pages = p;
+		**orders = order;
+	}
+
+	return 0;
+}
+
 /**
  * ttm_pool_alloc - Fill a ttm_tt object
  *
@@ -529,46 +554,56 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
 	for (order = min_t(unsigned int, MAX_ORDER - 1, __fls(num_pages));
 	     num_pages;
 	     order = min_t(unsigned int, order, __fls(num_pages))) {
-		bool apply_caching = false;
 		struct ttm_pool_type *pt;
 
 		pt = ttm_pool_select_type(pool, tt->caching, order);
 		p = pt ? ttm_pool_type_take(pt) : NULL;
 		if (p) {
-			apply_caching = true;
-		} else {
-			p = ttm_pool_alloc_page(pool, gfp_flags, order, tt->dmat);
-			if (p && PageHighMem(p))
-				apply_caching = true;
-		}
-
-		if (!p) {
-			if (order) {
-				--order;
-				continue;
-			}
-			r = -ENOMEM;
-			goto error_free_all;
-		}
-
-		if (apply_caching) {
 			r = ttm_pool_apply_caching(caching, pages,
 						   tt->caching);
 			if (r)
 				goto error_free_page;
-			caching = pages + (1 << order);
+
+			do {
+				r = ttm_pool_page_allocated(pool, order, p,
+							    &dma_addr,
+							    &num_pages,
+							    &pages, &orders);
+				if (r)
+					goto error_free_page;
+
+				if (num_pages < (1 << order))
+					break;
+
+				p = ttm_pool_type_take(pt);
+			} while (p);
+			caching = pages;
 		}
 
-		if (dma_addr) {
-			r = ttm_pool_map(pool, order, p, &dma_addr);
+		while (num_pages >= (1 << order) &&
+		       (p = ttm_pool_alloc_page(pool, gfp_flags, order, tt->dmat))) {
+
+			if (PageHighMem(p)) {
+				r = ttm_pool_apply_caching(caching, pages,
+							   tt->caching);
+				if (r)
+					goto error_free_page;
+			}
+			r = ttm_pool_page_allocated(pool, order, p, &dma_addr,
+						    &num_pages, &pages, &orders);
 			if (r)
 				goto error_free_page;
+			if (PageHighMem(p))
+				caching = pages;
 		}
 
-		num_pages -= 1 << order;
-		for (i = 1 << order; i; --i) {
-			*(pages++) = p++;
-			*(orders++) = order;
+		if (!p) {
+			if (order) {
+				--order;
+				continue;
+			}
+			r = -ENOMEM;
+			goto error_free_all;
 		}
 	}
 
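
Note on the change (illustration only, not part of the patch): the rewritten loop in ttm_pool_alloc() now drains the pool of ready-made chunks at the current order first, then keeps allocating fresh chunks at that same order for as long as that succeeds, and only falls back to a smaller order once neither source delivers. The stand-alone C sketch below models that strategy under stated assumptions: toy_pool_take(), toy_alloc_page(), toy_chunk_allocated() and TOY_MAX_ORDER are invented stand-ins rather than the TTM API, caching and DMA mapping are left out, and the fallback test is simplified to a single "made progress" flag.

#include <stdbool.h>
#include <stdio.h>

#define TOY_MAX_ORDER 5			/* toy value, not the kernel's MAX_ORDER */

/* toy "pool": how many recycled chunks of each order are sitting around */
static unsigned int pool_count[TOY_MAX_ORDER] = { 0, 2, 1, 0, 0 };

/* toy system allocator: a budget of pages it is willing to hand out */
static unsigned long system_budget = 10;

static bool toy_pool_take(unsigned int order)
{
	if (!pool_count[order])
		return false;
	--pool_count[order];
	return true;
}

static bool toy_alloc_page(unsigned int order)
{
	if (system_budget < (1ul << order))
		return false;
	system_budget -= 1ul << order;
	return true;
}

/* rough analogue of ttm_pool_page_allocated(): account for one 2^order chunk */
static void toy_chunk_allocated(unsigned int order, unsigned long *num_pages,
				unsigned long *filled)
{
	*num_pages -= 1ul << order;
	*filled += 1ul << order;
}

int main(void)
{
	unsigned long num_pages = 13, filled = 0;
	unsigned int order = TOY_MAX_ORDER - 1;

	while (num_pages) {
		bool progress = false;

		/* never use an order bigger than what is still needed */
		while (order && (1ul << order) > num_pages)
			--order;

		/* 1) drain the pool at this order as far as possible */
		while (num_pages >= (1ul << order) && toy_pool_take(order)) {
			toy_chunk_allocated(order, &num_pages, &filled);
			progress = true;
		}

		/* 2) then allocate fresh chunks at the same order */
		while (num_pages >= (1ul << order) && toy_alloc_page(order)) {
			toy_chunk_allocated(order, &num_pages, &filled);
			progress = true;
		}

		/* 3) nothing more at this order: fall back to a smaller one */
		if (!progress && num_pages) {
			if (!order) {
				fprintf(stderr, "out of memory, %lu pages short\n",
					num_pages);
				return 1;
			}
			--order;
		}
	}

	printf("filled %lu pages\n", filled);
	return 0;
}

Compared with the old loop, which handled exactly one chunk per iteration and redid the caching and order bookkeeping each time, batching both the pool takes and the fresh allocations per order keeps that bookkeeping to once per order, which is the optimization the patch subject refers to.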