From: jsg
Date: Thu, 8 Jul 2021 13:07:53 +0000 (+0000)
Subject: revert drm_mm to our previous drm 5.7.y version
X-Git-Url: http://artulab.com/gitweb/?a=commitdiff_plain;h=9e16f3f26eafbf789fc91f3edf868230bb821852;p=openbsd

revert drm_mm to our previous drm 5.7.y version

Josh Rickmar and several developers reported X would sometimes fail to
start on laptops with raven ridge and picasso apus using amdgpu.

drm:pid71504:amdgpu_bo_pin_restricted *ERROR* 0xffff800001836e18 pin failed
[drm] *ERROR* Failed to pin framebuffer with error -12
ttm_bo_mem_space() ret -ENOMEM
ttm_bo_move_buffer()
ttm_bo_validate()
amdgpu_bo_pin_restricted()

By all reports this does not occur with the old drm_mm.
---

diff --git a/sys/dev/pci/drm/drm_mm.c b/sys/dev/pci/drm/drm_mm.c
index 79cfcb9113b..17c3fffcd0d 100644
--- a/sys/dev/pci/drm/drm_mm.c
+++ b/sys/dev/pci/drm/drm_mm.c
@@ -240,6 +240,20 @@ static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
 #endif
 }
 
+#define DRM_RB_INSERT(root, member, expr) do { \
+	struct rb_node **link = &root.rb_node, *rb = NULL; \
+	u64 x = expr(node); \
+	while (*link) { \
+		rb = *link; \
+		if (x < expr(rb_entry(rb, struct drm_mm_node, member))) \
+			link = &rb->rb_left; \
+		else \
+			link = &rb->rb_right; \
+	} \
+	rb_link_node(&node->member, rb, link); \
+	rb_insert_color(&node->member, &root); \
+} while (0)
+
 #define HOLE_SIZE(NODE) ((NODE)->hole_size)
 #define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE))
 
@@ -269,48 +283,16 @@ static void insert_hole_size(struct rb_root_cached *root,
 	rb_insert_color_cached(&node->rb_hole_size, root, first);
 }
 
-#ifdef notyet
-RB_DECLARE_CALLBACKS_MAX(static, augment_callbacks,
-			 struct drm_mm_node, rb_hole_addr,
-			 u64, subtree_max_hole, HOLE_SIZE)
-#endif
-
-static void insert_hole_addr(struct rb_root *root, struct drm_mm_node *node)
-{
-	struct rb_node **link = &root->rb_node, *rb_parent = NULL;
-	u64 start = HOLE_ADDR(node), subtree_max_hole = node->subtree_max_hole;
-	struct drm_mm_node *parent;
-
-	while (*link) {
-		rb_parent = *link;
-		parent = rb_entry(rb_parent, struct drm_mm_node, rb_hole_addr);
-		if (parent->subtree_max_hole < subtree_max_hole)
-			parent->subtree_max_hole = subtree_max_hole;
-		if (start < HOLE_ADDR(parent))
-			link = &parent->rb_hole_addr.rb_left;
-		else
-			link = &parent->rb_hole_addr.rb_right;
-	}
-
-	rb_link_node(&node->rb_hole_addr, rb_parent, link);
-#ifdef notyet
-	rb_insert_augmented(&node->rb_hole_addr, root, &augment_callbacks);
-#else
-	rb_insert_color(&node->rb_hole_addr, root);
-#endif
-}
-
 static void add_hole(struct drm_mm_node *node)
 {
 	struct drm_mm *mm = node->mm;
 
 	node->hole_size =
 		__drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
-	node->subtree_max_hole = node->hole_size;
 	DRM_MM_BUG_ON(!drm_mm_hole_follows(node));
 
 	insert_hole_size(&mm->holes_size, node);
-	insert_hole_addr(&mm->holes_addr, node);
+	DRM_RB_INSERT(mm->holes_addr, rb_hole_addr, HOLE_ADDR);
 
 	list_add(&node->hole_stack, &mm->hole_stack);
 }
@@ -321,14 +303,8 @@ static void rm_hole(struct drm_mm_node *node)
 
 	list_del(&node->hole_stack);
 	rb_erase_cached(&node->rb_hole_size, &node->mm->holes_size);
-#ifdef notyet
-	rb_erase_augmented(&node->rb_hole_addr, &node->mm->holes_addr,
-			   &augment_callbacks);
-#else
 	rb_erase(&node->rb_hole_addr, &node->mm->holes_addr);
-#endif
 	node->hole_size = 0;
-	node->subtree_max_hole = 0;
 
 	DRM_MM_BUG_ON(drm_mm_hole_follows(node));
 }
@@ -343,6 +319,11 @@ static inline struct drm_mm_node *rb_hole_addr_to_node(struct rb_node *rb)
 	return rb_entry_safe(rb, struct drm_mm_node, rb_hole_addr);
 }
 
+static inline u64 rb_hole_size(struct rb_node *rb)
+{
+	return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
+}
+
 static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
 {
 	struct rb_node *rb = mm->holes_size.rb_root.rb_node;
@@ -363,12 +344,7 @@ static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
 	return best;
 }
 
-static bool usable_hole_addr(struct rb_node *rb, u64 size)
-{
-	return rb && rb_hole_addr_to_node(rb)->subtree_max_hole >= size;
-}
-
-static struct drm_mm_node *find_hole_addr(struct drm_mm *mm, u64 addr, u64 size)
+static struct drm_mm_node *find_hole(struct drm_mm *mm, u64 addr)
 {
 	struct rb_node *rb = mm->holes_addr.rb_node;
 	struct drm_mm_node *node = NULL;
@@ -376,9 +352,6 @@ static struct drm_mm_node *find_hole_addr(struct drm_mm *mm, u64 addr, u64 size)
 	while (rb) {
 		u64 hole_start;
 
-		if (!usable_hole_addr(rb, size))
-			break;
-
 		node = rb_hole_addr_to_node(rb);
 		hole_start = __drm_mm_hole_node_start(node);
 
@@ -404,10 +377,10 @@ first_hole(struct drm_mm *mm,
 		return best_hole(mm, size);
 
 	case DRM_MM_INSERT_LOW:
-		return find_hole_addr(mm, start, size);
+		return find_hole(mm, start);
 
 	case DRM_MM_INSERT_HIGH:
-		return find_hole_addr(mm, end, size);
+		return find_hole(mm, end);
 
 	case DRM_MM_INSERT_EVICT:
 		return list_first_entry_or_null(&mm->hole_stack,
@@ -416,45 +389,9 @@ first_hole(struct drm_mm *mm,
 	}
 }
 
-/**
- * DECLARE_NEXT_HOLE_ADDR - macro to declare next hole functions
- * @name: name of function to declare
- * @first: first rb member to traverse (either rb_left or rb_right).
- * @last: last rb member to traverse (either rb_right or rb_left).
- *
- * This macro declares a function to return the next hole of the addr rb tree.
- * While traversing the tree we take the searched size into account and only
- * visit branches with potential big enough holes.
- */
-
-#define DECLARE_NEXT_HOLE_ADDR(name, first, last)			\
-static struct drm_mm_node *name(struct drm_mm_node *entry, u64 size)	\
-{									\
-	struct rb_node *parent, *node = &entry->rb_hole_addr;		\
-									\
-	if (!entry || RB_EMPTY_NODE(node))				\
-		return NULL;						\
-									\
-	if (usable_hole_addr(node->first, size)) {			\
-		node = node->first;					\
-		while (usable_hole_addr(node->last, size))		\
-			node = node->last;				\
-		return rb_hole_addr_to_node(node);			\
-	}								\
-									\
-	while ((parent = rb_parent(node)) && node == parent->first)	\
-		node = parent;						\
-									\
-	return rb_hole_addr_to_node(parent);				\
-}
-
-DECLARE_NEXT_HOLE_ADDR(next_hole_high_addr, rb_left, rb_right)
-DECLARE_NEXT_HOLE_ADDR(next_hole_low_addr, rb_right, rb_left)
-
 static struct drm_mm_node *
 next_hole(struct drm_mm *mm,
 	  struct drm_mm_node *node,
-	  u64 size,
 	  enum drm_mm_insert_mode mode)
 {
 	switch (mode) {
@@ -463,10 +400,10 @@ next_hole(struct drm_mm *mm,
 		return rb_hole_size_to_node(rb_prev(&node->rb_hole_size));
 
 	case DRM_MM_INSERT_LOW:
-		return next_hole_low_addr(node, size);
+		return rb_hole_addr_to_node(rb_next(&node->rb_hole_addr));
 
 	case DRM_MM_INSERT_HIGH:
-		return next_hole_high_addr(node, size);
+		return rb_hole_addr_to_node(rb_prev(&node->rb_hole_addr));
 
 	case DRM_MM_INSERT_EVICT:
 		node = list_next_entry(node, hole_stack);
@@ -500,7 +437,7 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
 		return -ENOSPC;
 
 	/* Find the relevant hole to add our node to */
-	hole = find_hole_addr(mm, node->start, 0);
+	hole = find_hole(mm, node->start);
 	if (!hole)
 		return -ENOSPC;
 
@@ -580,7 +517,7 @@ int drm_mm_insert_node_in_range(struct drm_mm * const mm,
 	remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
 	for (hole = first_hole(mm, range_start, range_end, size, mode);
 	     hole;
-	     hole = once ? NULL : next_hole(mm, hole, size, mode)) {
+	     hole = once ? NULL : next_hole(mm, hole, mode)) {
 		u64 hole_start = __drm_mm_hole_node_start(hole);
 		u64 hole_end = hole_start + hole->hole_size;
 		u64 adj_start, adj_end;
diff --git a/sys/dev/pci/drm/include/drm/drm_mm.h b/sys/dev/pci/drm/include/drm/drm_mm.h
index 9b4292f229c..ee8b0e80ca9 100644
--- a/sys/dev/pci/drm/include/drm/drm_mm.h
+++ b/sys/dev/pci/drm/include/drm/drm_mm.h
@@ -168,7 +168,6 @@ struct drm_mm_node {
 	struct rb_node rb_hole_addr;
 	u64 __subtree_last;
 	u64 hole_size;
-	u64 subtree_max_hole;
 	unsigned long flags;
 #define DRM_MM_NODE_ALLOCATED_BIT	0
 #define DRM_MM_NODE_SCANNED_BIT		1
@@ -338,7 +337,7 @@ static inline u64 drm_mm_hole_node_end(const struct drm_mm_node *hole_node)
 
 /**
  * drm_mm_nodes - list of nodes under the drm_mm range manager
- * @mm: the struct drm_mm range manager
+ * @mm: the struct drm_mm range manger
  *
  * As the drm_mm range manager hides its node_list deep with its
  * structure, extracting it looks painful and repetitive. This is
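
For context on what the revert changes: with the old drm_mm restored here, the DRM_MM_INSERT_LOW path walks the address-ordered hole tree (mm->holes_addr) with rb_next() and lets the insert loop test each hole's size, whereas the removed code pruned that walk using the per-node subtree_max_hole augmentation. Below is a minimal userspace sketch of that address-ordered first-fit walk; it is not the kernel code, and the struct hole type, the sorted array standing in for the rbtree, and the first_fit_low() helper are all invented for illustration.

/*
 * Minimal userspace sketch, NOT the kernel code: it models the strategy the
 * reverted drm_mm uses for DRM_MM_INSERT_LOW, i.e. visit holes in increasing
 * address order and take the first one that is large enough.  A sorted array
 * stands in for the mm->holes_addr rbtree, and every name here is made up
 * for the example.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct hole {
	uint64_t start;		/* address where the free range begins */
	uint64_t size;		/* length of the free range in bytes */
};

/* holes[] is sorted by start, mirroring the address-ordered tree. */
static const struct hole *
first_fit_low(const struct hole *holes, size_t n, uint64_t size)
{
	/*
	 * Plain linear walk in address order, as rb_next() gives the kernel;
	 * the removed augmented-tree code instead skipped subtrees whose
	 * subtree_max_hole showed they could not fit the request.
	 */
	for (size_t i = 0; i < n; i++)
		if (holes[i].size >= size)
			return &holes[i];
	return NULL;		/* nothing fits; the caller fails the allocation */
}

int
main(void)
{
	const struct hole holes[] = {
		{ 0x1000, 0x0800 },
		{ 0x4000, 0x2000 },
		{ 0x9000, 0x4000 },
	};
	const struct hole *h = first_fit_low(holes, 3, 0x1000);

	if (h != NULL)
		printf("placed at 0x%llx\n", (unsigned long long)h->start);
	return 0;
}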