Reuse pmap_pte_insert() in pmap_clear_modify()
author    jca <jca@openbsd.org>
          Mon, 18 Sep 2023 17:01:41 +0000
committer jca <jca@openbsd.org>
          Mon, 18 Sep 2023 17:01:41 +0000
Stricter code which further reduces both the difference between the arm64
and riscv64 pmaps and the number of functions where the pted_pte member
is manipulated.

ok drahn@ kettenis@

sys/arch/riscv64/riscv64/pmap.c

index 25d1fda..50ee1ee 100644
@@ -1,4 +1,4 @@
-/*     $OpenBSD: pmap.c,v 1.33 2023/09/03 00:23:25 jca Exp $   */
+/*     $OpenBSD: pmap.c,v 1.34 2023/09/18 17:01:41 jca Exp $   */
 
 /*
  * Copyright (c) 2019-2020 Brian Bamsch <bbamsch@google.com>
@@ -1807,17 +1807,13 @@ int
 pmap_clear_modify(struct vm_page *pg)
 {
        struct pte_desc *pted;
-       pt_entry_t *pl3 = NULL;
 
        atomic_clearbits_int(&pg->pg_flags, PG_PMAP_MOD);
 
        mtx_enter(&pg->mdpage.pv_mtx);
        LIST_FOREACH(pted, &(pg->mdpage.pv_list), pted_pv_list) {
-               if (pmap_vp_lookup(pted->pted_pmap, pted->pted_va & ~PAGE_MASK, &pl3) == NULL)
-                       panic("failed to look up pte");
-               *pl3 &= ~PTE_W;
                pted->pted_pte &= ~PROT_WRITE;
-
+               pmap_pte_insert(pted);
                tlb_flush_page(pted->pted_pmap, pted->pted_va & ~PAGE_MASK);
        }
        mtx_leave(&pg->mdpage.pv_mtx);
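
For readers less familiar with this pmap layout, the userland sketch below
models why the rewrite is equivalent: pted_pte is the software copy of the
mapping, and pmap_pte_insert() is assumed to regenerate the hardware PTE from
that copy and store it back into the page table, so clearing PROT_WRITE in the
software copy and re-inserting yields the same hardware state as the old
direct "*pl3 &= ~PTE_W". Apart from the identifiers visible in the diff,
everything here (the struct layout, the bit values, the pted_pl3 field) is a
simplified stand-in, not the actual kernel code.

    /*
     * Simplified model of the idea behind the change; the real pted_pte
     * and pmap_pte_insert() live in sys/arch/riscv64/riscv64/pmap.c and
     * handle far more (attributes, ASIDs, locking, missing page tables).
     */
    #include <assert.h>
    #include <stdint.h>

    #define PROT_WRITE	0x2		/* stand-in for the VM write permission */
    #define PTE_W		(1UL << 2)	/* stand-in for the hardware write bit */

    struct pte_desc {
    	uint64_t	 pted_pte;	/* software copy of the mapping */
    	uint64_t	*pted_pl3;	/* where the hardware PTE is stored */
    };

    /* Model of pmap_pte_insert(): rebuild the hardware PTE from pted_pte. */
    static void
    pmap_pte_insert(struct pte_desc *pted)
    {
    	uint64_t pte = 0;

    	if (pted->pted_pte & PROT_WRITE)
    		pte |= PTE_W;
    	/* ... other permission/attribute bits would be derived here ... */
    	*pted->pted_pl3 = pte;
    }

    int
    main(void)
    {
    	uint64_t hw_pte = PTE_W;	/* mapping currently writable */
    	struct pte_desc pted = {
    		.pted_pte = PROT_WRITE,
    		.pted_pl3 = &hw_pte,
    	};

    	/* New pmap_clear_modify() path: clear the software bit, re-insert. */
    	pted.pted_pte &= ~PROT_WRITE;
    	pmap_pte_insert(&pted);

    	/* Same end state as the old direct "*pl3 &= ~PTE_W". */
    	assert((hw_pte & PTE_W) == 0);
    	return 0;
    }

The practical effect, as the commit message says, is that the translation from
pted_pte to the hardware PTE now happens in one place, pmap_pte_insert(),
bringing pmap_clear_modify() in line with how arm64 already does it.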